diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d3b284b98ba6..9711fb85683c 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -311,6 +311,22 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define virt_to_pfn(x)	__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
 #define sym_to_pfn(x)	__phys_to_pfn(__pa_symbol(x))
 
+/*
+ * With non-canonical CFI jump tables, the compiler replaces function
+ * address references with the address of the function's CFI jump
+ * table entry. This results in __pa_symbol(function) returning the
+ * physical address of the jump table entry, which can lead to address
+ * space confusion since the jump table points to the function's
+ * virtual address. Therefore, use inline assembly to ensure we are
+ * always taking the address of the actual function.
+ */
+#define __pa_function(x) ({					\
+	unsigned long addr;					\
+	asm("adrp %0, " __stringify(x) "\n\t"			\
+	    "add %0, %0, :lo12:" __stringify(x) : "=r" (addr));	\
+	__pa_symbol(addr);					\
+})
+
 /*
  * virt_to_page(k)	convert a _valid_ virtual address to struct page *
  * virt_addr_valid(k)	indicates whether a virtual address is valid
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 1b7aa97b9c9a..95d5d759f5c0 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -149,7 +149,7 @@ static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
 
 	phys_addr_t pgd_phys = virt_to_phys(pgdp);
 
-	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
+	replace_phys = (void *)__pa_function(idmap_cpu_replace_ttbr1);
 
 	cpu_install_idmap();
 	replace_phys(pgd_phys);
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index fad90e4935fb..f300e7577688 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -25,7 +25,7 @@ static inline void __noreturn cpu_soft_restart(unsigned long entry,
 
 	unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
 		is_hyp_mode_available();
-	restart = (void *)__pa_symbol(__cpu_soft_restart);
+	restart = (void *)__pa_function(__cpu_soft_restart);
 
 	cpu_install_idmap();
 	restart(el2_switch, entry, arg0, arg1, arg2);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 301647d08a28..eb18d9aed1c7 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -973,7 +973,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	if (kpti_applied)
 		return;
 
-	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+	remap_fn = (void *)__pa_function(idmap_kpti_install_ng_mappings);
 
 	cpu_install_idmap();
 	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index e8edbf13302a..c150cde3454c 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -46,7 +46,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
 
 static int cpu_psci_cpu_boot(unsigned int cpu)
 {
-	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
+	int err = psci_ops.cpu_on(cpu_logical_map(cpu),
+				  __pa_function(secondary_entry));
 	if (err)
 		pr_err("failed to boot CPU%d (%d)\n", cpu, err);
 
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 93034651c87e..1ba281dc4fae 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -99,7 +99,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
 	 * boot-loader's endianess before jumping. This is mandated by
 	 * the boot protocol.
 	 */
-	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
+	writeq_relaxed(__pa_function(secondary_holding_pen), release_addr);
 	__flush_dcache_area((__force void *)release_addr,
 			    sizeof(*release_addr));
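
For illustration only (not part of the patch): a minimal sketch of the failure
mode the new macro guards against, based on the comment added to memory.h
above. The symbol idmap_example_fn is hypothetical; the real callers are the
idmap-resident functions converted in this patch.

	extern void idmap_example_fn(void);

	/*
	 * With non-canonical CFI jump tables, a C-level reference to
	 * idmap_example_fn is rewritten by the compiler to point at the
	 * function's CFI jump table entry, so this can return the
	 * physical address of the jump table entry rather than of the
	 * function itself:
	 */
	phys_addr_t jt_phys = __pa_symbol(idmap_example_fn);

	/*
	 * __pa_function materialises the symbol address with an explicit
	 * adrp/add pair, which the assembler and linker resolve against
	 * the actual function, bypassing the compiler's rewrite:
	 */
	phys_addr_t fn_phys = __pa_function(idmap_example_fn);

The distinction matters at these call sites because each one branches to the
physical address while running from the idmap: branching to the jump table
entry's physical address would execute a stub that jumps back to the
function's virtual address, which is the address space confusion the memory.h
comment describes.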