ANDROID: arm64: add __pa_function

We use non-canonical CFI jump tables with CONFIG_CFI_CLANG, which
means the compiler replaces function address references with the
address of the function's CFI jump table entry. This results in
__pa_symbol(function) returning the physical address of the jump
table entry, which can lead to address space confusion since the
jump table points to a virtual address.

This change adds a __pa_function macro, which uses inline assembly to
take the address of the actual function instead.
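
For illustration (not part of this patch), a non-canonical jump table
entry is essentially a single branch to the real function, so a C-level
reference to the function resolves to the address of that branch; the
exact symbol naming below is an assumption:

	fn.cfi_jt:			// what a reference to fn yields
		b	fn		// branch to the actual body of fn

__pa_symbol() applied to such a reference therefore returns the
physical address of the branch instruction rather than of fn itself.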

Bug: 145210207
Change-Id: I14995e522365ad09a5c9bd676e1203b2b642cd5a
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>

--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h

@@ -311,6 +311,22 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
 #define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))
 
+/*
+ * With non-canonical CFI jump tables, the compiler replaces function
+ * address references with the address of the function's CFI jump
+ * table entry. This results in __pa_symbol(function) returning the
+ * physical address of the jump table entry, which can lead to address
+ * space confusion since the jump table points to the function's
+ * virtual address. Therefore, use inline assembly to ensure we are
+ * always taking the address of the actual function.
+ */
+#define __pa_function(x) ({					\
+	unsigned long addr;					\
+	asm("adrp %0, " __stringify(x) "\n\t"			\
+	    "add %0, %0, :lo12:" __stringify(x) : "=r" (addr));	\
+	__pa_symbol(addr);					\
+})
+
 /*
  * virt_to_page(k)	convert a _valid_ virtual address to struct page *
  * virt_addr_valid(k)	indicates whether a virtual address is valid
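
The adrp/add pair in the macro materializes the symbol's address purely
from its name at assembly time, so the compiler never sees a C-level
function reference it could redirect to the jump table. As a rough
sketch, for a call such as __pa_function(secondary_entry) the emitted
instructions would be:

	adrp	x0, secondary_entry		// 4 KiB page of the symbol
	add	x0, x0, :lo12:secondary_entry	// plus the low 12 offset bits

The result is the function's actual virtual address, which
__pa_symbol() then converts to a physical address.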

--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h

@@ -149,7 +149,7 @@ static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
 
 	phys_addr_t pgd_phys = virt_to_phys(pgdp);
 
-	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
+	replace_phys = (void *)__pa_function(idmap_cpu_replace_ttbr1);
 
 	cpu_install_idmap();
 	replace_phys(pgd_phys);
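
This and the next two hunks convert idmap call sites that all follow
the same pattern: resolve the function's physical address, install the
identity mapping, then call through that address. A minimal sketch of
the pattern (the function-pointer type is assumed here for
illustration):

	typedef void (ttbr_replace_fn)(phys_addr_t);
	ttbr_replace_fn *replace_phys;

	replace_phys = (void *)__pa_function(idmap_cpu_replace_ttbr1);
	cpu_install_idmap();		/* switch TTBR0 to the 1:1 idmap */
	replace_phys(pgd_phys);		/* runs the real function body */

With __pa_symbol(), replace_phys would instead point at the physical
location of the CFI jump table entry.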

--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h

@@ -25,7 +25,7 @@ static inline void __noreturn cpu_soft_restart(unsigned long entry,
 
 	unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
 		is_hyp_mode_available();
-	restart = (void *)__pa_symbol(__cpu_soft_restart);
+	restart = (void *)__pa_function(__cpu_soft_restart);
 
 	cpu_install_idmap();
 	restart(el2_switch, entry, arg0, arg1, arg2);

--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c

@@ -973,7 +973,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	if (kpti_applied)
 		return;
 
-	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+	remap_fn = (void *)__pa_function(idmap_kpti_install_ng_mappings);
 
 	cpu_install_idmap();
 	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));

--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c

@@ -46,7 +46,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
 
 static int cpu_psci_cpu_boot(unsigned int cpu)
 {
-	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
+	int err = psci_ops.cpu_on(cpu_logical_map(cpu),
+				  __pa_function(secondary_entry));
 	if (err)
 		pr_err("failed to boot CPU%d (%d)\n", cpu, err);
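
Unlike the idmap cases above, the kernel never calls this address
itself: it is handed to PSCI firmware as the secondary CPU's entry
point, and firmware starts the CPU there with the MMU off. An
illustrative comparison of what the two macros yield at this call site:

	__pa_symbol(secondary_entry)	/* phys addr of the jump table entry */
	__pa_function(secondary_entry)	/* phys addr of the real entry code */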

--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c

@@ -99,7 +99,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
 	 * boot-loader's endianness before jumping. This is mandated by
 	 * the boot protocol.
 	 */
-	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
+	writeq_relaxed(__pa_function(secondary_holding_pen), release_addr);
 	__flush_dcache_area((__force void *)release_addr,
 			    sizeof(*release_addr));
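
The spin-table boot protocol is similar: the secondary CPU sits in
boot-loader code with the MMU off, polls the release address, and
branches to whatever non-zero value appears there, so that value must
be the physical address of real code. A rough sketch of the
boot-loader side (illustrative pseudo-C, not actual pen code):

	unsigned long entry;

	while ((entry = *release_addr) == 0)
		wfe();			/* woken by the kernel's sev() */
	((void (*)(void))entry)();	/* branch target must be real code:
					   here, the physical address of
					   secondary_holding_pen */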