Revert "msm: Allow lowmem to be non contiguous and mixed"

This reverts commit 4be85acb2e.

Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
Author:       UtsavBalar1231 <utsavbalar1231@gmail.com>
Date:         2022-04-26 21:06:44 +05:30
Committed by: spakkkk
Parent:       151d2098e9
Commit:       f658688a2d
4 changed files with 3 additions and 70 deletions
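
In short: the reverted commit made the ARM lowmem mapping code tolerant of non-contiguous lowmem by having map_lowmem() register every lowmem memblock as a static vm area flagged VM_LOWMEM, with a new vm_area_check_early() helper so add_static_vm_early() could skip areas that were already mapped. This revert deletes the helper, the flag, and the registration loop, returning all four files to their upstream form, as the diffs below show.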

arch/arm/mm/ioremap.c

@@ -92,8 +92,7 @@ void __init add_static_vm_early(struct static_vm *svm)
 	void *vaddr;
 
 	vm = &svm->vm;
-	if (!vm_area_check_early(vm))
-		vm_area_add_early(vm);
+	vm_area_add_early(vm);
 	vaddr = vm->addr;
 
 	list_for_each_entry(curr_svm, &static_vmlist, list) {
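
The guard being removed here matters because of what the restored unconditional call does. For context, upstream's vm_area_add_early() in kernels of this era looks roughly like the sketch below (reproduced from memory, not part of this diff): it BUG()s on any overlap with an already-registered area, whereas the reverted code skipped such duplicates silently.

void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			/* The new area must not reach into this one. */
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			/* This one must not reach into the new area. */
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}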

arch/arm/mm/mmu.c

@@ -1452,21 +1452,12 @@ static void __init map_lowmem(void)
 	struct memblock_region *reg;
 	phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
-	struct static_vm *svm;
-	phys_addr_t start;
-	phys_addr_t end;
-	unsigned long vaddr;
-	unsigned long pfn;
-	unsigned long length;
-	unsigned int type;
-	int nr = 0;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
+		phys_addr_t start = reg->base;
+		phys_addr_t end = start + reg->size;
 		struct map_desc map;
-		start = reg->base;
-		end = start + reg->size;
-		nr++;
 
 		if (memblock_is_nomap(reg))
 			continue;
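
Note the shape of this hunk: the revert moves start and end back to loop-local declarations initialised from the memblock, and drops the nr counter, whose only job was to size the svm array allocated by the registration loop removed in the next hunk below.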
@@ -1518,33 +1509,6 @@ static void __init map_lowmem(void)
 			}
 		}
 	}
-
-	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
-
-	for_each_memblock(memory, reg) {
-		struct vm_struct *vm;
-
-		start = reg->base;
-		end = start + reg->size;
-
-		if (end > arm_lowmem_limit)
-			end = arm_lowmem_limit;
-		if (start >= end)
-			break;
-
-		vm = &svm->vm;
-		pfn = __phys_to_pfn(start);
-		vaddr = __phys_to_virt(start);
-		length = end - start;
-		type = MT_MEMORY_RW;
-		vm->addr = (void *)(vaddr & PAGE_MASK);
-		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
-		vm->phys_addr = __pfn_to_phys(pfn);
-		vm->flags = VM_LOWMEM;
-		vm->flags |= VM_ARM_MTYPE(type);
-		vm->caller = map_lowmem;
-		add_static_vm_early(svm++);
-	}
 }
 
 #ifdef CONFIG_ARM_PV_FIXUP
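
The deleted loop rounded each memblock out to whole pages before describing it as a static vm area. The rounding arithmetic (vaddr & PAGE_MASK for the base, PAGE_ALIGN of the length plus the page offset for the size) is easy to get wrong, so here is a minimal standalone sketch of just that step, with the PAGE_* macros redefined locally so it builds in userspace; the addresses are hypothetical demo values, not kernel code.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long vaddr  = 0xc0001234UL;	/* unaligned virtual address */
	unsigned long length = 0x10000UL;	/* 64 KiB region */

	/* Same computation as the deleted vm->addr / vm->size lines. */
	unsigned long addr = vaddr & PAGE_MASK;
	unsigned long size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));

	/* The rounded-out area must fully cover the original range. */
	assert(addr <= vaddr);
	assert(addr + size >= vaddr + length);

	printf("area [%#lx, %#lx) covers [%#lx, %#lx)\n",
	       addr, addr + size, vaddr, vaddr + length);
	return 0;
}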

include/linux/vmalloc.h

@@ -21,8 +21,6 @@ struct notifier_block;	/* in notifier.h */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040	/* don't add guard page */
 #define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
-#define VM_LOWMEM		0x00000100	/* Tracking of direct mapped lowmem */
-
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -182,7 +180,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
 extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
-extern __init int vm_area_check_early(struct vm_struct *vm);
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU

mm/vmalloc.c

@@ -1747,33 +1747,6 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 EXPORT_SYMBOL(vm_map_ram);
 
 static struct vm_struct *vmlist __initdata;
-
-/**
- * vm_area_check_early - check if vmap area is already mapped
- * @vm: vm_struct to be checked
- *
- * This function is used to check if the vmap area has been
- * mapped already. @vm->addr, @vm->size and @vm->flags should
- * contain proper values.
- *
- */
-int __init vm_area_check_early(struct vm_struct *vm)
-{
-	struct vm_struct *tmp, **p;
-
-	BUG_ON(vmap_initialized);
-	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
-		if (tmp->addr >= vm->addr) {
-			if (tmp->addr < vm->addr + vm->size)
-				return 1;
-		} else {
-			if (tmp->addr + tmp->size > vm->addr)
-				return 1;
-		}
-	}
-	return 0;
-}
-
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add
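
For reference, the predicate inside the deleted loop is the standard half-open interval overlap test, split on which interval starts first. A self-contained userspace restatement (hypothetical struct and names; only the two comparisons mirror the deleted code) makes both branches easy to check:

#include <assert.h>

/* Hypothetical stand-in for the vm_struct fields used by the check. */
struct area {
	unsigned long addr;
	unsigned long size;
};

/* Returns 1 if [vm->addr, vm->addr + vm->size) overlaps tmp, with the
 * same two branches as the deleted loop body. */
static int overlaps(const struct area *tmp, const struct area *vm)
{
	if (tmp->addr >= vm->addr)
		return tmp->addr < vm->addr + vm->size;
	return tmp->addr + tmp->size > vm->addr;
}

int main(void)
{
	struct area a = { 0x1000, 0x1000 };	/* [0x1000, 0x2000) */
	struct area b = { 0x1800, 0x1000 };	/* [0x1800, 0x2800): overlaps a */
	struct area c = { 0x2000, 0x1000 };	/* [0x2000, 0x3000): only adjacent */

	assert(overlaps(&a, &b) && overlaps(&b, &a));
	assert(!overlaps(&a, &c) && !overlaps(&c, &a));
	return 0;
}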