From 2c2c5d1d10f700b7d280713717ad0b457ce27df7 Mon Sep 17 00:00:00 2001
From: Giulio Benetti
Date: Fri, 4 Nov 2022 21:46:18 +0100
Subject: [PATCH] ARM: 9266/1: mm: fix no-MMU ZERO_PAGE() implementation

[ Upstream commit 340a982825f76f1cff0daa605970fe47321b5ee7 ]

On no-MMU SoCs (e.g. i.MXRT), ZERO_PAGE(vaddr) expands to:

```
virt_to_page(0)
```

which in turn expands to:

```
pfn_to_page(virt_to_pfn(0))
```

and virt_to_pfn(0) then expands to:

```
((((unsigned long)(0) - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET)
```

where PAGE_OFFSET is the DRAM offset (0x80000000), PHYS_PFN_OFFSET is the
PFN of that base (0x80000), and PAGE_SHIFT is 12. This way we end up 16MB
(0x01000000) past the base of DRAM (0x80000000). When ZERO_PAGE(0) is then
used, for example in bio_add_page(), the page gets an address that is out
of DRAM bounds.

So instead of using the fake virtual page 0, allocate a dedicated
zero_page during paging_init() and assign it to a global
'struct page *empty_zero_page', the same way mmu.c does; this is also the
approach used on m68k in commit dc068f462179, as discussed in [0].

Then move the ZERO_PAGE() definition to the top of pgtable.h so it is
shared between mmu.c and nommu.c.

[0]: https://lore.kernel.org/linux-m68k/2a462b23-5b8e-bbf4-ec7d-778434a3b9d7@google.com/T/#m1266ceb63ad140743174d6b3070364d3c9a5179b

Signed-off-by: Giulio Benetti
Reviewed-by: Arnd Bergmann
Signed-off-by: Russell King (Oracle)
Signed-off-by: Sasha Levin
---
 arch/arm/include/asm/pgtable-nommu.h |  6 ------
 arch/arm/include/asm/pgtable.h       | 16 +++++++++-------
 arch/arm/mm/nommu.c                  | 19 +++++++++++++++++++
 3 files changed, 28 insertions(+), 13 deletions(-)

diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 010fa1a35a683..e8ac2f95fb374 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -51,12 +51,6 @@
 
 typedef pte_t *pte_addr_t;
 
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(0))
-
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 3ae120cd1715f..ecfd6e7e128f1 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -10,6 +10,15 @@
 #include <linux/const.h>
 #include <asm/proc-fns.h>
 
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+#endif
+
 #ifndef CONFIG_MMU
 
 #include <asm-generic/pgtable-nopud.h>
@@ -166,13 +175,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define __S111 __PAGE_SHARED_EXEC
 
 #ifndef __ASSEMBLY__
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr) (empty_zero_page)
-
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 24ecf8d30a1e9..a3ad8a1b0e079 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -26,6 +26,13 @@
 
 unsigned long vectors_base;
 
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
 #ifdef CONFIG_ARM_MPU
 struct mpu_rgn_info mpu_rgn_info;
 #endif
@@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void)
  */
 void __init paging_init(const struct machine_desc *mdesc)
 {
+	void *zero_page;
+
 	early_trap_init((void *)vectors_base);
 	mpu_setup();
+
+	/* allocate the zero page. */
+	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
+
 	bootmem_init();
+
+	empty_zero_page = virt_to_page(zero_page);
+	flush_dcache_page(empty_zero_page);
 }
 
 /*
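
As a note for readers unfamiliar with the expansion quoted in the commit
message, the stand-alone sketch below mechanically evaluates the old no-MMU
virt_to_pfn(0) expression with the values cited there (PAGE_OFFSET =
0x80000000, PAGE_SHIFT = 12). The macro definitions are redefined locally
for the demonstration and are not the kernel's own, and the 32-byte
struct page size used in the comment is an assumption:

```
/*
 * Illustrative user-space sketch (not kernel code) of the old no-MMU
 * expansion ZERO_PAGE(vaddr) -> virt_to_page(0) -> pfn_to_page(virt_to_pfn(0)).
 * The values mirror those quoted in the commit message.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET     0x80000000u                  /* DRAM base on i.MXRT */
#define PAGE_SHIFT      12
#define PHYS_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)  /* first DRAM pfn: 0x80000 */

int main(void)
{
	/* virt_to_pfn(0) as it expanded on 32-bit no-MMU ARM */
	uint32_t pfn = (((uint32_t)0 - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET;

	/*
	 * pfn comes out as 0x100000, i.e. 0x80000 frames past the first
	 * DRAM frame; assuming a 32-byte struct page, pfn_to_page() would
	 * then point 0x80000 * 32 = 16MB (0x01000000) past the start of
	 * mem_map, the out-of-bounds address described in the message.
	 */
	printf("virt_to_pfn(0)      = 0x%x\n", pfn);
	printf("frames past mem_map = 0x%x\n", pfn - PHYS_PFN_OFFSET);
	return 0;
}
```

With the patch applied, ZERO_PAGE() instead resolves to the empty_zero_page
allocated from memblock in paging_init(), so the returned struct page always
refers to a real, zeroed page inside DRAM.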