Skip to content

Commit 84b04d3

Browse files
ardbiesheuvel authored and ctmarinas committed
arm64: kernel: Create initial ID map from C code
The asm code that creates the initial ID map is rather intricate and hard to follow. This is problematic because it makes adding support for things like LPA2 or WXN more difficult than necessary. Also, it is parameterized like the rest of the MM code to run with a configurable number of levels, which is rather pointless, given that all AArch64 CPUs implement support for 48-bit virtual addressing, and that many systems exist with DRAM located outside of the 39-bit addressable range, which is the only smaller VA size that is widely used, and we need additional tricks to make things work in that combination. So let's bite the bullet, and rip out all the asm macros, and fiddly code, and replace it with a C implementation based on the newly added routines for creating the early kernel VA mappings. And while at it, create the initial ID map based on 48-bit virtual addressing as well, regardless of the number of configured levels for the kernel proper. Note that this code may execute with the MMU and caches disabled, and is therefore not permitted to make unaligned accesses. This shouldn't generally happen in any case for the algorithm as implemented, but to be sure, let's pass -mstrict-align to the compiler just in case. Signed-off-by: Ard Biesheuvel <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Catalin Marinas <[email protected]>
1 parent 34b98e5 commit 84b04d3

File tree

11 files changed

+88
-295
lines changed

11 files changed

+88
-295
lines changed

arch/arm64/include/asm/assembler.h

-14
Original file line numberDiff line numberDiff line change
@@ -345,20 +345,6 @@ alternative_cb_end
345345
bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
346346
.endm
347347

348-
/*
349-
* idmap_get_t0sz - get the T0SZ value needed to cover the ID map
350-
*
351-
* Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
352-
* entire ID map region can be mapped. As T0SZ == (64 - #bits used),
353-
* this number conveniently equals the number of leading zeroes in
354-
* the physical address of _end.
355-
*/
356-
.macro idmap_get_t0sz, reg
357-
adrp \reg, _end
358-
orr \reg, \reg, #(1 << VA_BITS_MIN) - 1
359-
clz \reg, \reg
360-
.endm
361-
362348
/*
363349
* tcr_compute_pa_size - set TCR.(I)PS to the highest supported
364350
* ID_AA64MMFR0_EL1.PARange value

arch/arm64/include/asm/kernel-pgtable.h

+23-27
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
#define SWAPPER_TABLE_SHIFT (SWAPPER_BLOCK_SHIFT + PAGE_SHIFT - 3)
3030

3131
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - SWAPPER_SKIP_LEVEL)
32+
#define INIT_IDMAP_PGTABLE_LEVELS (IDMAP_LEVELS - SWAPPER_SKIP_LEVEL)
3233

3334
#define IDMAP_VA_BITS 48
3435
#define IDMAP_LEVELS ARM64_HW_PGTABLE_LEVELS(IDMAP_VA_BITS)
@@ -48,44 +49,39 @@
4849
#define EARLY_ENTRIES(vstart, vend, shift, add) \
4950
(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
5051

51-
#define EARLY_LEVEL(lvl, vstart, vend, add) \
52-
(SWAPPER_PGTABLE_LEVELS > lvl ? EARLY_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add) : 0)
52+
#define EARLY_LEVEL(lvl, lvls, vstart, vend, add) \
53+
(lvls > lvl ? EARLY_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add) : 0)
5354

54-
#define EARLY_PAGES(vstart, vend, add) (1 /* PGDIR page */ \
55-
+ EARLY_LEVEL(3, (vstart), (vend), add) /* each entry needs a next level page table */ \
56-
+ EARLY_LEVEL(2, (vstart), (vend), add) /* each entry needs a next level page table */ \
57-
+ EARLY_LEVEL(1, (vstart), (vend), add))/* each entry needs a next level page table */
58-
#define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(KIMAGE_VADDR, _end, EXTRA_PAGE) + EARLY_SEGMENT_EXTRA_PAGES))
55+
#define EARLY_PAGES(lvls, vstart, vend, add) (1 /* PGDIR page */ \
56+
+ EARLY_LEVEL(3, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */ \
57+
+ EARLY_LEVEL(2, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */ \
58+
+ EARLY_LEVEL(1, (lvls), (vstart), (vend), add))/* each entry needs a next level page table */
59+
#define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(SWAPPER_PGTABLE_LEVELS, KIMAGE_VADDR, _end, EXTRA_PAGE) \
60+
+ EARLY_SEGMENT_EXTRA_PAGES))
5961

60-
/* the initial ID map may need two extra pages if it needs to be extended */
61-
#if VA_BITS < 48
62-
#define INIT_IDMAP_DIR_SIZE ((INIT_IDMAP_DIR_PAGES + 2) * PAGE_SIZE)
63-
#else
64-
#define INIT_IDMAP_DIR_SIZE (INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
65-
#endif
66-
#define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
62+
#define INIT_IDMAP_DIR_PAGES (EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, KIMAGE_VADDR, _end, 1))
63+
#define INIT_IDMAP_DIR_SIZE ((INIT_IDMAP_DIR_PAGES + EARLY_IDMAP_EXTRA_PAGES) * PAGE_SIZE)
64+
65+
#define INIT_IDMAP_FDT_PAGES (EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, 0UL, UL(MAX_FDT_SIZE), 1) - 1)
66+
#define INIT_IDMAP_FDT_SIZE ((INIT_IDMAP_FDT_PAGES + EARLY_IDMAP_EXTRA_FDT_PAGES) * PAGE_SIZE)
6767

6868
/* The number of segments in the kernel image (text, rodata, inittext, initdata, data+bss) */
6969
#define KERNEL_SEGMENT_COUNT 5
7070

7171
#if SWAPPER_BLOCK_SIZE > SEGMENT_ALIGN
7272
#define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 1)
73-
#else
74-
#define EARLY_SEGMENT_EXTRA_PAGES 0
75-
#endif
76-
7773
/*
78-
* Initial memory map attributes.
74+
* The initial ID map consists of the kernel image, mapped as two separate
75+
* segments, and may appear misaligned wrt the swapper block size. This means
76+
* we need 3 additional pages. The DT could straddle a swapper block boundary,
77+
* so it may need 2.
7978
*/
80-
#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED | PTE_UXN)
81-
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S | PTE_UXN)
82-
83-
#ifdef CONFIG_ARM64_4K_PAGES
84-
#define SWAPPER_RW_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS | PTE_WRITE)
85-
#define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
79+
#define EARLY_IDMAP_EXTRA_PAGES 3
80+
#define EARLY_IDMAP_EXTRA_FDT_PAGES 2
8681
#else
87-
#define SWAPPER_RW_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS | PTE_WRITE)
88-
#define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
82+
#define EARLY_SEGMENT_EXTRA_PAGES 0
83+
#define EARLY_IDMAP_EXTRA_PAGES 0
84+
#define EARLY_IDMAP_EXTRA_FDT_PAGES 0
8985
#endif
9086

9187
#endif /* __ASM_KERNEL_PGTABLE_H */

arch/arm64/include/asm/mmu_context.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -61,11 +61,9 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
6161
}
6262

6363
/*
64-
* TCR.T0SZ value to use when the ID map is active. Usually equals
65-
* TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
66-
* physical memory, in which case it will be smaller.
64+
* TCR.T0SZ value to use when the ID map is active.
6765
*/
68-
extern int idmap_t0sz;
66+
#define idmap_t0sz TCR_T0SZ(IDMAP_VA_BITS)
6967

7068
/*
7169
* Ensure TCR.T0SZ is set to the provided value.

0 commit comments

Comments
 (0)