
Commit 97e3d26

Peter Zijlstra authored and hansendc committed
x86/mm: Randomize per-cpu entry area
Seth found that the CPU-entry-area, the piece of per-cpu data that is mapped into the userspace page-tables for kPTI, is not subject to any randomization -- irrespective of kASLR settings.

On x86_64 a whole P4D (512 GB) of virtual address space is reserved for this structure, which is plenty large enough to randomize things a little.

As such, use a straightforward randomization scheme that avoids duplicates to spread the existing CPUs over the available space.

[ bp: Fix le build. ]

Reported-by: Seth Jenkins <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Dave Hansen <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
1 parent 3f148f3 commit 97e3d26

4 files changed: +50, -10 lines


arch/x86/include/asm/cpu_entry_area.h

Lines changed: 0 additions & 4 deletions
@@ -130,10 +130,6 @@ struct cpu_entry_area {
 };
 
 #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
-
-/* Total size includes the readonly IDT mapping page as well: */
-#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

arch/x86/include/asm/pgtable_areas.h

Lines changed: 7 additions & 1 deletion
@@ -11,6 +11,12 @@
 
 #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
 
-#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
+#ifdef CONFIG_X86_32
+#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + \
+				 (CPU_ENTRY_AREA_SIZE * NR_CPUS) - \
+				 CPU_ENTRY_AREA_BASE)
+#else
+#define CPU_ENTRY_AREA_MAP_SIZE P4D_SIZE
+#endif
 
 #endif /* _ASM_X86_PGTABLE_AREAS_H */
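
For a rough sense of why a full P4D leaves "plenty" of room to randomize, here is a back-of-the-envelope sketch in plain C. The per-area size below is an assumed, illustrative value; the kernel derives the real figure from sizeof(struct cpu_entry_area).

#include <stdio.h>

#define P4D_BYTES	(512ULL << 30)	/* 512 GB: CPU_ENTRY_AREA_MAP_SIZE on x86_64 after this change */
#define PAGE_BYTES	4096ULL		/* one page kept for the readonly IDT mapping */
#define AREA_BYTES	(256ULL << 10)	/* assumption: ~256 KB per cpu_entry_area */

int main(void)
{
	/* Mirrors: max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE */
	unsigned long long max_cea = (P4D_BYTES - PAGE_BYTES) / AREA_BYTES;

	printf("max_cea = %llu candidate slots\n", max_cea);
	return 0;
}

With these assumed numbers max_cea comes out to roughly two million slots, against at most a few thousand possible CPUs, so the random draw rarely collides and the retry loop in init_cea_offsets() terminates quickly.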

arch/x86/kernel/hw_breakpoint.c

Lines changed: 1 addition & 1 deletion
@@ -266,7 +266,7 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
 
 	/* CPU entry erea is always used for CPU entry */
 	if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
-			CPU_ENTRY_AREA_TOTAL_SIZE))
+			CPU_ENTRY_AREA_MAP_SIZE))
 		return true;
 
 	/*

arch/x86/mm/cpu_entry_area.c

Lines changed: 42 additions & 4 deletions
@@ -16,16 +16,53 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
-#endif
 
-#ifdef CONFIG_X86_32
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return per_cpu(_cea_offset, cpu);
+}
+
+static __init void init_cea_offsets(void)
+{
+	unsigned int max_cea;
+	unsigned int i, j;
+
+	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
+
+	/* O(sodding terrible) */
+	for_each_possible_cpu(i) {
+		unsigned int cea;
+
+again:
+		cea = prandom_u32_max(max_cea);
+
+		for_each_possible_cpu(j) {
+			if (cea_offset(j) == cea)
+				goto again;
+
+			if (i == j)
+				break;
+		}
+
+		per_cpu(_cea_offset, i) = cea;
+	}
+}
+#else /* !X86_64 */
 DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return cpu;
+}
+static inline void init_cea_offsets(void) { }
 #endif
 
 /* Is called from entry code, so must be noinstr */
 noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
 {
-	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
 
 	return (struct cpu_entry_area *) va;

@@ -211,7 +248,6 @@ static __init void setup_cpu_entry_area_ptes(void)
 
 	/* The +1 is for the readonly IDT: */
 	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
-	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
 	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
 
 	start = CPU_ENTRY_AREA_BASE;

@@ -227,6 +263,8 @@ void __init setup_cpu_entry_areas(void)
 {
 	unsigned int cpu;
 
+	init_cea_offsets();
+
 	setup_cpu_entry_area_ptes();
 
 	for_each_possible_cpu(cpu)
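
The duplicate-avoiding draw in init_cea_offsets() can be modelled outside the kernel. The sketch below is only an illustration: rand() stands in for prandom_u32_max(), a plain array stands in for the per-CPU _cea_offset variable, and NR_FAKE_CPUS / MAX_CEA are made-up bounds.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NR_FAKE_CPUS	8	/* stand-in for the set of possible CPUs */
#define MAX_CEA		64	/* stand-in for max_cea */

static unsigned int cea_of[NR_FAKE_CPUS];	/* stand-in for per-CPU _cea_offset */

int main(void)
{
	unsigned int i, j, cea;

	srand((unsigned int)time(NULL));

	for (i = 0; i < NR_FAKE_CPUS; i++) {
again:
		cea = (unsigned int)rand() % MAX_CEA;

		/* Reject the candidate if an already-assigned CPU holds it. */
		for (j = 0; j < i; j++) {
			if (cea_of[j] == cea)
				goto again;
		}

		cea_of[i] = cea;
		printf("cpu %u -> slot %u\n", i, cea);
	}
	return 0;
}

The kernel version walks all possible CPUs and breaks once it reaches the CPU currently being assigned, which has the same effect as the bounded inner loop here; as the comment in the patch admits, the quadratic scan is acceptable because it runs once at boot.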
