
Commit 5dfd57d

Don't expose guard pages to malloc_stack API consumers (#54591)
Whether or not a guard page is in effect is an implementation detail, and consumers of the `malloc_stack` API should not have to worry about it. In particular, if a stack of a certain size is requested, a stack of that size should be delivered, not one silently reduced on some systems because we park a guard page in that range. This also helps consumers of the gcext API that implement stack scanning (e.g., GAP.jl), as they no longer have to worry about running into those guard pages.
1 parent 8a7d8f4 commit 5dfd57d
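
For illustration, here is a condensed sketch of the scheme this change applies on the mmap path: over-allocate by the page-aligned guard size, protect the low pages, and hand the caller a pointer just past the guard, so the caller sees exactly the size it asked for; freeing reverses the same offset. This is a minimal standalone sketch assuming a POSIX mmap/mprotect environment; the names alloc_stack and release_stack are illustrative and not part of Julia's API.

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static const size_t GUARD_BYTES = 4096 * 8;  /* mirrors jl_guard_size */

/* Round x up to the next multiple of the page size (what LLT_ALIGN does). */
static size_t page_align(size_t x)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    return (x + page - 1) & ~(page - 1);
}

/* Returns a pointer to bufsz usable bytes; the guard lives below it. */
static void *alloc_stack(size_t bufsz)
{
    size_t guard = page_align(GUARD_BYTES);
    size_t total = bufsz + guard;            /* over-allocate instead of shrinking the stack */
    char *base = mmap(NULL, total, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED)
        return NULL;
    if (mprotect(base, guard, PROT_NONE) == -1) {  /* faults if the stack overflows downward */
        munmap(base, total);
        return NULL;
    }
    return base + guard;                     /* guard stays hidden from the caller */
}

/* Must mirror the offset applied by alloc_stack. */
static void release_stack(void *stk, size_t bufsz)
{
    size_t guard = page_align(GUARD_BYTES);
    munmap((char *)stk - guard, bufsz + guard);
}

The key point is that free must undo exactly the offset applied at allocation time, which is what the free_stack changes in the diff below do.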


src/gc-stacks.c

Lines changed: 29 additions & 1 deletion
@@ -22,13 +22,22 @@
 // number of stacks to always keep available per pool
 #define MIN_STACK_MAPPINGS_PER_POOL 5
 
+#if defined(_OS_WINDOWS_) || (!defined(_OS_OPENBSD_) && !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK))
+#define JL_USE_GUARD_PAGE 1
 const size_t jl_guard_size = (4096 * 8);
+#else
+const size_t jl_guard_size = 0;
+#endif
+
 static _Atomic(uint32_t) num_stack_mappings = 0;
 
 #ifdef _OS_WINDOWS_
 #define MAP_FAILED NULL
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+
     void *stk = VirtualAlloc(NULL, bufsz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
     if (stk == NULL)
         return MAP_FAILED;
@@ -39,6 +48,7 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
         VirtualFree(stk, 0, MEM_RELEASE);
         return MAP_FAILED;
     }
+    stk = (char *)stk + guard_size;
 
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
     return stk;
@@ -47,6 +57,12 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 
 static void free_stack(void *stkbuf, size_t bufsz)
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+    stkbuf = (char *)stkbuf - guard_size;
+#endif
+
     VirtualFree(stkbuf, 0, MEM_RELEASE);
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
 }
@@ -72,16 +88,22 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 # else
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+#endif
+
     void* stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (stk == MAP_FAILED)
         return MAP_FAILED;
 
-#if !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK)
+#ifdef JL_USE_GUARD_PAGE
     // set up a guard page to detect stack overflow
     if (mprotect(stk, jl_guard_size, PROT_NONE) == -1) {
         munmap(stk, bufsz);
         return MAP_FAILED;
     }
+    stk = (char *)stk + guard_size;
 #endif
 
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
@@ -91,6 +113,12 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 
 static void free_stack(void *stkbuf, size_t bufsz)
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+    stkbuf = (char *)stkbuf - guard_size;
+#endif
+
     munmap(stkbuf, bufsz);
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
 }
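
Net effect: the guard is paid for with extra mapped pages rather than carved out of the caller's request. With 4 KiB pages, jl_guard_size is 32 KiB, so asking for, say, a 512 KiB stack now maps 512 KiB + 32 KiB, protects the lowest 32 KiB, and returns a pointer 32 KiB past the base, leaving the full 512 KiB usable; before this change the low 32 KiB of the returned buffer were the guard, so the same request yielded only 512 KiB − 32 KiB of usable stack on systems with a guard page. That is also why both free_stack variants add guard_size back to bufsz and step stkbuf back by guard_size before releasing the mapping, and why configurations without a guard page (OpenBSD, ucontext and sigaltstack builds) now define jl_guard_size as 0.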
