
Commit 9b6f7e1

rgushchin authored and torvalds committed
mm: rework memcg kernel stack accounting
If CONFIG_VMAP_STACK is set, kernel stacks are allocated using
__vmalloc_node_range() with __GFP_ACCOUNT. So kernel stack pages are
charged against corresponding memory cgroups on allocation and uncharged
on releasing them.

The problem is that we do cache kernel stacks in small per-cpu caches and
do reuse them for new tasks, which can belong to different memory cgroups.

Each stack page still holds a reference to the original cgroup, so the
cgroup can't be released until the vmap area is released.

To make this happen we need more than two subsequent exits without forks
in between on the current cpu, which makes it very unlikely to happen.

As a result, I saw a significant number of dying cgroups (in theory, up to
2 * number_of_cpu + number_of_tasks), which can't be released even by
significant memory pressure.

As a cgroup structure can take a significant amount of memory (first of
all, per-cpu data like memcg statistics), it leads to a noticeable waste
of memory.

Link: http://lkml.kernel.org/r/[email protected]
Fixes: ac496bf ("fork: Optimize task creation by caching two thread stacks per CPU if CONFIG_VMAP_STACK=y")
Signed-off-by: Roman Gushchin <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Tejun Heo <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent c5fd3ca commit 9b6f7e1
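In short, the patch stops relying on __GFP_ACCOUNT at stack allocation time and instead charges and uncharges the stack pages explicitly when a stack is handed to a task and when it is released. A condensed sketch of the resulting flow, simplified from the kernel/fork.c hunks below (error handling, declarations, and the !CONFIG_VMAP_STACK case are omitted):

    /*
     * Sketch only, condensed from the diff below; not a literal copy
     * of the kernel source. Assumes CONFIG_VMAP_STACK=y.
     */

    /* fork path, dup_task_struct(): */
    stack = alloc_thread_stack_node(tsk, node);   /* now vmalloc'ed WITHOUT __GFP_ACCOUNT */
    if (memcg_charge_kernel_stack(tsk))           /* charge each stack page at fork time  */
            goto free_stack;

    /* exit path, free_thread_stack(): */
    for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
            mod_memcg_page_state(vm->pages[i], MEMCG_KERNEL_STACK_KB,
                                 -(int)(PAGE_SIZE / 1024));
            memcg_kmem_uncharge(vm->pages[i], 0); /* drop the memcg reference before the  */
    }                                             /* stack is parked in the per-cpu cache */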

2 files changed: +61 -7 lines


include/linux/memcontrol.h

Lines changed: 12 additions & 1 deletion
@@ -1268,10 +1268,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
                             struct mem_cgroup *memcg);
+
+#ifdef CONFIG_MEMCG_KMEM
 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
 void memcg_kmem_uncharge(struct page *page, int order);
 
-#ifdef CONFIG_MEMCG_KMEM
 extern struct static_key_false memcg_kmem_enabled_key;
 extern struct workqueue_struct *memcg_kmem_cache_wq;
 
@@ -1307,6 +1308,16 @@ extern int memcg_expand_shrinker_maps(int new_id);
 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
                                    int nid, int shrinker_id);
 #else
+
+static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+{
+        return 0;
+}
+
+static inline void memcg_kmem_uncharge(struct page *page, int order)
+{
+}
+
 #define for_each_memcg_cache_index(_idx)        \
         for (; NULL; )
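Net effect of the two hunks above: memcg_kmem_charge() and memcg_kmem_uncharge() are now declared under CONFIG_MEMCG_KMEM and get no-op static inline stubs otherwise, so generic code such as kernel/fork.c below can call them without per-call #ifdefs. Roughly (an illustration, not part of the patch):

    /* Illustration only: when CONFIG_MEMCG_KMEM is disabled, both calls
     * resolve to the stubs added above and compile away. */
    int ret = memcg_kmem_charge(page, GFP_KERNEL, 0);
    if (ret)
            return ret;
    /* ... use the page ... */
    memcg_kmem_uncharge(page, 0);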

kernel/fork.c

Lines changed: 49 additions & 6 deletions
@@ -223,9 +223,14 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
                return s->addr;
        }
 
+       /*
+        * Allocated stacks are cached and later reused by new threads,
+        * so memcg accounting is performed manually on assigning/releasing
+        * stacks to tasks. Drop __GFP_ACCOUNT.
+        */
        stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
                                     VMALLOC_START, VMALLOC_END,
-                                    THREADINFO_GFP,
+                                    THREADINFO_GFP & ~__GFP_ACCOUNT,
                                     PAGE_KERNEL,
                                     0, node, __builtin_return_address(0));
 
@@ -248,9 +253,19 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
 static inline void free_thread_stack(struct task_struct *tsk)
 {
 #ifdef CONFIG_VMAP_STACK
-       if (task_stack_vm_area(tsk)) {
+       struct vm_struct *vm = task_stack_vm_area(tsk);
+
+       if (vm) {
                int i;
 
+               for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+                       mod_memcg_page_state(vm->pages[i],
+                                            MEMCG_KERNEL_STACK_KB,
+                                            -(int)(PAGE_SIZE / 1024));
+
+                       memcg_kmem_uncharge(vm->pages[i], 0);
+               }
+
                for (i = 0; i < NR_CACHED_STACKS; i++) {
                        if (this_cpu_cmpxchg(cached_stacks[i],
                                             NULL, tsk->stack_vm_area) != NULL)
@@ -351,10 +366,6 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
                                            NR_KERNEL_STACK_KB,
                                            PAGE_SIZE / 1024 * account);
                }
-
-               /* All stack pages belong to the same memcg. */
-               mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB,
-                                    account * (THREAD_SIZE / 1024));
        } else {
                /*
                 * All stack pages are in the same zone and belong to the
@@ -370,6 +381,35 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
        }
 }
 
+static int memcg_charge_kernel_stack(struct task_struct *tsk)
+{
+#ifdef CONFIG_VMAP_STACK
+       struct vm_struct *vm = task_stack_vm_area(tsk);
+       int ret;
+
+       if (vm) {
+               int i;
+
+               for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+                       /*
+                        * If memcg_kmem_charge() fails, page->mem_cgroup
+                        * pointer is NULL, and both memcg_kmem_uncharge()
+                        * and mod_memcg_page_state() in free_thread_stack()
+                        * will ignore this page. So it's safe.
+                        */
+                       ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0);
+                       if (ret)
+                               return ret;
+
+                       mod_memcg_page_state(vm->pages[i],
+                                            MEMCG_KERNEL_STACK_KB,
+                                            PAGE_SIZE / 1024);
+               }
+       }
+#endif
+       return 0;
+}
+
 static void release_task_stack(struct task_struct *tsk)
 {
        if (WARN_ON(tsk->state != TASK_DEAD))
@@ -807,6 +847,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        if (!stack)
                goto free_tsk;
 
+       if (memcg_charge_kernel_stack(tsk))
+               goto free_stack;
+
        stack_vm_area = task_stack_vm_area(tsk);
 
        err = arch_dup_task_struct(tsk, orig);
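One detail worth noting: if memcg_kmem_charge() fails partway through memcg_charge_kernel_stack(), dup_task_struct() jumps to free_stack and the stack is freed via free_thread_stack(), which uncharges every page. That is safe for the pages that were never charged, because their page->mem_cgroup pointer is NULL and both memcg_kmem_uncharge() and mod_memcg_page_state() skip them, as the comment in the hunk above notes. A rough sketch of that unwind path (the free_stack/free_tsk labels are assumed from the surrounding dup_task_struct() code, which this diff does not show in full):

    stack = alloc_thread_stack_node(tsk, node);
    if (!stack)
            goto free_tsk;

    if (memcg_charge_kernel_stack(tsk))  /* may fail after charging only some pages */
            goto free_stack;
    /* ... normal fork path continues ... */

    free_stack:
            free_thread_stack(tsk);      /* uncharges every page; pages that were never
                                            charged (page->mem_cgroup == NULL) are
                                            simply skipped */
    free_tsk:
            free_task_struct(tsk);
            return NULL;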
