Skip to content

Commit 1a7b7d9

Browse files
rpedgeco authored and
Ingo Molnar
committed
modules: Use vmalloc special flag
Use new flag for handling freeing of special permissioned memory in vmalloc and remove places where memory was set RW before freeing which is no longer needed. Since freeing of VM_FLUSH_RESET_PERMS memory is not supported in an interrupt by vmalloc, the freeing of init sections is moved to a work queue. Instead of call_rcu it now uses synchronize_rcu() in the work queue.

Lastly, there is now a WARN_ON in module_memfree since it should not be called in an interrupt with special memory as is required for VM_FLUSH_RESET_PERMS.

Signed-off-by: Rick Edgecombe <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Jessica Yu <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Nadav Amit <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 868b104 commit 1a7b7d9

File tree

1 file changed

+39
-38
lines changed

1 file changed

+39
-38
lines changed

kernel/module.c

Lines changed: 39 additions & 38 deletions
Original file line number | Diff line number | Diff line change
@@ -98,6 +98,10 @@ DEFINE_MUTEX(module_mutex);
9898
EXPORT_SYMBOL_GPL(module_mutex);
9999
static LIST_HEAD(modules);
100100

101+
/* Work queue for freeing init sections in success case */
102+
static struct work_struct init_free_wq;
103+
static struct llist_head init_free_list;
104+
101105
#ifdef CONFIG_MODULES_TREE_LOOKUP
102106

103107
/*
@@ -1949,6 +1953,8 @@ void module_enable_ro(const struct module *mod, bool after_init)
19491953
if (!rodata_enabled)
19501954
return;
19511955

1956+
set_vm_flush_reset_perms(mod->core_layout.base);
1957+
set_vm_flush_reset_perms(mod->init_layout.base);
19521958
frob_text(&mod->core_layout, set_memory_ro);
19531959
frob_text(&mod->core_layout, set_memory_x);
19541960

@@ -1972,15 +1978,6 @@ static void module_enable_nx(const struct module *mod)
19721978
frob_writable_data(&mod->init_layout, set_memory_nx);
19731979
}
19741980

1975-
static void module_disable_nx(const struct module *mod)
1976-
{
1977-
frob_rodata(&mod->core_layout, set_memory_x);
1978-
frob_ro_after_init(&mod->core_layout, set_memory_x);
1979-
frob_writable_data(&mod->core_layout, set_memory_x);
1980-
frob_rodata(&mod->init_layout, set_memory_x);
1981-
frob_writable_data(&mod->init_layout, set_memory_x);
1982-
}
1983-
19841981
/* Iterate through all modules and set each module's text as RW */
19851982
void set_all_modules_text_rw(void)
19861983
{
@@ -2024,23 +2021,8 @@ void set_all_modules_text_ro(void)
20242021
}
20252022
mutex_unlock(&module_mutex);
20262023
}
2027-
2028-
static void disable_ro_nx(const struct module_layout *layout)
2029-
{
2030-
if (rodata_enabled) {
2031-
frob_text(layout, set_memory_rw);
2032-
frob_rodata(layout, set_memory_rw);
2033-
frob_ro_after_init(layout, set_memory_rw);
2034-
}
2035-
frob_rodata(layout, set_memory_x);
2036-
frob_ro_after_init(layout, set_memory_x);
2037-
frob_writable_data(layout, set_memory_x);
2038-
}
2039-
20402024
#else
2041-
static void disable_ro_nx(const struct module_layout *layout) { }
20422025
static void module_enable_nx(const struct module *mod) { }
2043-
static void module_disable_nx(const struct module *mod) { }
20442026
#endif
20452027

20462028
#ifdef CONFIG_LIVEPATCH
@@ -2120,6 +2102,11 @@ static void free_module_elf(struct module *mod)
21202102

21212103
void __weak module_memfree(void *module_region)
21222104
{
2105+
/*
2106+
* This memory may be RO, and freeing RO memory in an interrupt is not
2107+
* supported by vmalloc.
2108+
*/
2109+
WARN_ON(in_interrupt());
21232110
vfree(module_region);
21242111
}
21252112

@@ -2171,7 +2158,6 @@ static void free_module(struct module *mod)
21712158
mutex_unlock(&module_mutex);
21722159

21732160
/* This may be empty, but that's OK */
2174-
disable_ro_nx(&mod->init_layout);
21752161
module_arch_freeing_init(mod);
21762162
module_memfree(mod->init_layout.base);
21772163
kfree(mod->args);
@@ -2181,7 +2167,6 @@ static void free_module(struct module *mod)
21812167
lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
21822168

21832169
/* Finally, free the core (containing the module structure) */
2184-
disable_ro_nx(&mod->core_layout);
21852170
module_memfree(mod->core_layout.base);
21862171
}
21872172

@@ -3420,17 +3405,34 @@ static void do_mod_ctors(struct module *mod)
34203405

34213406
/* For freeing module_init on success, in case kallsyms traversing */
34223407
struct mod_initfree {
3423-
struct rcu_head rcu;
3408+
struct llist_node node;
34243409
void *module_init;
34253410
};
34263411

3427-
static void do_free_init(struct rcu_head *head)
3412+
static void do_free_init(struct work_struct *w)
34283413
{
3429-
struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
3430-
module_memfree(m->module_init);
3431-
kfree(m);
3414+
struct llist_node *pos, *n, *list;
3415+
struct mod_initfree *initfree;
3416+
3417+
list = llist_del_all(&init_free_list);
3418+
3419+
synchronize_rcu();
3420+
3421+
llist_for_each_safe(pos, n, list) {
3422+
initfree = container_of(pos, struct mod_initfree, node);
3423+
module_memfree(initfree->module_init);
3424+
kfree(initfree);
3425+
}
34323426
}
34333427

3428+
static int __init modules_wq_init(void)
3429+
{
3430+
INIT_WORK(&init_free_wq, do_free_init);
3431+
init_llist_head(&init_free_list);
3432+
return 0;
3433+
}
3434+
module_init(modules_wq_init);
3435+
34343436
/*
34353437
* This is where the real work happens.
34363438
*
@@ -3507,7 +3509,6 @@ static noinline int do_init_module(struct module *mod)
35073509
#endif
35083510
module_enable_ro(mod, true);
35093511
mod_tree_remove_init(mod);
3510-
disable_ro_nx(&mod->init_layout);
35113512
module_arch_freeing_init(mod);
35123513
mod->init_layout.base = NULL;
35133514
mod->init_layout.size = 0;
@@ -3518,14 +3519,18 @@ static noinline int do_init_module(struct module *mod)
35183519
* We want to free module_init, but be aware that kallsyms may be
35193520
* walking this with preempt disabled. In all the failure paths, we
35203521
* call synchronize_rcu(), but we don't want to slow down the success
3521-
* path, so use actual RCU here.
3522+
* path. module_memfree() cannot be called in an interrupt, so do the
3523+
* work and call synchronize_rcu() in a work queue.
3524+
*
35223525
* Note that module_alloc() on most architectures creates W+X page
35233526
* mappings which won't be cleaned up until do_free_init() runs. Any
35243527
* code such as mark_rodata_ro() which depends on those mappings to
35253528
* be cleaned up needs to sync with the queued work - ie
35263529
* rcu_barrier()
35273530
*/
3528-
call_rcu(&freeinit->rcu, do_free_init);
3531+
if (llist_add(&freeinit->node, &init_free_list))
3532+
schedule_work(&init_free_wq);
3533+
35293534
mutex_unlock(&module_mutex);
35303535
wake_up_all(&module_wq);
35313536

@@ -3822,10 +3827,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
38223827
module_bug_cleanup(mod);
38233828
mutex_unlock(&module_mutex);
38243829

3825-
/* we can't deallocate the module until we clear memory protection */
3826-
module_disable_ro(mod);
3827-
module_disable_nx(mod);
3828-
38293830
ddebug_cleanup:
38303831
ftrace_release_mod(mod);
38313832
dynamic_debug_remove(mod, info->debug);

0 commit comments

Comments
 (0)