@@ -98,6 +98,10 @@ DEFINE_MUTEX(module_mutex);
98
98
EXPORT_SYMBOL_GPL (module_mutex );
99
99
static LIST_HEAD (modules );
100
100
101
+ /* Work queue for freeing init sections in success case */
102
+ static struct work_struct init_free_wq ;
103
+ static struct llist_head init_free_list ;
104
+
101
105
#ifdef CONFIG_MODULES_TREE_LOOKUP
102
106
103
107
/*
@@ -1949,6 +1953,8 @@ void module_enable_ro(const struct module *mod, bool after_init)
1949
1953
if (!rodata_enabled )
1950
1954
return ;
1951
1955
1956
+ set_vm_flush_reset_perms (mod -> core_layout .base );
1957
+ set_vm_flush_reset_perms (mod -> init_layout .base );
1952
1958
frob_text (& mod -> core_layout , set_memory_ro );
1953
1959
frob_text (& mod -> core_layout , set_memory_x );
1954
1960
@@ -1972,15 +1978,6 @@ static void module_enable_nx(const struct module *mod)
1972
1978
frob_writable_data (& mod -> init_layout , set_memory_nx );
1973
1979
}
1974
1980
1975
- static void module_disable_nx (const struct module * mod )
1976
- {
1977
- frob_rodata (& mod -> core_layout , set_memory_x );
1978
- frob_ro_after_init (& mod -> core_layout , set_memory_x );
1979
- frob_writable_data (& mod -> core_layout , set_memory_x );
1980
- frob_rodata (& mod -> init_layout , set_memory_x );
1981
- frob_writable_data (& mod -> init_layout , set_memory_x );
1982
- }
1983
-
1984
1981
/* Iterate through all modules and set each module's text as RW */
1985
1982
void set_all_modules_text_rw (void )
1986
1983
{
@@ -2024,23 +2021,8 @@ void set_all_modules_text_ro(void)
2024
2021
}
2025
2022
mutex_unlock (& module_mutex );
2026
2023
}
2027
-
2028
- static void disable_ro_nx (const struct module_layout * layout )
2029
- {
2030
- if (rodata_enabled ) {
2031
- frob_text (layout , set_memory_rw );
2032
- frob_rodata (layout , set_memory_rw );
2033
- frob_ro_after_init (layout , set_memory_rw );
2034
- }
2035
- frob_rodata (layout , set_memory_x );
2036
- frob_ro_after_init (layout , set_memory_x );
2037
- frob_writable_data (layout , set_memory_x );
2038
- }
2039
-
2040
2024
#else
2041
- static void disable_ro_nx (const struct module_layout * layout ) { }
2042
2025
static void module_enable_nx (const struct module * mod ) { }
2043
- static void module_disable_nx (const struct module * mod ) { }
2044
2026
#endif
2045
2027
2046
2028
#ifdef CONFIG_LIVEPATCH
@@ -2120,6 +2102,11 @@ static void free_module_elf(struct module *mod)
2120
2102
2121
2103
/*
 * Free a region previously obtained from module_alloc().  Weak so that
 * architectures with special module mappings can override it.
 */
void __weak module_memfree(void *module_region)
{
	/*
	 * This memory may be RO, and freeing RO memory in an interrupt is not
	 * supported by vmalloc.
	 */
	WARN_ON(in_interrupt());
	vfree(module_region);
}
2125
2112
@@ -2171,7 +2158,6 @@ static void free_module(struct module *mod)
2171
2158
mutex_unlock (& module_mutex );
2172
2159
2173
2160
/* This may be empty, but that's OK */
2174
- disable_ro_nx (& mod -> init_layout );
2175
2161
module_arch_freeing_init (mod );
2176
2162
module_memfree (mod -> init_layout .base );
2177
2163
kfree (mod -> args );
@@ -2181,7 +2167,6 @@ static void free_module(struct module *mod)
2181
2167
lockdep_free_key_range (mod -> core_layout .base , mod -> core_layout .size );
2182
2168
2183
2169
/* Finally, free the core (containing the module structure) */
2184
- disable_ro_nx (& mod -> core_layout );
2185
2170
module_memfree (mod -> core_layout .base );
2186
2171
}
2187
2172
@@ -3420,17 +3405,34 @@ static void do_mod_ctors(struct module *mod)
3420
3405
3421
3406
/* For freeing module_init on success, in case kallsyms traversing */
3422
3407
struct mod_initfree {
3423
- struct rcu_head rcu ;
3408
+ struct llist_node node ;
3424
3409
void * module_init ;
3425
3410
};
3426
3411
3427
- static void do_free_init (struct rcu_head * head )
3412
+ static void do_free_init (struct work_struct * w )
3428
3413
{
3429
- struct mod_initfree * m = container_of (head , struct mod_initfree , rcu );
3430
- module_memfree (m -> module_init );
3431
- kfree (m );
3414
+ struct llist_node * pos , * n , * list ;
3415
+ struct mod_initfree * initfree ;
3416
+
3417
+ list = llist_del_all (& init_free_list );
3418
+
3419
+ synchronize_rcu ();
3420
+
3421
+ llist_for_each_safe (pos , n , list ) {
3422
+ initfree = container_of (pos , struct mod_initfree , node );
3423
+ module_memfree (initfree -> module_init );
3424
+ kfree (initfree );
3425
+ }
3432
3426
}
3433
3427
3428
+ static int __init modules_wq_init (void )
3429
+ {
3430
+ INIT_WORK (& init_free_wq , do_free_init );
3431
+ init_llist_head (& init_free_list );
3432
+ return 0 ;
3433
+ }
3434
+ module_init (modules_wq_init );
3435
+
3434
3436
/*
3435
3437
* This is where the real work happens.
3436
3438
*
@@ -3507,7 +3509,6 @@ static noinline int do_init_module(struct module *mod)
3507
3509
#endif
3508
3510
module_enable_ro (mod , true);
3509
3511
mod_tree_remove_init (mod );
3510
- disable_ro_nx (& mod -> init_layout );
3511
3512
module_arch_freeing_init (mod );
3512
3513
mod -> init_layout .base = NULL ;
3513
3514
mod -> init_layout .size = 0 ;
@@ -3518,14 +3519,18 @@ static noinline int do_init_module(struct module *mod)
3518
3519
* We want to free module_init, but be aware that kallsyms may be
3519
3520
* walking this with preempt disabled. In all the failure paths, we
3520
3521
* call synchronize_rcu(), but we don't want to slow down the success
3521
- * path, so use actual RCU here.
3522
+ * path. module_memfree() cannot be called in an interrupt, so do the
3523
+ * work and call synchronize_rcu() in a work queue.
3524
+ *
3522
3525
* Note that module_alloc() on most architectures creates W+X page
3523
3526
* mappings which won't be cleaned up until do_free_init() runs. Any
3524
3527
* code such as mark_rodata_ro() which depends on those mappings to
3525
3528
* be cleaned up needs to sync with the queued work - ie
3526
3529
* rcu_barrier()
3527
3530
*/
3528
- call_rcu (& freeinit -> rcu , do_free_init );
3531
+ if (llist_add (& freeinit -> node , & init_free_list ))
3532
+ schedule_work (& init_free_wq );
3533
+
3529
3534
mutex_unlock (& module_mutex );
3530
3535
wake_up_all (& module_wq );
3531
3536
@@ -3822,10 +3827,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
3822
3827
module_bug_cleanup (mod );
3823
3828
mutex_unlock (& module_mutex );
3824
3829
3825
- /* we can't deallocate the module until we clear memory protection */
3826
- module_disable_ro (mod );
3827
- module_disable_nx (mod );
3828
-
3829
3830
ddebug_cleanup :
3830
3831
ftrace_release_mod (mod );
3831
3832
dynamic_debug_remove (mod , info -> debug );
0 commit comments