|
22 | 22 | #include <linux/spinlock.h>
|
23 | 23 | #include <linux/lockdep.h>
|
24 | 24 | #include <linux/percpu.h>
|
| 25 | +#include <linux/cpu.h> |
25 | 26 |
|
26 | 27 | /* can make br locks by using local lock for read side, global lock for write */
|
27 | 28 | #define br_lock_init(name) name##_lock_init()
|
|
72 | 73 |
|
/* \
 * DEFINE_LGLOCK(name) - define the storage backing lglock "name": \
 * one arch spinlock per CPU for the fast local (read-side) path, plus \
 * name##_cpus, the set of CPUs whose per-cpu locks the *_online \
 * operations must visit.  name##_cpu_lock guards name##_cpus; the \
 * mask is kept in sync with CPU hotplug by name##_lg_cpu_callback. \
 */ \
#define DEFINE_LGLOCK(name) \
 \
 DEFINE_SPINLOCK(name##_cpu_lock); \
 cpumask_t name##_cpus __read_mostly; \
 DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
 DEFINE_LGLOCK_LOCKDEP(name); \
|
 /* \
  * CPU hotplug notifier: keep name##_cpus in step with CPU bring-up \
  * and tear-down so lock/unlock_online iterate over exactly the CPUs \
  * whose per-cpu lock may be in use.  CPU_TASKS_FROZEN is masked off \
  * so the suspend/resume (frozen) variants of each event are handled \
  * the same way.  All mask updates are made under name##_cpu_lock, \
  * which serializes them against a concurrent global_lock_online. \
  */ \
 static int \
 name##_lg_cpu_callback(struct notifier_block *nb, \
				unsigned long action, void *hcpu) \
 { \
	switch (action & ~CPU_TASKS_FROZEN) { \
	case CPU_UP_PREPARE: \
		/* mark the CPU before it starts running */ \
		spin_lock(&name##_cpu_lock); \
		cpu_set((unsigned long)hcpu, name##_cpus); \
		spin_unlock(&name##_cpu_lock); \
		break; \
	case CPU_UP_CANCELED: case CPU_DEAD: \
		/* failed bring-up or full teardown: drop the CPU */ \
		spin_lock(&name##_cpu_lock); \
		cpu_clear((unsigned long)hcpu, name##_cpus); \
		spin_unlock(&name##_cpu_lock); \
	} \
	return NOTIFY_OK; \
 } \
 static struct notifier_block name##_lg_cpu_notifier = { \
	.notifier_call = name##_lg_cpu_callback, \
 }; \
 /* \
  * name##_lock_init - one-time setup: initialize the lockdep map and \
  * put every per-cpu arch spinlock into the unlocked state. \
  */ \
 void name##_lock_init(void) { \
	int i; \
	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
|
|
		lock = &per_cpu(name##_lock, i); \
		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
	} \
	/* \
	 * Register the hotplug notifier first, then seed name##_cpus \
	 * with the currently-online CPUs while holding the hotplug \
	 * read lock (get/put_online_cpus); the notifier keeps the \
	 * mask up to date from here on. \
	 */ \
	register_hotcpu_notifier(&name##_lg_cpu_notifier); \
	get_online_cpus(); \
	for_each_online_cpu(i) \
		cpu_set(i, name##_cpus); \
	put_online_cpus(); \
 } \
 EXPORT_SYMBOL(name##_lock_init); \
|
88 | 116 | \
|
|
124 | 152 | \
|
 /* \
  * Write side: take the per-cpu lock of every CPU in name##_cpus. \
  * name##_cpu_lock is acquired here and held until \
  * name##_global_unlock_online drops it; that both serializes \
  * concurrent global lockers and prevents the hotplug notifier from \
  * changing the mask while we iterate over it. \
  */ \
 void name##_global_lock_online(void) { \
	int i; \
	spin_lock(&name##_cpu_lock); \
	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
	for_each_cpu(i, &name##_cpus) { \
		arch_spinlock_t *lock; \
		lock = &per_cpu(name##_lock, i); \
		arch_spin_lock(lock); \
|
 /* \
  * Release every per-cpu lock taken by name##_global_lock_online, \
  * then drop name##_cpu_lock (acquired there), allowing the hotplug \
  * notifier to update name##_cpus again. \
  */ \
 void name##_global_unlock_online(void) { \
	int i; \
	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
	for_each_cpu(i, &name##_cpus) { \
		arch_spinlock_t *lock; \
		lock = &per_cpu(name##_lock, i); \
		arch_spin_unlock(lock); \
	} \
	spin_unlock(&name##_cpu_lock); \
 } \
 EXPORT_SYMBOL(name##_global_unlock_online); \
|
148 | 176 | \
|
|
0 commit comments