@@ -180,16 +180,12 @@ static void smp_callin(void)
 	apic_ap_setup();
 
 	/*
-	 * Save our processor parameters. Note: this information
-	 * is needed for clock calibration.
-	 */
-	smp_store_cpu_info(cpuid);
-
-	/*
+	 * Save our processor parameters and update topology.
+	 * Note: this information is needed for clock calibration.
 	 * The topology information must be up to date before
 	 * calibrate_delay() and notify_cpu_starting().
 	 */
-	set_cpu_sibling_map(raw_smp_processor_id());
+	smp_store_cpu_info(cpuid, false);
 
 	ap_init_aperfmperf();
 
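For context, a hedged sketch of how the new two-argument signature is meant to be used. The caller names below (native_ap_bringup(), xen_pv_cpu_bringup()) are hypothetical illustrations and not part of this patch; only smp_store_cpu_info(int, bool) itself comes from the hunks in this diff.

/* Sketch only: illustrates the intent of the force_single_core flag. */
void smp_store_cpu_info(int id, bool force_single_core);

/* Native AP bringup keeps the core count detected via CPUID. */
static void native_ap_bringup(int cpuid)
{
	smp_store_cpu_info(cpuid, false);
}

/* A Xen PV style caller would pass true to clamp x86_max_cores to 1. */
static void xen_pv_cpu_bringup(int cpuid)
{
	smp_store_cpu_info(cpuid, true);
}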
@@ -243,6 +239,12 @@ static void notrace start_secondary(void *unused)
 	 * its bit in cpu_callout_mask to release it.
 	 */
 	cpu_init_secondary();
+
+	/*
+	 * Even though notify_cpu_starting() will do this, it does so too late
+	 * as the AP may already have triggered lockdep splats by then. See
+	 * commit 29368e093 ("x86/smpboot: Move rcu_cpu_starting() earlier").
+	 */
 	rcu_cpu_starting(raw_smp_processor_id());
 	x86_cpuinit.early_percpu_clock_init();
 
@@ -351,7 +353,7 @@ EXPORT_SYMBOL(topology_phys_to_logical_die);
  * @pkg: The physical package id as retrieved via CPUID
  * @cpu: The cpu for which this is updated
  */
-int topology_update_package_map(unsigned int pkg, unsigned int cpu)
+static int topology_update_package_map(unsigned int pkg, unsigned int cpu)
 {
 	int new;
 
@@ -374,7 +376,7 @@ int topology_update_package_map(unsigned int pkg, unsigned int cpu)
  * @die: The die id as retrieved via CPUID
  * @cpu: The cpu for which this is updated
  */
-int topology_update_die_map(unsigned int die, unsigned int cpu)
+static int topology_update_die_map(unsigned int die, unsigned int cpu)
 {
 	int new;
 
@@ -405,25 +407,7 @@ void __init smp_store_boot_cpu_info(void)
 	c->initialized = true;
 }
 
-/*
- * The bootstrap kernel entry code has set these up. Save them for
- * a given CPU
- */
-void smp_store_cpu_info(int id)
-{
-	struct cpuinfo_x86 *c = &cpu_data(id);
-
-	/* Copy boot_cpu_data only on the first bringup */
-	if (!c->initialized)
-		*c = boot_cpu_data;
-	c->cpu_index = id;
-	/*
-	 * During boot time, CPU0 has this setup already. Save the info when
-	 * bringing up AP or offlined CPU0.
-	 */
-	identify_secondary_cpu(c);
-	c->initialized = true;
-}
+static arch_spinlock_t topology_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 static bool
 topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
@@ -629,7 +613,7 @@ static struct sched_domain_topology_level x86_topology[] = {
  */
 static bool x86_has_numa_in_package;
 
-void set_cpu_sibling_map(int cpu)
+static void set_cpu_sibling_map(int cpu)
 {
 	bool has_smt = smp_num_siblings > 1;
 	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
@@ -708,6 +692,37 @@ void set_cpu_sibling_map(int cpu)
 	}
 }
 
+/*
+ * The bootstrap kernel entry code has set these up. Save them for
+ * a given CPU
+ */
+void smp_store_cpu_info(int id, bool force_single_core)
+{
+	struct cpuinfo_x86 *c = &cpu_data(id);
+
+	/* Copy boot_cpu_data only on the first bringup */
+	if (!c->initialized)
+		*c = boot_cpu_data;
+	c->cpu_index = id;
+	/*
+	 * During boot time, CPU0 has this setup already. Save the info when
+	 * bringing up AP or offlined CPU0.
+	 */
+	identify_secondary_cpu(c);
+
+	arch_spin_lock(&topology_lock);
+	BUG_ON(topology_update_package_map(c->phys_proc_id, id));
+	BUG_ON(topology_update_die_map(c->cpu_die_id, id));
+	c->initialized = true;
+
+	/* For Xen PV */
+	if (force_single_core)
+		c->x86_max_cores = 1;
+
+	set_cpu_sibling_map(id);
+	arch_spin_unlock(&topology_lock);
+}
+
 /* maps the cpu to the sched domain representing multi-core */
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
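As a side note, here is a minimal sketch of the locking pattern introduced above, assuming the kernel's raw arch_spinlock_t primitives. update_shared_topology() is a hypothetical stand-in for the package/die map updates plus set_cpu_sibling_map() that the real smp_store_cpu_info() performs under topology_lock; it is not code from this patch.

#include <linux/spinlock.h>

static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void update_shared_topology(int cpu)
{
	/*
	 * arch_spin_lock() is the raw layer below the normal spinlock API:
	 * no lockdep tracking and no preempt accounting, which keeps it
	 * usable on a CPU that is still in the middle of being brought up.
	 */
	arch_spin_lock(&example_lock);
	/* ... read-modify-write of shared topology state for @cpu ... */
	arch_spin_unlock(&example_lock);
}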