This repository was archived by the owner on Jan 28, 2023. It is now read-only.

Commit 119dea0

Normalized prefixes of functions/types
Some platform-specific functions such as `smp_mb` or `__fls` had no `hax_`/`asm_` prefix and clashed with functions declared in kernel headers.

Signed-off-by: Alexandro Sanchez Bach <[email protected]>
1 parent e13a10e commit 119dea0

File tree: 19 files changed, +59 −53 lines

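The renames follow a single prefixing scheme: hax_ for platform wrappers, asm_ for assembly helpers, ia32_ for raw IA-32 instruction wrappers. A condensed sketch of the affected declarations; the hax_ signatures are approximated from the call sites in this diff, not copied from a header:

    /* Sketch only; the hax_ signatures are approximated from call sites below,
     * while asm_fls() and ia32_rdtsc() match the core/include/ia32.h hunk. */
    void     hax_smp_mb(void);                            /* was smp_mb()            */
    void     hax_smp_call_function(hax_cpumap_t *cpus,
                                   void (*fn)(void *),
                                   void *param);          /* was smp_call_function() */
    uint64_t ia32_rdtsc(void);                            /* was rdtsc()             */
    uint32_t ASMCALL asm_fls(uint32_t bit32);             /* was __fls()             */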

core/cpu.c

Lines changed: 1 addition & 1 deletion
@@ -520,7 +520,7 @@ uint32_t load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
     /* when wake up from sleep, we need the barrier, as vm operation
      * are not serialized instructions.
      */
-    smp_mb();
+    hax_smp_mb();

     cpu_data = current_cpu_data();

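For context, a minimal sketch of what a wrapper like hax_smp_mb() typically reduces to on x86; illustrative only, not taken from the HAXM tree:

    /* Illustrative only: a full memory barrier on x86, carrying a hax_ prefix
     * so the symbol cannot collide with a kernel header's smp_mb(). */
    static inline void hax_smp_mb_sketch(void)
    {
        __asm__ __volatile__("mfence" ::: "memory");
    }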
core/ept.c

Lines changed: 2 additions & 2 deletions
@@ -328,7 +328,7 @@ static void invept_smpfunc(struct invept_bundle *bundle)
 {
     struct per_cpu_data *cpu_data;

-    smp_mb();
+    hax_smp_mb();
     cpu_data = current_cpu_data();
     cpu_data->invept_res = VMX_SUCCEED;

@@ -373,7 +373,7 @@ void invept(hax_vm_t *hax_vm, uint type)

     bundle.type = type;
     bundle.desc = &desc;
-    smp_call_function(&cpu_online_map, (void (*)(void *))invept_smpfunc,
+    hax_smp_call_function(&cpu_online_map, (void (*)(void *))invept_smpfunc,
                       &bundle);

     /*

core/hax.c

Lines changed: 3 additions & 3 deletions
@@ -61,12 +61,12 @@ struct hax_t *hax;
 extern hax_atomic_t vmx_cpu_num, vmx_enabled_num;
 static void hax_enable_vmx(void)
 {
-    smp_call_function(&cpu_online_map, cpu_init_vmx, NULL);
+    hax_smp_call_function(&cpu_online_map, cpu_init_vmx, NULL);
 }

 static void hax_disable_vmx(void)
 {
-    smp_call_function(&cpu_online_map, cpu_exit_vmx, NULL);
+    hax_smp_call_function(&cpu_online_map, cpu_exit_vmx, NULL);
 }

 static void free_cpu_vmxon_region(void)

@@ -410,7 +410,7 @@ static void hax_pmu_init(void)
     int ref_cpu_id = -1;

     // Execute cpu_pmu_init() on each logical processor of the host CPU
-    smp_call_function(&cpu_online_map, cpu_pmu_init, NULL);
+    hax_smp_call_function(&cpu_online_map, cpu_pmu_init, NULL);

     // Find the common APM version supported by all host logical processors
     // TODO: Theoretically we should do the same for other APM parameters

core/ia32.c

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ void ia32_wrmsr(uint32_t reg, uint64_t val)
 #endif
 }

-uint64_t rdtsc(void)
+uint64_t ia32_rdtsc(void)
 {
 #ifdef HAX_ARCH_X86_32
     struct qword_val val = { 0 };
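Only the symbol changes here; the function still reads the time-stamp counter. A minimal sketch of that operation on a 64-bit build (assumed, not copied from HAXM):

    #include <stdint.h>

    /* Assumed sketch: RDTSC returns the counter in EDX:EAX; combine the halves. */
    static inline uint64_t ia32_rdtsc_sketch(void)
    {
        uint32_t lo, hi;
        __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
        return ((uint64_t)hi << 32) | lo;
    }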

core/ia32_ops.asm

Lines changed: 1 addition & 1 deletion
@@ -161,7 +161,7 @@ function __nmi, 0
     int 2h
     ret

-function __fls, 1
+function asm_fls, 1
     xor reg_ret_32, reg_ret_32
     bsr reg_ret_32, reg_arg1_32
     ret
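asm_fls keeps the old __fls behavior: the return register is cleared, then BSR writes the index of the most significant set bit. A rough C equivalent (hypothetical helper, shown only to document the semantics):

    /* Hypothetical C equivalent of asm_fls: zero-based index of the highest
     * set bit. The result is pre-cleared, so a zero input typically yields 0
     * (BSR's destination is architecturally undefined in that case). */
    static inline uint32_t asm_fls_sketch(uint32_t bit32)
    {
        uint32_t ret = 0;
        __asm__("bsr %1, %0" : "+r"(ret) : "rm"(bit32) : "cc");
        return ret;
    }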

core/include/cpu.h

Lines changed: 2 additions & 2 deletions
@@ -108,12 +108,12 @@ struct per_cpu_data {

     /*
      * These fields are used to record the result of certain VMX instructions
-     * when they are used in a function wrapped by smp_call_function(). This is
+     * when they are used in a function wrapped by hax_smp_call_function(). This is
      * because it is not safe to call hax_error(), etc. (whose underlying
      * implementation may use a lock) from the wrapped function to log a
      * failure; doing so may cause a deadlock and thus a host reboot, especially
      * on macOS, where mp_rendezvous_no_intrs() (the legacy Darwin API used by
-     * HAXM to implement smp_call_function()) is known to be prone to deadlocks:
+     * HAXM to implement hax_smp_call_function()) is known to be prone to deadlocks:
      * https://lists.apple.com/archives/darwin-kernel/2006/Dec/msg00006.html
      */
     vmx_result_t vmxon_res;
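This comment is why fields such as vmxon_res exist: callbacks dispatched through hax_smp_call_function() must not log. A sketch of that pattern follows; the callback name and enter_vmx_operation() are placeholders, not HAXM symbols:

    /* Placeholder callback run on each CPU via hax_smp_call_function(): record
     * the VMX result in per-CPU data instead of calling hax_error(); the caller
     * inspects vmxon_res only after the cross-call has returned. */
    static void vmxon_smpfunc_sketch(void *arg)
    {
        struct per_cpu_data *cpu_data = current_cpu_data();

        cpu_data->vmxon_res = enter_vmx_operation(cpu_data);  /* placeholder */
    }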

core/include/ia32.h

Lines changed: 2 additions & 2 deletions
@@ -78,12 +78,12 @@ void ASMCALL asm_fxrstor(mword *addr);
 void ASMCALL asm_cpuid(union cpuid_args_t *state);

 void ASMCALL __nmi(void);
-uint32_t ASMCALL __fls(uint32_t bit32);
+uint32_t ASMCALL asm_fls(uint32_t bit32);

 uint64_t ia32_rdmsr(uint32_t reg);
 void ia32_wrmsr(uint32_t reg, uint64_t val);

-uint64_t rdtsc(void);
+uint64_t ia32_rdtsc(void);

 void fxinit(void);
 void fxsave(mword *addr);

core/intr_exc.c

Lines changed: 1 addition & 1 deletion
@@ -48,7 +48,7 @@ uint32_t vcpu_get_pending_intrs(struct vcpu_t *vcpu)

     for (i = 7; i >= 0; i--) {
         if (intr_pending[i]) {
-            offset = __fls(intr_pending[i]);
+            offset = asm_fls(intr_pending[i]);
             break;
         }
     }

core/memory.c

Lines changed: 1 addition & 1 deletion
@@ -342,7 +342,7 @@ int hax_vm_set_ram(struct vm_t *vm, struct hax_set_ram_info *info)
         hva = 0;
 #endif
 #endif
-        cur_va += page_size;
+        cur_va += HAX_PAGE_SIZE;
     }

     if (!hax_core_set_p2m(vm, gpfn, hpfn, hva, info->flags)) {

core/vcpu.c

Lines changed: 7 additions & 7 deletions
@@ -565,7 +565,7 @@ static void vcpu_init(struct vcpu_t *vcpu)

     vcpu->ref_count = 1;

-    vcpu->tsc_offset = 0ULL - rdtsc();
+    vcpu->tsc_offset = 0ULL - ia32_rdtsc();

     // Prepare the vcpu state to Power-up
     state->_rflags = 2;

@@ -3247,7 +3247,7 @@ static int handle_msr_read(struct vcpu_t *vcpu, uint32_t msr, uint64_t *val)

     switch (msr) {
     case IA32_TSC: {
-        *val = vcpu->tsc_offset + rdtsc();
+        *val = vcpu->tsc_offset + ia32_rdtsc();
         break;
     }
     case IA32_FEATURE_CONTROL: {

@@ -3503,7 +3503,7 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val)

     switch (msr) {
     case IA32_TSC: {
-        vcpu->tsc_offset = val - rdtsc();
+        vcpu->tsc_offset = val - ia32_rdtsc();
         if (vmx(vcpu, pcpu_ctls) & USE_TSC_OFFSETTING) {
             vmwrite(vcpu, VMX_TSC_OFFSET, vcpu->tsc_offset);
         }
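The IA32_TSC read and write paths above keep a per-vCPU offset against the host counter, i.e. guest TSC = host TSC + tsc_offset. A small sketch of that arithmetic (hypothetical helper names, same logic as the hunks above):

    /* Reading adds the stored offset to the current host TSC; writing simply
     * recomputes the offset against the current host reading. */
    static uint64_t guest_tsc_read_sketch(uint64_t tsc_offset)
    {
        return tsc_offset + ia32_rdtsc();
    }

    static uint64_t new_tsc_offset_sketch(uint64_t guest_val)
    {
        return guest_val - ia32_rdtsc();
    }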
@@ -4160,9 +4160,9 @@ int vcpu_pause(struct vcpu_t *vcpu)
         return -1;

     vcpu->paused = 1;
-    smp_mb();
+    hax_smp_mb();
     if (vcpu->is_running) {
-        smp_call_function(&cpu_online_map, _vcpu_take_off, NULL);
+        hax_smp_call_function(&cpu_online_map, _vcpu_take_off, NULL);
     }

     return 0;

@@ -4171,15 +4171,15 @@ int vcpu_pause(struct vcpu_t *vcpu)
 int vcpu_takeoff(struct vcpu_t *vcpu)
 {
     int cpu_id;
-    cpumap_t targets;
+    hax_cpumap_t targets;

     // Don't change the sequence unless you are sure
     if (vcpu->is_running) {
         cpu_id = vcpu->cpu_id;
         assert(cpu_id != hax_cpuid());
         targets = cpu2cpumap(cpu_id);
         // If not considering Windows XP, definitely we don't need this
-        smp_call_function(&targets, _vcpu_take_off, NULL);
+        hax_smp_call_function(&targets, _vcpu_take_off, NULL);
     }

     return 0;
