[PATCH v2 11/16] accel: Inline NVMM get_qemu_vcpu()
No need for this helper to access the CPUState::accel field.

Reviewed-by: Richard Henderson <richard.henderson@xxxxxxxxxx>
Signed-off-by: Philippe Mathieu-Daudé <philmd@xxxxxxxxxx>
---
 target/i386/nvmm/nvmm-all.c | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)

diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index e5ee4af084..72a3a9e3ae 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -49,12 +49,6 @@ struct qemu_machine {
 static bool nvmm_allowed;
 static struct qemu_machine qemu_mach;
 
-static AccelCPUState *
-get_qemu_vcpu(CPUState *cpu)
-{
-    return cpu->accel;
-}
-
 static struct nvmm_machine *
 get_nvmm_mach(void)
 {
@@ -86,7 +80,7 @@ nvmm_set_registers(CPUState *cpu)
 {
     CPUX86State *env = cpu->env_ptr;
     struct nvmm_machine *mach = get_nvmm_mach();
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     struct nvmm_x64_state *state = vcpu->state;
     uint64_t bitmap;
@@ -223,7 +217,7 @@ nvmm_get_registers(CPUState *cpu)
 {
     CPUX86State *env = cpu->env_ptr;
     struct nvmm_machine *mach = get_nvmm_mach();
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     X86CPU *x86_cpu = X86_CPU(cpu);
     struct nvmm_x64_state *state = vcpu->state;
@@ -347,7 +341,7 @@ static bool
 nvmm_can_take_int(CPUState *cpu)
 {
     CPUX86State *env = cpu->env_ptr;
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     struct nvmm_machine *mach = get_nvmm_mach();
 
@@ -372,7 +366,7 @@ nvmm_can_take_int(CPUState *cpu)
 static bool
 nvmm_can_take_nmi(CPUState *cpu)
 {
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
 
     /*
      * Contrary to INTs, NMIs always schedule an exit when they are
@@ -395,7 +389,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
 {
     CPUX86State *env = cpu->env_ptr;
     struct nvmm_machine *mach = get_nvmm_mach();
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     X86CPU *x86_cpu = X86_CPU(cpu);
     struct nvmm_x64_state *state = vcpu->state;
@@ -478,7 +472,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
 static void
 nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit)
 {
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
     CPUX86State *env = cpu->env_ptr;
     X86CPU *x86_cpu = X86_CPU(cpu);
     uint64_t tpr;
@@ -565,7 +559,7 @@ static int
 nvmm_handle_rdmsr(struct nvmm_machine *mach, CPUState *cpu,
     struct nvmm_vcpu_exit *exit)
 {
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     X86CPU *x86_cpu = X86_CPU(cpu);
     struct nvmm_x64_state *state = vcpu->state;
@@ -610,7 +604,7 @@ static int
 nvmm_handle_wrmsr(struct nvmm_machine *mach, CPUState *cpu,
     struct nvmm_vcpu_exit *exit)
 {
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     X86CPU *x86_cpu = X86_CPU(cpu);
     struct nvmm_x64_state *state = vcpu->state;
@@ -686,7 +680,7 @@ nvmm_vcpu_loop(CPUState *cpu)
 {
     CPUX86State *env = cpu->env_ptr;
     struct nvmm_machine *mach = get_nvmm_mach();
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     X86CPU *x86_cpu = X86_CPU(cpu);
     struct nvmm_vcpu_exit *exit = vcpu->exit;
@@ -892,7 +886,7 @@ static void
 nvmm_ipi_signal(int sigcpu)
 {
     if (current_cpu) {
-        AccelCPUState *qcpu = get_qemu_vcpu(current_cpu);
+        AccelCPUState *qcpu = current_cpu->accel;
 #if NVMM_USER_VERSION >= 2
         struct nvmm_vcpu *vcpu = &qcpu->vcpu;
         nvmm_vcpu_stop(vcpu);
@@ -1023,7 +1017,7 @@ void
 nvmm_destroy_vcpu(CPUState *cpu)
 {
     struct nvmm_machine *mach = get_nvmm_mach();
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
 
     nvmm_vcpu_destroy(mach, &qcpu->vcpu);
     g_free(cpu->accel);
-- 
2.38.1
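
The change is purely mechanical: get_qemu_vcpu() only forwarded a single
field access, so every call site now dereferences cpu->accel directly and
the helper becomes dead code. A minimal standalone sketch of the pattern,
using hypothetical stand-in struct definitions rather than the real QEMU
types (which live in QEMU's headers):

    /* Sketch only: AccelCPUState/CPUState here are simplified stand-ins. */
    #include <stdio.h>

    typedef struct AccelCPUState {
        int vcpu_id;                /* placeholder per-vCPU state */
    } AccelCPUState;

    typedef struct CPUState {
        AccelCPUState *accel;       /* per-accelerator vCPU state */
    } CPUState;

    /* Before: a helper that merely forwarded one field access. */
    static AccelCPUState *
    get_qemu_vcpu(CPUState *cpu)
    {
        return cpu->accel;
    }

    int main(void)
    {
        AccelCPUState acc = { .vcpu_id = 0 };
        CPUState cpu = { .accel = &acc };

        /* After: the call site reads the field directly, making the
         * helper above unused and safe to delete. */
        AccelCPUState *qcpu = cpu.accel;

        printf("same pointer: %d\n", qcpu == get_qemu_vcpu(&cpu));
        return 0;
    }

Inlining a one-line accessor like this removes a level of indirection for
the reader without changing behavior, which is why the diff touches many
call sites but nets out to a simple deletion.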