[PATCH v6 28/39] accel: Expose and register generic_handle_interrupt()
In order to dispatch over AccelOpsClass::handle_interrupt(), we need it
to always be defined, rather than falling back to a hidden handler under
the hood. Make AccelOpsClass::handle_interrupt() mandatory. Expose the
generic_handle_interrupt() prototype and register it for each accelerator.

Suggested-by: Richard Henderson <richard.henderson@xxxxxxxxxx>
Signed-off-by: Philippe Mathieu-Daudé <philmd@xxxxxxxxxx>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@xxxxxxxxxx>
Reviewed-by: Zhao Liu <zhao1.liu@xxxxxxxxx>
Reviewed-by: Richard Henderson <richard.henderson@xxxxxxxxxx>
---
 include/system/accel-ops.h        |  3 +++
 accel/hvf/hvf-accel-ops.c         |  1 +
 accel/kvm/kvm-accel-ops.c         |  1 +
 accel/qtest/qtest.c               |  1 +
 accel/xen/xen-all.c               |  1 +
 system/cpus.c                     | 10 ++++------
 target/i386/nvmm/nvmm-accel-ops.c |  1 +
 target/i386/whpx/whpx-accel-ops.c |  1 +
 8 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/include/system/accel-ops.h b/include/system/accel-ops.h
index d84eaa376c2..95a0f402cde 100644
--- a/include/system/accel-ops.h
+++ b/include/system/accel-ops.h
@@ -61,6 +61,7 @@ struct AccelOpsClass {
     void (*synchronize_pre_loadvm)(CPUState *cpu);
     void (*synchronize_pre_resume)(bool step_pending);
 
+    /* handle_interrupt is mandatory. */
     void (*handle_interrupt)(CPUState *cpu, int old_mask, int new_mask);
 
     /**
@@ -84,4 +85,6 @@ struct AccelOpsClass {
     void (*remove_all_breakpoints)(CPUState *cpu);
 };
 
+void generic_handle_interrupt(CPUState *cpu, int old_mask, int new_mask);
+
 #endif /* ACCEL_OPS_H */
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index 2944e350ca9..a0248942f3a 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -353,6 +353,7 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data)
 
     ops->create_vcpu_thread = hvf_start_vcpu_thread;
     ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
+    ops->handle_interrupt = generic_handle_interrupt;
 
     ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
     ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index 99f61044da5..2a744092749 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -95,6 +95,7 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, const void *data)
     ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
     ops->synchronize_state = kvm_cpu_synchronize_state;
     ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
+    ops->handle_interrupt = generic_handle_interrupt;
 
 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
     ops->update_guest_debug = kvm_update_guest_debug_ops;
diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c
index 612cede160b..5474ce73135 100644
--- a/accel/qtest/qtest.c
+++ b/accel/qtest/qtest.c
@@ -67,6 +67,7 @@ static void qtest_accel_ops_class_init(ObjectClass *oc, const void *data)
     ops->create_vcpu_thread = dummy_start_vcpu_thread;
     ops->get_virtual_clock = qtest_get_virtual_clock;
     ops->set_virtual_clock = qtest_set_virtual_clock;
+    ops->handle_interrupt = generic_handle_interrupt;
 };
 
 static const TypeInfo qtest_accel_ops_type = {
diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
index c150dd43cab..c12c22de785 100644
--- a/accel/xen/xen-all.c
+++ b/accel/xen/xen-all.c
@@ -153,6 +153,7 @@ static void xen_accel_ops_class_init(ObjectClass *oc, const void *data)
     AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
 
     ops->create_vcpu_thread = dummy_start_vcpu_thread;
+    ops->handle_interrupt = generic_handle_interrupt;
 }
 
 static const TypeInfo xen_accel_ops_type = {
diff --git a/system/cpus.c b/system/cpus.c
index 13535a74e6f..f90b8be9eee 100644
--- a/system/cpus.c
+++ b/system/cpus.c
@@ -246,7 +246,7 @@ int64_t cpus_get_elapsed_ticks(void)
     return cpu_get_ticks();
 }
 
-static void generic_handle_interrupt(CPUState *cpu, int old_mask, int new_mask)
+void generic_handle_interrupt(CPUState *cpu, int old_mask, int new_mask)
 {
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
@@ -261,11 +261,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
 
     cpu->interrupt_request |= mask;
 
-    if (cpus_accel->handle_interrupt) {
-        cpus_accel->handle_interrupt(cpu, old_mask, cpu->interrupt_request);
-    } else {
-        generic_handle_interrupt(cpu, old_mask, cpu->interrupt_request);
-    }
+    cpus_accel->handle_interrupt(cpu, old_mask, cpu->interrupt_request);
 }
 
 /*
@@ -674,6 +670,8 @@ void cpus_register_accel(const AccelOpsClass *ops)
 {
     assert(ops != NULL);
     assert(ops->create_vcpu_thread != NULL); /* mandatory */
+    assert(ops->handle_interrupt);
+
     cpus_accel = ops;
 }
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
index 21443078b72..a5517b0abf3 100644
--- a/target/i386/nvmm/nvmm-accel-ops.c
+++ b/target/i386/nvmm/nvmm-accel-ops.c
@@ -87,6 +87,7 @@ static void nvmm_accel_ops_class_init(ObjectClass *oc, const void *data)
 
     ops->create_vcpu_thread = nvmm_start_vcpu_thread;
    ops->kick_vcpu_thread = nvmm_kick_vcpu_thread;
+    ops->handle_interrupt = generic_handle_interrupt;
 
     ops->synchronize_post_reset = nvmm_cpu_synchronize_post_reset;
     ops->synchronize_post_init = nvmm_cpu_synchronize_post_init;
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
index b8bebe403c9..31cf15f0045 100644
--- a/target/i386/whpx/whpx-accel-ops.c
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -90,6 +90,7 @@ static void whpx_accel_ops_class_init(ObjectClass *oc, const void *data)
     ops->create_vcpu_thread = whpx_start_vcpu_thread;
     ops->kick_vcpu_thread = whpx_kick_vcpu_thread;
     ops->cpu_thread_is_idle = whpx_vcpu_thread_is_idle;
+    ops->handle_interrupt = generic_handle_interrupt;
 
     ops->synchronize_post_reset = whpx_cpu_synchronize_post_reset;
     ops->synchronize_post_init = whpx_cpu_synchronize_post_init;
-- 
2.49.0
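
[Editorial note] The pattern established above -- a mandatory callback checked once at registration time and then dispatched without a per-call NULL test -- can be modelled in a few lines of standalone C. The sketch below is illustrative only and is not QEMU code: Ops, default_handle_interrupt(), register_ops() and raise_interrupt() are made-up names standing in for AccelOpsClass, generic_handle_interrupt(), cpus_register_accel() and cpu_interrupt().

/*
 * Standalone sketch of "mandatory handler, asserted at registration,
 * dispatched unconditionally". All names are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

typedef struct Ops {
    /* Mandatory: every "accelerator" must register a handler. */
    void (*handle_interrupt)(int old_mask, int new_mask);
} Ops;

static const Ops *current_ops;

/* Generic fallback, now registered explicitly rather than used implicitly. */
static void default_handle_interrupt(int old_mask, int new_mask)
{
    printf("interrupt mask changed: %#x -> %#x\n", old_mask, new_mask);
}

static void register_ops(const Ops *ops)
{
    assert(ops != NULL);
    assert(ops->handle_interrupt); /* mandatory, as in cpus_register_accel() */
    current_ops = ops;
}

static void raise_interrupt(int *request, int mask)
{
    int old_mask = *request;

    *request |= mask;
    /* No NULL check needed: the handler is guaranteed to be set. */
    current_ops->handle_interrupt(old_mask, *request);
}

int main(void)
{
    static const Ops dummy_ops = {
        .handle_interrupt = default_handle_interrupt,
    };
    int request = 0;

    register_ops(&dummy_ops);
    raise_interrupt(&request, 0x2);
    return 0;
}

Moving the check from dispatch time to registration time replaces a branch on every interrupt with a one-time assertion, which is the point of making the handler mandatory.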