--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5096,7 +5096,7 @@ static int hvmop_inject_msi(
     if ( rc )
         goto out;
 
-    hvm_inject_msi(d, op.addr, op.data);
+    rc = hvm_inject_msi(d, op.addr, op.data);
 
  out:
     rcu_unlock_domain(d);
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -270,7 +270,7 @@ void hvm_set_pci_link_route(struct domai
             d->domain_id, link, old_isa_irq, isa_irq);
 }
 
-void hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
+int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
 {
     uint32_t tmp = (uint32_t) addr;
     uint8_t  dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
@@ -292,20 +292,28 @@ void hvm_inject_msi(struct domain *d, ui
             /* if it is the first time, allocate the pirq */
             if ( !info || info->arch.hvm.emuirq == IRQ_UNBOUND )
             {
+                int rc;
+
                 spin_lock(&d->event_lock);
-                map_domain_emuirq_pirq(d, pirq, IRQ_MSI_EMU);
+                rc = map_domain_emuirq_pirq(d, pirq, IRQ_MSI_EMU);
                 spin_unlock(&d->event_lock);
+                if ( rc )
+                    return rc;
                 info = pirq_info(d, pirq);
                 if ( !info )
-                    return;
-            } else if (info->arch.hvm.emuirq != IRQ_MSI_EMU)
-                return;
+                    return -EBUSY;
+            }
+            else if ( info->arch.hvm.emuirq != IRQ_MSI_EMU )
+                return -EINVAL;
             send_guest_pirq(d, info);
-            return;
+            return 0;
         }
+        return -ERANGE;
     }
 
     vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
+
+    return 0;
 }
 
 void hvm_set_callback_via(struct domain *d, uint64_t via)
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -123,7 +123,7 @@ void hvm_isa_irq_deassert(
 
 void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
 
-void hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data);
+int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data);
 
 void hvm_maybe_deassert_evtchn_irq(void);
 void hvm_assert_evtchn_irq(struct vcpu *v);