[Xen-devel] [PATCH RFC 05/10] x86/passthrough: move hvm_dpci_isairq_eoi
This function is not Intel specific. Move it to io.c alongside its
sole user. Remove its declaration from iommu.h.
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
---
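(Not part of the patch, just context: a minimal sketch of how the
now-static helper is assumed to be reached from its sole user in io.c,
the guest GSI EOI path. The caller name and surrounding logic below are
illustrative only; only hvm_dpci_isairq_eoi() and NR_ISAIRQS come from
the code being moved.)

    /* Illustrative caller only; the real call site lives in io.c. */
    static void example_guest_gsi_eoi(struct domain *d, unsigned int guest_gsi)
    {
        /*
         * On EOI of a legacy GSI, ISA IRQs (< NR_ISAIRQS) routed through
         * the PCI links have their pending INTx deasserted and the pirq
         * EOI'd by the moved helper.
         */
        if ( guest_gsi < NR_ISAIRQS )
            hvm_dpci_isairq_eoi(d, guest_gsi);
    }
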
xen/drivers/passthrough/x86/io.c | 45 +++++++++++++++++++++++++++++++
xen/drivers/passthrough/x86/vtd/x86/vtd.c | 45 -------------------------------
xen/include/xen/iommu.h | 1 -
3 files changed, 45 insertions(+), 46 deletions(-)
diff --git a/xen/drivers/passthrough/x86/io.c b/xen/drivers/passthrough/x86/io.c
index 8f16e6c0a5..6a7c6415dc 100644
--- a/xen/drivers/passthrough/x86/io.c
+++ b/xen/drivers/passthrough/x86/io.c
@@ -51,6 +51,51 @@ enum {
     STATE_RUN
 };
 
+static int _hvm_dpci_isairq_eoi(struct domain *d,
+                                struct hvm_pirq_dpci *pirq_dpci, void *arg)
+{
+    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
+    unsigned int isairq = (long)arg;
+    const struct dev_intx_gsi_link *digl;
+
+    list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
+    {
+        unsigned int link = hvm_pci_intx_link(digl->device, digl->intx);
+
+        if ( hvm_irq->pci_link.route[link] == isairq )
+        {
+            hvm_pci_intx_deassert(d, digl->device, digl->intx);
+            if ( --pirq_dpci->pending == 0 )
+            {
+                stop_timer(&pirq_dpci->timer);
+                pirq_guest_eoi(dpci_pirq(pirq_dpci));
+            }
+        }
+    }
+
+    return 0;
+}
+
+static void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
+{
+    struct hvm_irq_dpci *dpci = NULL;
+
+    ASSERT(isairq < NR_ISAIRQS);
+    if ( !iommu_enabled)
+        return;
+
+    spin_lock(&d->event_lock);
+
+    dpci = domain_get_irq_dpci(d);
+
+    if ( dpci && test_bit(isairq, dpci->isairq_map) )
+    {
+        /* Multiple mirq may be mapped to one isa irq */
+        pt_pirq_iterate(d, _hvm_dpci_isairq_eoi, (void *)(long)isairq);
+    }
+    spin_unlock(&d->event_lock);
+}
+
 /*
  * This can be called multiple times, but the softirq is only raised once.
  * That is until the STATE_SCHED state has been cleared. The state can be
diff --git a/xen/drivers/passthrough/x86/vtd/x86/vtd.c b/xen/drivers/passthrough/x86/vtd/x86/vtd.c
index 88a60b3307..c7823be4e8 100644
--- a/xen/drivers/passthrough/x86/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/x86/vtd/x86/vtd.c
@@ -63,51 +63,6 @@ void flush_all_cache()
     wbinvd();
 }
 
-static int _hvm_dpci_isairq_eoi(struct domain *d,
-                                struct hvm_pirq_dpci *pirq_dpci, void *arg)
-{
-    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
-    unsigned int isairq = (long)arg;
-    const struct dev_intx_gsi_link *digl;
-
-    list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
-    {
-        unsigned int link = hvm_pci_intx_link(digl->device, digl->intx);
-
-        if ( hvm_irq->pci_link.route[link] == isairq )
-        {
-            hvm_pci_intx_deassert(d, digl->device, digl->intx);
-            if ( --pirq_dpci->pending == 0 )
-            {
-                stop_timer(&pirq_dpci->timer);
-                pirq_guest_eoi(dpci_pirq(pirq_dpci));
-            }
-        }
-    }
-
-    return 0;
-}
-
-void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
-{
-    struct hvm_irq_dpci *dpci = NULL;
-
-    ASSERT(isairq < NR_ISAIRQS);
-    if ( !iommu_enabled)
-        return;
-
-    spin_lock(&d->event_lock);
-
-    dpci = domain_get_irq_dpci(d);
-
-    if ( dpci && test_bit(isairq, dpci->isairq_map) )
-    {
-        /* Multiple mirq may be mapped to one isa irq */
-        pt_pirq_iterate(d, _hvm_dpci_isairq_eoi, (void *)(long)isairq);
-    }
-    spin_unlock(&d->event_lock);
-}
-
 void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
 {
     unsigned long i, j, tmp, top;
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 33c8b221dc..32674e6e59 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -99,7 +99,6 @@ int hvm_do_IRQ_dpci(struct domain *, struct pirq *);
 int pt_irq_create_bind(struct domain *, const struct xen_domctl_bind_pt_irq *);
 int pt_irq_destroy_bind(struct domain *, const struct xen_domctl_bind_pt_irq *);
 
-void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq);
 struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *);
 void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci);
 bool_t pt_irq_need_timer(uint32_t flags);
--
2.11.0