--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -147,7 +147,6 @@ static struct intel_iommu *__init alloc_
if ( intel == NULL )
return NULL;
 
- spin_lock_init(&intel->qi_ctrl.qinval_lock);
spin_lock_init(&intel->ir_ctrl.iremap_lock);
 
return intel;
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -487,8 +487,6 @@ extern struct list_head acpi_ioapic_unit
 
struct qi_ctrl {
u64 qinval_maddr; /* queue invalidation page machine address */
- int qinval_index; /* queue invalidation index */
- spinlock_t qinval_lock; /* lock for queue invalidation page */
};
 
struct ir_ctrl {
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -68,19 +68,21 @@ static void qinval_update_qtail(struct i
dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << QINVAL_INDEX_SHIFT));
}
 
-static int gen_cc_inv_dsc(struct iommu *iommu, unsigned int index,
+int queue_invalidate_context(struct iommu *iommu,
u16 did, u16 source_id, u8 function_mask, u8 granu)
{
unsigned long flags;
- struct qinval_entry *qinval_entry = NULL, *qinval_entries;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
- u64 entry_base = qi_ctrl->qinval_maddr +
- (( index >> QINVAL_ENTRY_ORDER ) << PAGE_SHIFT );
+ unsigned int index;
+ u64 entry_base;
+ struct qinval_entry *qinval_entry, *qinval_entries;
 
- spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
- qinval_entries =
- (struct qinval_entry *)map_vtd_domain_page(entry_base);
+ spin_lock_irqsave(&iommu->register_lock, flags);
+ index = qinval_next_index(iommu);
+ entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
+ qinval_entries = map_vtd_domain_page(entry_base);
qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+
qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
qinval_entry->q.cc_inv_dsc.lo.granu = granu;
qinval_entry->q.cc_inv_dsc.lo.res_1 = 0;
@@ -90,42 +92,29 @@ static int gen_cc_inv_dsc(struct iommu *
qinval_entry->q.cc_inv_dsc.lo.res_2 = 0;
qinval_entry->q.cc_inv_dsc.hi.res = 0;
 
+ qinval_update_qtail(iommu, index);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+
unmap_vtd_domain_page(qinval_entries);
- spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
 
return 0;
}
 
-int queue_invalidate_context(struct iommu *iommu,
- u16 did, u16 source_id, u8 function_mask, u8 granu)
+int queue_invalidate_iotlb(struct iommu *iommu,
+ u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
{
- int ret;
unsigned long flags;
unsigned int index;
+ u64 entry_base;
+ struct qinval_entry *qinval_entry, *qinval_entries;
 
spin_lock_irqsave(&iommu->register_lock, flags);
index = qinval_next_index(iommu);
- ret = gen_cc_inv_dsc(iommu, index, did, source_id,
- function_mask, granu);
- qinval_update_qtail(iommu, index);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
- return ret;
-}
-
-static int gen_iotlb_inv_dsc(struct iommu *iommu, unsigned int index,
- u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
-{
- unsigned long flags;
- struct qinval_entry *qinval_entry = NULL, *qinval_entries;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
- u64 entry_base = qi_ctrl->qinval_maddr +
- (( index >> QINVAL_ENTRY_ORDER ) << PAGE_SHIFT );
-
- spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
-
- qinval_entries =
- (struct qinval_entry *)map_vtd_domain_page(entry_base);
+ entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
+ qinval_entries = map_vtd_domain_page(entry_base);
qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+
qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
qinval_entry->q.iotlb_inv_dsc.lo.dr = dr;
@@ -140,50 +129,9 @@ static int gen_iotlb_inv_dsc(struct iomm
qinval_entry->q.iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
 
unmap_vtd_domain_page(qinval_entries);
- spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
- return 0;
-}
-
-int queue_invalidate_iotlb(struct iommu *iommu,
- u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
-{
- int ret;
- unsigned long flags;
- unsigned int index;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
-
- index = qinval_next_index(iommu);
- ret = gen_iotlb_inv_dsc(iommu, index, granu, dr, dw, did,
- am, ih, addr);
qinval_update_qtail(iommu, index);
spin_unlock_irqrestore(&iommu->register_lock, flags);
- return ret;
-}
 
-static int gen_wait_dsc(struct iommu *iommu, unsigned int index,
- u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
-{
- unsigned long flags;
- struct qinval_entry *qinval_entry = NULL, *qinval_entries;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
- u64 entry_base = qi_ctrl->qinval_maddr +
- (( index >> QINVAL_ENTRY_ORDER ) << PAGE_SHIFT );
-
- spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
- qinval_entries =
- (struct qinval_entry *)map_vtd_domain_page(entry_base);
- qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
- qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
- qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
- qinval_entry->q.inv_wait_dsc.lo.sw = sw;
- qinval_entry->q.inv_wait_dsc.lo.fn = fn;
- qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
- qinval_entry->q.inv_wait_dsc.lo.sdata = sdata;
- qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
- qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(saddr) >> 2;
- unmap_vtd_domain_page(qinval_entries);
- spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
return 0;
}
 
@@ -193,12 +141,27 @@ static int queue_invalidate_wait(struct
s_time_t start_time;
volatile u32 poll_slot = QINVAL_STAT_INIT;
unsigned int index;
- int ret;
unsigned long flags;
+ u64 entry_base;
+ struct qinval_entry *qinval_entry, *qinval_entries;
 
spin_lock_irqsave(&iommu->register_lock, flags);
index = qinval_next_index(iommu);
- ret = gen_wait_dsc(iommu, index, iflag, sw, fn, QINVAL_STAT_DONE, &poll_slot);
+ entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
+ qinval_entries = map_vtd_domain_page(entry_base);
+ qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+
+ qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
+ qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
+ qinval_entry->q.inv_wait_dsc.lo.sw = sw;
+ qinval_entry->q.inv_wait_dsc.lo.fn = fn;
+ qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
+ qinval_entry->q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE;
+ qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
+ qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(&poll_slot) >> 2;
+
+ unmap_vtd_domain_page(qinval_entries);
qinval_update_qtail(iommu, index);
spin_unlock_irqrestore(&iommu->register_lock, flags);
 
@@ -216,10 +179,10 @@ static int queue_invalidate_wait(struct
}
cpu_relax();
}
+ return 0;
}
- else if ( !ret )
- ret = -EOPNOTSUPP;
- return ret;
+
+ return -EOPNOTSUPP;
}
 
static int invalidate_sync(struct iommu *iommu)
@@ -231,20 +194,21 @@ static int invalidate_sync(struct iommu
return 0;
}
 
-static int gen_dev_iotlb_inv_dsc(struct iommu *iommu, unsigned int index,
+int qinval_device_iotlb(struct iommu *iommu,
u32 max_invs_pend, u16 sid, u16 size, u64 addr)
{
unsigned long flags;
- struct qinval_entry *qinval_entry = NULL, *qinval_entries;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
- u64 entry_base = qi_ctrl->qinval_maddr +
- (( index >> QINVAL_ENTRY_ORDER ) << PAGE_SHIFT );
-
- spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
+ unsigned int index;
+ u64 entry_base;
+ struct qinval_entry *qinval_entry, *qinval_entries;
 
- qinval_entries =
- (struct qinval_entry *)map_vtd_domain_page(entry_base);
+ spin_lock_irqsave(&iommu->register_lock, flags);
+ index = qinval_next_index(iommu);
+ entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
+ qinval_entries = map_vtd_domain_page(entry_base);
qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+
qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
qinval_entry->q.dev_iotlb_inv_dsc.lo.max_invs_pend = max_invs_pend;
@@ -257,40 +221,26 @@ static int gen_dev_iotlb_inv_dsc(struct
qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
 
unmap_vtd_domain_page(qinval_entries);
- spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
+ qinval_update_qtail(iommu, index);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+
return 0;
}
 
-int qinval_device_iotlb(struct iommu *iommu,
- u32 max_invs_pend, u16 sid, u16 size, u64 addr)
+int queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
{
- int ret;
unsigned long flags;
unsigned int index;
+ u64 entry_base;
+ struct qinval_entry *qinval_entry, *qinval_entries;
 
spin_lock_irqsave(&iommu->register_lock, flags);
index = qinval_next_index(iommu);
- ret = gen_dev_iotlb_inv_dsc(iommu, index, max_invs_pend,
- sid, size, addr);
- qinval_update_qtail(iommu, index);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
- return ret;
-}
-
-static int gen_iec_inv_dsc(struct iommu *iommu, unsigned int index,
- u8 granu, u8 im, u16 iidx)
-{
- unsigned long flags;
- struct qinval_entry *qinval_entry = NULL, *qinval_entries;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
- u64 entry_base = qi_ctrl->qinval_maddr +
- (( index >> QINVAL_ENTRY_ORDER ) << PAGE_SHIFT );
-
- spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
-
- qinval_entries =
- (struct qinval_entry *)map_vtd_domain_page(entry_base);
+ entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
+ qinval_entries = map_vtd_domain_page(entry_base);
qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+
qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
qinval_entry->q.iec_inv_dsc.lo.granu = granu;
qinval_entry->q.iec_inv_dsc.lo.res_1 = 0;
@@ -300,22 +250,10 @@ static int gen_iec_inv_dsc(struct iommu
qinval_entry->q.iec_inv_dsc.hi.res = 0;
 
unmap_vtd_domain_page(qinval_entries);
- spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
- return 0;
-}
-
-int queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
-{
- int ret;
- unsigned long flags;
- unsigned int index;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
- index = qinval_next_index(iommu);
- ret = gen_iec_inv_dsc(iommu, index, granu, im, iidx);
qinval_update_qtail(iommu, index);
spin_unlock_irqrestore(&iommu->register_lock, flags);
- return ret;
+
+ return 0;
}
 
static int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)