[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH V3 12/29] x86/vvtd: Add MMIO handler for VVTD



From: Chao Gao <chao.gao@xxxxxxxxx>

This patch adds a vVTD MMIO handler to deal with MMIO accesses to the
emulated VT-d register page.

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
Signed-off-by: Lan Tianyu <tianyu.lan@xxxxxxxxx>
---
 xen/drivers/passthrough/vtd/vvtd.c | 91 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)

diff --git a/xen/drivers/passthrough/vtd/vvtd.c b/xen/drivers/passthrough/vtd/vvtd.c
index c851ec7..a3002c3 100644
--- a/xen/drivers/passthrough/vtd/vvtd.c
+++ b/xen/drivers/passthrough/vtd/vvtd.c
@@ -47,6 +47,29 @@ struct vvtd {
     struct page_info *regs_page;
 };
 
+/* Setting viommu_verbose enables debugging messages of vIOMMU */
+bool __read_mostly viommu_verbose;
+boolean_runtime_param("viommu_verbose", viommu_verbose);
+
+#ifndef NDEBUG
+#define vvtd_info(fmt...) do {                    \
+    if ( viommu_verbose )                         \
+        gprintk(XENLOG_G_INFO, ## fmt);           \
+} while(0)
+#define vvtd_debug(fmt...) do {                   \
+    if ( viommu_verbose && printk_ratelimit() )   \
+        printk(XENLOG_G_DEBUG fmt);               \
+} while(0)
+#else
+#define vvtd_info(fmt...) do {} while(0)
+#define vvtd_debug(fmt...) do {} while(0)
+#endif
+
+struct vvtd *domain_vvtd(struct domain *d)
+{
+    return (d->viommu) ? d->viommu->priv : NULL;
+}
+
 static inline void vvtd_set_reg(struct vvtd *vtd, uint32_t reg, uint32_t value)
 {
     vtd->regs->data32[reg/sizeof(uint32_t)] = value;
@@ -68,6 +91,73 @@ static inline uint64_t vvtd_get_reg_quad(struct vvtd *vtd, uint32_t reg)
     return vtd->regs->data64[reg/sizeof(uint64_t)];
 }
 
+static int vvtd_in_range(struct vcpu *v, unsigned long addr)
+{
+    struct vvtd *vvtd = domain_vvtd(v->domain);
+
+    if ( vvtd )
+        return (addr >= vvtd->base_addr) &&
+               (addr < vvtd->base_addr + PAGE_SIZE);
+    return 0;
+}
+
+static int vvtd_read(struct vcpu *v, unsigned long addr,
+                     unsigned int len, unsigned long *pval)
+{
+    struct vvtd *vvtd = domain_vvtd(v->domain);
+    unsigned int offset = addr - vvtd->base_addr;
+
+    vvtd_info("Read offset %x len %d\n", offset, len);
+
+    if ( (len != 4 && len != 8) || (offset & (len - 1)) )
+        return X86EMUL_OKAY;
+
+    if ( len == 4 )
+        *pval = vvtd_get_reg(vvtd, offset);
+    else
+        *pval = vvtd_get_reg_quad(vvtd, offset);
+
+    return X86EMUL_OKAY;
+}
+
+static int vvtd_write(struct vcpu *v, unsigned long addr,
+                      unsigned int len, unsigned long val)
+{
+    struct vvtd *vvtd = domain_vvtd(v->domain);
+    unsigned int offset = addr - vvtd->base_addr;
+
+    vvtd_info("Write offset %x len %d val %lx\n", offset, len, val);
+
+    if ( (len != 4 && len != 8) || (offset & (len - 1)) )
+        return X86EMUL_OKAY;
+
+    if ( len == 4 )
+    {
+        switch ( offset )
+        {
+        case DMAR_IEDATA_REG:
+        case DMAR_IEADDR_REG:
+        case DMAR_IEUADDR_REG:
+        case DMAR_FEDATA_REG:
+        case DMAR_FEADDR_REG:
+        case DMAR_FEUADDR_REG:
+            vvtd_set_reg(vvtd, offset, val);
+            break;
+
+        default:
+            break;
+        }
+    }
+
+    return X86EMUL_OKAY;
+}
+
+static const struct hvm_mmio_ops vvtd_mmio_ops = {
+    .check = vvtd_in_range,
+    .read = vvtd_read,
+    .write = vvtd_write
+};
+
 static void vvtd_reset(struct vvtd *vvtd, uint64_t capability)
 {
     uint64_t cap = cap_set_num_fault_regs(1ULL) |
@@ -109,6 +199,7 @@ static int vvtd_create(struct domain *d, struct viommu *viommu)
     vvtd_reset(vvtd, viommu->caps);
     vvtd->base_addr = viommu->base_address;
     vvtd->domain = d;
+    register_mmio_handler(d, &vvtd_mmio_ops);
 
     viommu->priv = vvtd;
 
-- 
1.8.3.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.