
Re: [Xen-devel] [v7][RFC][PATCH 06/13] hvmloader/ram: check if guest memory is out of reserved device memory maps



On 2014/11/11 17:42, Chen, Tiejun wrote:
> On 2014/11/11 17:06, Jan Beulich wrote:
>> On 11.11.14 at 10:03, <JBeulich@xxxxxxxx> wrote:
>>> On 11.11.14 at 08:49, <tiejun.chen@xxxxxxxxx> wrote:
>>>> Unless we move all checks inside each callback function.
>>>
>>> That's what you should do imo, albeit I realize that the comparing
>
> Yes, I can do this in all existing callback functions, but I'm somewhat
> afraid that when someone introduces a new callback function later, who
> can guarantee the same check is done there as well?


I didn't see any feedback on this point, so I take it you still prefer that all the checks be done inside the callback functions.

I tried to address this, but obviously we have to pass each 'bdf' to the callback functions:

diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index af613b9..db4b90f 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -20,12 +20,16 @@ CHECK_mem_access_op;
 struct get_reserved_device_memory {
     struct compat_reserved_device_memory_map map;
     unsigned int used_entries;
+    struct domain *domain;
 };

-static int get_reserved_device_memory(xen_pfn_t start,
-                                      xen_ulong_t nr, void *ctxt)
+static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
+                                      u16 bdf, void *ctxt)
 {
     struct get_reserved_device_memory *grdm = ctxt;
+    u16 pt_bdf;
+    int i;
+    struct domain *d = grdm->domain;

     if ( grdm->used_entries < grdm->map.nr_entries )
     {
@@ -36,9 +40,24 @@ static int get_reserved_device_memory(xen_pfn_t start,
         if ( rdm.start_pfn != start || rdm.nr_pages != nr )
             return -ERANGE;

-        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
-                                     &rdm, 1) )
-            return -EFAULT;
+        if ( d->arch.hvm_domain.pci_force )
+        {
+            if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
+                                         &rdm, 1) )
+                return -EFAULT;
+        }
+        else
+        {
+            for ( i = 0; i < d->arch.hvm_domain.num_pcidevs; i++ )
+            {
+                pt_bdf = PCI_BDF2(d->arch.hvm_domain.pcidevs[i].bus,
+                                  d->arch.hvm_domain.pcidevs[i].devfn);
+                if ( pt_bdf == bdf )
+                    if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
+                                                 &rdm, 1) )
+                        return -EFAULT;
+            }
+        }
     }

     ++grdm->used_entries;
@@ -314,6 +333,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
                 return -EFAULT;

             grdm.used_entries = 0;
+            grdm.domain = current->domain;
             rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
                                                   &grdm);

diff --git a/xen/common/memory.c b/xen/common/memory.c
index 2177c56..f5e9c1f 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -696,12 +696,16 @@ out:
 struct get_reserved_device_memory {
     struct xen_reserved_device_memory_map map;
     unsigned int used_entries;
+    struct domain *domain;
 };

-static int get_reserved_device_memory(xen_pfn_t start,
-                                      xen_ulong_t nr, void *ctxt)
+static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
+                                      u16 bdf, void *ctxt)
 {
     struct get_reserved_device_memory *grdm = ctxt;
+    u16 pt_bdf;
+    int i;
+    struct domain *d = grdm->domain;

     if ( grdm->used_entries < grdm->map.nr_entries )
     {
@@ -709,9 +713,24 @@ static int get_reserved_device_memory(xen_pfn_t start,
             .start_pfn = start, .nr_pages = nr
         };

-        if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
-                                    &rdm, 1) )
-            return -EFAULT;
+        if ( d->arch.hvm_domain.pci_force )
+        {
+            if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
+                                        &rdm, 1) )
+                return -EFAULT;
+        }
+        else
+        {
+            for ( i = 0; i < d->arch.hvm_domain.num_pcidevs; i++ )
+            {
+                pt_bdf = PCI_BDF2(d->arch.hvm_domain.pcidevs[i].bus,
+                                  d->arch.hvm_domain.pcidevs[i].devfn);
+                if ( pt_bdf == bdf )
+                    if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
+                                                &rdm, 1) )
+                        return -EFAULT;
+            }
+        }
     }

     ++grdm->used_entries;
@@ -1139,6 +1158,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return -EFAULT;

         grdm.used_entries = 0;
+        grdm.domain = current->domain;
         rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
                                               &grdm);

diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
index 141e735..68da9d0 100644
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -898,11 +898,14 @@ int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
 {
     struct acpi_rmrr_unit *rmrr;
     int rc = 0;
+    int i;
+    u16 bdf;

-    list_for_each_entry(rmrr, &acpi_rmrr_units, list)
+    for_each_rmrr_device ( rmrr, bdf, i )
     {
         rc = func(PFN_DOWN(rmrr->base_address),
                   PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
+                  bdf,
                   ctxt);
         if ( rc )
             break;
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 409f6f8..ddea0be 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -120,7 +120,7 @@ void iommu_dt_domain_destroy(struct domain *d);

 struct page_info;

-typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, void *ctxt);
+typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, u16 bdf, void *ctxt);

 struct iommu_ops {
     int (*init)(struct domain *d);

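By the way, the bdf filtering and the copy-out above end up duplicated
across both branches and both callbacks, so anyone adding a new callback
has to repeat all of it. Below is a rough, untested sketch (against the
common/memory.c variant; pci_force, num_pcidevs and pcidevs are the
fields this series introduces) of hoisting the decision ahead of the
copy. Note it also stops counting entries that are filtered out, which
may or may not be what we want nr_entries to mean:

/*
 * Sketch only, not a tested replacement: decide first whether this
 * RMRR should be reported to the domain, then do the copy-out once.
 */
static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
                                      u16 bdf, void *ctxt)
{
    struct get_reserved_device_memory *grdm = ctxt;
    const struct domain *d = grdm->domain;
    bool_t report = d->arch.hvm_domain.pci_force;
    unsigned int i;

    /* Without pci_force, report only RMRRs of devices assigned to d. */
    for ( i = 0; !report && i < d->arch.hvm_domain.num_pcidevs; i++ )
        report = (PCI_BDF2(d->arch.hvm_domain.pcidevs[i].bus,
                           d->arch.hvm_domain.pcidevs[i].devfn) == bdf);

    if ( !report )
        return 0;

    if ( grdm->used_entries < grdm->map.nr_entries )
    {
        struct xen_reserved_device_memory rdm = {
            .start_pfn = start, .nr_pages = nr
        };

        if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
                                    &rdm, 1) )
            return -EFAULT;
    }

    ++grdm->used_entries;

    return 0;
}

With something like this the compat variant would differ only in its
copy-out call, and a future callback has a single obvious place to get
the per-device check right instead of two duplicated copy paths.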
Thanks
Tiejun

