[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [v7][RFC][PATCH 06/13] hvmloader/ram: check if guest memory is out of reserved device memory maps



On 2014/11/12 16:55, Jan Beulich wrote:
On 12.11.14 at 04:05, <tiejun.chen@xxxxxxxxx> wrote:
I don't see any feedback to this point, so I think you still prefer we
should do all check in the callback function.

As a draft this looks reasonable, but there are various bugs to be
dealt with along with cosmetic issues (I'll point out the former, but
I'm tired of pointing out the latter once again - please go back to
earlier reviews of patches to refresh e.g. what types to use for
loop variables).

Does 'earlier reviews' refer to this email thread? I went back to check it but only found this comment related to our loop code:

"
>>> +        for ( i = 0; i < xdsr->num_pcidevs; ++i )
>>> +        {
>>> +            if ( __copy_from_guest_offset(pcidevs, xdsr->pcidevs, i, 1) )
>>> +            {
>>> +                xfree(pcidevs);
>>> +                return -EFAULT;
>>> +            }
>>> +        }
>>
>> I don't see the need for a loop here. And you definitely can't use the
>> double-underscore-prefixed variant the way you do.
>
> Do you mean this line?
>
> copy_from_guest_offset(pcidevs, xdsr->pcidevs, 0,
> xdsr->num_pcidevs*sizeof(xen_guest_pcidev_info_t))

Almost:

copy_from_guest(pcidevs, xdsr->pcidevs, xdsr->num_pcidevs * sizeof(*pcidevs))
"

Or do I need to set it as 'unsigned int' here?

Anyway, I did this first in the following code. If I'm still wrong I will correct it.


I tried to address this but obviously we have to pass each 'bdf' to
callback functions,

Yes, but at the generic IOMMU layer this shouldn't be named "bdf",
but something more neutral (maybe "id"). And you again lost the
segment there.

@@ -36,9 +40,24 @@ static int get_reserved_device_memory(xen_pfn_t start,
           if ( rdm.start_pfn != start || rdm.nr_pages != nr )
               return -ERANGE;

-        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
-                                     &rdm, 1) )
-            return -EFAULT;
+        if ( d->arch.hvm_domain.pci_force )
+        {
+            if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
+                                         &rdm, 1) )
+                return -EFAULT;
+        }
+        else
+        {
+            for ( i = 0; i < d->arch.hvm_domain.num_pcidevs; i++ )
+            {
+                pt_bdf = PCI_BDF2(d->arch.hvm_domain.pcidevs[i].bus,
+                                  d->arch.hvm_domain.pcidevs[i].devfn);
+                if ( pt_bdf == bdf )
+                    if ( __copy_to_compat_offset(grdm->map.buffer,
+                                                 grdm->used_entries,
+                                                 &rdm, 1) )
+                        return -EFAULT;

I think d->arch.hvm_domain.pcidevs[] shouldn't contain duplicates,
and hence there's no point continuing the loop if you found a match.

+            }
+        }
       }

       ++grdm->used_entries;

This should no longer get incremented unconditionally.

@@ -314,6 +333,7 @@ int compat_memory_op(unsigned int cmd,
XEN_GUEST_HANDLE_PARAM(void) compat)
                   return -EFAULT;

               grdm.used_entries = 0;
+            grdm.domain = current->domain;
               rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
                                                     &grdm);


Maybe I misguided you with an earlier response, or maybe the
earlier reply was in a different context: There's no point
communicating current->domain to the callback function; there
would be a point communicating the domain if it was _not_
always current->domain.


So we need the caller to pass the domain ID, right?

diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index af613b9..314d7e6 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -22,10 +22,13 @@ struct get_reserved_device_memory {
     unsigned int used_entries;
 };

-static int get_reserved_device_memory(xen_pfn_t start,
-                                      xen_ulong_t nr, void *ctxt)
+static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr, u16 seg,
+                                      u16 *ids, int cnt, void *ctxt)
 {
     struct get_reserved_device_memory *grdm = ctxt;
+    struct domain *d = get_domain_by_id(grdm->map.domid);
+    unsigned int i, j;
+    u32 sbdf, pt_sbdf;

     if ( grdm->used_entries < grdm->map.nr_entries )
     {
@@ -36,13 +39,37 @@ static int get_reserved_device_memory(xen_pfn_t start,
         if ( rdm.start_pfn != start || rdm.nr_pages != nr )
             return -ERANGE;

-        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
-                                     &rdm, 1) )
-            return -EFAULT;
+        if ( d->arch.hvm_domain.pci_force )
+        {
+            if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
+                                         &rdm, 1) )
+                return -EFAULT;
+            ++grdm->used_entries;
+        }
+        else
+        {
+            for ( i = 0; i < cnt; i++ )
+            {
+                sbdf = PCI_SBDF(seg, ids[i]);
+                for ( j = 0; j < d->arch.hvm_domain.num_pcidevs; j++ )
+                {
+                    pt_sbdf = PCI_SBDF2(d->arch.hvm_domain.pcidevs[j].seg,
+                                        d->arch.hvm_domain.pcidevs[j].bus,
+                                        d->arch.hvm_domain.pcidevs[j].devfn);
+                    if ( pt_sbdf == sbdf )
+                    {
+                        if ( __copy_to_compat_offset(grdm->map.buffer,
+                                                     grdm->used_entries,
+                                                     &rdm, 1) )
+                            return -EFAULT;
+                        ++grdm->used_entries;
+                        break;
+                    }
+                }
+            }
+        }
     }

-    ++grdm->used_entries;
-
     return 0;
 }
 #endif
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 2177c56..f27b17f 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -698,10 +698,13 @@ struct get_reserved_device_memory {
     unsigned int used_entries;
 };

-static int get_reserved_device_memory(xen_pfn_t start,
-                                      xen_ulong_t nr, void *ctxt)
+static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr, u16 seg,
+                                      u16 *ids, int cnt, void *ctxt)
 {
     struct get_reserved_device_memory *grdm = ctxt;
+    struct domain *d = get_domain_by_id(grdm->map.domid);
+    unsigned int i, j;
+    u32 sbdf, pt_sbdf;

     if ( grdm->used_entries < grdm->map.nr_entries )
     {
@@ -709,13 +712,37 @@ static int get_reserved_device_memory(xen_pfn_t start,
             .start_pfn = start, .nr_pages = nr
         };

-        if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
-                                    &rdm, 1) )
-            return -EFAULT;
+        if ( d->arch.hvm_domain.pci_force )
+        {
+            if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
+                                        &rdm, 1) )
+                return -EFAULT;
+            ++grdm->used_entries;
+        }
+        else
+        {
+            for ( i = 0; i < cnt; i++ )
+            {
+                sbdf = PCI_SBDF(seg, ids[i]);
+                for ( j = 0; j < d->arch.hvm_domain.num_pcidevs; j++ )
+                {
+                    pt_sbdf = PCI_SBDF2(d->arch.hvm_domain.pcidevs[j].seg,
+                                        d->arch.hvm_domain.pcidevs[j].bus,
+                                        d->arch.hvm_domain.pcidevs[j].devfn);
+                    if ( pt_sbdf == sbdf )
+                    {
+                        if ( __copy_to_guest_offset(grdm->map.buffer,
+                                                    grdm->used_entries,
+                                                    &rdm, 1) )
+                            return -EFAULT;
+                        ++grdm->used_entries;
+                        break;
+                    }
+                }
+            }
+        }
     }

-    ++grdm->used_entries;
-
     return 0;
 }
 #endif
diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
index 141e735..fa813c5 100644
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -903,6 +903,9 @@ int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
     {
         rc = func(PFN_DOWN(rmrr->base_address),
                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
+                  rmrr->segment,
+                  rmrr->scope.devices,
+                  rmrr->scope.devices_cnt,
                   ctxt);
         if ( rc )
             break;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index f1b6a48..f8964e1 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -588,6 +588,11 @@ typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
 DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);

 struct xen_reserved_device_memory_map {
+    /*
+     * Domain whose reservation is being changed.
+     * Unprivileged domains can specify only DOMID_SELF.
+     */
+    domid_t        domid;
     /* IN/OUT */
     unsigned int nr_entries;
     /* OUT */
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 409f6f8..75c6759 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -120,7 +120,8 @@ void iommu_dt_domain_destroy(struct domain *d);

 struct page_info;

-typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, void *ctxt);
+typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, u16 seg, u16 *ids,
+                         int cnt, void *ctxt);

 struct iommu_ops {
     int (*init)(struct domain *d);
diff --git a/xen/include/xen/pci.h b/xen/include/xen/pci.h
index 91520bc..ba881ef 100644
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -31,6 +31,8 @@
 #define PCI_DEVFN2(bdf) ((bdf) & 0xff)
 #define PCI_BDF(b,d,f)  ((((b) & 0xff) << 8) | PCI_DEVFN(d,f))
 #define PCI_BDF2(b,df)  ((((b) & 0xff) << 8) | ((df) & 0xff))
+#define PCI_SBDF(s,bdf) (((s & 0xffff) << 16) | (bdf & 0xffff))
+#define PCI_SBDF2(s,b,df) (((s & 0xffff) << 16) | PCI_BDF2(b,df))

 struct pci_dev_info {
     bool_t is_extfn;

Thanks
Tiejun

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.