
Re: [Xen-devel] [v7][RFC][PATCH 06/13] hvmloader/ram: check if guest memory is out of reserved device memory maps



On 2014/11/17 19:51, Jan Beulich wrote:
> On 17.11.14 at 12:32, <tiejun.chen@xxxxxxxxx> wrote:
>> On 2014/11/17 19:17, Jan Beulich wrote:
>>> On 17.11.14 at 12:08, <tiejun.chen@xxxxxxxxx> wrote:
>>>> On 2014/11/17 18:05, Jan Beulich wrote:
>>>>> On 17.11.14 at 08:57, <tiejun.chen@xxxxxxxxx> wrote:
>>>>>> --- a/xen/common/memory.c
>>>>>> +++ b/xen/common/memory.c
>>>>>> @@ -698,10 +698,13 @@ struct get_reserved_device_memory {
>>>>>>      unsigned int used_entries;
>>>>>>  };
>>>>>>
>>>>>> -static int get_reserved_device_memory(xen_pfn_t start,
>>>>>> -                                      xen_ulong_t nr, void *ctxt)
>>>>>> +static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
>>>>>> +                                      u16 seg, u16 *ids, int cnt,
>>>>>> +                                      void *ctxt)

>>>>> While the approach is a lot better than what you did previously, I
>>>>> still don't like you adding 3 new parameters when one would do
>>>>> (calling the callback for each SBDF individually): That way you avoid

>>>> Do you mean I should do this?
>>>>
>>>> for_each_rmrr_device ( rmrr, bdf, i )
>>>> {
>>>>     sbdf = PCI_SBDF(seg, rmrr->scope.devices[i]);
>>>>     rc = func(PFN_DOWN(rmrr->base_address),
>>>>               PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
>>>>               sbdf, ctxt);
>>>> }
>>>>
>>>> But several different SBDFs may share one and the same RMRR entry, as
>>>> I said previously, so we would have to add more code in the callback
>>>> to filter them down to one entry.

>>> Not really - remember that part of what needs to be done is to make
>>> sure all devices associated with a given RMRR get assigned to the
>>> same guest? Or the callback function could use a special return value

>> Yes, but I mean that in the callback,
>>
>> get_reserved_device_memory()
>> {
>>     ...
>>     for ( each assigned PCI device: pt_sbdf )
>>         if ( sbdf == pt_sbdf )
>>             __copy_to_guest_offset(buffer, ...);
>> }
>>
>> the buffer may end up containing multiple identical entries if two or
>> more assigned devices are associated with one given RMRR entry.

> Which would be easily avoided by ...
>
>>> (e.g. 1) to signal that the iteration for the current RMRR can be
>>> terminated (or further entries skipped).
>
> ... the approach I outlined.


Here is my attempt at implementing what you suggested. Note that I am only picking out the two key fragments, since the other changes are straightforward.

#1:

@@ -898,14 +898,25 @@ int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
 {
     struct acpi_rmrr_unit *rmrr;
     int rc = 0;
+    unsigned int i;
+    u32 id;
+    u16 bdf;

     list_for_each_entry(rmrr, &acpi_rmrr_units, list)
     {
-        rc = func(PFN_DOWN(rmrr->base_address),
-                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
-                  ctxt);
-        if ( rc )
-            break;
+        /* Check the bound before reading devices[i]. */
+        for ( i = 0; i < rmrr->scope.devices_cnt &&
+                     (bdf = rmrr->scope.devices[i]) && !rc; i++ )
+        {
+            id = PCI_SBDF(rmrr->segment, bdf);
+            rc = func(PFN_DOWN(rmrr->base_address),
+                      PFN_UP(rmrr->end_address) -
+                      PFN_DOWN(rmrr->base_address),
+                      id, ctxt);
+            if ( rc < 0 )
+                return rc;
+        }
+        /* rc == 1 just means "skip the rest of this RMRR". */
+        rc = 0;
     }

     return rc;

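Note PCI_SBDF()/PCI_SBDF2() are simply assumed to pack segment, bus and devfn into one u32, along these lines (a sketch of the intended encoding only, not necessarily the final definitions in xen/include/xen/pci.h):

/* Sketch only: seg in bits 31..16, bus in bits 15..8, devfn in bits 7..0. */
#define PCI_BDF2(b, df)      ((((b) & 0xff) << 8) | ((df) & 0xff))
#define PCI_SBDF(s, bdf)     ((((s) & 0xffff) << 16) | ((bdf) & 0xffff))
#define PCI_SBDF2(s, b, df)  ((((s) & 0xffff) << 16) | PCI_BDF2(b, df))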

and #2,

@@ -698,10 +698,13 @@ struct get_reserved_device_memory {
     unsigned int used_entries;
 };

-static int get_reserved_device_memory(xen_pfn_t start,
-                                      xen_ulong_t nr, void *ctxt)
+static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
+                                      u32 id, void *ctxt)
 {
     struct get_reserved_device_memory *grdm = ctxt;
+    struct domain *d = get_domain_by_id(grdm->map.domid);
+    unsigned int i;
+    u32 sbdf;
+
+    /* get_domain_by_id() can fail, and it takes a reference which has
+     * to be dropped again on every exit path below. */
+    if ( d == NULL )
+        return -ESRCH;

     if ( grdm->used_entries < grdm->map.nr_entries )
     {
@@ -709,13 +712,34 @@ static int get_reserved_device_memory(xen_pfn_t start,
             .start_pfn = start, .nr_pages = nr
         };

-        if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
-                                    &rdm, 1) )
-            return -EFAULT;
+        if ( d->arch.hvm_domain.pci_force )
+        {
+            if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
+                                        &rdm, 1) )
+            {
+                put_domain(d);
+                return -EFAULT;
+            }
+            ++grdm->used_entries;
+            put_domain(d);
+            /* Positive return: skip the remaining devices of this RMRR. */
+            return 1;
+        }
+
+        for ( i = 0; i < d->arch.hvm_domain.num_pcidevs; i++ )
+        {
+            sbdf = PCI_SBDF2(d->arch.hvm_domain.pcidevs[i].seg,
+                             d->arch.hvm_domain.pcidevs[i].bus,
+                             d->arch.hvm_domain.pcidevs[i].devfn);
+            if ( sbdf == id )
+            {
+                if ( __copy_to_guest_offset(grdm->map.buffer,
+                                            grdm->used_entries,
+                                            &rdm, 1) )
+                {
+                    put_domain(d);
+                    return -EFAULT;
+                }
+                ++grdm->used_entries;
+                put_domain(d);
+                return 1;
+            }
+        }
     }

-    ++grdm->used_entries;
-
+    put_domain(d);
     return 0;
 }
 #endif

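For completeness, here is roughly how I expect the hypercall side to drive this callback. This is only a sketch of the XENMEM_reserved_device_memory_map handler based on the earlier patches in this series - simplified, not verbatim:

    case XENMEM_reserved_device_memory_map:
    {
        struct get_reserved_device_memory grdm;

        if ( copy_from_guest(&grdm.map, arg, 1) ||
             !guest_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
            return -EFAULT;

        grdm.used_entries = 0;
        /* Invokes get_reserved_device_memory() once per RMRR/SBDF pair. */
        rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
                                              &grdm);

        /* Tell the caller how many entries were (or would be) needed. */
        if ( !rc && grdm.map.nr_entries < grdm.used_entries )
            rc = -ENOBUFS;
        grdm.map.nr_entries = grdm.used_entries;
        if ( __copy_to_guest(arg, &grdm.map, 1) )
            rc = -EFAULT;

        break;
    }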

Thanks
Tiejun


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
