
[PATCH v9 09/16] vpci/header: program p2m with guest BAR view


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Date: Tue, 29 Aug 2023 23:19:44 +0000
  • Cc: Stewart Hildebrand <stewart.hildebrand@xxxxxxx>, Oleksandr Andrushchenko <Oleksandr_Andrushchenko@xxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Delivery-date: Tue, 29 Aug 2023 23:20:05 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [PATCH v9 09/16] vpci/header: program p2m with guest BAR view

From: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>

Take the guest's BAR view into account and program its p2m accordingly:
the gfn is the guest's view of the BAR and the mfn is the physical BAR
value. This way the hardware domain sees the physical BAR values and the
guest sees the emulated ones.

The hardware domain continues to get the BARs identity mapped, while for
domUs the BARs are mapped at the requested guest addresses without
modifying the BAR addresses in the device's PCI config space.
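
In short, for every range the mapping callback now picks the gfn from the
guest's view of the BAR and the mfn from the physical BAR. A condensed
sketch of the map_range() logic added below (d, bar, s and size stand for
the domain, the BAR being processed and the current rangeset chunk):

    /* gfn: identity mapping for the hardware domain, guest view for domUs. */
    gfn_t start_gfn = _gfn(PFN_DOWN(is_hardware_domain(d) ? bar->addr
                                                          : bar->guest_addr));
    /* mfn: always derived from the physical (host) BAR address. */
    mfn_t start_mfn = _mfn(PFN_DOWN(bar->addr));

    /* The chunk may start past the BAR start: carry over the offset. */
    start_mfn = mfn_add(start_mfn, s - gfn_x(start_gfn));

    /* Program the p2m: guest view as gfn, physical BAR as mfn. */
    rc = map_mmio_regions(d, _gfn(s), size, start_mfn);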

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>
Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>
---
Since v9:
- Extended the commit message
- Use bar->guest_addr in modify_bars
- Extended printk error message in map_range
- Moved map_data initialization so .bar can be initialized during declaration
Since v5:
- remove debug print in map_range callback
- remove "identity" from the debug print
Since v4:
- moved start_{gfn|mfn} calculation into map_range
- pass vpci_bar in the map_data instead of start_{gfn|mfn}
- s/guest_addr/guest_reg
Since v3:
- updated comment (Roger)
- removed gfn_add(map->start_gfn, rc); which is wrong
- use v->domain instead of v->vpci.pdev->domain
- removed odd e.g. in comment
- s/d%d/%pd in altered code
- use gdprintk for map/unmap logs
Since v2:
- improve readability for data.start_gfn and restructure ?: construct
Since v1:
 - s/MSI/MSI-X in comments
---
 xen/drivers/vpci/header.c | 52 ++++++++++++++++++++++++++++-----------
 1 file changed, 38 insertions(+), 14 deletions(-)

diff --git a/xen/drivers/vpci/header.c b/xen/drivers/vpci/header.c
index 3cc6a96849..1e82217200 100644
--- a/xen/drivers/vpci/header.c
+++ b/xen/drivers/vpci/header.c
@@ -33,6 +33,7 @@
 
 struct map_data {
     struct domain *d;
+    const struct vpci_bar *bar;
     bool map;
 };
 
@@ -44,6 +45,12 @@ static int cf_check map_range(
 
     for ( ; ; )
     {
+        /* Start address of the BAR as seen by the guest. */
+        gfn_t start_gfn = _gfn(PFN_DOWN(is_hardware_domain(map->d)
+                                        ? map->bar->addr
+                                        : map->bar->guest_addr));
+        /* Physical start address of the BAR. */
+        mfn_t start_mfn = _mfn(PFN_DOWN(map->bar->addr));
         unsigned long size = e - s + 1;
 
         if ( !iomem_access_permitted(map->d, s, e) )
@@ -63,6 +70,13 @@ static int cf_check map_range(
             return rc;
         }
 
+        /*
+         * Ranges to be mapped don't always start at the BAR start address, as
+         * there can be holes or partially consumed ranges. Account for the
+         * offset of the current address from the BAR start.
+         */
+        start_mfn = mfn_add(start_mfn, s - gfn_x(start_gfn));
+
         /*
          * ARM TODOs:
          * - On ARM whether the memory is prefetchable or not should be passed
@@ -72,8 +86,8 @@ static int cf_check map_range(
          * - {un}map_mmio_regions doesn't support preemption.
          */
 
-        rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, _mfn(s))
-                      : unmap_mmio_regions(map->d, _gfn(s), size, _mfn(s));
+        rc = map->map ? map_mmio_regions(map->d, _gfn(s), size, start_mfn)
+                      : unmap_mmio_regions(map->d, _gfn(s), size, start_mfn);
         if ( rc == 0 )
         {
             *c += size;
@@ -82,8 +96,9 @@ static int cf_check map_range(
         if ( rc < 0 )
         {
             printk(XENLOG_G_WARNING
-                   "Failed to identity %smap [%lx, %lx] for d%d: %d\n",
-                   map->map ? "" : "un", s, e, map->d->domain_id, rc);
+                   "Failed to %smap [%lx (%lx), %lx (%lx)] for %pd: %d\n",
+                   map->map ? "" : "un", s,  mfn_x(start_mfn), e,
+                   mfn_x(start_mfn) + size, map->d, rc);
             break;
         }
         ASSERT(rc < size);
@@ -162,10 +177,6 @@ static void modify_decoding(const struct pci_dev *pdev, uint16_t cmd,
 bool vpci_process_pending(struct vcpu *v)
 {
     struct pci_dev *pdev = v->vpci.pdev;
-    struct map_data data = {
-        .d = v->domain,
-        .map = v->vpci.cmd & PCI_COMMAND_MEMORY,
-    };
     struct vpci_header *header = NULL;
     unsigned int i;
 
@@ -177,6 +188,11 @@ bool vpci_process_pending(struct vcpu *v)
     for ( i = 0; i < ARRAY_SIZE(header->bars); i++ )
     {
         struct vpci_bar *bar = &header->bars[i];
+        struct map_data data = {
+            .d = v->domain,
+            .map = v->vpci.cmd & PCI_COMMAND_MEMORY,
+            .bar = bar,
+        };
         int rc;
 
         if ( rangeset_is_empty(bar->mem) )
@@ -229,7 +245,6 @@ bool vpci_process_pending(struct vcpu *v)
 static int __init apply_map(struct domain *d, const struct pci_dev *pdev,
                             uint16_t cmd)
 {
-    struct map_data data = { .d = d, .map = true };
     struct vpci_header *header = &pdev->vpci->header;
     int rc = 0;
     unsigned int i;
@@ -239,6 +254,7 @@ static int __init apply_map(struct domain *d, const struct pci_dev *pdev,
     for ( i = 0; i < ARRAY_SIZE(header->bars); i++ )
     {
         struct vpci_bar *bar = &header->bars[i];
+        struct map_data data = { .d = d, .map = true, .bar = bar };
 
         if ( rangeset_is_empty(bar->mem) )
             continue;
@@ -306,12 +322,18 @@ static int modify_bars(const struct pci_dev *pdev, uint16_t cmd, bool rom_only)
      * First fill the rangesets with the BAR of this device or with the ROM
      * BAR only, depending on whether the guest is toggling the memory decode
      * bit of the command register, or the enable bit of the ROM BAR register.
+     *
+     * For non-hardware domains we use guest physical addresses.
      */
     for ( i = 0; i < ARRAY_SIZE(header->bars); i++ )
     {
         struct vpci_bar *bar = &header->bars[i];
         unsigned long start = PFN_DOWN(bar->addr);
         unsigned long end = PFN_DOWN(bar->addr + bar->size - 1);
+        unsigned long start_guest = PFN_DOWN(is_hardware_domain(pdev->domain) ?
+                                             bar->addr : bar->guest_addr);
+        unsigned long end_guest = PFN_DOWN((is_hardware_domain(pdev->domain) ?
+                                  bar->addr : bar->guest_addr) + bar->size - 1);
 
         if ( !bar->mem )
             continue;
@@ -331,11 +353,11 @@ static int modify_bars(const struct pci_dev *pdev, uint16_t cmd, bool rom_only)
             continue;
         }
 
-        rc = rangeset_add_range(bar->mem, start, end);
+        rc = rangeset_add_range(bar->mem, start_guest, end_guest);
         if ( rc )
         {
             printk(XENLOG_G_WARNING "Failed to add [%lx, %lx]: %d\n",
-                   start, end, rc);
+                   start_guest, end_guest, rc);
             return rc;
         }
 
@@ -352,7 +374,7 @@ static int modify_bars(const struct pci_dev *pdev, uint16_t cmd, bool rom_only)
             {
                 gprintk(XENLOG_WARNING,
                        "%pp: failed to remove overlapping range [%lx, %lx]: 
%d\n",
-                        &pdev->sbdf, start, end, rc);
+                        &pdev->sbdf, start_guest, end_guest, rc);
                 return rc;
             }
         }
@@ -420,8 +442,10 @@ static int modify_bars(const struct pci_dev *pdev, uint16_t cmd, bool rom_only)
             for ( i = 0; i < ARRAY_SIZE(tmp->vpci->header.bars); i++ )
             {
                 const struct vpci_bar *remote_bar = &tmp->vpci->header.bars[i];
-                unsigned long start = PFN_DOWN(remote_bar->addr);
-                unsigned long end = PFN_DOWN(remote_bar->addr +
+                unsigned long start = PFN_DOWN(is_hardware_domain(pdev->domain) ?
+                                      remote_bar->addr : remote_bar->guest_addr);
+                unsigned long end = PFN_DOWN((is_hardware_domain(pdev->domain) ?
+                                    remote_bar->addr : remote_bar->guest_addr) +
                                              remote_bar->size - 1);
 
                 if ( !remote_bar->enabled )
-- 
2.41.0
