[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [QEMU][PATCH v1 2/7] xen: add pseudo RAM region for grant mappings


  • To: Stefano Stabellini <sstabellini@xxxxxxxxxx>
  • From: Vikram Garhwal <vikram.garhwal@xxxxxxx>
  • Date: Tue, 10 Oct 2023 14:25:18 -0700
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=amd.com; dmarc=pass action=none header.from=amd.com; dkim=pass header.d=amd.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=Ru3iKP+0Xf8JpOwjbAlhgLW/NB+krGMTDzVgt1EF8bg=; b=Ty8dlrjUnPpEXKbusMLlzN+rdN3Qqw81RKUY8LLxUpskLuCCRXSvwrYvM9ADoqy2Tygs2gr+Ra7FMfyhC4E5PVElALae9PNURcP3pgSmcscsOjWvaiejzvpN13gexjCrRoP/GFfCwmj5vC/k15Ncyn+NyCkCAzdsjrDNuPW5rIKRzmg6aV3RfHQba4LZ8OGUu9IcrXAJAgkoX1q5yCzuE0VqU8AND6Elk2HhbT3nEkUOSTlvsYq4cu66iWkOS+q2Ofd106/HuG3M2jRFc2R0qBsh+Q2gQGATkgvvXjnWa2AX+wiJpGHF3twkE0bToI8zElo8SALIQePrn9/mCR8gNA==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; b=hUSM7ViTS+OlQmHsavors+rDzVZT0LVG9CIia+r5KnXAhenr6Hv9iAj8otH3s3x1LA10dGb1/F9ZYoHtUsc/cLTqQrH+rSz6O4xuG3RaTj8w2uXb1m+Mt9yCh/5LNSGIr/30XpJq5jcKIfaDdwHEjClFJIq3gIsyUPxG2S9UGfTabAOXJTEIz4keVYcNBWke1yIbDyDakrQQC6R2gIFyeDH1PI0ASQp+RGEYqAhzLmmDV0SoGSpJXgKxnZ0hVNEWEW/jfCuvNCgpt+hdXudt1ckLaK0tG/5rBycKLE4YN/jgmL8pzZ+yEUOMypxLsy18JjAL/8SsEhjC8cfqrUuZZQ==
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=amd.com;
  • Cc: qemu-devel@xxxxxxxxxx, Juergen Gross <jgross@xxxxxxxx>, Anthony Perard <anthony.perard@xxxxxxxxxx>, Paul Durrant <paul@xxxxxxx>, "Michael S. Tsirkin" <mst@xxxxxxxxxx>, Marcel Apfelbaum <marcel.apfelbaum@xxxxxxxxx>, Paolo Bonzini <pbonzini@xxxxxxxxxx>, Richard Henderson <richard.henderson@xxxxxxxxxx>, Eduardo Habkost <eduardo@xxxxxxxxxxx>, Peter Xu <peterx@xxxxxxxxxx>, David Hildenbrand <david@xxxxxxxxxx>, Philippe Mathieu-Daudé <philmd@xxxxxxxxxx>, "open list:X86 Xen CPUs" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • Delivery-date: Tue, 10 Oct 2023 21:25:52 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Hi Stefano,
On Mon, Oct 09, 2023 at 05:02:14PM -0700, Stefano Stabellini wrote:
> On Thu, 5 Oct 2023, Vikram Garhwal wrote:
> > From: Juergen Gross <jgross@xxxxxxxx>
> > 
> > Add a memory region which can be used to automatically map granted
> > memory. It is starting at 0x8000000000000000ULL in order to be able to
> > distinguish it from normal RAM.
> > 
> > For this reason the xen.ram memory region is expanded, which has no
> > further impact as it is used just as a container of the real RAM
> > regions and now the grant region.
> > 
> > Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
> > Signed-off-by: Vikram Garhwal <vikram.garhwal@xxxxxxx>
> 
> This patch doesn't apply to staging anymore
Will re-base it; I had rebased it against the master branch.
> 
> 
> > ---
> >  hw/i386/xen/xen-hvm.c           |  3 ++
> >  hw/xen/xen-hvm-common.c         |  4 +--
> >  hw/xen/xen-mapcache.c           | 27 ++++++++++++++
> >  include/exec/ram_addr.h         |  1 +
> >  include/hw/xen/xen-hvm-common.h |  2 ++
> >  include/hw/xen/xen_pvdev.h      |  3 ++
> >  include/sysemu/xen-mapcache.h   |  3 ++
> >  softmmu/physmem.c               | 62 +++++++++++++++++++++------------
> >  8 files changed, 80 insertions(+), 25 deletions(-)
> > 
> > diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
> > index f42621e674..67a55558a6 100644
> > --- a/hw/i386/xen/xen-hvm.c
> > +++ b/hw/i386/xen/xen-hvm.c
> > @@ -172,6 +172,9 @@ static void xen_ram_init(PCMachineState *pcms,
> >                                   x86ms->above_4g_mem_size);
> >          memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
> >      }
> > +
> > +    /* Add grant mappings as a pseudo RAM region. */
> > +    ram_grants = *xen_init_grant_ram();
> >  }
> >  
> >  static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
> > diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
> > index 565dc39c8f..b7255977a5 100644
> > --- a/hw/xen/xen-hvm-common.c
> > +++ b/hw/xen/xen-hvm-common.c
> > @@ -9,7 +9,7 @@
> >  #include "hw/boards.h"
> >  #include "hw/xen/arch_hvm.h"
> >  
> > -MemoryRegion ram_memory;
> > +MemoryRegion ram_memory, ram_grants;
> >  
> >  void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
> >                     Error **errp)
> > @@ -26,7 +26,7 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, 
> > MemoryRegion *mr,
> >          return;
> >      }
> >  
> > -    if (mr == &ram_memory) {
> > +    if (mr == &ram_memory || mr == &ram_grants) {
> >          return;
> >      }
> >  
> > diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
> > index f7d974677d..8115c44c00 100644
> > --- a/hw/xen/xen-mapcache.c
> > +++ b/hw/xen/xen-mapcache.c
> > @@ -14,7 +14,9 @@
> >  
> >  #include <sys/resource.h>
> >  
> > +#include "hw/xen/xen-hvm-common.h"
> >  #include "hw/xen/xen_native.h"
> > +#include "hw/xen/xen_pvdev.h"
> >  #include "qemu/bitmap.h"
> >  
> >  #include "sysemu/runstate.h"
> > @@ -597,3 +599,28 @@ uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
> >      mapcache_unlock();
> >      return p;
> >  }
> > +
> > +MemoryRegion *xen_init_grant_ram(void)
> > +{
> > +    RAMBlock *block;
> > +
> > +    memory_region_init(&ram_grants, NULL, "xen.grants",
> > +                       XEN_MAX_VIRTIO_GRANTS * XC_PAGE_SIZE);
> > +    block = g_malloc0(sizeof(*block));
> > +    block->mr = &ram_grants;
> > +    block->used_length = XEN_MAX_VIRTIO_GRANTS * XC_PAGE_SIZE;
> > +    block->max_length = XEN_MAX_VIRTIO_GRANTS * XC_PAGE_SIZE;
> > +    block->fd = -1;
> > +    block->page_size = XC_PAGE_SIZE;
> > +    block->host = (void *)XEN_GRANT_ADDR_OFF;
> > +    block->offset = XEN_GRANT_ADDR_OFF;
> > +    block->flags = RAM_PREALLOC;
> > +    ram_grants.ram_block = block;
> > +    ram_grants.ram = true;
> > +    ram_grants.terminates = true;
> > +    ram_block_add_list(block);
> > +    memory_region_add_subregion(get_system_memory(), XEN_GRANT_ADDR_OFF,
> > +                                &ram_grants);
> > +
> > +    return &ram_grants;
> 
> It doesn't look like xen_init_grant_ram has anything to do with the
> mapcache. It should be in another file. Maybe ./hw/xen/xen-hvm-common.c
> or ./hw/i386/xen/xen-hvm.c (but this is x86 specific and we need grants
> on ARM too)
Do you mean to move all grant-related functions? Moving this alone will not
be sufficient, as there are a lot of new grant-related functions added in later
patches.

I am okay with moving all to xen-hvm-common.c

Following code movement will happen in this case:
1. All grant-related static functions to xen-hvm-common.c:
    xen_ram_addr_from_grant_cache(), xen_ram_addr_from_mapcache(),
    xen_map_grant_dyn(), xen_unmap_grant_dyn() and xen_init_grant_ram().
2. Remove static from xen_ram_addr_from_mapcache_try().

Do these changes look good?
> 
> 
> > +}
> > diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
> > index 90676093f5..c0b5f9a7d0 100644
> > --- a/include/exec/ram_addr.h
> > +++ b/include/exec/ram_addr.h
> > @@ -139,6 +139,7 @@ void qemu_ram_free(RAMBlock *block);
> >  int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
> >  
> >  void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);
> > +void ram_block_add_list(RAMBlock *new_block);
> >  
> >  /* Clear whole block of mem */
> >  static inline void qemu_ram_block_writeback(RAMBlock *block)
> > diff --git a/include/hw/xen/xen-hvm-common.h 
> > b/include/hw/xen/xen-hvm-common.h
> > index 4e9904f1a6..0d300ba898 100644
> > --- a/include/hw/xen/xen-hvm-common.h
> > +++ b/include/hw/xen/xen-hvm-common.h
> > @@ -17,6 +17,8 @@
> >  #include <xen/hvm/ioreq.h>
> >  
> >  extern MemoryRegion ram_memory;
> > +
> > +extern MemoryRegion ram_grants;
> >  extern MemoryListener xen_io_listener;
> >  extern DeviceListener xen_device_listener;
> >  
> > diff --git a/include/hw/xen/xen_pvdev.h b/include/hw/xen/xen_pvdev.h
> > index ddad4b9f36..0f1b5edfa9 100644
> > --- a/include/hw/xen/xen_pvdev.h
> > +++ b/include/hw/xen/xen_pvdev.h
> > @@ -80,4 +80,7 @@ int xen_pv_send_notify(struct XenLegacyDevice *xendev);
> >  void xen_pv_printf(struct XenLegacyDevice *xendev, int msg_level,
> >                     const char *fmt, ...)  G_GNUC_PRINTF(3, 4);
> >  
> > +#define XEN_GRANT_ADDR_OFF    0x8000000000000000ULL
> > +#define XEN_MAX_VIRTIO_GRANTS 65536
> > +
> >  #endif /* QEMU_HW_XEN_PVDEV_H */
> > diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
> > index c8e7c2f6cf..f4bedb1c11 100644
> > --- a/include/sysemu/xen-mapcache.h
> > +++ b/include/sysemu/xen-mapcache.h
> > @@ -10,6 +10,7 @@
> >  #define XEN_MAPCACHE_H
> >  
> >  #include "exec/cpu-common.h"
> > +#include "exec/ram_addr.h"
> >  
> >  typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr phys_offset,
> >                                           ram_addr_t size);
> > @@ -25,6 +26,8 @@ void xen_invalidate_map_cache(void);
> >  uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
> >                                   hwaddr new_phys_addr,
> >                                   hwaddr size);
> > +MemoryRegion *xen_init_grant_ram(void);
> > +
> >  #else
> >  
> >  static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
> > diff --git a/softmmu/physmem.c b/softmmu/physmem.c
> > index 309653c722..e182a2fa07 100644
> > --- a/softmmu/physmem.c
> > +++ b/softmmu/physmem.c
> 
> 
> You might want to split this change out of this patch to make it easier
> to get the physmem.c maintainers' attention
Understood, will create a new patch for this change.
> 
> 
> > @@ -1803,12 +1803,47 @@ static void dirty_memory_extend(ram_addr_t 
> > old_ram_size,
> >      }
> >  }
> >  
> > +static void ram_block_add_list_locked(RAMBlock *new_block)
> > + {
> > +     RAMBlock *block;
> > +     RAMBlock *last_block = NULL;
> > +
> > +    /*
> > +     * Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
> > +     * QLIST (which has an RCU-friendly variant) does not have insertion at
> > +     * tail, so save the last element in last_block.
> > +     */
> > +    RAMBLOCK_FOREACH(block) {
> > +        last_block = block;
> > +        if (block->max_length < new_block->max_length) {
> > +            break;
> > +        }
> > +    }
> > +    if (block) {
> > +        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
> > +    } else if (last_block) {
> > +        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
> > +    } else { /* list is empty */
> > +        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
> > +    }
> > +    ram_list.mru_block = NULL;
> > +
> > +    /* Write list before version */
> > +    smp_wmb();
> > +    ram_list.version++;
> > +}
> > +
> > +void ram_block_add_list(RAMBlock *new_block)
> > +{
> > +    qemu_mutex_lock_ramlist();
> > +    ram_block_add_list_locked(new_block);
> > +    qemu_mutex_unlock_ramlist();
> > +}
> > +
> >  static void ram_block_add(RAMBlock *new_block, Error **errp)
> >  {
> >      const bool noreserve = qemu_ram_is_noreserve(new_block);
> >      const bool shared = qemu_ram_is_shared(new_block);
> > -    RAMBlock *block;
> > -    RAMBlock *last_block = NULL;
> >      ram_addr_t old_ram_size, new_ram_size;
> >      Error *err = NULL;
> >  
> > @@ -1846,28 +1881,9 @@ static void ram_block_add(RAMBlock *new_block, Error 
> > **errp)
> >      if (new_ram_size > old_ram_size) {
> >          dirty_memory_extend(old_ram_size, new_ram_size);
> >      }
> > -    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
> > -     * QLIST (which has an RCU-friendly variant) does not have insertion at
> > -     * tail, so save the last element in last_block.
> > -     */
> > -    RAMBLOCK_FOREACH(block) {
> > -        last_block = block;
> > -        if (block->max_length < new_block->max_length) {
> > -            break;
> > -        }
> > -    }
> > -    if (block) {
> > -        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
> > -    } else if (last_block) {
> > -        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
> > -    } else { /* list is empty */
> > -        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
> > -    }
> > -    ram_list.mru_block = NULL;
> >  
> > -    /* Write list before version */
> > -    smp_wmb();
> > -    ram_list.version++;
> > +    ram_block_add_list_locked(new_block);
> > +
> >      qemu_mutex_unlock_ramlist();
> >  
> >      cpu_physical_memory_set_dirty_range(new_block->offset,
> > -- 
> > 2.17.1
> > 



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.