
Re: [Xen-devel] [PATCH 2/2] xen-hvm: Clean up xen_ram_alloc() error handling



On Thu, 14 Jan 2016, Markus Armbruster wrote:
> xen_ram_alloc() dies with hw_error() on error, even though its caller
> ram_block_add() handles errors just fine.  Add an Error **errp
> parameter and use it.
> 
> Leave case RUN_STATE_INMIGRATE alone, because that looks like some
> kind of warning.
> 
> Signed-off-by: Markus Armbruster <armbru@xxxxxxxxxx>

Reviewed-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

I am happy to queue both patches up in my next branch.
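
For readers unfamiliar with the convention the patch adopts: QEMU's Error API lets a
callee report failure through an Error **errp out-parameter via error_setg(), while
the caller collects it in a local Error * and hands it up with error_propagate().
A minimal sketch of that shape follows (function names here are hypothetical; this
compiles only inside the QEMU tree, which provides qapi/error.h):

    #include "qapi/error.h"   /* QEMU tree only */

    /* Callee: reports failure via errp instead of aborting with hw_error(). */
    static void alloc_backing(size_t size, Error **errp)
    {
        if (size == 0) {      /* placeholder failure check */
            error_setg(errp, "cannot allocate zero bytes");
            return;
        }
        /* ... perform the real allocation ... */
    }

    /* Caller: collects the error locally, cleans up, then propagates. */
    static int add_block(size_t size, Error **errp)
    {
        Error *err = NULL;

        alloc_backing(size, &err);
        if (err) {
            /* Release any locks first, as ram_block_add() does with
             * qemu_mutex_unlock_ramlist(), then hand the error up. */
            error_propagate(errp, err);
            return -1;
        }
        return 0;
    }

This is exactly the shape ram_block_add() takes in the hunk below: check the local
err, unlock the ramlist mutex, propagate, and return failure to the caller.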


>  exec.c               | 8 +++++++-
>  include/hw/xen/xen.h | 2 +-
>  xen-hvm-stub.c       | 3 ++-
>  xen-hvm.c            | 7 ++++---
>  4 files changed, 14 insertions(+), 6 deletions(-)
> 
> diff --git a/exec.c b/exec.c
> index 7f0ce42..c268c36 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -1474,6 +1474,7 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
>      RAMBlock *block;
>      RAMBlock *last_block = NULL;
>      ram_addr_t old_ram_size, new_ram_size;
> +    Error *err = NULL;
>  
>      old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
>  
> @@ -1483,7 +1484,12 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
>      if (!new_block->host) {
>          if (xen_enabled()) {
>              xen_ram_alloc(new_block->offset, new_block->max_length,
> -                          new_block->mr);
> +                          new_block->mr, &err);
> +            if (err) {
> +                error_propagate(errp, err);
> +                qemu_mutex_unlock_ramlist();
> +                return -1;
> +            }
>          } else {
>              new_block->host = phys_mem_alloc(new_block->max_length,
>                                               &new_block->mr->align);
> diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
> index d07bc99..1b81b4b 100644
> --- a/include/hw/xen/xen.h
> +++ b/include/hw/xen/xen.h
> @@ -41,7 +41,7 @@ void xenstore_store_pv_console_info(int i, struct CharDriverState *chr);
>  #if defined(NEED_CPU_H) && !defined(CONFIG_USER_ONLY)
>  void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory);
>  void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
> -                   struct MemoryRegion *mr);
> +                   struct MemoryRegion *mr, Error **errp);
>  void xen_modified_memory(ram_addr_t start, ram_addr_t length);
>  #endif
>  
> diff --git a/xen-hvm-stub.c b/xen-hvm-stub.c
> index 7ff0602..b958367 100644
> --- a/xen-hvm-stub.c
> +++ b/xen-hvm-stub.c
> @@ -30,7 +30,8 @@ void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
>  {
>  }
>  
> -void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
> +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
> +                   Error **errp)
>  {
>  }
>  
> diff --git a/xen-hvm.c b/xen-hvm.c
> index cb7128c..a9085a8 100644
> --- a/xen-hvm.c
> +++ b/xen-hvm.c
> @@ -239,9 +239,9 @@ static void xen_ram_init(PCMachineState *pcms,
>      }
>  }
>  
> -void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
> +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
> +                   Error **errp)
>  {
> -    /* FIXME caller ram_block_add() wants error_setg() on failure */
>      unsigned long nr_pfn;
>      xen_pfn_t *pfn_list;
>      int i;
> @@ -268,7 +268,8 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
>      }
>  
>      if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
> -        hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
> +        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
> +                   ram_addr);
>      }
>  
>      g_free(pfn_list);
> -- 
> 2.4.3
> 
