
Re: [Xen-devel] [PATCH 2/2] pc-nvdimm acpi: build ACPI tables for pc-nvdimm devices



On Mon, 4 Jan 2016, Konrad Rzeszutek Wilk wrote:
> On Mon, Jan 04, 2016 at 04:01:08PM +0000, Stefano Stabellini wrote:
> > CC'ing the Xen tools maintainers and Anthony.
> > 
> > On Tue, 29 Dec 2015, Haozhong Zhang wrote:
> > > Reuse existing NVDIMM ACPI code to build ACPI tables for pc-nvdimm
> > > devices. The resulting tables are then copied into the Xen guest domain
> > > so that they can be later loaded by Xen hvmloader.
> > > 
> > > Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
> > 
> > How much work would it be to generate the nvdimm acpi tables from the
> > Xen toolstack?
> 
> Why duplicate the code? QEMU already generates the NFIT table and its sub-tables.
>
>
> > Getting the tables from QEMU doesn't seem like a good idea to me, unless
> > we start getting the whole set of ACPI tables that way.
> 
> There is also the ACPI DSDT code - which requires a memory region to
> be reserved for the AML code to drop its parameters into, so that
> QEMU can scan the NVDIMM for failures. The region (and its size)
> should be determined by QEMU, since QEMU is the component that works
> on this data.
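
(As I understand it, that is the _DSM buffer mechanism. For the sake of
discussion, a minimal sketch of what such a reserved parameter region
might carry - the field names and layout below are my own illustration,
not the actual interface QEMU defines:

#include <stdint.h>

/* Illustrative only: the AML code would fill a structure like this in
 * the reserved region before notifying QEMU, which then acts on it,
 * e.g. to run an NVDIMM health/failure scan. */
struct dsm_param_buf {
    uint32_t handle;      /* NFIT handle of the targeted NVDIMM */
    uint32_t revision;    /* _DSM revision id */
    uint32_t function;    /* _DSM function index to execute */
    uint8_t  args[4084];  /* function-specific input, one 4K page total */
};

Since only QEMU interprets this buffer, the reservation - its location
and its size - naturally has to come from QEMU.)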

QEMU can generate the whole set of ACPI tables. Why should we take only
the nvdimm tables and not the others?
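
For reference, the NFIT is a single table: a fixed ACPI header followed
by a list of sub-structures (SPA ranges, region mappings, and so on),
each tagged with a type/length pair. A minimal C sketch of that framing,
following the ACPI 6.0 layout (the struct names are mine):

#include <stdint.h>

/* Standard 36-byte header shared by all ACPI tables. */
struct acpi_table_header {
    char     signature[4];       /* "NFIT" */
    uint32_t length;             /* size of the whole table in bytes */
    uint8_t  revision;
    uint8_t  checksum;           /* entire table must sum to zero */
    char     oem_id[6];
    char     oem_table_id[8];
    uint32_t oem_revision;
    char     creator_id[4];
    uint32_t creator_revision;
} __attribute__((packed));

/* After the header (plus 4 reserved bytes), the NFIT body is just a
 * sequence of sub-structures, each starting with this pair. */
struct nfit_sub_header {
    uint16_t type;      /* 0 = SPA range, 1 = region mapping, ... */
    uint16_t length;    /* size of this sub-structure */
} __attribute__((packed));

So "taking only the nvdimm tables" means copying one self-contained
blob; the question is whether that is the right boundary.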

I don't think it is wise to have two components which both think they
are in control of generating ACPI tables: hvmloader (soon to be the
toolstack, with Anthony's work) and QEMU. From an architectural
perspective, it doesn't look robust to me.

Could we take this opportunity to switch to QEMU generating the whole
set of ACPI tables?
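
To make the handoff below concrete: whatever component ends up
consuming the tables has to read the address/length pair back from
xenstore. A rough sketch of that consumer side, using plain
libxenstore (hvmloader has its own helpers; the paths are the ones the
patch writes):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <xenstore.h>

/* Sketch: recover the table blob location published by
 * xen_hvm_acpi_setup() for the given domain.  The values are written
 * in decimal ("%"PRIu64), hence base 10 here.  Error handling trimmed. */
static int read_dm_acpi(int domid, uint64_t *addr, uint64_t *len)
{
    struct xs_handle *xs = xs_open(0);
    char path[80];
    char *val;
    unsigned int vlen;

    if (!xs)
        return -1;

    snprintf(path, sizeof(path),
             "/local/domain/%d/hvmloader/dm-acpi/address", domid);
    if (!(val = xs_read(xs, XBT_NULL, path, &vlen)))
        goto err;
    *addr = strtoull(val, NULL, 10);
    free(val);

    snprintf(path, sizeof(path),
             "/local/domain/%d/hvmloader/dm-acpi/length", domid);
    if (!(val = xs_read(xs, XBT_NULL, path, &vlen)))
        goto err;
    *len = strtoull(val, NULL, 10);
    free(val);

    xs_close(xs);
    return 0;
err:
    xs_close(xs);
    return -1;
}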
 
 
> > >  hw/acpi/nvdimm.c     |  5 +++-
> > >  hw/i386/pc.c         |  6 ++++-
> > >  include/hw/xen/xen.h |  2 ++
> > >  xen-hvm.c            | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++
> > >  4 files changed, 82 insertions(+), 2 deletions(-)
> > > 
> > > diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
> > > index df1b176..7c4b931 100644
> > > --- a/hw/acpi/nvdimm.c
> > > +++ b/hw/acpi/nvdimm.c
> > > @@ -29,12 +29,15 @@
> > >  #include "hw/acpi/acpi.h"
> > >  #include "hw/acpi/aml-build.h"
> > >  #include "hw/mem/nvdimm.h"
> > > +#include "hw/mem/pc-nvdimm.h"
> > > +#include "hw/xen/xen.h"
> > >  
> > >  static int nvdimm_plugged_device_list(Object *obj, void *opaque)
> > >  {
> > >      GSList **list = opaque;
> > > +    const char *type_name = xen_enabled() ? TYPE_PC_NVDIMM : TYPE_NVDIMM;
> > >  
> > > -    if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
> > > +    if (object_dynamic_cast(obj, type_name)) {
> > >          DeviceState *dev = DEVICE(obj);
> > >  
> > >          if (dev->realized) { /* only realized NVDIMMs matter */
> > > diff --git a/hw/i386/pc.c b/hw/i386/pc.c
> > > index 459260b..fadacf5 100644
> > > --- a/hw/i386/pc.c
> > > +++ b/hw/i386/pc.c
> > > @@ -1186,7 +1186,11 @@ void pc_guest_info_machine_done(Notifier *notifier, void *data)
> > >          }
> > >      }
> > >  
> > > -    acpi_setup(&guest_info_state->info);
> > > +    if (!xen_enabled()) {
> > > +        acpi_setup(&guest_info_state->info);
> > > +    } else if (xen_hvm_acpi_setup(PC_MACHINE(qdev_get_machine()))) {
> > > +        error_report("Warning: failed to initialize Xen HVM ACPI 
> > > tables");
> > > +    }
> > >  }
> > >  
> > >  PcGuestInfo *pc_guest_info_init(PCMachineState *pcms)
> > > diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
> > > index e90931a..8b705e1 100644
> > > --- a/include/hw/xen/xen.h
> > > +++ b/include/hw/xen/xen.h
> > > @@ -51,4 +51,6 @@ void xen_register_framebuffer(struct MemoryRegion *mr);
> > >  #  define HVM_MAX_VCPUS 32
> > >  #endif
> > >  
> > > +int xen_hvm_acpi_setup(PCMachineState *pcms);
> > > +
> > >  #endif /* QEMU_HW_XEN_H */
> > > diff --git a/xen-hvm.c b/xen-hvm.c
> > > index 6ebf43f..f1f5e77 100644
> > > --- a/xen-hvm.c
> > > +++ b/xen-hvm.c
> > > @@ -26,6 +26,13 @@
> > >  #include <xen/hvm/params.h>
> > >  #include <xen/hvm/e820.h>
> > >  
> > > +#include "qemu/error-report.h"
> > > +#include "hw/acpi/acpi.h"
> > > +#include "hw/acpi/aml-build.h"
> > > +#include "hw/acpi/bios-linker-loader.h"
> > > +#include "hw/mem/nvdimm.h"
> > > +#include "hw/mem/pc-nvdimm.h"
> > > +
> > >  //#define DEBUG_XEN_HVM
> > >  
> > >  #ifdef DEBUG_XEN_HVM
> > > @@ -1330,6 +1337,70 @@ int xen_hvm_init(PCMachineState *pcms,
> > >      return 0;
> > >  }
> > >  
> > > +int xen_hvm_acpi_setup(PCMachineState *pcms)
> > > +{
> > > +    AcpiBuildTables *hvm_acpi_tables;
> > > +    GArray *tables_blob, *table_offsets;
> > > +
> > > +    ram_addr_t acpi_tables_addr, acpi_tables_size;
> > > +    void *host;
> > > +
> > > +    struct xs_handle *xs = NULL;
> > > +    char path[80], value[17];
> > > +
> > > +    if (!pcms->nvdimm) {
> > > +        return 0;
> > > +    }
> > > +
> > > +    hvm_acpi_tables = g_malloc0(sizeof(AcpiBuildTables));
> > > +    if (!hvm_acpi_tables) {
> > > +        return -1;
> > > +    }
> > > +    acpi_build_tables_init(hvm_acpi_tables);
> > > +    tables_blob = hvm_acpi_tables->table_data;
> > > +    table_offsets = g_array_new(false, true, sizeof(uint32_t));
> > > +    bios_linker_loader_alloc(hvm_acpi_tables->linker,
> > > +                             ACPI_BUILD_TABLE_FILE, 64, false);
> > > +
> > > +    /* build NFIT tables */
> > > +    nvdimm_build_acpi(table_offsets, tables_blob, hvm_acpi_tables->linker);
> > > +    g_array_free(table_offsets, true);
> > > +
> > > +    /* copy ACPI tables into VM */
> > > +    acpi_tables_size = tables_blob->len;
> > > +    acpi_tables_addr =
> > > +        (pcms->below_4g_mem_size - acpi_tables_size) & XC_PAGE_MASK;
> > > +    host = xc_map_foreign_range(xen_xc, xen_domid,
> > > +                                ROUND_UP(acpi_tables_size, XC_PAGE_SIZE),
> > > +                                PROT_READ | PROT_WRITE,
> > > +                                acpi_tables_addr >> XC_PAGE_SHIFT);
> > > +    memcpy(host, tables_blob->data, acpi_tables_size);
> > > +    munmap(host, ROUND_UP(acpi_tables_size, XC_PAGE_SIZE));
> > > +
> > > +    /* write address and size of ACPI tables to xenstore */
> > > +    xs = xs_open(0);
> > > +    if (xs == NULL) {
> > > +        error_report("could not contact XenStore\n");
> > > +        return -1;
> > > +    }
> > > +    snprintf(path, sizeof(path),
> > > +             "/local/domain/%d/hvmloader/dm-acpi/address", xen_domid);
> > > +    snprintf(value, sizeof(value), "%"PRIu64, (uint64_t) acpi_tables_addr);
> > > +    if (!xs_write(xs, 0, path, value, strlen(value))) {
> > > +        error_report("failed to write NFIT base address to xenstore\n");
> > > +        return -1;
> > > +    }
> > > +    snprintf(path, sizeof(path),
> > > +             "/local/domain/%d/hvmloader/dm-acpi/length", xen_domid);
> > > +    snprintf(value, sizeof(value), "%"PRIu64, (uint64_t) acpi_tables_size);
> > > +    if (!xs_write(xs, 0, path, value, strlen(value))) {
> > > +        error_report("failed to write NFIT size to xenstore\n");
> > > +        return -1;
> > > +    }
> > > +
> > > +    return 0;
> > > +}
> > > +
> > >  void destroy_hvm_domain(bool reboot)
> > >  {
> > >      XenXC xc_handle;
> > > -- 
> > > 2.4.8
> > > 
> > > 
