
Re: [Xen-devel] [v6][PATCH 1/7] introduce XENMEM_reserved_device_memory_map



> From: Chen, Tiejun
> Sent: Tuesday, September 09, 2014 10:50 PM
> 
> From: Jan Beulich <jbeulich@xxxxxxxx>
> 
> This is a prerequisite for punching holes into HVM and PVH guests' P2M
> to allow passing through devices that are associated with (on VT-d)
> RMRRs.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Did you change anything on top of Jan's original version? If so, your
sign-off is also needed.

Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx> for the VT-d part.
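
For readers following along: the new handlers below implement the usual
query/retry convention. If the supplied buffer is too small, the call
fails with -ENOBUFS and nr_entries is written back with the number of
regions actually present. A minimal caller-side sketch (illustrative
only, not part of this patch; it assumes a guest-side HYPERVISOR_memory_op
wrapper, set_xen_guest_handle() and ARRAY_SIZE() as available e.g. in
hvmloader):

    struct xen_mem_reserved_device_memory entries[16];   /* arbitrary size */
    struct xen_mem_reserved_device_memory_map map = {
        .nr_entries = ARRAY_SIZE(entries),
    };
    int rc;

    set_xen_guest_handle(map.buffer, entries);
    rc = HYPERVISOR_memory_op(XENMEM_reserved_device_memory_map, &map);
    if ( rc == 0 )
    {
        /* Success: map.nr_entries holds the count of valid entries[]. */
    }
    else if ( rc == -ENOBUFS )
    {
        /* Buffer too small: map.nr_entries reports how many are needed. */
    }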

> 
> diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
> index 25dc016..01154f6 100644
> --- a/xen/common/compat/memory.c
> +++ b/xen/common/compat/memory.c
> @@ -15,6 +15,35 @@ CHECK_TYPE(domid);
> 
>  CHECK_mem_access_op;
> 
> +struct get_reserved_device_memory {
> +    struct compat_mem_reserved_device_memory_map map;
> +    unsigned int used_entries;
> +};
> +
> +static int get_reserved_device_memory(xen_pfn_t start,
> +                                      xen_ulong_t nr, void *ctxt)
> +{
> +    struct get_reserved_device_memory *grdm = ctxt;
> +
> +    if ( grdm->used_entries < grdm->map.nr_entries )
> +    {
> +        struct compat_mem_reserved_device_memory rdm = {
> +            .start_pfn = start, .nr_pages = nr
> +        };
> +
> +        if ( rdm.start_pfn != start || rdm.nr_pages != nr )
> +            return -ERANGE;
> +
> +        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
> +                                     &rdm, 1) )
> +            return -EFAULT;
> +    }
> +
> +    ++grdm->used_entries;
> +
> +    return 0;
> +}
> +
>  int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
>  {
>      int split, op = cmd & MEMOP_CMD_MASK;
> @@ -272,6 +301,29 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
>              break;
>          }
> 
> +#ifdef HAS_PASSTHROUGH
> +        case XENMEM_reserved_device_memory_map:
> +        {
> +            struct get_reserved_device_memory grdm;
> +
> +            if ( copy_from_guest(&grdm.map, compat, 1) ||
> +                 !compat_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
> +                return -EFAULT;
> +
> +            grdm.used_entries = 0;
> +            rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
> +                                                  &grdm);
> +
> +            if ( !rc && grdm.map.nr_entries < grdm.used_entries )
> +                rc = -ENOBUFS;
> +            grdm.map.nr_entries = grdm.used_entries;
> +            if ( __copy_to_guest(compat, &grdm.map, 1) )
> +                rc = -EFAULT;
> +
> +            return rc;
> +        }
> +#endif
> +
>          default:
>              return compat_arch_memory_op(cmd, compat);
>          }
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index c2dd31b..c7efd6b 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -695,6 +695,32 @@ out:
>      return rc;
>  }
> 
> +struct get_reserved_device_memory {
> +    struct xen_mem_reserved_device_memory_map map;
> +    unsigned int used_entries;
> +};
> +
> +static int get_reserved_device_memory(xen_pfn_t start,
> +                                      xen_ulong_t nr, void *ctxt)
> +{
> +    struct get_reserved_device_memory *grdm = ctxt;
> +
> +    if ( grdm->used_entries < grdm->map.nr_entries )
> +    {
> +        struct xen_mem_reserved_device_memory rdm = {
> +            .start_pfn = start, .nr_pages = nr
> +        };
> +
> +        if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
> +                                    &rdm, 1) )
> +            return -EFAULT;
> +    }
> +
> +    ++grdm->used_entries;
> +
> +    return 0;
> +}
> +
>  long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>  {
>      struct domain *d;
> @@ -969,6 +995,29 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
> 
>          break;
> 
> +#ifdef HAS_PASSTHROUGH
> +    case XENMEM_reserved_device_memory_map:
> +    {
> +        struct get_reserved_device_memory grdm;
> +
> +        if ( copy_from_guest(&grdm.map, arg, 1) ||
> +             !guest_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
> +            return -EFAULT;
> +
> +        grdm.used_entries = 0;
> +        rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
> +                                              &grdm);
> +
> +        if ( !rc && grdm.map.nr_entries < grdm.used_entries )
> +            rc = -ENOBUFS;
> +        grdm.map.nr_entries = grdm.used_entries;
> +        if ( __copy_to_guest(arg, &grdm.map, 1) )
> +            rc = -EFAULT;
> +
> +        break;
> +    }
> +#endif
> +
>      default:
>          rc = arch_memory_op(cmd, arg);
>          break;
> diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> index cc12735..7c17e8d 100644
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -344,6 +344,16 @@ void iommu_crash_shutdown(void)
>      iommu_enabled = iommu_intremap = 0;
>  }
> 
> +int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
> +{
> +    const struct iommu_ops *ops = iommu_get_ops();
> +
> +    if ( !iommu_enabled || !ops->get_reserved_device_memory )
> +        return 0;
> +
> +    return ops->get_reserved_device_memory(func, ctxt);
> +}
> +
>  bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
>  {
>      const struct hvm_iommu *hd = domain_hvm_iommu(d);
> diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
> index 1152c3a..141e735 100644
> --- a/xen/drivers/passthrough/vtd/dmar.c
> +++ b/xen/drivers/passthrough/vtd/dmar.c
> @@ -893,3 +893,20 @@ int platform_supports_x2apic(void)
>      unsigned int mask = ACPI_DMAR_INTR_REMAP | ACPI_DMAR_X2APIC_OPT_OUT;
>      return cpu_has_x2apic && ((dmar_flags & mask) == ACPI_DMAR_INTR_REMAP);
>  }
> +
> +int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
> +{
> +    struct acpi_rmrr_unit *rmrr;
> +    int rc = 0;
> +
> +    list_for_each_entry(rmrr, &acpi_rmrr_units, list)
> +    {
> +        rc = func(PFN_DOWN(rmrr->base_address),
> +                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
> +                  ctxt);
> +        if ( rc )
> +            break;
> +    }
> +
> +    return rc;
> +}
> diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
> index 5524dba..f9ee9b0 100644
> --- a/xen/drivers/passthrough/vtd/extern.h
> +++ b/xen/drivers/passthrough/vtd/extern.h
> @@ -75,6 +75,7 @@ int domain_context_mapping_one(struct domain *domain, struct iommu *iommu,
>                                 u8 bus, u8 devfn, const struct pci_dev *);
>  int domain_context_unmap_one(struct domain *domain, struct iommu *iommu,
>                               u8 bus, u8 devfn);
> +int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
> 
>  unsigned int io_apic_read_remap_rte(unsigned int apic, unsigned int reg);
>  void io_apic_write_remap_rte(unsigned int apic,
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index 042b882..d513dba 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -2485,6 +2485,7 @@ const struct iommu_ops intel_iommu_ops = {
>      .crash_shutdown = vtd_crash_shutdown,
>      .iotlb_flush = intel_iommu_iotlb_flush,
>      .iotlb_flush_all = intel_iommu_iotlb_flush_all,
> +    .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
>      .dump_p2m_table = vtd_dump_p2m_table,
>  };
> 
> diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
> index 2c57aa0..c59d42d 100644
> --- a/xen/include/public/memory.h
> +++ b/xen/include/public/memory.h
> @@ -523,7 +523,29 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
> 
>  #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
> 
> -/* Next available subop number is 26 */
> +/*
> + * For legacy reasons, some devices must be configured with special memory
> + * regions to function correctly.  The guest must avoid using any of these
> + * regions.
> + */
> +#define XENMEM_reserved_device_memory_map   26
> +struct xen_mem_reserved_device_memory {
> +    xen_pfn_t start_pfn;
> +    xen_ulong_t nr_pages;
> +};
> +typedef struct xen_mem_reserved_device_memory xen_mem_reserved_device_memory_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t);
> +
> +struct xen_mem_reserved_device_memory_map {
> +    /* IN/OUT */
> +    unsigned int nr_entries;
> +    /* OUT */
> +    XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t) buffer;
> +};
> +typedef struct xen_mem_reserved_device_memory_map xen_mem_reserved_device_memory_map_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_map_t);
> +
> +/* Next available subop number is 27 */
> 
>  #endif /* __XEN_PUBLIC_MEMORY_H__ */
> 
> diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
> index 8eb764a..409f6f8 100644
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -120,6 +120,8 @@ void iommu_dt_domain_destroy(struct domain *d);
> 
>  struct page_info;
> 
> +typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, void *ctxt);
> +
>  struct iommu_ops {
>      int (*init)(struct domain *d);
>      void (*hwdom_init)(struct domain *d);
> @@ -156,12 +158,14 @@ struct iommu_ops {
>      void (*crash_shutdown)(void);
>      void (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
>      void (*iotlb_flush_all)(struct domain *d);
> +    int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
>      void (*dump_p2m_table)(struct domain *d);
>  };
> 
>  void iommu_suspend(void);
>  void iommu_resume(void);
>  void iommu_crash_shutdown(void);
> +int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);
> 
>  void iommu_share_p2m_table(struct domain *d);
> 
> diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
> index 9a35dd7..3ec1749 100644
> --- a/xen/include/xlat.lst
> +++ b/xen/include/xlat.lst
> @@ -60,7 +60,8 @@
>  !    memory_exchange                 memory.h
>  !    memory_map                      memory.h
>  !    memory_reservation              memory.h
> -?    mem_access_op           memory.h
> +?    mem_access_op                   memory.h
> +!    mem_reserved_device_memory_map  memory.h
>  !    pod_target                      memory.h
>  !    remove_from_physmap             memory.h
>  ?    physdev_eoi                     physdev.h
> --
> 1.9.1
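
Worth noting: the new iommu_ops hook keeps the reporting path
IOMMU-agnostic. Any other driver could plug in the same way the VT-d
code above does, by walking its own list of device-reserved ranges and
handing each one, as (start pfn, page count), to the supplied
iommu_grdm_t callback, stopping on the first non-zero return. A
hypothetical sketch (struct reserved_range, its base_address/end_address
fields and amd_reserved_ranges are invented names for illustration):

    static int amd_iommu_get_reserved_device_memory(iommu_grdm_t *func,
                                                    void *ctxt)
    {
        struct reserved_range *range;   /* hypothetical driver bookkeeping */
        int rc = 0;

        list_for_each_entry(range, &amd_reserved_ranges, list)
        {
            /* Report the range as (start pfn, number of pages). */
            rc = func(PFN_DOWN(range->base_address),
                      PFN_UP(range->end_address) - PFN_DOWN(range->base_address),
                      ctxt);
            if ( rc )
                break;
        }

        return rc;
    }

Such a driver would then set .get_reserved_device_memory in its own
iommu_ops, just as intel_iommu_ops does above.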

