
Re: [Xen-devel] [PATCH v2 09/11] vpci: register as an internal ioreq server



> -----Original Message-----
> From: Roger Pau Monne <roger.pau@xxxxxxxxxx>
> Sent: 03 September 2019 17:14
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>; Ian Jackson 
> <Ian.Jackson@xxxxxxxxxx>; Wei Liu
> <wl@xxxxxxx>; Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; George Dunlap 
> <George.Dunlap@xxxxxxxxxx>; Jan
> Beulich <jbeulich@xxxxxxxx>; Julien Grall <julien.grall@xxxxxxx>; Konrad 
> Rzeszutek Wilk
> <konrad.wilk@xxxxxxxxxx>; Stefano Stabellini <sstabellini@xxxxxxxxxx>; Tim 
> (Xen.org) <tim@xxxxxxx>;
> Paul Durrant <Paul.Durrant@xxxxxxxxxx>
> Subject: [PATCH v2 09/11] vpci: register as an internal ioreq server
> 
> Switch vPCI to become an internal ioreq server, and hence drop all the
> vPCI specific decoding and trapping to PCI IO ports and MMCFG regions.
> 
> This unifies the vPCI code with the ioreq infrastructure, opening the
> door to domains having PCI accesses handled by vPCI and other ioreq
> servers at the same time.
> 
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

[snip]
> diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
> index f61f66df5f..bf2c64a0a9 100644
> --- a/xen/arch/x86/physdev.c
> +++ b/xen/arch/x86/physdev.c
> @@ -11,6 +11,7 @@
>  #include <asm/current.h>
>  #include <asm/io_apic.h>
>  #include <asm/msi.h>
> +#include <asm/hvm/ioreq.h>

Why is this change necessary on its own? I.e. what in physdev.c now needs 
asm/hvm/ioreq.h?

>  #include <asm/hvm/irq.h>
>  #include <asm/hypercall.h>
>  #include <public/xen.h>
> diff --git a/xen/drivers/vpci/vpci.c b/xen/drivers/vpci/vpci.c
> index cbd1bac7fc..5664020c2d 100644
> --- a/xen/drivers/vpci/vpci.c
> +++ b/xen/drivers/vpci/vpci.c
> @@ -20,6 +20,8 @@
>  #include <xen/sched.h>
>  #include <xen/vpci.h>
> 
> +#include <asm/hvm/ioreq.h>
> +
>  /* Internal struct to store the emulated PCI registers. */
>  struct vpci_register {
>      vpci_read_t *read;
> @@ -302,7 +304,7 @@ static uint32_t merge_result(uint32_t data, uint32_t new, 
> unsigned int size,
>      return (data & ~(mask << (offset * 8))) | ((new & mask) << (offset * 8));
>  }
> 
> -uint32_t vpci_read(pci_sbdf_t sbdf, unsigned int reg, unsigned int size)
> +static uint32_t read(pci_sbdf_t sbdf, unsigned int reg, unsigned int size)
>  {
>      const struct domain *d = current->domain;
>      const struct pci_dev *pdev;
> @@ -404,8 +406,8 @@ static void vpci_write_helper(const struct pci_dev *pdev,
>               r->private);
>  }
> 
> -void vpci_write(pci_sbdf_t sbdf, unsigned int reg, unsigned int size,
> -                uint32_t data)
> +static void write(pci_sbdf_t sbdf, unsigned int reg, unsigned int size,
> +                  uint32_t data)
>  {
>      const struct domain *d = current->domain;
>      const struct pci_dev *pdev;
> @@ -478,6 +480,67 @@ void vpci_write(pci_sbdf_t sbdf, unsigned int reg, 
> unsigned int size,
>      spin_unlock(&pdev->vpci->lock);
>  }
> 
> +#ifdef __XEN__
> +static int ioreq_handler(struct vcpu *v, ioreq_t *req, void *data)
> +{
> +    pci_sbdf_t sbdf;
> +
> +    if ( req->type == IOREQ_TYPE_INVALIDATE )
> +        /*
> +         * Ignore invalidate requests, those can be received even without
> +         * having any memory ranges registered, see send_invalidate_req.
> +         */
> +        return X86EMUL_OKAY;

In general, I wonder whether internal servers will ever need to deal with 
invalidate. The invalidate mechanism only exists to get QEMU to drop its map 
cache after a decrease_reservation, so that the page refs get dropped.
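If internal servers indeed never need it, one option would be to filter 
invalidate centrally before any internal handler runs, rather than 
open-coding the check in each handler. A rough, untested sketch of what I 
mean (the dispatch function and handler typedef below are placeholder names 
for illustration, not code from this series):

    /*
     * Hypothetical only: swallow invalidate requests before they reach
     * any internal handler, so handlers such as ioreq_handler() above
     * don't each need the special case.  Names are made up.
     */
    typedef int internal_ioreq_handler_t(struct vcpu *v, ioreq_t *req,
                                         void *data);

    static int dispatch_internal_ioreq(struct vcpu *v, ioreq_t *req,
                                       internal_ioreq_handler_t *handler,
                                       void *data)
    {
        if ( req->type == IOREQ_TYPE_INVALIDATE )
            /* Only external emulators (e.g. QEMU's map cache) care. */
            return X86EMUL_OKAY;

        return handler(v, req, data);
    }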

  Paul

> +
> +    if ( req->type != IOREQ_TYPE_PCI_CONFIG || req->data_is_ptr )
> +    {
> +        ASSERT_UNREACHABLE();
> +        return X86EMUL_UNHANDLEABLE;
> +    }
> +
> +    sbdf.sbdf = req->addr >> 32;
> +
> +    if ( req->dir )
> +        req->data = read(sbdf, req->addr, req->size);
> +    else
> +        write(sbdf, req->addr, req->size, req->data);
> +
> +    return X86EMUL_OKAY;
> +}
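(Aside, for readers of the archive: the handler above assumes the usual 
PCI_CONFIG ioreq packing, i.e. the SBDF in the upper 32 bits of req->addr 
and the config space offset in the low bits. An illustrative decode only, 
under that assumption:

    pci_sbdf_t sbdf = { .sbdf = req->addr >> 32 };  /* seg/bus/dev/fn  */
    unsigned int reg = req->addr & 0xffffffff;      /* config offset   */
    unsigned int len = req->size;                   /* 1, 2 or 4 bytes */

The read()/write() helpers take the offset as unsigned int, so passing 
req->addr directly as the handler does relies on that truncation.)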
> +
> +int vpci_register_ioreq(struct domain *d)
> +{
> +    ioservid_t id;
> +    int rc;
> +
> +    if ( !has_vpci(d) )
> +        return 0;
> +
> +    rc = hvm_create_ioreq_server(d, HVM_IOREQSRV_BUFIOREQ_OFF, &id, true);
> +    if ( rc )
> +        return rc;
> +
> +    rc = hvm_add_ioreq_handler(d, id, ioreq_handler, NULL);
> +    if ( rc )
> +        return rc;
> +
> +    if ( is_hardware_domain(d) )
> +    {
> +        /* Handle all devices in vpci. */
> +        rc = hvm_map_io_range_to_ioreq_server(d, id, XEN_DMOP_IO_RANGE_PCI,
> +                                              0, ~(uint64_t)0);
> +        if ( rc )
> +            return rc;
> +    }
> +
> +    rc = hvm_set_ioreq_server_state(d, id, true);
> +    if ( rc )
> +        return rc;
> +
> +    return rc;
> +}
> +#endif
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/include/xen/vpci.h b/xen/include/xen/vpci.h
> index 4cf233c779..36f435ed5b 100644
> --- a/xen/include/xen/vpci.h
> +++ b/xen/include/xen/vpci.h
> @@ -23,6 +23,9 @@ typedef int vpci_register_init_t(struct pci_dev *dev);
>    static vpci_register_init_t *const x##_entry  \
>                 __used_section(".data.vpci." p) = x
> 
> +/* Register vPCI handler with ioreq. */
> +int vpci_register_ioreq(struct domain *d);
> +
>  /* Add vPCI handlers to device. */
>  int __must_check vpci_add_handlers(struct pci_dev *dev);
> 
> @@ -38,11 +41,6 @@ int __must_check vpci_add_register(struct vpci *vpci,
>  int __must_check vpci_remove_register(struct vpci *vpci, unsigned int offset,
>                                        unsigned int size);
> 
> -/* Generic read/write handlers for the PCI config space. */
> -uint32_t vpci_read(pci_sbdf_t sbdf, unsigned int reg, unsigned int size);
> -void vpci_write(pci_sbdf_t sbdf, unsigned int reg, unsigned int size,
> -                uint32_t data);
> -
>  /* Passthrough handlers. */
>  uint32_t vpci_hw_read16(const struct pci_dev *pdev, unsigned int reg,
>                          void *data);
> @@ -221,20 +219,12 @@ static inline int vpci_add_handlers(struct pci_dev 
> *pdev)
>      return 0;
>  }
> 
> -static inline void vpci_dump_msi(void) { }
> -
> -static inline uint32_t vpci_read(pci_sbdf_t sbdf, unsigned int reg,
> -                                 unsigned int size)
> +static inline int vpci_register_ioreq(struct domain *d)
>  {
> -    ASSERT_UNREACHABLE();
> -    return ~(uint32_t)0;
> +    return 0;
>  }
> 
> -static inline void vpci_write(pci_sbdf_t sbdf, unsigned int reg,
> -                              unsigned int size, uint32_t data)
> -{
> -    ASSERT_UNREACHABLE();
> -}
> +static inline void vpci_dump_msi(void) { }
> 
>  static inline bool vpci_process_pending(struct vcpu *v)
>  {
> --
> 2.22.0
