Re: [Xen-devel] [PATCH v6 2/5] sysctl: Add sysctl interface for querying PCI topology
On 06/04/15 23:12, Boris Ostrovsky wrote:
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
>
> Changes in v6:
> * Dropped continuations; the sysctl now returns after 64 iterations if
> necessary
Why? The caller should reasonably expect a single hypercall to complete,
not to have to reissue it repeatedly to get all the data they want.
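To make the cost concrete, the toolstack ends up having to drive a loop
along these lines (a sketch only: xc_sysctl() stands in for whatever
issue path the caller actually uses, and devs/nodes are assumed to be
suitably mapped buffers of num_devs entries):

    struct xen_sysctl sysctl = { 0 };

    sysctl.cmd = XEN_SYSCTL_pcitopoinfo;
    sysctl.u.pcitopoinfo.num_devs = num_devs;
    sysctl.u.pcitopoinfo.first_dev = 0;
    set_xen_guest_handle(sysctl.u.pcitopoinfo.devs, devs);
    set_xen_guest_handle(sysctl.u.pcitopoinfo.nodes, nodes);

    /* Reissue until the hypervisor has processed every element. */
    while ( sysctl.u.pcitopoinfo.first_dev < num_devs )
    {
        if ( xc_sysctl(xch, &sysctl) != 0 )
            break;          /* -EFAULT, -ENODEV, ... */
    }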
> * -ENODEV returned if device is not found
You don't actually break on -ENODEV, which causes the loop to continue
on to further devices and leaves no hint as to which device doesn't exist.
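For clarity, the shape of fix I would expect (a sketch, untested): break
out of the loop before first_dev is advanced, so the index handed back
to the caller names the offending device:

    if ( copy_to_guest_offset(ti->nodes, ti->first_dev, &node, 1) )
    {
        ret = -EFAULT;
        break;
    }

    if ( ret == -ENODEV )
        break;      /* first_dev still points at the missing device */

    ti->first_dev++;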
~Andrew
> * sysctl's first_dev is now expected to be used by userspace to continue the
> query
> * Added XSM hooks
>
> docs/misc/xsm-flask.txt | 1 +
> xen/common/sysctl.c | 58 +++++++++++++++++++++++++++++++++++
> xen/include/public/sysctl.h | 30 ++++++++++++++++++
> xen/xsm/flask/hooks.c | 1 +
> xen/xsm/flask/policy/access_vectors | 1 +
> 5 files changed, 91 insertions(+), 0 deletions(-)
>
> diff --git a/docs/misc/xsm-flask.txt b/docs/misc/xsm-flask.txt
> index 90a2aef..4e0f14f 100644
> --- a/docs/misc/xsm-flask.txt
> +++ b/docs/misc/xsm-flask.txt
> @@ -121,6 +121,7 @@ __HYPERVISOR_sysctl (xen/include/public/sysctl.h)
> * XEN_SYSCTL_cpupool_op
> * XEN_SYSCTL_scheduler_op
> * XEN_SYSCTL_coverage_op
> + * XEN_SYSCTL_pcitopoinfo
>
> __HYPERVISOR_memory_op (xen/include/public/memory.h)
>
> diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
> index d75440e..449ff70 100644
> --- a/xen/common/sysctl.c
> +++ b/xen/common/sysctl.c
> @@ -399,6 +399,64 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
> break;
> #endif
>
> +#ifdef HAS_PCI
> + case XEN_SYSCTL_pcitopoinfo:
> + {
> + xen_sysctl_pcitopoinfo_t *ti = &op->u.pcitopoinfo;
> + unsigned dev_cnt = 0;
> +
> + if ( guest_handle_is_null(ti->devs) ||
> + guest_handle_is_null(ti->nodes) ||
> + (ti->first_dev > ti->num_devs) )
> + {
> + ret = -EINVAL;
> + break;
> + }
> +
> + while ( ti->first_dev < ti->num_devs )
> + {
> + physdev_pci_device_t dev;
> + uint32_t node;
> + struct pci_dev *pdev;
> +
> + if ( copy_from_guest_offset(&dev, ti->devs, ti->first_dev, 1) )
> + {
> + ret = -EFAULT;
> + break;
> + }
> +
> + spin_lock(&pcidevs_lock);
> + pdev = pci_get_pdev(dev.seg, dev.bus, dev.devfn);
> + if ( !pdev )
> + {
> + ret = -ENODEV;
> + node = XEN_INVALID_NODE_ID;
> + }
> + else if ( pdev->node == NUMA_NO_NODE )
> + node = XEN_INVALID_NODE_ID;
> + else
> + node = pdev->node;
> + spin_unlock(&pcidevs_lock);
> +
> + if ( copy_to_guest_offset(ti->nodes, ti->first_dev, &node, 1) )
> + {
> + ret = -EFAULT;
> + break;
> + }
> +
> + ti->first_dev++;
> +
> + if ( (++dev_cnt > 0x3f) && hypercall_preempt_check() )
> + break;
> + }
> +
> + if ( (ret != -EFAULT) &&
> + __copy_field_to_guest(u_sysctl, op, u.pcitopoinfo.first_dev) )
> + ret = -EFAULT;
> + }
> + break;
> +#endif
> +
> default:
> ret = arch_do_sysctl(op, u_sysctl);
> copyback = 0;
> diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
> index 5aa3708..877b661 100644
> --- a/xen/include/public/sysctl.h
> +++ b/xen/include/public/sysctl.h
> @@ -33,6 +33,7 @@
>
> #include "xen.h"
> #include "domctl.h"
> +#include "physdev.h"
>
> #define XEN_SYSCTL_INTERFACE_VERSION 0x0000000C
>
> @@ -668,6 +669,33 @@ struct xen_sysctl_psr_cmt_op {
> typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
> DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);
>
> +/* XEN_SYSCTL_pcitopoinfo */
> +struct xen_sysctl_pcitopoinfo {
> + /* IN: Number of elements in 'pcitopo' and 'nodes' arrays. */
> + uint32_t num_devs;
> +
> + /*
> + * IN/OUT:
> + * IN: First element of pcitopo array that needs to be processed by
> + * the hypervisor.
> + * OUT: Index of the first still unprocessed element of pcitopo array.
> + */
> + uint32_t first_dev;
> +
> + /* IN: list of devices for which node IDs are requested. */
> + XEN_GUEST_HANDLE_64(physdev_pci_device_t) devs;
> +
> + /*
> + * OUT: node identifier for each device.
> + * If information for a particular device is not available then set
> + * to XEN_INVALID_NODE_ID. In addition, if device is not known to the
> + * hypervisor, sysctl will stop further processing and return -ENODEV.
> + */
> + XEN_GUEST_HANDLE_64(uint32) nodes;
> +};
> +typedef struct xen_sysctl_pcitopoinfo xen_sysctl_pcitopoinfo_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pcitopoinfo_t);
> +
> struct xen_sysctl {
> uint32_t cmd;
> #define XEN_SYSCTL_readconsole 1
> @@ -690,12 +718,14 @@ struct xen_sysctl {
> #define XEN_SYSCTL_scheduler_op 19
> #define XEN_SYSCTL_coverage_op 20
> #define XEN_SYSCTL_psr_cmt_op 21
> +#define XEN_SYSCTL_pcitopoinfo 22
> uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
> union {
> struct xen_sysctl_readconsole readconsole;
> struct xen_sysctl_tbuf_op tbuf_op;
> struct xen_sysctl_physinfo physinfo;
> struct xen_sysctl_cputopoinfo cputopoinfo;
> + struct xen_sysctl_pcitopoinfo pcitopoinfo;
> struct xen_sysctl_numainfo numainfo;
> struct xen_sysctl_sched_id sched_id;
> struct xen_sysctl_perfc_op perfc_op;
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index 4e2c1b7..da7cdfd 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -785,6 +785,7 @@ static int flask_sysctl(int cmd)
> case XEN_SYSCTL_physinfo:
> case XEN_SYSCTL_cputopoinfo:
> case XEN_SYSCTL_numainfo:
> + case XEN_SYSCTL_pcitopoinfo:
> return domain_has_xen(current->domain, XEN__PHYSINFO);
>
> case XEN_SYSCTL_psr_cmt_op:
> diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
> index fe5406d..6e24a6e 100644
> --- a/xen/xsm/flask/policy/access_vectors
> +++ b/xen/xsm/flask/policy/access_vectors
> @@ -28,6 +28,7 @@ class xen
> # XENPF_microcode_update
> microcode
> # XEN_SYSCTL_physinfo, XEN_SYSCTL_cputopoinfo, XEN_SYSCTL_numainfo
> +# XEN_SYSCTL_pcitopoinfo
> physinfo
> # XENPF_platform_quirk
> quirk