[Xen-devel] [PATCH 01/10] x86/domctl: Add XEN_DOMCTL_set_avail_vcpus
This domctl is called when a VCPU is hot-(un)plugged in a guest (via
'xl vcpu-set'). While it is currently only needed by PVH guests, we
call it for all (x86) guests for consistency.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
CC: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
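Not part of the patch, but for reviewers who want to exercise the new
call from a standalone tool, here is a minimal usage sketch of the
libxc wrapper added below. The helper name and error handling are
illustrative assumptions only:

  /* Hypothetical caller: set a guest's available-VCPU count. */
  #include <stdio.h>
  #include <xenctrl.h>

  static int set_guest_avail_vcpus(uint32_t domid, unsigned int count)
  {
      xc_interface *xch = xc_interface_open(NULL, NULL, 0);
      int rc;

      if (!xch)
          return -1;

      /* Thin wrapper around the new XEN_DOMCTL_set_avail_vcpus. */
      rc = xc_domain_set_avail_vcpus(xch, domid, count);
      if (rc)
          fprintf(stderr, "set_avail_vcpus failed: %d\n", rc);

      xc_interface_close(xch);
      return rc;
  }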
 tools/flask/policy/modules/dom0.te  |  2 +-
 tools/flask/policy/modules/xen.if   |  4 ++--
 tools/libxc/include/xenctrl.h       |  5 +++++
 tools/libxc/xc_dom_x86.c            | 11 +++++++++++
 tools/libxl/libxl.c                 | 10 +++++++++-
 tools/libxl/libxl_arch.h            |  4 ++++
 tools/libxl/libxl_arm.c             |  6 ++++++
 tools/libxl/libxl_dom.c             |  7 +++++++
 tools/libxl/libxl_x86.c             |  6 ++++++
 xen/arch/x86/domctl.c               | 13 +++++++++++++
 xen/include/asm-x86/domain.h        |  6 ++++++
 xen/include/public/domctl.h         |  9 +++++++++
 xen/xsm/flask/hooks.c               |  3 +++
 xen/xsm/flask/policy/access_vectors |  2 ++
 14 files changed, 84 insertions(+), 4 deletions(-)
diff --git a/tools/flask/policy/modules/dom0.te b/tools/flask/policy/modules/dom0.te
index 2d982d9..fd60c39 100644
--- a/tools/flask/policy/modules/dom0.te
+++ b/tools/flask/policy/modules/dom0.te
@@ -38,7 +38,7 @@ allow dom0_t dom0_t:domain {
};
allow dom0_t dom0_t:domain2 {
set_cpuid gettsc settsc setscheduler set_max_evtchn set_vnumainfo
- get_vnumainfo psr_cmt_op psr_cat_op
+ get_vnumainfo psr_cmt_op psr_cat_op set_avail_vcpus
};
allow dom0_t dom0_t:resource { add remove };
diff --git a/tools/flask/policy/modules/xen.if b/tools/flask/policy/modules/xen.if
index d83f031..0ac4c5b 100644
--- a/tools/flask/policy/modules/xen.if
+++ b/tools/flask/policy/modules/xen.if
@@ -52,7 +52,7 @@ define(`create_domain_common', `
settime setdomainhandle };
allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim
set_max_evtchn set_vnumainfo get_vnumainfo cacheflush
- psr_cmt_op psr_cat_op soft_reset };
+ psr_cmt_op psr_cat_op soft_reset set_avail_vcpus };
allow $1 $2:security check_context;
allow $1 $2:shadow enable;
allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage
mmuext_op updatemp };
@@ -85,7 +85,7 @@ define(`manage_domain', `
getaddrsize pause unpause trigger shutdown destroy
setaffinity setdomainmaxmem getscheduler resume
setpodtarget getpodtarget };
- allow $1 $2:domain2 set_vnumainfo;
+ allow $1 $2:domain2 { set_vnumainfo set_avail_vcpus };
')
# migrate_domain_out(priv, target)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 2c83544..49e9b9f 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1256,6 +1256,11 @@ int xc_domain_getvnuma(xc_interface *xch,
int xc_domain_soft_reset(xc_interface *xch,
uint32_t domid);
+int xc_domain_set_avail_vcpus(xc_interface *xch,
+ uint32_t domid,
+ unsigned int num_vcpus);
+
+
#if defined(__i386__) || defined(__x86_64__)
/*
* PC BIOS standard E820 types and structure.
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index 0eab8a7..7fcdee1 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -125,6 +125,17 @@ const char *xc_domain_get_native_protocol(xc_interface *xch,
return protocol;
}
+int xc_domain_set_avail_vcpus(xc_interface *xch,
+ uint32_t domid,
+ unsigned int num_vcpus)
+{
+ DECLARE_DOMCTL;
+ domctl.cmd = XEN_DOMCTL_set_avail_vcpus;
+ domctl.domain = (domid_t)domid;
+ domctl.u.avail_vcpus.num = num_vcpus;
+ return do_domctl(xch, &domctl);
+}
+
static int count_pgtables(struct xc_dom_image *dom, xen_vaddr_t from,
xen_vaddr_t to, xen_pfn_t pfn)
{
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 33c5e4c..9b94413 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -5148,11 +5148,12 @@ int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
case LIBXL_DOMAIN_TYPE_HVM:
switch (libxl__device_model_version_running(gc, domid)) {
case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN_TRADITIONAL:
- case LIBXL_DEVICE_MODEL_VERSION_NONE:
rc = libxl__set_vcpuonline_xenstore(gc, domid, cpumap, &info);
break;
case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN:
rc = libxl__set_vcpuonline_qmp(gc, domid, cpumap, &info);
+ /* fallthrough */
+ case LIBXL_DEVICE_MODEL_VERSION_NONE:
break;
default:
rc = ERROR_INVAL;
@@ -5164,6 +5165,13 @@ int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
default:
rc = ERROR_INVAL;
}
+
+ if (!rc) {
+ rc = libxl__arch_set_vcpuonline(ctx, domid, maxcpus);
+ if (rc)
+ LOG(ERROR, "Couldn't set available vcpu count");
+ }
+
out:
libxl_dominfo_dispose(&info);
GC_FREE;
diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
index 5e1fc60..33b5e12 100644
--- a/tools/libxl/libxl_arch.h
+++ b/tools/libxl/libxl_arch.h
@@ -71,6 +71,10 @@ int libxl__arch_extra_memory(libxl__gc *gc,
const libxl_domain_build_info *info,
uint64_t *out);
+_hidden
+int libxl__arch_set_vcpuonline(libxl_ctx *ctx, uint32_t domid,
+ unsigned int vcpu_num);
+
#if defined(__i386__) || defined(__x86_64__)
#define LAPIC_BASE_ADDRESS 0xfee00000
diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
index d842d88..18c79b0 100644
--- a/tools/libxl/libxl_arm.c
+++ b/tools/libxl/libxl_arm.c
@@ -1065,6 +1065,12 @@ void libxl__arch_domain_build_info_acpi_setdefault(
libxl_defbool_setdefault(&b_info->acpi, false);
}
+int libxl__arch_set_vcpuonline(libxl_ctx *ctx, uint32_t domid,
+ unsigned int vcpu_num)
+{
+ return 0;
+}
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index d519c8d..e6e94bb 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -309,6 +309,13 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
return ERROR_FAIL;
}
+ rc = libxl__arch_set_vcpuonline(ctx, domid,
+ libxl_bitmap_count_set(&info->avail_vcpus));
+ if (rc) {
+ LOG(ERROR, "Couldn't set available vcpu count (error %d)", rc);
+ return ERROR_FAIL;
+ }
+
/*
* Check if the domain has any CPU or node affinity already. If not, try
* to build up the latter via automatic NUMA placement. In fact, in case
diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
index e1844c8..3a5264d 100644
--- a/tools/libxl/libxl_x86.c
+++ b/tools/libxl/libxl_x86.c
@@ -588,6 +588,12 @@ void libxl__arch_domain_build_info_acpi_setdefault(
libxl_defbool_setdefault(&b_info->acpi, true);
}
+int libxl__arch_set_vcpuonline(libxl_ctx *ctx, uint32_t domid,
+ unsigned int vcpu_num)
+{
+ return xc_domain_set_avail_vcpus(ctx->xch, domid, vcpu_num);
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 2a2fe04..78b7d4b 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1430,6 +1430,19 @@ long arch_do_domctl(
}
break;
+ case XEN_DOMCTL_set_avail_vcpus:
+ {
+ unsigned int num = domctl->u.avail_vcpus.num;
+
+ ret = -EINVAL;
+ if ( num > d->max_vcpus )
+ break;
+
+ d->arch.avail_vcpus = num;
+ ret = 0;
+ break;
+ }
+
default:
ret = iommu_do_domctl(domctl, d, u_domctl);
break;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index f6a40eb..a279e4a 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -414,6 +414,12 @@ struct arch_domain
/* Emulated devices enabled bitmap. */
uint32_t emulation_flags;
+
+ /*
+ * Number of VCPUs that were online during guest creation
+ * plus/minus any hot-(un)plugged VCPUs.
+ */
+ unsigned int avail_vcpus;
} __cacheline_aligned;
#define has_vlapic(d) (!!((d)->arch.emulation_flags & XEN_X86_EMU_LAPIC))
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 177319d..f114645 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1010,6 +1010,13 @@ struct xen_domctl_vcpu_msrs {
};
typedef struct xen_domctl_vcpu_msrs xen_domctl_vcpu_msrs_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
+
+/* XEN_DOMCTL_set_avail_vcpus */
+struct xen_domctl_avail_vcpus {
+ uint32_t num; /* available number of vcpus */
+};
+typedef struct xen_domctl_avail_vcpus xen_domctl_avail_vcpus_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_avail_vcpus_t);
#endif
/* XEN_DOMCTL_setvnumainfo: specifies a virtual NUMA topology for the guest */
@@ -1221,6 +1228,7 @@ struct xen_domctl {
#define XEN_DOMCTL_monitor_op 77
#define XEN_DOMCTL_psr_cat_op 78
#define XEN_DOMCTL_soft_reset 79
+#define XEN_DOMCTL_set_avail_vcpus 80
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -1269,6 +1277,7 @@ struct xen_domctl {
struct xen_domctl_cpuid cpuid;
struct xen_domctl_vcpuextstate vcpuextstate;
struct xen_domctl_vcpu_msrs vcpu_msrs;
+ struct xen_domctl_avail_vcpus avail_vcpus;
#endif
struct xen_domctl_set_access_required access_required;
struct xen_domctl_audit_p2m audit_p2m;
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 177c11f..377549a 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -748,6 +748,9 @@ static int flask_domctl(struct domain *d, int cmd)
case XEN_DOMCTL_soft_reset:
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SOFT_RESET);
+ case XEN_DOMCTL_set_avail_vcpus:
+ return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_AVAIL_VCPUS);
+
default:
return avc_unknown_permission("domctl", cmd);
}
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 49c9a9e..f8a5e6c 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -244,6 +244,8 @@ class domain2
mem_sharing
# XEN_DOMCTL_psr_cat_op
psr_cat_op
+# XEN_DOMCTL_set_avail_vcpus
+ set_avail_vcpus
}
# Similar to class domain, but primarily contains domctls related to HVM domains
--
2.7.4
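Also not part of the patch: given the handler above, the intended
semantics are that any count up to d->max_vcpus is accepted and
anything larger fails with -EINVAL. A hypothetical bounds check one
could run against a test domain (function name is made up):

  /* Expect success at the limit and failure just past it. */
  static int check_avail_vcpus_bounds(xc_interface *xch, uint32_t domid,
                                      unsigned int max_vcpus)
  {
      if (xc_domain_set_avail_vcpus(xch, domid, max_vcpus))
          return -1; /* should succeed for count <= max_vcpus */

      if (!xc_domain_set_avail_vcpus(xch, domid, max_vcpus + 1))
          return -1; /* should fail (EINVAL) for count > max_vcpus */

      return 0;
  }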