
[Xen-devel] [PATCH v2 6/8] libxl: implement virConnectGetAllDomainStats



Introduce support for the connectGetAllDomainStats call, which
allows us to fetch _all_ domain statistics, including network, block,
vcpu and memory. The changes are rather mechanical and mostly
take care of formatting the data for export.
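
For illustration only (not part of this patch), a minimal client
sketch of how the exported statistics can be consumed through the
public API; the "xen:///system" URI and the zero @stats/@flags values
are just example choices:

  #include <stdio.h>
  #include <libvirt/libvirt.h>

  int main(void)
  {
      virConnectPtr conn = virConnectOpenReadOnly("xen:///system");
      virDomainStatsRecordPtr *records = NULL;
      int nrecords;
      int i, j;

      if (!conn)
          return 1;

      /* @stats == 0 requests every statistics group the driver
       * supports; virDomainListGetStats exists for an explicit
       * list of domains. */
      nrecords = virConnectGetAllDomainStats(conn, 0, &records, 0);

      for (i = 0; i < nrecords; i++) {
          printf("domain: %s\n", virDomainGetName(records[i]->dom));
          for (j = 0; j < records[i]->nparams; j++)
              printf("  %s\n", records[i]->params[j].field);
      }

      if (nrecords >= 0)
          virDomainStatsRecordListFree(records);
      virConnectClose(conn);
      return 0;
  }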

Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
---
Changes since v1:
 - Rework flags checking on libxlDomainGetStats
 for VIR_DOMAIN_STATS_{VCPU,INTERFACE,BLOCK}
 - Removed path since we are reusing <virDomainNetDef>.ifname
 - Init dominfo and dispose it on cleanup.
 - Fixed VIR_FREE issue that was reported by make syntax-check
 - Bump version to 1.2.22
---
 src/libxl/libxl_driver.c | 266 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 266 insertions(+)

diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c
index ba1d67b..8db6536 100644
--- a/src/libxl/libxl_driver.c
+++ b/src/libxl/libxl_driver.c
@@ -5238,6 +5238,271 @@ libxlDomainMemoryStats(virDomainPtr dom,
 
 #undef LIBXL_SET_MEMSTAT
 
+#define LIBXL_RECORD_UINT(error, key, value, ...) \
+do { \
+    char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
+    snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
+             key, ##__VA_ARGS__); \
+    if (virTypedParamsAddUInt(&tmp->params, \
+                              &tmp->nparams, \
+                              &maxparams, \
+                              param_name, \
+                              value) < 0) \
+        goto error;                       \
+} while (0)
+
+#define LIBXL_RECORD_LL(error, key, value, ...) \
+do { \
+    char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
+    snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
+             key, ##__VA_ARGS__); \
+    if (virTypedParamsAddLLong(&tmp->params, \
+                               &tmp->nparams, \
+                               &maxparams, \
+                               param_name, \
+                               value) < 0) \
+        goto error;                         \
+} while (0)
+
+#define LIBXL_RECORD_ULL(error, key, value, ...) \
+do { \
+    char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
+    snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
+             key, ##__VA_ARGS__); \
+    if (virTypedParamsAddULLong(&tmp->params, \
+                                &tmp->nparams, \
+                                &maxparams, \
+                                param_name, \
+                                value) < 0) \
+        goto error;                         \
+} while (0)
+
+#define LIBXL_RECORD_STR(error, key, value, ...) \
+do { \
+    char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
+    snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
+             key, ##__VA_ARGS__); \
+    if (virTypedParamsAddString(&tmp->params, \
+                                &tmp->nparams, \
+                                &maxparams, \
+                                param_name, \
+                                value) < 0) \
+        goto error;                         \
+} while (0)
+
+static int
+libxlDomainGetStats(virConnectPtr conn,
+                    virDomainObjPtr dom,
+                    unsigned int stats,
+                    virDomainStatsRecordPtr *record)
+{
+    libxlDriverPrivatePtr driver = conn->privateData;
+    libxlDriverConfigPtr cfg = libxlDriverConfigGet(driver);
+    virDomainStatsRecordPtr tmp;
+    libxl_dominfo d_info;
+    libxl_vcpuinfo *vcpuinfo = NULL;
+    int maxcpu, hostcpus;
+    unsigned long long mem, maxmem;
+    int maxparams = 0;
+    int ret = -1;
+    size_t i, state;
+    unsigned int domflags = stats & (VIR_DOMAIN_STATS_BALLOON |
+                                     VIR_DOMAIN_STATS_CPU_TOTAL);
+
+    if (VIR_ALLOC(tmp) < 0)
+        return ret;
+
+    libxl_dominfo_init(&d_info);
+
+    mem = virDomainDefGetMemoryInitial(dom->def);
+    maxmem = virDomainDefGetMemoryActual(dom->def);
+    d_info.cpu_time = 0;
+
+    if (domflags && virDomainObjIsActive(dom) &&
+        !libxl_domain_info(cfg->ctx, &d_info, dom->def->id)) {
+        mem = d_info.current_memkb;
+        maxmem = d_info.max_memkb;
+    }
+
+    if (stats & VIR_DOMAIN_STATS_STATE) {
+        LIBXL_RECORD_UINT(cleanup, "state.reason", dom->state.reason);
+        LIBXL_RECORD_UINT(cleanup, "state.state", dom->state.state);
+    }
+
+    if (stats & VIR_DOMAIN_STATS_BALLOON) {
+        LIBXL_RECORD_ULL(cleanup, "balloon.current", mem);
+        LIBXL_RECORD_ULL(cleanup, "balloon.maximum", maxmem);
+    }
+
+    if (stats & VIR_DOMAIN_STATS_CPU_TOTAL)
+        LIBXL_RECORD_ULL(cleanup, "cpu.time", d_info.cpu_time);
+
+    if (stats & VIR_DOMAIN_STATS_VCPU) {
+        vcpuinfo = libxl_list_vcpu(cfg->ctx, dom->def->id, &maxcpu, &hostcpus);
+
+        for (i = 0; i < dom->def->vcpus; i++) {
+            if (!vcpuinfo)
+                state = VIR_VCPU_OFFLINE;
+            else if (vcpuinfo[i].running)
+                state = VIR_VCPU_RUNNING;
+            else if (vcpuinfo[i].blocked)
+                state = VIR_VCPU_BLOCKED;
+            else
+                state = VIR_VCPU_OFFLINE;
+
+            LIBXL_RECORD_UINT(cleanup_vcpu, "vcpu.%zu.state", state, i);
+
+            /* vcputime is shown only if the VM is active */
+            if (!virDomainObjIsActive(dom))
+                continue;
+
+            LIBXL_RECORD_ULL(cleanup_vcpu, "vcpu.%zu.time", vcpuinfo[i].vcpu_time, i);
+        }
+
+        if (vcpuinfo)
+            libxl_vcpuinfo_list_free(vcpuinfo, maxcpu);
+    }
+
+    if (stats & VIR_DOMAIN_STATS_INTERFACE) {
+        for (i = 0; i < dom->def->nnets; i++) {
+            virDomainNetDefPtr net = dom->def->nets[i];
+            struct _virDomainInterfaceStats netstats;
+
+            if (!virDomainObjIsActive(dom) || !net->ifname)
+                continue;
+
+            if ((ret = virNetInterfaceStats(net->ifname, &netstats)) < 0)
+                continue;
+
+            LIBXL_RECORD_ULL(cleanup, "net.%zu.rx.bytes", netstats.rx_bytes, 
i);
+            LIBXL_RECORD_ULL(cleanup, "net.%zu.rx.pkts",  netstats.rx_packets, 
 i);
+            LIBXL_RECORD_ULL(cleanup, "net.%zu.rx.drop",  netstats.rx_drop, i);
+            LIBXL_RECORD_ULL(cleanup, "net.%zu.rx.errs",  netstats.rx_errs,  
i);
+            LIBXL_RECORD_ULL(cleanup, "net.%zu.tx.bytes", netstats.tx_bytes, 
i);
+            LIBXL_RECORD_ULL(cleanup, "net.%zu.tx.pkts",  netstats.tx_packets, 
 i);
+            LIBXL_RECORD_ULL(cleanup, "net.%zu.tx.drop",  netstats.tx_drop, i);
+            LIBXL_RECORD_ULL(cleanup, "net.%zu.tx.errs",  netstats.tx_errs,  
i);
+            LIBXL_RECORD_STR(cleanup, "net.%zu.name", net->ifname, i);
+        }
+
+        LIBXL_RECORD_UINT(cleanup, "net.count", dom->def->nnets);
+    }
+
+    if (stats & VIR_DOMAIN_STATS_BLOCK) {
+        for (i = 0; i < dom->def->ndisks; i++) {
+            virDomainDiskDefPtr disk = dom->def->disks[i];
+            struct _libxlBlockStats blkstats;
+
+            if (!virDomainObjIsActive(dom))
+                continue;
+
+            memset(&blkstats, 0, sizeof(libxlBlockStats));
+            if ((ret = libxlDomainBlockStatsGather(dom, disk->dst, &blkstats)) < 0)
+                continue;
+
+            LIBXL_RECORD_LL(cleanup, "block.%zu.rd.reqs",  blkstats.rd_req, i);
+            LIBXL_RECORD_LL(cleanup, "block.%zu.rd.bytes", blkstats.rd_bytes, 
i);
+            LIBXL_RECORD_LL(cleanup, "block.%zu.wr.reqs",  blkstats.wr_req, i);
+            LIBXL_RECORD_LL(cleanup, "block.%zu.wr.bytes", blkstats.wr_bytes, 
i);
+            LIBXL_RECORD_LL(cleanup, "block.%zu.fl.reqs",  blkstats.f_req, i);
+
+            if (STREQ_NULLABLE(blkstats.backend, "vbd")) {
+                LIBXL_RECORD_LL(cleanup, "block.%zu.discard.reqs", 
blkstats.u.vbd.ds_req, i);
+                LIBXL_RECORD_LL(cleanup, "block.%zu.errs", 
blkstats.u.vbd.oo_req, i);
+            }
+            LIBXL_RECORD_STR(cleanup, "block.%zu.name", disk->dst, i);
+            LIBXL_RECORD_STR(cleanup, "block.%zu.path", disk->src->path, i);
+        }
+
+        LIBXL_RECORD_UINT(cleanup, "block.count", dom->def->ndisks);
+    }
+
+    if (!(tmp->dom = virGetDomain(conn, dom->def->name, dom->def->uuid)))
+        goto cleanup;
+
+    *record = tmp;
+    return 0;
+
+ cleanup_vcpu:
+    if (vcpuinfo)
+        libxl_vcpuinfo_list_free(vcpuinfo, maxcpu);
+ cleanup:
+    libxl_dominfo_dispose(&d_info);
+    virTypedParamsFree(tmp->params, tmp->nparams);
+    VIR_FREE(tmp);
+    return ret;
+}
+
+static int
+libxlConnectGetAllDomainStats(virConnectPtr conn,
+                              virDomainPtr *doms,
+                              unsigned int ndoms,
+                              unsigned int stats,
+                              virDomainStatsRecordPtr **retStats,
+                              unsigned int flags)
+{
+    libxlDriverPrivatePtr driver = conn->privateData;
+    virDomainObjPtr *vms = NULL;
+    virDomainObjPtr vm;
+    size_t nvms;
+    virDomainStatsRecordPtr *tmpstats = NULL;
+    int nstats = 0;
+    size_t i;
+    int ret = -1;
+    unsigned int lflags = flags & (VIR_CONNECT_LIST_DOMAINS_FILTERS_ACTIVE |
+                                   VIR_CONNECT_LIST_DOMAINS_FILTERS_PERSISTENT |
+                                   VIR_CONNECT_LIST_DOMAINS_FILTERS_STATE);
+
+    virCheckFlags(VIR_CONNECT_LIST_DOMAINS_FILTERS_ACTIVE |
+                  VIR_CONNECT_LIST_DOMAINS_FILTERS_PERSISTENT |
+                  VIR_CONNECT_LIST_DOMAINS_FILTERS_STATE, -1);
+
+    if (virConnectGetAllDomainStatsEnsureACL(conn) < 0)
+        return -1;
+
+    if (ndoms) {
+        if (virDomainObjListConvert(driver->domains, conn, doms, ndoms, &vms,
+                                    &nvms, virConnectGetAllDomainStatsCheckACL,
+                                    lflags, true) < 0)
+            return -1;
+    } else {
+        if (virDomainObjListCollect(driver->domains, conn, &vms, &nvms,
+                                    virConnectGetAllDomainStatsCheckACL,
+                                    lflags) < 0)
+            return -1;
+    }
+
+    if (VIR_ALLOC_N(tmpstats, nvms + 1) < 0)
+        return -1;
+
+    for (i = 0; i < nvms; i++) {
+        virDomainStatsRecordPtr tmp = NULL;
+        vm = vms[i];
+
+        virObjectLock(vm);
+
+        if (libxlDomainGetStats(conn, vm, stats, &tmp) < 0) {
+            virObjectUnlock(vm);
+            goto cleanup;
+        }
+
+        if (tmp)
+            tmpstats[nstats++] = tmp;
+
+        virObjectUnlock(vm);
+    }
+
+    *retStats = tmpstats;
+    tmpstats = NULL;
+
+    ret = nstats;
+
+ cleanup:
+    virDomainStatsRecordListFree(tmpstats);
+    virObjectListFreeCount(vms, nvms);
+    return ret;
+}
+
 static int
 libxlConnectDomainEventRegisterAny(virConnectPtr conn, virDomainPtr dom, int eventID,
                                    virConnectDomainEventGenericCallback callback,
@@ -5836,6 +6101,7 @@ static virHypervisorDriver libxlHypervisorDriver = {
     .domainInterfaceStats = libxlDomainInterfaceStats, /* 1.2.22 */
     .domainMemoryStats = libxlDomainMemoryStats, /* 1.2.22 */
     .domainGetCPUStats = libxlDomainGetCPUStats, /* 1.2.22 */
+    .connectGetAllDomainStats = libxlConnectGetAllDomainStats, /* 1.2.22 */
     .connectDomainEventRegister = libxlConnectDomainEventRegister, /* 0.9.0 */
     .connectDomainEventDeregister = libxlConnectDomainEventDeregister, /* 0.9.0 */
     .domainManagedSave = libxlDomainManagedSave, /* 0.9.2 */
-- 
2.1.4

