
Re: [RFC PATCH v1 4/6] xentop: collect IRQ and HYP time statistics.



On 12.06.20 02:22, Volodymyr Babchuk wrote:
As the scheduler code now collects the time spent in IRQ handlers and
in do_softirq(), we can present those values to userspace tools like
xentop, so the system administrator can see how the system behaves.

We update the counters only in sched_get_time_correction() to
minimize the number of spinlock acquisitions. As atomic_t is only 32
bits wide, it is not enough to store time with nanosecond precision,
so we need 64-bit variables and must protect them with a spinlock.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>
---
  xen/common/sched/core.c     | 17 +++++++++++++++++
  xen/common/sysctl.c         |  1 +
  xen/include/public/sysctl.h |  4 +++-
  xen/include/xen/sched.h     |  2 ++
  4 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index a7294ff5c3..ee6b1d9161 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -95,6 +95,10 @@ static struct scheduler __read_mostly ops;
  static bool scheduler_active;

+static DEFINE_SPINLOCK(sched_stat_lock);
+s_time_t sched_stat_irq_time;
+s_time_t sched_stat_hyp_time;
+
  static void sched_set_affinity(
      struct sched_unit *unit, const cpumask_t *hard, const cpumask_t *soft);
@@ -994,9 +998,22 @@ s_time_t sched_get_time_correction(struct sched_unit *u)
              break;
      }
+    spin_lock_irqsave(&sched_stat_lock, flags);
+    sched_stat_irq_time += irq;
+    sched_stat_hyp_time += hyp;
+    spin_unlock_irqrestore(&sched_stat_lock, flags);

Please don't use a lock. Just use add_sized() instead, which will do
the addition atomically.
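
For reference, a minimal sketch of that suggestion, assuming Xen's
add_sized() helper (a size-matched, non-torn add of the variable) and
reusing the irq/hyp locals of the function above:

    /* Lock-free update sketch -- per-variable atomicity only, so the
     * two counters may transiently disagree; fine for statistics. */
    add_sized(&sched_stat_irq_time, irq);
    add_sized(&sched_stat_hyp_time, hyp);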

+
      return irq + hyp;
  }
+void sched_get_time_stats(uint64_t *irq_time, uint64_t *hyp_time)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&sched_stat_lock, flags);
+    *irq_time = sched_stat_irq_time;
+    *hyp_time = sched_stat_hyp_time;
+    spin_unlock_irqrestore(&sched_stat_lock, flags);

read_atomic() will do the job without a lock.
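
The matching lock-free read, as a sketch assuming Xen's read_atomic()
macro (a single, non-torn load of the variable):

    void sched_get_time_stats(uint64_t *irq_time, uint64_t *hyp_time)
    {
        /* No lock: each counter is read atomically on its own, so the
         * pair is no longer sampled at a single instant -- acceptable
         * for statistics. */
        *irq_time = read_atomic(&sched_stat_irq_time);
        *hyp_time = read_atomic(&sched_stat_hyp_time);
    }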

  }
/*
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index 1c6a817476..00683bc93f 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -270,6 +270,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
          pi->scrub_pages = 0;
          pi->cpu_khz = cpu_khz;
          pi->max_mfn = get_upper_mfn_bound();
+        sched_get_time_stats(&pi->irq_time, &pi->hyp_time);
          arch_do_physinfo(pi);
          if ( iommu_enabled )
          {
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index 3a08c512e8..f320144d40 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -35,7 +35,7 @@
  #include "domctl.h"
  #include "physdev.h"
-#define XEN_SYSCTL_INTERFACE_VERSION 0x00000013
+#define XEN_SYSCTL_INTERFACE_VERSION 0x00000014
/*
   * Read console content from Xen buffer ring.
@@ -118,6 +118,8 @@ struct xen_sysctl_physinfo {
      uint64_aligned_t scrub_pages;
      uint64_aligned_t outstanding_pages;
      uint64_aligned_t max_mfn; /* Largest possible MFN on this host */
+    uint64_aligned_t irq_time;
+    uint64_aligned_t hyp_time;

Would hypfs work, too? That would avoid the need to extend yet
another hypercall.
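
For context, a rough sketch of a hypfs-based alternative, assuming the
hypfs API merged for Xen 4.14 (HYPFS_DIR_INIT, HYPFS_UINT_INIT,
hypfs_add_dir, hypfs_add_leaf, hypfs_root); the entry names and exact
macro signatures here are illustrative, not taken from the tree:

    /* Illustrative only: expose the counters as /sched/irq_time and
     * /sched/hyp_time instead of widening struct xen_sysctl_physinfo. */
    static HYPFS_DIR_INIT(sched_dir, "sched");
    static HYPFS_UINT_INIT(irq_time, "irq_time", sched_stat_irq_time);
    static HYPFS_UINT_INIT(hyp_time, "hyp_time", sched_stat_hyp_time);

    static int __init sched_stat_hypfs_init(void)
    {
        hypfs_add_dir(&hypfs_root, &sched_dir, true);
        hypfs_add_leaf(&sched_dir, &irq_time, true);
        hypfs_add_leaf(&sched_dir, &hyp_time, true);
        return 0;
    }
    __initcall(sched_stat_hypfs_init);

A tool could then read the values through the stable hypfs interface
without bumping XEN_SYSCTL_INTERFACE_VERSION.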


Juergen
