[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v2 08/12] xen/sched: Clean up trace handling
There is no need for bitfields anywhere - use more sensible types. There is
also no need to cast 'd' to (unsigned char *) before passing it to a function
taking void *.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
CC: Ian Jackson <iwj@xxxxxxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Julien Grall <julien@xxxxxxx>
CC: Juergen Gross <jgross@xxxxxxxx>
CC: Dario Faggioli <dfaggioli@xxxxxxxx>
v2:
* New
---
xen/common/sched/core.c | 4 ++--
xen/common/sched/credit.c | 38 ++++++++++++++++++--------------------
xen/common/sched/null.c | 42 +++++++++++++++++++++++++-----------------
3 files changed, 45 insertions(+), 39 deletions(-)
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 8f4b1ca10d1c..fe133cbf117c 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -205,7 +205,7 @@ static inline struct scheduler *vcpu_scheduler(const struct vcpu *v)
static inline void trace_runstate_change(const struct vcpu *v, int new_state)
{
- struct { uint32_t vcpu:16, domain:16; } d;
+ struct { uint16_t vcpu, domain; } d;
uint32_t event;
if ( likely(!tb_init_done) )
@@ -223,7 +223,7 @@ static inline void trace_runstate_change(const struct vcpu *v, int new_state)
static inline void trace_continue_running(const struct vcpu *v)
{
- struct { uint32_t vcpu:16, domain:16; } d;
+ struct { uint16_t vcpu, domain; } d;
if ( likely(!tb_init_done) )
return;
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index d0aa017c643e..f277fa37a8b1 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -1828,21 +1828,18 @@ static void csched_schedule(
SCHED_STAT_CRANK(schedule);
CSCHED_UNIT_CHECK(unit);
- /*
- * Here in Credit1 code, we usually just call TRACE_nD() helpers, and
- * don't care about packing. But scheduling happens very often, so it
- * actually is important that the record is as small as possible.
- */
if ( unlikely(tb_init_done) )
{
struct {
- unsigned cpu:16, tasklet:8, idle:8;
- } d;
- d.cpu = cur_cpu;
- d.tasklet = tasklet_work_scheduled;
- d.idle = is_idle_unit(unit);
- __trace_var(TRC_CSCHED_SCHEDULE, 1, sizeof(d),
- (unsigned char *)&d);
+ uint16_t cpu;
+ uint8_t tasklet, idle;
+ } d = {
+ .cpu = cur_cpu,
+ .tasklet = tasklet_work_scheduled,
+ .idle = is_idle_unit(unit),
+ };
+
+ __trace_var(TRC_CSCHED_SCHEDULE, 1, sizeof(d), &d);
}
runtime = now - unit->state_entry_time;
@@ -1904,14 +1901,15 @@ static void csched_schedule(
if ( unlikely(tb_init_done) )
{
struct {
- unsigned unit:16, dom:16;
- unsigned runtime;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.runtime = runtime;
- __trace_var(TRC_CSCHED_RATELIMIT, 1, sizeof(d),
- (unsigned char *)&d);
+ uint16_t unit, dom;
+ uint32_t runtime;
+ } d = {
+ .dom = unit->domain->domain_id,
+ .unit = unit->unit_id,
+ .runtime = runtime,
+ };
+
+ __trace_var(TRC_CSCHED_RATELIMIT, 1, sizeof(d), &d);
}
goto out;
diff --git a/xen/common/sched/null.c b/xen/common/sched/null.c
index 82d5d1baab85..deb59747fbe8 100644
--- a/xen/common/sched/null.c
+++ b/xen/common/sched/null.c
@@ -329,10 +329,12 @@ pick_res(const struct null_private *prv, const struct sched_unit *unit)
struct {
uint16_t unit, dom;
uint32_t new_cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.new_cpu = new_cpu;
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .new_cpu = new_cpu,
+ };
+
__trace_var(TRC_SNULL_PICKED_CPU, 1, sizeof(d), &d);
}
@@ -357,10 +359,12 @@ static void unit_assign(struct null_private *prv, struct sched_unit *unit,
struct {
uint16_t unit, dom;
uint32_t cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.cpu = cpu;
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .cpu = cpu,
+ };
+
__trace_var(TRC_SNULL_UNIT_ASSIGN, 1, sizeof(d), &d);
}
}
@@ -388,10 +392,12 @@ static bool unit_deassign(struct null_private *prv, const struct sched_unit *uni
struct {
uint16_t unit, dom;
uint32_t cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.cpu = cpu;
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .cpu = cpu,
+ };
+
__trace_var(TRC_SNULL_UNIT_DEASSIGN, 1, sizeof(d), &d);
}
@@ -691,11 +697,13 @@ static void null_unit_migrate(const struct scheduler *ops,
struct {
uint16_t unit, dom;
uint16_t cpu, new_cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.cpu = sched_unit_master(unit);
- d.new_cpu = new_cpu;
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .cpu = sched_unit_master(unit),
+ .new_cpu = new_cpu,
+ };
+
__trace_var(TRC_SNULL_MIGRATE, 1, sizeof(d), &d);
}
--
2.11.0
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.