[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] mem_event: use C99 initializers for mem_event_request_t users
# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1329772592 -3600
# Node ID 0c3d19f40ab145d101de84051c3e00eef17fa1cb
# Parent 626fa29dc04d7ed77ae16cfee8a767ff6bb819ad
mem_event: use C99 initializers for mem_event_request_t users
Use C99 initializers for mem_event_request_t users to make sure req is
always cleared, even with local debug patches that shuffle code around
to have a single exit point.
The common case is to use and send req, so it does not add significant
overhead to always clear req.
Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
diff -r 626fa29dc04d -r 0c3d19f40ab1 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Mon Feb 20 22:09:40 2012 +0100
+++ b/xen/arch/x86/hvm/hvm.c Mon Feb 20 22:16:32 2012 +0100
@@ -4302,7 +4302,7 @@
{
struct vcpu* v = current;
struct domain *d = v->domain;
- mem_event_request_t req;
+ mem_event_request_t req = { .reason = reason };
int rc;
if ( !(p & HVMPME_MODE_MASK) )
@@ -4321,9 +4321,6 @@
else if ( rc < 0 )
return rc;
- memset(&req, 0, sizeof(req));
- req.reason = reason;
-
if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
{
req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
diff -r 626fa29dc04d -r 0c3d19f40ab1 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Mon Feb 20 22:09:40 2012 +0100
+++ b/xen/arch/x86/mm/p2m.c Mon Feb 20 22:16:32 2012 +0100
@@ -913,7 +913,7 @@
void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
p2m_type_t p2mt)
{
- mem_event_request_t req;
+ mem_event_request_t req = { .gfn = gfn };
/* We allow no ring in this unique case, because it won't affect
* correctness of the guest execution at this point. If this is the only
@@ -924,8 +924,6 @@
return;
/* Send release notification to pager */
- memset(&req, 0, sizeof(req));
- req.gfn = gfn;
req.flags = MEM_EVENT_FLAG_DROP_PAGE;
/* Update stats unless the page hasn't yet been evicted */
@@ -962,7 +960,7 @@
void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
{
struct vcpu *v = current;
- mem_event_request_t req;
+ mem_event_request_t req = { .gfn = gfn };
p2m_type_t p2mt;
p2m_access_t a;
mfn_t mfn;
@@ -980,8 +978,6 @@
else if ( rc < 0 )
return;
- memset(&req, 0, sizeof(req));
-
/* Fix p2m mapping */
gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
@@ -1011,7 +1007,6 @@
}
/* Send request to pager */
- req.gfn = gfn;
req.p2mt = p2mt;
req.vcpu_id = v->vcpu_id;
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.