[Xen-devel] [PATCH] xen/many: xfree() can tolerate NULL pointers
Replace instances of "if ( p ) xfree(p)" with just "xfree(p)" Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> CC: Keir Fraser <keir@xxxxxxx> CC: Jan Beulich <JBeulich@xxxxxxxx> CC: Tim Deegan <tim@xxxxxxx> CC: Ian Campbell <Ian.Campbell@xxxxxxxxxx> CC: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx> --- This was from some experimentation with semantic patches. 'spatch' can't currently parse some of our macros (e.g. XEN_GUEST_HANDLE()), which cases it to skip large numbers of functions in the codebase --- xen/arch/x86/cpu/mcheck/mctelem.c | 3 +-- xen/arch/x86/mm/hap/hap.c | 3 +-- xen/common/kexec.c | 3 +-- xen/common/sched_credit2.c | 3 +-- xen/common/sched_sedf.c | 3 +-- xen/common/schedule.c | 5 +---- xen/xsm/flask/ss/policydb.c | 8 ++++---- 7 files changed, 10 insertions(+), 18 deletions(-) diff --git a/xen/arch/x86/cpu/mcheck/mctelem.c b/xen/arch/x86/cpu/mcheck/mctelem.c index b8da465..95e83c5 100644 --- a/xen/arch/x86/cpu/mcheck/mctelem.c +++ b/xen/arch/x86/cpu/mcheck/mctelem.c @@ -260,8 +260,7 @@ void __init mctelem_init(unsigned int datasz) if ((mctctl.mctc_elems = xmalloc_array(struct mctelem_ent, MC_NENT)) == NULL || (datarr = xmalloc_bytes(MC_NENT * datasz)) == NULL) { - if (mctctl.mctc_elems) - xfree(mctctl.mctc_elems); + xfree(mctctl.mctc_elems); printk("Allocations for MCA telemetry failed\n"); return; } diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c index abf3d7a..97f5168 100644 --- a/xen/arch/x86/mm/hap/hap.c +++ b/xen/arch/x86/mm/hap/hap.c @@ -168,8 +168,7 @@ int hap_track_dirty_vram(struct domain *d, p2m_ram_logdirty, p2m_ram_rw); } out: - if ( dirty_bitmap ) - xfree(dirty_bitmap); + xfree(dirty_bitmap); return rc; } diff --git a/xen/common/kexec.c b/xen/common/kexec.c index 2239ee8..d4c11cd 100644 --- a/xen/common/kexec.c +++ b/xen/common/kexec.c @@ -454,8 +454,7 @@ static int kexec_init_cpu_notes(const unsigned long cpu) spin_unlock(&crash_notes_lock); /* Always return ok, because whether we successfully allocated or not, * another CPU has successfully allocated. 
 xen/arch/x86/cpu/mcheck/mctelem.c |    3 +--
 xen/arch/x86/mm/hap/hap.c         |    3 +--
 xen/common/kexec.c                |    3 +--
 xen/common/sched_credit2.c        |    3 +--
 xen/common/sched_sedf.c           |    3 +--
 xen/common/schedule.c             |    5 +----
 xen/xsm/flask/ss/policydb.c       |    8 ++++----
 7 files changed, 10 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/mctelem.c b/xen/arch/x86/cpu/mcheck/mctelem.c
index b8da465..95e83c5 100644
--- a/xen/arch/x86/cpu/mcheck/mctelem.c
+++ b/xen/arch/x86/cpu/mcheck/mctelem.c
@@ -260,8 +260,7 @@ void __init mctelem_init(unsigned int datasz)
 	if ((mctctl.mctc_elems = xmalloc_array(struct mctelem_ent,
 	    MC_NENT)) == NULL ||
 	    (datarr = xmalloc_bytes(MC_NENT * datasz)) == NULL) {
-		if (mctctl.mctc_elems)
-			xfree(mctctl.mctc_elems);
+		xfree(mctctl.mctc_elems);
 		printk("Allocations for MCA telemetry failed\n");
 		return;
 	}
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index abf3d7a..97f5168 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -168,8 +168,7 @@ int hap_track_dirty_vram(struct domain *d,
                               p2m_ram_logdirty, p2m_ram_rw);
     }
 out:
-    if ( dirty_bitmap )
-        xfree(dirty_bitmap);
+    xfree(dirty_bitmap);
     return rc;
 }
diff --git a/xen/common/kexec.c b/xen/common/kexec.c
index 2239ee8..d4c11cd 100644
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -454,8 +454,7 @@ static int kexec_init_cpu_notes(const unsigned long cpu)
         spin_unlock(&crash_notes_lock);
         /* Always return ok, because whether we successfully allocated or not,
          * another CPU has successfully allocated. */
-        if ( note )
-            xfree(note);
+        xfree(note);
     }
     else
     {
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 1ca521b..ad0a5d4 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2138,8 +2138,7 @@ csched2_deinit(const struct scheduler *ops)
     struct csched2_private *prv;
 
     prv = CSCHED2_PRIV(ops);
-    if ( prv != NULL )
-        xfree(prv);
+    xfree(prv);
 }
 
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 7c80bad..c4f4b60 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -729,8 +729,7 @@ static void sedf_deinit(const struct scheduler *ops)
     struct sedf_priv_info *prv;
 
     prv = SEDF_PRIV(ops);
-    if ( prv != NULL )
-        xfree(prv);
+    xfree(prv);
 }
 
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 6285a6e..b73177f 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -260,10 +260,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
         if ( vcpu_priv[v->vcpu_id] == NULL )
         {
             for_each_vcpu ( d, v )
-            {
-                if ( vcpu_priv[v->vcpu_id] != NULL )
-                    xfree(vcpu_priv[v->vcpu_id]);
-            }
+                xfree(vcpu_priv[v->vcpu_id]);
             xfree(vcpu_priv);
             SCHED_OP(c->sched, free_domdata, domdata);
             return -ENOMEM;
diff --git a/xen/xsm/flask/ss/policydb.c b/xen/xsm/flask/ss/policydb.c
index 50b2c78..b88ea56 100644
--- a/xen/xsm/flask/ss/policydb.c
+++ b/xen/xsm/flask/ss/policydb.c
@@ -682,17 +682,17 @@ void policydb_destroy(struct policydb *p)
 
     for ( tr = p->role_tr; tr; tr = tr->next )
     {
-        if ( ltr ) xfree(ltr);
+        xfree(ltr);
         ltr = tr;
     }
-    if ( ltr ) xfree(ltr);
+    xfree(ltr);
 
     for ( ra = p->role_allow; ra; ra = ra -> next )
     {
-        if ( lra ) xfree(lra);
+        xfree(lra);
         lra = ra;
     }
-    if ( lra ) xfree(lra);
+    xfree(lra);
 
     for ( rt = p->range_tr; rt; rt = rt -> next )
     {
-- 
1.7.10.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel