[Xen-devel] [PATCH 3/4] use xzalloc in driver code
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/drivers/acpi/pmstat.c
+++ b/xen/drivers/acpi/pmstat.c
@@ -221,9 +221,8 @@ static int get_cpufreq_para(struct xen_s
         return -EAGAIN;
     }
 
-    if ( !(affected_cpus = xmalloc_array(uint32_t, op->u.get_para.cpu_num)) )
+    if ( !(affected_cpus = xzalloc_array(uint32_t, op->u.get_para.cpu_num)) )
         return -ENOMEM;
-    memset(affected_cpus, 0, op->u.get_para.cpu_num * sizeof(uint32_t));
     for_each_cpu_mask(cpu, policy->cpus)
         affected_cpus[j++] = cpu;
     ret = copy_to_guest(op->u.get_para.affected_cpus,
@@ -233,10 +232,8 @@ static int get_cpufreq_para(struct xen_s
         return ret;
 
     if ( !(scaling_available_frequencies =
-           xmalloc_array(uint32_t, op->u.get_para.freq_num)) )
+           xzalloc_array(uint32_t, op->u.get_para.freq_num)) )
         return -ENOMEM;
-    memset(scaling_available_frequencies, 0,
-           op->u.get_para.freq_num * sizeof(uint32_t));
     for ( i = 0; i < op->u.get_para.freq_num; i++ )
         scaling_available_frequencies[i] =
                         pmpt->perf.states[i].core_frequency * 1000;
@@ -247,10 +244,8 @@ static int get_cpufreq_para(struct xen_s
         return ret;
 
     if ( !(scaling_available_governors =
-           xmalloc_array(char, gov_num * CPUFREQ_NAME_LEN)) )
+           xzalloc_array(char, gov_num * CPUFREQ_NAME_LEN)) )
         return -ENOMEM;
-    memset(scaling_available_governors, 0,
-           gov_num * CPUFREQ_NAME_LEN * sizeof(char));
     if ( (ret = read_scaling_available_governors(scaling_available_governors,
                 gov_num * CPUFREQ_NAME_LEN * sizeof(char))) )
     {
--- a/xen/drivers/cpufreq/cpufreq.c
+++ b/xen/drivers/cpufreq/cpufreq.c
@@ -148,11 +148,10 @@ int cpufreq_add_cpu(unsigned int cpu)
     }
 
     if (!domexist) {
-        cpufreq_dom = xmalloc(struct cpufreq_dom);
+        cpufreq_dom = xzalloc(struct cpufreq_dom);
         if (!cpufreq_dom)
             return -ENOMEM;
 
-        memset(cpufreq_dom, 0, sizeof(struct cpufreq_dom));
         cpufreq_dom->dom = dom;
         list_add(&cpufreq_dom->node, &cpufreq_dom_list_head);
     } else {
@@ -176,11 +175,10 @@ int cpufreq_add_cpu(unsigned int cpu)
     }
 
     if (!domexist || hw_all) {
-        policy = xmalloc(struct cpufreq_policy);
+        policy = xzalloc(struct cpufreq_policy);
         if (!policy)
             ret = -ENOMEM;
 
-        memset(policy, 0, sizeof(struct cpufreq_policy));
         policy->cpu = cpu;
         per_cpu(cpufreq_cpu_policy, cpu) = policy;
 
@@ -374,13 +372,12 @@ int set_px_pminfo(uint32_t acpi_id, stru
     pmpt = processor_pminfo[cpuid];
     if ( !pmpt )
     {
-        pmpt = xmalloc(struct processor_pminfo);
+        pmpt = xzalloc(struct processor_pminfo);
         if ( !pmpt )
         {
             ret = -ENOMEM;
             goto out;
         }
-        memset(pmpt, 0, sizeof(*pmpt));
         processor_pminfo[cpuid] = pmpt;
     }
     pxpt = &pmpt->perf;
--- a/xen/drivers/cpufreq/utility.c
+++ b/xen/drivers/cpufreq/utility.c
@@ -110,22 +110,21 @@ int cpufreq_statistic_init(unsigned int
 
     count = pmpt->perf.state_count;
 
-    pxpt = xmalloc(struct pm_px);
+    pxpt = xzalloc(struct pm_px);
     if ( !pxpt )
     {
         spin_unlock(cpufreq_statistic_lock);
         return -ENOMEM;
     }
-    memset(pxpt, 0, sizeof(*pxpt));
     per_cpu(cpufreq_statistic_data, cpuid) = pxpt;
 
-    pxpt->u.trans_pt = xmalloc_array(uint64_t, count * count);
+    pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count);
     if (!pxpt->u.trans_pt) {
         xfree(pxpt);
         spin_unlock(cpufreq_statistic_lock);
         return -ENOMEM;
     }
-    pxpt->u.pt = xmalloc_array(struct pm_px_val, count);
+    pxpt->u.pt = xzalloc_array(struct pm_px_val, count);
     if (!pxpt->u.pt) {
         xfree(pxpt->u.trans_pt);
         xfree(pxpt);
@@ -133,9 +132,6 @@ int cpufreq_statistic_init(unsigned int
         return -ENOMEM;
     }
 
-    memset(pxpt->u.trans_pt, 0, count * count * (sizeof(uint64_t)));
-    memset(pxpt->u.pt, 0, count * (sizeof(struct pm_px_val)));
-
     pxpt->u.total = pmpt->perf.state_count;
     pxpt->u.usable = pmpt->perf.state_count -
             pmpt->perf.platform_limit;
--- a/xen/drivers/passthrough/amd/iommu_detect.c
+++ b/xen/drivers/passthrough/amd/iommu_detect.c
@@ -111,13 +111,12 @@ int __init amd_iommu_detect_one_acpi(voi
         return -ENODEV;
     }
 
-    iommu = (struct amd_iommu *) xmalloc(struct amd_iommu);
+    iommu = xzalloc(struct amd_iommu);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("Error allocating amd_iommu\n");
        return -ENOMEM;
    }
-    memset(iommu, 0, sizeof(struct amd_iommu));
 
     spin_lock_init(&iommu->lock);
 
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -779,13 +779,12 @@ int __init alloc_ivrs_mappings(u16 seg)
     if ( get_ivrs_mappings(seg) )
         return 0;
 
-    ivrs_mappings = xmalloc_array(struct ivrs_mappings, ivrs_bdf_entries + 1);
+    ivrs_mappings = xzalloc_array(struct ivrs_mappings, ivrs_bdf_entries + 1);
     if ( ivrs_mappings == NULL )
     {
         AMD_IOMMU_DEBUG("Error allocating IVRS Mappings table\n");
         return -ENOMEM;
     }
-    memset(ivrs_mappings, 0, ivrs_bdf_entries * sizeof(struct ivrs_mappings));
     IVRS_MAPPINGS_SEG(ivrs_mappings) = seg;
 
     /* assign default values for device entries */
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -115,13 +115,12 @@ int pt_irq_create_bind_vtd(
     hvm_irq_dpci = domain_get_irq_dpci(d);
     if ( hvm_irq_dpci == NULL )
     {
-        hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
+        hvm_irq_dpci = xzalloc(struct hvm_irq_dpci);
         if ( hvm_irq_dpci == NULL )
         {
             spin_unlock(&d->event_lock);
             return -ENOMEM;
         }
-        memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
         softirq_tasklet_init(
             &hvm_irq_dpci->dirq_tasklet,
             hvm_dirq_assist, (unsigned long)d);
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -65,14 +65,13 @@ static struct pci_seg *alloc_pseg(u16 se
     if ( pseg )
         return pseg;
 
-    pseg = xmalloc(struct pci_seg);
+    pseg = xzalloc(struct pci_seg);
     if ( !pseg )
         return NULL;
 
     pseg->nr = seg;
     INIT_LIST_HEAD(&pseg->alldevs_list);
     spin_lock_init(&pseg->bus2bridge_lock);
-    memset(pseg->bus2bridge, 0, sizeof(pseg->bus2bridge));
 
     if ( radix_tree_insert(&pci_segments, seg, pseg) )
     {
@@ -121,10 +120,9 @@ static struct pci_dev *alloc_pdev(struct
         if ( pdev->bus == bus && pdev->devfn == devfn )
             return pdev;
 
-    pdev = xmalloc(struct pci_dev);
+    pdev = xzalloc(struct pci_dev);
     if ( !pdev )
         return NULL;
-    memset(pdev, 0, sizeof(struct pci_dev));
 
     *(u16*) &pdev->seg = pseg->nr;
     *((u8*) &pdev->bus) = bus;
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -291,10 +291,9 @@ static int __init acpi_parse_dev_scope(
     scope->devices_cnt = cnt;
     if ( cnt > 0 )
     {
-        scope->devices = xmalloc_array(u16, cnt);
+        scope->devices = xzalloc_array(u16, cnt);
         if ( !scope->devices )
             return -ENOMEM;
-        memset(scope->devices, 0, sizeof(u16) * cnt);
     }
 
     while ( start < end )
@@ -400,10 +399,9 @@ acpi_parse_one_drhd(struct acpi_dmar_ent
     if ( (ret = acpi_dmar_check_length(header, sizeof(*drhd))) != 0 )
         return ret;
 
-    dmaru = xmalloc(struct acpi_drhd_unit);
+    dmaru = xzalloc(struct acpi_drhd_unit);
     if ( !dmaru )
         return -ENOMEM;
-    memset(dmaru, 0, sizeof(struct acpi_drhd_unit));
 
     dmaru->address = drhd->address;
     dmaru->segment = drhd->segment;
@@ -534,10 +532,9 @@ acpi_parse_one_rmrr(struct acpi_dmar_ent
     }
 #endif
 
-    rmrru = xmalloc(struct acpi_rmrr_unit);
+    rmrru = xzalloc(struct acpi_rmrr_unit);
     if ( !rmrru )
         return -ENOMEM;
-    memset(rmrru, 0, sizeof(struct acpi_rmrr_unit));
 
     rmrru->base_address = base_addr;
     rmrru->end_address = end_addr;
@@ -624,10 +621,9 @@ acpi_parse_one_atsr(struct acpi_dmar_ent
     if ( (ret = acpi_dmar_check_length(header, sizeof(*atsr))) != 0 )
         return ret;
 
-    atsru = xmalloc(struct acpi_atsr_unit);
+    atsru = xzalloc(struct acpi_atsr_unit);
     if ( !atsru )
         return -ENOMEM;
-    memset(atsru, 0, sizeof(struct acpi_atsr_unit));
 
     atsru->segment = atsr->segment;
     atsru->all_ports = atsr->flags & 1; /* BIT0: ALL_PORTS */
@@ -673,10 +669,9 @@ acpi_parse_one_rhsa(struct acpi_dmar_ent
     if ( (ret = acpi_dmar_check_length(header, sizeof(*rhsa))) != 0 )
         return ret;
 
-    rhsau = xmalloc(struct acpi_rhsa_unit);
+    rhsau = xzalloc(struct acpi_rhsa_unit);
     if ( !rhsau )
         return -ENOMEM;
-    memset(rhsau, 0, sizeof(struct acpi_rhsa_unit));
 
     rhsau->address = rhsa->address;
     rhsau->proximity_domain = rhsa->proximity_domain;
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -138,10 +138,9 @@ static struct intel_iommu *__init alloc_
 {
     struct intel_iommu *intel;
 
-    intel = xmalloc(struct intel_iommu);
+    intel = xzalloc(struct intel_iommu);
     if ( intel == NULL )
         return NULL;
-    memset(intel, 0, sizeof(struct intel_iommu));
 
     spin_lock_init(&intel->qi_ctrl.qinval_lock);
     spin_lock_init(&intel->ir_ctrl.iremap_lock);
@@ -1098,10 +1097,9 @@ int __init iommu_alloc(struct acpi_drhd_
         return -ENOMEM;
     }
 
-    iommu = xmalloc(struct iommu);
+    iommu = xzalloc(struct iommu);
     if ( iommu == NULL )
         return -ENOMEM;
-    memset(iommu, 0, sizeof(struct iommu));
 
     iommu->irq = -1; /* No irq assigned yet. */
 
@@ -1157,10 +1155,9 @@ int __init iommu_alloc(struct acpi_drhd_
 
     /* allocate domain id bitmap */
     nr_dom = cap_ndoms(iommu->cap);
-    iommu->domid_bitmap = xmalloc_array(unsigned long, BITS_TO_LONGS(nr_dom));
+    iommu->domid_bitmap = xzalloc_array(unsigned long, BITS_TO_LONGS(nr_dom));
     if ( !iommu->domid_bitmap )
         return -ENOMEM ;
-    memset(iommu->domid_bitmap, 0, nr_dom / 8);
 
     /*
      * if Caching mode is set, then invalid translations are tagged with
@@ -1169,10 +1166,9 @@ int __init iommu_alloc(struct acpi_drhd_
     if ( cap_caching_mode(iommu->cap) )
         set_bit(0, iommu->domid_bitmap);
 
-    iommu->domid_map = xmalloc_array(u16, nr_dom);
+    iommu->domid_map = xzalloc_array(u16, nr_dom);
     if ( !iommu->domid_map )
         return -ENOMEM ;
-    memset(iommu->domid_map, 0, nr_dom * sizeof(*iommu->domid_map));
 
     spin_lock_init(&iommu->lock);
     spin_lock_init(&iommu->register_lock);
--- a/xen/drivers/video/vesa.c
+++ b/xen/drivers/video/vesa.c
@@ -93,11 +93,11 @@ void __init vesa_init(void)
     if ( !lbuf )
         goto fail;
 
-    text_buf = xmalloc_bytes(text_columns * text_rows);
+    text_buf = xzalloc_bytes(text_columns * text_rows);
     if ( !text_buf )
         goto fail;
 
-    line_len = xmalloc_array(unsigned int, text_columns);
+    line_len = xzalloc_array(unsigned int, text_columns);
     if ( !line_len )
         goto fail;
 
@@ -108,8 +108,6 @@ void __init vesa_init(void)
         goto fail;
 
     lfb = memset((void *)IOREMAP_VIRT_START, 0, vram_remap);
-    memset(text_buf, 0, text_columns * text_rows);
-    memset(line_len, 0, text_columns * sizeof(*line_len));
 
     vga_puts = vesa_redraw_puts;
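For readers not familiar with Xen's allocator helpers: the point of the conversion is that the xzalloc() family hands back memory that is already zeroed, so every xmalloc()-plus-memset() pair above collapses into a single call. Below is a minimal sketch of the idea in plain C. The names zalloc_bytes, ZALLOC and ZALLOC_ARRAY are illustrative stand-ins, not Xen's actual implementation, which lives in the hypervisor's xmalloc machinery and additionally handles per-type alignment.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical stand-ins for xzalloc()/xzalloc_array(); this sketch keeps
 * only the zeroing contract that lets call sites drop their memset().
 */
static void *zalloc_bytes(size_t size)
{
    void *p = malloc(size);

    if ( p != NULL )
        memset(p, 0, size); /* zero the whole allocation, not a size recomputed at the call site */
    return p;
}

/* Type-safe single-object and array forms. */
#define ZALLOC(type)       ((type *)zalloc_bytes(sizeof(type)))
#define ZALLOC_ARRAY(type, num)                     \
    ((num) > SIZE_MAX / sizeof(type)                \
     ? NULL /* refuse multiplication overflow */    \
     : (type *)zalloc_bytes(sizeof(type) * (num)))

Besides shrinking the call sites, letting the allocator do the zeroing removes a class of size-mismatch bugs visible in the patch itself: the iommu_alloc() hunk used to clear only nr_dom / 8 bytes of a bitmap allocated as BITS_TO_LONGS(nr_dom) longs, and alloc_ivrs_mappings() cleared ivrs_bdf_entries entries out of the ivrs_bdf_entries + 1 actually allocated. With xzalloc_array() the zeroed size is, by construction, the allocated size.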