
Re: [Xen-devel] [PATCH 2 of 5] x86: make the pv-only e820 array be dynamic



On Thu, 2011-04-07 at 21:25 +0100, Konrad Rzeszutek Wilk wrote:
> # HG changeset patch
> # User Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> # Date 1302202697 14400
> # Node ID 01d0d338b97491a3aa816dab43cc709a234214f7
> # Parent  decab6c21cc3d7ce4d4dad949d34ba35d4600490
> x86: make the pv-only e820 array be dynamic.
> 
> During creation of a PV domain we allocate the E820 array with
> E820MAX entries. This allows the tool stack to fill the E820 with
> more than three entries.

Is it possible to defer this allocation until XENMEM_set_memory_map
time, so that we only allocate the size actually required? I guess
E820MAX (128) entries is only 2.5k, but a typical e820 tops out at more
like 20 entries in practice (<0.5k).
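
Something along these lines in the XENMEM_set_memory_map handler would
do it, I think (an untested sketch only; it reuses the fmap/d locals
from the hunk below and assumes the map is only set once, so a second
call with a larger nr_entries would want an xfree-and-reallocate path):

    /* Sketch: allocate the pseudophysical e820 lazily, sized to
     * whatever the toolstack actually passed in. */
    if ( d->arch.pv_domain.e820 == NULL )
    {
        d->arch.pv_domain.e820 = xmalloc_array(struct e820entry,
                                               fmap.map.nr_entries);
        if ( d->arch.pv_domain.e820 == NULL )
        {
            rcu_unlock_domain(d);
            return -ENOMEM;
        }
    }

That would also let you drop the new allocation and the error-path
xfree from the domain-creation path in domain.c; only the
domain_destroy path would still need to free it.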

> 
> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> 
> diff -r decab6c21cc3 -r 01d0d338b974 xen/arch/x86/domain.c
> --- a/xen/arch/x86/domain.c   Thu Apr 07 12:36:26 2011 -0400
> +++ b/xen/arch/x86/domain.c   Thu Apr 07 14:58:17 2011 -0400
> @@ -634,14 +634,23 @@
>                  d->arch.pirq_emuirq[i] = IRQ_UNBOUND;
>              for (i = 0; i < nr_irqs; i++)
>                  d->arch.emuirq_pirq[i] = IRQ_UNBOUND;
> +        } else
> +        {
> +          d->arch.pv_domain.e820 = xmalloc_array(struct e820entry, E820MAX);
> +
> +          if ( !d->arch.pv_domain.e820 )
> +            goto fail;
> +
> +          memset(d->arch.pv_domain.e820, 0,
> +                E820MAX * sizeof(*d->arch.pv_domain.e820));
>          }
>  
> -
>          if ( (rc = iommu_domain_init(d)) != 0 )
>              goto fail;
>  
>          /* For Guest vMCE MSRs virtualization */
>          vmce_init_msr(d);
> +
>      }
>  
>      if ( is_hvm_domain(d) )
> @@ -668,6 +677,10 @@
>   fail:
>      d->is_dying = DOMDYING_dead;
>      vmce_destroy_msr(d);
> +    if ( !is_hvm_domain(d) )
> +    {
> +      xfree(d->arch.pv_domain.e820);
> +    }
>      xfree(d->arch.pirq_irq);
>      xfree(d->arch.irq_pirq);
>      xfree(d->arch.pirq_emuirq);
> @@ -696,6 +709,8 @@
>  
>      if ( is_hvm_domain(d) )
>          hvm_domain_destroy(d);
> +    else
> +      xfree(d->arch.pv_domain.e820);
>  
>      vmce_destroy_msr(d);
>      pci_release_devices(d);
> diff -r decab6c21cc3 -r 01d0d338b974 xen/arch/x86/mm.c
> --- a/xen/arch/x86/mm.c       Thu Apr 07 12:36:26 2011 -0400
> +++ b/xen/arch/x86/mm.c       Thu Apr 07 14:58:17 2011 -0400
> @@ -4710,7 +4710,7 @@
>          if ( copy_from_guest(&fmap, arg, 1) )
>              return -EFAULT;
>  
> -        if ( fmap.map.nr_entries > ARRAY_SIZE(d->arch.pv_domain.e820) )
> +        if ( fmap.map.nr_entries > E820MAX )
>              return -EINVAL;
>  
>          rc = rcu_lock_target_domain_by_id(fmap.domid, &d);
> @@ -4730,9 +4730,16 @@
>              return -EPERM;
>          }
>  
> +        if ( d->arch.pv_domain.e820 == NULL )
> +        {
> +            rcu_unlock_domain(d);
> +            return -EINVAL;
> +        }
>          rc = copy_from_guest(d->arch.pv_domain.e820, fmap.map.buffer,
>                               fmap.map.nr_entries) ? -EFAULT : 0;
> -        d->arch.pv_domain.nr_e820 = fmap.map.nr_entries;
> +
> +        if ( rc == 0 )
> +          d->arch.pv_domain.nr_e820 = fmap.map.nr_entries;
>  
>          rcu_unlock_domain(d);
>          return rc;
> @@ -4747,6 +4754,9 @@
>          if ( d->arch.pv_domain.nr_e820 == 0 )
>              return -ENOSYS;
>  
> +        if ( d->arch.pv_domain.e820 == NULL )
> +            return -ENOSYS;
> +
>          if ( copy_from_guest(&map, arg, 1) )
>              return -EFAULT;
>  
> diff -r decab6c21cc3 -r 01d0d338b974 xen/include/asm-x86/domain.h
> --- a/xen/include/asm-x86/domain.h    Thu Apr 07 12:36:26 2011 -0400
> +++ b/xen/include/asm-x86/domain.h    Thu Apr 07 14:58:17 2011 -0400
> @@ -238,7 +238,7 @@
>      unsigned long pirq_eoi_map_mfn;
>  
>      /* Pseudophysical e820 map (XENMEM_memory_map).  */
> -    struct e820entry e820[3];
> +    struct e820entry *e820;
>      unsigned int nr_e820;
>  };
>  
> 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel