
Re: [Xen-devel] [PATCH v4 09/16] xen/arm: move vgic rank data to gic header file



On Mon, 26 May 2014, vijay.kilari@xxxxxxxxx wrote:
> From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
> 
> The vgic_irq_rank structure contains GIC-specific data elements.
> Move it out of domain.h into the new vgic header file vgic.h and
> allocate the memory dynamically in the vgic driver.
> 
> This patch reduces the size of the domain struct and helps to keep
> it within PAGE_SIZE when future GIC hardware versions are added.
> 
> Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>

Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
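
For context on the PAGE_SIZE remark in the commit message: both struct
domain and struct vcpu are allocated from a single xenheap page, so their
sizes are asserted at build time. A rough sketch of that guard, roughly as
it appears in alloc_vcpu_struct() in xen/arch/arm/domain.c (shown from
memory, for illustration only, not part of this patch):

    struct vcpu *alloc_vcpu_struct(void)
    {
        struct vcpu *v;

        /* The whole vcpu, including the embedded arch_vcpu vgic state,
         * must fit in the single page allocated below. */
        BUILD_BUG_ON(sizeof(*v) > PAGE_SIZE);

        v = alloc_xenheap_pages(0, 0);
        if ( v != NULL )
            clear_page(v);

        return v;
    }

Turning private_irqs into a pointer means only sizeof(void *) of the
per-rank state counts against that limit.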


>  xen/arch/arm/domain.c        |    2 ++
>  xen/arch/arm/vgic.c          |   18 ++++++++++++++----
>  xen/include/asm-arm/domain.h |   11 +----------
>  xen/include/asm-arm/vgic.h   |   40 ++++++++++++++++++++++++++++++++++++++++
>  4 files changed, 57 insertions(+), 14 deletions(-)
> 
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 40acfb3..bccbeda 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -31,6 +31,7 @@
>  #include <asm/procinfo.h>
>  
>  #include <asm/gic.h>
> +#include <asm/vgic.h>
>  #include <asm/platform.h>
>  #include "vtimer.h"
>  #include "vuart.h"
> @@ -481,6 +482,7 @@ int vcpu_initialise(struct vcpu *v)
>  void vcpu_destroy(struct vcpu *v)
>  {
>      vcpu_timer_destroy(v);
> +    vcpu_vgic_free(v);
>      free_xenheap_pages(v->arch.stack, STACK_ORDER);
>  }
>  
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index b56f9d1..d2a9e34 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -29,6 +29,7 @@
>  
>  #include <asm/mmio.h>
>  #include <asm/gic.h>
> +#include <asm/vgic.h>
>  
>  #define REG(n) (n)
>  
> @@ -68,7 +69,7 @@ static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, int b, int n)
>      rank = REG_RANK_NR(b, n);
>  
>      if ( rank == 0 )
> -        return &v->arch.vgic.private_irqs;
> +        return v->arch.vgic.private_irqs;
>      else if ( rank <= DOMAIN_NR_RANKS(v->domain) )
>          return &v->domain->arch.vgic.shared_irqs[rank - 1];
>      else
> @@ -84,9 +85,12 @@ void domain_vgic_free(struct domain *d)
>  int vcpu_vgic_init(struct vcpu *v)
>  {
>      int i;
> -    memset(&v->arch.vgic.private_irqs, 0, sizeof(v->arch.vgic.private_irqs));
>  
> -    spin_lock_init(&v->arch.vgic.private_irqs.lock);
> +    v->arch.vgic.private_irqs = xzalloc(struct vgic_irq_rank);
> +    if ( v->arch.vgic.private_irqs == NULL )
> +        return -ENOMEM;
> +
> +    spin_lock_init(&v->arch.vgic.private_irqs->lock);
>  
>      memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs));
>      for (i = 0; i < 32; i++)
> @@ -97,7 +101,7 @@ int vcpu_vgic_init(struct vcpu *v)
>  
>      /* For SGI and PPI the target is always this CPU */
>      for ( i = 0 ; i < 8 ; i++ )
> -        v->arch.vgic.private_irqs.itargets[i] =
> +        v->arch.vgic.private_irqs->itargets[i] =
>                (1<<(v->vcpu_id+0))
>              | (1<<(v->vcpu_id+8))
>              | (1<<(v->vcpu_id+16))
> @@ -109,6 +113,12 @@ int vcpu_vgic_init(struct vcpu *v)
>      return 0;
>  }
>  
> +int vcpu_vgic_free(struct vcpu *v)
> +{
> +    xfree(v->arch.vgic.private_irqs);
> +    return 0;
> +}
> +
>  #define vgic_lock(v)   spin_lock_irq(&(v)->domain->arch.vgic.lock)
>  #define vgic_unlock(v) spin_unlock_irq(&(v)->domain->arch.vgic.lock)
>  
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index f46b631..fe84ce5 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -12,15 +12,6 @@
>  #include <public/hvm/params.h>
>  #include <xen/serial.h>
>  
> -/* Represents state corresponding to a block of 32 interrupts */
> -struct vgic_irq_rank {
> -    spinlock_t lock; /* Covers access to all other members of this struct */
> -    uint32_t ienable, iactive, ipend, pendsgi;
> -    uint32_t icfg[2];
> -    uint32_t ipriority[8];
> -    uint32_t itargets[8];
> -};
> -
>  struct pending_irq
>  {
>      /*
> @@ -274,7 +265,7 @@ struct arch_vcpu
>           * struct arch_domain.
>           */
>          struct pending_irq pending_irqs[32];
> -        struct vgic_irq_rank private_irqs;
> +        struct vgic_irq_rank *private_irqs;
>  
>          /* This list is ordered by IRQ priority and it is used to keep
>           * track of the IRQs that the VGIC injected into the guest.
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> new file mode 100644
> index 0000000..104a87d
> --- /dev/null
> +++ b/xen/include/asm-arm/vgic.h
> @@ -0,0 +1,40 @@
> +/*
> + * ARM Virtual Generic Interrupt Controller support
> + *
> + * Ian Campbell <ian.campbell@xxxxxxxxxx>
> + * Copyright (c) 2011 Citrix Systems.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + */
> +
> +#ifndef __ASM_ARM_VGIC_H__
> +#define __ASM_ARM_VGIC_H__
> +
> +/* Represents state corresponding to a block of 32 interrupts */
> +struct vgic_irq_rank {
> +    spinlock_t lock; /* Covers access to all other members of this struct */
> +    uint32_t ienable, iactive, ipend, pendsgi;
> +    uint32_t icfg[2];
> +    uint32_t ipriority[8];
> +    uint32_t itargets[8];
> +};
> +
> +extern int vcpu_vgic_free(struct vcpu *v);
> +#endif
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> -- 
> 1.7.9.5
> 
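
For completeness, a minimal sketch (illustration only, not part of the
patch) of how the new error path and the unconditional free are expected
to fit together, assuming xfree(NULL) stays a no-op and that
vcpu_initialise() already checks the return value of vcpu_vgic_init():

    int vcpu_initialise(struct vcpu *v)
    {
        int rc;

        /* ... other per-vcpu setup ... */

        /* May now fail with -ENOMEM if the rank allocation fails. */
        if ( (rc = vcpu_vgic_init(v)) != 0 )
            return rc;

        return 0;
    }

    void vcpu_destroy(struct vcpu *v)
    {
        vcpu_timer_destroy(v);
        /* private_irqs starts out NULL (the vcpu page is cleared on
         * allocation) and xfree(NULL) does nothing, so this is safe
         * even if vcpu_vgic_init() failed or never ran. */
        vcpu_vgic_free(v);
        free_xenheap_pages(v->arch.stack, STACK_ORDER);
    }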

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

