[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH 1/3] xen/arm: vmmio: The number of entries cannot be negative



On Thu, 17 Aug 2023, Julien Grall wrote:
> From: Julien Grall <jgrall@xxxxxxxxxx>
> 
> The number of virtual MMIO regions cannot be negative. So switch
> the field 'num_entries' and 'max_num_entries' to 'unsigned int'.
> 
> The new type is then propagated to the caller and the vGIC
> code.
> 
> Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>

Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>


> ---
>  xen/arch/arm/domain.c           | 3 ++-
>  xen/arch/arm/include/asm/mmio.h | 6 +++---
>  xen/arch/arm/include/asm/vgic.h | 6 +++---
>  xen/arch/arm/io.c               | 2 +-
>  xen/arch/arm/vgic-v2.c          | 2 +-
>  xen/arch/arm/vgic-v3.c          | 2 +-
>  xen/arch/arm/vgic.c             | 2 +-
>  xen/arch/arm/vgic/vgic-init.c   | 2 +-
>  8 files changed, 13 insertions(+), 12 deletions(-)
> 
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 6113ca785c78..28e3aaa5e482 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -694,7 +694,8 @@ int arch_domain_create(struct domain *d,
>                         struct xen_domctl_createdomain *config,
>                         unsigned int flags)
>  {
> -    int rc, count = 0;
> +    unsigned int count = 0;
> +    int rc;
>  
>      BUILD_BUG_ON(GUEST_MAX_VCPUS < MAX_VIRT_CPUS);
>  
> diff --git a/xen/arch/arm/include/asm/mmio.h b/xen/arch/arm/include/asm/mmio.h
> index 79e64d9af804..b22cfdac5be9 100644
> --- a/xen/arch/arm/include/asm/mmio.h
> +++ b/xen/arch/arm/include/asm/mmio.h
> @@ -75,8 +75,8 @@ struct mmio_handler {
>  };
>  
>  struct vmmio {
> -    int num_entries;
> -    int max_num_entries;
> +    unsigned int num_entries;
> +    unsigned int max_num_entries;
>      rwlock_t lock;
>      struct mmio_handler *handlers;
>  };
> @@ -86,7 +86,7 @@ enum io_state try_handle_mmio(struct cpu_user_regs *regs,
>  void register_mmio_handler(struct domain *d,
>                             const struct mmio_handler_ops *ops,
>                             paddr_t addr, paddr_t size, void *priv);
> -int domain_io_init(struct domain *d, int max_count);
> +int domain_io_init(struct domain *d, unsigned int max_count);
>  void domain_io_free(struct domain *d);
>  
>  void try_decode_instruction(const struct cpu_user_regs *regs,
> diff --git a/xen/arch/arm/include/asm/vgic.h b/xen/arch/arm/include/asm/vgic.h
> index aa9f49409edc..6901a05c0669 100644
> --- a/xen/arch/arm/include/asm/vgic.h
> +++ b/xen/arch/arm/include/asm/vgic.h
> @@ -304,8 +304,8 @@ extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
>  extern void vgic_set_irqs_pending(struct vcpu *v, uint32_t r,
>                                    unsigned int rank);
>  extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops);
> -int vgic_v2_init(struct domain *d, int *mmio_count);
> -int vgic_v3_init(struct domain *d, int *mmio_count);
> +int vgic_v2_init(struct domain *d, unsigned int *mmio_count);
> +int vgic_v3_init(struct domain *d, unsigned int *mmio_count);
>  
>  extern bool vgic_to_sgi(struct vcpu *v, register_t sgir,
>                          enum gic_sgi_mode irqmode, int virq,
> @@ -352,7 +352,7 @@ int vgic_connect_hw_irq(struct domain *d, struct vcpu *v, unsigned int virq,
>  
>  bool vgic_evtchn_irq_pending(struct vcpu *v);
>  
> -int domain_vgic_register(struct domain *d, int *mmio_count);
> +int domain_vgic_register(struct domain *d, unsigned int *mmio_count);
>  int domain_vgic_init(struct domain *d, unsigned int nr_spis);
>  void domain_vgic_free(struct domain *d);
>  int vcpu_vgic_init(struct vcpu *v);
> diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
> index 172583df047f..96c740d5636c 100644
> --- a/xen/arch/arm/io.c
> +++ b/xen/arch/arm/io.c
> @@ -224,7 +224,7 @@ void register_mmio_handler(struct domain *d,
>      write_unlock(&vmmio->lock);
>  }
>  
> -int domain_io_init(struct domain *d, int max_count)
> +int domain_io_init(struct domain *d, unsigned int max_count)
>  {
>      rwlock_init(&d->arch.vmmio.lock);
>      d->arch.vmmio.num_entries = 0;
> diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
> index 35363fee098c..2a2eda2e6f4c 100644
> --- a/xen/arch/arm/vgic-v2.c
> +++ b/xen/arch/arm/vgic-v2.c
> @@ -731,7 +731,7 @@ static const struct vgic_ops vgic_v2_ops = {
>      .lpi_get_priority = vgic_v2_lpi_get_priority,
>  };
>  
> -int vgic_v2_init(struct domain *d, int *mmio_count)
> +int vgic_v2_init(struct domain *d, unsigned int *mmio_count)
>  {
>      if ( !vgic_v2_hw.enabled )
>      {
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index 1b7173da1e30..05a009409ab8 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -1812,7 +1812,7 @@ static const struct vgic_ops v3_ops = {
>      .lpi_get_priority = vgic_v3_lpi_get_priority,
>  };
>  
> -int vgic_v3_init(struct domain *d, int *mmio_count)
> +int vgic_v3_init(struct domain *d, unsigned int *mmio_count)
>  {
>      if ( !vgic_v3_hw.enabled )
>      {
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 97d6f6106638..afcac791fe4b 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -85,7 +85,7 @@ static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index,
>          write_atomic(&rank->vcpu[i], vcpu);
>  }
>  
> -int domain_vgic_register(struct domain *d, int *mmio_count)
> +int domain_vgic_register(struct domain *d, unsigned int *mmio_count)
>  {
>      switch ( d->arch.vgic.version )
>      {
> diff --git a/xen/arch/arm/vgic/vgic-init.c b/xen/arch/arm/vgic/vgic-init.c
> index 76b85ea8231b..f8d7d3a226d0 100644
> --- a/xen/arch/arm/vgic/vgic-init.c
> +++ b/xen/arch/arm/vgic/vgic-init.c
> @@ -101,7 +101,7 @@ static void vgic_vcpu_early_init(struct vcpu *vcpu)
>   *
>   * was: kvm_vgic_create
>   */
> -int domain_vgic_register(struct domain *d, int *mmio_count)
> +int domain_vgic_register(struct domain *d, unsigned int *mmio_count)
>  {
>      switch ( d->arch.vgic.version )
>      {
> -- 
> 2.40.1
> 



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.