
Re: [Xen-devel] [RFC PATCH 45/49] ARM: new VGIC: vgic-init: implement vgic_init



Hi,

On 19/02/18 13:21, Julien Grall wrote:
> Hi,
> 
> On 09/02/18 14:39, Andre Przywara wrote:
>> This patch allocates and initializes the data structures used to model
>> the vgic distributor and virtual cpu interfaces. At that stage the
>> number of IRQs and number of virtual CPUs is frozen.
>>
>> This is based on Linux commit ad275b8bb1e6, written by Eric Auger.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxxxxx>
>> ---
>>   xen/arch/arm/vgic/vgic-init.c | 197 ++++++++++++++++++++++++++++++++++++++++++
> 
> This file is exporting a lot of functions; all the corresponding
> prototypes should be declared within this patch as well.

But those are implementations of functions that Xen code already uses, and
they are all declared in xen/include/asm-arm/vgic.h.
Or did I miss any?
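
For reference, these are the prototypes I am thinking of, roughly as I read
them from xen/include/asm-arm/vgic.h for this series (just a sketch based on
the definitions in this patch; the exact header context may differ):

    struct domain;
    struct vcpu;

    int domain_vgic_register(struct domain *d, int *mmio_count);
    int domain_vgic_init(struct domain *d, unsigned int nr_spis);
    void domain_vgic_free(struct domain *d);
    int vcpu_vgic_init(struct vcpu *vcpu);
    int vcpu_vgic_free(struct vcpu *vcpu);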

Cheers,
Andre.

> 
>>   1 file changed, 197 insertions(+)
>>
>> diff --git a/xen/arch/arm/vgic/vgic-init.c b/xen/arch/arm/vgic/vgic-init.c
>> index b5f1183a50..0cd2dfc600 100644
>> --- a/xen/arch/arm/vgic/vgic-init.c
>> +++ b/xen/arch/arm/vgic/vgic-init.c
>> @@ -1,5 +1,6 @@
>>   /*
>>    * Copyright (C) 2015, 2016 ARM Ltd.
>> + * Imported from Linux ("new" KVM VGIC) and heavily adapted to Xen.
>>    *
>>    * This program is free software; you can redistribute it and/or modify
>>    * it under the terms of the GNU General Public License version 2 as
>> @@ -19,6 +20,77 @@
>>     #include "vgic.h"
>>   +/*
>> + * Initialization rules: there are multiple stages to the vgic
>> + * initialization, both for the distributor and the CPU interfaces. The basic
>> + * idea is that even though the VGIC is not functional or not requested from
>> + * user space, the critical path of the run loop can still call VGIC functions
>> + * that just won't do anything, without them having to check additional
>> + * initialization flags to ensure they don't look at uninitialized data
>> + * structures.
>> + *
>> + * Distributor:
>> + *
>> + * - vgic_early_init(): initialization of static data that doesn't
>> + *   depend on any sizing information or emulation type. No allocation
>> + *   is allowed there.
>> + *
>> + * - vgic_init(): allocation and initialization of the generic data
>> + *   structures that depend on sizing information (number of CPUs,
>> + *   number of interrupts). Also initializes the vcpu specific data
>> + *   structures. Can be executed lazily for GICv2.
>> + *
>> + * CPU Interface:
>> + *
>> + * - kvm_vgic_vcpu_early_init(): initialization of static data that
>> + *   doesn't depend on any sizing information or emulation type. No
>> + *   allocation is allowed there.
>> + */
>> +
>> +/**
>> + * vgic_vcpu_early_init() - Initialize static VGIC VCPU data structures
>> + * @vcpu: The VCPU whose VGIC data structures should be initialized
>> + *
>> + * Only do initialization, but do not actually enable the VGIC CPU interface
>> + * yet.
>> + */
>> +static void vgic_vcpu_early_init(struct vcpu *vcpu)
>> +{
>> +    struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> +    int i;
>> +
>> +    INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
>> +    spin_lock_init(&vgic_cpu->ap_list_lock);
>> +
>> +    /*
>> +     * Enable and configure all SGIs to be edge-triggered and
>> +     * configure all PPIs as level-triggered.
>> +     */
>> +    for ( i = 0; i < VGIC_NR_PRIVATE_IRQS; i++ )
>> +    {
>> +        struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
>> +
>> +        INIT_LIST_HEAD(&irq->ap_list);
>> +        spin_lock_init(&irq->irq_lock);
>> +        irq->intid = i;
>> +        irq->vcpu = NULL;
>> +        irq->target_vcpu = vcpu;
>> +        irq->targets = 1U << vcpu->vcpu_id;
>> +        atomic_set(&irq->refcount, 0);
>> +        if ( vgic_irq_is_sgi(i) )
>> +        {
>> +            /* SGIs */
>> +            irq->enabled = 1;
>> +            irq->config = VGIC_CONFIG_EDGE;
>> +        }
>> +        else
>> +        {
>> +            /* PPIs */
>> +            irq->config = VGIC_CONFIG_LEVEL;
>> +        }
>> +    }
>> +}
>> +
>>   /* CREATION */
>>     /**
>> @@ -52,6 +124,131 @@ int domain_vgic_register(struct domain *d, int *mmio_count)
>>       return 0;
>>   }
>>   +/* INIT/DESTROY */
>> +
>> +/**
>> + * domain_vgic_init: initialize the dist data structures
>> + * @d: domain pointer
>> + * @nr_spis: number of SPIs
>> + */
>> +int domain_vgic_init(struct domain *d, unsigned int nr_spis)
>> +{
>> +    struct vgic_dist *dist = &d->arch.vgic;
>> +    int i, ret;
>> +
>> +    /* Limit the number of virtual SPIs supported to (1020 - 32) = 988 */
>> +    if ( nr_spis > (1020 - NR_LOCAL_IRQS) )
>> +        return -EINVAL;
>> +
>> +    dist->nr_spis = nr_spis;
>> +    dist->spis = xzalloc_array(struct vgic_irq, nr_spis);
>> +    if ( !dist->spis )
>> +        return -ENOMEM;
>> +
>> +    /*
>> +     * In the following code we do not take the irq struct lock since
>> +     * no other action on irq structs can happen while the VGIC is
>> +     * not initialized yet:
>> +     * If someone wants to inject an interrupt or does a MMIO access, we
>> +     * require prior initialization in case of a virtual GICv3 or trigger
>> +     * initialization when using a virtual GICv2.
>> +     */
>> +    for ( i = 0; i < nr_spis; i++ )
>> +    {
>> +        struct vgic_irq *irq = &dist->spis[i];
>> +
>> +        irq->intid = i + VGIC_NR_PRIVATE_IRQS;
>> +        INIT_LIST_HEAD(&irq->ap_list);
>> +        spin_lock_init(&irq->irq_lock);
>> +        irq->vcpu = NULL;
>> +        irq->target_vcpu = NULL;
>> +        atomic_set(&irq->refcount, 0);
>> +        if ( dist->version == GIC_V2 )
>> +            irq->targets = 0;
>> +        else
>> +            irq->mpidr = 0;
>> +    }
>> +
>> +    INIT_LIST_HEAD(&dist->lpi_list_head);
>> +    spin_lock_init(&dist->lpi_list_lock);
>> +
>> +    if ( dist->version == GIC_V2 )
>> +        ret = vgic_v2_map_resources(d);
>> +    else
>> +        ret = -ENXIO;
>> +
>> +    if ( ret )
>> +        return ret;
>> +
>> +    /* allocated_irqs() is used by Xen to find available vIRQs */
>> +    d->arch.vgic.allocated_irqs =
>> +        xzalloc_array(unsigned long, BITS_TO_LONGS(vgic_num_irqs(d)));
>> +    if ( !d->arch.vgic.allocated_irqs )
>> +        return -ENOMEM;
>> +
>> +    /* vIRQ0-15 (SGIs) are reserved */
>> +    for ( i = 0; i < NR_GIC_SGI; i++ )
>> +        set_bit(i, d->arch.vgic.allocated_irqs);
>> +
>> +    return 0;
>> +}
>> +
>> +/**
>> + * vcpu_vgic_init() - Register VCPU-specific KVM iodevs
>> + * was: kvm_vgic_vcpu_init()
>> + * Xen: adding vgic_vx_enable() call
>> + * @vcpu: pointer to the VCPU being created and initialized
>> + */
>> +int vcpu_vgic_init(struct vcpu *vcpu)
>> +{
>> +    int ret = 0;
>> +
>> +    vgic_vcpu_early_init(vcpu);
>> +
>> +    if ( gic_hw_version() == GIC_V2 )
>> +        vgic_v2_enable(vcpu);
>> +    else
>> +        ret = -ENXIO;
>> +
>> +    return ret;
>> +}
>> +
>> +void domain_vgic_free(struct domain *d)
>> +{
>> +    struct vgic_dist *dist = &d->arch.vgic;
>> +    int i, ret;
>> +
>> +    for ( i = 0; i < dist->nr_spis; i++ )
>> +    {
>> +        struct vgic_irq *irq = vgic_get_irq(d, NULL, 32 + i);
>> +
>> +        if ( !irq->hw )
>> +            continue;
>> +
>> +        ret = release_guest_irq(d, irq->hwintid);
>> +        if ( ret )
>> +            dprintk(XENLOG_G_WARNING,
>> +                "d%u: Failed to release virq %u ret = %d\n",
>> +                d->domain_id, 32 + i, ret);
>> +    }
>> +
>> +    dist->ready = false;
>> +    dist->initialized = false;
>> +
>> +    xfree(dist->spis);
>> +    xfree(dist->allocated_irqs);
>> +    dist->nr_spis = 0;
>> +}
>> +
>> +int vcpu_vgic_free(struct vcpu *vcpu)
>> +{
>> +    struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> +
>> +    INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
>> +
>> +    return 0;
>> +}
>> +
>>   /*
>>    * Local variables:
>>    * mode: C
>>
> 
