[Xen-devel] [PATCH v5 02/21] xen/arm: make mmio handlers domain specific
From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>

Register mmio handlers at run time and make them domain specific. Each
handler is registered for an address range in a per-domain table, so the
per-handler check_handler callbacks are removed; handle_mmio() now
performs the range check itself.

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
---
 xen/arch/arm/domain.c        |    3 +++
 xen/arch/arm/io.c            |   56 +++++++++++++++++++++++++++++++++---------
 xen/arch/arm/vgic.c          |   28 +++++++++++----------
 xen/arch/arm/vuart.c         |   26 +++++++++-----------
 xen/include/asm-arm/domain.h |    2 ++
 xen/include/asm-arm/mmio.h   |   22 ++++++++++++++---
 6 files changed, 94 insertions(+), 43 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index e2ada12..43a2c6f 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -508,6 +508,9 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
     share_xen_page_with_guest(
         virt_to_page(d->shared_info), d, XENSHARE_writable);
 
+    if ( (rc = domain_io_init(d)) != 0 )
+        goto fail;
+
     if ( (rc = p2m_alloc_table(d)) != 0 )
         goto fail;
 
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index ada1918..8e55d49 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -1,5 +1,5 @@
 /*
- * xen/arch/arm/io.h
+ * xen/arch/arm/io.c
  *
  * ARM I/O handlers
  *
@@ -18,29 +18,61 @@
 
 #include <xen/config.h>
 #include <xen/lib.h>
+#include <xen/spinlock.h>
+#include <xen/sched.h>
 #include <asm/current.h>
 #include <asm/mmio.h>
 
-static const struct mmio_handler *const mmio_handlers[] =
-{
-    &vgic_distr_mmio_handler,
-    &vuart_mmio_handler,
-};
-#define MMIO_HANDLER_NR ARRAY_SIZE(mmio_handlers)
-
 int handle_mmio(mmio_info_t *info)
 {
     struct vcpu *v = current;
     int i;
+    const struct mmio_handler *mmio_handler;
+    const struct io_handler *io_handlers = &v->domain->arch.io_handlers;
 
-    for ( i = 0; i < MMIO_HANDLER_NR; i++ )
-        if ( mmio_handlers[i]->check_handler(v, info->gpa) )
+    for ( i = 0; i < io_handlers->num_entries; i++ )
+    {
+        mmio_handler = &io_handlers->mmio_handlers[i];
+
+        if ( (info->gpa >= mmio_handler->addr) &&
+             (info->gpa < (mmio_handler->addr + mmio_handler->size)) )
+        {
             return info->dabt.write ?
-                mmio_handlers[i]->write_handler(v, info) :
-                mmio_handlers[i]->read_handler(v, info);
+                mmio_handler->mmio_handler_ops->write_handler(v, info) :
+                mmio_handler->mmio_handler_ops->read_handler(v, info);
+        }
+    }
 
     return 0;
 }
+
+void register_mmio_handler(struct domain *d,
+                           const struct mmio_handler_ops *handle,
+                           paddr_t addr, paddr_t size)
+{
+    struct io_handler *handler = &d->arch.io_handlers;
+
+    BUG_ON(handler->num_entries >= MAX_IO_HANDLER);
+
+    spin_lock(&handler->lock);
+
+    handler->mmio_handlers[handler->num_entries].mmio_handler_ops = handle;
+    handler->mmio_handlers[handler->num_entries].addr = addr;
+    handler->mmio_handlers[handler->num_entries].size = size;
+    dsb(ish);
+    handler->num_entries++;
+
+    spin_unlock(&handler->lock);
+}
+
+int domain_io_init(struct domain *d)
+{
+    spin_lock_init(&d->arch.io_handlers.lock);
+    d->arch.io_handlers.num_entries = 0;
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 4962e70..2949948 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -35,6 +35,9 @@
 /* Number of ranks of interrupt registers for a domain */
 #define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_lines+31)/32)
 
+static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info);
+static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info);
+
 /*
  * Rank containing GICD_<FOO><n> for GICD_<FOO> with
  * <b>-bits-per-interrupt
@@ -73,6 +76,11 @@ static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, int b, int n)
         return NULL;
 }
 
+const struct mmio_handler_ops vgic_distr_mmio_handler = {
+    .read_handler = vgic_distr_mmio_read,
+    .write_handler = vgic_distr_mmio_write,
+};
+
 int domain_vgic_init(struct domain *d)
 {
     int i;
@@ -107,6 +115,13 @@ int domain_vgic_init(struct domain *d)
     }
     for (i=0; i<DOMAIN_NR_RANKS(d); i++)
         spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
+
+    /*
+     * We rely on gicv_setup() to initialize dbase (vGIC distributor base)
+     */
+    register_mmio_handler(d, &vgic_distr_mmio_handler,
+                          d->arch.vgic.dbase, PAGE_SIZE);
+
     return 0;
 }
 
@@ -676,19 +691,6 @@ write_ignore:
     return 1;
 }
 
-static int vgic_distr_mmio_check(struct vcpu *v, paddr_t addr)
-{
-    struct domain *d = v->domain;
-
-    return (addr >= (d->arch.vgic.dbase)) && (addr < (d->arch.vgic.dbase + PAGE_SIZE));
-}
-
-const struct mmio_handler vgic_distr_mmio_handler = {
-    .check_handler = vgic_distr_mmio_check,
-    .read_handler = vgic_distr_mmio_read,
-    .write_handler = vgic_distr_mmio_write,
-};
-
 struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
 {
     struct pending_irq *n;
diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c
index 953cd46..a9106e6 100644
--- a/xen/arch/arm/vuart.c
+++ b/xen/arch/arm/vuart.c
@@ -44,6 +44,14 @@
 
 #define domain_has_vuart(d) ((d)->arch.vuart.info != NULL)
 
+static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info);
+static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info);
+
+const struct mmio_handler_ops vuart_mmio_handler = {
+    .read_handler = vuart_mmio_read,
+    .write_handler = vuart_mmio_write,
+};
+
 int domain_vuart_init(struct domain *d)
 {
     ASSERT( is_hardware_domain(d) );
@@ -59,6 +67,10 @@ int domain_vuart_init(struct domain *d)
     if ( !d->arch.vuart.buf )
         return -ENOMEM;
 
+    register_mmio_handler(d, &vuart_mmio_handler,
+                          d->arch.vuart.info->base_addr,
+                          d->arch.vuart.info->size);
+
     return 0;
 }
 
@@ -92,14 +104,6 @@ static void vuart_print_char(struct vcpu *v, char c)
     spin_unlock(&uart->lock);
 }
 
-static int vuart_mmio_check(struct vcpu *v, paddr_t addr)
-{
-    const struct vuart_info *info = v->domain->arch.vuart.info;
-
-    return (domain_has_vuart(v->domain) && addr >= info->base_addr &&
-            addr <= (info->base_addr + info->size));
-}
-
 static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info)
 {
     struct domain *d = v->domain;
@@ -133,12 +137,6 @@ static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info)
     return 1;
 }
 
-const struct mmio_handler vuart_mmio_handler = {
-    .check_handler = vuart_mmio_check,
-    .read_handler = vuart_mmio_read,
-    .write_handler = vuart_mmio_write,
-};
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 9ef6b26..36d82e2 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -7,6 +7,7 @@
 #include <asm/page.h>
 #include <asm/p2m.h>
 #include <asm/vfp.h>
+#include <asm/mmio.h>
 #include <public/hvm/params.h>
 #include <xen/serial.h>
 #include <xen/hvm/iommu.h>
@@ -119,6 +120,7 @@ struct arch_domain
     struct hvm_domain hvm_domain;
     xen_pfn_t *grant_table_gpfn;
 
+    struct io_handler io_handlers;
     /* Continuable domain_relinquish_resources(). */
     enum {
         RELMEM_not_started,
diff --git a/xen/include/asm-arm/mmio.h b/xen/include/asm-arm/mmio.h
index 5870985..0160f09 100644
--- a/xen/include/asm-arm/mmio.h
+++ b/xen/include/asm-arm/mmio.h
@@ -23,6 +23,8 @@
 #include <asm/processor.h>
 #include <asm/regs.h>
 
+#define MAX_IO_HANDLER 16
+
 typedef struct
 {
     struct hsr_dabt dabt;
@@ -34,16 +36,28 @@ typedef int (*mmio_read_t)(struct vcpu *v, mmio_info_t *info);
 typedef int (*mmio_write_t)(struct vcpu *v, mmio_info_t *info);
 typedef int (*mmio_check_t)(struct vcpu *v, paddr_t addr);
 
-struct mmio_handler {
-    mmio_check_t check_handler;
+struct mmio_handler_ops {
     mmio_read_t read_handler;
     mmio_write_t write_handler;
 };
 
-extern const struct mmio_handler vgic_distr_mmio_handler;
-extern const struct mmio_handler vuart_mmio_handler;
+struct mmio_handler {
+    paddr_t addr;
+    paddr_t size;
+    const struct mmio_handler_ops *mmio_handler_ops;
+};
+
+struct io_handler {
+    int num_entries;
+    spinlock_t lock;
+    struct mmio_handler mmio_handlers[MAX_IO_HANDLER];
+};
 
 extern int handle_mmio(mmio_info_t *info);
+void register_mmio_handler(struct domain *d,
+                           const struct mmio_handler_ops *handle,
+                           paddr_t addr, paddr_t size);
+int domain_io_init(struct domain *d);
 
 #endif /* __ASM_ARM_MMIO_H__ */
-- 
1.7.9.5
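For context, a minimal sketch (not part of this patch) of how another in-hypervisor emulated device could plug into the interface introduced above. Only register_mmio_handler(), struct mmio_handler_ops, mmio_info_t, select_user_reg() and guest_cpu_user_regs() come from this patch or the existing tree; the "vfoo" device, its VFOO_BASE/VFOO_SIZE range and domain_vfoo_init() are hypothetical names used purely for illustration.

/* Hypothetical example device "vfoo"; names and addresses are made up. */
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/mmio.h>

#define VFOO_BASE 0x2f000000UL   /* hypothetical guest-physical base */
#define VFOO_SIZE PAGE_SIZE

static int vfoo_mmio_read(struct vcpu *v, mmio_info_t *info)
{
    register_t *r = select_user_reg(guest_cpu_user_regs(), info->dabt.reg);

    *r = 0;    /* register reads as zero in this sketch */
    return 1;  /* non-zero: the access was handled */
}

static int vfoo_mmio_write(struct vcpu *v, mmio_info_t *info)
{
    return 1;  /* writes are ignored but still claimed as handled */
}

static const struct mmio_handler_ops vfoo_mmio_handler = {
    .read_handler  = vfoo_mmio_read,
    .write_handler = vfoo_mmio_write,
};

int domain_vfoo_init(struct domain *d)
{
    /*
     * One call replaces both the old static mmio_handlers[] entry and the
     * per-device check_handler: handle_mmio() now checks info->gpa against
     * the registered [addr, addr + size) window for this domain.
     */
    register_mmio_handler(d, &vfoo_mmio_handler, VFOO_BASE, VFOO_SIZE);
    return 0;
}

Note also the ordering in register_mmio_handler() above: the entry is fully populated and a dsb(ish) issued before num_entries is incremented, so handle_mmio(), which scans the table without taking the lock, never sees a partially initialised entry.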