[XEN PATCH v2 16/25] arm: new VGIC: its: Implement basic ITS register handlers
Add emulation for some basic MMIO registers used in the ITS emulation.
This includes:
- GITS_{CTLR,TYPER,IIDR}
- ID registers
- GITS_{CBASER,CREADR,CWRITER} (which implement the ITS command buffer
  handling)
- GITS_BASER<n>

The registers holding base addresses and attributes are sanitised before
storing them.

Based on Linux commit 424c33830f53f2 by Andre Przywara

Signed-off-by: Mykyta Poturai <mykyta_poturai@xxxxxxxx>
---
 xen/arch/arm/include/asm/gic_v3_defs.h |   4 +
 xen/arch/arm/include/asm/gic_v3_its.h  |  60 ++++
 xen/arch/arm/include/asm/new_vgic.h    |  18 +
 xen/arch/arm/vgic/vgic-its.c           | 465 ++++++++++++++++++++++++-
 xen/arch/arm/vgic/vgic-mmio.h          |   7 +
 xen/arch/arm/vgic/vgic.h               |   5 +
 6 files changed, 546 insertions(+), 13 deletions(-)

diff --git a/xen/arch/arm/include/asm/gic_v3_defs.h b/xen/arch/arm/include/asm/gic_v3_defs.h
index 3f1f59d1c7..e4e4696de3 100644
--- a/xen/arch/arm/include/asm/gic_v3_defs.h
+++ b/xen/arch/arm/include/asm/gic_v3_defs.h
@@ -138,7 +138,11 @@
 #define GIC_BASER_NonShareable       0ULL
 #define GIC_BASER_InnerShareable     1ULL
 #define GIC_BASER_OuterShareable     2ULL
+#define GIC_BASER_SHAREABILITY_MASK  3ULL
 
+#define GICR_PROPBASER_IDBITS_MASK   (0x1f)
+#define GICR_PROPBASER_ADDRESS(x)    ((x) & GENMASK_ULL(51, 12))
+#define GICR_PENDBASER_ADDRESS(x)    ((x) & GENMASK_ULL(51, 16))
 #define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT 56
 #define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \
     (7ULL << GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT)
diff --git a/xen/arch/arm/include/asm/gic_v3_its.h b/xen/arch/arm/include/asm/gic_v3_its.h
index 4e857cac1a..b408441c6e 100644
--- a/xen/arch/arm/include/asm/gic_v3_its.h
+++ b/xen/arch/arm/include/asm/gic_v3_its.h
@@ -36,7 +36,16 @@
 #define GITS_BASER6             0x130
 #define GITS_BASER7             0x138
 #define GITS_IDREGS_BASE        0xffd0
+#define GITS_PIDR0              0xffe0
+#define GITS_PIDR1              0xffe4
 #define GITS_PIDR2              GICR_PIDR2
+#define GITS_PIDR4              0xffd0
+#define GITS_CIDR0              0xfff0
+#define GITS_CIDR1              0xfff4
+#define GITS_CIDR2              0xfff8
+#define GITS_CIDR3              0xfffc
+
+#define GITS_TRANSLATER         0x10040
 
 /* Register bits */
 #define GITS_VALID_BIT          BIT(63, UL)
@@ -50,6 +59,11 @@
 #define GITS_TYPER_DEVICE_ID_BITS(r)    ((((r) & GITS_TYPER_DEVIDS_MASK) >> \
                                           GITS_TYPER_DEVIDS_SHIFT) + 1)
+#define GITS_IIDR_REV_SHIFT             12
+#define GITS_IIDR_REV_MASK              (0xf << GITS_IIDR_REV_SHIFT)
+#define GITS_IIDR_REV(r)                (((r) >> GITS_IIDR_REV_SHIFT) & 0xf)
+#define GITS_IIDR_PRODUCTID_SHIFT       24
+
 #define GITS_TYPER_IDBITS_SHIFT         8
 #define GITS_TYPER_IDBITS_MASK          (0x1fUL << GITS_TYPER_IDBITS_SHIFT)
 #define GITS_TYPER_EVENT_ID_BITS(r)     ((((r) & GITS_TYPER_IDBITS_MASK) >> \
                                           GITS_TYPER_IDBITS_SHIFT) + 1)
@@ -61,10 +75,12 @@
                                          GITS_TYPER_ITT_SIZE_SHIFT) + 1)
 #define GITS_TYPER_PHYSICAL             (1U << 0)
 
+#define GITS_BASER_VALID                (1ULL << 63)
 #define GITS_BASER_INDIRECT             BIT(62, UL)
 #define GITS_BASER_INNER_CACHEABILITY_SHIFT 59
 #define GITS_BASER_TYPE_SHIFT           56
 #define GITS_BASER_TYPE_MASK            (7ULL << GITS_BASER_TYPE_SHIFT)
+#define GITS_BASER_TYPE(r)              (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
 #define GITS_BASER_OUTER_CACHEABILITY_SHIFT 53
 #define GITS_BASER_TYPE_NONE            0UL
 #define GITS_BASER_TYPE_DEVICE          1UL
@@ -77,6 +93,7 @@
 #define GITS_BASER_ENTRY_SIZE_SHIFT     48
 #define GITS_BASER_ENTRY_SIZE(reg) \
                         ((((reg) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_BASER_ENTRY_SIZE_MASK      GENMASK_ULL(52, 48)
 #define GITS_BASER_SHAREABILITY_SHIFT   10
 #define GITS_BASER_PAGE_SIZE_SHIFT      8
 #define GITS_BASER_SIZE_MASK            0xff
@@ -84,7 +101,48 @@
 #define GITS_BASER_OUTER_CACHEABILITY_MASK (0x7ULL << GITS_BASER_OUTER_CACHEABILITY_SHIFT)
 #define GITS_BASER_INNER_CACHEABILITY_MASK (0x7ULL << GITS_BASER_INNER_CACHEABILITY_SHIFT)
 
+#define GIC_PAGE_SIZE_4K                0ULL
+#define GIC_PAGE_SIZE_16K               1ULL
+#define GIC_PAGE_SIZE_64K               2ULL
+#define GIC_PAGE_SIZE_MASK              3ULL
+
+#define __GITS_BASER_PSZ(sz) \
+    (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_4K         __GITS_BASER_PSZ(4K)
+#define GITS_BASER_PAGE_SIZE_16K        __GITS_BASER_PSZ(16K)
+#define GITS_BASER_PAGE_SIZE_64K        __GITS_BASER_PSZ(64K)
+#define GITS_BASER_PAGE_SIZE_MASK       __GITS_BASER_PSZ(MASK)
+
+#define GITS_BASER_NR_PAGES(r)          (((r) & 0xff) + 1)
+
+#define GITS_BASER_PHYS_52_to_48(phys) \
+    (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
+#define GITS_BASER_ADDR_48_to_52(baser) \
+    (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48)
+
+#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \
+    (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT)
+
+#define GIC_BASER_SHAREABILITY(reg, type) \
+    (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT)
+
 #define GITS_CBASER_SIZE_MASK           0xff
+#define GITS_CBASER_VALID               (1ULL << 63)
+#define GITS_CBASER_SHAREABILITY_SHIFT  (10)
+#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59)
+#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53)
+#define GITS_CBASER_SHAREABILITY_MASK \
+    GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK)
+#define GITS_CBASER_INNER_CACHEABILITY_MASK \
+    GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK)
+#define GITS_CBASER_OUTER_CACHEABILITY_MASK \
+    GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK)
+#define GITS_CBASER_CACHEABILITY_MASK   GITS_CBASER_INNER_CACHEABILITY_MASK
+
+#define GITS_CBASER_InnerShareable \
+    GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable)
+
+#define GITS_CBASER_ADDRESS(cbaser)     ((cbaser) & GENMASK_ULL(51, 12))
 
 /* ITS command definitions */
 #define ITS_CMD_SIZE                    32
@@ -104,6 +162,8 @@
 #define ITS_DOORBELL_OFFSET             0x10040
 #define GICV3_ITS_SIZE                  SZ_128K
+#define ITS_TRANSLATION_OFFSET          0x10000
+#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0))
 
 #include <xen/device_tree.h>
 #include <xen/rbtree.h>
diff --git a/xen/arch/arm/include/asm/new_vgic.h b/xen/arch/arm/include/asm/new_vgic.h
index 00a5557921..3048f39844 100644
--- a/xen/arch/arm/include/asm/new_vgic.h
+++ b/xen/arch/arm/include/asm/new_vgic.h
@@ -122,7 +122,25 @@ struct vgic_its {
     bool enabled;
     struct vgic_io_device iodev;
 
+    struct domain *domain;
+
+    /* These registers correspond to GITS_BASER{0,1} */
+    u64 baser_device_table;
+    u64 baser_coll_table;
+
+    /* Protects the command queue */
+    struct spinlock cmd_lock;
+    u64 cbaser;
+    u32 creadr;
+    u32 cwriter;
+
+    /* migration ABI revision in use */
+    u32 abi_rev;
+
+    /* Protects the device and collection lists */
+    struct spinlock its_lock;
     struct list_head device_list;
+    struct list_head collection_list;
     paddr_t doorbell_address;
 };
diff --git a/xen/arch/arm/vgic/vgic-its.c b/xen/arch/arm/vgic/vgic-its.c
index 3eceadaa79..5e94f0144d 100644
--- a/xen/arch/arm/vgic/vgic-its.c
+++ b/xen/arch/arm/vgic/vgic-its.c
@@ -27,11 +27,22 @@
 #include "vgic.h"
 #include "vgic-mmio.h"
 
-static unsigned long its_mmio_read_raz(struct domain *d, struct vgic_its *its,
-                                       paddr_t addr, unsigned int len)
-{
-    return 0;
-}
+#define COLLECTION_NOT_MAPPED ((u32)~0)
+
+struct its_collection {
+    struct list_head coll_list;
+
+    u32 collection_id;
+    u32 target_addr;
+};
+
+struct its_ite {
+    struct list_head ite_list;
+
+    struct vgic_irq *irq;
+    struct its_collection *collection;
+    u32 event_id;
+};
 
 /*
  * Find and returns a device in the device table for an ITS.
@@ -48,16 +59,68 @@ static struct vgic_its_device *find_its_device(struct vgic_its *its, u32 device_
     return NULL;
 }
 
+#define VGIC_ITS_TYPER_IDBITS 16
+#define VGIC_ITS_TYPER_DEVBITS 16
+#define VGIC_ITS_TYPER_ITE_SIZE 8
+
+/* Requires the its_lock to be held. */
+static void its_free_ite(struct domain *d, struct its_ite *ite)
+{
+    list_del(&ite->ite_list);
+
+    /* This put matches the get in vgic_add_lpi. */
+    if ( ite->irq )
+    {
+        /* GICv4 style VLPIS are not yet supported */
+        WARN_ON(ite->irq->hw);
+
+        vgic_put_irq(d, ite->irq);
+    }
+
+    xfree(ite);
+}
+
 /* Requires the its_devices_lock to be held. */
 void vgic_its_free_device(struct vgic_its_device *device)
 {
+    struct its_ite *ite, *temp;
     struct domain *d = device->d;
     BUG_ON(!d);
 
+    /*
+     * The spec says that unmapping a device with still valid
+     * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
+     * since we cannot leave the memory unreferenced.
+     */
+    list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
+        its_free_ite(d, ite);
+
     list_del(&device->dev_list);
     xfree(device);
 }
 
+/* its lock must be held */
+static void vgic_its_free_device_list(struct domain *d, struct vgic_its *its)
+{
+    struct vgic_its_device *cur, *temp;
+
+    list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
+        vgic_its_free_device(cur);
+}
+
+/* its lock must be held */
+static void vgic_its_free_collection_list(struct domain *d,
+                                          struct vgic_its *its)
+{
+    struct its_collection *cur, *temp;
+
+    list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
+    {
+        list_del(&cur->coll_list);
+        xfree(cur);
+    }
+}
+
 /* Must be called with its_devices_lock mutex held */
 struct vgic_its_device* vgic_its_get_device(struct domain *d, paddr_t vdoorbell,
                                             uint32_t vdevid)
@@ -119,6 +182,349 @@ void vgic_its_delete_device(struct domain *d, struct vgic_its_device *its_dev)
     list_del(&its_dev->dev_list);
 }
 
+/*
+ * This function is called with the its_cmd lock held, but the ITS data
+ * structure lock dropped.
+ */
+static int vgic_its_handle_command(struct domain *d, struct vgic_its *its,
+                                   u64 *its_cmd)
+{
+
+    return -ENODEV;
+}
+
+#define ITS_CMD_BUFFER_SIZE(baser) ((((baser)&0xff) + 1) << 12)
+#define ITS_CMD_SIZE 32
+#define ITS_CMD_OFFSET(reg) ((reg)&GENMASK(19, 5))
+
+/* Must be called with the cmd_lock held. */
+static void vgic_its_process_commands(struct domain *d, struct vgic_its *its)
+{
+    paddr_t cbaser;
+    u64 cmd_buf[4];
+
+    /* Commands are only processed when the ITS is enabled. */
+    if ( !its->enabled )
+        return;
+
+    cbaser = GITS_CBASER_ADDRESS(its->cbaser);
+
+    while ( its->cwriter != its->creadr )
+    {
+        int ret = access_guest_memory_by_gpa(d, cbaser + its->creadr, cmd_buf,
+                                             ITS_CMD_SIZE, false);
+        /*
+         * If kvm_read_guest() fails, this could be due to the guest
+         * programming a bogus value in CBASER or something else going
+         * wrong from which we cannot easily recover.
+         * According to section 6.3.2 in the GICv3 spec we can just
+         * ignore that command then.
+         */
+        if ( !ret )
+            vgic_its_handle_command(d, its, cmd_buf);
+
+        its->creadr += ITS_CMD_SIZE;
+        if ( its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser) )
+            its->creadr = 0;
+    }
+}
+
+static unsigned long vgic_mmio_read_its_ctlr(struct domain *d,
+                                             struct vgic_its *its, paddr_t addr,
+                                             unsigned int len)
+{
+    u32 reg = 0;
+
+    spin_lock(&its->cmd_lock);
+    if ( its->creadr == its->cwriter )
+        reg |= GITS_CTLR_QUIESCENT;
+    if ( its->enabled )
+        reg |= GITS_CTLR_ENABLE;
+    spin_unlock(&its->cmd_lock);
+
+    return reg;
+}
+
+static void vgic_mmio_write_its_ctlr(struct domain *d, struct vgic_its *its,
+                                     paddr_t addr, unsigned int len,
+                                     unsigned long val)
+{
+    spin_lock(&its->cmd_lock);
+
+    /*
+     * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
+     * device/collection BASER are invalid
+     */
+    if ( !its->enabled && (val & GITS_CTLR_ENABLE) &&
+         (!(its->baser_device_table & GITS_VALID_BIT) ||
+          !(its->baser_coll_table & GITS_VALID_BIT) ||
+          !(its->cbaser & GITS_VALID_BIT)) )
+        goto out;
+
+    its->enabled = !!(val & GITS_CTLR_ENABLE);
+
+    /*
+     * Try to process any pending commands. This function bails out early
+     * if the ITS is disabled or no commands have been queued.
+     */
+    vgic_its_process_commands(d, its);
+
+out:
+    spin_unlock(&its->cmd_lock);
+}
+
+static unsigned long vgic_mmio_read_its_iidr(struct domain *d,
+                                             struct vgic_its *its, paddr_t addr,
+                                             unsigned int len)
+{
+    u32 val;
+
+    val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
+    val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
+    return val;
+}
+
+static unsigned long vgic_mmio_read_its_typer(struct domain *d,
+                                              struct vgic_its *its,
+                                              paddr_t addr, unsigned int len)
+{
+    u64 reg = GITS_TYPER_PHYSICAL;
+
+    /*
+     * We use linear CPU numbers for redistributor addressing,
+     * so GITS_TYPER.PTA is 0.
+     * Also we force all PROPBASER registers to be the same, so
+     * CommonLPIAff is 0 as well.
+     * To avoid memory waste in the guest, we keep the number of IDBits and
+     * DevBits low - as least for the time being.
+     */
+    reg |= GIC_ENCODE_SZ(VGIC_ITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVIDS_SHIFT;
+    reg |= GIC_ENCODE_SZ(VGIC_ITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
+    reg |= GIC_ENCODE_SZ(VGIC_ITS_TYPER_ITE_SIZE, 4) << GITS_TYPER_ITT_SIZE_SHIFT;
+
+    return extract_bytes(reg, addr & 7, len);
+}
+
+static u64 vgic_sanitise_its_baser(u64 reg)
+{
+    reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
+                              GITS_BASER_SHAREABILITY_SHIFT,
+                              vgic_sanitise_shareability);
+    reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
+                              GITS_BASER_INNER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_inner_cacheability);
+    reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
+                              GITS_BASER_OUTER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_outer_cacheability);
+
+    /* We support only one (ITS) page size: 64K */
+    reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
+
+    return reg;
+}
+
+static u64 vgic_sanitise_its_cbaser(u64 reg)
+{
+    reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
+                              GITS_CBASER_SHAREABILITY_SHIFT,
+                              vgic_sanitise_shareability);
+    reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
+                              GITS_CBASER_INNER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_inner_cacheability);
+    reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
+                              GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_outer_cacheability);
+
+    /* Sanitise the physical address to be 64k aligned. */
+    reg &= ~GENMASK_ULL(15, 12);
+
+    return reg;
+}
+
+static unsigned long vgic_mmio_read_its_cbaser(struct domain *d,
+                                               struct vgic_its *its,
+                                               paddr_t addr, unsigned int len)
+{
+    return extract_bytes(its->cbaser, addr & 7, len);
+}
+
+static void vgic_mmio_write_its_cbaser(struct domain *d, struct vgic_its *its,
+                                       paddr_t addr, unsigned int len,
+                                       unsigned long val)
+{
+    /* When GITS_CTLR.Enable is 1, this register is RO. */
+    if ( its->enabled )
+        return;
+
+    spin_lock(&its->cmd_lock);
+    its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
+    its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
+    its->creadr = 0;
+    /*
+     * CWRITER is architecturally UNKNOWN on reset, but we need to reset
+     * it to CREADR to make sure we start with an empty command buffer.
+     */
+    its->cwriter = its->creadr;
+    spin_unlock(&its->cmd_lock);
+}
+
+static unsigned long vgic_mmio_read_its_cwriter(struct domain *d,
+                                                struct vgic_its *its,
+                                                paddr_t addr, unsigned int len)
+{
+    return extract_bytes(its->cwriter, addr & 0x7, len);
+}
+
+/*
+ * By writing to CWRITER the guest announces new commands to be processed.
+ * To avoid any races in the first place, we take the its_cmd lock, which
+ * protects our ring buffer variables, so that there is only one user
+ * per ITS handling commands at a given time.
+ */
+static void vgic_mmio_write_its_cwriter(struct domain *d, struct vgic_its *its,
+                                        paddr_t addr, unsigned int len,
+                                        unsigned long val)
+{
+    u64 reg;
+
+    if ( !its )
+        return;
+
+    spin_lock(&its->cmd_lock);
+
+    reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
+    reg = ITS_CMD_OFFSET(reg);
+    if ( reg >= ITS_CMD_BUFFER_SIZE(its->cbaser) )
+    {
+        spin_unlock(&its->cmd_lock);
+        return;
+    }
+    its->cwriter = reg;
+
+    vgic_its_process_commands(d, its);
+
+    spin_unlock(&its->cmd_lock);
+}
+
+static unsigned long vgic_mmio_read_its_creadr(struct domain *d,
+                                               struct vgic_its *its,
+                                               paddr_t addr, unsigned int len)
+{
+    return extract_bytes(its->creadr, addr & 0x7, len);
+}
+
+#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
+
+static unsigned long vgic_mmio_read_its_baser(struct domain *d,
+                                              struct vgic_its *its,
+                                              paddr_t addr, unsigned int len)
+{
+    uint64_t reg;
+
+    switch ( BASER_INDEX(addr) )
+    {
+    case 0:
+        reg = its->baser_device_table;
+        break;
+    case 1:
+        reg = its->baser_coll_table;
+        break;
+    default:
+        reg = 0;
+        break;
+    }
+
+    return extract_bytes(reg, addr & 7, len);
+}
+
+#define GITS_BASER_RO_MASK (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
+#define VGIC_ITS_BASER_DTE_SIZE 8
+#define VGIC_ITS_BASER_CTE_SIZE 8
+
+static void vgic_mmio_write_its_baser(struct domain *d, struct vgic_its *its,
+                                      paddr_t addr, unsigned int len,
+                                      unsigned long val)
+{
+    u64 entry_size, table_type;
+    u64 reg, *regptr, clearbits = 0;
+
+    /* When GITS_CTLR.Enable is 1, we ignore write accesses. */
+    if ( its->enabled )
+        return;
+
+    switch ( BASER_INDEX(addr) )
+    {
+    case 0:
+        regptr = &its->baser_device_table;
+        entry_size = VGIC_ITS_BASER_DTE_SIZE;
+        table_type = GITS_BASER_TYPE_DEVICE;
+        break;
+    case 1:
+        regptr = &its->baser_coll_table;
+        entry_size = VGIC_ITS_BASER_CTE_SIZE;
+        table_type = GITS_BASER_TYPE_COLLECTION;
+        clearbits = GITS_BASER_INDIRECT;
+        break;
+    default:
+        return;
+    }
+
+    reg = update_64bit_reg(*regptr, addr & 7, len, val);
+    reg &= ~GITS_BASER_RO_MASK;
+    reg &= ~clearbits;
+
+    reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
+    reg |= table_type << GITS_BASER_TYPE_SHIFT;
+    reg = vgic_sanitise_its_baser(reg);
+
+    *regptr = reg;
+
+    if ( !(reg & GITS_BASER_VALID) )
+    {
+        /* Take the its_lock to prevent a race with a save/restore */
+        spin_lock(&its->its_lock);
+        switch ( table_type )
+        {
+        case GITS_BASER_TYPE_DEVICE:
+            vgic_its_free_device_list(d, its);
+            break;
+        case GITS_BASER_TYPE_COLLECTION:
+            vgic_its_free_collection_list(d, its);
+            break;
+        }
+        spin_unlock(&its->its_lock);
+    }
+}
+
+static unsigned long vgic_mmio_read_its_idregs(struct domain *d,
+                                               struct vgic_its *its,
+                                               paddr_t addr, unsigned int len)
+{
+    switch ( addr & 0xffff )
+    {
+    case GITS_PIDR0:
+        return 0x92; /* part number, bits[7:0] */
+    case GITS_PIDR1:
+        return 0xb4; /* part number, bits[11:8] */
+    case GITS_PIDR2:
+        return GIC_PIDR2_ARCH_GICv3 | 0x0b;
+    case GITS_PIDR4:
+        return 0x40; /* This is a 64K software visible page */
+    /* The following are the ID registers for (any) GIC. */
+    case GITS_CIDR0:
+        return 0x0d;
+    case GITS_CIDR1:
+        return 0xf0;
+    case GITS_CIDR2:
+        return 0x05;
+    case GITS_CIDR3:
+        return 0xb1;
+    }
+
+    return 0;
+}
+
 static void its_mmio_write_wi(struct domain *d, struct vgic_its *its,
                               paddr_t addr, unsigned int len, unsigned long val)
 {
@@ -133,28 +539,28 @@ static void its_mmio_write_wi(struct domain *d, struct vgic_its *its,
 static struct vgic_register_region its_registers[] = {
     REGISTER_ITS_DESC(GITS_CTLR,
-        its_mmio_read_raz, its_mmio_write_wi, 4,
+        vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
         VGIC_ACCESS_32bit),
     REGISTER_ITS_DESC(GITS_IIDR,
-        its_mmio_read_raz, its_mmio_write_wi, 4,
+        vgic_mmio_read_its_iidr, its_mmio_write_wi, 4,
         VGIC_ACCESS_32bit),
     REGISTER_ITS_DESC(GITS_TYPER,
-        its_mmio_read_raz, its_mmio_write_wi, 8,
+        vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
         VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
     REGISTER_ITS_DESC(GITS_CBASER,
-        its_mmio_read_raz, its_mmio_write_wi, 8,
+        vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
        VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
     REGISTER_ITS_DESC(GITS_CWRITER,
-        its_mmio_read_raz, its_mmio_write_wi, 8,
+        vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
        VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
     REGISTER_ITS_DESC(GITS_CREADR,
-        its_mmio_read_raz, its_mmio_write_wi, 8,
+        vgic_mmio_read_its_creadr, its_mmio_write_wi, 8,
        VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
     REGISTER_ITS_DESC(GITS_BASER0,
-        its_mmio_read_raz, its_mmio_write_wi, 0x40,
+        vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
        VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
     REGISTER_ITS_DESC(GITS_IDREGS_BASE,
-        its_mmio_read_raz, its_mmio_write_wi, 0x30,
+        vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
         VGIC_ACCESS_32bit),
 };
@@ -184,6 +590,17 @@ out:
     return ret;
 }
 
+#define INITIAL_BASER_VALUE \
+    (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) | \
+     GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner) | \
+     GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) | \
+     GITS_BASER_PAGE_SIZE_64K)
+
+#define INITIAL_PROPBASER_VALUE \
+    (GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) | \
+     GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner) | \
+     GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
+
 static int vgic_its_create(struct domain *d, u64 addr)
 {
     struct vgic_its *its;
@@ -194,11 +611,25 @@ static int vgic_its_create(struct domain *d, u64 addr)
     d->arch.vgic.its = its;
 
+    spin_lock_init(&its->its_lock);
+    spin_lock_init(&its->cmd_lock);
+    its->vgic_its_base = VGIC_ADDR_UNDEF;
+    INIT_LIST_HEAD(&its->device_list);
+    INIT_LIST_HEAD(&its->collection_list);
+    spin_lock_init(&d->arch.vgic.its_devices_lock);
+
+    d->arch.vgic.msis_require_devid = true;
     d->arch.vgic.has_its = true;
     its->enabled = false;
+    its->domain = d;
+
+    its->baser_device_table = INITIAL_BASER_VALUE | ((u64)GITS_BASER_TYPE_DEVICE
+                                                     << GITS_BASER_TYPE_SHIFT);
+    its->baser_coll_table = INITIAL_BASER_VALUE |
+        ((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
+    d->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
 
     vgic_register_its_iodev(d, its, addr);
 
@@ -241,6 +672,14 @@ void vgic_v3_its_free_domain(struct domain *d)
 {
     struct vgic_its *its = d->arch.vgic.its;
 
+    spin_lock(&its->its_lock);
+    spin_lock(&d->arch.vgic.its_devices_lock);
+
+    vgic_its_free_device_list(d, its);
+    vgic_its_free_collection_list(d, its);
+
+    spin_unlock(&d->arch.vgic.its_devices_lock);
+    spin_unlock(&its->its_lock);
     xfree(its);
     d->arch.vgic.its = NULL;
 }
diff --git a/xen/arch/arm/vgic/vgic-mmio.h b/xen/arch/arm/vgic/vgic-mmio.h
index 0a8deb46ba..edf8665cda 100644
--- a/xen/arch/arm/vgic/vgic-mmio.h
+++ b/xen/arch/arm/vgic/vgic-mmio.h
@@ -146,4 +146,11 @@ void vgic_mmio_write_config(struct vcpu *vcpu,
 
 unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
 
+/* extract @num bytes at @offset bytes offset in data */
+unsigned long extract_bytes(uint64_t data, unsigned int offset,
+                            unsigned int num);
+
+uint64_t update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
+                          unsigned long val);
+
 #endif
diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h
index 791c91ebb3..a14b519f77 100644
--- a/xen/arch/arm/vgic/vgic.h
+++ b/xen/arch/arm/vgic/vgic.h
@@ -80,6 +80,11 @@ bool vgic_v3_emulate_reg(struct cpu_user_regs *regs, union hsr hsr);
 unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);
 int vgic_v3_set_redist_base(struct domain *d, u32 index, u64 addr, u32 count);
 int vgic_register_redist_iodev(struct vcpu *vcpu);
+u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
+                        u64 (*sanitise_fn)(u64));
+u64 vgic_sanitise_shareability(u64 field);
+u64 vgic_sanitise_inner_cacheability(u64 field);
+u64 vgic_sanitise_outer_cacheability(u64 field);
 #else
 static inline void vgic_v3_fold_lr_state(struct vcpu *vcpu)
 {
-- 
2.34.1
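
[Editor's note: for readers following the command-queue handling above, here is a small
standalone sketch, separate from the patch, that mirrors the ITS_CMD_BUFFER_SIZE()/
ITS_CMD_OFFSET() arithmetic and the CREADR wrap-around that vgic_its_process_commands()
relies on. The CBASER and CWRITER values below are invented purely for illustration.]

#include <stdint.h>
#include <stdio.h>

#define ITS_CMD_SIZE               32
/* CBASER.Size (bits [7:0]) encodes the number of 4K pages minus one. */
#define ITS_CMD_BUFFER_SIZE(baser) ((((baser) & 0xff) + 1) << 12)
/* CREADR/CWRITER hold a 32-byte aligned offset in bits [19:5]. */
#define ITS_CMD_OFFSET(reg)        ((reg) & 0xfffe0U)

int main(void)
{
    uint64_t cbaser = 3;        /* made-up value: Size = 3 -> 4 pages of queue */
    uint32_t creadr = 0x3fe0;   /* made-up read offset near the end of the queue */
    uint32_t cwriter = 0x40;    /* guest announced a new, already wrapped, offset */
    unsigned int handled = 0;

    /* Same advance-and-wrap logic as the loop in vgic_its_process_commands(). */
    while ( creadr != ITS_CMD_OFFSET(cwriter) )
    {
        /* A real implementation would read and handle one 32-byte command here. */
        creadr += ITS_CMD_SIZE;
        if ( creadr == ITS_CMD_BUFFER_SIZE(cbaser) )
            creadr = 0;
        handled++;
    }

    printf("queue size: %u bytes, commands consumed: %u\n",
           (unsigned int)ITS_CMD_BUFFER_SIZE(cbaser), handled);
    return 0;
}

Each command slot is 32 bytes, so a CBASER Size field of N gives (N + 1) * 4K bytes of
queue space, and CREADR simply wraps back to 0 once it reaches that size.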