diff -r 3057f813da14 xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c
--- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c Thu Nov 29 19:30:33 2007 +0000
+++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c Fri Nov 30 14:19:10 2007 +0100
@@ -49,14 +49,16 @@ int __init get_iommu_last_downstream_bus
     iommu->downstream_bus_present[bus] = 1;
     dev = PCI_SLOT(iommu->first_devfn);
     multi_func = PCI_FUNC(iommu->first_devfn) > 0;
-    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; ++devfn ) {
+    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; ++devfn )
+    {
         /* skipping to next device#? */
-        if ( dev != PCI_SLOT(devfn) ) {
+        if ( dev != PCI_SLOT(devfn) )
+        {
             dev = PCI_SLOT(devfn);
             multi_func = 0;
         }
         func = PCI_FUNC(devfn);
-
+
         if ( !VALID_PCI_VENDOR_ID(
             read_pci_config_16(bus, dev, func, PCI_VENDOR_ID)) )
             continue;
@@ -67,7 +69,8 @@ int __init get_iommu_last_downstream_bus
         multi_func = IS_PCI_MULTI_FUNCTION(hdr_type);
 
         if ( (func == 0 || multi_func) &&
-            IS_PCI_TYPE1_HEADER(hdr_type) ) {
+            IS_PCI_TYPE1_HEADER(hdr_type) )
+        {
             if (!valid_bridge_bus_config(bus, dev, func,
                 &sec_bus, &sub_bus))
                 return -ENODEV;
@@ -86,28 +89,37 @@ int __init get_iommu_capabilities(u8 bus
 int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
             struct amd_iommu *iommu)
 {
-    u32 cap_header, cap_range;
+    u32 cap_header, cap_range, misc_info;
     u64 mmio_bar;
 
     /* remove it when BIOS available */
+#if HACK_BIOS_SETTINGS
+    /* set MMIO base address for IOMMU */
+    static u32 mmio_hi = 0x0;
+    static u32 mmio_lo = 0xF0010001;
+
     write_pci_config(bus, dev, func,
-        cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
+        cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, mmio_hi);
     write_pci_config(bus, dev, func,
-        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
+        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, mmio_lo);
+    mmio_lo += 0x10000;
+#endif
     /* remove it when BIOS available */
 
     mmio_bar = (u64)read_pci_config(bus, dev, func,
         cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
     mmio_bar |= read_pci_config(bus, dev, func,
-        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET) &
-        PCI_CAP_MMIO_BAR_LOW_MASK;
-    iommu->mmio_base_phys = (unsigned long)mmio_bar;
-
-    if ( (mmio_bar == 0) || ( (mmio_bar & 0x3FFF) != 0 ) ) {
-        dprintk(XENLOG_ERR ,
-            "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
+        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET);
+    iommu->mmio_base_phys = mmio_bar & (u64)~0x3FFF;
+
+    if ((mmio_bar & 0x1) == 0 || iommu->mmio_base_phys == 0 )
+    {
+        IOV_DPRINTK(XENLOG_ERR IOVPREFIX,
+            "Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
         return -ENODEV;
     }
+
+    iommu->device_id = (bus << 8) | PCI_DEVFN(dev, func);
 
     cap_header = read_pci_config(bus, dev, func, cap_ptr);
     iommu->revision = get_field_from_reg_u32(cap_header,
@@ -133,6 +145,11 @@ int __init get_iommu_capabilities(u8 bus
                     PCI_CAP_LAST_DEVICE_MASK,
                     PCI_CAP_LAST_DEVICE_SHIFT);
 
+    misc_info = read_pci_config(bus, dev, func,
+                    cap_ptr + PCI_CAP_MISC_INFO_OFFSET);
+    iommu->msi_number = get_field_from_reg_u32(misc_info,
+                    PCI_CAP_MSI_NUMBER_MASK,
+                    PCI_CAP_MSI_NUMBER_SHIFT);
     return 0;
 }
 
@@ -147,16 +164,19 @@ static int __init scan_caps_for_iommu(in
     cap_ptr = read_pci_config_byte(bus, dev, func,
             PCI_CAPABILITY_LIST);
 
     while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
-        count < PCI_MAX_CAP_BLOCKS && !error ) {
+        count < PCI_MAX_CAP_BLOCKS && !error )
+    {
         cap_ptr &= PCI_CAP_PTR_MASK;
         cap_header = read_pci_config(bus, dev, func, cap_ptr);
         cap_id = get_field_from_reg_u32(cap_header,
                 PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
-        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE ) {
+        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE )
+        {
             cap_type = get_field_from_reg_u32(cap_header,
                     PCI_CAP_TYPE_MASK, PCI_CAP_TYPE_SHIFT);
-            if ( cap_type == PCI_CAP_TYPE_IOMMU ) {
+            if ( cap_type == PCI_CAP_TYPE_IOMMU )
+            {
                 error = iommu_detect_callback(
                         bus, dev, func, cap_ptr);
             }
@@ -164,7 +184,8 @@ static int __init scan_caps_for_iommu(in
         cap_ptr = get_field_from_reg_u32(cap_header,
                 PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
-        ++count; }
+        ++count;
+    }
 
     return error;
 }
 
@@ -178,7 +199,8 @@ static int __init scan_functions_for_iom
     func = 0;
     count = 1;
     while ( VALID_PCI_VENDOR_ID(read_pci_config_16(bus, dev, func,
-            PCI_VENDOR_ID)) && !error && func < count ) {
+            PCI_VENDOR_ID)) && !error && func < count )
+    {
         hdr_type = read_pci_config_byte(bus, dev, func,
                 PCI_HEADER_TYPE);
 
@@ -186,7 +208,8 @@ static int __init scan_functions_for_iom
             count = PCI_MAX_FUNC_COUNT;
 
         if ( IS_PCI_TYPE0_HEADER(hdr_type) ||
-            IS_PCI_TYPE1_HEADER(hdr_type) ) {
+            IS_PCI_TYPE1_HEADER(hdr_type) )
+        {
             error = scan_caps_for_iommu(bus, dev, func,
                     iommu_detect_callback);
         }
@@ -201,8 +224,10 @@ int __init scan_for_iommu(iommu_detect_c
 {
     int bus, dev, error = 0;
 
-    for ( bus = 0; bus < PCI_MAX_BUS_COUNT && !error; ++bus ) {
-        for ( dev = 0; dev < PCI_MAX_DEV_COUNT && !error; ++dev ) {
+    for ( bus = 0; bus < PCI_MAX_BUS_COUNT && !error; ++bus )
+    {
+        for ( dev = 0; dev < PCI_MAX_DEV_COUNT && !error; ++dev )
+        {
             error = scan_functions_for_iommu(bus, dev,
                     iommu_detect_callback);
         }
diff -r 3057f813da14 xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-init.c
--- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-init.c Thu Nov 29 19:30:33 2007 +0000
+++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-init.c Fri Nov 30 14:19:10 2007 +0100
@@ -32,9 +32,10 @@ int __init map_iommu_mmio_region(struct
 {
     unsigned long mfn;
 
-    if ( nr_amd_iommus > MAX_AMD_IOMMUS ) {
-        gdprintk(XENLOG_ERR,
-            "IOMMU: nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
+    if ( nr_amd_iommus > MAX_AMD_IOMMUS )
+    {
+        IOV_GDPRINTK(XENLOG_ERR IOVPREFIX,
+            "nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
         return -ENOMEM;
     }
 
@@ -51,7 +52,8 @@ int __init map_iommu_mmio_region(struct
 
 void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
 {
-    if ( iommu->mmio_base ) {
+    if ( iommu->mmio_base )
+    {
         iounmap(iommu->mmio_base);
         iommu->mmio_base = NULL;
     }
@@ -107,18 +109,66 @@ void __init register_iommu_cmd_buffer_in
     writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
 }
 
+void __init register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
+{
+    u64 addr_64, addr_lo, addr_hi;
+    u32 power_of2_entries;
+    u32 entry;
+
+    addr_64 = (u64)virt_to_maddr(iommu->event_log.buffer);
+    addr_lo = addr_64 & DMA_32BIT_MASK;
+    addr_hi = addr_64 >> 32;
+
+    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
+        IOMMU_EVENT_LOG_BASE_LOW_MASK,
+        IOMMU_EVENT_LOG_BASE_LOW_SHIFT, &entry);
+    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);
+
+    power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
+        IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;
+
+    set_field_in_reg_u32((u32)addr_hi, 0,
+        IOMMU_EVENT_LOG_BASE_HIGH_MASK,
+        IOMMU_EVENT_LOG_BASE_HIGH_SHIFT, &entry);
+    set_field_in_reg_u32(power_of2_entries, entry,
+        IOMMU_EVENT_LOG_LENGTH_MASK,
+        IOMMU_EVENT_LOG_LENGTH_SHIFT, &entry);
+    writel(entry, iommu->mmio_base+IOMMU_EVENT_LOG_BASE_HIGH_OFFSET);
+}
+
 static void __init set_iommu_translation_control(struct amd_iommu *iommu,
             int enable)
 {
     u32 entry;
 
     entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
+        IOMMU_CONTROL_DISABLED, entry,
+        IOMMU_CONTROL_ISOCHRONOUS_MASK,
+        IOMMU_CONTROL_ISOCHRONOUS_SHIFT, &entry);
+
+    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
+        IOMMU_CONTROL_DISABLED, entry,
+        IOMMU_CONTROL_COHERENT_MASK,
+        IOMMU_CONTROL_COHERENT_SHIFT, &entry);
+
+    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
+        IOMMU_CONTROL_DISABLED, entry,
+        IOMMU_CONTROL_PASS_POSTED_WRITE_MASK,
+        IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT, &entry);
+
+    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
+        IOMMU_CONTROL_DISABLED, entry,
+        IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_MASK,
+        IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT, &entry);
+
     set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
-        IOMMU_CONTROL_ENABLED, entry,
+        IOMMU_CONTROL_DISABLED, entry,
         IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
         IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
-    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
-        IOMMU_CONTROL_ENABLED, entry,
+
+    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
+        IOMMU_CONTROL_DISABLED, entry,
         IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
         IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
     writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
@@ -137,11 +187,25 @@ static void __init set_iommu_command_buf
     writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
 }
 
+static void __init set_iommu_event_log_control(struct amd_iommu *iommu,
+            int enable)
+{
+    u32 entry;
+
+    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
+        IOMMU_CONTROL_DISABLED, entry,
+        IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK,
+        IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT, &entry);
+    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+}
+
 void __init enable_iommu(struct amd_iommu *iommu)
 {
     set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
+    set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
     set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
-    printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
-}
-
-
+    printk(IOVPREFIX "IOMMU #%d: Enabled\n", nr_amd_iommus);
+}
+
+
diff -r 3057f813da14 xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c
--- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c Thu Nov 29 19:30:33 2007 +0000
+++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c Fri Nov 30 14:19:10 2007 +0100
@@ -33,17 +33,20 @@ static int queue_iommu_command(struct am
     BUG_ON( !iommu || !cmd );
 
     tail = iommu->cmd_buffer_tail;
-    if ( ++tail == iommu->cmd_buffer.entries ) {
+    if ( ++tail == iommu->cmd_buffer.entries )
+    {
         tail = 0;
     }
 
     head = get_field_from_reg_u32(
            readl(iommu->mmio_base+IOMMU_CMD_BUFFER_HEAD_OFFSET),
            IOMMU_CMD_BUFFER_HEAD_MASK,
            IOMMU_CMD_BUFFER_HEAD_SHIFT);
-    if ( head != tail ) {
+    if ( head != tail )
+    {
         cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
            (iommu->cmd_buffer_tail * IOMMU_CMD_BUFFER_ENTRY_SIZE));
-        for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; ++i ) {
+        for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; ++i )
+        {
             cmd_buffer[i] = cmd[i];
         }
@@ -70,7 +73,8 @@ int send_iommu_command(struct amd_iommu
 {
     BUG_ON( !iommu || !cmd );
 
-    if ( queue_iommu_command(iommu, cmd) ) {
+    if ( queue_iommu_command(iommu, cmd) )
+    {
         commit_iommu_command_buffer(iommu);
         return 1;
     }
@@ -113,7 +117,25 @@ static void invalidate_iommu_page(struct
     send_iommu_command(iommu, cmd);
 }
 
-static void flush_command_buffer(struct amd_iommu *iommu)
+void invalidate_dev_table_entry(struct amd_iommu *iommu,
+            u16 device_id)
+{
+    u32 cmd[4], entry;
+
+    cmd[3] = cmd[2] = 0;
+    set_field_in_reg_u32(device_id, 0,
+        IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK,
+        IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry);
+    cmd[0] = entry;
+
+    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0,
+        IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, &entry);
+    cmd[1] = entry;
+
+    send_iommu_command(iommu, cmd);
+}
+
+void flush_command_buffer(struct amd_iommu *iommu)
 {
     u32 cmd[4], status;
     int loop_count, comp_wait;
@@ -135,7 +157,8 @@ static void flush_command_buffer(struct
     send_iommu_command(iommu, cmd);
 
     /* wait for 'ComWaitInt' to signal comp#endifletion? */
-    if ( amd_iommu_poll_comp_wait ) {
+    if ( amd_iommu_poll_comp_wait )
+    {
         loop_count = amd_iommu_poll_comp_wait;
         do {
             status = readl(iommu->mmio_base +
@@ -146,13 +169,15 @@ static void flush_command_buffer(struct
             --loop_count;
         } while ( loop_count && !comp_wait );
 
-        if ( comp_wait ) {
+        if ( comp_wait )
+        {
             /* clear 'ComWaitInt' in status register (WIC) */
             status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
             writel(status, iommu->mmio_base +
                 IOMMU_STATUS_MMIO_OFFSET);
-        } else
-            dprintk(XENLOG_WARNING, "AMD IOMMU: %s(): Warning:"
+        }
+        else
+            IOV_GDPRINTK(XENLOG_WARNING IOVPREFIX, "%s(): Warning:"
                 " ComWaitInt bit did not assert!\n", __FUNCTION__);
     }
 
@@ -185,6 +210,13 @@ static void set_page_table_entry_present
         IOMMU_CONTROL_DISABLED, entry,
         IOMMU_PTE_IO_READ_PERMISSION_MASK,
         IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry);
+
+/* force coherency in PTE?? */
+#if SET_FC_IN_PTE
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+        IOMMU_PTE_FC_MASK,
+        IOMMU_PTE_FC_SHIFT, &entry);
+#endif
     pte[1] = entry;
 
     set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
@@ -238,7 +270,7 @@ void amd_iommu_set_dev_table_entry(u32 *
     u64 addr_hi, addr_lo;
     u32 entry;
 
-    dte[6] = dte[5] = dte[4] = 0;
+    dte[7] = dte[6] = dte[5] = dte[4] = 0;
 
     set_field_in_reg_u32(IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED, 0,
         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
@@ -278,7 +310,17 @@ void amd_iommu_set_dev_table_entry(u32 *
     dte[0] = entry;
 }
 
-static void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry)
+int amd_iommu_is_dte_page_translation_valid(u32 *entry)
+{
+    return (get_field_from_reg_u32(entry[0],
+        IOMMU_DEV_TABLE_VALID_MASK,
+        IOMMU_DEV_TABLE_VALID_SHIFT) &&
+        get_field_from_reg_u32(entry[0],
+        IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
+        IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
+}
+
+void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry)
 {
     u64 addr_lo, addr_hi, ptr;
 
@@ -322,16 +364,19 @@ static void *get_pte_from_page_tables(vo
             break;
         if ( !pde )
             return NULL;
-        if ( !amd_iommu_is_pte_present(pde) ) {
+        if ( !amd_iommu_is_pte_present(pde) )
+        {
             next_table = alloc_xenheap_page();
             if ( next_table == NULL )
                 return NULL;
             memset(next_table, 0, PAGE_SIZE);
-            if ( *(u64*)(pde) == 0 ) {
+            if ( *(u64*)(pde) == 0 )
+            {
                 next_ptr = (u64)virt_to_maddr(next_table);
                 amd_iommu_set_page_directory_entry((u32 *)pde,
                     next_ptr, level - 1);
-            } else
+            }
+            else
                 free_xenheap_page(next_table);
         }
         table = amd_iommu_get_vptr_from_page_table_entry(pde);
@@ -361,16 +406,19 @@ int amd_iommu_map_page(struct domain *d,
     pte = get_pte_from_page_tables(hd->root_table,
             hd->paging_mode, gfn);
 
-    if ( pte != 0 ) {
+    if ( pte != 0 )
+    {
         set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
         spin_unlock_irqrestore(&hd->mapping_lock, flags);
         return 0;
-    } else {
-        dprintk(XENLOG_ERR,
-            "%s() AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n",
+    }
+    else
+    {
+        IOV_DPRINTK(XENLOG_ERR IOVPREFIX,
+            "%s(): Invalid IO pagetable entry gfn = %lx\n",
            __FUNCTION__, gfn);
         spin_unlock_irqrestore(&hd->mapping_lock, flags);
-        return -EIO;
+        return -ENOMEM;
     }
 }
 
@@ -378,12 +426,13 @@ int amd_iommu_unmap_page(struct domain *
 {
     void *pte;
     unsigned long flags;
-    u64 io_addr = gfn;
+    u64 io_addr;
     int requestor_id;
     struct amd_iommu *iommu;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
-    BUG_ON( !hd->root_table );
+    if ( !hd->root_table )
+        return 0;
 
     requestor_id = hd->domain_id;
     io_addr = (u64)(gfn << PAGE_SHIFT);
@@ -392,14 +441,15 @@ int amd_iommu_unmap_page(struct domain *
     pte = get_pte_from_page_tables(hd->root_table,
             hd->paging_mode, gfn);
 
-    if ( pte != 0 ) {
+    if ( pte != 0 )
+    {
         /* mark PTE as 'page not present' */
         clear_page_table_entry_present((u32 *)pte);
         spin_unlock_irqrestore(&hd->mapping_lock, flags);
 
         /* send INVALIDATE_IOMMU_PAGES command */
-        for_each_amd_iommu(iommu) {
-
+        for_each_amd_iommu(iommu)
+        {
             spin_lock_irqsave(&iommu->lock, flags);
 
             invalidate_iommu_page(iommu, io_addr, requestor_id);
@@ -407,13 +457,14 @@ int amd_iommu_unmap_page(struct domain *
 
             spin_unlock_irqrestore(&iommu->lock, flags);
         }
 
-        return 0;
-    } else {
-        dprintk(XENLOG_ERR,
-            "%s() AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n",
+    }
+    else
+    {
+        IOV_DPRINTK(XENLOG_ERR IOVPREFIX,
+            "%s() Invalid IO pagetable entry gfn = %lx\n",
            __FUNCTION__, gfn);
         spin_unlock_irqrestore(&hd->mapping_lock, flags);
-        return -EIO;
-    }
-}
+        return -ENOMEM;
+    }
+}
diff -r 3057f813da14 xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c
--- a/xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c Thu Nov 29 19:30:33 2007 +0000
+++ b/xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c Fri Nov 30 14:19:10 2007 +0100
@@ -28,6 +28,7 @@ struct list_head amd_iommu_head;
 struct list_head amd_iommu_head;
 long amd_iommu_poll_comp_wait = COMPLETION_WAIT_DEFAULT_POLLING_COUNT;
 static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
+static long amd_iommu_event_log_entries = IOMMU_EVENT_LOG_DEFAULT_ENTRIES;
 int nr_amd_iommus = 0;
 
 /* will set if amd-iommu HW is found */
@@ -36,32 +37,27 @@ static int enable_amd_iommu = 0;
 static int enable_amd_iommu = 0;
 boolean_param("enable_amd_iommu", enable_amd_iommu);
 
-static void deallocate_domain_page_tables(struct hvm_iommu *hd)
+static void deallocate_domain_resources(struct hvm_iommu *hd)
 {
     if ( hd->root_table )
         free_xenheap_page(hd->root_table);
 }
 
-static void deallocate_domain_resources(struct hvm_iommu *hd)
-{
-    deallocate_domain_page_tables(hd);
-}
-
 static void __init init_cleanup(void)
 {
     struct amd_iommu *iommu;
 
-    dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
-
-    for_each_amd_iommu(iommu) {
+    IOV_DPRINTK(XENLOG_ERR IOVPREFIX, "%s()\n", __FUNCTION__);
+
+    for_each_amd_iommu(iommu)
         unmap_iommu_mmio_region(iommu);
-    }
 }
 
 static void __init deallocate_iommu_table_struct(
             struct table_struct *table)
 {
-    if (table->buffer) {
+    if ( table->buffer )
+    {
         free_xenheap_pages(table->buffer,
             get_order_from_bytes(table->alloc_size));
         table->buffer = NULL;
@@ -70,17 +66,22 @@ static void __init deallocate_iommu_tabl
 static void __init deallocate_iommu_resources(struct amd_iommu *iommu)
 {
+    deallocate_iommu_table_struct(&iommu->cmd_buffer);
+    deallocate_iommu_table_struct(&iommu->event_log);
+}
+
+static void __init detect_cleanup(void)
+{
+    struct amd_iommu *iommu;
+
+    IOV_DPRINTK(XENLOG_ERR IOVPREFIX, "%s()\n", __FUNCTION__);
+
+    iommu = list_entry(amd_iommu_head.next, struct amd_iommu, list);
     deallocate_iommu_table_struct(&iommu->dev_table);
-    deallocate_iommu_table_struct(&iommu->cmd_buffer);;
-}
-
-static void __init detect_cleanup(void)
-{
-    struct amd_iommu *iommu;
-
-    dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
%s()\n", __FUNCTION__); - - for_each_amd_iommu(iommu) { + + while ( !list_empty(&amd_iommu_head) ) + { + iommu = list_entry(amd_iommu_head.next, struct amd_iommu, list); list_del(&iommu->list); deallocate_iommu_resources(iommu); xfree(iommu); @@ -89,9 +90,26 @@ static void __init detect_cleanup(void) static int requestor_id_from_bdf(int bdf) { +#if ENABLE_HT1100 + /* IOMMU0 (Legacy/PCI): 0/6/0..0/15/0 -> 0/1/0 */ + if (bdf >= 0x30 && bdf <= 0x78) + return 0x08; + + /* IOMMU2 (USB): 2/12/0,1,2 -> 2/12/0 */ + if (bdf >= 0x260 && bdf <= 0x262) + return 0x260; + + /* IOMMU2 (USB): 2/13/0,1,2 -> 2/13/0 */ + if (bdf >= 0x268 && bdf <= 0x26a) + return 0x268; + + /* IOMMU2 (USB): 2/14/0,1,2 -> 2/14/0 */ + if (bdf >= 0x270 && bdf <= 0x272) + return 0x270; +#endif /* HACK - HACK */ /* account for possible 'aliasing' by parent device */ - return bdf; + return bdf; } static int __init allocate_iommu_table_struct(struct table_struct *table, @@ -100,8 +118,9 @@ static int __init allocate_iommu_table_s table->buffer = (void *) alloc_xenheap_pages( get_order_from_bytes(table->alloc_size)); - if ( !table->buffer ) { - dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating %s\n", name); + if ( !table->buffer ) + { + IOV_DPRINTK(XENLOG_ERR IOVPREFIX, "Error allocating %s\n", name); return -ENOMEM; } memset(table->buffer, 0, table->alloc_size); @@ -111,18 +130,6 @@ static int __init allocate_iommu_table_s static int __init allocate_iommu_resources(struct amd_iommu *iommu) { - /* allocate 'device table' on a 4K boundary */ - iommu->dev_table.alloc_size = - PAGE_ALIGN(((iommu->last_downstream_bus + 1) * - IOMMU_DEV_TABLE_ENTRIES_PER_BUS) * - IOMMU_DEV_TABLE_ENTRY_SIZE); - iommu->dev_table.entries = - iommu->dev_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE; - - if (allocate_iommu_table_struct(&iommu->dev_table, - "Device Table") != 0) - goto error_out; - /* allocate 'command buffer' in power of 2 increments of 4K */ iommu->cmd_buffer_tail = 0; iommu->cmd_buffer.alloc_size = @@ -137,6 +144,20 @@ static int __init allocate_iommu_resourc "Command Buffer") != 0 ) goto error_out; + /* allocate 'event log' in power of 2 increments of 4K */ + iommu->event_log_head = 0; + iommu->event_log.alloc_size = + PAGE_SIZE << get_order_from_bytes( + PAGE_ALIGN(amd_iommu_event_log_entries * + IOMMU_EVENT_LOG_ENTRY_SIZE)); + + iommu->event_log.entries = + iommu->event_log.alloc_size / IOMMU_EVENT_LOG_ENTRY_SIZE; + + if ( allocate_iommu_table_struct(&iommu->event_log, + "Event Log") != 0 ) + goto error_out; + return 0; error_out: @@ -149,8 +170,9 @@ int iommu_detect_callback(u8 bus, u8 dev struct amd_iommu *iommu; iommu = (struct amd_iommu *) xmalloc(struct amd_iommu); - if ( !iommu ) { - dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating amd_iommu\n"); + if ( !iommu ) + { + IOV_DPRINTK(XENLOG_ERR IOVPREFIX, "Error allocating amd_iommu\n"); return -ENOMEM; } memset(iommu, 0, sizeof(struct amd_iommu)); @@ -162,12 +184,12 @@ int iommu_detect_callback(u8 bus, u8 dev if ( get_iommu_last_downstream_bus(iommu) != 0 ) goto error_out; - list_add_tail(&iommu->list, &amd_iommu_head); - /* allocate resources for this IOMMU */ if (allocate_iommu_resources(iommu) != 0) goto error_out; + list_add_tail(&iommu->list, &amd_iommu_head); + return 0; error_out: @@ -180,14 +202,21 @@ static int __init amd_iommu_init(void) struct amd_iommu *iommu; unsigned long flags; - for_each_amd_iommu(iommu) { + for_each_amd_iommu(iommu) + { spin_lock_irqsave(&iommu->lock, flags); /* register IOMMU data strucures in MMIO space */ - if (map_iommu_mmio_region(iommu) != 0) + 
+        if (map_iommu_mmio_region(iommu) != 0)
+        {
+            IOV_DPRINTK(XENLOG_ERR IOVPREFIX,
+                "Initialize IOMMU #%d failed!\n", nr_amd_iommus);
+            spin_unlock_irqrestore(&iommu->lock, flags);
             goto error_out;
+        }
         register_iommu_dev_table_in_mmio_space(iommu);
         register_iommu_cmd_buffer_in_mmio_space(iommu);
+        register_iommu_event_log_in_mmio_space(iommu);
 
         /* enable IOMMU translation services */
         enable_iommu(iommu);
@@ -209,13 +238,16 @@ struct amd_iommu *find_iommu_for_device(
 {
     struct amd_iommu *iommu;
 
-    for_each_amd_iommu(iommu) {
-        if ( bus == iommu->root_bus ) {
+    for_each_amd_iommu(iommu)
+    {
+        if ( bus == iommu->root_bus )
+        {
             if ( devfn >= iommu->first_devfn &&
                 devfn <= iommu->last_devfn )
                 return iommu;
         }
-        else if ( bus <= iommu->last_downstream_bus ) {
+        else if ( bus <= iommu->last_downstream_bus )
+        {
             if ( iommu->downstream_bus_present[bus] )
                 return iommu;
         }
@@ -224,7 +256,7 @@ struct amd_iommu *find_iommu_for_device(
     return NULL;
 }
 
-void amd_iommu_setup_domain_device(
+static void amd_iommu_setup_domain_device(
     struct domain *domain, struct amd_iommu *iommu, int requestor_id)
 {
     void *dte;
@@ -232,22 +264,27 @@ void amd_iommu_setup_domain_device(
     unsigned long flags;
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
-    BUG_ON( !hd->root_table||!hd->paging_mode );
+    BUG_ON( !hd->root_table || !hd->paging_mode );
 
     root_ptr = (u64)virt_to_maddr(hd->root_table);
     dte = iommu->dev_table.buffer +
        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
 
-    spin_lock_irqsave(&iommu->lock, flags);
-
-    amd_iommu_set_dev_table_entry((u32 *)dte,
-        root_ptr, hd->domain_id, hd->paging_mode);
-
-    dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
+    if ( !(amd_iommu_is_dte_page_translation_valid((u32 *)dte)) )
+    {
+        spin_lock_irqsave(&iommu->lock, flags);
+
+        amd_iommu_set_dev_table_entry((u32 *)dte,
+            root_ptr, hd->domain_id, hd->paging_mode);
+        invalidate_dev_table_entry(iommu, requestor_id);
+        flush_command_buffer(iommu);
+
+        IOV_DPRINTK(XENLOG_INFO IOVPREFIX, "update DTE 0x%x, "
            "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
            requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
 
-    spin_unlock_irqrestore(&iommu->lock, flags);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+    }
 }
 
 void __init amd_iommu_setup_dom0_devices(void)
@@ -259,9 +296,12 @@ void __init amd_iommu_setup_dom0_devices
     u32 l;
     int req_id, bdf;
 
-    for ( bus = 0; bus < 256; bus++ ) {
-        for ( dev = 0; dev < 32; dev++ ) {
-            for ( func = 0; func < 8; func++ ) {
+    for ( bus = 0; bus < 256; bus++ )
+    {
+        for ( dev = 0; dev < 32; dev++ )
+        {
+            for ( func = 0; func < 8; func++ )
+            {
                 l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
                 /* some broken boards return 0 or ~0 if a slot is empty: */
                 if ( l == 0xffffffff || l == 0x00000000 ||
@@ -284,57 +324,112 @@ void __init amd_iommu_setup_dom0_devices
     }
 }
 
+static int amd_iommu_allocate_dev_table(void)
+{
+    int last_bus;
+    struct table_struct dev_table;
+    struct amd_iommu *iommu;
+
+    if ( iommu_found() )
+    {
+        /* allocate common 'device table' on a 4K boundary */
+        last_bus = 0;
+        for_each_amd_iommu(iommu)
+        {
+            if ( iommu->last_downstream_bus > last_bus )
+                last_bus = iommu->last_downstream_bus;
+        }
+        dev_table.alloc_size =
+            PAGE_ALIGN(((last_bus + 1) *
+            IOMMU_DEV_TABLE_ENTRIES_PER_BUS) *
+            IOMMU_DEV_TABLE_ENTRY_SIZE);
+        dev_table.entries =
+            dev_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE;
+        dev_table.buffer = (void *) alloc_xenheap_pages(
+            get_order_from_bytes(dev_table.alloc_size));
+
+        if (!dev_table.buffer)
+            return -ENOMEM;
+
+        memset(dev_table.buffer, 0, dev_table.alloc_size);
+
+        /* assign common 'device table' to each IOMMU */
+        for_each_amd_iommu(iommu)
+        {
+            iommu->dev_table.alloc_size = dev_table.alloc_size;
+            iommu->dev_table.entries = dev_table.entries;
+            iommu->dev_table.buffer = dev_table.buffer;
+        }
+    }
+
+    return 0;
+}
+
 int amd_iommu_detect(void)
 {
-    unsigned long i;
-
-    if ( !enable_amd_iommu ) {
-        printk("AMD IOMMU: Disabled\n");
+    if ( !enable_amd_iommu )
+    {
+        printk(IOVPREFIX "IOMMU disabled!\n");
         return 0;
     }
 
     INIT_LIST_HEAD(&amd_iommu_head);
 
-    if ( scan_for_iommu(iommu_detect_callback) != 0 ) {
-        dprintk(XENLOG_ERR, "AMD IOMMU: Error detection\n");
-        goto error_out;
-    }
-
-    if ( !iommu_found() ) {
-        printk("AMD IOMMU: Not found!\n");
+    if ( scan_for_iommu(iommu_detect_callback) != 0 )
+    {
+        IOV_DPRINTK(XENLOG_ERR IOVPREFIX, "IOMMU detection failed!\n");
+        goto error_out;
+    }
+
+    if ( !iommu_found() )
+    {
+        printk(IOVPREFIX "IOMMU Not found!\n");
         return 0;
     }
 
-    if ( amd_iommu_init() != 0 ) {
-        dprintk(XENLOG_ERR, "AMD IOMMU: Error initialization\n");
+    if ( amd_iommu_allocate_dev_table() != 0 )
+    {
+        IOV_DPRINTK(XENLOG_ERR IOVPREFIX, "Device table allocation failed!\n");
+        goto error_out;
+    }
+
+    if ( amd_iommu_init() != 0 )
+    {
+        IOV_DPRINTK(XENLOG_ERR IOVPREFIX, "IOMMU initialization failed!\n");
         goto error_out;
     }
 
     if ( amd_iommu_domain_init(dom0) != 0 )
         goto error_out;
 
-    /* setup 1:1 page table for dom0 */
-    for ( i = 0; i < max_page; i++ )
-        amd_iommu_map_page(dom0, i, i);
-
-    amd_iommu_setup_dom0_devices();
-    return 0;
-
-error_out:
-    detect_cleanup();
-    return -ENODEV;
+    return 0;
+
+error_out:
+    detect_cleanup();
+    return -ENODEV;
 }
 
 static int allocate_domain_resources(struct hvm_iommu *hd)
 {
     /* allocate root table */
-    hd->root_table = (void *)alloc_xenheap_page();
+    unsigned long flags;
+
+    spin_lock_irqsave(&hd->mapping_lock, flags);
     if ( !hd->root_table )
-        return -ENOMEM;
-    memset((u8*)hd->root_table, 0, PAGE_SIZE);
-
-    return 0;
+    {
+        hd->root_table = (void *)alloc_xenheap_page();
+        if ( !hd->root_table )
+            goto error_out;
+        memset((u8*)hd->root_table, 0, PAGE_SIZE);
+    }
+    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+
+    return 0;
+
+error_out:
+    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    return -ENOMEM;
 }
@@ -346,44 +441,61 @@ static int get_paging_mode(unsigned long
     if ( entries > max_page )
         entries = max_page;
 
-    while ( entries > PTE_PER_TABLE_SIZE ) {
+    while ( entries > PTE_PER_TABLE_SIZE )
+    {
         entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
         ++level;
         if ( level > 6 )
             return -ENOMEM;
     }
 
-    dprintk(XENLOG_INFO, "AMD IOMMU: paging mode = %d\n", level);
-
     return level;
 }
 
 int amd_iommu_domain_init(struct domain *domain)
 {
+    unsigned long i;
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
+    if ( !amd_iommu_enabled )
+        return 0;
+
+    hd->domain_id = domain->domain_id;
     spin_lock_init(&hd->mapping_lock);
     spin_lock_init(&hd->iommu_list_lock);
     INIT_LIST_HEAD(&hd->pdev_list);
 
-    /* allocate page directroy */
-    if ( allocate_domain_resources(hd) != 0 ) {
-        dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
-        goto error_out;
-    }
-
     if ( is_hvm_domain(domain) )
+    {
+        if ( allocate_domain_resources(hd) != 0 )
+            goto error_out;
         hd->paging_mode = IOMMU_PAGE_TABLE_LEVEL_4;
+    }
     else
-        hd->paging_mode = get_paging_mode(max_page);
-
-    hd->domain_id = domain->domain_id;
-
-    return 0;
-
-error_out:
+    {
+        if ( domain == dom0 )
+        {
+            if ( allocate_domain_resources(hd) != 0 )
+                goto error_out;
+            hd->paging_mode = get_paging_mode(max_page);
+
+            /* setup 1:1 page table for dom0 */
+            for ( i = 0; i < max_page; i++ )
+                amd_iommu_map_page(domain, i, i);
+
+            amd_iommu_setup_dom0_devices();
+        }
+        else
+            hd->paging_mode = get_paging_mode(max_page);
+    }
+
+    IOV_DPRINTK(XENLOG_INFO IOVPREFIX,
+        "iommu domain %d initialized\n", hd->domain_id);
+    return 0;
+
+error_out:
+    IOV_DPRINTK(XENLOG_ERR IOVPREFIX,
+        "failed to initialize iommu domain %d \n", hd->domain_id);
     deallocate_domain_resources(hd);
     return -ENOMEM;
 }
-
-
diff -r 3057f813da14 xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h Thu Nov 29 19:30:33 2007 +0000
+++ b/xen/include/asm-x86/amd-iommu.h Fri Nov 30 14:19:10 2007 +0100
@@ -32,6 +32,11 @@ extern struct list_head amd_iommu_head;
 extern struct list_head amd_iommu_head;
 extern int __init amd_iommu_detect(void);
 
+extern int amd_iommu_domain_init(struct domain *domain);
+extern int amd_iommu_map_page(struct domain *d, unsigned long gfn,
+        unsigned long mfn);
+extern int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+
 struct table_struct {
     void *buffer;
@@ -47,6 +52,9 @@ struct amd_iommu {
     int ht_tunnel_support;
     int not_present_cached;
     u8 revision;
+    u8 msi_number;
+
+    u16 device_id;
 
     u8 root_bus;
     u8 first_devfn;
@@ -61,6 +69,8 @@ struct amd_iommu {
     struct table_struct dev_table;
     struct table_struct cmd_buffer;
     u32 cmd_buffer_tail;
+    struct table_struct event_log;
+    u32 event_log_head;
 
     int exclusion_enabled;
     unsigned long exclusion_base;
diff -r 3057f813da14 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h Thu Nov 29 19:30:33 2007 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h Fri Nov 30 14:19:10 2007 +0100
@@ -35,6 +35,9 @@
 /* IOMMU Command Buffer entries: in power of 2 increments, minimum of 256 */
 #define IOMMU_CMD_BUFFER_DEFAULT_ENTRIES    512
 
+/* IOMMU Event Log entries: in power of 2 increments, minimum of 256 */
+#define IOMMU_EVENT_LOG_DEFAULT_ENTRIES     512
+
 #define BITMAP_ENTRIES_PER_BYTE     8
 
 #define PTE_PER_TABLE_SHIFT     9
@@ -118,6 +121,10 @@
 #define PCI_CAP_LAST_DEVICE_MASK    0xFF000000
 #define PCI_CAP_LAST_DEVICE_SHIFT   24
 
+#define PCI_CAP_MISC_INFO_OFFSET    0x10
+#define PCI_CAP_MSI_NUMBER_MASK     0x0000001F
+#define PCI_CAP_MSI_NUMBER_SHIFT    0
+
 /* Device Table */
 #define IOMMU_DEV_TABLE_BASE_LOW_OFFSET     0x00
 #define IOMMU_DEV_TABLE_BASE_HIGH_OFFSET    0x04
@@ -262,6 +269,10 @@
 #define IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT    12
 #define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK    0xFFFFFFFF
 #define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT   0
+
+/* INVALIDATE_DEVTAB_ENTRY command */
+#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK   0x0000FFFF
+#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT  0
 
 /* Event Log */
 #define IOMMU_EVENT_LOG_BASE_LOW_OFFSET     0x10
@@ -415,5 +426,16 @@
 #define IOMMU_PAGE_TABLE_LEVEL_4    4
 #define IOMMU_IO_WRITE_ENABLED      1
 #define IOMMU_IO_READ_ENABLED       1
+#define IOVPREFIX                   "AMD IOV: "
+#define CONFIG_AMD_IOMMU_DEBUG      1
+
+/* HT1100 enablement */
+#define ENABLE_HT1100               1
+#define SET_FC_IN_PTE               1
+#define HACK_BIOS_SETTINGS          1
+#define HACK_MMIO_ON_CPU1           0
+#define HACK_ISOC_SETTINGS          0
+#define HACK_ISOC_ON_CPU1           0
+/* HT1100 enablement */
 
 #endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
diff -r 3057f813da14 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Thu Nov 29 19:30:33 2007 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Fri Nov 30 14:19:10 2007 +0100
@@ -27,11 +27,23 @@
     list_for_each_entry(amd_iommu, \
         &amd_iommu_head, list)
 
+#define for_each_pdev(domain, pdev) \
+    list_for_each_entry(pdev, \
+        &(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
+
 #define DMA_32BIT_MASK      0x00000000ffffffffULL
 #define PAGE_ALIGN(addr)    (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 #define PAGE_SHIFT_4K       (12)
 #define PAGE_SIZE_4K        (1UL << PAGE_SHIFT_4K)
 #define PAGE_MASK_4K        (((u64)-1) << PAGE_SHIFT_4K)
+
+#ifdef CONFIG_AMD_IOMMU_DEBUG
+#define IOV_DPRINTK     dprintk
+#define IOV_GDPRINTK    gdprintk
+#else
+#define IOV_DPRINTK(s, a...)
+#define IOV_GDPRINTK(s, a...)
+#endif
 
 typedef int (*iommu_detect_callback_ptr_t)(u8 bus, u8 dev, u8 func, u8 cap_ptr);
@@ -46,24 +58,23 @@ void __init unmap_iommu_mmio_region(stru
 void __init unmap_iommu_mmio_region(struct amd_iommu *iommu);
 void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu);
 void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu);
+void __init register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu);
+
 void __init enable_iommu(struct amd_iommu *iommu);
 
 /* mapping functions */
-int amd_iommu_map_page(struct domain *d, unsigned long gfn,
-        unsigned long mfn);
-int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
 
 /* device table functions */
 void amd_iommu_set_dev_table_entry(u32 *dte,
        u64 root_ptr, u16 domain_id, u8 paging_mode);
+void invalidate_dev_table_entry(struct amd_iommu *iommu,
+        u16 device_id);
+int amd_iommu_is_dte_page_translation_valid(u32 *entry);
 
 /* send cmd to iommu */
 int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
-
-/* iommu domain funtions */
-int amd_iommu_domain_init(struct domain *domain);
-void amd_iommu_setup_domain_device(struct domain *domain,
-        struct amd_iommu *iommu, int requestor_id);
+void flush_command_buffer(struct amd_iommu *iommu);
 
 /* find iommu for bdf */
 struct amd_iommu *find_iommu_for_device(int bus, int devfn);