diff -r f8624b023d67 xen/drivers/acpi/tables.c
--- a/xen/drivers/acpi/tables.c Thu Feb 28 10:31:45 2008 +0000
+++ b/xen/drivers/acpi/tables.c Thu Feb 28 12:06:48 2008 +0100
@@ -60,6 +60,7 @@ static char *acpi_table_signatures[ACPI_
     [ACPI_HPET]                 = "HPET",
     [ACPI_MCFG]                 = "MCFG",
     [ACPI_DMAR]                 = "DMAR",
+    [ACPI_IVRS]                 = "IVRS",
 };
 
 static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
diff -r f8624b023d67 xen/drivers/passthrough/amd/Makefile
--- a/xen/drivers/passthrough/amd/Makefile Thu Feb 28 10:31:45 2008 +0000
+++ b/xen/drivers/passthrough/amd/Makefile Thu Feb 28 12:06:48 2008 +0100
@@ -2,3 +2,4 @@ obj-y += iommu_init.o
 obj-y += iommu_init.o
 obj-y += iommu_map.o
 obj-y += pci_amd_iommu.o
+obj-y += iommu_acpi.o
diff -r f8624b023d67 xen/drivers/passthrough/amd/iommu_detect.c
--- a/xen/drivers/passthrough/amd/iommu_detect.c Thu Feb 28 10:31:45 2008 +0000
+++ b/xen/drivers/passthrough/amd/iommu_detect.c Thu Feb 28 12:06:48 2008 +0100
@@ -86,30 +86,24 @@ int __init get_iommu_capabilities(u8 bus
 int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
     struct amd_iommu *iommu)
 {
-    u32 cap_header, cap_range;
+    u32 cap_header, cap_range, misc_info;
    u64 mmio_bar;
 
-#if HACK_BIOS_SETTINGS
-    /* remove it when BIOS available */
-    write_pci_config(bus, dev, func,
-        cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
-    write_pci_config(bus, dev, func,
-        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
-    /* remove it when BIOS available */
-#endif
-
     mmio_bar = (u64)read_pci_config(bus, dev, func,
-            cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
+        cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
     mmio_bar |= read_pci_config(bus, dev, func,
-            cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET) &
-            PCI_CAP_MMIO_BAR_LOW_MASK;
-    iommu->mmio_base_phys = (unsigned long)mmio_bar;
-
-    if ( (mmio_bar == 0) || ( (mmio_bar & 0x3FFF) != 0 ) ) {
+        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET);
+    iommu->mmio_base_phys = mmio_bar & (u64)~0x3FFF;
+
+    if ( (mmio_bar & 0x1) == 0 || iommu->mmio_base_phys == 0 )
+    {
         dprintk(XENLOG_ERR ,
             "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
         return -ENODEV;
     }
+
+    iommu->bdf = (bus << 8) | PCI_DEVFN(dev, func);
+    iommu->cap_offset = cap_ptr;
 
     cap_header = read_pci_config(bus, dev, func, cap_ptr);
     iommu->revision = get_field_from_reg_u32(cap_header,
@@ -119,12 +113,15 @@ int __init get_iommu_capabilities(u8 bus
     iommu->ht_tunnel_support = get_field_from_reg_u32(cap_header,
         PCI_CAP_HT_TUNNEL_MASK,
         PCI_CAP_HT_TUNNEL_SHIFT);
-    iommu->not_present_cached = get_field_from_reg_u32(cap_header,
+    iommu->pte_not_present_cached = get_field_from_reg_u32(cap_header,
         PCI_CAP_NP_CACHE_MASK,
         PCI_CAP_NP_CACHE_SHIFT);
 
     cap_range = read_pci_config(bus, dev, func,
         cap_ptr + PCI_CAP_RANGE_OFFSET);
+    iommu->unit_id = get_field_from_reg_u32(cap_range,
+        PCI_CAP_UNIT_ID_MASK,
+        PCI_CAP_UNIT_ID_SHIFT);
     iommu->root_bus = get_field_from_reg_u32(cap_range,
         PCI_CAP_BUS_NUMBER_MASK,
         PCI_CAP_BUS_NUMBER_SHIFT);
@@ -135,6 +132,11 @@ int __init get_iommu_capabilities(u8 bus
         PCI_CAP_LAST_DEVICE_MASK,
         PCI_CAP_LAST_DEVICE_SHIFT);
 
+    misc_info = read_pci_config(bus, dev, func,
+        cap_ptr + PCI_MISC_INFO_OFFSET);
+    iommu->msi_number = get_field_from_reg_u32(misc_info,
+        PCI_CAP_MSI_NUMBER_MASK,
+        PCI_CAP_MSI_NUMBER_SHIFT);
+
     return 0;
 }
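The iommu_detect.c hunks above replace the old "BAR must be 16K-aligned" sanity check with one that tests the capability's enable bit and derives the base by masking off the low 14 bits, so a BAR whose low control bits are set no longer fails detection. Below is a minimal standalone sketch of that decoding; only the 0x3FFF mask and the bit-0 test are taken from the hunk itself, and the register values in main() are made up for illustration:

    #include <stdint.h>
    #include <assert.h>

    /* bit 0 of the low dword is the capability-enable flag and bits 13:1
     * are not part of the address, so the 16K-aligned base is recovered
     * by clearing the low 14 bits instead of rejecting the BAR outright */
    static uint64_t decode_mmio_bar(uint32_t bar_low, uint32_t bar_high)
    {
        uint64_t mmio_bar = ((uint64_t)bar_high << 32) | bar_low;

        if ( (mmio_bar & 0x1) == 0 )    /* capability not enabled */
            return 0;
        return mmio_bar & ~(uint64_t)0x3FFF;
    }

    int main(void)
    {
        /* hypothetical firmware-programmed BAR with the enable bit set */
        assert(decode_mmio_bar(0xFEB00001, 0) == 0xFEB00000);
        /* enable bit clear -> invalid, as in the hunk's -ENODEV path */
        assert(decode_mmio_bar(0xFEB00000, 0) == 0);
        return 0;
    }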
diff -r f8624b023d67 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c Thu Feb 28 10:31:45 2008 +0000
+++ b/xen/drivers/passthrough/amd/iommu_init.c Thu Feb 28 12:06:48 2008 +0100
@@ -137,8 +137,49 @@ static void __init set_iommu_command_buf
     writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
 }
 
+static void __init register_iommu_exclusion_range(struct amd_iommu *iommu)
+{
+    u64 addr_lo, addr_hi;
+    u32 entry;
+
+    addr_lo = iommu->exclusion_limit & DMA_32BIT_MASK;
+    addr_hi = iommu->exclusion_limit >> 32;
+
+    set_field_in_reg_u32((u32)addr_hi, 0,
+        IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
+        IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
+    writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET);
+
+    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
+        IOMMU_EXCLUSION_LIMIT_LOW_MASK,
+        IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
+    writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_LOW_OFFSET);
+
+    addr_lo = iommu->exclusion_base & DMA_32BIT_MASK;
+    addr_hi = iommu->exclusion_base >> 32;
+
+    set_field_in_reg_u32((u32)addr_hi, 0,
+        IOMMU_EXCLUSION_BASE_HIGH_MASK,
+        IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
+    writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_HIGH_OFFSET);
+
+    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
+        IOMMU_EXCLUSION_BASE_LOW_MASK,
+        IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);
+
+    set_field_in_reg_u32(iommu->exclusion_allow_all, entry,
+        IOMMU_EXCLUSION_ALLOW_ALL_MASK,
+        IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);
+
+    set_field_in_reg_u32(iommu->exclusion_enable, entry,
+        IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
+        IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
+    writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET);
+}
+
 void __init enable_iommu(struct amd_iommu *iommu)
 {
+    register_iommu_exclusion_range(iommu);
     set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
     set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
     printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
diff -r f8624b023d67 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c Thu Feb 28 10:31:45 2008 +0000
+++ b/xen/drivers/passthrough/amd/iommu_map.c Thu Feb 28 12:06:48 2008 +0100
@@ -234,16 +234,19 @@ static void amd_iommu_set_page_directory
 }
 
 void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
-    u8 paging_mode)
+    u8 sys_mgt, u8 dev_ex, u8 paging_mode)
 {
     u64 addr_hi, addr_lo;
     u32 entry;
 
-    dte[6] = dte[5] = dte[4] = 0;
-
-    set_field_in_reg_u32(IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED, 0,
+    dte[7] = dte[6] = dte[5] = dte[4] = 0;
+
+    set_field_in_reg_u32(sys_mgt, 0,
         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
         IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry);
+    set_field_in_reg_u32(dev_ex, entry,
+        IOMMU_DEV_TABLE_ALLOW_EXCLUSION_MASK,
+        IOMMU_DEV_TABLE_ALLOW_EXCLUSION_SHIFT, &entry);
     dte[3] = entry;
 
     set_field_in_reg_u32(domain_id, 0,
@@ -448,3 +451,34 @@ int amd_iommu_unmap_page(struct domain *
 
     return 0;
 }
+
+int amd_iommu_reserve_domain_unity_map(
+    struct domain *domain,
+    unsigned long phys_addr,
+    unsigned long size, int iw, int ir)
+{
+    unsigned long flags, npages, i;
+    void *pte;
+    struct hvm_iommu *hd = domain_hvm_iommu(domain);
+
+    npages = region_to_pages(phys_addr, size);
+
+    spin_lock_irqsave(&hd->mapping_lock, flags);
+    for ( i = 0; i < npages; ++i )
+    {
+        pte = get_pte_from_page_tables(hd->root_table,
+            hd->paging_mode, phys_addr>>PAGE_SHIFT);
+        if ( pte == 0 )
+        {
+            dprintk(XENLOG_ERR,
+                "AMD IOMMU: Invalid IO pagetable entry phys_addr = %lx\n",
+                phys_addr);
+            spin_unlock_irqrestore(&hd->mapping_lock, flags);
+            return -EFAULT;
+        }
+        set_page_table_entry_present((u32 *)pte,
+            phys_addr, iw, ir);
+        phys_addr += PAGE_SIZE;
+    }
+    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    return 0;
+}
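amd_iommu_set_dev_table_entry() now packs two caller-supplied fields, sys_mgt and dev_ex, into dte[3] by chaining set_field_in_reg_u32(), a read-modify-write on a 32-bit register image. The sketch below shows why the second call must receive the first call's result rather than 0; the helper body is modeled on the inline in amd-iommu-proto.h, and the mask/shift pairs are placeholders, not the real IOMMU_DEV_TABLE_* constants:

    #include <stdint.h>
    #include <assert.h>

    static uint32_t set_field_in_reg_u32(uint32_t field, uint32_t reg_value,
                                         uint32_t mask, uint32_t shift,
                                         uint32_t *reg)
    {
        reg_value &= ~mask;                    /* clear the field's bits */
        reg_value |= (field << shift) & mask;  /* merge in the new value */
        *reg = reg_value;
        return reg_value;
    }

    int main(void)
    {
        const uint32_t SYS_MGT_MASK = 0x3u << 8, SYS_MGT_SHIFT = 8;
        const uint32_t DEV_EX_MASK  = 0x1u << 7, DEV_EX_SHIFT  = 7;
        uint32_t entry;

        /* as in amd_iommu_set_dev_table_entry(): start from 0, then feed
         * the accumulated value back in so the first field survives */
        set_field_in_reg_u32(2, 0, SYS_MGT_MASK, SYS_MGT_SHIFT, &entry);
        set_field_in_reg_u32(1, entry, DEV_EX_MASK, DEV_EX_SHIFT, &entry);
        assert(entry == ((2u << 8) | (1u << 7)));
        return 0;
    }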
diff -r f8624b023d67 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Feb 28 10:31:45 2008 +0000
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Feb 28 12:06:48 2008 +0100
@@ -20,6 +20,7 @@
 #include
 #include
+#include
 #include
 #include
 #include "../pci-direct.h"
@@ -30,6 +31,9 @@ static long amd_iommu_cmd_buffer_entries
 static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
 int nr_amd_iommus = 0;
 
+unsigned short ivrs_bdf_entries = 0;
+struct ivrs_mappings *ivrs_mappings = NULL;
+
 /* will set if amd-iommu HW is found */
 int amd_iommu_enabled = 0;
@@ -82,13 +86,12 @@ static void __init detect_cleanup(void)
         deallocate_iommu_resources(iommu);
         xfree(iommu);
     }
-}
-
-static int requestor_id_from_bdf(int bdf)
-{
-    /* HACK - HACK */
-    /* account for possible 'aliasing' by parent device */
-    return bdf;
+
+    if ( ivrs_mappings )
+    {
+        xfree(ivrs_mappings);
+        ivrs_mappings = NULL;
+    }
 }
 
 static int __init allocate_iommu_table_struct(struct table_struct *table,
@@ -179,10 +182,21 @@ static int __init amd_iommu_init(void)
 {
     struct amd_iommu *iommu;
     unsigned long flags;
+    u16 bdf;
 
     for_each_amd_iommu ( iommu )
     {
         spin_lock_irqsave(&iommu->lock, flags);
+
+        /* assign default IOMMU values */
+        iommu->coherent = IOMMU_CONTROL_ENABLED;
+        iommu->isochronous = IOMMU_CONTROL_ENABLED;
+        iommu->res_pass_pw = IOMMU_CONTROL_ENABLED;
+        iommu->pass_pw = IOMMU_CONTROL_ENABLED;
+        iommu->ht_tunnel_enable = iommu->ht_tunnel_support ?
+            IOMMU_CONTROL_ENABLED : IOMMU_CONTROL_DISABLED;
+        iommu->exclusion_enable = IOMMU_CONTROL_DISABLED;
+        iommu->exclusion_allow_all = IOMMU_CONTROL_DISABLED;
 
         /* register IOMMU data strucures in MMIO space */
         if ( map_iommu_mmio_region(iommu) != 0 )
@@ -190,10 +204,30 @@ static int __init amd_iommu_init(void)
         register_iommu_dev_table_in_mmio_space(iommu);
         register_iommu_cmd_buffer_in_mmio_space(iommu);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+    }
+
+    /* assign default values for device entries */
+    for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+    {
+        ivrs_mappings[bdf].dte_requestor_id = bdf;
+        ivrs_mappings[bdf].dte_sys_mgt_enable =
+            IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED;
+        ivrs_mappings[bdf].dte_allow_exclusion =
+            IOMMU_CONTROL_DISABLED;
+        ivrs_mappings[bdf].unity_map_enable =
+            IOMMU_CONTROL_DISABLED;
+    }
+
+    if ( acpi_table_parse(ACPI_IVRS, parse_ivrs_table) != 0 )
+        dprintk(XENLOG_INFO, "AMD IOMMU: Did not find IVRS table!\n");
+
+    for_each_amd_iommu ( iommu )
+    {
+        spin_lock_irqsave(&iommu->lock, flags);
 
         /* enable IOMMU translation services */
         enable_iommu(iommu);
         nr_amd_iommus++;
-
         spin_unlock_irqrestore(&iommu->lock, flags);
     }
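With the defaults assigned in amd_iommu_init() above, the new ivrs_mappings table degenerates to exactly what the deleted requestor_id_from_bdf() hard-coded — each BDF is its own requestor ID — until parse_ivrs_table() overrides individual entries. A small sketch of that identity default, using a simplified stand-in for struct ivrs_mappings (the real layout appears in the amd-iommu.h hunk further down):

    #include <stdlib.h>
    #include <assert.h>

    struct ivrs_map {
        unsigned short dte_requestor_id;
        unsigned char  dte_sys_mgt_enable;  /* default: messages forwarded */
        unsigned char  dte_allow_exclusion; /* default: disabled */
        unsigned char  unity_map_enable;    /* default: disabled */
    };

    int main(void)
    {
        unsigned int entries = 256, bdf;    /* one bus worth of entries */
        struct ivrs_map *map = calloc(entries, sizeof(*map));
        assert(map != NULL);

        /* identity mapping, as in amd_iommu_init(); parse_ivrs_table()
         * later overrides individual entries (e.g. aliased requestors) */
        for ( bdf = 0; bdf < entries; ++bdf )
            map[bdf].dte_requestor_id = (unsigned short)bdf;

        assert(map[0x21].dte_requestor_id == 0x21);
        free(map);
        return 0;
    }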
@@ -229,31 +263,38 @@ struct amd_iommu *find_iommu_for_device(
 }
 
 void amd_iommu_setup_domain_device(
-    struct domain *domain, struct amd_iommu *iommu, int requestor_id)
+    struct domain *domain, struct amd_iommu *iommu, int bdf)
 {
     void *dte;
     u64 root_ptr;
     unsigned long flags;
+    int req_id;
+    u8 sys_mgt, dev_ex;
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
-    BUG_ON( !hd->root_table||!hd->paging_mode );
+    BUG_ON( !hd->root_table || !hd->paging_mode );
 
     root_ptr = (u64)virt_to_maddr(hd->root_table);
+    /* get device-table entry */
+    req_id = ivrs_mappings[bdf].dte_requestor_id;
     dte = iommu->dev_table.buffer +
-        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+        (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
 
     if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
     {
         spin_lock_irqsave(&iommu->lock, flags);
 
-        amd_iommu_set_dev_table_entry(
-            (u32 *)dte,
-            root_ptr, hd->domain_id, hd->paging_mode);
-        invalidate_dev_table_entry(iommu, requestor_id);
+        /* bind DTE to domain page-tables */
+        sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
+        dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
+        amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr,
+            req_id, sys_mgt, dev_ex, hd->paging_mode);
+
+        invalidate_dev_table_entry(iommu, req_id);
         flush_command_buffer(iommu);
         dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
             "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
-            requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
+            req_id, root_ptr, hd->domain_id, hd->paging_mode);
 
         spin_unlock_irqrestore(&iommu->lock, flags);
     }
@@ -266,7 +307,7 @@ void __init amd_iommu_setup_dom0_devices
     struct pci_dev *pdev;
     int bus, dev, func;
     u32 l;
-    int req_id, bdf;
+    int bdf;
 
     for ( bus = 0; bus < 256; bus++ )
     {
@@ -286,11 +327,12 @@ void __init amd_iommu_setup_dom0_devices
                 list_add_tail(&pdev->list, &hd->pdev_list);
 
                 bdf = (bus << 8) | pdev->devfn;
-                req_id = requestor_id_from_bdf(bdf);
-                iommu = find_iommu_for_device(bus, pdev->devfn);
+                /* supported device? */
+                iommu = (bdf < ivrs_bdf_entries) ?
+                    find_iommu_for_device(bus, pdev->devfn) : NULL;
 
                 if ( iommu )
-                    amd_iommu_setup_domain_device(dom0, iommu, req_id);
+                    amd_iommu_setup_domain_device(dom0, iommu, bdf);
             }
         }
     }
@@ -299,6 +341,8 @@ int amd_iommu_detect(void)
 int amd_iommu_detect(void)
 {
     unsigned long i;
+    int last_bus;
+    struct amd_iommu *iommu;
 
     if ( !enable_amd_iommu )
     {
@@ -318,6 +362,28 @@ int amd_iommu_detect(void)
     {
         printk("AMD IOMMU: Not found!\n");
         return 0;
+    }
+    else
+    {
+        /* allocate 'ivrs mappings' table */
+        /* note: the table has entries to accommodate all IOMMUs */
+        last_bus = 0;
+        for_each_amd_iommu (iommu)
+            if (iommu->last_downstream_bus > last_bus)
+                last_bus = iommu->last_downstream_bus;
+
+        ivrs_bdf_entries = (last_bus + 1) *
+            IOMMU_DEV_TABLE_ENTRIES_PER_BUS;
+        ivrs_mappings = xmalloc_array(struct ivrs_mappings, ivrs_bdf_entries);
+
+        if ( !ivrs_mappings )
+        {
+            dprintk(XENLOG_ERR, "AMD IOMMU:"
+                " Error allocating IVRS DevMappings table\n");
+            goto error_out;
+        }
+        memset(ivrs_mappings, 0,
+            ivrs_bdf_entries * sizeof(struct ivrs_mappings));
     }
 
     if ( amd_iommu_init() != 0 )
@@ -407,23 +473,25 @@ int amd_iommu_domain_init(struct domain
 }
 
 static void amd_iommu_disable_domain_device(
-    struct domain *domain, struct amd_iommu *iommu, u16 requestor_id)
+    struct domain *domain, struct amd_iommu *iommu, int bdf)
 {
     void *dte;
     unsigned long flags;
-
+    int req_id;
+
+    req_id = ivrs_mappings[bdf].dte_requestor_id;
     dte = iommu->dev_table.buffer +
-        (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+        (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
 
     if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
     {
         spin_lock_irqsave(&iommu->lock, flags);
         memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
-        invalidate_dev_table_entry(iommu, requestor_id);
+        invalidate_dev_table_entry(iommu, req_id);
         flush_command_buffer(iommu);
         dprintk(XENLOG_INFO , "AMD IOMMU: disable DTE 0x%x,"
             " domain_id:%d, paging_mode:%d\n",
-            requestor_id, domain_hvm_iommu(domain)->domain_id,
+            req_id, domain_hvm_iommu(domain)->domain_id,
             domain_hvm_iommu(domain)->paging_mode);
         spin_unlock_irqrestore(&iommu->lock, flags);
     }
@@ -438,7 +506,7 @@ static int reassign_device( struct domai
     struct hvm_iommu *target_hd = domain_hvm_iommu(target);
     struct pci_dev *pdev;
     struct amd_iommu *iommu;
-    int req_id, bdf;
+    int bdf;
     unsigned long flags;
 
     for_each_pdev( source, pdev )
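Throughout these hunks a device is keyed by its BDF, packed as (bus << 8) | devfn and used directly as the index into ivrs_mappings, so the bdf < ivrs_bdf_entries test doubles as a "was this bus covered at detection time" check. A quick illustration of the packing and the sizing rule (PCI_DEVFN here is the standard PCI macro, not something added by this patch):

    #include <assert.h>

    /* the standard PCI slot/function packing used by the patch */
    #define PCI_DEVFN(dev, fn) ((((dev) & 0x1f) << 3) | ((fn) & 0x07))

    int main(void)
    {
        /* device 03:1f.7 -> bdf 0x03ff */
        int bdf = (0x03 << 8) | PCI_DEVFN(0x1f, 7);
        assert(bdf == 0x03ff);
        /* sizing rule from amd_iommu_detect(): (last_bus + 1) * 256 */
        assert(bdf < (0x03 + 1) * 256);
        return 0;
    }

256 entries per bus (32 slots x 8 functions) is presumably where IOMMU_DEV_TABLE_ENTRIES_PER_BUS gets its value.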
@@ -450,12 +518,13 @@ static int reassign_device( struct domai
         pdev->devfn = devfn;
 
         bdf = (bus << 8) | devfn;
-        req_id = requestor_id_from_bdf(bdf);
-        iommu = find_iommu_for_device(bus, devfn);
+        /* supported device? */
+        iommu = (bdf < ivrs_bdf_entries) ?
+            find_iommu_for_device(bus, pdev->devfn) : NULL;
 
         if ( iommu )
         {
-            amd_iommu_disable_domain_device(source, iommu, req_id);
+            amd_iommu_disable_domain_device(source, iommu, bdf);
             /* Move pci device from the source domain to target domain. */
             spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
             spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
@@ -463,7 +532,7 @@ static int reassign_device( struct domai
             spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
             spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
 
-            amd_iommu_setup_domain_device(target, iommu, req_id);
+            amd_iommu_setup_domain_device(target, iommu, bdf);
             gdprintk(XENLOG_INFO ,
                 "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
@@ -484,6 +553,19 @@ static int reassign_device( struct domai
 
 int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
 {
+    int bdf = (bus << 8) | devfn;
+    int req_id;
+    req_id = ivrs_mappings[bdf].dte_requestor_id;
+
+    if (ivrs_mappings[req_id].unity_map_enable)
+    {
+        amd_iommu_reserve_domain_unity_map(d,
+            ivrs_mappings[req_id].addr_range_start,
+            ivrs_mappings[req_id].addr_range_length,
+            ivrs_mappings[req_id].write_permission,
+            ivrs_mappings[req_id].read_permission);
+    }
+
     pdev_flr(bus, devfn);
     return reassign_device(dom0, d, bus, devfn);
 }
diff -r f8624b023d67 xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h Thu Feb 28 10:31:45 2008 +0000
+++ b/xen/include/asm-x86/amd-iommu.h Thu Feb 28 12:06:48 2008 +0100
@@ -43,14 +43,25 @@ struct amd_iommu {
     struct list_head list;
     spinlock_t lock; /* protect iommu */
 
-    int iotlb_support;
-    int ht_tunnel_support;
-    int not_present_cached;
+    u16 bdf;
+    u8 cap_offset;
     u8 revision;
+    u8 unit_id;
+    u8 msi_number;
 
     u8 root_bus;
     u8 first_devfn;
     u8 last_devfn;
+
+    u8 pte_not_present_cached;
+    u8 ht_tunnel_support;
+    u8 iotlb_support;
+
+    u8 isochronous;
+    u8 coherent;
+    u8 res_pass_pw;
+    u8 pass_pw;
+    u8 ht_tunnel_enable;
 
     int last_downstream_bus;
     int downstream_bus_present[PCI_MAX_BUS_COUNT];
@@ -61,10 +72,23 @@ struct amd_iommu {
     struct table_struct dev_table;
     struct table_struct cmd_buffer;
     u32 cmd_buffer_tail;
+    struct table_struct event_log;
+    u32 event_log_head;
 
-    int exclusion_enabled;
+    int exclusion_enable;
+    int exclusion_allow_all;
     unsigned long exclusion_base;
     unsigned long exclusion_limit;
 };
+
+struct ivrs_mappings {
+    u16 dte_requestor_id;
+    u8 dte_sys_mgt_enable;
+    u8 dte_allow_exclusion;
+    u8 unity_map_enable;
+    u8 write_permission;
+    u8 read_permission;
+    unsigned long addr_range_start;
+    unsigned long addr_range_length;
+};
 
 #endif /* _ASM_X86_64_AMD_IOMMU_H */
diff -r f8624b023d67 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h Thu Feb 28 10:31:45 2008 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h Thu Feb 28 12:06:48 2008 +0100
@@ -117,6 +117,12 @@
 #define PCI_CAP_FIRST_DEVICE_SHIFT	16
 #define PCI_CAP_LAST_DEVICE_MASK	0xFF000000
 #define PCI_CAP_LAST_DEVICE_SHIFT	24
+
+#define PCI_CAP_UNIT_ID_MASK	0x0000001F
+#define PCI_CAP_UNIT_ID_SHIFT	0
+#define PCI_MISC_INFO_OFFSET	0x10
+#define PCI_CAP_MSI_NUMBER_MASK	0x0000001F
+#define PCI_CAP_MSI_NUMBER_SHIFT	0
 
 /* Device Table */
 #define IOMMU_DEV_TABLE_BASE_LOW_OFFSET	0x00
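The new PCI_CAP_UNIT_ID_* and PCI_CAP_MSI_NUMBER_* constants above are consumed by get_field_from_reg_u32() in the iommu_detect.c hunks; both fields live in the low five bits of their respective dwords. A standalone sketch of the extraction, with the helper modeled on the inline in amd-iommu-proto.h and a made-up MiscInfo value:

    #include <stdint.h>
    #include <assert.h>

    #define PCI_CAP_MSI_NUMBER_MASK  0x0000001F
    #define PCI_CAP_MSI_NUMBER_SHIFT 0

    /* modeled on the get_field_from_reg_u32() inline in amd-iommu-proto.h */
    static uint32_t get_field_from_reg_u32(uint32_t reg, uint32_t mask,
                                           uint32_t shift)
    {
        return (reg & mask) >> shift;
    }

    int main(void)
    {
        uint32_t misc_info = 0x12345678;   /* made-up register contents */
        assert(get_field_from_reg_u32(misc_info, PCI_CAP_MSI_NUMBER_MASK,
                                      PCI_CAP_MSI_NUMBER_SHIFT) == 0x18);
        return 0;
    }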
diff -r f8624b023d67 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Thu Feb 28 10:31:45 2008 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Thu Feb 28 12:06:48 2008 +0100
@@ -21,6 +21,7 @@
 #ifndef _ASM_X86_64_AMD_IOMMU_PROTO_H
 #define _ASM_X86_64_AMD_IOMMU_PROTO_H
 
+#include <xen/sched.h>
 #include <asm/amd-iommu.h>
 
 #define for_each_amd_iommu(amd_iommu) \
@@ -54,10 +55,12 @@ int amd_iommu_map_page(struct domain *d,
 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
 void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
+int amd_iommu_reserve_domain_unity_map(struct domain *domain,
+    unsigned long phys_addr, unsigned long size, int iw, int ir);
 
 /* device table functions */
-void amd_iommu_set_dev_table_entry(u32 *dte,
-    u64 root_ptr, u16 domain_id, u8 paging_mode);
+void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr,
+    u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
 int amd_iommu_is_dte_page_translation_valid(u32 *entry);
 void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 devic_id);
 
@@ -69,10 +72,13 @@ void flush_command_buffer(struct amd_iom
 /* iommu domain funtions */
 int amd_iommu_domain_init(struct domain *domain);
 void amd_iommu_setup_domain_device(struct domain *domain,
-    struct amd_iommu *iommu, int requestor_id);
+    struct amd_iommu *iommu, int bdf);
 
 /* find iommu for bdf */
 struct amd_iommu *find_iommu_for_device(int bus, int devfn);
+
+/* amd-iommu-acpi functions */
+int __init parse_ivrs_table(unsigned long phys_addr, unsigned long size);
 
 static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
 {
@@ -91,4 +97,16 @@ static inline u32 set_field_in_reg_u32(u
     return reg_value;
 }
 
+static inline u8 get_field_from_byte(u8 value, u8 mask, u8 shift)
+{
+    u8 field;
+    field = (value & mask) >> shift;
+    return field;
+}
+
+static inline unsigned long region_to_pages(unsigned long addr, unsigned long size)
+{
+    return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
+}
+
 #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
diff -r f8624b023d67 xen/include/xen/acpi.h
--- a/xen/include/xen/acpi.h Thu Feb 28 10:31:45 2008 +0000
+++ b/xen/include/xen/acpi.h Thu Feb 28 12:06:48 2008 +0100
@@ -368,6 +368,7 @@ enum acpi_table_id {
     ACPI_HPET,
     ACPI_MCFG,
     ACPI_DMAR,
+    ACPI_IVRS,
     ACPI_TABLE_COUNT
 };
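region_to_pages(), added above for the unity-map code, rounds the region's end up to a page boundary and its start down, so a byte range straddling page boundaries is counted in full. A self-contained check with 4K pages (PAGE_SHIFT and friends are spelled out locally; in Xen they come from the arch headers):

    #include <assert.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    static unsigned long region_to_pages(unsigned long addr, unsigned long size)
    {
        return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
    }

    int main(void)
    {
        /* 0x3000 bytes starting 0x234 into a page touch four 4K pages:
         * align-up(0x1234 + 0x3000) = 0x5000, align-down(0x1234) = 0x1000 */
        assert(region_to_pages(0x1234, 0x3000) == 4);
        /* a page-aligned region degenerates to size >> PAGE_SHIFT */
        assert(region_to_pages(0x2000, 0x2000) == 2);
        return 0;
    }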