diff -r f91555fa60d4 xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c
--- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c	Fri Nov 30 15:10:01 2007 +0100
+++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c	Fri Nov 30 15:18:10 2007 +0100
@@ -468,3 +468,23 @@ int amd_iommu_unmap_page(struct domain *
         return -ENOMEM;
     }
 }
+
+void amd_iommu_unmap_all_domain_pages(struct domain *d)
+{
+    unsigned long flags;
+    u64 io_addr;
+    int domain_id;
+    struct amd_iommu *iommu;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    domain_id = hd->domain_id;
+    io_addr = 0x7FFFFFFFFFFF;    /* flush entire cache */
+
+    for_each_amd_iommu ( iommu )
+    {
+        spin_lock_irqsave(&iommu->lock, flags);
+        invalidate_iommu_page(iommu, io_addr, domain_id);
+        flush_command_buffer(iommu);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+    }
+}
diff -r f91555fa60d4 xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c
--- a/xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c	Fri Nov 30 15:10:01 2007 +0100
+++ b/xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c	Fri Nov 30 15:18:10 2007 +0100
@@ -499,3 +499,199 @@ error_out:
     deallocate_domain_resources(hd);
     return -ENOMEM;
 }
+
+static void amd_iommu_disable_device_entry(
+    struct domain *domain, struct amd_iommu *iommu, int requestor_id)
+{
+    void *dte;
+    unsigned long flags;
+
+    dte = iommu->dev_table.buffer +
+          (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+
+    if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
+    {
+        spin_lock_irqsave(&iommu->lock, flags);
+        memset(dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
+        invalidate_dev_table_entry(iommu, requestor_id);
+        flush_command_buffer(iommu);
+        IOV_DPRINTK(XENLOG_INFO IOVPREFIX, "disable DTE 0x%x,"
+                    " domain_id:%d, paging_mode:%d\n",
+                    requestor_id, domain_hvm_iommu(domain)->domain_id,
+                    domain_hvm_iommu(domain)->paging_mode);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+    }
+}
+
+static int reassign_device(struct domain *source, struct domain *target,
+                           u8 bus, u8 devfn)
+{
+    struct hvm_iommu *source_hd = domain_hvm_iommu(source);
+    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
+    struct pci_dev *pdev;
+    struct amd_iommu *iommu;
+    int req_id, bdf;
+    unsigned long flags;
+
+    for_each_pdev ( source, pdev )
+    {
+        if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
+            continue;
+
+        pdev->bus = bus;
+        pdev->devfn = devfn;
+
+        bdf = (bus << 8) | pdev->devfn;
+        req_id = requestor_id_from_bdf(bdf);
+        iommu = find_iommu_for_device(bus, devfn);
+
+        /*
+         * Move pci device from the source domain to target domain.
+         * Interrupts are already disabled by the outer irqsave, so a
+         * plain spin_lock suffices for the inner lock and avoids
+         * clobbering 'flags'.
+         */
+        spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
+        spin_lock(&target_hd->iommu_list_lock);
+        list_move(&pdev->list, &target_hd->pdev_list);
+        spin_unlock(&target_hd->iommu_list_lock);
+        spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
+
+        if ( iommu )
+        {
+            amd_iommu_disable_device_entry(source, iommu, req_id);
+            amd_iommu_setup_domain_device(target, iommu, req_id);
+            IOV_GDPRINTK(XENLOG_INFO IOVPREFIX,
+                         "reassign %x:%x.%x: domain %d -> domain %d\n",
+                         bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+                         source->domain_id, target->domain_id);
+        }
+        else
+        {
+            IOV_GDPRINTK(XENLOG_ERR IOVPREFIX, "failed to find iommu."
+ " %x:%x.%x cannot be accessed by domain %d\n", + bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id); + return -ENODEV; + } + + break; + } + return 0; +} + +int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn) +{ + struct hvm_iommu *hd; + int ret; + + hd = domain_hvm_iommu(d); + ret = reassign_device(dom0, d, bus, devfn); + + return ret; +} + +static void release_domain_devices(struct domain *d) +{ + struct hvm_iommu *hd = domain_hvm_iommu(d); + struct pci_dev *pdev; + + while ( !list_empty(&hd->pdev_list) ) + { + pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list); + IOV_GDPRINTK(XENLOG_INFO IOVPREFIX, + "release devices: %x:%x.%x\n", + pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + reassign_device(d, dom0, pdev->bus, pdev->devfn); + } +} + +static void deallocate_next_page_table(void *table, unsigned long index, + int level) +{ + unsigned long next_index; + void *next_table, *pde; + int next_level; + + pde = table + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE); + next_table = amd_iommu_get_vptr_from_page_table_entry((u32 *)pde); + + if (next_table) + { + next_level = level - 1; + if (next_level > 1) + { + next_index = 0; + do { + deallocate_next_page_table(next_table, + next_index, next_level); + ++next_index; + } while (next_index < PTE_PER_TABLE_SIZE); + } + + free_xenheap_page(next_table); + } +} + +static void deallocate_domain_page_tables(struct domain *d) +{ + unsigned long index; + struct hvm_iommu *hd = domain_hvm_iommu(d); + + if (hd ->root_table) + { + index = 0; + do { + deallocate_next_page_table(hd->root_table, + index, hd->paging_mode); + ++index; + } while (index < PTE_PER_TABLE_SIZE); + + free_xenheap_page(hd ->root_table); + } + + hd ->root_table = NULL; +} + +void amd_iommu_domain_destroy(struct domain *d) +{ + struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci; + uint32_t i; + struct hvm_iommu *hd = domain_hvm_iommu(d); + struct list_head *ioport_list, *digl_list, *tmp; + struct g2m_ioport *ioport; + struct dev_intx_gsi_link *digl; + + if ( !amd_iommu_enabled ) + return; + + if ( hvm_irq_dpci != NULL ) + { + for ( i = 0; i < NR_IRQS; i++ ) + if ( hvm_irq_dpci->mirq[i].valid ) + { + pirq_guest_unbind(d, i); + kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]); + + list_for_each_safe ( digl_list, tmp, + &hvm_irq_dpci->mirq[i].digl_list ) + { + digl = list_entry(digl_list, + struct dev_intx_gsi_link, list); + list_del(&digl->list); + xfree(digl); + } + } + + d->arch.hvm_domain.irq.dpci = NULL; + xfree(hvm_irq_dpci); + } + + if ( hd ) + { + list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list ) + { + ioport = list_entry(ioport_list, struct g2m_ioport, list); + list_del(&ioport->list); + xfree(ioport); + } + } + + amd_iommu_unmap_all_domain_pages(d); + deallocate_domain_page_tables(d); + release_domain_devices(d); +} diff -r f91555fa60d4 xen/include/asm-x86/amd-iommu.h --- a/xen/include/asm-x86/amd-iommu.h Fri Nov 30 15:10:01 2007 +0100 +++ b/xen/include/asm-x86/amd-iommu.h Fri Nov 30 15:18:10 2007 +0100 @@ -33,6 +33,8 @@ extern struct list_head amd_iommu_head; extern int __init amd_iommu_detect(void); extern int amd_iommu_domain_init(struct domain *domain); +extern int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn); +extern void amd_iommu_domain_destroy(struct domain *d); extern int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn); extern int amd_iommu_unmap_page(struct domain *d, unsigned long gfn); diff -r f91555fa60d4 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h --- 
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Nov 30 15:10:01 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Nov 30 15:18:10 2007 +0100
@@ -63,6 +63,7 @@ void __init enable_iommu(struct amd_iomm
 void __init enable_iommu(struct amd_iommu *iommu);
 
 /* mapping functions */
+void amd_iommu_unmap_all_domain_pages(struct domain *d);
 void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
 
 /* device table functions */
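For context, the two entry points exported in asm-x86/amd-iommu.h are meant to be driven from the generic passthrough code. A minimal caller sketch follows; the wrapper names and their error handling are illustrative only, not part of this patch:

/* Illustrative caller sketch -- hypothetical wrappers, not part of
 * this patch.  Only the two exported entry points are real. */
static int passthrough_assign_device(struct domain *d, u8 bus, u8 devfn)
{
    /* Hand the device from dom0 over to the target domain; the AMD
     * code rewrites the device table entry and flushes the IOMMU. */
    int rc = amd_iommu_assign_device(d, bus, devfn);

    if ( rc )
        gdprintk(XENLOG_ERR, "assign %x:%x.%x to dom%d failed (%d)\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn), d->domain_id, rc);
    return rc;
}

static void passthrough_teardown(struct domain *d)
{
    /* Unbinds guest IRQs, frees I/O port and page-table state, and
     * returns every assigned device to dom0. */
    amd_iommu_domain_destroy(d);
}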
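The page-table teardown above recurses through every PDE but stops descending at level 1, since level-1 entries map the domain's data pages rather than further table pages; only table pages are freed. A self-contained toy model of that walk (hypothetical toy_* names, plain malloc/free standing in for xenheap pages) is sketched below:

#include <stdlib.h>

#define TOY_PTES_PER_TABLE 512

/* Toy page-table node: each non-leaf slot points at a lower table. */
struct toy_table {
    struct toy_table *slot[TOY_PTES_PER_TABLE];
};

/* Mirror of deallocate_next_page_table(): free the subtree behind one
 * slot.  Recurse only while the next level still holds table pointers;
 * level-1 tables map data pages, which are not freed here. */
static void toy_free_slot(struct toy_table *table, unsigned long index,
                          int level)
{
    struct toy_table *next = table->slot[index];

    if ( next )
    {
        if ( level - 1 > 1 )
            for ( unsigned long i = 0; i < TOY_PTES_PER_TABLE; i++ )
                toy_free_slot(next, i, level - 1);
        free(next);
    }
}

/* Mirror of deallocate_domain_page_tables(): walk every root slot,
 * free the root table itself, then clear the root pointer. */
static void toy_free_root(struct toy_table **root, int paging_mode)
{
    if ( *root )
    {
        for ( unsigned long i = 0; i < TOY_PTES_PER_TABLE; i++ )
            toy_free_slot(*root, i, paging_mode);
        free(*root);
    }
    *root = NULL;
}

int main(void)
{
    /* Build a root (level 3) with one level-2 and one level-1 table. */
    struct toy_table *root = calloc(1, sizeof(*root));
    root->slot[0] = calloc(1, sizeof(*root));          /* level 2 */
    root->slot[0]->slot[7] = calloc(1, sizeof(*root)); /* level 1 */

    toy_free_root(&root, 3);   /* frees all three tables */
    return 0;
}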