[Xen-devel] [PATCH 5/5] AMD IOMMU: widen NUMA nodes to be allocated from
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -488,7 +488,7 @@ static int iommu_pde_from_gfn(struct dom
mfn = next_table_mfn;
/* allocate lower level page table */
- table = alloc_amd_iommu_pgtable();
+ table = alloc_amd_iommu_pgtable(d);
if ( table == NULL )
{
AMD_IOMMU_DEBUG("Cannot allocate I/O page table\n");
@@ -516,7 +516,7 @@ static int iommu_pde_from_gfn(struct dom
{
if ( next_table_mfn == 0 )
{
- table = alloc_amd_iommu_pgtable();
+ table = alloc_amd_iommu_pgtable(d);
if ( table == NULL )
{
AMD_IOMMU_DEBUG("Cannot allocate I/O page table\n");
@@ -567,7 +567,7 @@ static int update_paging_mode(struct dom
{
/* Allocate and install a new root table.
* Only upper I/O page table grows, no need to fix next level bits */
- new_root = alloc_amd_iommu_pgtable();
+ new_root = alloc_amd_iommu_pgtable(d);
if ( new_root == NULL )
{
AMD_IOMMU_DEBUG("%s Cannot allocate I/O page table\n",
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -225,13 +225,15 @@ int __init amd_iov_detect(void)
return scan_pci_devices();
}
-static int allocate_domain_resources(struct hvm_iommu *hd)
+static int allocate_domain_resources(struct domain *d)
{
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+
/* allocate root table */
spin_lock(&hd->arch.mapping_lock);
if ( !hd->arch.root_table )
{
- hd->arch.root_table = alloc_amd_iommu_pgtable();
+ hd->arch.root_table = alloc_amd_iommu_pgtable(d);
if ( !hd->arch.root_table )
{
spin_unlock(&hd->arch.mapping_lock);
@@ -263,7 +265,7 @@ static int amd_iommu_domain_init(struct
struct hvm_iommu *hd = domain_hvm_iommu(d);
/* allocate page directroy */
- if ( allocate_domain_resources(hd) != 0 )
+ if ( allocate_domain_resources(d) != 0 )
{
if ( hd->arch.root_table )
free_domheap_page(hd->arch.root_table);
@@ -383,7 +385,7 @@ static int reassign_device(struct domain
/* IO page tables might be destroyed after pci-detach the last device
* In this case, we have to re-allocate root table for next pci-attach.*/
if ( t->arch.root_table == NULL )
- allocate_domain_resources(t);
+ allocate_domain_resources(target);
amd_iommu_setup_domain_device(target, iommu, devfn, pdev);
AMD_IOMMU_DEBUG("Re-assign %04x:%02x:%02x.%u from dom%d to dom%d\n",
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -158,12 +158,12 @@ static inline unsigned long region_to_pa
return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
}
-static inline struct page_info* alloc_amd_iommu_pgtable(void)
+static inline struct page_info *alloc_amd_iommu_pgtable(struct domain *d)
{
struct page_info *pg;
void *vaddr;
- pg = alloc_domheap_page(NULL, 0);
+ pg = alloc_domheap_page(d, MEMF_no_owner);
if ( pg == NULL )
return 0;
vaddr = __map_domain_page(pg);
Attachment:
AMD-IOMMU-alloc-for-domain.patch _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.