[PATCH 11/17] AMD/IOMMU: walk trees upon page fault
This is to aid diagnosing issues and largely matches VT-d's behavior.
Since I'm adding permissions output here as well, take the opportunity
to also display them in amd_dump_page_table_level().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -235,6 +235,8 @@ int __must_check amd_iommu_flush_iotlb_p
                                              unsigned long page_count,
                                              unsigned int flush_flags);
 int __must_check amd_iommu_flush_iotlb_all(struct domain *d);
+void amd_iommu_print_entries(const struct amd_iommu *iommu, unsigned int dev_id,
+                             dfn_t dfn);
 
 /* device table functions */
 int get_dma_requestor_id(uint16_t seg, uint16_t bdf);
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -573,6 +573,9 @@ static void parse_event_log_entry(struct
                (flags & 0x002) ? " NX" : "",
                (flags & 0x001) ? " GN" : "");
 
+        if ( iommu_verbose )
+            amd_iommu_print_entries(iommu, device_id, daddr_to_dfn(addr));
+
         for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
             if ( get_dma_requestor_id(iommu->seg, bdf) == device_id )
                 pci_check_disable_device(iommu->seg, PCI_BUS(bdf),
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -363,6 +363,50 @@ int amd_iommu_unmap_page(struct domain *
     return 0;
 }
 
+void amd_iommu_print_entries(const struct amd_iommu *iommu, unsigned int dev_id,
+                             dfn_t dfn)
+{
+    mfn_t pt_mfn;
+    unsigned int level;
+    const struct amd_iommu_dte *dt = iommu->dev_table.buffer;
+
+    if ( !dt[dev_id].tv )
+    {
+        printk("%pp: no root\n", &PCI_SBDF2(iommu->seg, dev_id));
+        return;
+    }
+
+    pt_mfn = _mfn(dt[dev_id].pt_root);
+    level = dt[dev_id].paging_mode;
+    printk("%pp root @ %"PRI_mfn" (%u levels) dfn=%"PRI_dfn"\n",
+           &PCI_SBDF2(iommu->seg, dev_id), mfn_x(pt_mfn), level, dfn_x(dfn));
+
+    while ( level )
+    {
+        const union amd_iommu_pte *pt = map_domain_page(pt_mfn);
+        unsigned int idx = pfn_to_pde_idx(dfn_x(dfn), level);
+        union amd_iommu_pte pte = pt[idx];
+
+        unmap_domain_page(pt);
+
+        printk("  L%u[%03x] = %"PRIx64" %c%c\n", level, idx, pte.raw,
+               pte.pr ? pte.ir ? 'r' : '-' : 'n',
+               pte.pr ? pte.iw ? 'w' : '-' : 'p');
+
+        if ( !pte.pr )
+            break;
+
+        if ( pte.next_level >= level )
+        {
+            printk("  L%u[%03x]: next: %u\n", level, idx, pte.next_level);
+            break;
+        }
+
+        pt_mfn = _mfn(pte.mfn);
+        level = pte.next_level;
+    }
+}
+
 static unsigned long flush_count(unsigned long dfn,
                                  unsigned long page_count, unsigned int order)
 {
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -561,10 +561,11 @@ static void amd_dump_page_table_level(st
                                       mfn_to_page(_mfn(pde->mfn)), pde->next_level,
                                       address, indent + 1);
         else
-            printk("%*sdfn: %08lx  mfn: %08lx\n",
+            printk("%*sdfn: %08lx  mfn: %08lx  %c%c\n",
                    indent, "",
                    (unsigned long)PFN_DOWN(address),
-                   (unsigned long)PFN_DOWN(pfn_to_paddr(pde->mfn)));
+                   (unsigned long)PFN_DOWN(pfn_to_paddr(pde->mfn)),
+                   pde->ir ? 'r' : '-', pde->iw ? 'w' : '-');
     }
 
     unmap_domain_page(table_vaddr);
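A note on the walk's index arithmetic, for reviewers: each paging level
consumes 9 bits of the DFN, which is what pfn_to_pde_idx() extracts.
Below is a minimal standalone sketch of just that computation. It is
illustrative only, not Xen code: the helper name pde_idx() and the demo
values are made up, though the 9-bit constant matches Xen's
PTE_PER_TABLE_SHIFT and the top-down loop mirrors what
amd_iommu_print_entries() above does.

#include <stdio.h>

#define PTE_PER_TABLE_SHIFT 9                  /* 512 entries per table */
#define PTE_PER_TABLE_MASK  ((1u << PTE_PER_TABLE_SHIFT) - 1)

/* Index of the entry covering @dfn at the given paging level (1 = leaf). */
static unsigned int pde_idx(unsigned long dfn, unsigned int level)
{
    return (dfn >> (PTE_PER_TABLE_SHIFT * (level - 1))) & PTE_PER_TABLE_MASK;
}

int main(void)
{
    unsigned long dfn = 0x12345;   /* arbitrary example device frame number */
    unsigned int level;

    /* Walk top-down, as the new function does for a 3-level tree. */
    for ( level = 3; level; --level )
        printf("L%u[%03x]\n", level, pde_idx(dfn, level));

    return 0;
}

For dfn 0x12345 this prints L3[000], L2[091], and L1[145]. The real walk
additionally maps each level's table at pt_mfn, and stops at non-present
entries or at inconsistent next_level values.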