[Xen-devel] [PATCH v03 06/10] arm: omap: introduce iommu translation for GPU remoteproc
The following patch introduces platform-specific MMU data definitions and a
pagetable translation function for the OMAP5 GPU remoteproc. The GPU MMU uses
two-level address translation, so the algorithm is quite straightforward here -
pagetables are enumerated and all pfns are updated with the corresponding mfns.
This patch adds the functionality needed for proper handling of the GPU MMU,
which is very similar to the existing IPU/DSP MMUs. (Illustrative sketches of
the resulting address decomposition and of the translation loop follow the
patch below.)

Signed-off-by: Andrii Tseglytskyi <andrii.tseglytskyi@xxxxxxxxxxxxxxx>
---
 xen/arch/arm/remoteproc/omap_iommu.c       | 107 +++++++++++++++++++++++++++++
 xen/arch/arm/remoteproc/remoteproc_iommu.c |   1 +
 xen/include/asm-arm/remoteproc_iommu.h     |   1 +
 3 files changed, 109 insertions(+)

diff --git a/xen/arch/arm/remoteproc/omap_iommu.c b/xen/arch/arm/remoteproc/omap_iommu.c
index 8ed6d0b..f00bfc6 100644
--- a/xen/arch/arm/remoteproc/omap_iommu.c
+++ b/xen/arch/arm/remoteproc/omap_iommu.c
@@ -32,12 +32,23 @@
 /* register where address of pagetable is stored */
 #define MMU_IPU_TTB_OFFSET 0x4c
 
+#define MMU_GPU_TTB_OFFSET_00 0xc84
+#define MMU_GPU_TTB_OFFSET_01 0xc38
+#define MMU_GPU_TTB_OFFSET_02 0xc3c
+#define MMU_GPU_TTB_OFFSET_03 0xc40
+#define MMU_GPU_TTB_OFFSET_04 0xc44
+#define MMU_GPU_TTB_OFFSET_05 0xc48
+#define MMU_GPU_TTB_OFFSET_06 0xc4c
+#define MMU_GPU_TTB_OFFSET_07 0xc50
+
 /* 1st level translation */
 #define MMU_OMAP_PGD_SHIFT 20
 #define MMU_OMAP_SUPER_SHIFT 24 /* "supersection" - 16 Mb */
 #define MMU_OMAP_SECTION_SHIFT 20 /* "section" - 1 Mb */
 #define MMU_OMAP_SECOND_LEVEL_SHIFT 10
+#define MMU_GPU_PGD_SHIFT 22 /* SGX section */
+
 /* 2nd level translation */
 #define MMU_OMAP_PTE_SMALL_SHIFT 12 /* "small page" - 4Kb */
 #define MMU_OMAP_PTE_LARGE_SHIFT 16 /* "large page" - 64 Kb */
@@ -57,15 +68,28 @@
 #define IPU_PTE_LARGE (1 << 0)
 
 #define OMAP_IPU_MMU_MEM_BASE 0x55082000
+#define OMAP_GPU_MMU_MEM_BASE 0x56000000
 
 static int mmu_omap_copy_pagetable(struct mmu_info *mmu, struct mmu_pagetable *pgt);
 static paddr_t mmu_ipu_translate_pagetable(struct mmu_info *mmu, struct mmu_pagetable *pgt);
+static paddr_t mmu_gpu_translate_pagetable(struct mmu_info *mmu, struct mmu_pagetable *pgt);
 
 static u32 ipu_trap_offsets[] = {
     MMU_IPU_TTB_OFFSET,
 };
 
+static u32 sgx_trap_offsets[] = {
+    MMU_GPU_TTB_OFFSET_00,
+    MMU_GPU_TTB_OFFSET_01,
+    MMU_GPU_TTB_OFFSET_02,
+    MMU_GPU_TTB_OFFSET_03,
+    MMU_GPU_TTB_OFFSET_04,
+    MMU_GPU_TTB_OFFSET_05,
+    MMU_GPU_TTB_OFFSET_06,
+    MMU_GPU_TTB_OFFSET_07,
+};
+
 static const struct pagetable_data pagetable_ipu_data = {
     .pgd_shift = MMU_OMAP_PGD_SHIFT,
     .super_shift = MMU_OMAP_SUPER_SHIFT,
@@ -85,6 +109,24 @@ struct mmu_info omap_ipu_mmu = {
     .translate_pfunc = mmu_ipu_translate_pagetable,
 };
 
+static const struct pagetable_data pagetable_gpu_data = {
+    .pgd_shift = MMU_GPU_PGD_SHIFT,
+    .super_shift = MMU_GPU_PGD_SHIFT,
+    .section_shift = MMU_GPU_PGD_SHIFT,
+    .pte_shift = MMU_OMAP_PTE_SMALL_SHIFT, /* the same as IPU */
+};
+
+struct mmu_info omap_gpu_mmu = {
+    .name = "SGX_L2_MMU",
+    .pg_data = &pagetable_gpu_data,
+    .trap_offsets = sgx_trap_offsets,
+    .mem_start = OMAP_GPU_MMU_MEM_BASE,
+    .mem_size = 0x1000,
+    .num_traps = ARRAY_SIZE(sgx_trap_offsets),
+    .copy_pagetable_pfunc = mmu_omap_copy_pagetable,
+    .translate_pfunc = mmu_gpu_translate_pagetable,
+};
+
 static bool translate_supersections_to_pages = true;
 static bool translate_sections_to_pages = true;
@@ -315,6 +357,71 @@ static paddr_t mmu_ipu_translate_pagetable(struct mmu_info *mmu, struct mmu_page
     return __pa(hyp_pgt);
 }
 
+static paddr_t mmu_gpu_translate_pagetable(struct mmu_info *mmu, struct mmu_pagetable *pgt)
+{
+    /* GPU pagetable consists of set of 32 bit pointers */
+    u32 *kern_pgt, *hyp_pgt;
+    u32 i;
+
+    ASSERT(mmu);
+    ASSERT(pgt);
+
+    kern_pgt = pgt->kern_pagetable;
+    hyp_pgt = pgt->hyp_pagetable;
+    pgt->page_counter = 0;
+
+    /* 1-st level translation */
+    for ( i = 0; i < MMU_PTRS_PER_PGD(mmu); i++ )
+    {
+        paddr_t pd_maddr, pd_paddr, pd_flags, pgd;
+        u32 pd_mask = MMU_SECTION_MASK(mmu->pg_data->pte_shift);
+        int res;
+
+        pgd = kern_pgt[i];
+        if ( !pgd )
+        {
+            /* handle the case when second level translation table
+             * was removed from kernel */
+            if ( unlikely(hyp_pgt[i]) )
+            {
+                guest_physmap_unpin_range(current->domain,
+                                          (hyp_pgt[i] & pd_mask) >> PAGE_SHIFT, 0);
+                xfree(__va(hyp_pgt[i] & pd_mask));
+                hyp_pgt[i] = 0;
+            }
+            continue;
+        }
+
+        pd_paddr = pgd & pd_mask;
+        pd_flags = pgd & ~pd_mask;
+        pd_maddr = p2m_lookup(current->domain, pd_paddr, NULL);
+
+        if ( INVALID_PADDR == pd_maddr )
+        {
+            pr_mmu(mmu, "failed to lookup paddr 0x%"PRIpaddr"", pd_paddr);
+            return INVALID_PADDR;
+        }
+
+        if ( !guest_physmap_pinned_range(current->domain, pd_maddr >> PAGE_SHIFT, 0) )
+        {
+            res = guest_physmap_pin_range(current->domain, pd_maddr >> PAGE_SHIFT, 0);
+            if ( res )
+            {
+                pr_mmu(mmu, "can't pin page pfn 0x%"PRIpaddr" mfn 0x%"PRIpaddr" res %d",
+                       pd_paddr, pd_maddr, res);
+                return INVALID_PADDR;
+            }
+        }
+
+        /* 2-nd level translation */
+        hyp_pgt[i] = remoteproc_iommu_translate_second_level(mmu, pgt, pd_maddr, hyp_pgt[i]);
+        hyp_pgt[i] |= pd_flags;
+    }
+
+    clean_and_invalidate_xen_dcache_va_range(hyp_pgt, MMU_PGD_TABLE_SIZE(mmu));
+    return __pa(hyp_pgt);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/remoteproc/remoteproc_iommu.c b/xen/arch/arm/remoteproc/remoteproc_iommu.c
index a2cae25..c691619 100644
--- a/xen/arch/arm/remoteproc/remoteproc_iommu.c
+++ b/xen/arch/arm/remoteproc/remoteproc_iommu.c
@@ -33,6 +33,7 @@
 
 static struct mmu_info *mmu_list[] = {
     &omap_ipu_mmu,
+    &omap_gpu_mmu,
 };
 
 #define mmu_for_each(pfunc, data) \
diff --git a/xen/include/asm-arm/remoteproc_iommu.h b/xen/include/asm-arm/remoteproc_iommu.h
index e581fc3..4983505 100644
--- a/xen/include/asm-arm/remoteproc_iommu.h
+++ b/xen/include/asm-arm/remoteproc_iommu.h
@@ -80,5 +80,6 @@ paddr_t remoteproc_iommu_translate_second_level(struct mmu_info *mmu,
                                                 paddr_t maddr, paddr_t hyp_addr);
 
 extern struct mmu_info omap_ipu_mmu;
+extern struct mmu_info omap_gpu_mmu;
 
 #endif /* _REMOTEPROC_IOMMU_H_ */
-- 
1.9.1
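
A note on the address-space arithmetic implied by the new constants: with
MMU_GPU_PGD_SHIFT of 22 and 4 Kb small pages (MMU_OMAP_PTE_SMALL_SHIFT of 12),
each first-level entry covers 2^22 = 4 Mb, so a 32-bit virtual address splits
into a 10-bit PGD index (1024 first-level entries), a 10-bit PTE index (1024
second-level entries per table) and a 12-bit page offset. A minimal
stand-alone sketch of that decomposition (names here are illustrative only,
not part of the patch):

    /* Illustrative only -- not part of the patch. */
    #include <stdint.h>

    #define GPU_PGD_SHIFT 22 /* mirrors MMU_GPU_PGD_SHIFT */
    #define GPU_PTE_SHIFT 12 /* mirrors MMU_OMAP_PTE_SMALL_SHIFT */

    /* top 10 bits: one of 1024 first-level entries, 4 Mb each */
    static inline uint32_t gpu_pgd_index(uint32_t va)
    {
        return va >> GPU_PGD_SHIFT;
    }

    /* middle 10 bits: one of 1024 second-level PTEs, 4 Kb each */
    static inline uint32_t gpu_pte_index(uint32_t va)
    {
        return (va >> GPU_PTE_SHIFT) &
               ((1u << (GPU_PGD_SHIFT - GPU_PTE_SHIFT)) - 1);
    }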
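
And a simplified model of what the first-level loop in
mmu_gpu_translate_pagetable() does, per the description above - enumerate the
guest pagetable and rewrite each pfn with the corresponding mfn. This is a
sketch under stated assumptions: lookup_mfn() is a hypothetical stand-in for
p2m_lookup(), and the pinning, error handling and cache maintenance of the
real code are omitted:

    /* Simplified model of the first-level walk -- illustrative only.
     * lookup_mfn() is a hypothetical stand-in for p2m_lookup(); the
     * pinning and cache maintenance of the real code are omitted. */
    #include <stdint.h>
    #include <stddef.h>

    extern uint32_t lookup_mfn(uint32_t pfn); /* hypothetical */

    static void translate_pgd(const uint32_t *kern_pgt, uint32_t *hyp_pgt,
                              size_t nr_entries, uint32_t pd_mask)
    {
        size_t i;

        for ( i = 0; i < nr_entries; i++ )
        {
            uint32_t pgd = kern_pgt[i];

            if ( !pgd )
            {
                hyp_pgt[i] = 0; /* no second-level table for this slot */
                continue;
            }

            /* split the descriptor into address bits and flag bits */
            uint32_t pfn = (pgd & pd_mask) >> 12;
            uint32_t flags = pgd & ~pd_mask;

            /* rewrite the guest frame number with the machine one,
             * keeping the original descriptor flags */
            hyp_pgt[i] = (lookup_mfn(pfn) << 12) | flags;
        }
    }

The real function additionally pins the backing pages, so the second-level
tables cannot be reassigned while the MMU may still walk them.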