[PATCH v2 08/18] IOMMU/x86: support freeing of pagetables
For vendor-specific code to support superpages we need to be able to deal
with a superpage mapping replacing an intermediate page table (or hierarchy
thereof). Consequently an iommu_alloc_pgtable() counterpart is needed to
free individual page tables while a domain is still alive. Since the freeing
needs to be deferred until after a suitable IOTLB flush has been performed,
released page tables get queued for processing by a tasklet.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
I was considering whether to use a softirq tasklet instead. This would have
the benefit of avoiding extra scheduling operations, but would come with the
risk of the freeing happening prematurely because of a
process_pending_softirqs() somewhere.

--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -12,6 +12,7 @@
  * this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <xen/cpu.h>
 #include <xen/sched.h>
 #include <xen/iommu.h>
 #include <xen/paging.h>
@@ -463,6 +464,85 @@ struct page_info *iommu_alloc_pgtable(st
     return pg;
 }
 
+/*
+ * Intermediate page tables which get replaced by large pages may only be
+ * freed after a suitable IOTLB flush. Hence such pages get queued on a
+ * per-CPU list, with a per-CPU tasklet processing the list on the assumption
+ * that the necessary IOTLB flush will have occurred by the time tasklets get
+ * to run. (List and tasklet being per-CPU has the benefit of accesses not
+ * requiring any locking.)
+ */
+static DEFINE_PER_CPU(struct page_list_head, free_pgt_list);
+static DEFINE_PER_CPU(struct tasklet, free_pgt_tasklet);
+
+static void free_queued_pgtables(void *arg)
+{
+    struct page_list_head *list = arg;
+    struct page_info *pg;
+
+    while ( (pg = page_list_remove_head(list)) )
+        free_domheap_page(pg);
+}
+
+void iommu_queue_free_pgtable(struct domain *d, struct page_info *pg)
+{
+    struct domain_iommu *hd = dom_iommu(d);
+    unsigned int cpu = smp_processor_id();
+
+    spin_lock(&hd->arch.pgtables.lock);
+    page_list_del(pg, &hd->arch.pgtables.list);
+    spin_unlock(&hd->arch.pgtables.lock);
+
+    page_list_add_tail(pg, &per_cpu(free_pgt_list, cpu));
+
+    tasklet_schedule(&per_cpu(free_pgt_tasklet, cpu));
+}
+
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+    struct page_list_head *list = &per_cpu(free_pgt_list, cpu);
+    struct tasklet *tasklet = &per_cpu(free_pgt_tasklet, cpu);
+
+    switch ( action )
+    {
+    case CPU_DOWN_PREPARE:
+        tasklet_kill(tasklet);
+        break;
+
+    case CPU_DEAD:
+        page_list_splice(list, &this_cpu(free_pgt_list));
+        INIT_PAGE_LIST_HEAD(list);
+        tasklet_schedule(&this_cpu(free_pgt_tasklet));
+        break;
+
+    case CPU_UP_PREPARE:
+    case CPU_DOWN_FAILED:
+        tasklet_init(tasklet, free_queued_pgtables, list);
+        break;
+    }
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback,
+};
+
+static int __init bsp_init(void)
+{
+    if ( iommu_enabled )
+    {
+        cpu_callback(&cpu_nfb, CPU_UP_PREPARE,
+                     (void *)(unsigned long)smp_processor_id());
+        register_cpu_notifier(&cpu_nfb);
+    }
+
+    return 0;
+}
+presmp_initcall(bsp_init);
+
 bool arch_iommu_use_permitted(const struct domain *d)
 {
     /*
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -143,6 +143,7 @@ int pi_update_irte(const struct pi_desc
 int __must_check iommu_free_pgtables(struct domain *d);
 struct page_info *__must_check iommu_alloc_pgtable(struct domain *d);
+void iommu_queue_free_pgtable(struct domain *d, struct page_info *pg);
 
 #endif /* !__ARCH_X86_IOMMU_H__ */
 
 /*
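
[Archive note, not part of the patch: the hunks above add the queueing
helper but no caller yet. The sketch below illustrates how vendor-specific
mapping code gaining superpage support later in the series might use it;
replace_with_superpage() and pte_to_page_table() are hypothetical
placeholders, only iommu_queue_free_pgtable() comes from this patch.]

/*
 * Illustrative sketch only.  When a superpage entry replaces a reference
 * to an intermediate page table, the old table is handed to the queueing
 * helper rather than freed directly.
 */
static void replace_with_superpage(struct domain *d, uint64_t *pte,
                                   uint64_t superpage_pte)
{
    /* The old entry referenced an intermediate page table (placeholder). */
    struct page_info *old_pt = pte_to_page_table(*pte);

    /* Install the superpage mapping in place of the table reference. */
    write_atomic(pte, superpage_pte);

    /*
     * IOTLBs may still hold translations derived from the old table, so
     * it must not be freed yet.  Queueing defers the actual freeing to
     * the per-CPU tasklet, which runs only after the caller's IOTLB
     * flush has taken effect.
     */
    iommu_queue_free_pgtable(d, old_pt);
}

[Because both the list and the tasklet are per-CPU, no locking is needed on
the free queue itself, as the comment in the patch points out.]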