[Xen-changelog] merge
ChangeSet 1.1399, 2005/04/28 22:45:54+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

	merge

 hypervisor.c |  340 ++++++++++++++---------------------------------
 1 files changed, 86 insertions(+), 254 deletions(-)


diff -Nru a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c	2005-04-28 18:04:44 -04:00
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c	2005-04-28 18:04:44 -04:00
@@ -34,317 +34,150 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm-xen/hypervisor.h>
-#include <asm-xen/multicall.h>
 #include <asm-xen/balloon.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include <linux/percpu.h>
+#include <asm/tlbflush.h>
+#endif
 
-/*
- * This suffices to protect us if we ever move to SMP domains.
- * Further, it protects us against interrupts. At the very least, this is
- * required for the network driver which flushes the update queue before
- * pushing new receive buffers.
- */
-static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
-
-/* Linux 2.6 isn't using the traditional batched interface. */
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define QUEUE_SIZE 2048
 #define pte_offset_kernel pte_offset
-#define pmd_val_ma(v) (v).pmd;
 #define pud_t pgd_t
 #define pud_offset(d, va) d
 #else
-#define QUEUE_SIZE 128
 #define pmd_val_ma(v) (v).pud.pgd.pgd;
 #endif
 
-static mmu_update_t update_queue[QUEUE_SIZE];
-unsigned int mmu_update_queue_idx = 0;
-#define idx mmu_update_queue_idx
-
-/*
- * MULTICALL_flush_page_update_queue:
- *  This is a version of the flush which queues as part of a multicall.
- */
-void MULTICALL_flush_page_update_queue(void)
-{
-    unsigned long flags;
-    unsigned int _idx;
-    spin_lock_irqsave(&update_lock, flags);
-    if ( (_idx = idx) != 0 )
-    {
-        idx = 0;
-        wmb(); /* Make sure index is cleared first to avoid double updates. */
-        queue_multicall3(__HYPERVISOR_mmu_update,
-                         (unsigned long)update_queue,
-                         (unsigned long)_idx,
-                         (unsigned long)NULL);
-    }
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void __flush_page_update_queue(void)
-{
-    unsigned int _idx = idx;
-    idx = 0;
-    wmb(); /* Make sure index is cleared first to avoid double updates. */
-    if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
-    {
-        printk(KERN_ALERT "Failed to execute MMU updates.\n");
-        BUG();
-    }
-}
-
-void _flush_page_update_queue(void)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    if ( idx != 0 ) __flush_page_update_queue();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void increment_index(void)
-{
-    idx++;
-    if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
-}
-
-static inline void increment_index_and_flush(void)
-{
-    idx++;
-    __flush_page_update_queue();
-}
-
-void queue_l1_entry_update(pte_t *ptr, unsigned long val)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = virt_to_machine(ptr);
-    update_queue[idx].val = val;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_l2_entry_update(pmd_t *ptr, pmd_t val)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = virt_to_machine(ptr);
-    update_queue[idx].val = pmd_val_ma(val);
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pt_switch(unsigned long ptr)
+#ifndef CONFIG_XEN_SHADOW_MODE
+void xen_l1_entry_update(pte_t *ptr, unsigned long val)
 {
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_NEW_BASEPTR;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = virt_to_machine(ptr);
+    u.val = val;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
-void queue_tlb_flush(void)
+void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
 {
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_TLB_FLUSH;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = virt_to_machine(ptr);
+    u.val = pmd_val_ma(val);
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
+#endif
 
-void queue_invlpg(unsigned long ptr)
+void xen_machphys_update(unsigned long mfn, unsigned long pfn)
 {
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
-    update_queue[idx].ptr |= ptr & PAGE_MASK;
-    update_queue[idx].val = MMUEXT_INVLPG;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+    u.val = pfn;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
-void queue_pgd_pin(unsigned long ptr)
+void xen_pt_switch(unsigned long ptr)
 {
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
+    struct mmuext_op op;
+    op.cmd = MMUEXT_NEW_BASEPTR;
+    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void queue_pgd_unpin(unsigned long ptr)
+void xen_tlb_flush(void)
 {
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_UNPIN_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
+    struct mmuext_op op;
+    op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
+    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void queue_pte_pin(unsigned long ptr)
+void xen_invlpg(unsigned long ptr)
 {
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
+    struct mmuext_op op;
+    op.cmd = MMUEXT_INVLPG_LOCAL;
+    op.linear_addr = ptr & PAGE_MASK;
+    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void queue_pte_unpin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_UNPIN_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
+#ifdef CONFIG_SMP
 
-void queue_set_ldt(unsigned long ptr, unsigned long len)
+void xen_tlb_flush_all(void)
 {
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
-    update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
+    struct mmuext_op op;
+    op.cmd = MMUEXT_TLB_FLUSH_ALL;
+    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void queue_machphys_update(unsigned long mfn, unsigned long pfn)
+void xen_tlb_flush_mask(cpumask_t *mask)
 {
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-    update_queue[idx].val = pfn;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
+    struct mmuext_op op;
+    if ( cpus_empty(*mask) )
+        return;
+    op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+    op.cpuset = mask->bits;
+    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
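For readers tracking the interface change: this merge drops the batched page-table
update queue (queue_l1_entry_update() and friends, guarded by update_lock and
flushed via _flush_page_update_queue()) in favour of one synchronous hypercall per
operation. Below is a minimal sketch of how a caller's code changes. The function
example_set_pte() is hypothetical, not part of the changeset; it assumes the xen_*
wrappers shown in the diff above are declared in <asm-xen/hypervisor.h> as in this
tree.

/* Hypothetical caller illustrating the before/after calling pattern. */
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm-xen/hypervisor.h>

static void example_set_pte(pte_t *ptep, unsigned long val)
{
    /* Before this merge (batched interface):
     *     queue_l1_entry_update(ptep, val);
     *     queue_tlb_flush();
     *     _flush_page_update_queue();
     */

    /* After this merge, each wrapper issues its hypercall immediately and
     * BUG()s on failure, so there is no queue, no update_lock, and no
     * explicit flush point. */
    xen_l1_entry_update(ptep, val);  /* one HYPERVISOR_mmu_update request  */
    xen_tlb_flush();                 /* mmuext_op MMUEXT_TLB_FLUSH_LOCAL   */
}

The trade-off is visible in the diffstat: the queued interface amortised hypercall
overhead across up to QUEUE_SIZE entries but required a spinlock and carefully
placed flush points, while the direct interface pays a hypercall per update in
exchange for much simpler, lock-free code.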
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog