
[Xen-changelog] Merge x86_64 and i386 ioremap.c.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID b09f13ddd27babba0ce3948bdcdd6d8f247b4e0d
# Parent  0b5ee83ea35c98a511c794b11fa8c70ab8cc2274
Merge x86_64 and i386 ioremap.c.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
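
The functional core of the merge is the flag handling in __ioremap(): rather
than hard-coding the pgprot bits at the call site, the shared file now
accumulates them in "flags", adding _PAGE_USER only when compiled for x86_64
(the old x86_64 copy hard-coded that bit). A condensed sketch of the
resulting arch-conditional construction, using only names that appear in the
hunks below:

    /* Sketch of the merged __ioremap() flag handling (first hunk below). */
    flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
    #ifdef __x86_64__
    flags |= _PAGE_USER;            /* carried over from the x86_64 copy */
    #endif
    if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
                                size, __pgprot(flags), domid)) {
            vunmap((void __force *) addr);
            return NULL;
    }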

diff -r 0b5ee83ea35c -r b09f13ddd27b linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Tue Aug 23 13:13:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Tue Aug 23 13:30:40 2005
@@ -36,6 +36,8 @@
 {
 }
 
+#ifdef __i386__
+
 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
 {
        return NULL;
@@ -44,6 +46,8 @@
 void __init bt_iounmap(void *addr, unsigned long size)
 {
 }
+
+#endif /* __i386__ */
 
 #else
 
@@ -126,10 +130,12 @@
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *) area->addr;
+       flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
+#ifdef __x86_64__
+       flags |= _PAGE_USER;
+#endif
        if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
-                                   size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-                                                  _PAGE_DIRTY | _PAGE_ACCESSED
-                                                  | flags), domid)) {
+                                   size, __pgprot(flags), domid)) {
                vunmap((void __force *) addr);
                return NULL;
        }
@@ -218,6 +224,8 @@
        kfree(p); 
 }
 
+#ifdef __i386__
+
 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
 {
        unsigned long offset, last_addr;
@@ -288,6 +296,8 @@
                --nrpages;
        }
 }
+
+#endif /* __i386__ */
 
 #endif /* CONFIG_XEN_PHYSDEV_ACCESS */
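
The new #ifdef __i386__ guards compile bt_ioremap()/bt_iounmap() out of the
x86_64 build; the deleted x86_64 copy below already wrapped them in
#if defined(__i386__), since the FIX_BTMAP boot-time fixmap slots they rely
on exist only in the i386 fixmap layout. For context, the heart of
bt_ioremap() (unchanged by this patch) maps the request into consecutive
fixmap slots, highest index first:

    /* Condensed from bt_ioremap(): one boot-time fixmap slot per page. */
    idx = FIX_BTMAP_BEGIN;
    while (nrpages > 0) {
            set_fixmap(idx, phys_addr);
            phys_addr += PAGE_SIZE;
            --idx;
            --nrpages;
    }
    return (void *) (offset + fix_to_virt(FIX_BTMAP_BEGIN));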
 
diff -r 0b5ee83ea35c -r b09f13ddd27b linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile  Tue Aug 23 13:13:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile  Tue Aug 23 13:30:40 2005
@@ -6,10 +6,10 @@
 
 CFLAGS += -Iarch/$(XENARCH)/mm
 
-obj-y  := init.o fault.o ioremap.o pageattr.o
+obj-y  := init.o fault.o pageattr.o
 c-obj-y        := extable.o
 
-i386-obj-y := hypervisor.o
+i386-obj-y := hypervisor.o ioremap.o
 
 #obj-y  := init.o fault.o ioremap.o extable.o pageattr.o
 #c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
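
With the shared file guarded, the x86_64 Makefile drops its own ioremap.o
from obj-y and picks it up via i386-obj-y instead, the same mechanism
already used for hypervisor.o: objects in that list are built from the
corresponding arch/xen/i386 sources (note the existing
CFLAGS += -Iarch/$(XENARCH)/mm include path above).
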
diff -r 0b5ee83ea35c -r b09f13ddd27b linux-2.6-xen-sparse/arch/xen/x86_64/mm/ioremap.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/ioremap.c Tue Aug 23 13:13:39 2005
+++ /dev/null   Tue Aug 23 13:30:40 2005
@@ -1,489 +0,0 @@
-/*
- * arch/x86_64/mm/ioremap.c
- *
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <asm/fixmap.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-
-/*
- * Reuse arch/xen/i396/mm/ioremap.c. Need to merge later
- */
-#ifndef CONFIG_XEN_PHYSDEV_ACCESS
-
-void * __ioremap(unsigned long phys_addr, unsigned long size,
-                unsigned long flags)
-{
-       return NULL;
-}
-
-void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
-       return NULL;
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-}
-
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{
-       return NULL;
-}
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{
-}
-
-#else
-
-/*
- * Does @address reside within a non-highmem page that is local to this virtual
- * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
- * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
- * why this works.
- */
-static inline int is_local_lowmem(unsigned long address)
-{
-       extern unsigned long max_low_pfn;
-       unsigned long mfn = address >> PAGE_SHIFT;
-       unsigned long pfn = mfn_to_pfn(mfn);
-       return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn));
-}
-
-#endif
-
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-{
-       void __iomem * addr;
-       struct vm_struct * area;
-       unsigned long offset, last_addr;
-       domid_t domid = DOMID_IO;
-
-       /* Don't allow wraparound or zero size */
-       last_addr = phys_addr + size - 1;
-       if (!size || last_addr < phys_addr)
-               return NULL;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       /*
-        * Don't remap the low PCI/ISA area, it's always mapped..
-        */
-       if (phys_addr >= 0x0 && last_addr < 0x100000)
-               return isa_bus_to_virt(phys_addr);
-#endif
-
-       /*
-        * Don't allow anybody to remap normal RAM that we're using..
-        */
-       if (is_local_lowmem(phys_addr)) {
-               char *t_addr, *t_end;
-               struct page *page;
-
-               t_addr = bus_to_virt(phys_addr);
-               t_end = t_addr + (size - 1);
-          
-               for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-                       if(!PageReserved(page))
-                               return NULL;
-
-               domid = DOMID_LOCAL;
-       }
-
-       /*
-        * Mappings have to be page-aligned
-        */
-       offset = phys_addr & ~PAGE_MASK;
-       phys_addr &= PAGE_MASK;
-       size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
-       /*
-        * Ok, go for it..
-        */
-       area = get_vm_area(size, VM_IOREMAP | (flags << 20));
-       if (!area)
-               return NULL;
-       area->phys_addr = phys_addr;
-       addr = (void __iomem *) area->addr;
-       if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
-                                   size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-                                                  _PAGE_DIRTY | _PAGE_ACCESSED
-                                                   | _PAGE_USER
-                                                  | flags), domid)) {
-               vunmap((void __force *) addr);
-               return NULL;
-       }
-       return (void __iomem *) (offset + (char __iomem *)addr);
-}
-
-
-/**
- * ioremap_nocache     -   map bus memory into CPU space
- * @offset:    bus address of the memory
- * @size:      size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address. 
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many 
- * busses. In particular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- * 
- * Must be freed with iounmap.
- */
-
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
-       unsigned long last_addr;
-       void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
-       if (!p) 
-               return p; 
-
-       /* Guaranteed to be > phys_addr, as per __ioremap() */
-       last_addr = phys_addr + size - 1;
-
-       if (is_local_lowmem(last_addr)) { 
-               struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
-               unsigned long npages;
-
-               phys_addr &= PAGE_MASK;
-
-               /* This might overflow and become zero.. */
-               last_addr = PAGE_ALIGN(last_addr);
-
-               /* .. but that's ok, because modulo-2**n arithmetic will make
-               * the page-aligned "last - first" come out right.
-               */
-               npages = (last_addr - phys_addr) >> PAGE_SHIFT;
-
-               if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
-                       iounmap(p); 
-                       p = NULL;
-               }
-               global_flush_tlb();
-       }
-
-       return p;                                       
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-       struct vm_struct *p;
-       if ((void __force *) addr <= high_memory) 
-               return; 
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-               return;
-#endif
-       p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
-       if (!p) { 
-               printk("__iounmap: bad address %p\n", addr);
-               return;
-       }
-
-       if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
-               /* p->size includes the guard page, but cpa doesn't like that */
-               change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
-                                (p->size - PAGE_SIZE) >> PAGE_SHIFT,
-                                PAGE_KERNEL);                           
-               global_flush_tlb();
-       } 
-       kfree(p); 
-}
-
-#if defined(__i386__)
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{
-       unsigned long offset, last_addr;
-       unsigned int nrpages;
-       enum fixed_addresses idx;
-
-       /* Don't allow wraparound or zero size */
-       last_addr = phys_addr + size - 1;
-       if (!size || last_addr < phys_addr)
-               return NULL;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       /*
-        * Don't remap the low PCI/ISA area, it's always mapped..
-        */
-       if (phys_addr >= 0x0 && last_addr < 0x100000)
-               return isa_bus_to_virt(phys_addr);
-#endif
-
-       /*
-        * Mappings have to be page-aligned
-        */
-       offset = phys_addr & ~PAGE_MASK;
-       phys_addr &= PAGE_MASK;
-       size = PAGE_ALIGN(last_addr) - phys_addr;
-
-       /*
-        * Mappings have to fit in the FIX_BTMAP area.
-        */
-       nrpages = size >> PAGE_SHIFT;
-       if (nrpages > NR_FIX_BTMAPS)
-               return NULL;
-
-       /*
-        * Ok, go for it..
-        */
-       idx = FIX_BTMAP_BEGIN;
-       while (nrpages > 0) {
-               set_fixmap(idx, phys_addr);
-               phys_addr += PAGE_SIZE;
-               --idx;
-               --nrpages;
-       }
-       return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
-}
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{
-       unsigned long virt_addr;
-       unsigned long offset;
-       unsigned int nrpages;
-       enum fixed_addresses idx;
-
-       virt_addr = (unsigned long)addr;
-       if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
-               return;
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-               return;
-#endif
-       offset = virt_addr & ~PAGE_MASK;
-       nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
-
-       idx = FIX_BTMAP_BEGIN;
-       while (nrpages > 0) {
-               clear_fixmap(idx);
-               --idx;
-               --nrpages;
-       }
-}
-#endif /* defined(__i386__) */
-
-#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
-
-/* These hacky macros avoid phys->machine translations. */
-#define __direct_pte(x) ((pte_t) { (x) } )
-#define __direct_mk_pte(page_nr,pgprot) \
-  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-#define direct_mk_pte_phys(physpage, pgprot) \
-  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-static inline void direct_remap_area_pte(pte_t *pte, 
-                                        unsigned long address, 
-                                        unsigned long size,
-                                        mmu_update_t **v)
-{
-       unsigned long end;
-
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       if (address >= end)
-               BUG();
-
-       do {
-               (*v)->ptr = virt_to_machine(pte);
-               (*v)++;
-               address += PAGE_SIZE;
-               pte++;
-       } while (address && (address < end));
-}
-
-static inline int direct_remap_area_pmd(struct mm_struct *mm,
-                                       pmd_t *pmd, 
-                                       unsigned long address, 
-                                       unsigned long size,
-                                       mmu_update_t **v)
-{
-       unsigned long end;
-
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       if (address >= end)
-               BUG();
-       do {
-               pte_t *pte = (mm == &init_mm) ? 
-                       pte_alloc_kernel(mm, pmd, address) :
-                       pte_alloc_map(mm, pmd, address);
-               if (!pte)
-                       return -ENOMEM;
-               direct_remap_area_pte(pte, address, end - address, v);
-               pte_unmap(pte);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address && (address < end));
-       return 0;
-}
- 
-int __direct_remap_area_pages(struct mm_struct *mm,
-                             unsigned long address, 
-                             unsigned long size, 
-                             mmu_update_t *v)
-{
-       pgd_t * dir;
-       unsigned long end = address + size;
-       int error;
-
-#if defined(__i386__)
-       dir = pgd_offset(mm, address);
-#elif defined (__x86_64)
-        dir = (mm == &init_mm) ?
-               pgd_offset_k(address):
-               pgd_offset(mm, address);
-#endif
-       if (address >= end)
-               BUG();
-       spin_lock(&mm->page_table_lock);
-       do {
-               pud_t *pud;
-               pmd_t *pmd;
-
-               error = -ENOMEM;
-               pud = pud_alloc(mm, dir, address);
-               if (!pud)
-                       break;
-               pmd = pmd_alloc(mm, pud, address);
-               if (!pmd)
-                       break;
-               error = 0;
-               direct_remap_area_pmd(mm, pmd, address, end - address, &v);
-               address = (address + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-
-       } while (address && (address < end));
-       spin_unlock(&mm->page_table_lock);
-       return error;
-}
-
-
-int direct_remap_area_pages(struct mm_struct *mm,
-                           unsigned long address, 
-                           unsigned long machine_addr,
-                           unsigned long size, 
-                           pgprot_t prot,
-                           domid_t  domid)
-{
-       int i;
-       unsigned long start_address;
-#define MAX_DIRECTMAP_MMU_QUEUE 130
-       mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u;
-
-       start_address = address;
-
-       flush_cache_all();
-
-       for (i = 0; i < size; i += PAGE_SIZE) {
-               if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
-                       /* Fill in the PTE pointers. */
-                       __direct_remap_area_pages(mm,
-                                                 start_address, 
-                                                 address-start_address, 
-                                                 u);
- 
-                       if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
-                               return -EFAULT;
-                       v = u;
-                       start_address = address;
-               }
-
-               /*
-                * Fill in the machine address: PTE ptr is done later by
-                * __direct_remap_area_pages(). 
-                */
-       v->val = pte_val_ma(pfn_pte_ma(machine_addr >> PAGE_SHIFT, prot));
-
-               machine_addr += PAGE_SIZE;
-               address += PAGE_SIZE; 
-               v++;
-       }
-
-       if (v != u) {
-               /* get the ptep's filled in */
-               __direct_remap_area_pages(mm,
-                                         start_address, 
-                                         address-start_address, 
-                                         u);
-               if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
-                       return -EFAULT;
-       }
-
-       flush_tlb_all();
-
-       return 0;
-}
-
-EXPORT_SYMBOL(direct_remap_area_pages);
-
-static int lookup_pte_fn(
-    pte_t *pte, struct page *pte_page, unsigned long addr, void *data) 
-{
-    unsigned long *ptep = (unsigned long *)data;
-    if (ptep) *ptep = (pfn_to_mfn(page_to_pfn(pte_page)) << PAGE_SHIFT)
-                  | ((unsigned long)pte & ~PAGE_MASK);
-    return 0;
-}
-
-int create_lookup_pte_addr(struct mm_struct *mm, 
-                           unsigned long address,
-                           unsigned long *ptep)
-{
-    return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
-}
-
-EXPORT_SYMBOL(create_lookup_pte_addr);
-
-static int noop_fn(
-    pte_t *pte, struct page *pte_page, unsigned long addr, void *data) 
-{
-    return 0;
-}
-
-int touch_pte_range(struct mm_struct *mm,
-                    unsigned long address,
-                    unsigned long size)
-{
-    return generic_page_range(mm, address, size, noop_fn, NULL);
-}
-
-EXPORT_SYMBOL(touch_pte_range);
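
Although the x86_64 copy is deleted, the retained i386 file provides the
identical direct_remap_area_pages(), with the same two-pass batching shown
above: pte values are queued into a fixed-size mmu_update_t array,
__direct_remap_area_pages() then fills each entry's pointer with the
machine address of the target PTE, and the batch is issued as a single
hypercall. A condensed sketch of the flush step, using only names from the
code above:

    /* Condensed from direct_remap_area_pages(): flush one queued batch. */
    mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u;
    /* ... one v->val queued per page, v advancing through u ... */
    __direct_remap_area_pages(mm, start_address, address - start_address, u);
    if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
            return -EFAULT;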

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

