[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] [LINUX] Remove vm_map_xen_pages(), clean up __direct_remap_page_range.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 6f41473da163afa567a6979939b089fa1210447b
# Parent  2017f6e92bf8e8a902a1b86e1808b5cf54eeeb50
[LINUX] Remove vm_map_xen_pages(), clean up __direct_remap_page_range.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/arch/i386/mm/ioremap-xen.c    |   42 +---------------
 linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c |   55 +++++++++++++--------
 2 files changed, 38 insertions(+), 59 deletions(-)

diff -r 2017f6e92bf8 -r 6f41473da163 linux-2.6-xen-sparse/arch/i386/mm/ioremap-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/mm/ioremap-xen.c   Thu Aug 31 14:46:28 2006 +0100
+++ b/linux-2.6-xen-sparse/arch/i386/mm/ioremap-xen.c   Thu Aug 31 18:23:28 2006 +0100
@@ -22,15 +22,6 @@
 #define ISA_START_ADDRESS      0x0
 #define ISA_END_ADDRESS                0x100000
 
-#if 0 /* not PAE safe */
-/* These hacky macros avoid phys->machine translations. */
-#define __direct_pte(x) ((pte_t) { (x) } )
-#define __direct_mk_pte(page_nr,pgprot) \
-  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-#define direct_mk_pte_phys(physpage, pgprot) \
-  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-#endif
-
 static int direct_remap_area_pte_fn(pte_t *pte, 
                                    struct page *pmd_page,
                                    unsigned long address, 
@@ -66,17 +57,16 @@ static int __direct_remap_pfn_range(stru
 
        for (i = 0; i < size; i += PAGE_SIZE) {
                if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
-                       /* Fill in the PTE pointers. */
+                       /* Flush a full batch after filling in the PTE ptrs. */
                        rc = apply_to_page_range(mm, start_address, 
                                                 address - start_address,
                                                 direct_remap_area_pte_fn, &w);
                        if (rc)
                                goto out;
-                       w = u;
                        rc = -EFAULT;
                        if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
                                goto out;
-                       v = u;
+                       v = w = u;
                        start_address = address;
                }
 
@@ -92,7 +82,7 @@ static int __direct_remap_pfn_range(stru
        }
 
        if (v != u) {
-               /* get the ptep's filled in */
+               /* Final batch. */
                rc = apply_to_page_range(mm, start_address,
                                         address - start_address,
                                         direct_remap_area_pte_fn, &w);
@@ -178,32 +168,6 @@ int touch_pte_range(struct mm_struct *mm
 } 
 
 EXPORT_SYMBOL(touch_pte_range);
-
-void *vm_map_xen_pages (unsigned long maddr, int vm_size, pgprot_t prot)
-{
-       int error;
-       
-       struct vm_struct *vma;
-       vma = get_vm_area (vm_size, VM_IOREMAP);
-      
-       if (vma == NULL) {
-               printk ("ioremap.c,vm_map_xen_pages(): "
-                       "Failed to get VMA area\n");
-               return NULL;
-       }
-
-       error = direct_kernel_remap_pfn_range((unsigned long) vma->addr,
-                                             maddr >> PAGE_SHIFT, vm_size,
-                                             prot, DOMID_SELF );
-       if (error == 0) {
-               return vma->addr;
-       } else {
-               printk ("ioremap.c,vm_map_xen_pages(): "
-                       "Failed to map xen shared pages into kernel space\n");
-               return NULL;
-       }
-}
-EXPORT_SYMBOL(vm_map_xen_pages);
 
 /*
  * Does @address reside within a non-highmem page that is local to this virtual
diff -r 2017f6e92bf8 -r 6f41473da163 linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c
--- a/linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c        Thu Aug 31 14:46:28 2006 +0100
+++ b/linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c        Thu Aug 31 18:23:28 2006 +0100
@@ -33,8 +33,6 @@
 
 static int xenoprof_start(void);
 static void xenoprof_stop(void);
-
-void * vm_map_xen_pages(unsigned long maddr, int vm_size, pgprot_t prot);
 
 static int xenoprof_enabled = 0;
 static unsigned int num_events = 0;
@@ -373,9 +371,9 @@ static int xenoprof_set_passive(int * p_
 {
        int ret;
        int i, j;
-       int vm_size;
        int npages;
        struct xenoprof_buf *buf;
+       struct vm_struct *area;
        pgprot_t prot = __pgprot(_KERNPG_TABLE);
 
        if (!is_primary)
@@ -391,19 +389,29 @@ static int xenoprof_set_passive(int * p_
        for (i = 0; i < pdoms; i++) {
                passive_domains[i].domain_id = p_domains[i];
                passive_domains[i].max_samples = 2048;
-               ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, &passive_domains[i]);
+               ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive,
+                                            &passive_domains[i]);
                if (ret)
-                       return ret;
+                       goto out;
 
                npages = (passive_domains[i].bufsize * passive_domains[i].nbuf - 1) / PAGE_SIZE + 1;
-               vm_size = npages * PAGE_SIZE;
-
-               p_shared_buffer[i] = (char *)vm_map_xen_pages(passive_domains[i].buf_maddr,
-                                                             vm_size, prot);
-               if (!p_shared_buffer[i]) {
+
+               area = get_vm_area(npages * PAGE_SIZE, VM_IOREMAP);
+               if (area == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }
+
+               ret = direct_kernel_remap_pfn_range(
+                       (unsigned long)area->addr,
+                       passive_domains[i].buf_maddr >> PAGE_SHIFT,
+                       npages * PAGE_SIZE, prot, DOMID_SELF);
+               if (ret) {
+                       vunmap(area->addr);
+                       goto out;
+               }
+
+               p_shared_buffer[i] = area->addr;
 
                for (j = 0; j < passive_domains[i].nbuf; j++) {
                        buf = (struct xenoprof_buf *)
@@ -473,11 +481,9 @@ int __init oprofile_arch_init(struct opr
 int __init oprofile_arch_init(struct oprofile_operations * ops)
 {
        struct xenoprof_init init;
-       struct xenoprof_buf * buf;
-       int vm_size;
-       int npages;
-       int ret;
-       int i;
+       struct xenoprof_buf *buf;
+       int npages, ret, i;
+       struct vm_struct *area;
 
        init.max_samples = 16;
        ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
@@ -495,14 +501,23 @@ int __init oprofile_arch_init(struct opr
                        num_events = OP_MAX_COUNTER;
 
                npages = (init.bufsize * nbuf - 1) / PAGE_SIZE + 1;
-               vm_size = npages * PAGE_SIZE;
-
-               shared_buffer = (char *)vm_map_xen_pages(init.buf_maddr,
-                                                        vm_size, prot);
-               if (!shared_buffer) {
+
+               area = get_vm_area(npages * PAGE_SIZE, VM_IOREMAP);
+               if (area == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }
+
+               ret = direct_kernel_remap_pfn_range(
+                       (unsigned long)area->addr,
+                       init.buf_maddr >> PAGE_SHIFT,
+                       npages * PAGE_SIZE, prot, DOMID_SELF);
+               if (ret) {
+                       vunmap(area->addr);
+                       goto out;
+               }
+
+               shared_buffer = area->addr;
 
                for (i=0; i< nbuf; i++) {
                        buf = (struct xenoprof_buf*) 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.