
[Xen-changelog] [IA64] Allow multiple-time mmap of the privcmd device



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 06e5c5599147cd34caab554ca71ab03e52c85e6f
# Parent  608ac00f4cfc9020958ab5e15d00daf752ab73a2
[IA64] Allow multiple-time mmap of the privcmd device

Allow the privcmd device to be mmap'ed multiple times. The old
implementation did not allow more than one mmap on the same struct
file; however, xend and qemu mmap the device multiple times.
This patch affects only the dom0 VP model. With this patch multiple
domUs can boot simultaneously, and a VTI domain can boot on a VP-model
dom0.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c |  104 +++++++++++-------------
 1 files changed, 49 insertions(+), 55 deletions(-)
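
The structural change, reduced to its core: each mmap() call now
allocates its own refcounted xen_ia64_privcmd_range (one reference per
VMA) instead of caching a single xen_ia64_privcmd_file in
file->private_data and failing the second mmap with -EBUSY. A
simplified sketch of the lifecycle (names shortened; illustrative
pattern only, not the literal code in the diff below):

        #include <linux/vmalloc.h>
        #include <asm/atomic.h>

        struct range {
                atomic_t        ref_count;      /* one reference per VMA */
                /* per-page entries follow ... */
        };

        /* mmap(): a fresh range per call, no per-file exclusivity */
        static struct range* range_new(unsigned long size)
        {
                struct range* r = vmalloc(size);
                if (r != NULL)
                        atomic_set(&r->ref_count, 1);
                return r;
        }

        /* vm_ops->open(): the kernel duplicated or split the VMA */
        static void range_get(struct range* r)
        {
                atomic_inc(&r->ref_count);
        }

        /* vm_ops->close(): the last VMA going away frees the range */
        static void range_put(struct range* r)
        {
                if (atomic_dec_and_test(&r->ref_count))
                        vfree(r);
        }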

diff -r 608ac00f4cfc -r 06e5c5599147 linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c   Tue May 16 08:59:26 2006 -0600
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c   Tue May 16 09:05:36 2006 -0600
@@ -23,6 +23,7 @@
 //#include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/bootmem.h>
+#include <linux/vmalloc.h>
 #include <asm/page.h>
 #include <asm/hypervisor.h>
 #include <asm/hypercall.h>
@@ -363,7 +364,6 @@ struct xen_ia64_privcmd_entry {
 struct xen_ia64_privcmd_entry {
        atomic_t        map_count;
        struct page*    page;
-       unsigned long   mfn;
 };
 
 static void
@@ -371,9 +371,13 @@ xen_ia64_privcmd_init_entry(struct xen_i
 {
        atomic_set(&entry->map_count, 0);
        entry->page = NULL;
-       entry->mfn = INVALID_MFN;
-}
-
+}
+
+//TODO: using alloc_page() to allocate pseudo-physical address space
+//     is a waste of memory.
+//     When a VTI domain is created, qemu maps all of the domain's
+//     pages, which can reach several hundred megabytes at least.
+//     Remove alloc_page().
 static int
 xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
                            unsigned long addr,
@@ -418,7 +422,6 @@ xen_ia64_privcmd_entry_mmap(struct vm_ar
        } else {
                atomic_inc(&entry->map_count);
                entry->page = page;
-               entry->mfn = mfn;
        }
 
 out:
@@ -443,7 +446,6 @@ xen_ia64_privcmd_entry_munmap(struct xen
        }
 
        entry->page = NULL;
-       entry->mfn = INVALID_MFN;
        __free_page(page);
 }
 
@@ -465,9 +467,8 @@ xen_ia64_privcmd_entry_close(struct xen_
        }
 }
 
-struct xen_ia64_privcmd_file {
-       struct file*                    file;
-       atomic_t                        map_count;
+struct xen_ia64_privcmd_range {
+       atomic_t                        ref_count;
        unsigned long                   pgoff; // in PAGE_SIZE
 
        unsigned long                   num_entries;
@@ -475,7 +476,8 @@ struct xen_ia64_privcmd_file {
 };
 
 struct xen_ia64_privcmd_vma {
-       struct xen_ia64_privcmd_file*   file;
+       struct xen_ia64_privcmd_range*  range;
+
        unsigned long                   num_entries;
        struct xen_ia64_privcmd_entry*  entries;
 };
@@ -490,20 +492,19 @@ struct vm_operations_struct xen_ia64_pri
 
 static void
 __xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
-                           struct xen_ia64_privcmd_vma* privcmd_vma)
-{
-       struct xen_ia64_privcmd_file* privcmd_file =
-               (struct xen_ia64_privcmd_file*)vma->vm_file->private_data;
-       unsigned long entry_offset = vma->vm_pgoff - privcmd_file->pgoff;
+                           struct xen_ia64_privcmd_vma* privcmd_vma,
+                           struct xen_ia64_privcmd_range* privcmd_range)
+{
+       unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
        unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long i;
 
        BUG_ON(entry_offset < 0);
-       BUG_ON(entry_offset + num_entries > privcmd_file->num_entries);
-
-       privcmd_vma->file = privcmd_file;
+       BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);
+
+       privcmd_vma->range = privcmd_range;
        privcmd_vma->num_entries = num_entries;
-       privcmd_vma->entries = &privcmd_file->entries[entry_offset];
+       privcmd_vma->entries = &privcmd_range->entries[entry_offset];
        vma->vm_private_data = privcmd_vma;
        for (i = 0; i < privcmd_vma->num_entries; i++) {
                xen_ia64_privcmd_entry_open(&privcmd_vma->entries[i]);
@@ -516,15 +517,14 @@ static void
 static void
 xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
 {
-       struct xen_ia64_privcmd_file* privcmd_file =
-               (struct xen_ia64_privcmd_file*)vma->vm_file->private_data;
-       struct xen_ia64_privcmd_vma* privcmd_vma;
-
-       atomic_inc(&privcmd_file->map_count);
+       struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
+       struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
+
+       atomic_inc(&privcmd_range->ref_count);
        // vm_op->open() can't fail.
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
 
-       __xen_ia64_privcmd_vma_open(vma, privcmd_vma);
+       __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
 }
 
 static void
@@ -532,7 +532,7 @@ xen_ia64_privcmd_vma_close(struct vm_are
 {
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-       struct xen_ia64_privcmd_file* privcmd_file = privcmd_vma->file;
+       struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
        unsigned long i;
 
        for (i = 0; i < privcmd_vma->num_entries; i++) {
@@ -541,17 +541,16 @@ xen_ia64_privcmd_vma_close(struct vm_are
        vma->vm_private_data = NULL;
        kfree(privcmd_vma);
 
-       if (atomic_dec_and_test(&privcmd_file->map_count)) {
+       if (atomic_dec_and_test(&privcmd_range->ref_count)) {
 #if 1
-               for (i = 0; i < privcmd_file->num_entries; i++) {
+               for (i = 0; i < privcmd_range->num_entries; i++) {
                        struct xen_ia64_privcmd_entry* entry =
-                               &privcmd_vma->entries[i];
+                               &privcmd_range->entries[i];
                        BUG_ON(atomic_read(&entry->map_count) != 0);
                        BUG_ON(entry->page != NULL);
                }
 #endif
-               privcmd_file->file->private_data = NULL;
-               kfree(privcmd_file->file->private_data);
+               vfree(privcmd_range);
        }
 }
 
@@ -559,22 +558,16 @@ privcmd_mmap(struct file * file, struct 
 privcmd_mmap(struct file * file, struct vm_area_struct * vma)
 {
        unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-       struct xen_ia64_privcmd_file* privcmd_file;
+       struct xen_ia64_privcmd_range* privcmd_range;
        struct xen_ia64_privcmd_vma* privcmd_vma;
        unsigned long i;
        BUG_ON(!running_on_xen);
 
-        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
-        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
-
-       if (file->private_data != NULL) {
-               return -EBUSY;
-       }
-
-       privcmd_file = kmalloc(sizeof(*privcmd_file) +
-                              sizeof(privcmd_file->entries[0]) * num_entries,
-                              GFP_KERNEL);
-       if (privcmd_file == NULL) {
+       BUG_ON(file->private_data != NULL);
+       privcmd_range =
+               vmalloc(sizeof(*privcmd_range) +
+                       sizeof(privcmd_range->entries[0]) * num_entries);
+       if (privcmd_range == NULL) {
                goto out_enomem0;
        }
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
@@ -582,22 +575,23 @@ privcmd_mmap(struct file * file, struct 
                goto out_enomem1;
        }
 
-       atomic_set(&privcmd_file->map_count, 1);
-       privcmd_file->num_entries = num_entries;
-       for (i = 0; i < privcmd_file->num_entries; i++) {
-               xen_ia64_privcmd_init_entry(&privcmd_file->entries[i]);
-       }
-       file->private_data = privcmd_file;
-       privcmd_file->file = file;
-       privcmd_file->pgoff = vma->vm_pgoff;
-
-       __xen_ia64_privcmd_vma_open(vma, privcmd_vma);
+       /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
+
+       atomic_set(&privcmd_range->ref_count, 1);
+       privcmd_range->pgoff = vma->vm_pgoff;
+       privcmd_range->num_entries = num_entries;
+       for (i = 0; i < privcmd_range->num_entries; i++) {
+               xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
+       }
+
+       __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
        return 0;
 
 out_enomem1:
        kfree(privcmd_vma);
 out_enomem0:
-       kfree(privcmd_file);
+       vfree(privcmd_range);
        return -ENOMEM;
 }
 
@@ -625,7 +619,7 @@ direct_remap_pfn_range(struct vm_area_st
        i = (address - vma->vm_start) >> PAGE_SHIFT;
        for (offset = 0; offset < size; offset += PAGE_SIZE) {
                struct xen_ia64_privcmd_entry* entry =
-                       &privcmd_vma->file->entries[i];
+                       &privcmd_vma->entries[i];
                error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, entry, mfn, prot, domid);
                if (error != 0) {
                        break;
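
From user space, the visible effect is that the same privcmd file
descriptor can now be mapped more than once, which is exactly what
xend and qemu do. A minimal sketch (the device path, lengths and
offsets are illustrative; error handling trimmed):

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                long pg = sysconf(_SC_PAGESIZE);
                int fd = open("/proc/xen/privcmd", O_RDWR);
                if (fd < 0)
                        return 1;

                /* Two independent mappings of one fd.  Before this patch
                 * the second mmap() failed with EBUSY because the mapping
                 * state was cached in file->private_data. */
                void* a = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
                               MAP_SHARED, fd, 0);
                void* b = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
                               MAP_SHARED, fd, 4 * pg);
                if (a == MAP_FAILED || b == MAP_FAILED)
                        perror("mmap");
                return 0;
        }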
