
[Xen-changelog] [xen-unstable] ioemu: Do not use PAGE_SHIFT/PAGE_SIZE/PAGE_MASK macros. Use the libxc provided versions



# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1194454268 0
# Node ID 4f1363491a77b97f58b30b6652e357f93a8f100d
# Parent  4fd6610949f1e3ee3286dead75411bfb1a916d8e
ioemu: Do not use PAGE_SHIFT/PAGE_SIZE/PAGE_MASK macros. Use the libxc
provided versions.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 tools/ioemu/hw/pass-through.c   |   14 ++++----
 tools/ioemu/hw/xen_machine_fv.c |   69 +++++++++++++++++-----------------------
 2 files changed, 38 insertions(+), 45 deletions(-)
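
For reference, the libxc constants this patch switches to come from the Xen
headers (tools/libxc/xenctrl.h in trees of this vintage). A sketch of their
likely shape, for orientation only, so verify against your checkout:

    /* Sketch of the libxc page constants (see tools/libxc/xenctrl.h).
     * The values shown are the conventional 4 KiB page; confirm in-tree. */
    #define XC_PAGE_SHIFT           12
    #define XC_PAGE_SIZE            (1UL << XC_PAGE_SHIFT)
    #define XC_PAGE_MASK            (~(XC_PAGE_SIZE-1))

The motivation is that ioemu maps guest frames through libxc, so it should
use Xen's page geometry rather than whatever PAGE_SIZE the host headers or
qemu itself happen to define.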

diff -r 4fd6610949f1 -r 4f1363491a77 tools/ioemu/hw/pass-through.c
--- a/tools/ioemu/hw/pass-through.c     Wed Nov 07 15:20:06 2007 +0000
+++ b/tools/ioemu/hw/pass-through.c     Wed Nov 07 16:51:08 2007 +0000
@@ -20,8 +20,8 @@
  * Guy Zana <guy@xxxxxxxxxxxx>
  *
  * This file implements direct PCI assignment to a HVM guest
- *
  */
+
 #include "vl.h"
 #include "pass-through.h"
 #include "pci/header.h"
@@ -128,9 +128,9 @@ void pt_iomem_map(PCIDevice *d, int i, u
     {
         /* Remove old mapping */
         ret = xc_domain_memory_mapping(xc_handle, domid,
-                old_ebase >> PAGE_SHIFT,
-                assigned_device->bases[i].access.maddr >> PAGE_SHIFT,
-                (e_size+PAGE_MASK) >> PAGE_SHIFT,
+                old_ebase >> XC_PAGE_SHIFT,
+                assigned_device->bases[i].access.maddr >> XC_PAGE_SHIFT,
+                (e_size+XC_PAGE_MASK) >> XC_PAGE_SHIFT,
                 DPCI_REMOVE_MAPPING);
         if ( ret != 0 )
         {
@@ -141,9 +141,9 @@ void pt_iomem_map(PCIDevice *d, int i, u
 
     /* Create new mapping */
     ret = xc_domain_memory_mapping(xc_handle, domid,
-            assigned_device->bases[i].e_physbase >> PAGE_SHIFT,
-            assigned_device->bases[i].access.maddr >> PAGE_SHIFT,
-            (e_size+PAGE_MASK) >> PAGE_SHIFT,
+            assigned_device->bases[i].e_physbase >> XC_PAGE_SHIFT,
+            assigned_device->bases[i].access.maddr >> XC_PAGE_SHIFT,
+            (e_size+XC_PAGE_MASK) >> XC_PAGE_SHIFT,
             DPCI_ADD_MAPPING);
     if ( ret != 0 )
         PT_LOG("Error: create new mapping failed!\n");
diff -r 4fd6610949f1 -r 4f1363491a77 tools/ioemu/hw/xen_machine_fv.c
--- a/tools/ioemu/hw/xen_machine_fv.c   Wed Nov 07 15:20:06 2007 +0000
+++ b/tools/ioemu/hw/xen_machine_fv.c   Wed Nov 07 16:51:08 2007 +0000
@@ -27,13 +27,6 @@
 #include <xen/hvm/params.h>
 #include <sys/mman.h>
 
-#ifndef PAGE_SIZE
-#define PAGE_SIZE XC_PAGE_SIZE
-#endif
-#ifndef PAGE_SHIFT
-#define PAGE_SHIFT XC_PAGE_SHIFT
-#endif
-
 #if defined(MAPCACHE)
 
 #if defined(__i386__) 
@@ -57,7 +50,7 @@ struct map_cache {
 struct map_cache {
     unsigned long paddr_index;
     uint8_t      *vaddr_base;
-    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>PAGE_SHIFT);
+    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>XC_PAGE_SHIFT);
 };
 
 static struct map_cache *mapcache_entry;
@@ -71,9 +64,9 @@ static int qemu_map_cache_init(void)
 {
     unsigned long size;
 
-    nr_buckets = (((MAX_MCACHE_SIZE >> PAGE_SHIFT) +
-                   (1UL << (MCACHE_BUCKET_SHIFT - PAGE_SHIFT)) - 1) >>
-                  (MCACHE_BUCKET_SHIFT - PAGE_SHIFT));
+    nr_buckets = (((MAX_MCACHE_SIZE >> XC_PAGE_SHIFT) +
+                   (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
+                  (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
 
     /*
      * Use mmap() directly: lets us allocate a big hash table with no up-front
@@ -81,7 +74,7 @@ static int qemu_map_cache_init(void)
      * that we actually use. All others will contain all zeroes.
      */
     size = nr_buckets * sizeof(struct map_cache);
-    size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
     fprintf(logfile, "qemu_map_cache_init nr_buckets = %lx size %lu\n", nr_buckets, size);
     mapcache_entry = mmap(NULL, size, PROT_READ|PROT_WRITE,
                           MAP_SHARED|MAP_ANON, -1, 0);
@@ -97,7 +90,7 @@ static void qemu_remap_bucket(struct map
                               unsigned long address_index)
 {
     uint8_t *vaddr_base;
-    unsigned long pfns[MCACHE_BUCKET_SIZE >> PAGE_SHIFT];
+    unsigned long pfns[MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT];
     unsigned int i, j;
 
     if (entry->vaddr_base != NULL) {
@@ -108,11 +101,11 @@ static void qemu_remap_bucket(struct map
         }
     }
 
-    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i++)
-        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-PAGE_SHIFT)) + i;
+    for (i = 0; i < MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT; i++)
+        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
 
     vaddr_base = xc_map_foreign_batch(xc_handle, domid, PROT_READ|PROT_WRITE,
-                                      pfns, MCACHE_BUCKET_SIZE >> PAGE_SHIFT);
+                                      pfns, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
     if (vaddr_base == NULL) {
         fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
         exit(-1);
@@ -121,10 +114,10 @@ static void qemu_remap_bucket(struct map
     entry->vaddr_base  = vaddr_base;
     entry->paddr_index = address_index;
 
-    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i += BITS_PER_LONG) {
+    for (i = 0; i < MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT; i += BITS_PER_LONG) {
         unsigned long word = 0;
-        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> PAGE_SHIFT)) ?
-            (MCACHE_BUCKET_SIZE >> PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
+        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT)) ?
+            (MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
         while (j > 0)
             word = (word << 1) | (((pfns[i + --j] >> 28) & 0xf) != 0xf);
         entry->valid_mapping[i / BITS_PER_LONG] = word;
@@ -143,10 +136,10 @@ uint8_t *qemu_map_cache(target_phys_addr
     entry = &mapcache_entry[address_index % nr_buckets];
 
     if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
-        !test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
+        !test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
         qemu_remap_bucket(entry, address_index);
 
-    if (!test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
+    if (!test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
         return NULL;
 
     last_address_index = address_index;
@@ -215,7 +208,7 @@ static void xen_init_fv(uint64_t ram_siz
 
     xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
     fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
-    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+    shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
                                        PROT_READ|PROT_WRITE, ioreq_pfn);
     if (shared_page == NULL) {
         fprintf(logfile, "map shared IO page returned error %d\n", errno);
@@ -224,7 +217,7 @@ static void xen_init_fv(uint64_t ram_siz
 
     xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
     fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
-    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+    buffered_io_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
                                             PROT_READ|PROT_WRITE, ioreq_pfn);
     if (buffered_io_page == NULL) {
         fprintf(logfile, "map buffered IO page returned error %d\n", errno);
@@ -233,7 +226,7 @@ static void xen_init_fv(uint64_t ram_siz
 
 #elif defined(__ia64__)
 
-    nr_pages = ram_size/PAGE_SIZE;
+    nr_pages = ram_size/XC_PAGE_SIZE;
 
     page_array = (xen_pfn_t *)malloc(nr_pages * sizeof(xen_pfn_t));
     if (page_array == NULL) {
@@ -241,17 +234,17 @@ static void xen_init_fv(uint64_t ram_siz
         exit(-1);
     }
 
-    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
-                                       PROT_READ|PROT_WRITE,
-                                       IO_PAGE_START >> PAGE_SHIFT);
-
-    buffered_io_page =xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
-                                       PROT_READ|PROT_WRITE,
-                                       BUFFER_IO_PAGE_START >> PAGE_SHIFT);
-
-    buffered_pio_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
-                                       PROT_READ|PROT_WRITE,
-                                       BUFFER_PIO_PAGE_START >> PAGE_SHIFT);
+    shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
+                                       PROT_READ|PROT_WRITE,
+                                       IO_PAGE_START >> XC_PAGE_SHIFT);
+
+    buffered_io_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
+                                       PROT_READ|PROT_WRITE,
+                                       BUFFER_IO_PAGE_START >> XC_PAGE_SHIFT);
+
+    buffered_pio_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
+                                       PROT_READ|PROT_WRITE,
+                                       BUFFER_PIO_PAGE_START >> XC_PAGE_SHIFT);
 
     for (i = 0; i < nr_pages; i++)
         page_array[i] = i;
@@ -259,9 +252,9 @@ static void xen_init_fv(uint64_t ram_siz
     /* VTI will not use memory between 3G~4G, so we just pass a legal pfn
        to make QEMU map continuous virtual memory space */
     if (ram_size > MMIO_START) {       
-        for (i = 0 ; i < (MEM_G >> PAGE_SHIFT); i++)
-            page_array[(MMIO_START >> PAGE_SHIFT) + i] =
-                (STORE_PAGE_START >> PAGE_SHIFT); 
+        for (i = 0 ; i < (MEM_G >> XC_PAGE_SHIFT); i++)
+            page_array[(MMIO_START >> XC_PAGE_SHIFT) + i] =
+                (STORE_PAGE_START >> XC_PAGE_SHIFT);
     }
 
     phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
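
A side note on the mapcache arithmetic in qemu_map_cache_init() above:
nr_buckets is a ceiling divide of the total number of cacheable pages by the
pages per bucket. A standalone sketch with hypothetical values (the real
MAX_MCACHE_SIZE and MCACHE_BUCKET_SHIFT are set per architecture near the
top of xen_machine_fv.c):

    #include <stdio.h>

    /* Hypothetical values for illustration only; the real MAX_MCACHE_SIZE
     * and MCACHE_BUCKET_SHIFT are per-architecture in xen_machine_fv.c. */
    #define XC_PAGE_SHIFT       12
    #define MCACHE_BUCKET_SHIFT 16               /* assumed: 64 KiB buckets */
    #define MAX_MCACHE_SIZE     (1UL << 30)      /* assumed: 1 GiB of cache */

    int main(void)
    {
        /* Same ceiling divide as qemu_map_cache_init(): total pages divided
         * by pages-per-bucket, rounded up so a partial bucket still counts. */
        unsigned long nr_buckets =
            (((MAX_MCACHE_SIZE >> XC_PAGE_SHIFT) +
              (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
             (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
        printf("nr_buckets = %lu\n", nr_buckets); /* prints 16384 here */
        return 0;
    }

Relatedly, the valid_mapping bitmap that qemu_remap_bucket() fills keeps one
bit per page of a bucket: xc_map_foreign_batch() marks frames it failed to
map by setting the top nibble of the corresponding pfn array entry to 0xf,
which is what the ((pfns[i + --j] >> 28) & 0xf) != 0xf test records, letting
qemu_map_cache() refuse pages that were never mapped.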

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog