
[Xen-changelog] [xen-unstable] x86_64: Widen page counts to avoid overflow.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1232986782 0
# Node ID 39517e863cc89a085341e1d53317aaa7ceddd127
# Parent  055c589f4791811797867736857b08fdd0fd6d49
x86_64: Widen page counts to avoid overflow.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/domain.c            |    4 +-
 xen/arch/x86/mm.c                |   24 ++++++------
 xen/arch/x86/mm/hap/hap.c        |    2 -
 xen/arch/x86/mm/shadow/common.c  |    4 +-
 xen/arch/x86/mm/shadow/private.h |    7 +++
 xen/arch/x86/x86_32/mm.c         |    9 ----
 xen/arch/x86/x86_64/mm.c         |    9 ----
 xen/common/xenoprof.c            |    4 +-
 xen/include/asm-x86/mm.h         |   77 ++++++++++++++++++++-------------------
 9 files changed, 65 insertions(+), 75 deletions(-)
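
The core of the change is in xen/include/asm-x86/mm.h: count_info and the
pickled _domain field are widened from u32 to unsigned long, and the
PGT_*/PGC_* flag bits are re-expressed relative to BITS_PER_LONG via the new
PG_shift()/PG_mask() helpers, so the count fields absorb whatever space the
flags leave free on either word size. The following standalone sketch (not
part of the patch; plain hosted C with illustrative printf output) shows how
those helpers lay the fields out:

#include <stdio.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * 8)
#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))

int main(void)
{
    /* Top three bits of the word: bits 29-31 on x86_32, 61-63 on x86_64. */
    printf("PGT_type_mask  = %#lx\n", PG_mask(7, 3));
    /* Everything below the PGT_* flags: 25 bits (x86_32) or 57 bits (x86_64). */
    printf("PGT_count_mask = %#lx\n", (1UL << PG_shift(7)) - 1);
    /* Everything below the PGC_* flags: 26 bits (x86_32) or 58 bits (x86_64). */
    printf("PGC_count_mask = %#lx\n", (1UL << PG_shift(6)) - 1);
    return 0;
}

On x86_32 the layout is unchanged; on x86_64 the type-use count grows from
25 to 57 bits and the general reference count from 26 to 58 bits, which is
the overflow headroom the title refers to. A further consequence of widening
_domain is that it can now hold a full pointer, which is why the x86_64
pickle_domptr()/unpickle_domptr() helpers collapse to plain casts in the
final hunk.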

diff -r 055c589f4791 -r 39517e863cc8 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Mon Jan 26 14:56:19 2009 +0000
+++ b/xen/arch/x86/domain.c     Mon Jan 26 16:19:42 2009 +0000
@@ -143,7 +143,7 @@ void dump_pageframe_info(struct domain *
     {
         list_for_each_entry ( page, &d->page_list, list )
         {
-            printk("    DomPage %p: caf=%08x, taf=%" PRtype_info "\n",
+            printk("    DomPage %p: caf=%08lx, taf=%" PRtype_info "\n",
                    _p(page_to_mfn(page)),
                    page->count_info, page->u.inuse.type_info);
         }
@@ -156,7 +156,7 @@ void dump_pageframe_info(struct domain *
 
     list_for_each_entry ( page, &d->xenpage_list, list )
     {
-        printk("    XenPage %p: caf=%08x, taf=%" PRtype_info "\n",
+        printk("    XenPage %p: caf=%08lx, taf=%" PRtype_info "\n",
                _p(page_to_mfn(page)),
                page->count_info, page->u.inuse.type_info);
     }
diff -r 055c589f4791 -r 39517e863cc8 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Jan 26 14:56:19 2009 +0000
+++ b/xen/arch/x86/mm.c Mon Jan 26 16:19:42 2009 +0000
@@ -739,8 +739,8 @@ get_page_from_l1e(
     else if ( pte_flags_to_cacheattr(l1f) !=
               ((page->count_info >> PGC_cacheattr_base) & 7) )
     {
-        uint32_t x, nx, y = page->count_info;
-        uint32_t cacheattr = pte_flags_to_cacheattr(l1f);
+        unsigned long x, nx, y = page->count_info;
+        unsigned long cacheattr = pte_flags_to_cacheattr(l1f);
 
         if ( is_xen_heap_page(page) )
         {
@@ -1909,7 +1909,7 @@ static int mod_l4_entry(l4_pgentry_t *pl
 
 void put_page(struct page_info *page)
 {
-    u32 nx, x, y = page->count_info;
+    unsigned long nx, x, y = page->count_info;
 
     do {
         x  = y;
@@ -1927,7 +1927,7 @@ void put_page(struct page_info *page)
 
 int get_page(struct page_info *page, struct domain *domain)
 {
-    u32 x, y = page->count_info;
+    unsigned long x, y = page->count_info;
 
     do {
         x = y;
@@ -1946,7 +1946,7 @@ int get_page(struct page_info *page, str
  fail:
     if ( !_shadow_mode_refcounts(domain) && !domain->is_dying )
         gdprintk(XENLOG_INFO,
-                 "Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%" PRtype_info,
+                 "Error pfn %lx: rd=%p, od=%p, caf=%08lx, taf=%" PRtype_info,
                  page_to_mfn(page), domain, page_get_owner(page),
                  y, page->u.inuse.type_info);
     return 0;
@@ -1962,7 +1962,7 @@ int get_page(struct page_info *page, str
  */
 static void get_page_light(struct page_info *page)
 {
-    u32 x, nx, y = page->count_info;
+    unsigned long x, nx, y = page->count_info;
 
     do {
         x  = y;
@@ -2003,7 +2003,7 @@ static int alloc_page_type(struct page_i
         rc = alloc_segdesc_page(page);
         break;
     default:
-        printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%x\n", 
+        printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%lx\n", 
                type, page->u.inuse.type_info,
                page->count_info);
         rc = -EINVAL;
@@ -2027,7 +2027,7 @@ static int alloc_page_type(struct page_i
     {
         ASSERT(rc < 0);
         MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %"
-                PRtype_info ": caf=%08x taf=%" PRtype_info,
+                PRtype_info ": caf=%08lx taf=%" PRtype_info,
                 page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
                 type, page->count_info, page->u.inuse.type_info);
         page->u.inuse.type_info = 0;
@@ -3184,7 +3184,7 @@ static int create_grant_pte_mapping(
     void *va;
     unsigned long gmfn, mfn;
     struct page_info *page;
-    u32 type;
+    unsigned long type;
     l1_pgentry_t ol1e;
     struct domain *d = v->domain;
 
@@ -3245,7 +3245,7 @@ static int destroy_grant_pte_mapping(
     void *va;
     unsigned long gmfn, mfn;
     struct page_info *page;
-    u32 type;
+    unsigned long type;
     l1_pgentry_t ol1e;
 
     gmfn = addr >> PAGE_SHIFT;
@@ -3471,7 +3471,7 @@ int steal_page(
 int steal_page(
     struct domain *d, struct page_info *page, unsigned int memflags)
 {
-    u32 x, y;
+    unsigned long x, y;
 
     spin_lock(&d->page_alloc_lock);
 
@@ -3508,7 +3508,7 @@ int steal_page(
 
  fail:
     spin_unlock(&d->page_alloc_lock);
-    MEM_LOG("Bad page %p: ed=%p(%u), sd=%p, caf=%08x, taf=%" PRtype_info,
+    MEM_LOG("Bad page %p: ed=%p(%u), sd=%p, caf=%08lx, taf=%" PRtype_info,
             (void *)page_to_mfn(page), d, d->domain_id,
             page_get_owner(page), page->count_info, page->u.inuse.type_info);
     return -1;
diff -r 055c589f4791 -r 39517e863cc8 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Mon Jan 26 14:56:19 2009 +0000
+++ b/xen/arch/x86/mm/hap/hap.c Mon Jan 26 16:19:42 2009 +0000
@@ -166,7 +166,7 @@ void hap_free_p2m_page(struct domain *d,
     ASSERT(page_get_owner(pg) == d);
     /* Should have just the one ref we gave it in alloc_p2m_page() */
     if ( (pg->count_info & PGC_count_mask) != 1 )
-        HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
+        HAP_ERROR("Odd p2m page count c=%#lx t=%"PRtype_info"\n",
                   pg->count_info, pg->u.inuse.type_info);
     pg->count_info = 0;
     /* Free should not decrement domain's total allocation, since
diff -r 055c589f4791 -r 39517e863cc8 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Mon Jan 26 14:56:19 2009 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Mon Jan 26 16:19:42 2009 +0000
@@ -1715,7 +1715,7 @@ shadow_free_p2m_page(struct domain *d, s
     /* Should have just the one ref we gave it in alloc_p2m_page() */
     if ( (pg->count_info & PGC_count_mask) != 1 )
     {
-        SHADOW_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
+        SHADOW_ERROR("Odd p2m page count c=%#lx t=%"PRtype_info"\n",
                      pg->count_info, pg->u.inuse.type_info);
     }
     pg->count_info = 0;
@@ -2593,7 +2593,7 @@ int sh_remove_all_mappings(struct vcpu *
                && (page->u.inuse.type_info & PGT_count_mask) == 0) )
         {
             SHADOW_ERROR("can't find all mappings of mfn %lx: "
-                          "c=%08x t=%08lx\n", mfn_x(gmfn), 
+                          "c=%08lx t=%08lx\n", mfn_x(gmfn), 
                           page->count_info, page->u.inuse.type_info);
         }
     }
diff -r 055c589f4791 -r 39517e863cc8 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Mon Jan 26 14:56:19 2009 +0000
+++ b/xen/arch/x86/mm/shadow/private.h  Mon Jan 26 16:19:42 2009 +0000
@@ -247,8 +247,13 @@ struct shadow_page_info
                 unsigned int type:5;   /* What kind of shadow is this? */
                 unsigned int pinned:1; /* Is the shadow pinned? */
                 unsigned int count:26; /* Reference count */
-                u32 mbz;               /* Must be zero: this is where the
+#ifdef __x86_64__
+                u32 pad;
+                u64 mbz;               /* Must be zero: this is where the
                                         * owner field lives in page_info */
+#else
+                u32 mbz;
+#endif
             } __attribute__((packed));
             union {
                 /* For unused shadow pages, a list of pages of this order; for 
diff -r 055c589f4791 -r 39517e863cc8 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Mon Jan 26 14:56:19 2009 +0000
+++ b/xen/arch/x86/x86_32/mm.c  Mon Jan 26 16:19:42 2009 +0000
@@ -159,15 +159,6 @@ void __init subarch_init_memory(void)
     unsigned long m2p_start_mfn;
     unsigned int i, j;
 
-    /*
-     * We are rather picky about the layout of 'struct page_info'. The
-     * count_info and domain fields must be adjacent, as we perform atomic
-     * 64-bit operations on them. Also, just for sanity, we assert the size
-     * of the structure here.
-     */
-    BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) != 
-                 (offsetof(struct page_info, count_info) + sizeof(u32)));
-    BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
     BUILD_BUG_ON(sizeof(struct page_info) != 24);
 
     /* M2P table is mappable read-only by privileged domains. */
diff -r 055c589f4791 -r 39517e863cc8 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Mon Jan 26 14:56:19 2009 +0000
+++ b/xen/arch/x86/x86_64/mm.c  Mon Jan 26 16:19:42 2009 +0000
@@ -225,15 +225,6 @@ void __init subarch_init_memory(void)
     l3_pgentry_t l3e;
     l2_pgentry_t l2e;
 
-    /*
-     * We are rather picky about the layout of 'struct page_info'. The
-     * count_info and domain fields must be adjacent, as we perform atomic
-     * 64-bit operations on them.
-     */
-    BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) != 
-                 (offsetof(struct page_info, count_info) + sizeof(u32)));
-    BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
-
     /* M2P table is mappable read-only by privileged domains. */
     for ( v  = RDWR_MPT_VIRT_START;
           v != RDWR_MPT_VIRT_END;
diff -r 055c589f4791 -r 39517e863cc8 xen/common/xenoprof.c
--- a/xen/common/xenoprof.c     Mon Jan 26 14:56:19 2009 +0000
+++ b/xen/common/xenoprof.c     Mon Jan 26 16:19:42 2009 +0000
@@ -142,8 +142,8 @@ share_xenoprof_page_with_guest(struct do
        struct page_info *page = mfn_to_page(mfn + i);
        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
        {
-           gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%x\n",
-                    mfn + i, page->count_info);
+           gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%lx\n",
+                    mfn + i, (unsigned long)page->count_info);
            return -EBUSY;
        }
        page_set_owner(page, NULL);
diff -r 055c589f4791 -r 39517e863cc8 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Mon Jan 26 14:56:19 2009 +0000
+++ b/xen/include/asm-x86/mm.h  Mon Jan 26 16:19:42 2009 +0000
@@ -23,7 +23,7 @@ struct page_info
     struct list_head list;
 
     /* Reference count and various PGC_xxx flags and fields. */
-    u32 count_info;
+    unsigned long count_info;
 
     /* Context-dependent fields follow... */
     union {
@@ -31,7 +31,7 @@ struct page_info
         /* Page is in use: ((count_info & PGC_count_mask) != 0). */
         struct {
             /* Owner of this page (NULL if page is anonymous). */
-            u32 _domain; /* pickled format */
+            unsigned long _domain; /* pickled format */
             /* Type reference count and various PGT_xxx flags and fields. */
             unsigned long type_info;
         } __attribute__ ((packed)) inuse;
@@ -102,52 +102,57 @@ struct page_info
     };
 };
 
+#define PG_shift(idx)   (BITS_PER_LONG - (idx))
+#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
+
  /* The following page types are MUTUALLY EXCLUSIVE. */
-#define PGT_none            (0U<<29) /* no special uses of this page */
-#define PGT_l1_page_table   (1U<<29) /* using this page as an L1 page table? */
-#define PGT_l2_page_table   (2U<<29) /* using this page as an L2 page table? */
-#define PGT_l3_page_table   (3U<<29) /* using this page as an L3 page table? */
-#define PGT_l4_page_table   (4U<<29) /* using this page as an L4 page table? */
-#define PGT_seg_desc_page   (5U<<29) /* using this page in a GDT/LDT? */
-#define PGT_writable_page   (7U<<29) /* has writable mappings of this page? */
-#define PGT_type_mask       (7U<<29) /* Bits 29-31. */
+#define PGT_none          PG_mask(0, 3) /* no special uses of this page */
+#define PGT_l1_page_table PG_mask(1, 3) /* using as an L1 page table? */
+#define PGT_l2_page_table PG_mask(2, 3) /* using as an L2 page table? */
+#define PGT_l3_page_table PG_mask(3, 3) /* using as an L3 page table? */
+#define PGT_l4_page_table PG_mask(4, 3) /* using as an L4 page table? */
+#define PGT_seg_desc_page PG_mask(5, 3) /* using this page in a GDT/LDT? */
+#define PGT_writable_page PG_mask(7, 3) /* has writable mappings? */
+#define PGT_type_mask     PG_mask(7, 3) /* Bits 29-31 or 61-63. */
 
  /* Owning guest has pinned this page to its current type? */
-#define _PGT_pinned         28
-#define PGT_pinned          (1U<<_PGT_pinned)
+#define _PGT_pinned       PG_shift(4)
+#define PGT_pinned        PG_mask(1, 4)
  /* Has this page been validated for use as its current type? */
-#define _PGT_validated      27
-#define PGT_validated       (1U<<_PGT_validated)
+#define _PGT_validated    PG_shift(5)
+#define PGT_validated     PG_mask(1, 5)
  /* PAE only: is this an L2 page directory containing Xen-private mappings? */
-#define _PGT_pae_xen_l2     26
-#define PGT_pae_xen_l2      (1U<<_PGT_pae_xen_l2)
+#define _PGT_pae_xen_l2   PG_shift(6)
+#define PGT_pae_xen_l2    PG_mask(1, 6)
 /* Has this page been *partially* validated for use as its current type? */
-#define _PGT_partial        25
-#define PGT_partial         (1U<<_PGT_partial)
-
- /* 25-bit count of uses of this frame as its current type. */
-#define PGT_count_mask      ((1U<<25)-1)
+#define _PGT_partial      PG_shift(7)
+#define PGT_partial       PG_mask(1, 7)
+
+ /* Count of uses of this frame as its current type. */
+#define PGT_count_width   PG_shift(7)
+#define PGT_count_mask    ((1UL<<PGT_count_width)-1)
 
  /* Cleared when the owning guest 'frees' this page. */
-#define _PGC_allocated      31
-#define PGC_allocated       (1U<<_PGC_allocated)
+#define _PGC_allocated    PG_shift(1)
+#define PGC_allocated     PG_mask(1, 1)
 #if defined(__i386__)
  /* Page is locked? */
-# define _PGC_locked        30
-# define PGC_locked         (1U<<_PGC_out_of_sync)
+# define _PGC_locked      PG_shift(2)
+# define PGC_locked       PG_mask(1, 2)
 #else
  /* Page is Xen heap? */
-# define _PGC_xen_heap      30
-# define PGC_xen_heap       (1U<<_PGC_xen_heap)
+# define _PGC_xen_heap    PG_shift(2)
+# define PGC_xen_heap     PG_mask(1, 2)
 #endif
  /* Set when is using a page as a page table */
-#define _PGC_page_table     29
-#define PGC_page_table      (1U<<_PGC_page_table)
+#define _PGC_page_table   PG_shift(3)
+#define PGC_page_table    PG_mask(1, 3)
  /* 3-bit PAT/PCD/PWT cache-attribute hint. */
-#define PGC_cacheattr_base  26
-#define PGC_cacheattr_mask  (7U<<PGC_cacheattr_base)
- /* 26-bit count of references to this frame. */
-#define PGC_count_mask      ((1U<<26)-1)
+#define PGC_cacheattr_base PG_shift(6)
+#define PGC_cacheattr_mask PG_mask(7, 6)
+ /* Count of references to this frame. */
+#define PGC_count_width   PG_shift(6)
+#define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
 #if defined(__i386__)
 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
@@ -166,10 +171,8 @@ static inline struct domain *unpickle_do
 { return (_domain & 1) ? NULL : (void *)_domain; }
 #define PRtype_info "08lx" /* should only be used for printk's */
 #elif defined(__x86_64__)
-static inline struct domain *unpickle_domptr(u32 _domain)
-{ return ((_domain == 0) || (_domain & 1)) ? NULL : __va(_domain); }
-static inline u32 pickle_domptr(struct domain *domain)
-{ return (domain == NULL) ? 0 : (u32)__pa(domain); }
+#define unpickle_domptr(d) ((struct domain *)(d))
+#define pickle_domptr(d) ((unsigned long)(d))
 #define PRtype_info "016lx"/* should only be used for printk's */
 #endif
 

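The callers in xen/arch/x86/mm.c keep their existing lock-free update loops;
only the width of the local variables changes. Below is a simplified,
hypothetical sketch of that compare-and-swap reference-count pattern with the
widened counter (standalone C using GCC's __atomic builtins in place of Xen's
cmpxchg(); demo_page and demo_get_page are illustrative names, not Xen code):

#include <stdbool.h>

/* Low 58 bits on x86_64, matching PGC_count_width = PG_shift(6). */
#define PGC_count_mask  ((1UL << (64 - 6)) - 1)

struct demo_page { unsigned long count_info; };

static bool demo_get_page(struct demo_page *page)
{
    unsigned long x, y = __atomic_load_n(&page->count_info, __ATOMIC_RELAXED);

    do {
        x = y;
        /* Refuse if the page is free (count 0) or the count would
         * overflow into the flag bits. */
        if ( (x & PGC_count_mask) == 0 ||
             ((x + 1) & PGC_count_mask) == 0 )
            return false;
        /* Try to install x+1; on failure y is refreshed and we retry. */
    } while ( !__atomic_compare_exchange_n(&page->count_info, &y, x + 1,
                                           false, __ATOMIC_ACQ_REL,
                                           __ATOMIC_RELAXED) );

    return true;
}

On a 32-bit count_info the general reference count tops out at 2^26 - 1;
the widened field raises that limit to 2^58 - 1 on x86_64 builds.
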
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog