
[Xen-changelog] [xen-unstable] x86: Make IDT/GDT/LDT updates safe.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1195645131 0
# Node ID 8c305873f2b80768a720b834e0bb1180af9d8988
# Parent  ec0bc82cebfdc68be3ae414310d39038b8fd1dce
x86: Make IDT/GDT/LDT updates safe.

This involves determining that the entry will not be read or
written while the update takes place, atomically making the entry
'present' only once its new contents are in place, or doing the
entire write atomically, as appropriate.

This issue was raised, and the original patch provided, by Jan Beulich.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxxxxx>
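
For illustration only (not part of the patch): a minimal sketch of the
"disable, rewrite, re-enable" pattern applied below to gate updates.  It
assumes a Xen-style struct desc_struct made of two u32 words 'a' and 'b'
with the Present bit in bit 15 of 'b', and the wmb() store barrier from
xen/include/asm-x86/system.h; the function name is hypothetical.

    static void update_gate_safely(struct desc_struct *gate,
                                   u32 new_a, u32 new_b)
    {
        gate->b = 0;      /* clear Present: the CPU now ignores the entry */
        wmb();            /* disable gate /then/ rewrite                  */
        gate->a = new_a;  /* rewrite low word while entry is not-present  */
        wmb();            /* rewrite /then/ enable gate                   */
        gate->b = new_b;  /* final store carries the Present bit          */
    }

At every point a concurrent reader sees either the old entry, a
not-present entry (which faults cleanly), or the complete new entry; it
can never observe a half-written one.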
---
 xen/arch/x86/mm.c               |   15 +--------
 xen/arch/x86/traps.c            |    3 +
 xen/arch/x86/x86_32/seg_fixup.c |   15 +++++----
 xen/include/asm-x86/desc.h      |   63 ++++++++++++++++++++++------------------
 xen/include/asm-x86/system.h    |   11 ++++++
 5 files changed, 62 insertions(+), 45 deletions(-)

diff -r ec0bc82cebfd -r 8c305873f2b8 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Nov 21 09:52:04 2007 +0000
+++ b/xen/arch/x86/mm.c Wed Nov 21 11:38:51 2007 +0000
@@ -3007,7 +3007,8 @@ long set_gdt(struct vcpu *v,
         return -EINVAL;
 
     /* Check the pages in the new GDT. */
-    for ( i = 0; i < nr_pages; i++ ) {
+    for ( i = 0; i < nr_pages; i++ )
+    {
         mfn = frames[i] = gmfn_to_mfn(d, frames[i]);
         if ( !mfn_valid(mfn) ||
              !get_page_and_type(mfn_to_page(mfn), d, PGT_gdt_page) )
@@ -3073,23 +3074,15 @@ long do_update_descriptor(u64 pa, u64 de
 
     *(u64 *)&d = desc;
 
-    LOCK_BIGLOCK(dom);
-
     mfn = gmfn_to_mfn(dom, gmfn);
     if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
          !mfn_valid(mfn) ||
          !check_descriptor(dom, &d) )
-    {
-        UNLOCK_BIGLOCK(dom);
         return -EINVAL;
-    }
 
     page = mfn_to_page(mfn);
     if ( unlikely(!get_page(page, dom)) )
-    {
-        UNLOCK_BIGLOCK(dom);
         return -EINVAL;
-    }
 
     /* Check if the given frame is in use in an unsafe context. */
     switch ( page->u.inuse.type_info & PGT_type_mask )
@@ -3112,7 +3105,7 @@ long do_update_descriptor(u64 pa, u64 de
 
     /* All is good so make the update. */
     gdt_pent = map_domain_page(mfn);
-    memcpy(&gdt_pent[offset], &d, 8);
+    atomic_write64((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
     unmap_domain_page(gdt_pent);
 
     put_page_type(page);
@@ -3121,8 +3114,6 @@ long do_update_descriptor(u64 pa, u64 de
 
  out:
     put_page(page);
-
-    UNLOCK_BIGLOCK(dom);
 
     return ret;
 }
diff -r ec0bc82cebfd -r 8c305873f2b8 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Wed Nov 21 09:52:04 2007 +0000
+++ b/xen/arch/x86/traps.c      Wed Nov 21 11:38:51 2007 +0000
@@ -2583,7 +2583,10 @@ void set_system_gate(unsigned int n, voi
 
 void set_task_gate(unsigned int n, unsigned int sel)
 {
+    idt_table[n].b = 0;
+    wmb(); /* disable gate /then/ rewrite */
     idt_table[n].a = sel << 16;
+    wmb(); /* rewrite /then/ enable gate */
     idt_table[n].b = 0x8500;
 }
 
diff -r ec0bc82cebfd -r 8c305873f2b8 xen/arch/x86/x86_32/seg_fixup.c
--- a/xen/arch/x86/x86_32/seg_fixup.c   Wed Nov 21 09:52:04 2007 +0000
+++ b/xen/arch/x86/x86_32/seg_fixup.c   Wed Nov 21 11:38:51 2007 +0000
@@ -153,7 +153,7 @@ static unsigned char twobyte_decode[256]
  *  @base  (OUT): Decoded linear base address.
  *  @limit (OUT): Decoded segment limit, in bytes. 0 == unlimited (4GB).
  */
-int get_baselimit(u16 seg, unsigned long *base, unsigned long *limit)
+static int get_baselimit(u16 seg, unsigned long *base, unsigned long *limit)
 {
     struct vcpu *d = current;
     unsigned long *table, a, b;
@@ -204,7 +204,7 @@ int get_baselimit(u16 seg, unsigned long
 }
 
 /* Turn a segment+offset into a linear address. */
-int linearise_address(u16 seg, unsigned long off, unsigned long *linear)
+static int linearise_address(u16 seg, unsigned long off, unsigned long *linear)
 {
     unsigned long base, limit;
 
@@ -216,10 +216,14 @@ int linearise_address(u16 seg, unsigned 
 
     *linear = base + off;
 
+    /* Conservatively check 32 bytes from returned linear base. */
+    if ( !access_ok(*linear, 32) )
+        return 0;
+
     return 1;
 }
 
-int fixup_seg(u16 seg, unsigned long offset)
+static int fixup_seg(u16 seg, unsigned long offset)
 {
     struct vcpu *d = current;
     unsigned long *table, a, b, base, limit;
@@ -303,9 +307,8 @@ int fixup_seg(u16 seg, unsigned long off
     a &= ~0x0ffff; a |= limit & 0x0ffff;
     b &= ~0xf0000; b |= limit & 0xf0000;
     b ^= _SEGMENT_EC; /* grows-up <-> grows-down */
-    /* NB. These can't fault. Checked readable above; must also be writable. */
-    table[2*idx+0] = a;
-    table[2*idx+1] = b;
+    /* NB. This can't fault. Checked readable above; must also be writable. */
+    atomic_write64((uint64_t *)&table[2*idx], ((uint64_t)b<<32) | a);
     return 1;
 }
 
diff -r ec0bc82cebfd -r 8c305873f2b8 xen/include/asm-x86/desc.h
--- a/xen/include/asm-x86/desc.h        Wed Nov 21 09:52:04 2007 +0000
+++ b/xen/include/asm-x86/desc.h        Wed Nov 21 11:38:51 2007 +0000
@@ -143,6 +143,11 @@ typedef struct {
 
 #define _set_gate(gate_addr,type,dpl,addr)               \
 do {                                                     \
+    (gate_addr)->a = 0;                                  \
+    wmb(); /* disable gate /then/ rewrite */             \
+    (gate_addr)->b =                                     \
+        ((unsigned long)(addr) >> 32);                   \
+    wmb(); /* rewrite /then/ enable gate */              \
     (gate_addr)->a =                                     \
         (((unsigned long)(addr) & 0xFFFF0000UL) << 32) | \
         ((unsigned long)(dpl) << 45) |                   \
@@ -150,49 +155,53 @@ do {                                    
         ((unsigned long)(addr) & 0xFFFFUL) |             \
         ((unsigned long)__HYPERVISOR_CS64 << 16) |       \
         (1UL << 47);                                     \
-    (gate_addr)->b =                                     \
-        ((unsigned long)(addr) >> 32);                   \
 } while (0)
 
 #define _set_tssldt_desc(desc,addr,limit,type)           \
 do {                                                     \
+    (desc)[0].b = (desc)[1].b = 0;                       \
+    wmb(); /* disable entry /then/ rewrite */            \
     (desc)[0].a =                                        \
         ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);   \
+    (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32);  \
+    wmb(); /* rewrite /then/ enable entry */             \
     (desc)[0].b =                                        \
         ((u32)(addr) & 0xFF000000U) |                    \
         ((u32)(type) << 8) | 0x8000U |                   \
         (((u32)(addr) & 0x00FF0000U) >> 16);             \
-    (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32);  \
-    (desc)[1].b = 0;                                     \
 } while (0)
 
 #elif defined(__i386__)
 
 typedef struct desc_struct idt_entry_t;
 
-#define _set_gate(gate_addr,type,dpl,addr) \
-do { \
-  int __d0, __d1; \
-  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
- "movw %4,%%dx\n\t" \
- "movl %%eax,%0\n\t" \
- "movl %%edx,%1" \
- :"=m" (*((long *) (gate_addr))), \
-  "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
- :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
-  "3" ((char *) (addr)),"2" (__HYPERVISOR_CS << 16)); \
-} while (0)
-
-#define _set_tssldt_desc(n,addr,limit,type) \
-__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
- "movw %%ax,2(%2)\n\t" \
- "rorl $16,%%eax\n\t" \
- "movb %%al,4(%2)\n\t" \
- "movb %4,5(%2)\n\t" \
- "movb $0,6(%2)\n\t" \
- "movb %%ah,7(%2)\n\t" \
- "rorl $16,%%eax" \
- : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type|0x80))
+#define _set_gate(gate_addr,type,dpl,addr)               \
+do {                                                     \
+    (gate_addr)->b = 0;                                  \
+    wmb(); /* disable gate /then/ rewrite */             \
+    (gate_addr)->a =                                     \
+        ((unsigned long)(addr) & 0xFFFFUL) |             \
+        ((unsigned long)__HYPERVISOR_CS << 16);          \
+    wmb(); /* rewrite /then/ enable gate */              \
+    (gate_addr)->b =                                     \
+        ((unsigned long)(addr) & 0xFFFF0000UL) |         \
+        ((unsigned long)(dpl) << 13) |                   \
+        ((unsigned long)(type) << 8) |                   \
+        (1UL << 15);                                     \
+} while (0)
+
+#define _set_tssldt_desc(desc,addr,limit,type)           \
+do {                                                     \
+    (desc)->b = 0;                                       \
+    wmb(); /* disable entry /then/ rewrite */            \
+    (desc)->a =                                          \
+        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);   \
+    wmb(); /* rewrite /then/ enable entry */             \
+    (desc)->b =                                          \
+        ((u32)(addr) & 0xFF000000U) |                    \
+        ((u32)(type) << 8) | 0x8000U |                   \
+        (((u32)(addr) & 0x00FF0000U) >> 16);             \
+} while (0)
 
 #endif
 
diff -r ec0bc82cebfd -r 8c305873f2b8 xen/include/asm-x86/system.h
--- a/xen/include/asm-x86/system.h      Wed Nov 21 09:52:04 2007 +0000
+++ b/xen/include/asm-x86/system.h      Wed Nov 21 11:38:51 2007 +0000
@@ -256,6 +256,17 @@ static always_inline unsigned long long 
 })
 #endif
 
+static inline void atomic_write64(uint64_t *p, uint64_t v)
+{
+#ifdef __i386__
+    uint64_t w = *p, x;
+    while ( (x = __cmpxchg8b(p, w, v)) != w )
+        w = x;
+#else
+    *p = v;
+#endif
+}
+
 #if defined(__i386__)
 #define mb()   __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
 #define rmb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
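
As a usage note, outside the patch: a plain 64-bit store is not atomic on
32-bit x86, so the i386 branch of atomic_write64() above loops on
__cmpxchg8b() until the full 8-byte value is installed in one shot.  A
minimal user-space analogue of that loop, assuming GCC's
__sync_val_compare_and_swap() builtin in place of __cmpxchg8b():

    #include <stdint.h>

    /* Sketch only: retry the 8-byte compare-and-swap until the value we
     * last observed is still in place, so v is published atomically. */
    static void atomic_write64_demo(volatile uint64_t *p, uint64_t v)
    {
        uint64_t old = *p, seen;
        while ( (seen = __sync_val_compare_and_swap(p, old, v)) != old )
            old = seen;
    }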
