
[Xen-changelog] [xen-unstable] arm: header files



# HG changeset patch
# User Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
# Date 1328787209 0
# Node ID 46adf08254f9d88ffd3deba833ee89aa977fcb6b
# Parent  dcb04595033b020f16a1ed642cfbeb263a85e969
arm: header files

A simple implementation of everything under asm-arm and arch-arm.h; some
of these files are shamelessly taken from Linux.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Committed-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---


diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/atomic.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/atomic.h      Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,236 @@
+/*
+ *  arch/arm/include/asm/atomic.h
+ *
+ *  Copyright (C) 1996 Russell King.
+ *  Copyright (C) 2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ARCH_ARM_ATOMIC__
+#define __ARCH_ARM_ATOMIC__
+
+#include <xen/config.h>
+#include <asm/system.h>
+
+#define build_atomic_read(name, size, type, reg)   \
+static inline type name(const volatile type *addr) \
+{                                                  \
+    type ret;                                      \
+    asm volatile("ldr" size " %0,%1"               \
+                 : reg (ret)                       \
+                 : "m" (*(volatile type *)addr));  \
+    return ret;                                    \
+}
+
+#define build_atomic_write(name, size, type, reg)      \
+static inline void name(volatile type *addr, type val) \
+{                                                      \
+    asm volatile("str" size " %1,%0"                   \
+                 : "=m" (*(volatile type *)addr)       \
+                 : reg (val));                         \
+}
+
+build_atomic_read(read_u8_atomic, "b", uint8_t, "=q")
+build_atomic_read(read_u16_atomic, "h", uint16_t, "=r")
+build_atomic_read(read_u32_atomic, "", uint32_t, "=r")
+//build_atomic_read(read_u64_atomic, "d", uint64_t, "=r")
+build_atomic_read(read_int_atomic, "", int, "=r")
+
+build_atomic_write(write_u8_atomic, "b", uint8_t, "q")
+build_atomic_write(write_u16_atomic, "h", uint16_t, "r")
+build_atomic_write(write_u32_atomic, "", uint32_t, "r")
+//build_atomic_write(write_u64_atomic, "d", uint64_t, "r")
+build_atomic_write(write_int_atomic, "", int, "r")
+
+void __bad_atomic_size(void);
+
+#define read_atomic(p) ({                                               \
+    typeof(*p) __x;                                                     \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: __x = (typeof(*p))read_u8_atomic((uint8_t *)p); break;      \
+    case 2: __x = (typeof(*p))read_u16_atomic((uint16_t *)p); break;    \
+    case 4: __x = (typeof(*p))read_u32_atomic((uint32_t *)p); break;    \
+    default: __x = 0; __bad_atomic_size(); break;                       \
+    }                                                                   \
+    __x;                                                                \
+})
+
+#define write_atomic(p, x) ({                                           \
+    typeof(*p) __x = (x);                                               \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: write_u8_atomic((uint8_t *)p, (uint8_t)__x); break;         \
+    case 2: write_u16_atomic((uint16_t *)p, (uint16_t)__x); break;      \
+    case 4: write_u32_atomic((uint32_t *)p, (uint32_t)__x); break;      \
+    default: __bad_atomic_size(); break;                                \
+    }                                                                   \
+    __x;                                                                \
+})
+
+/*
+ * NB. I've pushed the volatile qualifier into the operations. This allows
+ * fast accessors such as _atomic_read() and _atomic_set() which don't give
+ * the compiler a fit.
+ */
+typedef struct { int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+#define _atomic_read(v) ((v).counter)
+#define atomic_read(v)  (*(volatile int *)&(v)->counter)
+
+#define _atomic_set(v,i) (((v).counter) = (i))
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+/*
+ * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
+ * store exclusive to ensure that these are atomic.  We may loop
+ * to ensure that the update happens.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__("@ atomic_add\n"
+"1:     ldrex   %0, [%3]\n"
+"       add     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic_add_return\n"
+"1:     ldrex   %0, [%3]\n"
+"       add     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+
+        smp_mb();
+
+        return result;
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__("@ atomic_sub\n"
+"1:     ldrex   %0, [%3]\n"
+"       sub     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic_sub_return\n"
+"1:     ldrex   %0, [%3]\n"
+"       sub     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+
+        smp_mb();
+
+        return result;
+}
+
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+        unsigned long oldval, res;
+
+        smp_mb();
+
+        do {
+                __asm__ __volatile__("@ atomic_cmpxchg\n"
+                "ldrex  %1, [%3]\n"
+                "mov    %0, #0\n"
+                "teq    %1, %4\n"
+                "strexeq %0, %5, [%3]\n"
+                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
+                    : "cc");
+        } while (res);
+
+        smp_mb();
+
+        return oldval;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+        unsigned long tmp, tmp2;
+
+        __asm__ __volatile__("@ atomic_clear_mask\n"
+"1:     ldrex   %0, [%3]\n"
+"       bic     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
+        : "r" (addr), "Ir" (mask)
+        : "cc");
+}
+
+#define atomic_inc(v)           atomic_add(1, v)
+#define atomic_dec(v)           atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+static inline atomic_t atomic_compareandswap(
+    atomic_t old, atomic_t new, atomic_t *v)
+{
+    atomic_t rc;
+    rc.counter = __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
+    return rc;
+}
+
+#endif /* __ARCH_ARM_ATOMIC__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
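
For reference, a minimal usage sketch of the primitives above (illustrative caller code; the names refcnt and shared_flag are hypothetical):

    #include <xen/types.h>
    #include <asm/atomic.h>

    static atomic_t refcnt = ATOMIC_INIT(1);
    static uint32_t shared_flag;

    static void get_ref(void)
    {
        atomic_inc(&refcnt);             /* ldrex/strex loop, no barrier */
    }

    static int put_ref(void)
    {
        /* atomic_sub_return() issues smp_mb() before and after, so
         * dec-and-test behaves as a full barrier. */
        return atomic_dec_and_test(&refcnt);
    }

    static void publish(void)
    {
        /* Single-copy atomic store/load for 1-, 2- and 4-byte objects;
         * any other size fails to link via __bad_atomic_size(). */
        write_atomic(&shared_flag, 1);
        (void)read_atomic(&shared_flag);
    }
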
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/bitops.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/bitops.h      Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,213 @@
+/*
+ * Copyright 1995, Russell King.
+ * Various bits and pieces copyrights include:
+ *  Linus Torvalds (test_bit).
+ * Big endian support: Copyright 2001, Nicolas Pitre
+ *  reworked by rmk.
+ */
+
+#ifndef _ARM_BITOPS_H
+#define _ARM_BITOPS_H
+
+extern void _set_bit(int nr, volatile void * p);
+extern void _clear_bit(int nr, volatile void * p);
+extern void _change_bit(int nr, volatile void * p);
+extern int _test_and_set_bit(int nr, volatile void * p);
+extern int _test_and_clear_bit(int nr, volatile void * p);
+extern int _test_and_change_bit(int nr, volatile void * p);
+
+#define set_bit(n,p)              _set_bit(n,p)
+#define clear_bit(n,p)            _clear_bit(n,p)
+#define change_bit(n,p)           _change_bit(n,p)
+#define test_and_set_bit(n,p)     _test_and_set_bit(n,p)
+#define test_and_clear_bit(n,p)   _test_and_clear_bit(n,p)
+#define test_and_change_bit(n,p)  _test_and_change_bit(n,p)
+
+#define BIT(nr)                 (1UL << (nr))
+#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)            ((nr) / BITS_PER_LONG)
+#define BITS_PER_BYTE           8
+
+#define ADDR (*(volatile long *) addr)
+#define CONST_ADDR (*(const volatile long *) addr)
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(int nr, volatile void *addr)
+{
+        unsigned long mask = BIT_MASK(nr);
+        volatile unsigned long *p =
+                ((volatile unsigned long *)addr) + BIT_WORD(nr);
+        unsigned long old = *p;
+
+        *p = old | mask;
+        return (old & mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, volatile void *addr)
+{
+        unsigned long mask = BIT_MASK(nr);
+        volatile unsigned long *p =
+                ((volatile unsigned long *)addr) + BIT_WORD(nr);
+        unsigned long old = *p;
+
+        *p = old & ~mask;
+        return (old & mask) != 0;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static inline int __test_and_change_bit(int nr,
+                                            volatile void *addr)
+{
+        unsigned long mask = BIT_MASK(nr);
+        volatile unsigned long *p =
+                ((volatile unsigned long *)addr) + BIT_WORD(nr);
+        unsigned long old = *p;
+
+        *p = old ^ mask;
+        return (old & mask) != 0;
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(int nr, const volatile void *addr)
+{
+        const volatile unsigned long *p = (const volatile unsigned long *)addr;
+        return 1UL & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+/*
+ * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
+ */
+extern int _find_first_zero_bit_le(const void * p, unsigned size);
+extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+
+/*
+ * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
+ */
+extern int _find_first_zero_bit_be(const void * p, unsigned size);
+extern int _find_next_zero_bit_be(const void * p, int size, int offset);
+extern int _find_first_bit_be(const unsigned long *p, unsigned size);
+extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+
+#ifndef __ARMEB__
+/*
+ * These are the little endian, atomic definitions.
+ */
+#define find_first_zero_bit(p,sz)      _find_first_zero_bit_le(p,sz)
+#define find_next_zero_bit(p,sz,off)   _find_next_zero_bit_le(p,sz,off)
+#define find_first_bit(p,sz)           _find_first_bit_le(p,sz)
+#define find_next_bit(p,sz,off)                _find_next_bit_le(p,sz,off)
+
+#else
+/*
+ * These are the big endian, atomic definitions.
+ */
+#define find_first_zero_bit(p,sz)      _find_first_zero_bit_be(p,sz)
+#define find_next_zero_bit(p,sz,off)   _find_next_zero_bit_be(p,sz,off)
+#define find_first_bit(p,sz)           _find_first_bit_be(p,sz)
+#define find_next_bit(p,sz,off)                _find_next_bit_be(p,sz,off)
+
+#endif
+
+static inline int constant_fls(int x)
+{
+        int r = 32;
+
+        if (!x)
+                return 0;
+        if (!(x & 0xffff0000u)) {
+                x <<= 16;
+                r -= 16;
+        }
+        if (!(x & 0xff000000u)) {
+                x <<= 8;
+                r -= 8;
+        }
+        if (!(x & 0xf0000000u)) {
+                x <<= 4;
+                r -= 4;
+        }
+        if (!(x & 0xc0000000u)) {
+                x <<= 2;
+                r -= 2;
+        }
+        if (!(x & 0x80000000u)) {
+                x <<= 1;
+                r -= 1;
+        }
+        return r;
+}
+
+/*
+ * On ARMv5 and above those functions can be implemented around
+ * the clz instruction for much better code efficiency.
+ */
+
+static inline int fls(int x)
+{
+        int ret;
+
+        if (__builtin_constant_p(x))
+               return constant_fls(x);
+
+        asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
+        ret = 32 - ret;
+        return ret;
+}
+
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
+
+/**
+ * find_first_set_bit - find the first set bit in @word
+ * @word: the word to search
+ *
+ * Returns the bit-number of the first set bit (first bit being 0).
+ * The input must *not* be zero.
+ */
+static inline unsigned int find_first_set_bit(unsigned long word)
+{
+        return ffs(word) - 1;
+}
+
+/**
+ * hweightN - returns the hamming weight of a N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+#define hweight64(x) generic_hweight64(x)
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#endif /* _ARM_BITOPS_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
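
A short usage sketch of the bit operations above (illustrative only; the bitmap and constants are made up):

    #include <xen/types.h>
    #include <asm/bitops.h>

    static unsigned long pending[2];    /* 64 bits even on a 32-bit build */

    static void bitops_example(void)
    {
        int highest, first;

        set_bit(35, pending);                    /* atomic, via _set_bit()   */
        if ( test_and_clear_bit(35, pending) )   /* atomic observe-and-clear */
            clear_bit(0, pending);

        highest = fls(0x90);                 /* 8: fls() is 1-based, 0 if no bit set */
        first   = find_first_set_bit(0x90);  /* 4: 0-based, input must be non-zero   */
        (void)highest; (void)first;
    }
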
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/bug.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/bug.h Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,15 @@
+#ifndef __ARM_BUG_H__
+#define __ARM_BUG_H__
+
+#define BUG() __bug(__FILE__, __LINE__)
+#define WARN() __warn(__FILE__, __LINE__)
+
+#endif /* __ARM_BUG_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/byteorder.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/byteorder.h   Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,16 @@
+#ifndef __ASM_ARM_BYTEORDER_H__
+#define __ASM_ARM_BYTEORDER_H__
+
+#define __BYTEORDER_HAS_U64__
+
+#include <xen/byteorder/little_endian.h>
+
+#endif /* __ASM_ARM_BYTEORDER_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/cache.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/cache.h       Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,20 @@
+#ifndef __ARCH_ARM_CACHE_H
+#define __ARCH_ARM_CACHE_H
+
+#include <xen/config.h>
+
+/* L1 cache line size */
+#define L1_CACHE_SHIFT  (CONFIG_ARM_L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)
+
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/config.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/config.h      Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,122 @@
+/******************************************************************************
+ * config.h
+ *
+ * A Linux-style configuration list.
+ */
+
+#ifndef __ARM_CONFIG_H__
+#define __ARM_CONFIG_H__
+
+#define CONFIG_PAGING_LEVELS 3
+
+#define CONFIG_ARM 1
+
+#define CONFIG_ARM_L1_CACHE_SHIFT 7 /* XXX */
+
+#define CONFIG_SMP 1
+
+#define CONFIG_DOMAIN_PAGE 1
+
+#define OPT_CONSOLE_STR "com1"
+
+#ifdef MAX_PHYS_CPUS
+#define NR_CPUS MAX_PHYS_CPUS
+#else
+#define NR_CPUS 128
+#endif
+
+#define MAX_VIRT_CPUS 128 /* XXX */
+#define MAX_HVM_VCPUS MAX_VIRT_CPUS
+
+#define asmlinkage /* Nothing needed */
+
+/* Linkage for ARM */
+#define __ALIGN .align 2
+#define __ALIGN_STR ".align 2"
+#ifdef __ASSEMBLY__
+#define ALIGN __ALIGN
+#define ALIGN_STR __ALIGN_STR
+#define ENTRY(name)                             \
+  .globl name;                                  \
+  ALIGN;                                        \
+  name:
+#define END(name) \
+  .size name, .-name
+#define ENDPROC(name) \
+  .type name, %function; \
+  END(name)
+#endif
+
+/*
+ * Memory layout:
+ *  0  -   2M   Unmapped
+ *  2M -   4M   Xen text, data, bss
+ *  4M -   6M   Fixmap: special-purpose 4K mapping slots
+ *
+ * 32M - 128M   Frametable: 24 bytes per page for 16GB of RAM
+ *
+ *  1G -   2G   Xenheap: always-mapped memory
+ *  2G -   4G   Domheap: on-demand-mapped
+ */
+
+#define XEN_VIRT_START         0x00200000
+#define FIXMAP_ADDR(n)        (0x00400000 + (n) * PAGE_SIZE)
+#define FRAMETABLE_VIRT_START  0x02000000
+#define XENHEAP_VIRT_START     0x40000000
+#define DOMHEAP_VIRT_START     0x80000000
+
+#define HYPERVISOR_VIRT_START mk_unsigned_long(XEN_VIRT_START)
+
+#define DOMHEAP_ENTRIES        1024  /* 1024 2MB mapping slots */
+
+/* Fixmap slots */
+#define FIXMAP_CONSOLE  0  /* The primary UART */
+#define FIXMAP_PT       1  /* Temporary mappings of pagetable pages */
+#define FIXMAP_MISC     2  /* Ephemeral mappings of hardware */
+#define FIXMAP_GICD     3  /* Interrupt controller: distributor registers */
+#define FIXMAP_GICC1    4  /* Interrupt controller: CPU registers (first page) */
+#define FIXMAP_GICC2    5  /* Interrupt controller: CPU registers (second page) */
+#define FIXMAP_GICH     6  /* Interrupt controller: virtual interface control registers */
+
+#define PAGE_SHIFT              12
+
+#ifndef __ASSEMBLY__
+#define PAGE_SIZE           (1L << PAGE_SHIFT)
+#else
+#define PAGE_SIZE           (1 << PAGE_SHIFT)
+#endif
+#define PAGE_MASK           (~(PAGE_SIZE-1))
+#define PAGE_FLAG_MASK      (~0)
+
+#define STACK_ORDER 3
+#define STACK_SIZE  (PAGE_SIZE << STACK_ORDER)
+
+#ifndef __ASSEMBLY__
+extern unsigned long xen_phys_start;
+extern unsigned long xenheap_phys_end;
+extern unsigned long frametable_virt_end;
+#endif
+
+#define supervisor_mode_kernel (0)
+
+#define watchdog_disable() ((void)0)
+#define watchdog_enable()  ((void)0)
+
+/* Board-specific: base address of PL011 UART */
+#define EARLY_UART_ADDRESS 0x1c090000
+/* Board-specific: base address of GIC + its regs */
+#define GIC_BASE_ADDRESS 0x2c000000
+#define GIC_DR_OFFSET 0x1000
+#define GIC_CR_OFFSET 0x2000
+#define GIC_HR_OFFSET 0x4000 /* Guess work http://lists.infradead.org/pipermail/linux-arm-kernel/2011-September/064219.html */
+#define GIC_VR_OFFSET 0x6000 /* Virtual Machine CPU interface */
+
+#endif /* __ARM_CONFIG_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/cpregs.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/cpregs.h      Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,207 @@
+#ifndef __ASM_ARM_CPREGS_H
+#define __ASM_ARM_CPREGS_H
+
+#include <xen/stringify.h>
+
+/* Co-processor registers */
+
+/* Layout as used in assembly, with src/dest registers mixed in */
+#define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2
+#define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm
+#define CP32(r, name...) __CP32(r, name)
+#define CP64(r, name...) __CP64(r, name)
+
+/* Stringified for inline assembly */
+#define LOAD_CP32(r, name...)  "mrc " __stringify(CP32(%r, name)) ";"
+#define STORE_CP32(r, name...) "mcr " __stringify(CP32(%r, name)) ";"
+#define LOAD_CP64(r, name...)  "mrrc " __stringify(CP64(%r, %H##r, name)) ";"
+#define STORE_CP64(r, name...) "mcrr " __stringify(CP64(%r, %H##r, name)) ";"
+
+/* C wrappers */
+#define READ_CP32(name...) ({                                   \
+    register uint32_t _r;                                       \
+    asm volatile(LOAD_CP32(0, name) : "=r" (_r));               \
+    _r; })
+
+#define WRITE_CP32(v, name...) do {                             \
+    register uint32_t _r = (v);                                 \
+    asm volatile(STORE_CP32(0, name) : : "r" (_r));             \
+} while (0)
+
+#define READ_CP64(name...) ({                                   \
+    register uint64_t _r;                                       \
+    asm volatile(LOAD_CP64(0, name) : "=r" (_r));               \
+    _r; })
+
+#define WRITE_CP64(v, name...) do {                             \
+    register uint64_t _r = (v);                                 \
+    asm volatile(STORE_CP64(0, name) : : "r" (_r));             \
+} while (0)
+
+#define __HSR_CPREG_c0  0
+#define __HSR_CPREG_c1  1
+#define __HSR_CPREG_c2  2
+#define __HSR_CPREG_c3  3
+#define __HSR_CPREG_c4  4
+#define __HSR_CPREG_c5  5
+#define __HSR_CPREG_c6  6
+#define __HSR_CPREG_c7  7
+#define __HSR_CPREG_c8  8
+#define __HSR_CPREG_c9  9
+#define __HSR_CPREG_c10 10
+#define __HSR_CPREG_c11 11
+#define __HSR_CPREG_c12 12
+#define __HSR_CPREG_c13 13
+#define __HSR_CPREG_c14 14
+#define __HSR_CPREG_c15 15
+
+#define __HSR_CPREG_0   0
+#define __HSR_CPREG_1   1
+#define __HSR_CPREG_2   2
+#define __HSR_CPREG_3   3
+#define __HSR_CPREG_4   4
+#define __HSR_CPREG_5   5
+#define __HSR_CPREG_6   6
+#define __HSR_CPREG_7   7
+
+#define _HSR_CPREG32(cp,op1,crn,crm,op2) \
+    ((__HSR_CPREG_##crn) << HSR_CP32_CRN_SHIFT) | \
+    ((__HSR_CPREG_##crm) << HSR_CP32_CRM_SHIFT) | \
+    ((__HSR_CPREG_##op1) << HSR_CP32_OP1_SHIFT) | \
+    ((__HSR_CPREG_##op2) << HSR_CP32_OP2_SHIFT)
+
+#define _HSR_CPREG64(cp,op1,crm) \
+    ((__HSR_CPREG_##crm) << HSR_CP64_CRM_SHIFT) | \
+    ((__HSR_CPREG_##op1) << HSR_CP64_OP1_SHIFT)
+
+/* Encode a register as per HSR ISS pattern */
+#define HSR_CPREG32(X) _HSR_CPREG32(X)
+#define HSR_CPREG64(X) _HSR_CPREG64(X)
+
+/*
+ * Order registers by Coprocessor-> CRn-> Opcode 1-> CRm-> Opcode 2
+ *
+ * This matches the ordering used in the ARM as well as the groupings
+ * which the CP registers are allocated in.
+ *
+ * This is slightly different to the form of the instruction
+ * arguments, which are cp,opc1,crn,crm,opc2.
+ */
+
+/* Coprocessor 15 */
+
+/* CP15 CR0: CPUID and Cache Type Registers */
+#define ID_PFR0         p15,0,c0,c1,0   /* Processor Feature Register 0 */
+#define ID_PFR1         p15,0,c0,c1,1   /* Processor Feature Register 1 */
+#define CCSIDR          p15,1,c0,c0,0   /* Cache Size ID Registers */
+#define CLIDR           p15,1,c0,c0,1   /* Cache Level ID Register */
+#define CSSELR          p15,2,c0,c0,0   /* Cache Size Selection Register */
+
+/* CP15 CR1: System Control Registers */
+#define SCTLR           p15,0,c1,c0,0   /* System Control Register */
+#define SCR             p15,0,c1,c1,0   /* Secure Configuration Register */
+#define NSACR           p15,0,c1,c1,2   /* Non-Secure Access Control Register */
+#define HSCTLR          p15,4,c1,c0,0   /* Hyp. System Control Register */
+#define HCR             p15,4,c1,c1,0   /* Hyp. Configuration Register */
+
+/* CP15 CR2: Translation Table Base and Control Registers */
+#define TTBR0           p15,0,c2,c0,0   /* Translation Table Base Reg. 0 */
+#define TTBR1           p15,0,c2,c0,1   /* Translation Table Base Reg. 1 */
+#define TTBCR           p15,0,c2,c0,2   /* Translation Table Base Control Register */
+#define HTTBR           p15,4,c2        /* Hyp. Translation Table Base Register */
+#define HTCR            p15,4,c2,c0,2   /* Hyp. Translation Control Register */
+#define VTCR            p15,4,c2,c1,2   /* Virtualization Translation Control Register */
+#define VTTBR           p15,6,c2        /* Virtualization Translation Table Base Register */
+
+/* CP15 CR3: Domain Access Control Register */
+
+/* CP15 CR4: */
+
+/* CP15 CR5: Fault Status Registers */
+#define DFSR            p15,0,c5,c0,0   /* Data Fault Status Register */
+#define IFSR            p15,0,c5,c0,1   /* Instruction Fault Status Register */
+#define HSR             p15,4,c5,c2,0   /* Hyp. Syndrome Register */
+
+/* CP15 CR6: Fault Address Registers */
+#define DFAR            p15,0,c6,c0,0   /* Data Fault Address Register  */
+#define IFAR            p15,0,c6,c0,2   /* Instruction Fault Address Register */
+#define HDFAR           p15,4,c6,c0,0   /* Hyp. Data Fault Address Register */
+#define HIFAR           p15,4,c6,c0,2   /* Hyp. Instruction Fault Address Register */
+#define HPFAR           p15,4,c6,c0,4   /* Hyp. IPA Fault Address Register */
+
+/* CP15 CR7: Cache and address translation operations */
+#define PAR             p15,0,c7        /* Physical Address Register */
+#define ICIALLUIS       p15,0,c7,c1,0   /* Invalidate all instruction caches to PoU inner shareable */
+#define BPIALLIS        p15,0,c7,c1,6   /* Invalidate entire branch predictor array inner shareable */
+#define ICIALLU         p15,0,c7,c5,0   /* Invalidate all instruction caches to PoU */
+#define BPIALL          p15,0,c7,c5,6   /* Invalidate entire branch predictor array */
+#define ATS1CPR         p15,0,c7,c8,0   /* Address Translation Stage 1. Non-Secure Kernel Read */
+#define ATS1CPW         p15,0,c7,c8,1   /* Address Translation Stage 1. Non-Secure Kernel Write */
+#define ATS1CUR         p15,0,c7,c8,2   /* Address Translation Stage 1. Non-Secure User Read */
+#define ATS1CUW         p15,0,c7,c8,3   /* Address Translation Stage 1. Non-Secure User Write */
+#define ATS12NSOPR      p15,0,c7,c8,4   /* Address Translation Stage 1+2 Non-Secure Kernel Read */
+#define ATS12NSOPW      p15,0,c7,c8,5   /* Address Translation Stage 1+2 Non-Secure Kernel Write */
+#define ATS12NSOUR      p15,0,c7,c8,6   /* Address Translation Stage 1+2 Non-Secure User Read */
+#define ATS12NSOUW      p15,0,c7,c8,7   /* Address Translation Stage 1+2 Non-Secure User Write */
+#define DCCMVAC         p15,0,c7,c10,1  /* Clean data or unified cache line by MVA to PoC */
+#define DCCISW          p15,0,c7,c14,2  /* Clean and invalidate data cache line by set/way */
+#define ATS1HR          p15,4,c7,c8,0   /* Address Translation Stage 1 Hyp. Read */
+#define ATS1HW          p15,4,c7,c8,1   /* Address Translation Stage 1 Hyp. Write */
+
+/* CP15 CR8: TLB maintenance operations */
+#define TLBIALLIS       p15,0,c8,c3,0   /* Invalidate entire TLB inner shareable */
+#define TLBIMVAIS       p15,0,c8,c3,1   /* Invalidate unified TLB entry by MVA inner shareable */
+#define TLBIASIDIS      p15,0,c8,c3,2   /* Invalidate unified TLB by ASID match inner shareable */
+#define TLBIMVAAIS      p15,0,c8,c3,3   /* Invalidate unified TLB entry by MVA all ASID inner shareable */
+#define DTLBIALL        p15,0,c8,c6,0   /* Invalidate data TLB */
+#define DTLBIMVA        p15,0,c8,c6,1   /* Invalidate data TLB entry by MVA */
+#define DTLBIASID       p15,0,c8,c6,2   /* Invalidate data TLB by ASID match */
+#define TLBIALLHIS      p15,4,c8,c3,0   /* Invalidate Entire Hyp. Unified TLB inner shareable */
+#define TLBIMVAHIS      p15,4,c8,c3,1   /* Invalidate Unified Hyp. TLB by MVA inner shareable */
+#define TLBIALLNSNHIS   p15,4,c8,c3,4   /* Invalidate Entire Non-Secure Non-Hyp. Unified TLB inner shareable */
+#define TLBIALLH        p15,4,c8,c7,0   /* Invalidate Entire Hyp. Unified TLB */
+#define TLBIMVAH        p15,4,c8,c7,1   /* Invalidate Unified Hyp. TLB by MVA */
+#define TLBIALLNSNH     p15,4,c8,c7,4   /* Invalidate Entire Non-Secure Non-Hyp. Unified TLB */
+
+/* CP15 CR9: */
+
+/* CP15 CR10: */
+#define MAIR0           p15,0,c10,c2,0  /* Memory Attribute Indirection Register 0 */
+#define MAIR1           p15,0,c10,c2,1  /* Memory Attribute Indirection Register 1 */
+#define HMAIR0          p15,4,c10,c2,0  /* Hyp. Memory Attribute Indirection Register 0 */
+#define HMAIR1          p15,4,c10,c2,1  /* Hyp. Memory Attribute Indirection Register 1 */
+
+/* CP15 CR11: DMA Operations for TCM Access */
+
+/* CP15 CR12:  */
+#define HVBAR           p15,4,c12,c0,0  /* Hyp. Vector Base Address Register */
+
+/* CP15 CR13:  */
+#define FCSEIDR         p15,0,c13,c0,0  /* FCSE Process ID Register */
+#define CONTEXTIDR      p15,0,c13,c0,1  /* Context ID Register */
+
+/* CP15 CR14:  */
+#define CNTPCT          p15,0,c14       /* Time counter value */
+#define CNTFRQ          p15,0,c14,c0,0  /* Time counter frequency */
+#define CNTKCTL         p15,0,c14,c1,0  /* Time counter kernel control */
+#define CNTP_TVAL       p15,0,c14,c2,0  /* Physical Timer value */
+#define CNTP_CTL        p15,0,c14,c2,1  /* Physical Timer control register */
+#define CNTVCT          p15,1,c14       /* Time counter value + offset */
+#define CNTP_CVAL       p15,2,c14       /* Physical Timer comparator */
+#define CNTVOFF         p15,4,c14       /* Time counter offset */
+#define CNTHCTL         p15,4,c14,c1,0  /* Time counter hyp. control */
+#define CNTHP_TVAL      p15,4,c14,c2,0  /* Hyp. Timer value */
+#define CNTHP_CTL       p15,4,c14,c2,1  /* Hyp. Timer control register */
+#define CNTHP_CVAL      p15,6,c14       /* Hyp. Timer comparator */
+
+/* CP15 CR15: Implementation Defined Registers */
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
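
To illustrate the C wrappers defined at the top of this file, a hypothetical read/write sequence (the register choice is arbitrary):

    #include <xen/types.h>
    #include <asm/cpregs.h>

    static void cpreg_example(void)
    {
        uint32_t hsr;
        uint64_t vttbr;

        hsr   = READ_CP32(HSR);     /* emits: mrc  p15, 4, <reg>, c5, c2, 0 */
        vttbr = READ_CP64(VTTBR);   /* emits: mrrc p15, 6, <lo>, <hi>, c2   */

        WRITE_CP32(0, CSSELR);      /* emits: mcr  p15, 2, <reg>, c0, c0, 0 */

        (void)hsr; (void)vttbr;
    }
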
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/current.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/current.h     Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,60 @@
+#ifndef __ARM_CURRENT_H__
+#define __ARM_CURRENT_H__
+
+#include <xen/config.h>
+#include <xen/percpu.h>
+#include <public/xen.h>
+
+#ifndef __ASSEMBLY__
+
+struct vcpu;
+
+struct cpu_info {
+    struct cpu_user_regs guest_cpu_user_regs;
+    unsigned long elr;
+    unsigned int processor_id;
+    struct vcpu *current_vcpu;
+    unsigned long per_cpu_offset;
+};
+
+static inline struct cpu_info *get_cpu_info(void)
+{
+        register unsigned long sp asm ("sp");
+        return (struct cpu_info *)((sp & ~(STACK_SIZE - 1)) + STACK_SIZE - sizeof(struct cpu_info));
+}
+
+#define get_current()         (get_cpu_info()->current_vcpu)
+#define set_current(vcpu)     (get_cpu_info()->current_vcpu = (vcpu))
+#define current               (get_current())
+
+#define get_processor_id()    (get_cpu_info()->processor_id)
+#define set_processor_id(id)  do {                                      \
+    struct cpu_info *ci__ = get_cpu_info();                             \
+    ci__->per_cpu_offset = __per_cpu_offset[ci__->processor_id = (id)]; \
+} while (0)
+
+#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
+
+#define reset_stack_and_jump(__fn)              \
+    __asm__ __volatile__ (                      \
+        "mov sp,%0; b "STR(__fn)      \
+        : : "r" (guest_cpu_user_regs()) : "memory" )
+#endif
+
+
+/*
+ * Which VCPU's state is currently running on each CPU?
+ * This is not necessarily the same as 'current', as a CPU may be
+ * executing a lazy state switch.
+ */
+DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
+
+#endif /* __ARM_CURRENT_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
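
A sketch of how the stack-anchored cpu_info block is intended to be used (hypothetical caller):

    #include <xen/lib.h>
    #include <asm/current.h>

    static void current_example(struct vcpu *v)
    {
        /* cpu_info sits at the top of the current 8-page stack
         * (STACK_SIZE == PAGE_SIZE << STACK_ORDER). */
        struct cpu_info *ci = get_cpu_info();
        struct cpu_user_regs *regs = guest_cpu_user_regs();

        set_current(v);             /* ci->current_vcpu = v    */
        ASSERT(current == v);       /* 'current' reads it back */

        (void)ci; (void)regs;
    }
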
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/debugger.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/debugger.h    Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,15 @@
+#ifndef __ARM_DEBUGGER_H__
+#define __ARM_DEBUGGER_H__
+
+#define debugger_trap_fatal(v, r) ((void) 0)
+#define debugger_trap_immediate() ((void) 0)
+
+#endif /* __ARM_DEBUGGER_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/delay.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/delay.h       Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,15 @@
+#ifndef _ARM_DELAY_H
+#define _ARM_DELAY_H
+
+extern void __udelay(unsigned long usecs);
+#define udelay(n) __udelay(n)
+
+#endif /* defined(_ARM_DELAY_H) */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/desc.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/desc.h        Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,12 @@
+#ifndef __ARCH_DESC_H
+#define __ARCH_DESC_H
+
+#endif /* __ARCH_DESC_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/div64.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/div64.h       Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,235 @@
+/* Taken from Linux arch/arm */
+#ifndef __ASM_ARM_DIV64
+#define __ASM_ARM_DIV64
+
+#include <asm/system.h>
+#include <xen/types.h>
+
+/*
+ * The semantics of do_div() are:
+ *
+ * uint32_t do_div(uint64_t *n, uint32_t base)
+ * {
+ *     uint32_t remainder = *n % base;
+ *     *n = *n / base;
+ *     return remainder;
+ * }
+ *
+ * In other words, a 64-bit dividend with a 32-bit divisor producing
+ * a 64-bit result and a 32-bit remainder.  To accomplish this optimally
+ * we call a special __do_div64 helper with completely non standard
+ * calling convention for arguments and results (beware).
+ */
+
+#ifdef __ARMEB__
+#define __xh "r0"
+#define __xl "r1"
+#else
+#define __xl "r0"
+#define __xh "r1"
+#endif
+
+#define __do_div_asm(n, base)                                  \
+({                                                             \
+       register unsigned int __base      asm("r4") = base;     \
+       register unsigned long long __n   asm("r0") = n;        \
+       register unsigned long long __res asm("r2");            \
+       register unsigned int __rem       asm(__xh);            \
+       asm(    __asmeq("%0", __xh)                             \
+               __asmeq("%1", "r2")                             \
+               __asmeq("%2", "r0")                             \
+               __asmeq("%3", "r4")                             \
+               "bl     __do_div64"                             \
+               : "=r" (__rem), "=r" (__res)                    \
+               : "r" (__n), "r" (__base)                       \
+               : "ip", "lr", "cc");                            \
+       n = __res;                                              \
+       __rem;                                                  \
+})
+
+#if __GNUC__ < 4
+
+/*
+ * gcc versions earlier than 4.0 are simply too problematic for the
+ * optimized implementation below. First there is gcc PR 15089 that
+ * tends to trigger on more complex constructs: spurious .global __udivsi3
+ * directives are inserted even if none of those symbols are referenced in the
+ * generated code, and those gcc versions are not able to do constant
+ * propagation on long long values anyway.
+ */
+#define do_div(n, base) __do_div_asm(n, base)
+
+#elif __GNUC__ >= 4
+
+#include <asm/bug.h>
+
+/*
+ * If the divisor happens to be constant, we determine the appropriate
+ * inverse at compile time to turn the division into a few inline
+ * multiplications instead which is much faster. And yet only if compiling
+ * for ARMv4 or higher (we need umull/umlal) and if the gcc version is
+ * sufficiently recent to perform proper long long constant propagation.
+ * (It is unfortunate that gcc doesn't perform all this internally.)
+ */
+#define do_div(n, base)                                                        \
+({                                                                     \
+       unsigned int __r, __b = (base);                                 \
+       if (!__builtin_constant_p(__b) || __b == 0) {                   \
+               /* non-constant divisor (or zero): slow path */         \
+               __r = __do_div_asm(n, __b);                             \
+       } else if ((__b & (__b - 1)) == 0) {                            \
+               /* Trivial: __b is constant and a power of 2 */         \
+               /* gcc does the right thing with this code.  */         \
+               __r = n;                                                \
+               __r &= (__b - 1);                                       \
+               n /= __b;                                               \
+       } else {                                                        \
+               /* Multiply by inverse of __b: n/b = n*(p/b)/p       */ \
+               /* We rely on the fact that most of this code gets   */ \
+               /* optimized away at compile time due to constant    */ \
+               /* propagation and only a couple inline assembly     */ \
+               /* instructions should remain. Better avoid any      */ \
+               /* code construct that might prevent that.           */ \
+               unsigned long long __res, __x, __t, __m, __n = n;       \
+               unsigned int __c, __p, __z = 0;                         \
+               /* preserve low part of n for remainder computation */  \
+               __r = __n;                                              \
+               /* determine number of bits to represent __b */         \
+               __p = 1 << __div64_fls(__b);                            \
+               /* compute __m = ((__p << 64) + __b - 1) / __b */       \
+               __m = (~0ULL / __b) * __p;                              \
+               __m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b;     \
+               /* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */ \
+               __x = ~0ULL / __b * __b - 1;                            \
+               __res = (__m & 0xffffffff) * (__x & 0xffffffff);        \
+               __res >>= 32;                                           \
+               __res += (__m & 0xffffffff) * (__x >> 32);              \
+               __t = __res;                                            \
+               __res += (__x & 0xffffffff) * (__m >> 32);              \
+               __t = (__res < __t) ? (1ULL << 32) : 0;                 \
+               __res = (__res >> 32) + __t;                            \
+               __res += (__m >> 32) * (__x >> 32);                     \
+               __res /= __p;                                           \
+               /* Now sanitize and optimize what we've got. */         \
+               if (~0ULL % (__b / (__b & -__b)) == 0) {                \
+                       /* those cases can be simplified with: */       \
+                       __n /= (__b & -__b);                            \
+                       __m = ~0ULL / (__b / (__b & -__b));             \
+                       __p = 1;                                        \
+                       __c = 1;                                        \
+               } else if (__res != __x / __b) {                        \
+                       /* We can't get away without a correction    */ \
+                       /* to compensate for bit truncation errors.  */ \
+                       /* To avoid it we'd need an additional bit   */ \
+                       /* to represent __m which would overflow it. */ \
+                       /* Instead we do m=p/b and n/b=(n*m+m)/p.    */ \
+                       __c = 1;                                        \
+                       /* Compute __m = (__p << 64) / __b */           \
+                       __m = (~0ULL / __b) * __p;                      \
+                       __m += ((~0ULL % __b + 1) * __p) / __b;         \
+               } else {                                                \
+                       /* Reduce __m/__p, and try to clear bit 31   */ \
+                       /* of __m when possible otherwise that'll    */ \
+                       /* need extra overflow handling later.       */ \
+                       unsigned int __bits = -(__m & -__m);            \
+                       __bits |= __m >> 32;                            \
+                       __bits = (~__bits) << 1;                        \
+                       /* If __bits == 0 then setting bit 31 is     */ \
+                       /* unavoidable.  Simply apply the maximum    */ \
+                       /* possible reduction in that case.          */ \
+                       /* Otherwise the MSB of __bits indicates the */ \
+                       /* best reduction we should apply.           */ \
+                       if (!__bits) {                                  \
+                               __p /= (__m & -__m);                    \
+                               __m /= (__m & -__m);                    \
+                       } else {                                        \
+                               __p >>= __div64_fls(__bits);            \
+                               __m >>= __div64_fls(__bits);            \
+                       }                                               \
+                       /* No correction needed. */                     \
+                       __c = 0;                                        \
+               }                                                       \
+               /* Now we have a combination of 2 conditions:        */ \
+               /* 1) whether or not we need a correction (__c), and */ \
+               /* 2) whether or not there might be an overflow in   */ \
+               /*    the cross product (__m & ((1<<63) | (1<<31)))  */ \
+               /* Select the best insn combination to perform the   */ \
+               /* actual __m * __n / (__p << 64) operation.         */ \
+               if (!__c) {                                             \
+                       asm (   "umull  %Q0, %R0, %1, %Q2\n\t"          \
+                               "mov    %Q0, #0"                        \
+                               : "=&r" (__res)                         \
+                               : "r" (__m), "r" (__n)                  \
+                               : "cc" );                               \
+               } else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) {    \
+                       __res = __m;                                    \
+                       asm (   "umlal  %Q0, %R0, %Q1, %Q2\n\t"         \
+                               "mov    %Q0, #0"                        \
+                               : "+&r" (__res)                         \
+                               : "r" (__m), "r" (__n)                  \
+                               : "cc" );                               \
+               } else {                                                \
+                       asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t"         \
+                               "cmn    %Q0, %Q1\n\t"                   \
+                               "adcs   %R0, %R0, %R1\n\t"              \
+                               "adc    %Q0, %3, #0"                    \
+                               : "=&r" (__res)                         \
+                               : "r" (__m), "r" (__n), "r" (__z)       \
+                               : "cc" );                               \
+               }                                                       \
+               if (!(__m & ((1ULL << 63) | (1ULL << 31)))) {           \
+                       asm (   "umlal  %R0, %Q0, %R1, %Q2\n\t"         \
+                               "umlal  %R0, %Q0, %Q1, %R2\n\t"         \
+                               "mov    %R0, #0\n\t"                    \
+                               "umlal  %Q0, %R0, %R1, %R2"             \
+                               : "+&r" (__res)                         \
+                               : "r" (__m), "r" (__n)                  \
+                               : "cc" );                               \
+               } else {                                                \
+                       asm (   "umlal  %R0, %Q0, %R2, %Q3\n\t"         \
+                               "umlal  %R0, %1, %Q2, %R3\n\t"          \
+                               "mov    %R0, #0\n\t"                    \
+                               "adds   %Q0, %1, %Q0\n\t"               \
+                               "adc    %R0, %R0, #0\n\t"               \
+                               "umlal  %Q0, %R0, %R2, %R3"             \
+                               : "+&r" (__res), "+&r" (__z)            \
+                               : "r" (__m), "r" (__n)                  \
+                               : "cc" );                               \
+               }                                                       \
+               __res /= __p;                                           \
+               /* The remainder can be computed with 32-bit regs    */ \
+               /* only, and gcc is good at that.                    */ \
+               {                                                       \
+                       unsigned int __res0 = __res;                    \
+                       unsigned int __b0 = __b;                        \
+                       __r -= __res0 * __b0;                           \
+               }                                                       \
+               /* BUG_ON(__r >= __b || __res * __b + __r != n); */     \
+               n = __res;                                              \
+       }                                                               \
+       __r;                                                            \
+})
+
+/* our own fls implementation to make sure constant propagation is fine */
+#define __div64_fls(bits)                                              \
+({                                                                     \
+       unsigned int __left = (bits), __nr = 0;                         \
+       if (__left & 0xffff0000) __nr += 16, __left >>= 16;             \
+       if (__left & 0x0000ff00) __nr +=  8, __left >>=  8;             \
+       if (__left & 0x000000f0) __nr +=  4, __left >>=  4;             \
+       if (__left & 0x0000000c) __nr +=  2, __left >>=  2;             \
+       if (__left & 0x00000002) __nr +=  1;                            \
+       __nr;                                                           \
+})
+
+#endif
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
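
As the semantics comment at the top of this file says, do_div() divides in place and evaluates to the remainder; a minimal sketch (hypothetical helper):

    #include <xen/types.h>
    #include <asm/div64.h>

    static uint32_t ns_to_ms(uint64_t ns, uint32_t *remainder)
    {
        /* do_div() takes an lvalue, not a pointer: 'ns' is replaced by the
         * quotient and the macro's value is the 32-bit remainder.  With a
         * constant divisor the multiply-by-inverse path above is used. */
        *remainder = do_div(ns, 1000000);
        return (uint32_t)ns;
    }
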
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/elf.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/elf.h Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,33 @@
+#ifndef __ARM_ELF_H__
+#define __ARM_ELF_H__
+
+typedef struct {
+    unsigned long r0;
+    unsigned long r1;
+    unsigned long r2;
+    unsigned long r3;
+    unsigned long r4;
+    unsigned long r5;
+    unsigned long r6;
+    unsigned long r7;
+    unsigned long r8;
+    unsigned long r9;
+    unsigned long r10;
+    unsigned long r11;
+    unsigned long r12;
+    unsigned long sp;
+    unsigned long lr;
+    unsigned long pc;
+} ELF_Gregset;
+
+#endif /* __ARM_ELF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/event.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/event.h       Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,41 @@
+#ifndef __ASM_EVENT_H__
+#define __ASM_EVENT_H__
+
+void vcpu_kick(struct vcpu *v);
+void vcpu_mark_events_pending(struct vcpu *v);
+
+static inline int local_events_need_delivery(void)
+{
+    /* TODO
+     * return (vcpu_info(v, evtchn_upcall_pending) &&
+                        !vcpu_info(v, evtchn_upcall_mask)); */
+        return 0;
+}
+
+int local_event_delivery_is_enabled(void);
+
+static inline void local_event_delivery_disable(void)
+{
+    /* TODO current->vcpu_info->evtchn_upcall_mask = 1; */
+}
+
+static inline void local_event_delivery_enable(void)
+{
+    /* TODO current->vcpu_info->evtchn_upcall_mask = 0; */
+}
+
+/* No arch specific virq definition now. Default to global. */
+static inline int arch_virq_is_global(int virq)
+{
+    return 1;
+}
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/flushtlb.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/flushtlb.h    Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,31 @@
+#ifndef __FLUSHTLB_H__
+#define __FLUSHTLB_H__
+
+#include <xen/cpumask.h>
+
+/*
+ * Filter the given set of CPUs, removing those that definitely flushed their
+ * TLB since @page_timestamp.
+ */
+/* XXX lazy implementation just doesn't clear anything.... */
+#define tlbflush_filter(mask, page_timestamp)                           \
+do {                                                                    \
+} while ( 0 )
+
+#define tlbflush_current_time()                 (0)
+
+/* Flush local TLBs */
+void flush_tlb_local(void);
+
+/* Flush specified CPUs' TLBs */
+void flush_tlb_mask(const cpumask_t *mask);
+
+#endif /* __FLUSHTLB_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/grant_table.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/grant_table.h Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,35 @@
+#ifndef __ASM_GRANT_TABLE_H__
+#define __ASM_GRANT_TABLE_H__
+
+#include <xen/grant_table.h>
+
+#define INVALID_GFN (-1UL)
+#define INITIAL_NR_GRANT_FRAMES 1
+
+void gnttab_clear_flag(unsigned long nr, uint16_t *addr);
+int create_grant_host_mapping(unsigned long gpaddr,
+        unsigned long mfn, unsigned int flags, unsigned int
+        cache_flags);
+#define gnttab_host_mapping_get_page_type(op, d, rd) (0)
+int replace_grant_host_mapping(unsigned long gpaddr, unsigned long mfn,
+        unsigned long new_gpaddr, unsigned int flags);
+void gnttab_mark_dirty(struct domain *d, unsigned long l);
+#define gnttab_create_status_page(d, t, i) do {} while (0)
+#define gnttab_create_shared_page(d, t, i) do {} while (0)
+#define gnttab_shared_gmfn(d, t, i) (0)
+#define gnttab_status_gmfn(d, t, i) (0)
+#define gnttab_release_host_mappings(domain) 1
+static inline int replace_grant_supported(void)
+{
+    return 1;
+}
+
+#endif /* __ASM_GRANT_TABLE_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/hardirq.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/hardirq.h     Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,28 @@
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <xen/config.h>
+#include <xen/cache.h>
+#include <xen/smp.h>
+
+typedef struct {
+        unsigned long __softirq_pending;
+        unsigned int __local_irq_count;
+} __cacheline_aligned irq_cpustat_t;
+
+#include <xen/irq_cpustat.h>    /* Standard mappings for irq_cpustat_t above */
+
+#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+
+#define irq_enter()     (local_irq_count(smp_processor_id())++)
+#define irq_exit()      (local_irq_count(smp_processor_id())--)
+
+#endif /* __ASM_HARDIRQ_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/hypercall.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/hypercall.h   Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,14 @@
+#ifndef __ASM_ARM_HYPERCALL_H__
+#define __ASM_ARM_HYPERCALL_H__
+
+#include <public/domctl.h> /* for arch_do_domctl */
+
+#endif /* __ASM_ARM_HYPERCALL_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/init.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/init.h        Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,12 @@
+#ifndef _XEN_ASM_INIT_H
+#define _XEN_ASM_INIT_H
+
+#endif /* _XEN_ASM_INIT_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/io.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/io.h  Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,12 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/iocap.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/iocap.h       Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,20 @@
+#ifndef __ARM_IOCAP_H__
+#define __ARM_IOCAP_H__
+
+#define cache_flush_permitted(d)                        \
+    (!rangeset_is_empty((d)->iomem_caps))
+
+#define multipage_allocation_permitted(d, order)        \
+    (((order) <= 9) || /* allow 2MB superpages */       \
+     !rangeset_is_empty((d)->iomem_caps))
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/multicall.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/multicall.h   Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,23 @@
+#ifndef __ASM_ARM_MULTICALL_H__
+#define __ASM_ARM_MULTICALL_H__
+
+#define do_multicall_call(_call)                             \
+    do {                                                     \
+        __asm__ __volatile__ (                               \
+            ".word 0xe7f000f0@; do_multicall_call\n"         \
+            "    mov r0,#0; @ do_multicall_call\n"           \
+            "    str r0, [r0];\n"                            \
+            :                                                \
+            :                                                \
+            : );                                             \
+    } while ( 0 )
+
+#endif /* __ASM_ARM_MULTICALL_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/nmi.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/nmi.h Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,15 @@
+#ifndef ASM_NMI_H
+#define ASM_NMI_H
+
+#define register_guest_nmi_callback(a)  (-ENOSYS)
+#define unregister_guest_nmi_callback() (-ENOSYS)
+
+#endif /* ASM_NMI_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/numa.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/numa.h        Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,21 @@
+#ifndef __ARCH_ARM_NUMA_H
+#define __ARCH_ARM_NUMA_H
+
+/* Fake one node for now... */
+#define cpu_to_node(cpu) 0
+#define node_to_cpumask(node)  (cpu_online_map)
+
+static inline __attribute__((pure)) int phys_to_nid(paddr_t addr)
+{
+        return 0;
+}
+
+#endif /* __ARCH_ARM_NUMA_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/paging.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/paging.h      Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,13 @@
+#ifndef _XEN_PAGING_H
+#define _XEN_PAGING_H
+
+#endif /* _XEN_PAGING_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/pci.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/pci.h Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,7 @@
+#ifndef __ARM_PCI_H__
+#define __ARM_PCI_H__
+
+struct arch_pci_dev {
+};
+
+#endif /* __ARM_PCI_H__ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/percpu.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/percpu.h      Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,28 @@
+#ifndef __ARM_PERCPU_H__
+#define __ARM_PERCPU_H__
+
+#ifndef __ASSEMBLY__
+extern char __per_cpu_start[], __per_cpu_data_end[];
+extern unsigned long __per_cpu_offset[NR_CPUS];
+void percpu_init_areas(void);
+#endif
+
+/* Separate out the type, so (int[3], foo) works. */
+#define __DEFINE_PER_CPU(type, name, suffix)                    \
+    __attribute__((__section__(".bss.percpu" #suffix)))         \
+    __typeof__(type) per_cpu_##name
+
+#define per_cpu(var, cpu) ((&per_cpu__##var)[cpu?0:0]) /* UP: one copy shared by all CPUs */
+#define __get_cpu_var(var) per_cpu__##var
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+
+#endif /* __ARM_PERCPU_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
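
For illustration, a minimal usage sketch of these per-CPU accessors (not part of
the patch). It assumes the usual common DEFINE_PER_CPU wrapper, expected to
expand to __DEFINE_PER_CPU(type, name, ):

    DEFINE_PER_CPU(unsigned long, irq_count);       /* hypothetical counter */

    static void count_irq(void)
    {
        __get_cpu_var(irq_count)++;                  /* this CPU's copy */
    }

    static unsigned long read_irq_count(unsigned int cpu)
    {
        return per_cpu(irq_count, cpu);              /* UP placeholder: always slot 0 */
    }
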
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/processor.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/processor.h   Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,269 @@
+#ifndef __ASM_ARM_PROCESSOR_H
+#define __ASM_ARM_PROCESSOR_H
+
+#include <asm/cpregs.h>
+
+/* PSR bits (CPSR, SPSR) */
+
+/* 0-4: Mode */
+#define PSR_MODE_MASK 0x1f
+#define PSR_MODE_USR 0x10
+#define PSR_MODE_FIQ 0x11
+#define PSR_MODE_IRQ 0x12
+#define PSR_MODE_SVC 0x13
+#define PSR_MODE_MON 0x16
+#define PSR_MODE_ABT 0x17
+#define PSR_MODE_HYP 0x1a
+#define PSR_MODE_UND 0x1b
+#define PSR_MODE_SYS 0x1f
+
+#define PSR_THUMB        (1<<5)        /* Thumb Mode enable */
+#define PSR_FIQ_MASK        (1<<6)        /* Fast Interrupt mask */
+#define PSR_IRQ_MASK        (1<<7)        /* Interrupt mask */
+#define PSR_ABT_MASK         (1<<8)        /* Asynchronous Abort mask */
+#define PSR_BIG_ENDIAN        (1<<9)        /* Big Endian Mode */
+#define PSR_JAZELLE        (1<<24)        /* Jazelle Mode */
+
+/* TTBCR Translation Table Base Control Register */
+#define TTBCR_N_MASK 0x07
+#define TTBCR_N_16KB 0x00
+#define TTBCR_N_8KB  0x01
+#define TTBCR_N_4KB  0x02
+#define TTBCR_N_2KB  0x03
+#define TTBCR_N_1KB  0x04
+
+/* SCTLR System Control Register. */
+/* HSCTLR is a subset of this. */
+#define SCTLR_TE        (1<<30)
+#define SCTLR_AFE        (1<<29)
+#define SCTLR_TRE        (1<<28)
+#define SCTLR_NMFI        (1<<27)
+#define SCTLR_EE        (1<<25)
+#define SCTLR_VE        (1<<24)
+#define SCTLR_U                (1<<22)
+#define SCTLR_FI        (1<<21)
+#define SCTLR_WXN        (1<<19)
+#define SCTLR_HA        (1<<17)
+#define SCTLR_RR        (1<<14)
+#define SCTLR_V                (1<<13)
+#define SCTLR_I                (1<<12)
+#define SCTLR_Z                (1<<11)
+#define SCTLR_SW        (1<<10)
+#define SCTLR_B                (1<<7)
+#define SCTLR_C                (1<<2)
+#define SCTLR_A                (1<<1)
+#define SCTLR_M                (1<<0)
+
+#define SCTLR_BASE        0x00c50078
+#define HSCTLR_BASE        0x30c51878
+
+/* HCR Hyp Configuration Register */
+#define HCR_TGE                (1<<27)
+#define HCR_TVM                (1<<26)
+#define HCR_TTLB        (1<<25)
+#define HCR_TPU                (1<<24)
+#define HCR_TPC                (1<<23)
+#define HCR_TSW                (1<<22)
+#define HCR_TAC                (1<<21)
+#define HCR_TIDCP        (1<<20)
+#define HCR_TSC                (1<<19)
+#define HCR_TID3        (1<<18)
+#define HCR_TID2        (1<<17)
+#define HCR_TID1        (1<<16)
+#define HCR_TID0        (1<<15)
+#define HCR_TWE                (1<<14)
+#define HCR_TWI                (1<<13)
+#define HCR_DC                (1<<12)
+#define HCR_BSU_MASK        (3<<10)
+#define HCR_FB                (1<<9)
+#define HCR_VA                (1<<8)
+#define HCR_VI                (1<<7)
+#define HCR_VF                (1<<6)
+#define HCR_AMO                (1<<5)
+#define HCR_IMO                (1<<4)
+#define HCR_FMO                (1<<3)
+#define HCR_PTW                (1<<2)
+#define HCR_SWIO        (1<<1)
+#define HCR_VM                (1<<0)
+
+#define HSR_EC_WFI_WFE              0x01
+#define HSR_EC_CP15_32              0x03
+#define HSR_EC_CP15_64              0x04
+#define HSR_EC_CP14_32              0x05
+#define HSR_EC_CP14_DBG             0x06
+#define HSR_EC_CP                   0x07
+#define HSR_EC_CP10                 0x08
+#define HSR_EC_JAZELLE              0x09
+#define HSR_EC_BXJ                  0x0a
+#define HSR_EC_CP14_64              0x0c
+#define HSR_EC_SVC                  0x11
+#define HSR_EC_HVC                  0x12
+#define HSR_EC_INSTR_ABORT_GUEST    0x20
+#define HSR_EC_INSTR_ABORT_HYP      0x21
+#define HSR_EC_DATA_ABORT_GUEST     0x24
+#define HSR_EC_DATA_ABORT_HYP       0x25
+
+#ifndef __ASSEMBLY__
+union hsr {
+    uint32_t bits;
+    struct {
+        unsigned long iss:25;  /* Instruction Specific Syndrome */
+        unsigned long len:1;   /* Instruction length */
+        unsigned long ec:6;    /* Exception Class */
+    };
+
+    struct hsr_cp32 {
+        unsigned long read:1;  /* Direction */
+        unsigned long crm:4;   /* CRm */
+        unsigned long reg:4;   /* Rt */
+        unsigned long sbzp:1;
+        unsigned long crn:4;   /* CRn */
+        unsigned long op1:3;   /* Op1 */
+        unsigned long op2:3;   /* Op2 */
+        unsigned long cc:4;    /* Condition Code */
+        unsigned long ccvalid:1;/* CC Valid */
+        unsigned long len:1;   /* Instruction length */
+        unsigned long ec:6;    /* Exception Class */
+    } cp32; /* HSR_EC_CP15_32, CP14_32, CP10 */
+
+    struct hsr_cp64 {
+        unsigned long read:1;   /* Direction */
+        unsigned long crm:4;    /* CRm */
+        unsigned long reg1:4;   /* Rt1 */
+        unsigned long sbzp1:1;
+        unsigned long reg2:4;   /* Rt2 */
+        unsigned long sbzp2:2;
+        unsigned long op1:4;   /* Op1 */
+        unsigned long cc:4;    /* Condition Code */
+        unsigned long ccvalid:1;/* CC Valid */
+        unsigned long len:1;   /* Instruction length */
+        unsigned long ec:6;    /* Exception Class */
+    } cp64; /* HSR_EC_CP15_64, HSR_EC_CP14_64 */
+
+    struct hsr_dabt {
+        unsigned long dfsc:6;  /* Data Fault Status Code */
+        unsigned long write:1; /* Write / not Read */
+        unsigned long s1ptw:1; /* Stage 2 fault on stage 1 translation table walk */
+        unsigned long cache:1; /* Cache Maintenance */
+        unsigned long eat:1;   /* External Abort Type */
+        unsigned long sbzp0:6;
+        unsigned long reg:4;   /* Register */
+        unsigned long sbzp1:1;
+        unsigned long sign:1;  /* Sign extend */
+        unsigned long size:2;  /* Access Size */
+        unsigned long valid:1; /* Syndrome Valid */
+        unsigned long len:1;   /* Instruction length */
+        unsigned long ec:6;    /* Exception Class */
+    } dabt; /* HSR_EC_DATA_ABORT_* */
+};
+#endif
+
+/* HSR.EC == HSR_CP{15,14,10}_32 */
+#define HSR_CP32_OP2_MASK (0x000e0000)
+#define HSR_CP32_OP2_SHIFT (17)
+#define HSR_CP32_OP1_MASK (0x0001c000)
+#define HSR_CP32_OP1_SHIFT (14)
+#define HSR_CP32_CRN_MASK (0x00003c00)
+#define HSR_CP32_CRN_SHIFT (10)
+#define HSR_CP32_CRM_MASK (0x0000001e)
+#define HSR_CP32_CRM_SHIFT (1)
+#define HSR_CP32_REGS_MASK (HSR_CP32_OP1_MASK|HSR_CP32_OP2_MASK|\
+                            HSR_CP32_CRN_MASK|HSR_CP32_CRM_MASK)
+
+/* HSR.EC == HSR_CP{15,14}_64 */
+#define HSR_CP64_OP1_MASK (0x000f0000)
+#define HSR_CP64_OP1_SHIFT (16)
+#define HSR_CP64_CRM_MASK (0x0000001e)
+#define HSR_CP64_CRM_SHIFT (1)
+#define HSR_CP64_REGS_MASK (HSR_CP64_OP1_MASK|HSR_CP64_CRM_MASK)
+
+/* Physical Address Register */
+#define PAR_F           (1<<0)
+
+/* .... If F == 1 */
+#define PAR_FSC_SHIFT   (1)
+#define PAR_FSC_MASK    (0x3f<<PAR_FSC_SHIFT)
+#define PAR_STAGE21     (1<<8)     /* Stage 2 Fault During Stage 1 Walk */
+#define PAR_STAGE2      (1<<9)     /* Stage 2 Fault */
+
+/* If F == 0 */
+#define PAR_MAIR_SHIFT  56                       /* Memory Attributes */
+#define PAR_MAIR_MASK   (0xffLL<<PAR_MAIR_SHIFT)
+#define PAR_NS          (1<<9)                   /* Non-Secure */
+#define PAR_SH_SHIFT    7                        /* Shareability */
+#define PAR_SH_MASK     (3<<PAR_SH_SHIFT)
+
+/* Fault Status Register */
+/*
+ * 543210 BIT
+ * 00XXLL -- XX Fault Level LL
+ * ..01LL -- Translation Fault LL
+ * ..10LL -- Access Fault LL
+ * ..11LL -- Permission Fault LL
+ * 01xxxx -- Abort/Parity
+ * 10xxxx -- Other
+ * 11xxxx -- Implementation Defined
+ */
+#define FSC_TYPE_MASK (0x3<<4)
+#define FSC_TYPE_FAULT (0x00<<4)
+#define FSC_TYPE_ABT   (0x01<<4)
+#define FSC_TYPE_OTH   (0x02<<4)
+#define FSC_TYPE_IMPL  (0x03<<4)
+
+#define FSC_FLT_TRANS  (0x04)
+#define FSC_FLT_ACCESS (0x08)
+#define FSC_FLT_PERM   (0x0c)
+#define FSC_SEA        (0x10) /* Synchronous External Abort */
+#define FSC_SPE        (0x18) /* Memory Access Synchronous Parity Error */
+#define FSC_APE        (0x11) /* Memory Access Asynchronous Parity Error */
+#define FSC_SEATT      (0x14) /* Sync. Ext. Abort Translation Table */
+#define FSC_SPETT      (0x1c) /* Sync. Parity. Error Translation Table */
+#define FSC_AF         (0x21) /* Alignment Fault */
+#define FSC_DE         (0x22) /* Debug Event */
+#define FSC_LKD        (0x34) /* Lockdown Abort */
+#define FSC_CPR        (0x3a) /* Coprocessor Abort */
+
+#define FSC_LL_MASK    (0x03<<0)
+
+/* Time counter hypervisor control register */
+#define CNTHCTL_PA      (1u<<0)  /* Kernel/user access to physical counter */
+#define CNTHCTL_TA      (1u<<1)  /* Kernel/user access to CNTP timer */
+
+/* Timer control registers */
+#define CNTx_CTL_ENABLE   (1u<<0)  /* Enable timer */
+#define CNTx_CTL_MASK     (1u<<1)  /* Mask IRQ */
+#define CNTx_CTL_PENDING  (1u<<2)  /* IRQ pending */
+
+/* CPUID bits */
+#define ID_PFR1_GT_MASK  0x000F0000  /* Generic Timer interface support */
+#define ID_PFR1_GT_v1    0x00010000
+
+#define MSR(reg,val)        asm volatile ("msr "#reg", %0\n" : : "r" (val))
+#define MRS(val,reg)        asm volatile ("mrs %0,"#reg"\n" : "=r" (val))
+
+#ifndef __ASSEMBLY__
+extern uint32_t hyp_traps_vector[8];
+
+void panic_PAR(uint64_t par, const char *when);
+
+void show_execution_state(struct cpu_user_regs *regs);
+void show_registers(struct cpu_user_regs *regs);
+//#define dump_execution_state() run_in_exception_handler(show_execution_state)
+#define dump_execution_state() asm volatile (".word 0xe7f000f0\n"); /* XXX */
+
+#define cpu_relax() barrier() /* Could yield? */
+
+/* All a bit UP for the moment */
+#define cpu_to_core(_cpu)   (0)
+#define cpu_to_socket(_cpu) (0)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_ARM_PROCESSOR_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
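
As a sketch of how the hsr union is meant to be consumed (not part of the
patch), a trap handler can view the same 32-bit syndrome through the
sub-structure selected by the exception class:

    static void example_decode_hsr(uint32_t syndrome)
    {
        union hsr hsr = { .bits = syndrome };

        if ( hsr.ec == HSR_EC_CP15_32 )
        {
            struct hsr_cp32 cp32 = hsr.cp32;
            /* cp32.op1/op2/crn/crm name the CP15 register, cp32.reg is the
             * guest GPR (Rt), and cp32.read is set for MRC (read) accesses. */
            (void)cp32;
        }
        else if ( hsr.ec == HSR_EC_DATA_ABORT_GUEST )
        {
            struct hsr_dabt dabt = hsr.dabt;
            /* dabt.valid says whether reg/size/sign/write are usable. */
            (void)dabt;
        }
    }
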
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/regs.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/regs.h        Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,43 @@
+#ifndef __ARM_REGS_H__
+#define __ARM_REGS_H__
+
+#include <xen/types.h>
+#include <public/xen.h>
+#include <asm/processor.h>
+
+#define psr_mode(psr,m) (((psr) & PSR_MODE_MASK) == (m))
+
+#define usr_mode(r)     psr_mode((r)->cpsr,PSR_MODE_USR)
+#define fiq_mode(r)     psr_mode((r)->cpsr,PSR_MODE_FIQ)
+#define irq_mode(r)     psr_mode((r)->cpsr,PSR_MODE_IRQ)
+#define svc_mode(r)     psr_mode((r)->cpsr,PSR_MODE_SVC)
+#define mon_mode(r)     psr_mode((r)->cpsr,PSR_MODE_MON)
+#define abt_mode(r)     psr_mode((r)->cpsr,PSR_MODE_ABT)
+#define hyp_mode(r)     psr_mode((r)->cpsr,PSR_MODE_HYP)
+#define und_mode(r)     psr_mode((r)->cpsr,PSR_MODE_UND)
+#define sys_mode(r)     psr_mode((r)->cpsr,PSR_MODE_SYS)
+
+#define guest_mode(r)                                                         \
+({                                                                            \
+    unsigned long diff = (char *)guest_cpu_user_regs() - (char *)(r);         \
+    /* Frame pointer must point into current CPU stack. */                    \
+    ASSERT(diff < STACK_SIZE);                                                \
+    /* If not a guest frame, it must be a hypervisor frame. */                \
+    ASSERT((diff == 0) || hyp_mode(r));                                       \
+    /* Return TRUE if it's a guest frame. */                                  \
+    (diff == 0);                                                              \
+})
+
+#define return_reg(v) ((v)->arch.user_regs.r0)
+
+#define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs))
+
+#endif /* __ARM_REGS_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
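
A sketch of how these predicates are expected to be used on trap entry (not
part of the patch; it assumes guest_cpu_user_regs() and STACK_SIZE from
current.h and config.h are in scope, as guest_mode() requires):

    static void example_trap_entry(struct cpu_user_regs *regs)
    {
        if ( guest_mode(regs) )
        {
            /* Frame belongs to the interrupted guest: the banked registers
             * in the outer half of the frame are valid. */
        }
        else
        {
            /* Hypervisor frame: guest_mode()'s ASSERTs already checked that
             * this is a hyp_mode() frame on the current CPU stack. */
        }
    }
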
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/setup.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/setup.h       Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,16 @@
+#ifndef __ARM_SETUP_H_
+#define __ARM_SETUP_H_
+
+#include <public/version.h>
+
+void arch_get_xen_caps(xen_capabilities_info_t *info);
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/smp.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/smp.h Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,25 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#ifndef __ASSEMBLY__
+#include <xen/config.h>
+#include <xen/cpumask.h>
+#include <asm/current.h>
+#endif
+
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
+
+#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
+
+#define raw_smp_processor_id() (get_processor_id())
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/softirq.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/softirq.h     Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,15 @@
+#ifndef __ASM_SOFTIRQ_H__
+#define __ASM_SOFTIRQ_H__
+
+#define VGIC_SOFTIRQ        (NR_COMMON_SOFTIRQS + 0)
+#define NR_ARCH_SOFTIRQS       1
+
+#endif /* __ASM_SOFTIRQ_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/spinlock.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/spinlock.h    Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,144 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <xen/config.h>
+#include <xen/lib.h>
+
+static inline void dsb_sev(void)
+{
+    __asm__ __volatile__ (
+        "dsb\n"
+        "sev\n"
+        );
+}
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define _RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+#define _raw_spin_is_locked(x)          ((x)->lock != 0)
+
+static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+    ASSERT(_raw_spin_is_locked(lock));
+
+    smp_mb();
+
+    __asm__ __volatile__(
+"   str     %1, [%0]\n"
+    :
+    : "r" (&lock->lock), "r" (0)
+    : "cc");
+
+    dsb_sev();
+}
+
+static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+    unsigned long tmp;
+
+    __asm__ __volatile__(
+"   ldrex   %0, [%1]\n"
+"   teq     %0, #0\n"
+"   strexeq %0, %2, [%1]"
+    : "=&r" (tmp)
+    : "r" (&lock->lock), "r" (1)
+    : "cc");
+
+    if (tmp == 0) {
+        smp_mb();
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define _RAW_RW_LOCK_UNLOCKED { 0 }
+
+static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
+{
+    unsigned long tmp, tmp2 = 1;
+
+    __asm__ __volatile__(
+"1: ldrex   %0, [%2]\n"
+"   adds    %0, %0, #1\n"
+"   strexpl %1, %0, [%2]\n"
+    : "=&r" (tmp), "+r" (tmp2)
+    : "r" (&rw->lock)
+    : "cc");
+
+    smp_mb();
+    return tmp2 == 0;
+}
+
+static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
+{
+    unsigned long tmp;
+
+    __asm__ __volatile__(
+"1: ldrex   %0, [%1]\n"
+"   teq     %0, #0\n"
+"   strexeq %0, %2, [%1]"
+    : "=&r" (tmp)
+    : "r" (&rw->lock), "r" (0x80000000)
+    : "cc");
+
+    if (tmp == 0) {
+        smp_mb();
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+static inline void _raw_read_unlock(raw_rwlock_t *rw)
+{
+    unsigned long tmp, tmp2;
+
+    smp_mb();
+
+    __asm__ __volatile__(
+"1: ldrex   %0, [%2]\n"
+"   sub     %0, %0, #1\n"
+"   strex   %1, %0, [%2]\n"
+"   teq     %1, #0\n"
+"   bne     1b"
+    : "=&r" (tmp), "=&r" (tmp2)
+    : "r" (&rw->lock)
+    : "cc");
+
+    if (tmp == 0)
+        dsb_sev();
+}
+
+static inline void _raw_write_unlock(raw_rwlock_t *rw)
+{
+    smp_mb();
+
+    __asm__ __volatile__(
+    "str    %1, [%0]\n"
+    :
+    : "r" (&rw->lock), "r" (0)
+    : "cc");
+
+    dsb_sev();
+}
+
+#define _raw_rw_is_locked(x) ((x)->lock != 0)
+#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000)
+
+#endif /* __ASM_SPINLOCK_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
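
For reference, a minimal sketch (not part of the patch) of how the common
spinlock layer is expected to drive these primitives; cpu_relax() is the
barrier() placeholder from processor.h:

    static raw_spinlock_t example_lock = _RAW_SPIN_LOCK_UNLOCKED;

    static void example_critical_section(void)
    {
        while ( !_raw_spin_trylock(&example_lock) )
            cpu_relax();                 /* retry the ldrex/strex attempt */
        /* ... protected work ... */
        _raw_spin_unlock(&example_lock); /* str + dsb/sev wakes any waiters */
    }
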
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/string.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/string.h      Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,38 @@
+#ifndef __ARM_STRING_H__
+#define __ARM_STRING_H__
+
+#include <xen/config.h>
+
+#define __HAVE_ARCH_MEMCPY
+extern void * memcpy(void *, const void *, __kernel_size_t);
+
+/* Some versions of gcc don't have this builtin. It's non-critical anyway. */
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *dest, const void *src, size_t n);
+
+#define __HAVE_ARCH_MEMSET
+extern void * memset(void *, int, __kernel_size_t);
+
+extern void __memzero(void *ptr, __kernel_size_t n);
+
+#define memset(p,v,n)                                                   \
+        ({                                                              \
+                void *__p = (p); size_t __n = n;                        \
+                if ((__n) != 0) {                                       \
+                        if (__builtin_constant_p((v)) && (v) == 0)      \
+                                __memzero((__p),(__n));                 \
+                        else                                            \
+                                memset((__p),(v),(__n));                \
+                }                                                       \
+                (__p);                                                  \
+        })
+
+#endif /* __ARM_STRING_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
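
To make the zero-fill special case concrete, a small sketch (not part of the
patch) of what the memset() macro does with constant versus non-constant fill
values:

    static void example_buffer_fill(void)
    {
        char buf[64];

        memset(buf, 0, sizeof(buf));     /* constant 0: routed to __memzero() */
        memset(buf, 0xff, sizeof(buf));  /* anything else: the real memset()  */
    }
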
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/system.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/system.h      Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,202 @@
+/* Portions taken from Linux arch arm */
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <xen/lib.h>
+#include <asm/processor.h>
+
+#define nop() \
+    asm volatile ( "nop" )
+
+#define xchg(ptr,x) \
+        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+
+#define mb()            dsb()
+#define rmb()           dsb()
+#define wmb()           mb()
+
+#define smp_mb()        dmb()
+#define smp_rmb()       dmb()
+#define smp_wmb()       dmb()
+
+/*
+ * This is used to ensure the compiler did actually allocate the register we
+ * asked it for in some inline assembly sequences.  Apparently we can't trust
+ * the compiler from one version to another so a bit of paranoia won't hurt.
+ * This string is meant to be concatenated with the inline asm string and
+ * will cause compilation to stop on mismatch.
+ * (for details, see gcc PR 15089)
+ */
+#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
+
+extern void __bad_xchg(volatile void *, int);
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+        unsigned long ret;
+        unsigned int tmp;
+
+        smp_mb();
+
+        switch (size) {
+        case 1:
+                asm volatile("@ __xchg1\n"
+                "1:     ldrexb  %0, [%3]\n"
+                "       strexb  %1, %2, [%3]\n"
+                "       teq     %1, #0\n"
+                "       bne     1b"
+                        : "=&r" (ret), "=&r" (tmp)
+                        : "r" (x), "r" (ptr)
+                        : "memory", "cc");
+                break;
+        case 4:
+                asm volatile("@ __xchg4\n"
+                "1:     ldrex   %0, [%3]\n"
+                "       strex   %1, %2, [%3]\n"
+                "       teq     %1, #0\n"
+                "       bne     1b"
+                        : "=&r" (ret), "=&r" (tmp)
+                        : "r" (x), "r" (ptr)
+                        : "memory", "cc");
+                break;
+        default:
+                __bad_xchg(ptr, size), ret = 0;
+                break;
+        }
+        smp_mb();
+
+        return ret;
+}
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+static always_inline unsigned long __cmpxchg(
+    volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+    unsigned long /*long*/ oldval, res;
+
+    switch (size) {
+    case 1:
+        do {
+            asm volatile("@ __cmpxchg1\n"
+                         "       ldrexb  %1, [%2]\n"
+                         "       mov     %0, #0\n"
+                         "       teq     %1, %3\n"
+                         "       strexbeq %0, %4, [%2]\n"
+                         : "=&r" (res), "=&r" (oldval)
+                         : "r" (ptr), "Ir" (old), "r" (new)
+                         : "memory", "cc");
+        } while (res);
+        break;
+    case 2:
+        do {
+            asm volatile("@ __cmpxchg2\n"
+                         "       ldrexh  %1, [%2]\n"
+                         "       mov     %0, #0\n"
+                         "       teq     %1, %3\n"
+                         "       strexheq %0, %4, [%2]\n"
+                         : "=&r" (res), "=&r" (oldval)
+                         : "r" (ptr), "Ir" (old), "r" (new)
+                         : "memory", "cc");
+        } while (res);
+        break;
+    case 4:
+        do {
+            asm volatile("@ __cmpxchg4\n"
+                         "       ldrex   %1, [%2]\n"
+                         "       mov     %0, #0\n"
+                         "       teq     %1, %3\n"
+                         "       strexeq %0, %4, [%2]\n"
+                         : "=&r" (res), "=&r" (oldval)
+                         : "r" (ptr), "Ir" (old), "r" (new)
+                         : "memory", "cc");
+        } while (res);
+        break;
+#if 0
+    case 8:
+        do {
+            asm volatile("@ __cmpxchg8\n"
+                         "       ldrexd   %1, [%2]\n"
+                         "       mov      %0, #0\n"
+                         "       teq      %1, %3\n"
+                         "       strexdeq %0, %4, [%2]\n"
+                         : "=&r" (res), "=&r" (oldval)
+                         : "r" (ptr), "Ir" (old), "r" (new)
+                         : "memory", "cc");
+        } while (res);
+        break;
+#endif
+    default:
+        __bad_cmpxchg(ptr, size);
+        oldval = 0;
+    }
+
+    return oldval;
+}
+#define cmpxchg(ptr,o,n)                                                \
+    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),            \
+                                   (unsigned long)(n),sizeof(*(ptr))))
+
+#define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" )
+#define local_irq_enable()  asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" )
+
+#define local_save_flags(x)                                      \
+({                                                               \
+    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
+    asm volatile ( "mrs %0, cpsr     @ local_save_flags\n"       \
+                  : "=r" (x) :: "memory", "cc" );                \
+})
+#define local_irq_save(x)                                        \
+({                                                               \
+    local_save_flags(x);                                         \
+    local_irq_disable();                                         \
+})
+#define local_irq_restore(x)                                     \
+({                                                               \
+    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
+    asm volatile (                                               \
+            "msr     cpsr_c, %0      @ local_irq_restore\n"      \
+            :                                                    \
+            : "r" (x)                                            \
+            : "memory", "cc");                                   \
+})
+
+static inline int local_irq_is_enabled(void)
+{
+    unsigned long flags;
+    local_save_flags(flags);
+    return !(flags & PSR_IRQ_MASK);
+}
+
+#define local_fiq_enable()  __asm__("cpsie f   @ __stf\n" : : : "memory", "cc")
+#define local_fiq_disable() __asm__("cpsid f   @ __clf\n" : : : "memory", "cc")
+
+#define local_abort_enable() __asm__("cpsie a  @ __sta\n" : : : "memory", "cc")
+#define local_abort_disable() __asm__("cpsid a @ __sta\n" : : : "memory", "cc")
+
+static inline int local_fiq_is_enabled(void)
+{
+    unsigned long flags;
+    local_save_flags(flags);
+    return !(flags & PSR_FIQ_MASK);
+}
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
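
As a usage sketch of the cmpxchg() wrapper above (not part of the patch), a
lock-free add can simply retry until the compare-and-exchange reports that no
other writer intervened:

    static void example_atomic_add(volatile uint32_t *counter, uint32_t delta)
    {
        uint32_t old, seen;

        do {
            old = *counter;
            seen = cmpxchg(counter, old, old + delta);
        } while ( seen != old );         /* success iff the return value equals OLD */
    }
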
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/trace.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/trace.h       Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,12 @@
+#ifndef __ASM_TRACE_H__
+#define __ASM_TRACE_H__
+
+#endif /* __ASM_TRACE_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/types.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/types.h       Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,57 @@
+#ifndef __ARM_TYPES_H__
+#define __ARM_TYPES_H__
+
+#ifndef __ASSEMBLY__
+
+#include <xen/config.h>
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+typedef u64 paddr_t;
+#define INVALID_PADDR (~0ULL)
+#define PRIpaddr "016llx"
+
+typedef unsigned long size_t;
+
+typedef char bool_t;
+#define test_and_set_bool(b)   xchg(&(b), 1)
+#define test_and_clear_bool(b) xchg(&(b), 0)
+
+#endif /* __ASSEMBLY__ */
+
+#define BITS_PER_LONG 32
+#define BYTES_PER_LONG 4
+#define LONG_BYTEORDER 2
+
+#endif /* __ARM_TYPES_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/asm-arm/xenoprof.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/xenoprof.h    Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,12 @@
+#ifndef __ASM_XENOPROF_H__
+#define __ASM_XENOPROF_H__
+
+#endif /* __ASM_XENOPROF_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dcb04595033b -r 46adf08254f9 xen/include/public/arch-arm.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/public/arch-arm.h     Thu Feb 09 11:33:29 2012 +0000
@@ -0,0 +1,125 @@
+/******************************************************************************
+ * arch-arm.h
+ *
+ * Guest OS interface to ARM Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright 2011 (C) Citrix Systems
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_ARM_H__
+#define __XEN_PUBLIC_ARCH_ARM_H__
+
+#ifndef __ASSEMBLY__
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+    typedef struct { type *p; } __guest_handle_ ## name
+
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
+    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+#define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define __XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
+#define XEN_GUEST_HANDLE(name)          __XEN_GUEST_HANDLE(name)
+#define set_xen_guest_handle_raw(hnd, val)  do { (hnd).p = val; } while (0)
+#ifdef __XEN_TOOLS__
+#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
+#endif
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
+
+struct cpu_user_regs
+{
+    uint32_t r0;
+    uint32_t r1;
+    uint32_t r2;
+    uint32_t r3;
+    uint32_t r4;
+    uint32_t r5;
+    uint32_t r6;
+    uint32_t r7;
+    uint32_t r8;
+    uint32_t r9;
+    uint32_t r10;
+    union {
+        uint32_t r11;
+        uint32_t fp;
+    };
+    uint32_t r12;
+
+    uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */
+    uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. */
+
+    uint32_t pc; /* Return IP */
+    uint32_t cpsr; /* Return mode */
+    uint32_t pad0; /* Doubleword-align the kernel half of the frame */
+
+    /* Outer guest frame only from here on... */
+
+    uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq;
+
+    uint32_t sp_usr, sp_svc, sp_abt, sp_und, sp_irq, sp_fiq;
+    uint32_t lr_usr, lr_svc, lr_abt, lr_und, lr_irq, lr_fiq;
+
+    uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq;
+};
+typedef struct cpu_user_regs cpu_user_regs_t;
+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+
+typedef uint64_t xen_pfn_t;
+#define PRI_xen_pfn PRIx64
+
+/* Maximum number of virtual CPUs in legacy multi-processor guests. */
+/* Only one. All other VCPUs must use VCPUOP_register_vcpu_info. */
+#define XEN_LEGACY_MAX_VCPUS 1
+
+typedef uint32_t xen_ulong_t;
+
+struct vcpu_guest_context {
+    struct cpu_user_regs user_regs;         /* User-level CPU registers     */
+    union {
+        uint32_t reg[16];
+        struct {
+            uint32_t __pad[12];
+            uint32_t sp; /* r13 */
+            uint32_t lr; /* r14 */
+            uint32_t pc; /* r15 */
+        };
+    };
+};
+typedef struct vcpu_guest_context vcpu_guest_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+struct arch_vcpu_info { };
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+struct arch_shared_info { };
+typedef struct arch_shared_info arch_shared_info_t;
+#endif
+
+#endif /*  __XEN_PUBLIC_ARCH_ARM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
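
For illustration, a sketch of how the guest-handle macros are typically used in
a hypercall argument structure (not part of the patch; the handle type and
struct below are hypothetical):

    DEFINE_XEN_GUEST_HANDLE(uint32_t);

    struct example_op {
        XEN_GUEST_HANDLE(uint32_t) buffer;     /* guest pointer, wrapped */
        uint32_t nr_entries;
    };

    static void example_prepare(struct example_op *op, uint32_t *buf, uint32_t n)
    {
        set_xen_guest_handle(op->buffer, buf); /* stores buf into op->buffer.p */
        op->nr_entries = n;
    }
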
diff -r dcb04595033b -r 46adf08254f9 xen/include/public/xen.h
--- a/xen/include/public/xen.h  Thu Feb 09 11:33:29 2012 +0000
+++ b/xen/include/public/xen.h  Thu Feb 09 11:33:29 2012 +0000
@@ -33,6 +33,8 @@
 #include "arch-x86/xen.h"
 #elif defined(__ia64__)
 #include "arch-ia64.h"
+#elif defined(__arm__)
+#include "arch-arm.h"
 #else
 #error "Unsupported architecture"
 #endif

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

