[Xen-changelog] This patch is intended to make qemu support ia64/vti. We have validated
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 0cae0c6436f5fa49ab42f72ea90373cc5884d93a
# Parent  65c3b9382caac676b392189f8775de50736f51c2
This patch is intended to make qemu support ia64/vti. We have validated
it against the latest xen-unstable.hg; neither ia32 nor x86-64 is
affected.

Signed-off-by: Ke Yu <ke.yu@xxxxxxxxx>
Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Signed-off-by: Nakajima Jun <nakajima.jun@xxxxxxxxx>
Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>

diff -r 65c3b9382caa -r 0cae0c6436f5 tools/ioemu/cpu-all.h
--- a/tools/ioemu/cpu-all.h     Sat Nov  5 10:26:29 2005
+++ b/tools/ioemu/cpu-all.h     Sat Nov  5 10:30:01 2005
@@ -625,6 +625,47 @@
 int cpu_inl(CPUState *env, int addr);
 #endif
 
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ void atomic_set_bit(long nr, volatile void *addr)
+{
+        __asm__ __volatile__(
+                "lock ; bts %1,%0"
+                :"=m" (*(volatile long *)addr)
+                :"dIr" (nr));
+}
+static __inline__ void atomic_clear_bit(long nr, volatile void *addr)
+{
+        __asm__ __volatile__(
+                "lock ; btr %1,%0"
+                :"=m" (*(volatile long *)addr)
+                :"dIr" (nr));
+}
+#elif defined(__ia64__)
+#include "ia64_intrinsic.h"
+#define atomic_set_bit(nr, addr) ({                                    \
+        typeof(*addr) bit, old, new;                                   \
+        volatile typeof(*addr) *m;                                     \
+                                                                       \
+        m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr)));  \
+        bit = 1 << (nr % (8*sizeof(*addr)));                           \
+        do {                                                           \
+                old = *m;                                              \
+                new = old | bit;                                       \
+        } while (cmpxchg_acq(m, old, new) != old);                     \
+})
+
+#define atomic_clear_bit(nr, addr) ({                                  \
+        typeof(*addr) bit, old, new;                                   \
+        volatile typeof(*addr) *m;                                     \
+                                                                       \
+        m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr)));  \
+        bit = ~(1 << (nr % (8*sizeof(*addr))));                        \
+        do {                                                           \
+                old = *m;                                              \
+                new = old & bit;                                       \
+        } while (cmpxchg_acq(m, old, new) != old);                     \
+})
+#endif
+
 /* memory API */
 
 extern int phys_ram_size;
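[Editorial note: to see the shape of the ia64 fallback above outside macro form, here is a minimal standalone sketch of the same compare-and-swap bit-set loop. GCC's __sync_val_compare_and_swap stands in for cmpxchg_acq so the sketch compiles on any architecture; the function name and harness are illustrative, not part of the patch.]

    /* Sketch of the CAS retry loop behind the ia64 atomic_set_bit() above. */
    #include <stdio.h>

    static void sketch_set_bit(long nr, volatile unsigned long *addr)
    {
        volatile unsigned long *m = addr + nr / (8 * sizeof(*addr));
        unsigned long bit = 1UL << (nr % (8 * sizeof(*addr)));
        unsigned long old, want;

        do {
            old = *m;
            want = old | bit;
            /* retry if another CPU changed *m between the load and the CAS */
        } while (__sync_val_compare_and_swap(m, old, want) != old);
    }

    int main(void)
    {
        unsigned long word = 0;
        sketch_set_bit(5, &word);
        printf("%#lx\n", word);   /* prints 0x20 */
        return 0;
    }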
diff -r 65c3b9382caa -r 0cae0c6436f5 tools/ioemu/cpu.h
--- a/tools/ioemu/cpu.h Sat Nov  5 10:26:29 2005
+++ b/tools/ioemu/cpu.h Sat Nov  5 10:30:01 2005
@@ -63,7 +63,11 @@
 /* MSDOS compatibility mode FPU exception support */
 void cpu_set_ferr(CPUX86State *s);
 
+#if defined(__i386__) || defined(__x86_64__)
 #define TARGET_PAGE_BITS 12
+#elif defined(__ia64__)
+#define TARGET_PAGE_BITS 14
+#endif
 #include "cpu-all.h"
 
 #endif /* CPU_I386_H */
diff -r 65c3b9382caa -r 0cae0c6436f5 tools/ioemu/exec-all.h
--- a/tools/ioemu/exec-all.h    Sat Nov  5 10:26:29 2005
+++ b/tools/ioemu/exec-all.h    Sat Nov  5 10:30:01 2005
@@ -433,6 +433,15 @@
 }
 #endif
 
+#ifdef __ia64__
+#include "ia64_intrinsic.h"
+static inline int testandset (int *p)
+{
+    uint32_t o = 0, n = 1;
+    return (int)cmpxchg_acq(p, o, n);
+}
+#endif
+
 #ifdef __s390__
 static inline int testandset (int *p)
 {
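[Editorial note: testandset() is the primitive qemu's spin_lock() builds on. A minimal sketch of that usage, again with GCC's __sync_val_compare_and_swap standing in for cmpxchg_acq; the lock type and harness are illustrative.]

    /* Sketch: qemu-style spin locks over testandset(). */
    #include <stdio.h>

    typedef int spinlock_t;

    static inline int testandset(int *p)
    {
        /* returns the previous value: 0 means we took the lock */
        return (int)__sync_val_compare_and_swap(p, 0, 1);
    }

    static inline void spin_lock(spinlock_t *lock)
    {
        while (testandset(lock))
            ;   /* busy-wait until the CAS observes 0 and installs 1 */
    }

    static inline void spin_unlock(spinlock_t *lock)
    {
        *lock = 0;
    }

    int main(void)
    {
        spinlock_t l = 0;
        spin_lock(&l);
        printf("locked: %d\n", l);   /* 1 */
        spin_unlock(&l);
        return 0;
    }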
diff -r 65c3b9382caa -r 0cae0c6436f5 tools/ioemu/exec.c
--- a/tools/ioemu/exec.c        Sat Nov  5 10:26:29 2005
+++ b/tools/ioemu/exec.c        Sat Nov  5 10:30:01 2005
@@ -359,6 +359,22 @@
     }
     return 0;
 }
+
+#ifdef __ia64__
+/* IA64 has separate I/D caches, with coherence maintained by the DMA
+ * controller. To emulate the behavior the guest OS assumes, we need to
+ * flush the I/D caches here.
+ */
+static void sync_icache(unsigned long address, int len)
+{
+    int l;
+
+    for (l = 0; l < (len + 32); l += 32)
+        __ia64_fc(address + l);
+
+    ia64_sync_i();
+    ia64_srlz_i();
+}
+#endif
 
 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                             int len, int is_write)
@@ -402,6 +418,9 @@
                 /* RAM case */
                 ptr = phys_ram_base + addr1;
                 memcpy(ptr, buf, l);
+#ifdef __ia64__
+                sync_icache((unsigned long)ptr, l);
+#endif
             }
         } else {
             if (io_index) {
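[Editorial note: the hunk above handles a classic split-cache hazard: bytes stored into guest RAM may later be executed as code, so the instruction cache must be made coherent after the store. A hedged sketch of the same pattern, with GCC's portable __builtin___clear_cache substituted for the ia64 fc / sync.i / srlz.i sequence; the function name is illustrative.]

    #include <string.h>

    /* After writing bytes the guest may execute, make the split I-cache
     * coherent with the D-cache, as sync_icache() does on ia64. */
    static void write_guest_code(char *dst, const char *src, int len)
    {
        memcpy(dst, src, len);                    /* lands in the D-cache */
        __builtin___clear_cache(dst, dst + len);  /* make the I-cache see it */
    }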
diff -r 65c3b9382caa -r 0cae0c6436f5 tools/ioemu/hw/i8259_stub.c
--- a/tools/ioemu/hw/i8259_stub.c       Sat Nov  5 10:26:29 2005
+++ b/tools/ioemu/hw/i8259_stub.c       Sat Nov  5 10:30:01 2005
@@ -26,21 +26,6 @@
 #include <stdio.h>
 #include "cpu.h"
 #include "cpu-all.h"
-
-static __inline__ void atomic_set_bit(long nr, volatile void *addr)
-{
-        __asm__ __volatile__(
-                "lock ; bts %1,%0"
-                :"=m" (*(volatile long *)addr)
-                :"dIr" (nr));
-}
-static __inline__ void atomic_clear_bit(long nr, volatile void *addr)
-{
-        __asm__ __volatile__(
-                "lock ; btr %1,%0"
-                :"=m" (*(volatile long *)addr)
-                :"dIr" (nr));
-}
 #include <vl.h>
 
 extern shared_iopage_t *shared_page;
diff -r 65c3b9382caa -r 0cae0c6436f5 tools/ioemu/hw/iommu.c
--- a/tools/ioemu/hw/iommu.c    Sat Nov  5 10:26:29 2005
+++ b/tools/ioemu/hw/iommu.c    Sat Nov  5 10:30:01 2005
@@ -107,7 +107,11 @@
 #define IOPTE_VALID     0x00000002 /* IOPTE is valid */
 #define IOPTE_WAZ       0x00000001 /* Write as zeros */
 
+#if defined(__i386__) || defined(__x86_64__)
 #define PAGE_SHIFT      12
+#elif defined(__ia64__)
+#define PAGE_SHIFT      14
+#endif
 #define PAGE_SIZE       (1 << PAGE_SHIFT)
 #define PAGE_MASK       (PAGE_SIZE - 1)
 
diff -r 65c3b9382caa -r 0cae0c6436f5 tools/ioemu/hw/vga.c
--- a/tools/ioemu/hw/vga.c      Sat Nov  5 10:26:29 2005
+++ b/tools/ioemu/hw/vga.c      Sat Nov  5 10:30:01 2005
@@ -1879,7 +1879,11 @@
     /* qemu's vga mem is not detached from phys_ram_base and can cause a DM
      * abort when the guest writes vga mem, so allocate a new one */
+#if defined(__i386__) || defined(__x86_64__)
     s->vram_ptr = shared_vram;
+#else
+    s->vram_ptr = qemu_malloc(vga_ram_size);
+#endif
     s->vram_offset = vga_ram_offset;
     s->vram_size = vga_ram_size;
 
diff -r 65c3b9382caa -r 0cae0c6436f5 tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c      Sat Nov  5 10:26:29 2005
+++ b/tools/ioemu/target-i386-dm/helper2.c      Sat Nov  5 10:30:01 2005
@@ -389,14 +389,6 @@
 
 int xc_handle;
 
-static __inline__ void atomic_set_bit(long nr, volatile void *addr)
-{
-        __asm__ __volatile__(
-                "lock ; bts %1,%0"
-                :"=m" (*(volatile long *)addr)
-                :"dIr" (nr));
-}
-
 void destroy_vmx_domain(void)
 {
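[Editorial note: the alternative PAGE_SHIFT values above encode the page sizes involved: 1 << 12 is the 4 KB x86 page, while 1 << 14 is the 16 KB page Linux conventionally uses on ia64, mirrored by TARGET_PAGE_BITS in the cpu.h hunk. Note that this file's PAGE_MASK is the offset-within-page mask. A trivial check:]

    #include <stdio.h>

    int main(void)
    {
        printf("x86  PAGE_SIZE = %d\n", 1 << 12);            /* 4096   */
        printf("ia64 PAGE_SIZE = %d\n", 1 << 14);            /* 16384  */
        printf("ia64 offset mask = 0x%x\n", (1 << 14) - 1);  /* 0x3fff */
        return 0;
    }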
diff -r 65c3b9382caa -r 0cae0c6436f5 tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Sat Nov  5 10:26:29 2005
+++ b/tools/ioemu/vl.c  Sat Nov  5 10:30:01 2005
@@ -22,6 +22,9 @@
  * THE SOFTWARE.
  */
 #include "vl.h"
+#ifdef __ia64__
+#include <xen/arch-ia64.h>
+#endif
 
 #include <unistd.h>
 #include <fcntl.h>
@@ -517,6 +520,11 @@
     val |= low;
     return val;
 }
+
+#elif defined(__ia64__)
+#include "ia64_intrinsic.h"
+#define cpu_get_real_ticks() \
+    __ia64_getreg(_IA64_REG_AR_ITC)
 #else
 #error unsupported CPU
 
@@ -2375,6 +2383,7 @@
 
 #include <xg_private.h>
 
+#if defined(__i386__) || defined(__x86_64__)
 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
 
@@ -2544,6 +2553,10 @@
         /* FIXME Flush the shadow page */
         unsetup_mapping(xc_handle, domid, toptab, addr, end);
 }
+#elif defined(__ia64__)
+void set_vram_mapping(unsigned long addr, unsigned long end) {}
+void unset_vram_mapping(unsigned long addr, unsigned long end) {}
+#endif
 
 int main(int argc, char **argv)
 {
@@ -3018,9 +3031,14 @@
 
     phys_ram_size = ram_size + vga_ram_size + bios_size;
     ram_pages = ram_size/PAGE_SIZE;
+#if defined(__i386__) || defined(__x86_64__)
     vgaram_pages = (vga_ram_size - 1)/PAGE_SIZE + 1;
     free_pages = vgaram_pages / L1_PAGETABLE_ENTRIES;
    extra_pages = vgaram_pages + free_pages;
+#else
+    /* Test vga acceleration later */
+    extra_pages = 0;
+#endif
 
     xc_handle = xc_interface_open();
 
@@ -3049,6 +3067,7 @@
         exit(-1);
     }
 
+#if defined(__i386__) || defined(__x86_64__)
     if ( xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages )
     {
         perror("xc_get_pfn_list");
@@ -3077,8 +3096,6 @@
         exit(-1);
     }
 
-
-
     memset(shared_vram, 0, vgaram_pages * PAGE_SIZE);
 
     toptab = page_array[ram_pages] << PAGE_SHIFT;
 
@@ -3087,7 +3104,31 @@
                                        page_array[ram_pages]);
 
     freepage_array = &page_array[nr_pages - extra_pages];
-
+#elif defined(__ia64__)
+    if ( xc_ia64_get_pfn_list(xc_handle, domid, page_array,
+                              0, ram_pages) != ram_pages )
+    {
+        perror("xc_ia64_get_pfn_list");
+        exit(-1);
+    }
+
+    if ((phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
+                                              PROT_READ|PROT_WRITE,
+                                              page_array,
+                                              ram_pages)) == 0) {
+        perror("xc_map_foreign_batch");
+        exit(-1);
+    }
+
+    if ( xc_ia64_get_pfn_list(xc_handle, domid, page_array,
+                              IO_PAGE_START >> PAGE_SHIFT, 1) != 1 )
+    {
+        perror("xc_ia64_get_pfn_list");
+        exit(-1);
+    }
+
+    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+                                       PROT_READ|PROT_WRITE,
+                                       page_array[0]);
+#endif
     fprintf(logfile, "shared page at pfn:%lx, mfn: %lx\n",
             (nr_pages-1), (page_array[nr_pages - 1]));
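[Editorial note: the new cpu_get_real_ticks() definition reads ar.itc, ia64's free-running interval time counter, which qemu's timing code samples to compute elapsed cycles. A sketch of that usage pattern; clock() is substituted for the counter so the harness runs anywhere, and everything here besides the cpu_get_real_ticks() name is illustrative.]

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Stand-in for qemu's per-arch cpu_get_real_ticks(); on ia64 the
     * patch reads ar.itc, here we substitute clock() for portability. */
    static int64_t cpu_get_real_ticks(void)
    {
        return (int64_t)clock();
    }

    int main(void)
    {
        int64_t t0 = cpu_get_real_ticks();
        volatile long sink = 0;
        for (long i = 0; i < 1000000; i++)
            sink += i;                     /* some work to time */
        printf("elapsed ticks: %lld\n",
               (long long)(cpu_get_real_ticks() - t0));
        return 0;
    }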
"=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_PSR: \ + asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_TP: /* for current() */ \ + ia64_intri_res = ia64_r13; \ + break; \ + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ + asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \ + : "i"(regnum - _IA64_REG_AR_KR0)); \ + break; \ + case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \ + asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \ + : "i" (regnum - _IA64_REG_CR_DCR)); \ + break; \ + case _IA64_REG_SP: \ + asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \ + break; \ + default: \ + ia64_bad_param_for_getreg(); \ + break; \ + } \ + ia64_intri_res; \ +}) + +#endif /* __INTEL_COMPILER */ +#endif /* IA64_INTRINSIC_H */ _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog