
[Xen-changelog] [IA64] FPH enabling + cleanup



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID ced37bea064709d2290c3f22a9f43e53c1d47b70
# Parent  d23c088eac6dcc79c8319775bfe51c3e12103702
[IA64] FPH enabling + cleanup

Move contents of switch_to macro from xensystem.h to context_switch function.
Initialize FPU on all processors.  FPH is always enabled in Xen.
Speed up context-switch (a little bit!) by not enabling/disabling FPH.
Cleanup (unused functions/variables/fields, debug printfs...)
vmx_ia64_switch_to removed (was unused).

Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
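
The net effect on the context-switch path is summarized by the following
condensed sketch of the new context_switch() in xen/arch/ia64/xen/xenmisc.c
(see the hunk below).  The debug heartbeat and the IVA/PTA/region-register
setup for the incoming vcpu are elided; this is a sketch, not a verbatim copy:

    void context_switch(struct vcpu *prev, struct vcpu *next)
    {
        unsigned long spsr;

        local_irq_save(spsr);
        context_switch_count++;

        /* FPH stays enabled, so fph state is saved/loaded unconditionally
           instead of toggling PSR.dfh around every switch. */
        __ia64_save_fpu(prev->arch._thread.fph);
        __ia64_load_fpu(next->arch._thread.fph);
        if (VMX_DOMAIN(prev))
                vmx_save_state(prev);
        if (VMX_DOMAIN(next))
                vmx_load_state(next);

        prev = ia64_switch_to(next);    /* switch stack and "current" */
        if (!VMX_DOMAIN(current))
                vcpu_set_next_timer(current);

        /* ... IVA/PTA/region-register setup for the incoming vcpu ... */
        local_irq_restore(spsr);
        context_saved(prev);
    }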

diff -r d23c088eac6d -r ced37bea0647 xen/arch/ia64/linux-xen/setup.c
--- a/xen/arch/ia64/linux-xen/setup.c   Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/arch/ia64/linux-xen/setup.c   Tue Apr 25 22:35:41 2006 -0600
@@ -384,7 +384,9 @@ setup_arch (char **cmdline_p)
 {
        unw_init();
 
+#ifndef XEN
        ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
+#endif
 
        *cmdline_p = __va(ia64_boot_param->command_line);
 #ifndef XEN
@@ -870,6 +872,11 @@ cpu_init (void)
 #endif
                BUG();
 
+#ifdef XEN
+       ia64_fph_enable();
+       __ia64_init_fpu();
+#endif
+
        ia64_mmu_init(ia64_imva(cpu_data));
        ia64_mca_cpu_init(ia64_imva(cpu_data));
 
@@ -931,9 +938,11 @@ cpu_init (void)
 #endif
 }
 
+#ifndef XEN
 void
 check_bugs (void)
 {
        ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
                               (unsigned long) __end___mckinley_e9_bundles);
 }
+#endif
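
(ia64_fph_enable() is inherited from the Linux ia64 headers, which this patch
does not show.  Assuming the standard Linux definition, it simply clears
PSR.dfh so that accesses to the high floating-point partition no longer fault:

    /* Assumed definition, from Linux include/asm-ia64/processor.h. */
    #define ia64_fph_enable()  do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

Calling it from cpu_init(), which every processor executes, rather than from
start_kernel(), which only the boot CPU runs, is what enables FPH on all
CPUs; the matching removal from start_kernel() is in the xensetup.c hunk
further down.)
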
diff -r d23c088eac6d -r ced37bea0647 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S     Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_entry.S     Tue Apr 25 22:35:41 2006 -0600
@@ -36,48 +36,6 @@
 #include <asm/vhpt.h>
 #include <asm/vmmu.h>
 #include "vmx_minstate.h"
-
-/*
- * prev_task <- vmx_ia64_switch_to(struct task_struct *next)
- *     With Ingo's new scheduler, interrupts are disabled when this routine gets
- *     called.  The code starting at .map relies on this.  The rest of the code
- *     doesn't care about the interrupt masking status.
- *
- * Since we allocate domain stack in xenheap, there's no need to map new
- * domain's stack since all xenheap is mapped by TR. Another different task
- * for vmx_ia64_switch_to is to switch to bank0 and change current pointer.
- */
-GLOBAL_ENTRY(vmx_ia64_switch_to)
-       .prologue
-       alloc r16=ar.pfs,1,0,0,0
-       DO_SAVE_SWITCH_STACK
-       .body
-
-       bsw.0   // Switch to bank0, because bank0 r21 is current pointer
-       ;;
-       adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
-       movl r25=init_task
-       adds r26=IA64_TASK_THREAD_KSP_OFFSET,in0
-       ;;
-       st8 [r22]=sp                    // save kernel stack pointer of old task
-       ;;
-       /*
-        * TR always mapped this task's page, we can skip doing it again.
-        */
-       ld8 sp=[r26]                    // load kernel stack pointer of new task
-       mov r21=in0                     // update "current" application register
-       mov r8=r13                      // return pointer to previously running task
-       mov r13=in0                     // set "current" pointer
-       ;;
-       bsw.1
-       ;;
-       DO_LOAD_SWITCH_STACK
-
-#ifdef CONFIG_SMP
-       sync.i                          // ensure "fc"s done by this CPU are visible on other CPUs
-#endif
-       br.ret.sptk.many rp             // boogie on out in new context
-END(vmx_ia64_switch_to)
 
 GLOBAL_ENTRY(ia64_leave_nested)
        rsm psr.i
diff -r d23c088eac6d -r ced37bea0647 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c        Tue Apr 25 22:35:41 2006 -0600
@@ -72,11 +72,8 @@ extern unsigned long running_on_sim;
 #define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))
 
 /* FIXME: where these declarations should be there ? */
-extern void domain_pend_keyboard_interrupt(int);
 extern long platform_is_hp_ski(void);
-extern void sync_split_caches(void);
 extern void serial_input_init(void);
-
 static void init_switch_stack(struct vcpu *v);
 void build_physmap_table(struct domain *d);
 
@@ -145,23 +142,6 @@ void startup_cpu_idle_loop(void)
        /* Just some sanity to ensure that the scheduler is set up okay. */
        ASSERT(current->domain == IDLE_DOMAIN_ID);
        raise_softirq(SCHEDULE_SOFTIRQ);
-#if 0
-//do we have to ensure the idle task has a shared page so that, for example,
-//region registers can be loaded from it.  Apparently not...
-       idle0_task.shared_info = (void *)alloc_xenheap_page();
-       memset(idle0_task.shared_info, 0, PAGE_SIZE);
-       /* pin mapping */
-       // FIXME: Does this belong here?  Or do only at domain switch time?
-       {
-               /* WARNING: following must be inlined to avoid nested fault */
-               unsigned long psr = ia64_clear_ic();
-               ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
-                pte_val(pfn_pte(ia64_tpa(idle0_task.shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
-                PAGE_SHIFT);
-               ia64_set_psr(psr);
-               ia64_srlz_i();
-       }
-#endif
 
        continue_cpu_idle_loop();
 }
@@ -304,7 +284,6 @@ void arch_getdomaininfo_ctxt(struct vcpu
 {
        struct pt_regs *regs = vcpu_regs (v);
 
-       printf("arch_getdomaininfo_ctxt\n");
        c->regs = *regs;
        c->vcpu.evtchn_vector = v->vcpu_info->arch.evtchn_vector;
 
@@ -316,7 +295,6 @@ int arch_set_info_guest(struct vcpu *v, 
        struct pt_regs *regs = vcpu_regs (v);
        struct domain *d = v->domain;
 
-       printf("arch_set_info_guest\n");
        if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
             return 0;
        if (c->flags & VGCF_VMX_GUEST) {
@@ -1237,9 +1215,8 @@ void alloc_dom0(void)
        dom0_start = alloc_boot_pages(dom0_size >> PAGE_SHIFT, dom0_align >> PAGE_SHIFT);
        dom0_start <<= PAGE_SHIFT;
        if (!dom0_start) {
-       printf("alloc_dom0: can't allocate contiguous memory size=%lu\n",
+         panic("alloc_dom0: can't allocate contiguous memory size=%lu\n",
                dom0_size);
-       while(1);
        }
        printf("alloc_dom0: dom0_start=0x%lx\n", dom0_start);
 #else
@@ -1495,17 +1472,6 @@ void dummy_called(char *function)
        while(1);
 }
 
-
-#if 0
-void switch_to(struct vcpu *prev, struct vcpu *next)
-{
-       struct vcpu *last;
-
-       __switch_to(prev,next,last);
-       //set_current(next);
-}
-#endif
-
 void domain_pend_keyboard_interrupt(int irq)
 {
        vcpu_pend_interrupt(dom0->vcpu[0],irq);
@@ -1513,13 +1479,9 @@ void domain_pend_keyboard_interrupt(int 
 
 void sync_vcpu_execstate(struct vcpu *v)
 {
-       ia64_save_fpu(v->arch._thread.fph);
+       __ia64_save_fpu(v->arch._thread.fph);
        if (VMX_DOMAIN(v))
                vmx_save_state(v);
-       else {
-               if (IA64_HAS_EXTRA_STATE(v))
-                       ia64_save_extra(v);
-       }
        // FIXME SMP: Anything else needed here for SMP?
 }
 
diff -r d23c088eac6d -r ced37bea0647 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c       Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/arch/ia64/xen/process.c       Tue Apr 25 22:35:41 2006 -0600
@@ -219,9 +219,6 @@ void reflect_interruption(unsigned long 
 
        regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
-#ifdef CONFIG_SMP
-#warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
-#endif
        regs->r31 = XSI_IPSR;
 
        v->vcpu_info->evtchn_upcall_mask = 1;
diff -r d23c088eac6d -r ced37bea0647 xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c       Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/arch/ia64/xen/xenmisc.c       Tue Apr 25 22:35:41 2006 -0600
@@ -1,6 +1,6 @@
 /*
  * Xen misc
- * 
+ *
  * Functions/decls that are/may be needed to link with Xen because
  * of x86 dependencies
  *
@@ -21,11 +21,8 @@
 #include <asm/debugger.h>
 #include <asm/vmx.h>
 #include <asm/vmx_vcpu.h>
-
-efi_memory_desc_t ia64_efi_io_md;
-EXPORT_SYMBOL(ia64_efi_io_md);
-unsigned long wait_init_idle;
-int phys_proc_id[NR_CPUS];
+#include <asm/vcpu.h>
+
 unsigned long loops_per_jiffy = (1<<12);       // from linux/init/main.c
 
 /* FIXME: where these declarations should be there ? */
@@ -33,8 +30,6 @@ extern void show_registers(struct pt_reg
 
 void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
 void ia64_mca_cpu_init(void *x) { }
-void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
-void ia64_patch_vtop(unsigned long a, unsigned long b) { }
 void hpsim_setup(char **x)
 {
 #ifdef CONFIG_SMP
@@ -68,21 +63,8 @@ platform_is_hp_ski(void)
        return running_on_sim;
 }
 
-/* calls in xen/common code that are unused on ia64 */
-
-void sync_lazy_execstate_cpu(unsigned int cpu) {}
-
-#if 0
-int grant_table_create(struct domain *d) { return 0; }
-void grant_table_destroy(struct domain *d) { return; }
-#endif
 
 struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }
-
-void raise_actimer_softirq(void)
-{
-       raise_softirq(TIMER_SOFTIRQ);
-}
 
 unsigned long
 gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
@@ -127,15 +109,12 @@ u32 tlbflush_time[NR_CPUS];
 ///////////////////////////////
 
 
-void free_page_type(struct page_info *page, u32 type)
-{
-//     dummy();
-       return;
-}
-
-int alloc_page_type(struct page_info *page, u32 type)
-{
-//     dummy();
+static void free_page_type(struct page_info *page, u32 type)
+{
+}
+
+static int alloc_page_type(struct page_info *page, u32 type)
+{
        return 1;
 }
 
@@ -161,7 +140,7 @@ void *pgtable_quicklist_alloc(void)
 {
     void *p;
     p = alloc_xenheap_pages(0);
-    if (p) 
+    if (p)
         clear_page(p);
     return p;
 }
@@ -276,12 +255,10 @@ void *__module_text_address(unsigned lon
 void *__module_text_address(unsigned long addr) { return NULL; }
 void *module_text_address(unsigned long addr) { return NULL; }
 
-void cs10foo(void) {}
-void cs01foo(void) {}
-
 unsigned long context_switch_count = 0;
 
-#include <asm/vcpu.h>
+extern struct vcpu *ia64_switch_to (struct vcpu *next_task);
+
 
 void context_switch(struct vcpu *prev, struct vcpu *next)
 {
@@ -289,14 +266,20 @@ void context_switch(struct vcpu *prev, s
     uint64_t pta;
 
     local_irq_save(spsr);
-//    if(VMX_DOMAIN(prev)){
-//     vtm_domain_out(prev);
-//    }
-       context_switch_count++;
-       switch_to(prev,next,prev);
-//    if(VMX_DOMAIN(current)){
-//        vtm_domain_in(current);
-//    }
+    context_switch_count++;
+
+    __ia64_save_fpu(prev->arch._thread.fph);
+    __ia64_load_fpu(next->arch._thread.fph);
+    if (VMX_DOMAIN(prev))
+           vmx_save_state(prev);
+    if (VMX_DOMAIN(next))
+           vmx_load_state(next);
+    /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
+    prev = ia64_switch_to(next);
+    if (!VMX_DOMAIN(current)){
+           vcpu_set_next_timer(current);
+    }
+
 
 // leave this debug for now: it acts as a heartbeat when more than
 // one domain is active
@@ -309,28 +292,26 @@ if (!i--) { printk("+"); i = 1000000; }
 }
 
     if (VMX_DOMAIN(current)){
-//        vtm_domain_in(current);
                vmx_load_all_rr(current);
     }else{
        extern char ia64_ivt;
        ia64_set_iva(&ia64_ivt);
        if (!is_idle_domain(current->domain)) {
                ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
-                       VHPT_ENABLED);
+                            VHPT_ENABLED);
                load_region_regs(current);
                vcpu_load_kernel_regs(current);
-                   if (vcpu_timer_expired(current))
-                vcpu_pend_timer(current);
+               if (vcpu_timer_expired(current))
+                       vcpu_pend_timer(current);
        }else {
-        /* When switching to idle domain, only need to disable vhpt
-        * walker. Then all accesses happen within idle context will
-        * be handled by TR mapping and identity mapping.
-        */
-           pta = ia64_get_pta();
-           ia64_set_pta(pta & ~VHPT_ENABLED);
+               /* When switching to idle domain, only need to disable vhpt
+                * walker. Then all accesses happen within idle context will
+                * be handled by TR mapping and identity mapping.
+                */
+               pta = ia64_get_pta();
+               ia64_set_pta(pta & ~VHPT_ENABLED);
         }
     }
-
     local_irq_restore(spsr);
     context_saved(prev);
 }
@@ -349,9 +330,9 @@ void panic_domain(struct pt_regs *regs, 
        va_list args;
        char buf[128];
        struct vcpu *v = current;
-    
+
        printf("$$$$$ PANIC in domain %d (k6=0x%lx): ",
-               v->domain->domain_id, 
+               v->domain->domain_id,
                __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
        va_start(args, fmt);
        (void)vsnprintf(buf, sizeof(buf), fmt, args);
@@ -395,19 +376,19 @@ void put_page_type(struct page_info *pag
         ASSERT((x & PGT_count_mask) != 0);
 
         /*
-         * The page should always be validated while a reference is held. The 
-         * exception is during domain destruction, when we forcibly invalidate 
+         * The page should always be validated while a reference is held. The
+         * exception is during domain destruction, when we forcibly invalidate
          * page-table pages if we detect a referential loop.
          * See domain.c:relinquish_list().
          */
-        ASSERT((x & PGT_validated) || 
+        ASSERT((x & PGT_validated) ||
                test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));
 
         if ( unlikely((nx & PGT_count_mask) == 0) )
         {
             /* Record TLB information for flush later. Races are harmless. */
             page->tlbflush_timestamp = tlbflush_current_time();
-            
+
             if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
                  likely(nx & PGT_validated) )
             {
@@ -416,7 +397,7 @@ void put_page_type(struct page_info *pag
                  * 'free' is safe because the refcnt is non-zero and validated
                  * bit is clear => other ops will spin or fail.
                  */
-                if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, 
+                if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
                                            x & ~PGT_validated)) != x) )
                     goto again;
                 /* We cleared the 'valid bit' so we do the clean up. */
@@ -426,7 +407,7 @@ void put_page_type(struct page_info *pag
                 nx &= ~PGT_validated;
             }
         }
-        else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) == 
+        else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) ==
                             (PGT_pinned | 1)) &&
                            ((nx & PGT_type_mask) != PGT_writable_page)) )
         {
diff -r d23c088eac6d -r ced37bea0647 xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/arch/ia64/xen/xensetup.c      Tue Apr 25 22:35:41 2006 -0600
@@ -319,9 +319,6 @@ void start_kernel(void)
 
     init_frametable();
 
-    ia64_fph_enable();
-    __ia64_init_fpu();
-
     alloc_dom0();
 
     end_boot_allocator();
diff -r d23c088eac6d -r ced37bea0647 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/include/asm-ia64/domain.h     Tue Apr 25 22:35:41 2006 -0600
@@ -63,7 +63,6 @@ struct arch_domain {
     offsetof(vcpu_info_t, evtchn_upcall_mask))
 
 struct arch_vcpu {
-#if 1
        TR_ENTRY itrs[NITRS];
        TR_ENTRY dtrs[NDTRS];
        TR_ENTRY itlb;
@@ -81,8 +80,11 @@ struct arch_vcpu {
        unsigned long domain_itm;
        unsigned long domain_itm_last;
        unsigned long xen_itm;
-#endif
+
     mapped_regs_t *privregs; /* save the state of vcpu */
+
+    /* These fields are copied from arch_domain to make access easier/faster
+       in assembly code.  */
     unsigned long metaphysical_rr0;            // from arch_domain (so is pinned)
     unsigned long metaphysical_rr4;            // from arch_domain (so is pinned)
     unsigned long metaphysical_saved_rr0;      // from arch_domain (so is pinned)
@@ -90,6 +92,7 @@ struct arch_vcpu {
     int breakimm;                      // from arch_domain (so is pinned)
     int starting_rid;          /* first RID assigned to domain */
     int ending_rid;            /* one beyond highest RID assigned to domain */
+
     struct thread_struct _thread;      // this must be last
 
     thash_cb_t vtlb;
@@ -108,58 +111,9 @@ struct arch_vcpu {
 // FOLLOWING FROM linux-2.6.7/include/sched.h
 
 struct mm_struct {
-       struct vm_area_struct * mmap;           /* list of VMAs */
-#ifndef XEN
-       struct rb_root mm_rb;
-#endif
-       struct vm_area_struct * mmap_cache;     /* last find_vma result */
-       unsigned long free_area_cache;          /* first hole */
        pgd_t * pgd;
-       atomic_t mm_users;                      /* How many users with user space? */
-       atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
-       int map_count;                          /* number of VMAs */
-#ifndef XEN
-       struct rw_semaphore mmap_sem;
-#endif
-       spinlock_t page_table_lock;             /* Protects task page tables and mm->rss */
-
+    // atomic_t mm_users;                      /* How many users with user space? */
        struct list_head pt_list;               /* List of pagetable */
-
-       struct list_head mmlist;                /* List of all active mm's.  These are globally strung
-                                                * together off init_mm.mmlist, and are protected
-                                                * by mmlist_lock
-                                                */
-
-#ifndef XEN
-       unsigned long start_code, end_code, start_data, end_data;
-       unsigned long start_brk, brk, start_stack;
-       unsigned long arg_start, arg_end, env_start, env_end;
-       unsigned long rss, total_vm, locked_vm;
-       unsigned long def_flags;
-
-       unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
-
-       unsigned dumpable:1;
-#endif
-#ifdef CONFIG_HUGETLB_PAGE
-       int used_hugetlb;
-#endif
-#ifndef XEN
-       cpumask_t cpu_vm_mask;
-
-       /* Architecture-specific MM context */
-       mm_context_t context;
-
-       /* coredumping support */
-       int core_waiters;
-       struct completion *core_startup_done, core_done;
-
-       /* aio bits */
-       rwlock_t                ioctx_list_lock;
-       struct kioctx           *ioctx_list;
-
-       struct kioctx           default_kioctx;
-#endif
 };
 
 extern struct mm_struct init_mm;
diff -r d23c088eac6d -r ced37bea0647 xen/include/asm-ia64/linux-xen/asm/io.h
--- a/xen/include/asm-ia64/linux-xen/asm/io.h   Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/io.h   Tue Apr 25 22:35:41 2006 -0600
@@ -23,7 +23,9 @@
 #define __SLOW_DOWN_IO do { } while (0)
 #define SLOW_DOWN_IO   do { } while (0)
 
-#ifndef XEN
+#ifdef XEN
+#include <asm/xensystem.h>
+#else
 #define __IA64_UNCACHED_OFFSET 0xc000000000000000UL    /* region 6 */
 #endif
 
diff -r d23c088eac6d -r ced37bea0647 xen/include/asm-ia64/linux-xen/asm/page.h
--- a/xen/include/asm-ia64/linux-xen/asm/page.h Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/page.h Tue Apr 25 22:35:41 2006 -0600
@@ -119,6 +119,7 @@ typedef union ia64_va {
        void *p;
 } ia64_va;
 
+#ifndef XEN
 /*
  * Note: These macros depend on the fact that PAGE_OFFSET has all
  * region bits set to 1 and all other bits set to zero.  They are
@@ -127,6 +128,7 @@ typedef union ia64_va {
  */
 #define __pa(x)                ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
 #define __va(x)                ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+#endif /* XEN */
 
 #define REGION_NUMBER(x)       ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
 #define REGION_OFFSET(x)       ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
@@ -198,6 +200,7 @@ get_order (unsigned long size)
 # define __pgprot(x)   (x)
 #endif /* !STRICT_MM_TYPECHECKS */
 
+#ifndef XEN
 #define PAGE_OFFSET                    __IA64_UL_CONST(0xe000000000000000)
 
 #define VM_DATA_DEFAULT_FLAGS          (VM_READ | VM_WRITE |                                   \
@@ -205,7 +208,7 @@ get_order (unsigned long size)
                                         (((current->personality & READ_IMPLIES_EXEC) != 0)     \
                                          ? VM_EXEC : 0))
 
-#ifdef XEN
+#else
 #include <asm/xenpage.h>
 #endif
 
diff -r d23c088eac6d -r ced37bea0647 xen/include/asm-ia64/linux-xen/asm/pgtable.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h      Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h      Tue Apr 25 22:35:41 2006 -0600
@@ -349,6 +349,7 @@ pgd_offset (struct mm_struct *mm, unsign
 #define pte_unmap(pte)                 do { } while (0)
 #define pte_unmap_nested(pte)          do { } while (0)
 
+#ifndef XEN
 /* atomic versions of the some PTE manipulations: */
 
 static inline int
@@ -418,6 +419,7 @@ pte_same (pte_t a, pte_t b)
 }
 
 #define update_mmu_cache(vma, address, pte) do { } while (0)
+#endif /* XEN */
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init (void);
diff -r d23c088eac6d -r ced37bea0647 xen/include/asm-ia64/linux-xen/asm/system.h
--- a/xen/include/asm-ia64/linux-xen/asm/system.h       Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/system.h       Tue Apr 25 22:35:41 2006 -0600
@@ -187,7 +187,9 @@ do {                                                               \
        (__ia64_id_flags & IA64_PSR_I) == 0;    \
 })
 
-#ifndef XEN
+#ifdef XEN
+#define local_irq_is_enabled() (!irqs_disabled())
+#else
 #ifdef __KERNEL__
 
 #ifdef CONFIG_IA32_SUPPORT
diff -r d23c088eac6d -r ced37bea0647 xen/include/asm-ia64/xenpage.h
--- a/xen/include/asm-ia64/xenpage.h    Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/include/asm-ia64/xenpage.h    Tue Apr 25 22:35:41 2006 -0600
@@ -66,7 +66,4 @@ static inline int get_order_from_pages(u
 /* It is sometimes very useful to have unsigned long as result.  */
 #define __va_ul(x)     ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.l;})
 
-#undef PAGE_OFFSET
-#define PAGE_OFFSET    __IA64_UL_CONST(0xf000000000000000)
-
 #endif /* _ASM_IA64_XENPAGE_H */
diff -r d23c088eac6d -r ced37bea0647 xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h  Tue Apr 25 22:32:14 2006 -0600
+++ b/xen/include/asm-ia64/xensystem.h  Tue Apr 25 22:35:41 2006 -0600
@@ -25,9 +25,9 @@
 #define HYPERVISOR_VIRT_START   0xf000000000000000
 #define KERNEL_START            0xf000000004000000
 #define SHAREDINFO_ADDR                 0xf100000000000000
-#define SHARED_ARCHINFO_ADDR    (SHAREDINFO_ADDR + PAGE_SIZE)
+#define XSI_OFS                 PAGE_SIZE
+#define SHARED_ARCHINFO_ADDR    (SHAREDINFO_ADDR + XSI_OFS)
 #define PERCPU_ADDR             (SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
-#define XSI_OFS                 (SHARED_ARCHINFO_ADDR - SHAREDINFO_ADDR)
 #define VHPT_ADDR               0xf200000000000000
 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
 #define VIRT_FRAME_TABLE_ADDR   0xf300000000000000
@@ -35,45 +35,8 @@
 #endif
 #define XEN_END_ADDR            0xf400000000000000
 
+#define PAGE_OFFSET    __IA64_UL_CONST(0xf000000000000000)
+
 #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
 
-#ifndef __ASSEMBLY__
-
-#define IA64_HAS_EXTRA_STATE(t) 0
-
-struct vcpu;
-extern void ia64_save_extra (struct vcpu *v);
-extern void ia64_load_extra (struct vcpu *v);
-
-extern struct vcpu *vmx_ia64_switch_to (struct vcpu *next_task);
-extern struct vcpu *ia64_switch_to (struct vcpu *next_task);
-
-#define __switch_to(prev,next,last) do {       \
-       ia64_save_fpu(prev->arch._thread.fph);  \
-       ia64_load_fpu(next->arch._thread.fph);  \
-       if (VMX_DOMAIN(prev))                   \
-               vmx_save_state(prev);           \
-       else {                                  \
-               if (IA64_HAS_EXTRA_STATE(prev)) \
-                       ia64_save_extra(prev);  \
-       }                                       \
-       if (VMX_DOMAIN(next))                   \
-               vmx_load_state(next);           \
-       else {                                  \
-               if (IA64_HAS_EXTRA_STATE(next)) \
-                       ia64_save_extra(next);  \
-       }                                       \
-       /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/                        \
-       (last) = ia64_switch_to((next));        \
-       if (!VMX_DOMAIN(current)){                   \
-          vcpu_set_next_timer(current);                \
-       }                                       \
-} while (0)
-
-// FIXME SMP... see system.h, does this need to be different?
-#define switch_to(prev,next,last)      __switch_to(prev, next, last)
-
-#define local_irq_is_enabled() (!irqs_disabled())
-
-#endif // __ASSEMBLY__
 #endif // _ASM_IA64_XENSYSTEM_H

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog