
[Xen-changelog] [xen-unstable] [IA64] do not use interrupt_mask_addr inside Xen



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 11f228aa783a5c76917520c1066c32b529a55930
# Parent  d33add81096b057f98fa740ab88d6c17426f8d68
[IA64] do not use interrupt_mask_addr inside Xen

Create a current_psr_i_addr per-CPU variable.  Inside Xen, do not use
interrupt_mask_addr, because it can be modified by the guest.
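
For context, a minimal C sketch of the pattern this changeset introduces.
The update_psr_i_addr() helper below is hypothetical shorthand for the
assignments made in schedule_tail() and context_switch(); only
current_psr_i_addr and the vcpu_info fields come from the patch itself.

    /* Per-CPU pointer to the current vcpu's vpsr.i byte (in fact
     * evtchn_upcall_mask), kept as a Xen virtual address so Xen never
     * has to trust the guest-writable interrupt_mask_addr. */
    DEFINE_PER_CPU(uint8_t *, current_psr_i_addr);

    /* Hypothetical helper mirroring what schedule_tail()/context_switch()
     * do: point the per-CPU variable at the mask inside the domain's
     * shared_info, or clear it when switching to the idle domain. */
    static void update_psr_i_addr(struct vcpu *v)
    {
        if (!is_idle_domain(v->domain))
            __ia64_per_cpu_var(current_psr_i_addr) =
                &v->domain->shared_info->
                    vcpu_info[v->vcpu_id].evtchn_upcall_mask;
        else
            __ia64_per_cpu_var(current_psr_i_addr) = NULL;
    }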

Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
---
 xen/arch/ia64/xen/domain.c      |   31 +++++++++++++++++++++----------
 xen/arch/ia64/xen/hyperprivop.S |   36 ++++++++++++++++++------------------
 xen/arch/ia64/xen/xensetup.c    |    2 +-
 3 files changed, 40 insertions(+), 29 deletions(-)

diff -r d33add81096b -r 11f228aa783a xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Wed Jun 14 16:05:45 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c        Thu Jun 15 08:42:34 2006 -0600
@@ -89,6 +89,10 @@ unsigned long context_switch_count = 0;
 
 extern struct vcpu *ia64_switch_to (struct vcpu *next_task);
 
+/* Address of vpsr.i (in fact evtchn_upcall_mask) of current vcpu.
+   This is a Xen virtual address.  */
+DEFINE_PER_CPU(uint8_t *, current_psr_i_addr);
+
 #include <xen/sched-if.h>
 
 void schedule_tail(struct vcpu *prev)
@@ -104,6 +108,8 @@ void schedule_tail(struct vcpu *prev)
                        VHPT_ENABLED);
                load_region_regs(current);
                vcpu_load_kernel_regs(current);
+               __ia64_per_cpu_var(current_psr_i_addr) = &current->domain->
+                 shared_info->vcpu_info[current->vcpu_id].evtchn_upcall_mask;
        }
 }
 
@@ -124,12 +130,9 @@ void context_switch(struct vcpu *prev, s
     /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
     prev = ia64_switch_to(next);
 
+    /* Note: ia64_switch_to does not return here at vcpu initialization.  */
+
     //cpu_set(smp_processor_id(), current->domain->domain_dirty_cpumask);
-
-    if (!VMX_DOMAIN(current)){
-           vcpu_set_next_timer(current);
-    }
-
 
 // leave this debug for now: it acts as a heartbeat when more than
 // one domain is active
@@ -140,26 +143,34 @@ if (!cnt[id]--) { cnt[id] = 500000; prin
 if (!cnt[id]--) { cnt[id] = 500000; printk("%x",id); }
 if (!i--) { i = 1000000; printk("+"); }
 }
-
+ 
     if (VMX_DOMAIN(current)){
-               vmx_load_all_rr(current);
-    }else{
+       vmx_load_all_rr(current);
+    } else {
+       struct domain *nd;
        extern char ia64_ivt;
+
        ia64_set_iva(&ia64_ivt);
-       if (!is_idle_domain(current->domain)) {
+
+       nd = current->domain;
+       if (!is_idle_domain(nd)) {
                ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                             VHPT_ENABLED);
                load_region_regs(current);
                vcpu_load_kernel_regs(current);
+               vcpu_set_next_timer(current);
                if (vcpu_timer_expired(current))
                        vcpu_pend_timer(current);
-       }else {
+               __ia64_per_cpu_var(current_psr_i_addr) = &nd->shared_info->
+                 vcpu_info[current->vcpu_id].evtchn_upcall_mask;
+       } else {
                /* When switching to idle domain, only need to disable vhpt
                 * walker. Then all accesses happen within idle context will
                 * be handled by TR mapping and identity mapping.
                 */
                pta = ia64_get_pta();
                ia64_set_pta(pta & ~VHPT_ENABLED);
+               __ia64_per_cpu_var(current_psr_i_addr) = NULL;
         }
     }
     local_irq_restore(spsr);
diff -r d33add81096b -r 11f228aa783a xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S   Wed Jun 14 16:05:45 2006 -0600
+++ b/xen/arch/ia64/xen/hyperprivop.S   Thu Jun 15 08:42:34 2006 -0600
@@ -107,9 +107,9 @@ GLOBAL_ENTRY(fast_hyperprivop)
        or r20=r23,r21;;
 1:     // when we get to here r20=~=interrupts pending
        // Check pending event indication
-(p7)   adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18;;
+(p7)   movl r20=THIS_CPU(current_psr_i_addr);;
 (p7)   ld8 r20=[r20];;
-(p7)   adds r20=-1,r20;;
+(p7)   adds r20=-1,r20;;       /* evtchn_upcall_pending */
 (p7)   ld1 r20=[r20];;
 
        // HYPERPRIVOP_RFI?
@@ -276,14 +276,14 @@ ENTRY(hyper_ssm_i)
        or r30=r30,r28;;
        and r30=r30,r27;;
        mov r20=1
-       adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
+       movl r22=THIS_CPU(current_psr_i_addr)
        adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
        ld8 r22=[r22]
-       st8 [r21]=r30 ;;
+       st8 [r21]=r30;;
        // set shared_mem interrupt_delivery_enabled to 0
        // set shared_mem interrupt_collection_enabled to 0
-       st1 [r22]=r20;;
-       st4 [r18]=r0;;
+       st1 [r22]=r20
+       st4 [r18]=r0
        // cover and set shared_mem precover_ifs to cr.ifs
        // set shared_mem ifs and incomplete_regframe to 0
        cover ;;
@@ -407,7 +407,7 @@ GLOBAL_ENTRY(fast_tick_reflect)
        cmp.eq p6,p0=r16,r0;;
 (p6)   br.cond.spnt.few fast_tick_reflect_done;;
        // if guest vpsr.i is off, we're done
-       adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 ;;
+       movl r21=THIS_CPU(current_psr_i_addr);;
        ld8 r21=[r21];;
        ld1 r21=[r21];;
        cmp.eq p0,p6=r21,r0
@@ -448,17 +448,16 @@ GLOBAL_ENTRY(fast_tick_reflect)
        dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
        or r17=r17,r28;;
        and r17=r17,r27;;
-       ld4 r16=[r18],XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS;;
+       ld4 r16=[r18];;
        cmp.ne p6,p0=r16,r0;;
-       ld8 r16=[r18],XSI_PSR_IC_OFS-XSI_PSR_I_ADDR_OFS
+       movl r22=THIS_CPU(current_psr_i_addr);;
+       ld8 r22=[r22]
 (p6)   dep r17=-1,r17,IA64_PSR_IC_BIT,1 ;;
-       ld1 r16=[r16];;
+       ld1 r16=[r22];;
        cmp.eq p6,p0=r16,r0;;
 (p6)   dep r17=-1,r17,IA64_PSR_I_BIT,1 ;;
        mov r20=1
-       adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
        adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
-       ld8 r22=[r22]
        st8 [r21]=r17 ;;
        // set shared_mem interrupt_delivery_enabled to 0
        // set shared_mem interrupt_collection_enabled to 0
@@ -606,7 +605,7 @@ ENTRY(fast_reflect)
        // set shared_mem isr
        st8 [r21]=r16 ;;
        // set cr.ipsr
-       adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
+       movl r21=THIS_CPU(current_psr_i_addr)
        mov r29=r30 ;;
        ld8 r21=[r21]
        movl r28=DELIVER_PSR_SET;;
@@ -1077,7 +1076,7 @@ just_do_rfi:
        dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
        mov cr.ifs=r20 ;;
        // ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3;
-       adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
+       movl r20=THIS_CPU(current_psr_i_addr)
        dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
        // vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
        ld8 r20=[r20]
@@ -1229,13 +1228,14 @@ ENTRY(rfi_with_interrupt)
        extr.u r20=r21,41,2 ;;  // get v(!)psr.ri
        dep r16=r20,r16,41,2 ;; // deposit cr.isr.ei
        adds r22=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;; 
-       st8 [r22]=r16,XSI_PSR_I_ADDR_OFS-XSI_ISR_OFS ;;
+       st8 [r22]=r16;;
+       movl r22=THIS_CPU(current_psr_i_addr)
        // set cr.ipsr (make sure cpl==2!)
-       mov r29=r17 ;;
+       mov r29=r17
        movl r28=DELIVER_PSR_SET;;
-       mov r20=1
+       mov r20=1;;
        ld8 r22=[r22]
-       movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
+       movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0)
        or r29=r29,r28;;
        and r29=r29,r27;;
        mov cr.ipsr=r29;;
diff -r d33add81096b -r 11f228aa783a xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Wed Jun 14 16:05:45 2006 -0600
+++ b/xen/arch/ia64/xen/xensetup.c      Thu Jun 15 08:42:34 2006 -0600
@@ -521,7 +521,7 @@ printk("About to call init_trace_bufs()\
     local_irq_enable();
 
     printf("About to call schedulers_start dom0=%p, idle_dom=%p\n",
-          dom0, &idle_domain);
+          dom0, idle_domain);
     schedulers_start();
 
     domain_unpause_by_systemcontroller(dom0);
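
The hyperprivop.S fast paths above now read this per-CPU pointer instead of
the guest-visible XSI_PSR_I_ADDR slot.  Roughly, in C (illustrative only;
guest_event_pending() is not an actual Xen helper):

    static int guest_event_pending(void)
    {
        /* Xen-private pointer, set at context switch; the guest cannot
         * move it. */
        uint8_t *mask_addr = __ia64_per_cpu_var(current_psr_i_addr);

        if (mask_addr == NULL)      /* idle domain: nothing to check */
            return 0;

        /* evtchn_upcall_pending immediately precedes evtchn_upcall_mask
         * in vcpu_info, hence the "adds r20=-1,r20" in fast_hyperprivop. */
        return *(mask_addr - 1) != 0;
    }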

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

