
[Xen-changelog] [xen-unstable] [IA64] Enable SMP on VTI-Domain

# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID b20733e82ab6f6ab204a24c5779683cf79c78390
# Parent  d8d2b5c082458152fb30e73138bd8822828247ac
[IA64] Enable SMP on VTI-Domain

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
---
 xen/arch/ia64/vmx/mmio.c        |   88 +++++++++++++++++++++++++---------------
 xen/arch/ia64/vmx/vlsapic.c     |    2 +-
 xen/arch/ia64/vmx/vmmu.c        |   57 ++++++++++++++++++++++++-
 xen/arch/ia64/vmx/vmx_support.c |    3 ++-
 xen/arch/ia64/vmx/vmx_virt.c    |   66 +++++++++++++++++++++++++++---
 xen/arch/ia64/vmx/vtlb.c        |   43 +++----------------
 xen/arch/ia64/xen/xentime.c     |    2 +-
 xen/include/asm-ia64/vmx.h      |    2 ++
 8 files changed, 186 insertions(+), 77 deletions(-)

diff -r d8d2b5c08245 -r b20733e82ab6 xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Thu Jun 08 10:17:22 2006 -0600
+++ b/xen/arch/ia64/vmx/mmio.c  Thu Jun 08 11:00:09 2006 -0600
@@ -33,8 +33,9 @@
 #include <asm/mm.h>
 #include <asm/vmx.h>
 #include <public/event_channel.h>
+#include <public/arch-ia64.h>
 #include <linux/event.h>
-
+#include <xen/domain.h>
 /*
 struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
 {
@@ -51,7 +52,7 @@ struct mmio_list *lookup_mmio(u64 gpa, s
 #define PIB_OFST_INTA           0x1E0000
 #define PIB_OFST_XTP            0x1E0008
 
-static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
+static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
 
 static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
 {
@@ -356,42 +357,67 @@ static void deliver_ipi (VCPU *vcpu, uin
  */
 static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
 {
-       int   i;
-       VCPU  *vcpu;
-       LID       lid;
-       for (i=0; i<MAX_VIRT_CPUS; i++) {
-               vcpu = d->vcpu[i];
-               if (!vcpu)
-                       continue;
-               lid.val = VCPU(vcpu, lid);
-               if ( lid.id == id && lid.eid == eid ) {
-                   return vcpu;
-               }
-       }
-       return NULL;
+    int   i;
+    VCPU  *vcpu;
+    LID   lid;
+    for (i=0; i<MAX_VIRT_CPUS; i++) {
+        vcpu = d->vcpu[i];
+        if (!vcpu)
+            continue;
+        lid.val = VCPU_LID(vcpu);
+        if ( lid.id == id && lid.eid == eid )
+            return vcpu;
+    }
+    return NULL;
 }
 
 /*
  * execute write IPI op.
  */
-static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
-{
-    VCPU   *target_cpu;
- 
-    target_cpu = lid_2_vcpu(vcpu->domain, 
-                               ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
-    if ( target_cpu == NULL ) panic_domain (NULL,"Unknown IPI cpu\n");
-    if ( target_cpu == vcpu ) {
-       // IPI to self
-        deliver_ipi (vcpu, ((ipi_d_t)value).dm, 
-                ((ipi_d_t)value).vector);
-        return 1;
+static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
+{
+    VCPU   *targ;
+    struct domain *d=vcpu->domain; 
+    targ = lid_2_vcpu(vcpu->domain, 
+           ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
+    if ( targ == NULL ) panic_domain (NULL,"Unknown IPI cpu\n");
+
+    if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)) {
+        struct pt_regs *targ_regs = vcpu_regs (targ);
+        struct vcpu_guest_context c;
+
+        printf ("arch_boot_vcpu: %p %p\n",
+                (void *)d->arch.boot_rdv_ip,
+                (void *)d->arch.boot_rdv_r1);
+        memset (&c, 0, sizeof (c));
+
+        c.flags = VGCF_VMX_GUEST;
+        if (arch_set_info_guest (targ, &c) != 0) {
+            printf ("arch_boot_vcpu: failure\n");
+            return;
+        }
+        /* First or next rendez-vous: set registers.  */
+        vcpu_init_regs (targ);
+        targ_regs->cr_iip = d->arch.boot_rdv_ip;
+        targ_regs->r1 = d->arch.boot_rdv_r1;
+
+        if (test_and_clear_bit(_VCPUF_down,&targ->vcpu_flags)) {
+            vcpu_wake(targ);
+            printf ("arch_boot_vcpu: vcpu %d awaken %016lx!\n",
+                    targ->vcpu_id, targ_regs->cr_iip);
+        }
+        else
+            printf ("arch_boot_vcpu: huh, already awake!\n");
     }
     else {
-       // TODO: send Host IPI to inject guest SMP IPI interruption
-        panic_domain (NULL, "No SM-VP supported!\n");
-        return 0;
-    }
+        int running = test_bit(_VCPUF_running,&targ->vcpu_flags);
+        deliver_ipi (targ, ((ipi_d_t)value).dm, 
+                    ((ipi_d_t)value).vector);
+        vcpu_unblock(targ);
+        if (running)
+            smp_send_event_check_cpu(targ->processor);
+    }
+    return;
 }
 
 
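The rewritten write_ipi() above replaces the old "No SM-VP supported" panic
with a real boot/deliver path: an IPI to an uninitialised vcpu acts as the
SMP rendez-vous that boots it, while an IPI to a live vcpu pends the vector
and kicks the host cpu if the target is currently running. A minimal
standalone sketch of that wake-or-deliver shape (all toy_* names here are
hypothetical stand-ins, not Xen APIs):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_vcpu {
        int  id;
        bool initialised;   /* has the vcpu ever been set up?  */
        bool down;          /* parked, waiting for a boot IPI  */
        bool running;       /* currently on a physical cpu     */
    };

    static void queue_vector(struct toy_vcpu *v, int vec)
    {
        printf("vcpu %d: vector 0x%x pended\n", v->id, vec);
    }

    static void kick_cpu(struct toy_vcpu *v)
    {
        printf("vcpu %d: host IPI sent to recheck pending events\n", v->id);
    }

    static void toy_write_ipi(struct toy_vcpu *targ, int vec)
    {
        if (!targ->initialised) {
            /* First rendez-vous: set up boot state, then wake the vcpu;
             * the triggering IPI itself is consumed by the boot. */
            targ->initialised = true;
            if (targ->down) {
                targ->down = false;
                printf("vcpu %d: awakened\n", targ->id);
            }
        } else {
            /* Sample 'running' before delivery: a target that was already
             * running may miss the new pending bit without a kick. */
            bool running = targ->running;
            queue_vector(targ, vec);
            if (running)
                kick_cpu(targ);
        }
    }

    int main(void)
    {
        struct toy_vcpu ap = { .id = 1, .down = true };
        toy_write_ipi(&ap, 0xf0);   /* boots the AP */
        ap.running = true;
        toy_write_ipi(&ap, 0xd0);   /* pends vector and kicks */
        return 0;
    }

Sampling 'running' before delivery mirrors the patch: if the target was on
a cpu when the vector was pended, it will not notice the new pending state
without an explicit event-check IPI.
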
diff -r d8d2b5c08245 -r b20733e82ab6 xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c       Thu Jun 08 10:17:22 2006 -0600
+++ b/xen/arch/ia64/vmx/vlsapic.c       Thu Jun 08 11:00:09 2006 -0600
@@ -362,7 +362,7 @@ void vlsapic_reset(VCPU *vcpu)
 {
     int     i;
 
-    VCPU(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
+    VCPU(vcpu, lid) = VCPU_LID(vcpu);
     VCPU(vcpu, ivr) = 0;
     VCPU(vcpu,tpr) = 0x10000;
     VCPU(vcpu, eoi) = 0;
diff -r d8d2b5c08245 -r b20733e82ab6 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Thu Jun 08 10:17:22 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c  Thu Jun 08 11:00:09 2006 -0600
@@ -492,13 +492,64 @@ IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UIN
 
 IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
 {
+    vmx_vcpu_ptc_ga(vcpu, va, ps);
+    return IA64_ILLOP_FAULT;
+}
+/*
+IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
+{
     vmx_vcpu_ptc_l(vcpu, va, ps);
-    return IA64_ILLOP_FAULT;
-}
+    return IA64_NO_FAULT;
+}
+ */
+struct ptc_ga_args {
+    unsigned long vadr;
+    unsigned long rid;
+    unsigned long ps;
+    struct vcpu *vcpu;
+};
+
+static void ptc_ga_remote_func (void *varg)
+{
+    u64 oldrid, moldrid;
+    VCPU *v;
+    struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
+    v = args->vcpu;
+    oldrid = VMX(v, vrr[0]);
+    VMX(v, vrr[0]) = args->rid;
+    moldrid = ia64_get_rr(0x0);
+    ia64_set_rr(0x0,vrrtomrr(v,args->rid));
+    vmx_vcpu_ptc_l(v, args->vadr, args->ps);
+    VMX(v, vrr[0]) = oldrid; 
+    ia64_set_rr(0x0,moldrid);
+}
+
 
 IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
 {
-    vmx_vcpu_ptc_l(vcpu, va, ps);
+
+    struct domain *d = vcpu->domain;
+    struct vcpu *v;
+    struct ptc_ga_args args;
+
+    args.vadr = va<<3>>3;
+    vcpu_get_rr(vcpu, va, &args.rid);
+    args.ps = ps;
+    for_each_vcpu (d, v) {
+        args.vcpu = v;
+        if (v->processor != vcpu->processor) {
+            int proc;
+            /* Flush VHPT on remote processors.  */
+            do {
+                proc = v->processor;
+                smp_call_function_single(v->processor, 
+                    &ptc_ga_remote_func, &args, 0, 1);
+                /* Try again if VCPU has migrated.  */
+            } while (proc != v->processor);
+        }
+        else
+            ptc_ga_remote_func(&args);
+    }
     return IA64_NO_FAULT;
 }
 
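The interesting part of the new vmx_vcpu_ptc_ga() is the retry loop:
smp_call_function_single() targets the processor the vcpu was last seen on,
but the vcpu can migrate before or while the purge runs, so the call is
repeated until the observed processor is stable. A standalone sketch of
that pattern, with hypothetical toy_* stand-ins for the Xen primitives:

    #include <stdio.h>

    /* Pretend scheduler state: the vcpu sits on cpu 2, then migrates to 3. */
    static int toy_current_processor(void)
    {
        static const int seq[] = { 2, 3, 3 };
        static int i;
        return seq[i < 2 ? i++ : 2];
    }

    static void toy_purge_on(int cpu)
    {
        printf("purging VHPT on cpu %d\n", cpu);
    }

    static void toy_remote_purge(void)
    {
        int proc;
        do {
            proc = toy_current_processor();
            toy_purge_on(proc);   /* smp_call_function_single() lands here */
            /* Retry if the vcpu migrated while the purge was in flight. */
        } while (proc != toy_current_processor());
    }

    int main(void)
    {
        toy_remote_purge();
        return 0;
    }

The worst case is a redundant purge on the old processor, which is safe;
the loop only guarantees the purge has run wherever the vcpu ended up.
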
diff -r d8d2b5c08245 -r b20733e82ab6 xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c   Thu Jun 08 10:17:22 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_support.c   Thu Jun 08 11:00:09 2006 -0600
@@ -138,7 +138,8 @@ void vmx_intr_assist(struct vcpu *v)
 
 #ifdef V_IOSAPIC_READY
     /* Confirm virtual interrupt line signals, and set pending bits in vpd */
-    vmx_virq_line_assist(v);
+    if(v->vcpu_id==0)
+        vmx_virq_line_assist(v);
 #endif
     return;
 }
diff -r d8d2b5c08245 -r b20733e82ab6 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Thu Jun 08 10:17:22 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Thu Jun 08 11:00:09 2006 -0600
@@ -317,12 +317,68 @@ IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INS
 
 IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
 {
-    return vmx_emul_ptc_l(vcpu, inst);
+    u64 r2,r3;
+#ifdef  VMAL_NO_FAULT_CHECK    
+    IA64_PSR  vpsr;
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+    if ( vpsr.cpl != 0) {
+        /* Inject Privileged Operation fault into guest */
+        set_privileged_operation_isr (vcpu, 0);
+        privilege_op (vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK    
+    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
+#ifdef  VMAL_NO_FAULT_CHECK
+        ISR isr;
+        set_isr_reg_nat_consumption(vcpu,0,0);
+        rnat_comsumption(vcpu);
+        return IA64_FAULT;
+#endif // VMAL_NO_FAULT_CHECK
+    }
+#ifdef  VMAL_NO_FAULT_CHECK
+    if (unimplemented_gva(vcpu,r3) ) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        unimpl_daddr(vcpu);
+        return IA64_FAULT;
+   }
+#endif // VMAL_NO_FAULT_CHECK
+    return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
 }
 
 IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
 {
-    return vmx_emul_ptc_l(vcpu, inst);
+    u64 r2,r3;
+#ifdef  VMAL_NO_FAULT_CHECK    
+    IA64_PSR  vpsr;
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+    if ( vpsr.cpl != 0) {
+        /* Inject Privileged Operation fault into guest */
+        set_privileged_operation_isr (vcpu, 0);
+        privilege_op (vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK    
+    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
+#ifdef  VMAL_NO_FAULT_CHECK
+        ISR isr;
+        set_isr_reg_nat_consumption(vcpu,0,0);
+        rnat_comsumption(vcpu);
+        return IA64_FAULT;
+#endif // VMAL_NO_FAULT_CHECK
+    }
+#ifdef  VMAL_NO_FAULT_CHECK
+    if (unimplemented_gva(vcpu,r3) ) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        unimpl_daddr(vcpu);
+        return IA64_FAULT;
+   }
+#endif // VMAL_NO_FAULT_CHECK
+    return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
 }
 
 IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
@@ -1191,7 +1247,6 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
     }
 #endif  //CHECK_FAULT
     r2 = cr_igfld_mask(inst.M32.cr3,r2);
-    VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
     switch (inst.M32.cr3) {
         case 0: return vmx_vcpu_set_dcr(vcpu,r2);
         case 1: return vmx_vcpu_set_itm(vcpu,r2);
@@ -1207,7 +1262,7 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
         case 24:return vcpu_set_iim(vcpu,r2);
         case 25:return vcpu_set_iha(vcpu,r2);
         case 64:printk("SET LID to 0x%lx\n", r2);
-               return vmx_vcpu_set_lid(vcpu,r2);
+                return IA64_NO_FAULT;
         case 65:return IA64_NO_FAULT;
         case 66:return vmx_vcpu_set_tpr(vcpu,r2);
         case 67:return vmx_vcpu_set_eoi(vcpu,r2);
@@ -1220,7 +1275,8 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
         case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
         case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
         case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
-        default: return IA64_NO_FAULT;
+        default:VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
+                return IA64_NO_FAULT;
     }
 }
 
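The mov-to-cr hunk above changes two things: writes to cr.lid (cr 64) are
now silently ignored, since under SMP the virtual LID is derived from the
vcpu id instead, and the vcr[] shadow is only updated for registers that
fall through to the default case; registers with side effects keep going
through their dedicated setters. A toy dispatcher showing that split
(names hypothetical, not the Xen handlers):

    #include <stdint.h>
    #include <stdio.h>

    #define CR_LID 64
    #define CR_TPR 66

    static uint64_t shadow_vcr[128];        /* pass-through register file */

    static void toy_set_tpr(uint64_t val)
    {
        printf("tpr <- 0x%lx (side effects handled in the setter)\n",
               (unsigned long)val);
    }

    static void toy_mov_to_cr(int cr, uint64_t val)
    {
        switch (cr) {
        case CR_LID:        /* read-only now: LID derives from vcpu_id */
            break;
        case CR_TPR:        /* side-effecting registers use setters    */
            toy_set_tpr(val);
            break;
        default:            /* plain registers just hit the shadow     */
            shadow_vcr[cr] = val;
            break;
        }
    }

    int main(void)
    {
        toy_mov_to_cr(CR_LID, 0xff00);      /* silently dropped */
        toy_mov_to_cr(CR_TPR, 0x10000);
        toy_mov_to_cr(65, 1);
        printf("vcr[65] = 0x%lx\n", (unsigned long)shadow_vcr[65]);
        return 0;
    }
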
diff -r d8d2b5c08245 -r b20733e82ab6 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Thu Jun 08 10:17:22 2006 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c  Thu Jun 08 11:00:09 2006 -0600
@@ -58,12 +58,6 @@ static thash_data_t *cch_alloc(thash_cb_
     return p;
 }
 
-static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
-{
-    cch->next = hcb->cch_freelist;
-    hcb->cch_freelist = cch;
-}
-
 /*
  * Check to see if the address rid:va is translated by the TLB
  */
@@ -92,22 +86,6 @@ __is_tr_overlap(thash_data_t *trp, u64 r
     else
         return 1;
 
-}
-
-/*
- * Delete an thash entry leading collision chain.
- */
-static void __rem_hash_head(thash_cb_t *hcb, thash_data_t *hash)
-{
-    thash_data_t *next=hash->next;
-    if ( next) {
-        next->len=hash->len-1;
-        *hash = *next;
-        cch_free (hcb, next);
-    }
-    else {
-        hash->ti=1;
-    }
 }
 
 thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
@@ -142,17 +120,18 @@ thash_data_t *__vtr_lookup(VCPU *vcpu, u
 
 static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
 {
-    thash_data_t *p;
+    thash_data_t *p, *q;
     int i=0;
     
     p=hash;
     for(i=0; i < MAX_CCN_DEPTH; i++){
         p=p->next;
     }
-    p->next=hcb->cch_freelist;
-    hcb->cch_freelist=hash->next;
+    q=hash->next;
     hash->len=0;
     hash->next=0;
+    p->next=hcb->cch_freelist;
+    hcb->cch_freelist=q;
 }
 
 
@@ -265,16 +244,14 @@ static void vtlb_purge(thash_cb_t *hcb, 
         hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
         if(!INVALID_TLB(hash_table)){
             if(hash_table->etag == tag){
-                __rem_hash_head(hcb, hash_table);
+                 hash_table->etag = 1UL<<63;
             }
             else{
                 prev=hash_table;
                 next=prev->next;
                 while(next){
                     if(next->etag == tag){
-                        prev->next=next->next;
-                        cch_free(hcb,next);
-                        hash_table->len--;
+                        next->etag = 1UL<<63;
                         break;
                     }
                     prev=next;
@@ -300,16 +277,14 @@ static void vhpt_purge(thash_cb_t *hcb, 
         hash_table = (thash_data_t *)ia64_thash(start);
         tag = ia64_ttag(start);
         if(hash_table->etag == tag ){
-            __rem_hash_head(hcb, hash_table);
+            hash_table->etag = 1UL<<63; 
         }
         else{
             prev=hash_table;
             next=prev->next;
             while(next){
                 if(next->etag == tag){
-                    prev->next=next->next;
-                    cch_free(hcb,next);
-                    hash_table->len--;
+                    next->etag = 1UL<<63;
                     break; 
                 }
                 prev=next;
@@ -383,7 +358,6 @@ void vtlb_insert(thash_cb_t *hcb, u64 pt
         hash_table->page_flags = pte;
         hash_table->itir=itir;
         hash_table->etag=tag;
-        hash_table->next = 0;
         return;
     }
     if (hash_table->len>=MAX_CCN_DEPTH){
@@ -539,7 +513,6 @@ void thash_purge_all(VCPU *v)
         num--;
     }while(num);
     cch_mem_init(vhpt);
-
     local_flush_tlb_all();
 }
 
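Most of the vtlb.c hunks implement one idea: a purged collision-chain entry
is no longer unlinked and returned to the free list (__rem_hash_head() and
cch_free() are deleted above); it is tombstoned in place by overwriting its
tag with a value whose top bit is set, which lookups treat as invalid, and
thash_recycle_cch() reclaims whole chains later. Leaving the chain links
untouched avoids racing with concurrent walkers once purges can arrive from
remote processors. A minimal sketch of tombstone purging (hypothetical
names):

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_TAG (1UL << 63)

    struct toy_entry {
        uint64_t etag;
        struct toy_entry *next;
    };

    static void toy_purge(struct toy_entry *head, uint64_t tag)
    {
        struct toy_entry *p;
        for (p = head; p != NULL; p = p->next) {
            if (p->etag == tag) {
                p->etag = INVALID_TAG;   /* tombstone: no unlink, no free */
                break;
            }
        }
    }

    int main(void)
    {
        struct toy_entry c = { 3, 0 }, b = { 2, &c }, a = { 1, &b };
        toy_purge(&a, 2);
        printf("b tombstoned: %d, chain intact: %d\n",
               (int)(b.etag >> 63), a.next == &b && b.next == &c);
        return 0;
    }
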
diff -r d8d2b5c08245 -r b20733e82ab6 xen/arch/ia64/xen/xentime.c
--- a/xen/arch/ia64/xen/xentime.c       Thu Jun 08 10:17:22 2006 -0600
+++ b/xen/arch/ia64/xen/xentime.c       Thu Jun 08 11:00:09 2006 -0600
@@ -124,7 +124,7 @@ xen_timer_interrupt (int irq, void *dev_
 #endif
 #endif
 
-       if (!is_idle_domain(current->domain))
+       if (!is_idle_domain(current->domain)&&!VMX_DOMAIN(current))
                if (vcpu_timer_expired(current)) {
                        vcpu_pend_timer(current);
 			// ensure another timer interrupt happens even if domain doesn't
diff -r d8d2b5c08245 -r b20733e82ab6 xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h        Thu Jun 08 10:17:22 2006 -0600
+++ b/xen/include/asm-ia64/vmx.h        Thu Jun 08 11:00:09 2006 -0600
@@ -24,6 +24,8 @@
 
 #define RR7_SWITCH_SHIFT       12      /* 4k enough */
 #include <public/hvm/ioreq.h>
+
+#define VCPU_LID(v) (((u64)(v)->vcpu_id)<<24)
 
 extern void identify_vmx_feature(void);
 extern unsigned int vmx_enabled;
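
VCPU_LID() makes the guest LID deterministic: the vcpu id lands in cr.lid's
"id" field (bits 24-31) and the "eid" field stays zero, which is exactly
what lid_2_vcpu() in mmio.c matches against. A quick standalone check of
the encoding (the macro here takes a plain id rather than a vcpu pointer):

    #include <stdint.h>
    #include <stdio.h>

    #define VCPU_LID_OF(vcpu_id) (((uint64_t)(vcpu_id)) << 24)

    int main(void)
    {
        uint64_t lid = VCPU_LID_OF(2);
        printf("lid=0x%lx id=%lu eid=%lu\n",
               (unsigned long)lid,
               (unsigned long)((lid >> 24) & 0xff),   /* id  field */
               (unsigned long)((lid >> 16) & 0xff));  /* eid field */
        return 0;
    }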
