[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [IA64] emulate ldfp8 in mmio



# HG changeset patch
# User awilliam@localhost
# Node ID 2d2ef3f4c7470659ea69036ae3a2b4b4833e49fd
# Parent  af32ca486466426a70c6c399ad23f00f4a09706c
[IA64] emulate ldfp8 in mmio

1. emulate ldfp8 in mmio
2. handle floating point register rotation in functions setfpreg and getfpreg

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>

diff -r af32ca486466 -r 2d2ef3f4c747 xen/arch/ia64/linux-xen/unaligned.c
--- a/xen/arch/ia64/linux-xen/unaligned.c       Tue Apr 25 16:55:09 2006 -0600
+++ b/xen/arch/ia64/linux-xen/unaligned.c       Tue Apr 25 17:05:16 2006 -0600
@@ -754,6 +754,9 @@ void
 void
 getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
 {
+       // Take floating register rotation into consideration
+       if(regnum >= IA64_FIRST_ROTATING_FR)
+               regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); 
 #define CASE_FIXED_FP(reg)                     \
        case reg:                               \
                ia64_stf_spill(fpval,reg);      \
@@ -898,6 +901,161 @@ getfpreg (unsigned long regnum, struct i
 #undef CASE_FIXED_FP
 #undef CASE_SAVED_FP
 }
+
+
+void
+setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
+{
+       // Take floating register rotation into consideration
+       ia64_fph_enable();
+       if(regnum >= IA64_FIRST_ROTATING_FR)
+               regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); 
+
+#define CASE_FIXED_FP(reg)                     \
+       case reg:                               \
+               ia64_ldf_fill(reg,fpval);       \
+               break
+#define CASE_RESTORED_FP(reg)                                  \
+       case reg:                                               \
+               regs->f##reg.u.bits[0] = fpval->u.bits[0];      \
+               regs->f##reg.u.bits[1] = fpval->u.bits[1] ;     \
+               break
+       switch(regnum) {
+               CASE_FIXED_FP(0);
+               CASE_FIXED_FP(1);
+               CASE_FIXED_FP(2);
+               CASE_FIXED_FP(3);
+               CASE_FIXED_FP(4);
+               CASE_FIXED_FP(5);
+
+               CASE_RESTORED_FP(6);
+               CASE_RESTORED_FP(7);
+               CASE_RESTORED_FP(8);
+               CASE_RESTORED_FP(9);
+               CASE_RESTORED_FP(10);
+               CASE_RESTORED_FP(11);
+
+               CASE_FIXED_FP(12);
+               CASE_FIXED_FP(13);
+               CASE_FIXED_FP(14);
+               CASE_FIXED_FP(15);
+               CASE_FIXED_FP(16);
+               CASE_FIXED_FP(17);
+               CASE_FIXED_FP(18);
+               CASE_FIXED_FP(19);
+               CASE_FIXED_FP(20);
+               CASE_FIXED_FP(21);
+               CASE_FIXED_FP(22);
+               CASE_FIXED_FP(23);
+               CASE_FIXED_FP(24);
+               CASE_FIXED_FP(25);
+               CASE_FIXED_FP(26);
+               CASE_FIXED_FP(27);
+               CASE_FIXED_FP(28);
+               CASE_FIXED_FP(29);
+               CASE_FIXED_FP(30);
+               CASE_FIXED_FP(31);
+               CASE_FIXED_FP(32);
+               CASE_FIXED_FP(33);
+               CASE_FIXED_FP(34);
+               CASE_FIXED_FP(35);
+               CASE_FIXED_FP(36);
+               CASE_FIXED_FP(37);
+               CASE_FIXED_FP(38);
+               CASE_FIXED_FP(39);
+               CASE_FIXED_FP(40);
+               CASE_FIXED_FP(41);
+               CASE_FIXED_FP(42);
+               CASE_FIXED_FP(43);
+               CASE_FIXED_FP(44);
+               CASE_FIXED_FP(45);
+               CASE_FIXED_FP(46);
+               CASE_FIXED_FP(47);
+               CASE_FIXED_FP(48);
+               CASE_FIXED_FP(49);
+               CASE_FIXED_FP(50);
+               CASE_FIXED_FP(51);
+               CASE_FIXED_FP(52);
+               CASE_FIXED_FP(53);
+               CASE_FIXED_FP(54);
+               CASE_FIXED_FP(55);
+               CASE_FIXED_FP(56);
+               CASE_FIXED_FP(57);
+               CASE_FIXED_FP(58);
+               CASE_FIXED_FP(59);
+               CASE_FIXED_FP(60);
+               CASE_FIXED_FP(61);
+               CASE_FIXED_FP(62);
+               CASE_FIXED_FP(63);
+               CASE_FIXED_FP(64);
+               CASE_FIXED_FP(65);
+               CASE_FIXED_FP(66);
+               CASE_FIXED_FP(67);
+               CASE_FIXED_FP(68);
+               CASE_FIXED_FP(69);
+               CASE_FIXED_FP(70);
+               CASE_FIXED_FP(71);
+               CASE_FIXED_FP(72);
+               CASE_FIXED_FP(73);
+               CASE_FIXED_FP(74);
+               CASE_FIXED_FP(75);
+               CASE_FIXED_FP(76);
+               CASE_FIXED_FP(77);
+               CASE_FIXED_FP(78);
+               CASE_FIXED_FP(79);
+               CASE_FIXED_FP(80);
+               CASE_FIXED_FP(81);
+               CASE_FIXED_FP(82);
+               CASE_FIXED_FP(83);
+               CASE_FIXED_FP(84);
+               CASE_FIXED_FP(85);
+               CASE_FIXED_FP(86);
+               CASE_FIXED_FP(87);
+               CASE_FIXED_FP(88);
+               CASE_FIXED_FP(89);
+               CASE_FIXED_FP(90);
+               CASE_FIXED_FP(91);
+               CASE_FIXED_FP(92);
+               CASE_FIXED_FP(93);
+               CASE_FIXED_FP(94);
+               CASE_FIXED_FP(95);
+               CASE_FIXED_FP(96);
+               CASE_FIXED_FP(97);
+               CASE_FIXED_FP(98);
+               CASE_FIXED_FP(99);
+               CASE_FIXED_FP(100);
+               CASE_FIXED_FP(101);
+               CASE_FIXED_FP(102);
+               CASE_FIXED_FP(103);
+               CASE_FIXED_FP(104);
+               CASE_FIXED_FP(105);
+               CASE_FIXED_FP(106);
+               CASE_FIXED_FP(107);
+               CASE_FIXED_FP(108);
+               CASE_FIXED_FP(109);
+               CASE_FIXED_FP(110);
+               CASE_FIXED_FP(111);
+               CASE_FIXED_FP(112);
+               CASE_FIXED_FP(113);
+               CASE_FIXED_FP(114);
+               CASE_FIXED_FP(115);
+               CASE_FIXED_FP(116);
+               CASE_FIXED_FP(117);
+               CASE_FIXED_FP(118);
+               CASE_FIXED_FP(119);
+               CASE_FIXED_FP(120);
+               CASE_FIXED_FP(121);
+               CASE_FIXED_FP(122);
+               CASE_FIXED_FP(123);
+               CASE_FIXED_FP(124);
+               CASE_FIXED_FP(125);
+               CASE_FIXED_FP(126);
+               CASE_FIXED_FP(127);
+       }
+#undef CASE_FIXED_FP
+#undef CASE_RESTORED_FP
+}
+
 #endif /* XEN */
 
 
diff -r af32ca486466 -r 2d2ef3f4c747 xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Tue Apr 25 16:55:09 2006 -0600
+++ b/xen/arch/ia64/vmx/mmio.c  Tue Apr 25 17:05:16 2006 -0600
@@ -529,6 +529,26 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
        vmx_vcpu_increment_iip(vcpu);
        return;
     }
+    // Floating-point Load Pair + Imm ldfp8 M12
+    else if(inst.M12.major==6&&inst.M12.m==1&&inst.M12.x==1&&inst.M12.x6==1){
+        struct ia64_fpreg v;
+        inst_type=SL_FLOATING;
+        dir = IOREQ_READ;
+        size = 8;     //ldfd
+        mmio_access(vcpu, padr, &data, size, ma, dir);
+        v.u.bits[0]=data;
+        v.u.bits[1]=0x1003E;
+        vcpu_set_fpreg(vcpu,inst.M12.f1,&v);
+        padr += 8;
+        mmio_access(vcpu, padr, &data, size, ma, dir);
+        v.u.bits[0]=data;
+        v.u.bits[1]=0x1003E;
+        vcpu_set_fpreg(vcpu,inst.M12.f2,&v);
+        padr += 8;
+        vcpu_set_gr(vcpu,inst.M12.r3,padr,0);
+        vmx_vcpu_increment_iip(vcpu);
+        return;
+    }                                  
     else{
         panic_domain
          (NULL,"This memory access instr can't be emulated: %lx pc=%lx\n ",
diff -r af32ca486466 -r 2d2ef3f4c747 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Tue Apr 25 16:55:09 2006 -0600
+++ b/xen/arch/ia64/xen/vcpu.c  Tue Apr 25 17:05:16 2006 -0600
@@ -24,6 +24,8 @@ extern void setreg(unsigned long regnum,
 extern void setreg(unsigned long regnum, unsigned long val, int nat, struct 
pt_regs *regs);
 extern void getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct 
pt_regs *regs);
 
+extern void setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct 
pt_regs *regs);
+
 extern void panic_domain(struct pt_regs *, const char *, ...);
 extern unsigned long translate_domain_mpaddr(unsigned long);
 extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
@@ -111,7 +113,16 @@ vcpu_get_fpreg(VCPU *vcpu, unsigned long
 {
        REGS *regs = vcpu_regs(vcpu);
        getfpreg(reg,val,regs); // FIXME: handle NATs later
-       return 0;
+       return IA64_NO_FAULT;
+}
+
+IA64FAULT
+vcpu_set_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val)
+{
+       REGS *regs = vcpu_regs(vcpu);
+       if(reg > 1)
+               setfpreg(reg,val,regs); // FIXME: handle NATs later
+       return IA64_NO_FAULT;
 }
 
 #else
diff -r af32ca486466 -r 2d2ef3f4c747 xen/include/asm-ia64/privop.h
--- a/xen/include/asm-ia64/privop.h     Tue Apr 25 16:55:09 2006 -0600
+++ b/xen/include/asm-ia64/privop.h     Tue Apr 25 17:05:16 2006 -0600
@@ -180,6 +180,11 @@ typedef union U_INST64_M10 {
     struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2, x6:6, s:1, 
major:4; };
 } INST64_M10;
 
+typedef union U_INST64_M12 {
+    IA64_INST inst;
+    struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2, x6:6, m:1, 
major:4; };
+} INST64_M12;
+                        
 typedef union U_INST64_M15 {
     IA64_INST inst;
     struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2, x6:6, s:1, 
major:4; };
@@ -204,6 +209,7 @@ typedef union U_INST64 {
     INST64_M6  M6;     // ldfd floating pointer
     INST64_M9  M9;     // stfd floating pointer
     INST64_M10 M10;    // stfd floating pointer
+    INST64_M12 M12;    // ldfd pair floating pointer
     INST64_M15 M15;    // lfetch + imm update
     INST64_M28 M28;    // purge translation cache entry
     INST64_M29 M29;    // mov register to ar (M unit)
diff -r af32ca486466 -r 2d2ef3f4c747 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h       Tue Apr 25 16:55:09 2006 -0600
+++ b/xen/include/asm-ia64/vcpu.h       Tue Apr 25 17:05:16 2006 -0600
@@ -41,6 +41,8 @@ extern IA64FAULT vcpu_get_gr_nat(VCPU *v
 extern IA64FAULT vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val);
 extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int 
nat);
 extern IA64FAULT vcpu_get_fpreg(VCPU *vcpu, unsigned long reg, struct 
ia64_fpreg *val);
+
+extern IA64FAULT vcpu_set_fpreg(VCPU *vcpu, unsigned long reg, struct 
ia64_fpreg *val);
 
 /* application registers */
 extern void vcpu_load_kernel_regs(VCPU *vcpu);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.