[Xen-devel] [PATCH 10/10] x86/misc: use unambiguous register names
This is in preparation for eliminating the mis-naming of 64-bit fields with
32-bit register names (eflags instead of rflags etc.). Use the guaranteed
32-bit underscore-prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -265,7 +265,7 @@ void vpmu_do_interrupt(struct cpu_user_r
         cmp = (void *)&vpmu->xenpmu_data->pmu.r.regs;
         cmp->ip = cur_regs->rip;
         cmp->sp = cur_regs->rsp;
-        cmp->flags = cur_regs->eflags;
+        cmp->flags = cur_regs->rflags;
         cmp->ss = cur_regs->ss;
         cmp->cs = cur_regs->cs;
         if ( (cmp->cs & 3) > 1 )
@@ -288,7 +288,7 @@ void vpmu_do_interrupt(struct cpu_user_r

         r->ip = cur_regs->rip;
         r->sp = cur_regs->rsp;
-        r->flags = cur_regs->eflags;
+        r->flags = cur_regs->rflags;

         if ( !has_hvm_container_vcpu(sampled) )
         {
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1044,11 +1044,11 @@ int arch_set_info_guest(
     init_int80_direct_trap(v);

     /* IOPL privileges are virtualised. */
-    v->arch.pv_vcpu.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL;
-    v->arch.user_regs.eflags &= ~X86_EFLAGS_IOPL;
+    v->arch.pv_vcpu.iopl = v->arch.user_regs._eflags & X86_EFLAGS_IOPL;
+    v->arch.user_regs._eflags &= ~X86_EFLAGS_IOPL;

     /* Ensure real hardware interrupts are enabled. */
-    v->arch.user_regs.eflags |= X86_EFLAGS_IF;
+    v->arch.user_regs._eflags |= X86_EFLAGS_IF;

     if ( !v->is_initialised )
     {
@@ -2235,7 +2235,7 @@ void hypercall_cancel_continuation(void)
     else
     {
         if ( is_pv_vcpu(current) )
-            regs->eip += 2; /* skip re-execute 'syscall' / 'int $xx' */
+            regs->rip += 2; /* skip re-execute 'syscall' / 'int $xx' */
         else
             current->arch.hvm_vcpu.hcall_preempted = 0;
     }
@@ -2264,11 +2264,11 @@ unsigned long hypercall_create_continuat
         struct cpu_user_regs *regs = guest_cpu_user_regs();
         struct vcpu *curr = current;

-        regs->eax = op;
+        regs->rax = op;

         /* Ensure the hypercall trap instruction is re-executed. */
         if ( is_pv_vcpu(curr) )
-            regs->eip -= 2; /* re-execute 'syscall' / 'int $xx' */
+            regs->rip -= 2; /* re-execute 'syscall' / 'int $xx' */
         else
             curr->arch.hvm_vcpu.hcall_preempted = 1;

@@ -2297,12 +2297,12 @@ unsigned long hypercall_create_continuat
                 arg = next_arg(p, args);
                 switch ( i )
                 {
-                case 0: regs->ebx = arg; break;
-                case 1: regs->ecx = arg; break;
-                case 2: regs->edx = arg; break;
-                case 3: regs->esi = arg; break;
-                case 4: regs->edi = arg; break;
-                case 5: regs->ebp = arg; break;
+                case 0: regs->rbx = arg; break;
+                case 1: regs->rcx = arg; break;
+                case 2: regs->rdx = arg; break;
+                case 3: regs->rsi = arg; break;
+                case 4: regs->rdi = arg; break;
+                case 5: regs->rbp = arg; break;
                 }
             }
         }
@@ -2372,12 +2372,12 @@ int hypercall_xlat_continuation(unsigned
             switch ( i )
             {
-            case 0: reg = &regs->ebx; break;
-            case 1: reg = &regs->ecx; break;
-            case 2: reg = &regs->edx; break;
-            case 3: reg = &regs->esi; break;
-            case 4: reg = &regs->edi; break;
-            case 5: reg = &regs->ebp; break;
+            case 0: reg = &regs->rbx; break;
+            case 1: reg = &regs->rcx; break;
+            case 2: reg = &regs->rdx; break;
+            case 3: reg = &regs->rsi; break;
+            case 4: reg = &regs->rdi; break;
+            case 5: reg = &regs->rbp; break;
             default: BUG(); reg = NULL; break;
             }

             if ( (mask & 1) )
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -1584,10 +1584,10 @@ int __init construct_dom0(
     /*
      * Initial register values:
      *  DS,ES,FS,GS = FLAT_KERNEL_DS
-     *       CS:EIP = FLAT_KERNEL_CS:start_pc
-     *       SS:ESP = FLAT_KERNEL_SS:start_stack
-     *          ESI = start_info
-     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
+     *       CS:rIP = FLAT_KERNEL_CS:start_pc
+     *       SS:rSP = FLAT_KERNEL_SS:start_stack
+     *          rSI = start_info
+     *  [rAX,rBX,rCX,rDX,rDI,rBP,R8-R15 are zero]
      */
     regs = &v->arch.user_regs;
     regs->ds = regs->es = regs->fs = regs->gs =
@@ -1596,10 +1596,10 @@ int __init construct_dom0(
                 FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS);
     regs->cs = (!is_pv_32bit_domain(d) ?
                 FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS);
-    regs->eip = parms.virt_entry;
-    regs->esp = vstack_end;
-    regs->esi = vstartinfo_start;
-    regs->eflags = X86_EFLAGS_IF;
+    regs->rip = parms.virt_entry;
+    regs->rsp = vstack_end;
+    regs->rsi = vstartinfo_start;
+    regs->_eflags = X86_EFLAGS_IF;

 #ifdef CONFIG_SHADOW_PAGING
     if ( opt_dom0_shadow )
--- a/xen/arch/x86/extable.c
+++ b/xen/arch/x86/extable.c
@@ -98,7 +98,7 @@ search_exception_table(unsigned long add
 unsigned long
 search_pre_exception_table(struct cpu_user_regs *regs)
 {
-    unsigned long addr = (unsigned long)regs->eip;
+    unsigned long addr = regs->rip;
     unsigned long fixup = search_one_extable(
         __start___pre_ex_table, __stop___pre_ex_table-1, addr);
     if ( fixup )
--- a/xen/arch/x86/hypercall.c
+++ b/xen/arch/x86/hypercall.c
@@ -146,7 +146,7 @@ void pv_hypercall(struct cpu_user_regs *

     ASSERT(guest_kernel_mode(curr, regs));

-    eax = is_pv_32bit_vcpu(curr) ? regs->_eax : regs->eax;
+    eax = is_pv_32bit_vcpu(curr) ? regs->_eax : regs->rax;

     BUILD_BUG_ON(ARRAY_SIZE(pv_hypercall_table) >
                  ARRAY_SIZE(hypercall_args_table));
@@ -154,7 +154,7 @@ void pv_hypercall(struct cpu_user_regs *
     if ( (eax >= ARRAY_SIZE(pv_hypercall_table)) ||
          !pv_hypercall_table[eax].native )
     {
-        regs->eax = -ENOSYS;
+        regs->rax = -ENOSYS;
         return;
     }

@@ -186,7 +186,7 @@ void pv_hypercall(struct cpu_user_regs *
             __trace_hypercall(TRC_PV_HYPERCALL_V2, eax, args);
     }

-    regs->eax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);
+    regs->rax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);

 #ifndef NDEBUG
     if ( regs->rip == old_rip )
--- a/xen/arch/x86/trace.c
+++ b/xen/arch/x86/trace.c
@@ -48,7 +48,7 @@ void __trace_pv_trap(int trapnr, unsigne

 void __trace_pv_page_fault(unsigned long addr, unsigned error_code)
 {
-    unsigned long eip = guest_cpu_user_regs()->eip;
+    unsigned long eip = guest_cpu_user_regs()->rip;

     if ( is_pv_32bit_vcpu(current) )
     {
@@ -119,7 +119,7 @@ void __trace_trap_two_addr(unsigned even

 void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte)
 {
-    unsigned long eip = guest_cpu_user_regs()->eip;
+    unsigned long eip = guest_cpu_user_regs()->rip;

     /* We have a couple of different modes to worry about:
      * - 32-on-32: 32-bit pte, 32-bit virtual addresses
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -44,7 +44,7 @@ void __dummy__(void)
     OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
     OFFSET(UREGS_rip, struct cpu_user_regs, rip);
     OFFSET(UREGS_cs, struct cpu_user_regs, cs);
-    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
+    OFFSET(UREGS_eflags, struct cpu_user_regs, rflags);
     OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
     OFFSET(UREGS_ss, struct cpu_user_regs, ss);
     OFFSET(UREGS_ds, struct cpu_user_regs, ds);
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -327,7 +327,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PA
         struct cpu_user_regs *regs = guest_cpu_user_regs();
         struct mc_state *mcs = &current->mc_state;
         unsigned int arg1 = !(mcs->flags & MCSF_in_multicall)
-                            ? regs->ecx
+                            ? regs->_ecx
                             : mcs->call.args[1];
         unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;

--- a/xen/include/asm-x86/regs.h
+++ b/xen/include/asm-x86/regs.h
@@ -15,6 +15,6 @@
     (diff == 0);                                                              \
 })

-#define return_reg(v) ((v)->arch.user_regs.eax)
+#define return_reg(v) ((v)->arch.user_regs.rax)

 #endif /* __X86_REGS_H__ */
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -764,7 +764,7 @@ typedef struct shared_info shared_info_t
  *      (may be omitted)
  *  c. list of allocated page frames [mfn_list, nr_pages]
  *      (unless relocated due to XEN_ELFNOTE_INIT_P2M)
- *  d. start_info_t structure        [register ESI (x86)]
+ *  d. start_info_t structure        [register rSI (x86)]
  *      in case of dom0 this page contains the console info, too
  *  e. unless dom0: xenstore ring page
  *  f. unless dom0: console ring page
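[Editorial aside] As background for the eflags/rflags/_eflags distinction the
patch relies on, the sketch below is a minimal, self-contained illustration,
not Xen's actual header. It assumes an anonymous-union layout in the spirit of
the cpu_user_regs declarations, where the 64-bit field (rflags), its legacy
alias (eflags), and the underscore-prefixed 32-bit view (_eflags) overlap;
struct demo_user_regs and the hard-coded flag constants exist only for this
example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_IF   0x00000200u
#define X86_EFLAGS_IOPL 0x00003000u

/*
 * Hypothetical stand-in for struct cpu_user_regs, illustration only.
 * rflags and eflags name the same 64-bit slot, while _eflags is a
 * guaranteed-32-bit view of its low half (x86 is little-endian, so the
 * 32-bit member overlays the low 32 bits).
 */
struct demo_user_regs {
    union {
        uint64_t rflags, eflags;   /* "eflags" is the ambiguous 64-bit alias */
        uint32_t _eflags;          /* unambiguous, guaranteed 32 bits wide */
    };
};

int main(void)
{
    struct demo_user_regs regs = { .rflags = X86_EFLAGS_IOPL | X86_EFLAGS_IF };

    /* Same flag handling as the arch_set_info_guest() hunk above:
     * virtualise IOPL (strip it from the saved flags) and force IF on. */
    uint32_t iopl = regs._eflags & X86_EFLAGS_IOPL;
    regs._eflags &= ~X86_EFLAGS_IOPL;
    regs._eflags |= X86_EFLAGS_IF;

    printf("iopl=%#" PRIx32 " rflags=%#" PRIx64 "\n", iopl, regs.rflags);
    return 0;
}

With such a layout, regs->rflags and regs->eflags name the same 64-bit
storage, which is exactly the ambiguity this series works towards removing,
while regs->_eflags is guaranteed to remain a 32-bit access whatever happens
to the alias.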
Attachment: x86-regnames-misc.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel