[Xen-devel] [PATCH v4 22/27] x86: move compat_iret alongside its non-compat variant

No functional change.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/pv/iret.c             | 120 ++++++++++++++++++++++++++++++++++++
 xen/arch/x86/x86_64/compat/traps.c | 120 ------------------------------------
 2 files changed, 120 insertions(+), 120 deletions(-)
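
Note (for review only, not part of the commit): compat_iret() below consumes
a 10-slot, 40-byte frame from the guest stack. The layout is implied by the
"(u32 *)regs->rsp + N" accesses and the "kernel_sp - 40" arithmetic in the
moved code; the struct and its name are illustrative only and do not exist
in the tree:

    #include <stdint.h>

    /* Hypothetical view of the frame a 32-bit PV guest hands to
     * HYPERVISOR_iret, lowest address (slot 0) first. */
    struct compat_iret_frame {
        uint32_t eax;              /* slot 0: EAX, clobbered by the hypercall */
        uint32_t eip, cs, eflags;  /* slots 1-3: return context */
        uint32_t esp, ss;          /* slots 4-5: used for ring 2/3 returns */
        uint32_t es, ds, fs, gs;   /* slots 6-9: VM86 tail, 40 bytes total */
    };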
diff --git a/xen/arch/x86/pv/iret.c b/xen/arch/x86/pv/iret.c
index 358ae7cf08..013e619b3f 100644
--- a/xen/arch/x86/pv/iret.c
+++ b/xen/arch/x86/pv/iret.c
@@ -61,6 +61,126 @@ unsigned long do_iret(void)
     return 0;
 }
 
+unsigned int compat_iret(void)
+{
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    struct vcpu *v = current;
+    u32 eflags;
+
+    /* Trim stack pointer to 32 bits. */
+    regs->rsp = (u32)regs->rsp;
+
+    /* Restore EAX (clobbered by hypercall). */
+    if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    /* Restore CS and EIP. */
+    if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
+         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    /*
+     * Fix up and restore EFLAGS. We fix up in a local staging area
+     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
+     */
+    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    if ( VM_ASSIST(v->domain, architectural_iopl) )
+        v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;
+
+    regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
+
+    if ( unlikely(eflags & X86_EFLAGS_VM) )
+    {
+        /*
+         * Cannot return to VM86 mode: inject a GP fault instead. Note that
+         * the GP fault is reported on the first VM86 mode instruction, not on
+         * the IRET (which is why we can simply leave the stack frame as-is
+         * (except for perhaps having to copy it), which in turn seems better
+         * than teaching create_bounce_frame() to needlessly deal with vm86
+         * mode frames).
+         */
+        const struct trap_info *ti;
+        u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
+        unsigned int i;
+        int rc = 0;
+
+        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
+                 regs->esp, ksp);
+        if ( ksp < regs->esp )
+        {
+            for (i = 1; i < 10; ++i)
+            {
+                rc |= __get_user(x, (u32 *)regs->rsp + i);
+                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
+            }
+        }
+        else if ( ksp > regs->esp )
+        {
+            for ( i = 9; i > 0; --i )
+            {
+                rc |= __get_user(x, (u32 *)regs->rsp + i);
+                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
+            }
+        }
+        if ( rc )
+        {
+            domain_crash(v->domain);
+            return 0;
+        }
+        regs->esp = ksp;
+        regs->ss = v->arch.pv_vcpu.kernel_ss;
+
+        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
+        if ( TI_GET_IF(ti) )
+            eflags &= ~X86_EFLAGS_IF;
+        regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
+                          X86_EFLAGS_NT|X86_EFLAGS_TF);
+        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
+        {
+            domain_crash(v->domain);
+            return 0;
+        }
+        regs->eip = ti->address;
+        regs->cs = ti->cs;
+    }
+    else if ( unlikely(ring_0(regs)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+    else if ( ring_1(regs) )
+        regs->esp += 16;
+    /* Return to ring 2/3: restore ESP and SS. */
+    else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
+              __get_user(regs->esp, (u32 *)regs->rsp + 4) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    /* Restore upcall mask from supplied EFLAGS.IF. */
+    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
+
+    async_exception_cleanup(v);
+
+    /*
+     * The hypercall exit path will overwrite EAX with this return
+     * value.
+     */
+    return regs->eax;
+}
+
 /*
  * Local variables:
  * mode: C
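
Note (for review only): a 32-bit PV guest reaches compat_iret() above by
pushing the frame sketched after the diffstat and issuing the iret hypercall.
A minimal guest-side illustration, assuming the legacy int $0x82 entry rather
than the hypercall page; the wrapper name is hypothetical:

    #define __HYPERVISOR_iret 23   /* from xen/include/public/xen.h */

    /* Does not return on success: Xen resumes the context in the frame. */
    static inline void compat_hypervisor_iret(void)
    {
        asm volatile ( "int $0x82" :: "a" (__HYPERVISOR_iret) : "memory" );
    }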
diff --git a/xen/arch/x86/x86_64/compat/traps.c b/xen/arch/x86/x86_64/compat/traps.c
index add4af3403..df691f0ae3 100644
--- a/xen/arch/x86/x86_64/compat/traps.c
+++ b/xen/arch/x86/x86_64/compat/traps.c
@@ -66,126 +66,6 @@ void compat_show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs,
     printk("\n");
 }
 
-unsigned int compat_iret(void)
-{
-    struct cpu_user_regs *regs = guest_cpu_user_regs();
-    struct vcpu *v = current;
-    u32 eflags;
-
-    /* Trim stack pointer to 32 bits. */
-    regs->rsp = (u32)regs->rsp;
-
-    /* Restore EAX (clobbered by hypercall). */
-    if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
-    {
-        domain_crash(v->domain);
-        return 0;
-    }
-
-    /* Restore CS and EIP. */
-    if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
-         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
-    {
-        domain_crash(v->domain);
-        return 0;
-    }
-
-    /*
-     * Fix up and restore EFLAGS. We fix up in a local staging area
-     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
-     */
-    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
-    {
-        domain_crash(v->domain);
-        return 0;
-    }
-
-    if ( VM_ASSIST(v->domain, architectural_iopl) )
-        v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;
-
-    regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
-
-    if ( unlikely(eflags & X86_EFLAGS_VM) )
-    {
-        /*
-         * Cannot return to VM86 mode: inject a GP fault instead. Note that
-         * the GP fault is reported on the first VM86 mode instruction, not on
-         * the IRET (which is why we can simply leave the stack frame as-is
-         * (except for perhaps having to copy it), which in turn seems better
-         * than teaching create_bounce_frame() to needlessly deal with vm86
-         * mode frames).
-         */
-        const struct trap_info *ti;
-        u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
-        unsigned int i;
-        int rc = 0;
-
-        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
-                 regs->esp, ksp);
-        if ( ksp < regs->esp )
-        {
-            for (i = 1; i < 10; ++i)
-            {
-                rc |= __get_user(x, (u32 *)regs->rsp + i);
-                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
-            }
-        }
-        else if ( ksp > regs->esp )
-        {
-            for ( i = 9; i > 0; --i )
-            {
-                rc |= __get_user(x, (u32 *)regs->rsp + i);
-                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
-            }
-        }
-        if ( rc )
-        {
-            domain_crash(v->domain);
-            return 0;
-        }
-        regs->esp = ksp;
-        regs->ss = v->arch.pv_vcpu.kernel_ss;
-
-        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
-        if ( TI_GET_IF(ti) )
-            eflags &= ~X86_EFLAGS_IF;
-        regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
-                          X86_EFLAGS_NT|X86_EFLAGS_TF);
-        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
-        {
-            domain_crash(v->domain);
-            return 0;
-        }
-        regs->eip = ti->address;
-        regs->cs = ti->cs;
-    }
-    else if ( unlikely(ring_0(regs)) )
-    {
-        domain_crash(v->domain);
-        return 0;
-    }
-    else if ( ring_1(regs) )
-        regs->esp += 16;
-    /* Return to ring 2/3: restore ESP and SS. */
-    else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
-              __get_user(regs->esp, (u32 *)regs->rsp + 4) )
-    {
-        domain_crash(v->domain);
-        return 0;
-    }
-
-    /* Restore upcall mask from supplied EFLAGS.IF. */
-    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
-
-    async_exception_cleanup(v);
-
-    /*
-     * The hypercall exit path will overwrite EAX with this return
-     * value.
-     */
-    return regs->eax;
-}
-
 static long compat_register_guest_callback(
     struct compat_callback_register *reg)
 {
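
Note (for review only): the pair of for-loops in the VM86 branch is an
open-coded overlap-safe copy between two windows of the same guest stack.
A hypothetical helper showing the same direction rule, copying ascending
when the destination lies below the source and descending otherwise, just
as memmove() must:

    #include <stdint.h>

    static void copy_slots_overlap_safe(uint32_t *dst, const uint32_t *src,
                                        unsigned int first, unsigned int last)
    {
        unsigned int i;

        if ( dst < src )          /* moving down: copy lowest slot first */
            for ( i = first; i <= last; ++i )
                dst[i] = src[i];
        else if ( dst > src )     /* moving up: copy highest slot first */
            for ( i = last + 1; i-- > first; )
                dst[i] = src[i];
    }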
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel