
[Xen-changelog] [xen-unstable] Provide basic Xen PM infrastructure



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1184165234 -3600
# Node ID 24379dde8ac4b58389121c38ee56d46e7ea84b17
# Parent  c6491ed12f840e97ea76419b61a0e1735845121c
Provide basic Xen PM infrastructure

Basic infrastructure for Xen S3 support, with common CPU
context save/restore logic for both 32-bit and 64-bit.

Wakeup code is split into two parts:
    - the first part is placed after the trampoline code, so it can share
      the tricks used there, such as the relocation base and identity mapping
    - the second part lives in the Xen code segment and performs the
      actual CPU context restore

Signed-off-by: Ke Yu <ke.yu@xxxxxxxxx>
Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/acpi/Makefile        |    1 
 xen/arch/x86/acpi/power.c         |  258 ++++++++++++++++++++++++++++++++++++
 xen/arch/x86/acpi/suspend.c       |   85 ++++++++++++
 xen/arch/x86/acpi/wakeup_prot.S   |  267 ++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/boot/Makefile        |    3 
 xen/arch/x86/boot/head.S          |    2 
 xen/arch/x86/boot/wakeup.S        |  212 ++++++++++++++++++++++++++++++
 xen/arch/x86/dmi_scan.c           |    1 
 xen/arch/x86/platform_hypercall.c |   17 ++
 xen/include/asm-x86/acpi.h        |    8 +
 xen/include/asm-x86/config.h      |    7 
 xen/include/public/platform.h     |   27 +++
 12 files changed, 884 insertions(+), 4 deletions(-)
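
As a rough illustration of how dom0 is expected to drive the new platform
ops below: it registers the PM1x ports once via XENPF_set_acpi_sleep and
writes the returned wakeup-stub address into the FACS waking vector. The
sketch is hypothetical and not part of this patch; HYPERVISOR_platform_op(),
acpi_set_waking_vector() and the fadt_* port variables are assumed dom0-side
helpers.

    /* Hypothetical dom0-side registration, run once during ACPI setup.
     * fadt_pm1{a,b}_{cnt,evt}_port stand in for whatever dom0's ACPI
     * code parsed out of the FADT. */
    static int xen_register_acpi_sleep(void)
    {
        struct xen_platform_op op = {
            .cmd = XENPF_set_acpi_sleep,
            .interface_version = XENPF_INTERFACE_VERSION,
        };

        op.u.set_acpi_sleep.pm1a_cnt_port = fadt_pm1a_cnt_port;
        op.u.set_acpi_sleep.pm1b_cnt_port = fadt_pm1b_cnt_port;
        op.u.set_acpi_sleep.pm1a_evt_port = fadt_pm1a_evt_port;
        op.u.set_acpi_sleep.pm1b_evt_port = fadt_pm1b_evt_port;

        if (HYPERVISOR_platform_op(&op) != 0)
            return -EIO;

        /* Xen hands back the physical address of its real-mode wakeup
         * stub; dom0 writes it into the FACS waking vector so the
         * firmware resumes into Xen rather than into dom0. */
        return acpi_set_waking_vector(op.u.set_acpi_sleep.xen_waking_vec);
    }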

diff -r c6491ed12f84 -r 24379dde8ac4 xen/arch/x86/acpi/Makefile
--- a/xen/arch/x86/acpi/Makefile        Wed Jul 11 13:49:11 2007 +0100
+++ b/xen/arch/x86/acpi/Makefile        Wed Jul 11 15:47:14 2007 +0100
@@ -1,1 +1,2 @@ obj-y += boot.o
 obj-y += boot.o
+obj-y += power.o suspend.o wakeup_prot.o
diff -r c6491ed12f84 -r 24379dde8ac4 xen/arch/x86/acpi/power.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/acpi/power.c Wed Jul 11 15:47:14 2007 +0100
@@ -0,0 +1,258 @@
+/* drivers/acpi/sleep/power.c - PM core functionality for Xen
+ *
+ * Copyrights from Linux side:
+ * Copyright (c) 2000-2003 Patrick Mochel
+ * Copyright (C) 2001-2003 Pavel Machek <pavel@xxxxxxx>
+ * Copyright (c) 2003 Open Source Development Lab
+ * Copyright (c) 2004 David Shaohua Li <shaohua.li@xxxxxxxxx>
+ * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@xxxxxxxxx>
+ *
+ * Slimmed down and adapted with Xen-specific support.
+ */
+
+#include <xen/config.h>
+#include <asm/io.h>
+#include <asm/acpi.h>
+#include <xen/acpi.h>
+#include <xen/errno.h>
+#include <xen/iocap.h>
+#include <xen/sched.h>
+#include <asm/acpi.h>
+#include <asm/irq.h>
+#include <asm/init.h>
+#include <xen/spinlock.h>
+#include <xen/sched.h>
+#include <xen/domain.h>
+#include <xen/console.h>
+#include <public/platform.h>
+
+#define pmprintk(_l, _f, _a...) printk(_l "<PM>" _f, ## _a )
+
+u8 sleep_states[ACPI_S_STATE_COUNT];
+DEFINE_SPINLOCK(pm_lock);
+
+struct acpi_sleep_info {
+    uint16_t pm1a_cnt;
+    uint16_t pm1b_cnt;
+    uint16_t pm1a_evt;
+    uint16_t pm1b_evt;
+    uint16_t pm1a_cnt_val;
+    uint16_t pm1b_cnt_val;
+    uint32_t sleep_state;
+} acpi_sinfo;
+
+extern void do_suspend_lowlevel(void);
+
+static char *acpi_states[ACPI_S_STATE_COUNT] =
+{
+    [ACPI_STATE_S1] = "standby",
+    [ACPI_STATE_S3] = "mem",
+    [ACPI_STATE_S4] = "disk",
+};
+
+unsigned long acpi_video_flags;
+unsigned long saved_videomode;
+
+/* XXX: Add suspend failure recovery later */
+static int device_power_down(void)
+{
+    console_suspend();
+
+    time_suspend();
+
+    i8259A_suspend();
+    
+    ioapic_suspend();
+    
+    lapic_suspend();
+
+    return 0;
+}
+
+static void device_power_up(void)
+{
+    lapic_resume();
+    
+    ioapic_resume();
+
+    i8259A_resume();
+    
+    time_resume();
+
+    console_resume();
+}
+
+/* Main entry point for Xen-specific suspend/resume */
+int enter_state(u32 state)
+{
+    struct domain *d;
+    unsigned long flags;
+    int error;
+
+    if (state <= ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
+        return -EINVAL;
+
+    /* Sync lazy state on this cpu */
+    __sync_lazy_execstate();
+    pmprintk(XENLOG_INFO, "Flush lazy state\n");
+
+    if (!spin_trylock(&pm_lock))
+        return -EBUSY;
+    
+    for_each_domain(d)
+        if (d->domain_id != 0)
+            domain_pause(d);
+
+    pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n",
+        acpi_states[state]);
+
+    local_irq_save(flags);
+
+    if ((error = device_power_down()))
+    {
+        printk(XENLOG_ERR "Some devices failed to power down\n");
+        goto Done;
+    }
+
+    ACPI_FLUSH_CPU_CACHE();
+
+    switch (state)
+    {
+        case ACPI_STATE_S3:
+            do_suspend_lowlevel();
+            break;
+        default:
+            error = -EINVAL;
+            break;
+    }
+
+    pmprintk(XENLOG_INFO, "Back to C!\n");
+
+    device_power_up();
+
+    pmprintk(XENLOG_INFO, "PM: Finishing wakeup.\n");
+
+ Done:
+    local_irq_restore(flags);
+
+    for_each_domain(d)
+       if (d->domain_id!=0)
+           domain_unpause(d);
+
+    spin_unlock(&pm_lock);
+    return error;
+
+}
+
+/*
+ * Xen only needs the addresses of the PM1x control registers; the
+ * ACPI interpreter stays in dom0. The address of Xen's wakeup stub
+ * is returned, and dom0 then writes that address into the FACS.
+ */
+int set_acpi_sleep_info(struct xenpf_set_acpi_sleep *info)
+{
+    if (acpi_sinfo.pm1a_cnt)
+        pmprintk(XENLOG_WARNING, "Multiple setting on acpi sleep info\n");
+
+    acpi_sinfo.pm1a_cnt = info->pm1a_cnt_port;
+    acpi_sinfo.pm1b_cnt = info->pm1b_cnt_port;
+    acpi_sinfo.pm1a_evt = info->pm1a_evt_port;
+    acpi_sinfo.pm1b_evt = info->pm1b_evt_port;
+    info->xen_waking_vec = (uint64_t)bootsym_phys(wakeup_start);
+
+    pmprintk(XENLOG_INFO, "pm1a[%x],pm1b[%x],pm1a_e[%x],pm1b_e[%x]"
+                       "wake[%"PRIx64"]",
+                       acpi_sinfo.pm1a_cnt, acpi_sinfo.pm1b_cnt,
+                       acpi_sinfo.pm1a_evt, acpi_sinfo.pm1b_evt,
+                       info->xen_waking_vec);
+    return 0;
+}
+
+/*
+ * Dom0 issues this hypercall in place of writing pm1a_cnt. Xen then
+ * takes over control and actually puts the system into the sleep state.
+ * Video flags and mode are also passed here, in case the user relies on
+ * "acpi_sleep=***" for video resume.
+ *
+ * A guest may issue a two-phase write to PM1x_CNT to work around
+ * poorly implemented hardware, so it is better to keep that logic
+ * here. The two writes are distinguished by whether the sleep-enable
+ * bit is set.
+ */
+int acpi_enter_sleep(struct xenpf_enter_acpi_sleep *sleep)
+{
+    if (!IS_PRIV(current->domain) || !acpi_sinfo.pm1a_cnt)
+        return -EPERM;
+
+    /* Sanity check */
+    if (acpi_sinfo.pm1b_cnt_val &&
+        ((sleep->pm1a_cnt_val ^ sleep->pm1b_cnt_val) &
+        ACPI_BITMASK_SLEEP_ENABLE))
+    {
+        pmprintk(XENLOG_ERR, "Mismatched pm1a/pm1b setting\n");
+        return -EINVAL;
+    }
+
+    /* Write #1 */
+    if (!(sleep->pm1a_cnt_val & ACPI_BITMASK_SLEEP_ENABLE))
+    {
+        outw((u16)sleep->pm1a_cnt_val, acpi_sinfo.pm1a_cnt);
+        if (acpi_sinfo.pm1b_cnt)
+            outw((u16)sleep->pm1b_cnt_val, acpi_sinfo.pm1b_cnt);
+        return 0;
+    }
+
+    /* Write #2 */
+    acpi_sinfo.pm1a_cnt_val = sleep->pm1a_cnt_val;
+    acpi_sinfo.pm1b_cnt_val = sleep->pm1b_cnt_val;
+    acpi_sinfo.sleep_state = sleep->sleep_state;
+    acpi_video_flags = sleep->video_flags;
+    saved_videomode = sleep->video_mode;
+
+    return enter_state(acpi_sinfo.sleep_state);
+}
+
+static int acpi_get_wake_status(void)
+{
+    uint16_t val;
+
+    /* Wake status is bit 15 of the PM1 status register (ACPI 3.0 spec). */
+    val = inw(acpi_sinfo.pm1a_evt) | inw(acpi_sinfo.pm1b_evt);
+    val &= ACPI_BITMASK_WAKE_STATUS;
+    val >>= ACPI_BITPOSITION_WAKE_STATUS;
+    return val;
+}
+
+/* The system is actually put into the sleep state by this stub */
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+{
+    ACPI_FLUSH_CPU_CACHE();
+
+    outw((u16)acpi_sinfo.pm1a_cnt_val, acpi_sinfo.pm1a_cnt);
+    if (acpi_sinfo.pm1b_cnt)
+        outw((u16)acpi_sinfo.pm1b_cnt_val, acpi_sinfo.pm1b_cnt);
+    
+    /* Wait until we enter sleep state, and spin until we wake */
+    while (!acpi_get_wake_status());
+    return_ACPI_STATUS(AE_OK);
+}
+
+static int __init acpi_sleep_init(void)
+{
+    int i = 0; 
+
+    pmprintk(XENLOG_INFO, "ACPI (supports");
+    for (i = 0; i < ACPI_S_STATE_COUNT; i++)
+    {
+        if (i == ACPI_STATE_S3)
+        {
+            sleep_states[i] = 1;
+            printk(" S%d", i);
+        }
+        else
+            sleep_states[i] = 0;
+    }
+    printk(")\n");
+    return 0;
+}
+__initcall(acpi_sleep_init);
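
To make the two-phase PM1x_CNT convention above concrete, here is a sketch
of the dom0 suspend path it implies. Again hypothetical: HYPERVISOR_platform_op()
is the assumed hypercall wrapper, and the SLP_TYP values would really come
from evaluating the firmware's _S3 package rather than being hard-coded.

    /* Hypothetical dom0-side S3 entry. slp_typ_a/slp_typ_b are the
     * sleep-type values obtained from the DSDT _S3 package. */
    static int xen_acpi_enter_s3(u16 slp_typ_a, u16 slp_typ_b)
    {
        struct xen_platform_op op = {
            .cmd = XENPF_enter_acpi_sleep,
            .interface_version = XENPF_INTERFACE_VERSION,
        };
        u16 pm1a = slp_typ_a << 10;   /* SLP_TYP lives in bits 10-12 */
        u16 pm1b = slp_typ_b << 10;

        op.u.enter_acpi_sleep.sleep_state = 3;   /* ACPI S3 */
        op.u.enter_acpi_sleep.video_flags = 0;   /* s3_bios/s3_mode, if any */
        op.u.enter_acpi_sleep.video_mode  = 0;

        /* Write #1: SLP_EN clear -- Xen just forwards the values to the
         * PM1x control ports. */
        op.u.enter_acpi_sleep.pm1a_cnt_val = pm1a;
        op.u.enter_acpi_sleep.pm1b_cnt_val = pm1b;
        if (HYPERVISOR_platform_op(&op) != 0)
            return -EIO;

        /* Write #2: SLP_EN (bit 13, ACPI_BITMASK_SLEEP_ENABLE) set --
         * Xen latches the values, pauses domains, saves CPU context and
         * only then writes PM1x itself from acpi_enter_sleep_state(). */
        op.u.enter_acpi_sleep.pm1a_cnt_val = pm1a | ACPI_BITMASK_SLEEP_ENABLE;
        op.u.enter_acpi_sleep.pm1b_cnt_val = pm1b | ACPI_BITMASK_SLEEP_ENABLE;
        return HYPERVISOR_platform_op(&op);
    }
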
diff -r c6491ed12f84 -r 24379dde8ac4 xen/arch/x86/acpi/suspend.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/acpi/suspend.c       Wed Jul 11 15:47:14 2007 +0100
@@ -0,0 +1,85 @@
+/*
+ * Suspend support specific for i386.
+ *
+ * Distribute under GPLv2
+ *
+ * Copyright (c) 2002 Pavel Machek <pavel@xxxxxxx>
+ * Copyright (c) 2001 Patrick Mochel <mochel@xxxxxxxx>
+ */
+#include <xen/config.h>
+#include <xen/acpi.h>
+#include <xen/smp.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/flushtlb.h>
+#include <asm/hvm/hvm.h>
+#include <asm/hvm/support.h>
+#include <asm/i387.h>
+
+/* The following context save/restore happens on the real context
+ * of the current vcpu, with a lazy state sync forced earlier.
+ */
+#if defined(CONFIG_X86_64)
+unsigned long saved_lstar, saved_cstar;
+#endif
+void save_rest_processor_state(void)
+{
+    /*
+     * Net effect of unlazy_fpu is to set cr0.ts and thus there's no
+     * need to restore fpu after resume.
+     */
+    if (!is_idle_vcpu(current))
+        unlazy_fpu(current);
+
+#if defined(CONFIG_X86_64)
+    rdmsrl(MSR_CSTAR, saved_cstar);
+    rdmsrl(MSR_LSTAR, saved_lstar);
+#endif
+
+    bootsym(video_flags) = acpi_video_flags;
+    bootsym(video_mode) = saved_videomode;
+}
+
+#define loaddebug(_v,_reg) \
+    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
+
+void restore_rest_processor_state(void)
+{
+    int cpu = smp_processor_id();
+    struct tss_struct *t = &init_tss[cpu];
+    struct vcpu *v = current;
+
+    /* Keep the scary comment inherited from Linux, to be on the safe side. */
+    set_tss_desc(cpu, t);    /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has a concept of a busy TSS or some similar stupidity. */
+
+    load_TR(cpu);
+
+#if defined(CONFIG_X86_64)
+    /* Recover syscall MSRs */
+    wrmsrl(MSR_LSTAR, saved_lstar);
+    wrmsrl(MSR_CSTAR, saved_cstar);
+    wrmsr(MSR_STAR, 0, (FLAT_RING3_CS32<<16) | __HYPERVISOR_CS);
+    wrmsr(MSR_SYSCALL_MASK, EF_VM|EF_RF|EF_NT|EF_DF|EF_IE|EF_TF, 0U);    
+#else /* !defined(CONFIG_X86_64) */
+    if (supervisor_mode_kernel && cpu_has_sep)
+        wrmsr(MSR_IA32_SYSENTER_ESP, &t->esp1, 0);
+#endif
+
+    /* Maybe load the debug registers. */
+    if ( !is_idle_vcpu(v) && unlikely(v->arch.guest_context.debugreg[7]) )
+    {
+        loaddebug(&v->arch.guest_context, 0);
+        loaddebug(&v->arch.guest_context, 1);
+        loaddebug(&v->arch.guest_context, 2);
+        loaddebug(&v->arch.guest_context, 3);
+        /* no 4 and 5 */
+        loaddebug(&v->arch.guest_context, 6);
+        loaddebug(&v->arch.guest_context, 7);
+    }
+
+    /* No need to restore the FPU here; just set CR0.TS so its first use traps. */
+    stts();
+
+    mtrr_ap_init();
+    mcheck_init(&boot_cpu_data);
+}
diff -r c6491ed12f84 -r 24379dde8ac4 xen/arch/x86/acpi/wakeup_prot.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/acpi/wakeup_prot.S   Wed Jul 11 15:47:14 2007 +0100
@@ -0,0 +1,267 @@
+        .text
+
+#include <xen/config.h>
+#include <xen/multiboot.h>
+#include <public/xen.h>
+#include <asm/asm_defns.h>
+#include <asm/desc.h>
+#include <asm/page.h>
+#include <asm/msr.h>
+
+#if defined(__x86_64__)
+
+        .code64
+
+#define GREG(x)         %r##x
+#define SAVED_GREG(x)   saved_r##x(%rip)
+#define DECLARE_GREG(x) saved_r##x:     .quad   0
+#define SAVE_GREG(x)    movq GREG(x), SAVED_GREG(x)
+#define LOAD_GREG(x)    movq SAVED_GREG(x), GREG(x)
+
+#define REF(x)          x(%rip)
+
+#define RDMSR(ind, m)                   \
+        xorq    %rdx, %rdx;             \
+        mov     $ind, %ecx;             \
+        rdmsr;                          \
+        shlq    $0x20, %rdx;              \
+        orq     %rax, %rdx;             \
+        movq    %rdx, m(%rip);
+
+#define WRMSR(ind, m)                   \
+        mov     $ind, %ecx;             \
+        movq    m(%rip), %rdx;          \
+        mov     %edx, %eax;             \
+        shrq    $0x20, %rdx;              \
+        wrmsr;
+
+#else /* !defined(__x86_64__) */
+
+        .code32
+
+#define GREG(x)         %e##x
+#define SAVED_GREG(x)   saved_e##x
+#define DECLARE_GREG(x) saved_e##x:     .long   0
+#define SAVE_GREG(x)    movl GREG(x), SAVED_GREG(x)
+#define LOAD_GREG(x)    movl SAVED_GREG(x), GREG(x)
+
+#define REF(x)          x
+
+#endif
+
+ENTRY(do_suspend_lowlevel)
+
+        SAVE_GREG(sp)
+        SAVE_GREG(ax)
+        SAVE_GREG(bx)
+        SAVE_GREG(cx)
+        SAVE_GREG(dx)
+        SAVE_GREG(bp)
+        SAVE_GREG(si)
+        SAVE_GREG(di)
+
+#if defined(__x86_64__)
+
+        SAVE_GREG(8)     # save r8...r15
+        SAVE_GREG(9)
+        SAVE_GREG(10)
+        SAVE_GREG(11)
+        SAVE_GREG(12)
+        SAVE_GREG(13)
+        SAVE_GREG(14)
+        SAVE_GREG(15)
+        pushfq;
+        popq    SAVED_GREG(flags)
+
+        mov     %cr8, GREG(ax)
+        mov     GREG(ax), REF(saved_cr8)
+
+        RDMSR(MSR_FS_BASE, saved_fs_base)
+        RDMSR(MSR_GS_BASE, saved_gs_base)
+        RDMSR(MSR_SHADOW_GS_BASE, saved_kernel_gs_base)
+
+#else /* !defined(__x86_64__) */
+
+        pushfl;
+        popl    SAVED_GREG(flags)
+
+#endif
+
+        mov     %ds, REF(saved_ds)
+        mov     %es, REF(saved_es)
+        mov     %fs, REF(saved_fs)
+        mov     %gs, REF(saved_gs)
+        mov     %ss, REF(saved_ss)
+
+        sgdt    REF(saved_gdt)
+        sidt    REF(saved_idt)
+        sldt    REF(saved_ldt)
+
+        mov     %cr0, GREG(ax)
+        mov     GREG(ax), REF(saved_cr0)
+
+        mov     %cr3, GREG(ax)
+        mov     GREG(ax), REF(saved_cr3)
+
+        call    save_rest_processor_state
+
+#if defined(__x86_64__)
+
+        mov     $3, %rdi
+        xor     %eax, %eax
+
+#else /* !defined(__x86_64__) */
+
+        push    $3
+
+#endif
+
+        /* enter sleep state physically */
+        call    acpi_enter_sleep_state
+        jmp     __ret_point
+
+        .align  16
+        .globl  __ret_point
+__ret_point:
+
+        /* mmu_cr4_features contains latest cr4 setting */
+        mov     REF(mmu_cr4_features), GREG(ax)
+        mov     GREG(ax), %cr4
+
+        mov     REF(saved_cr3), GREG(ax)
+        mov     GREG(ax), %cr3
+
+        mov     REF(saved_cr0), GREG(ax)
+        mov     GREG(ax), %cr0
+
+        lgdt    REF(saved_gdt)
+        lidt    REF(saved_idt)
+        lldt    REF(saved_ldt)
+
+        mov     REF(saved_ss), %ss
+        LOAD_GREG(sp)
+
+#if defined(__x86_64__)
+
+        mov     REF(saved_cr8), %rax
+        mov     %rax, %cr8
+
+        pushq   SAVED_GREG(flags)
+        popfq
+
+        /* The idle vcpu does not need its segment selectors reloaded, since
+         * they may contain stale values from other domains, and reloading
+         * them could fault because there is no matching GDT entry.
+         */
+        mov     $(STACK_SIZE - 8), %rax
+        or      %rsp, %rax
+        and     $~7, %rax
+        mov     (%rax), %rax
+        mov     0x10(%rax), %rax
+        cmpw    $0x7fff, (%rax)
+        je      1f
+
+        /* These selectors came from the guest, and thus need reloading */
+        mov     REF(saved_ds), %ds
+        mov     REF(saved_es), %es
+        mov     REF(saved_fs), %fs
+
+        /* gs load is special */
+        mov     REF(saved_gs), %rsi
+        mov     $3, %rdi        # SEGBASE_GS_USER_SEL
+        call    do_set_segment_base
+
+1:
+        # MSR restore
+        WRMSR(MSR_FS_BASE, saved_fs_base)
+        WRMSR(MSR_GS_BASE, saved_gs_base)
+        WRMSR(MSR_SHADOW_GS_BASE, saved_kernel_gs_base)
+
+#else /* !defined(__x86_64__) */
+
+        pushl   SAVED_GREG(flags)
+        popfl
+
+        /* No fs/gs reload here; they are already saved at the bottom of the stack */
+        mov     REF(saved_ds), %ds
+        mov     REF(saved_es), %es
+
+#endif
+
+        call restore_rest_processor_state
+
+        LOAD_GREG(bp)
+        LOAD_GREG(ax)
+        LOAD_GREG(bx)
+        LOAD_GREG(cx)
+        LOAD_GREG(dx)
+        LOAD_GREG(si)
+        LOAD_GREG(di)
+#if defined(__x86_64__)
+        LOAD_GREG(8)     # restore r8...r15
+        LOAD_GREG(9)
+        LOAD_GREG(10)
+        LOAD_GREG(11)
+        LOAD_GREG(12)
+        LOAD_GREG(13)
+        LOAD_GREG(14)
+        LOAD_GREG(15)
+#endif
+        ret 
+
+.data
+        .align 16
+saved_ds:        .word   0
+saved_es:        .word   0
+saved_ss:        .word   0
+saved_gs:        .word   0
+saved_fs:        .word   0
+
+        .align 4
+        .globl   saved_magic
+saved_magic:     .long   0x9abcdef0
+
+        .align 8
+DECLARE_GREG(sp)
+DECLARE_GREG(bp)
+DECLARE_GREG(ax)
+DECLARE_GREG(bx)
+DECLARE_GREG(cx)
+DECLARE_GREG(dx)
+DECLARE_GREG(si)
+DECLARE_GREG(di)
+DECLARE_GREG(flags)
+
+#if defined(__x86_64__)
+
+DECLARE_GREG(8)
+DECLARE_GREG(9)
+DECLARE_GREG(10)
+DECLARE_GREG(11)
+DECLARE_GREG(12)
+DECLARE_GREG(13)
+DECLARE_GREG(14)
+DECLARE_GREG(15)
+
+saved_gdt:      .quad   0,0
+saved_idt:      .quad   0,0
+saved_ldt:      .quad   0,0
+
+saved_cr0:      .quad   0
+saved_cr3:      .quad   0
+saved_cr8:      .quad   0
+
+saved_gs_base:  .quad   0
+saved_fs_base:  .quad   0
+saved_kernel_gs_base:   .quad   0
+
+#else /* !defined(__x86_64__) */
+
+saved_gdt:      .long   0,0
+saved_idt:      .long   0,0
+saved_ldt:      .long   0
+
+saved_cr0:      .long   0
+saved_cr3:      .long   0
+
+#endif 
diff -r c6491ed12f84 -r 24379dde8ac4 xen/arch/x86/boot/Makefile
--- a/xen/arch/x86/boot/Makefile        Wed Jul 11 13:49:11 2007 +0100
+++ b/xen/arch/x86/boot/Makefile        Wed Jul 11 15:47:14 2007 +0100
@@ -1,3 +1,4 @@ obj-y += head.o
 obj-y += head.o
 
-head.o: head.S $(TARGET_SUBARCH).S trampoline.S mem.S video.S cmdline.S edd.S
+head.o: head.S $(TARGET_SUBARCH).S trampoline.S mem.S video.S \
+       cmdline.S edd.S wakeup.S
diff -r c6491ed12f84 -r 24379dde8ac4 xen/arch/x86/boot/head.S
--- a/xen/arch/x86/boot/head.S  Wed Jul 11 13:49:11 2007 +0100
+++ b/xen/arch/x86/boot/head.S  Wed Jul 11 15:47:14 2007 +0100
@@ -175,9 +175,11 @@ 1:      stosl   /* low mappings cover up
 
 #include "cmdline.S"
 
+        .align 16
         .globl trampoline_start, trampoline_end
 trampoline_start:
 #include "trampoline.S"
+#include "wakeup.S"
 trampoline_end:
 
         .text
diff -r c6491ed12f84 -r 24379dde8ac4 xen/arch/x86/boot/wakeup.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/boot/wakeup.S        Wed Jul 11 15:47:14 2007 +0100
@@ -0,0 +1,212 @@
+        .code16
+
+#undef wakesym
+/* Used in real mode, to calculate an offset within the current segment */
+#define wakesym(sym) (sym - wakeup_start)
+
+ENTRY(wakeup_start)
+        wakeup_code_start = .
+
+        cli
+        cld
+
+        # setup data segment
+        movw    %cs, %ax
+        movw    %ax, %ds
+        movw    %ax, %ss        # A stack required for BIOS call
+        movw    $wakesym(wakeup_stack), %sp
+
+        pushl   $0              # Kill dangerous flag early
+        popfl
+
+        # check magic number
+        movl    wakesym(real_magic), %eax
+        cmpl    $0x12345678, %eax
+        jne     bogus_real_magic
+
+        # for acpi_sleep=s3_bios
+        testl   $1, wakesym(video_flags)
+        jz      1f
+        lcall   $0xc000, $3
+        movw    %cs, %ax        # In case messed by BIOS
+        movw    %ax, %ds
+        movw    %ax, %ss        # Need this? How to ret if clobbered?
+
+1:      # for acpi_sleep=s3_mode
+        testl   $2, wakesym(video_flags)
+        jz      1f
+        movl    wakesym(video_mode), %eax
+        call    mode_setw
+
+1:      # Show some progress if VGA is resumed
+        movw    $0xb800, %ax
+        movw    %ax, %fs
+        movw    $0x0e00 + 'L', %fs:(0x10)
+
+        # The boot trampoline lives below 1MB; load its segment base into
+        # %fs so we can reference symbols in that area
+        movl    $BOOT_TRAMPOLINE, %eax
+        shrl    $4, %eax
+        movl    %eax, %fs
+        lidt    %fs:bootsym(idt_48)
+        lgdt    %fs:bootsym(gdt_48)
+
+        movw    $1, %ax
+        lmsw    %ax             # Turn on CR0.PE 
+        jmp     1f
+1:      ljmpl   $BOOT_CS32, $bootsym_phys(wakeup_32)
+
+/* This code uses an extended set of video mode numbers. These include:
+ * Aliases for standard modes
+ *      NORMAL_VGA (-1)
+ *      EXTENDED_VGA (-2)
+ *      ASK_VGA (-3)
+ * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
+ * of compatibility when extending the table. These are between 0x00 and 0xff.
+ */
+#define VIDEO_FIRST_MENU 0x0000
+
+/* Standard BIOS video modes (BIOS number + 0x0100) */
+#define VIDEO_FIRST_BIOS 0x0100
+
+/* VESA BIOS video modes (VESA number + 0x0200) */
+#define VIDEO_FIRST_VESA 0x0200
+
+/* Video7 special modes (BIOS number + 0x0900) */
+#define VIDEO_FIRST_V7 0x0900
+
+# Set the user-specified video mode (AX = mode ID) => CF=success
+mode_setw:
+        movw    %ax, %bx
+        cmpb    $VIDEO_FIRST_VESA>>8, %ah
+        jnc     check_vesaw
+        decb    %ah
+
+setbadw: clc
+        ret
+
+check_vesaw:
+        subb    $VIDEO_FIRST_VESA>>8, %bh
+        orw     $0x4000, %bx                    # Use linear frame buffer
+        movw    $0x4f02, %ax                    # VESA BIOS mode set call
+        int     $0x10
+        cmpw    $0x004f, %ax                    # AL=4f if implemented
+        jnz     _setbadw                        # AH=0 if OK
+
+        stc
+        ret
+
+_setbadw: jmp    setbadw
+
+bogus_real_magic:
+        movw    $0x0e00 + 'B', %fs:(0x12)
+        jmp     bogus_real_magic
+
+        .align 4
+real_magic:     .long 0x12345678
+         .globl video_mode, video_flags
+video_mode:     .long 0
+video_flags:    .long 0
+
+        .code32
+
+        # Now in protected mode, with paging disabled.
+        # Add an offset to any reference to Xen-specific symbols.
+
+wakeup_32:
+        mov     $BOOT_DS, %eax
+        mov     %eax, %ds
+        mov     %eax, %ss
+        mov     $bootsym_phys(wakeup_stack), %esp
+
+        # check saved magic again
+        mov     $sym_phys(saved_magic), %eax
+        add     bootsym_phys(trampoline_xen_phys_start), %eax
+        mov     (%eax), %eax
+        cmp     $0x9abcdef0, %eax
+        jne     bogus_saved_magic
+        
+        /* fpu init? */
+
+        /* Initialise CR4. */
+#if CONFIG_PAGING_LEVELS == 2
+        mov     $X86_CR4_PSE, %ecx
+#else
+        mov     $X86_CR4_PAE, %ecx
+#endif
+        mov     %ecx, %cr4
+
+        /* Load pagetable base register */
+        mov     $sym_phys(idle_pg_table),%eax
+        add     bootsym_phys(trampoline_xen_phys_start),%eax
+        mov     %eax,%cr3
+
+        /* Will cpuid feature change after resume? */
+#if CONFIG_PAGING_LEVELS != 2
+        /* Set up EFER (Extended Feature Enable Register). */
+        mov     bootsym_phys(cpuid_ext_features),%edi
+        test    $0x20100800,%edi /* SYSCALL/SYSRET, No Execute, Long Mode? */
+        jz      .Lskip_eferw
+        movl    $MSR_EFER,%ecx
+        rdmsr
+#if CONFIG_PAGING_LEVELS == 4
+        btsl    $_EFER_LME,%eax /* Long Mode      */
+        btsl    $_EFER_SCE,%eax /* SYSCALL/SYSRET */
+#endif
+        btl     $20,%edi        /* No Execute?    */
+        jnc     1f
+        btsl    $_EFER_NX,%eax  /* No Execute     */
+1:      wrmsr
+.Lskip_eferw:
+#endif
+
+        wbinvd
+
+        mov     $0x80050033,%eax /* hi-to-lo: PG,AM,WP,NE,ET,MP,PE */
+        mov     %eax,%cr0
+        jmp     1f
+1:
+
+#if defined(__x86_64__)
+
+        /* Now in compatibility mode. Long-jump to 64-bit mode */
+        ljmp    $BOOT_CS64, $bootsym_phys(wakeup_64)
+
+        .code64
+        .align  8
+        .word   0,0,0
+lgdt_descr:
+        .word   LAST_RESERVED_GDT_BYTE
+        .quad   gdt_table - FIRST_RESERVED_GDT_BYTE
+        
+wakeup_64:
+        lgdt    lgdt_descr(%rip)
+        mov     $(__HYPERVISOR_DS64), %eax
+        mov     %eax, %ds
+
+        # long jump to return point, with cs reload
+        rex64 ljmp    *ret_point(%rip)
+
+        .align 8
+ret_point:
+        .quad   __ret_point
+        .word   __HYPERVISOR_CS64
+
+#else /* !defined(__x86_64__) */
+        lgdt    gdt_descr
+        mov     $(__HYPERVISOR_DS), %eax
+        mov     %eax, %ds
+
+        ljmp    $(__HYPERVISOR_CS), $__ret_point
+#endif
+
+bogus_saved_magic:
+        movw    $0x0e00 + 'S', 0xb8014
+        jmp     bogus_saved_magic
+
+        .align  16
+wakeup_stack_begin:     # Stack grows down
+
+        .fill   PAGE_SIZE,1,0
+wakeup_stack:           # Just below end of first page in this section
+ENTRY(wakeup_end)
diff -r c6491ed12f84 -r 24379dde8ac4 xen/arch/x86/dmi_scan.c
--- a/xen/arch/x86/dmi_scan.c   Wed Jul 11 13:49:11 2007 +0100
+++ b/xen/arch/x86/dmi_scan.c   Wed Jul 11 15:47:14 2007 +0100
@@ -184,7 +184,6 @@ static __init int reset_videomode_after_
 static __init int reset_videomode_after_s3(struct dmi_blacklist *d)
 {
        /* See acpi_wakeup.S */
-       extern long acpi_video_flags;
        acpi_video_flags |= 2;
        return 0;
 }
diff -r c6491ed12f84 -r 24379dde8ac4 xen/arch/x86/platform_hypercall.c
--- a/xen/arch/x86/platform_hypercall.c Wed Jul 11 13:49:11 2007 +0100
+++ b/xen/arch/x86/platform_hypercall.c Wed Jul 11 15:47:14 2007 +0100
@@ -18,6 +18,7 @@
 #include <xen/console.h>
 #include <xen/iocap.h>
 #include <xen/guest_access.h>
+#include <xen/acpi.h>
 #include <asm/current.h>
 #include <public/platform.h>
 #include <asm/edd.h>
@@ -247,6 +248,22 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
         }
         break;
 
+#if 0
+    case XENPF_set_acpi_sleep:
+    {
+        ret = set_acpi_sleep_info(&op->u.set_acpi_sleep);
+        if (!ret && copy_to_guest(u_xenpf_op, op, 1))
+            ret = -EFAULT;
+    }
+    break;
+
+    case XENPF_enter_acpi_sleep:
+    {
+        ret = acpi_enter_sleep(&op->u.enter_acpi_sleep);
+    }
+    break;
+#endif
+
     default:
         ret = -ENOSYS;
         break;
diff -r c6491ed12f84 -r 24379dde8ac4 xen/include/asm-x86/acpi.h
--- a/xen/include/asm-x86/acpi.h        Wed Jul 11 13:49:11 2007 +0100
+++ b/xen/include/asm-x86/acpi.h        Wed Jul 11 15:47:14 2007 +0100
@@ -173,6 +173,14 @@ extern unsigned long acpi_wakeup_address
 /* early initialization routine */
 extern void acpi_reserve_bootmem(void);
 
+extern unsigned long acpi_video_flags;
+extern unsigned long saved_videomode;
+struct xenpf_set_acpi_sleep;
+struct xenpf_enter_acpi_sleep;
+extern int set_acpi_sleep_info(struct xenpf_set_acpi_sleep *info);
+extern int acpi_enter_sleep(struct xenpf_enter_acpi_sleep *sleep);
+extern int acpi_enter_state(u32 state);
+
 #endif /*CONFIG_ACPI_SLEEP*/
 
 extern u8 x86_acpiid_to_apicid[];
diff -r c6491ed12f84 -r 24379dde8ac4 xen/include/asm-x86/config.h
--- a/xen/include/asm-x86/config.h      Wed Jul 11 13:49:11 2007 +0100
+++ b/xen/include/asm-x86/config.h      Wed Jul 11 15:47:14 2007 +0100
@@ -25,9 +25,7 @@
 #define CONFIG_X86_PM_TIMER 1
 #define CONFIG_HPET_TIMER 1
 #define CONFIG_X86_MCE_P4THERMAL 1
-#define CONFIG_ACPI_NUMA 1
 #define CONFIG_NUMA 1
-#define CONFIG_ACPI_SRAT 1
 #define CONFIG_DISCONTIGMEM 1
 #define CONFIG_NUMA_EMU 1
 
@@ -36,6 +34,9 @@
 
 #define CONFIG_ACPI 1
 #define CONFIG_ACPI_BOOT 1
+#define CONFIG_ACPI_SLEEP 1
+#define CONFIG_ACPI_NUMA 1
+#define CONFIG_ACPI_SRAT 1
 
 #define CONFIG_VGA 1
 
@@ -100,6 +101,8 @@ extern char trampoline_realmode_entry[];
 extern char trampoline_realmode_entry[];
 extern unsigned int trampoline_xen_phys_start;
 extern unsigned char trampoline_cpu_started;
+extern char wakeup_start[];
+extern unsigned int video_mode, video_flags;
 #endif
 
 #if defined(__x86_64__)
diff -r c6491ed12f84 -r 24379dde8ac4 xen/include/public/platform.h
--- a/xen/include/public/platform.h     Wed Jul 11 13:49:11 2007 +0100
+++ b/xen/include/public/platform.h     Wed Jul 11 15:47:14 2007 +0100
@@ -153,6 +153,31 @@ typedef struct xenpf_firmware_info xenpf
 typedef struct xenpf_firmware_info xenpf_firmware_info_t;
 DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t);
 
+#define XENPF_set_acpi_sleep      51
+struct xenpf_set_acpi_sleep {
+    /* IN variables. */
+    uint16_t pm1a_cnt_port;
+    uint16_t pm1b_cnt_port;
+    uint16_t pm1a_evt_port;
+    uint16_t pm1b_evt_port;
+    /* OUT variables */
+    uint64_t xen_waking_vec;   /* Tell dom0 to set FACS waking vector */
+};
+typedef struct xenpf_set_acpi_sleep xenpf_set_acpi_sleep_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_set_acpi_sleep_t);
+
+#define XENPF_enter_acpi_sleep    52
+struct xenpf_enter_acpi_sleep {
+    /* IN variables */
+    uint16_t pm1a_cnt_val;
+    uint16_t pm1b_cnt_val;
+    uint32_t sleep_state;       /* Which state to enter */
+    uint32_t video_flags;       /* S3_bios or s3_mode */
+    uint32_t video_mode;        /* Mode setting for s3_mode */
+};
+typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t);
+
 struct xen_platform_op {
     uint32_t cmd;
     uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -164,6 +189,8 @@ struct xen_platform_op {
         struct xenpf_microcode_update  microcode;
         struct xenpf_platform_quirk    platform_quirk;
         struct xenpf_firmware_info     firmware_info;
+        struct xenpf_set_acpi_sleep    set_acpi_sleep;
+        struct xenpf_enter_acpi_sleep  enter_acpi_sleep;
         uint8_t                        pad[128];
     } u;
 };

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

