
Re: [Xen-devel] [PATCH 3/10] Add HVM support


  • To: "Tian, Kevin" <kevin.tian@xxxxxxxxx>
  • From: Keir Fraser <keir@xxxxxxxxxxxxx>
  • Date: Tue, 10 Jul 2007 10:50:55 +0100
  • Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
  • Delivery-date: Tue, 10 Jul 2007 02:48:55 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>
  • Thread-index: Ace4v8AvEHvNElKqQi6Qqle8QqzaiAKGA7QQ
  • Thread-topic: [Xen-devel] [PATCH 3/10] Add HVM support

Kevin,

Is it documented anywhere in which CPU states VMCS synchronisation may be
lost? I see it makes sense that S3 would do so since the CPUs are powered
off in that state (although I don't see this documented anywhere -- maybe
it's 'obvious'), but what about, e.g., deep sleep (C-states)?

Shouldn't you VMXOFF before S3?
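
Something along these lines is what I had in mind -- just a sketch, mirroring
your vmx_resume_cpu(); vmx_suspend_cpu() and a matching .suspend_cpu /
hvm_suspend_cpu() hook are hypothetical names, not anything in the tree:

/* Hypothetical counterpart to vmx_resume_cpu(): leave VMX operation
 * before the CPUs are powered off for S3.  Assumes __vmxoff() is
 * available, and uses CR4.VMXE as the "currently in VMX operation"
 * indicator. */
static int vmx_suspend_cpu(void)
{
    if ( read_cr4() & X86_CR4_VMXE )
    {
        __vmxoff();
        clear_in_cr4(X86_CR4_VMXE);
    }

    return 1;
}

It could then be called on the suspend path, symmetrically with the
hvm_resume_cpu() call you add in restore_rest_processor_state().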

 -- Keir

On 27/6/07 14:33, "Tian, Kevin" <kevin.tian@xxxxxxxxx> wrote:

> Add HVM hardware feature suspend/resume.
> 
> Signed-off-by: Ke Yu <ke.yu@xxxxxxxxx>
> Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
> 
> diff -r f217aafc1c17 xen/arch/x86/acpi/power.c
> --- a/xen/arch/x86/acpi/power.c Mon Jun 25 13:28:41 2007 -0400
> +++ b/xen/arch/x86/acpi/power.c Mon Jun 25 17:36:32 2007 -0400
> @@ -83,7 +83,7 @@ static void device_power_up(void)
>  /* Main interface to do xen specific suspend/resume */
>  int enter_state(u32 state)
>  {
> -    struct domain *d;
> +    struct domain *d, *pd = NULL;
>      unsigned long flags;
>      int error;
>  
> @@ -99,7 +99,15 @@ int enter_state(u32 state)
>      
>      for_each_domain(d)
>          if (d->domain_id != 0)
> +        {
>              domain_pause(d);
> +            if (is_hvm_domain(d) && !hvm_suspend_domain(d))
> +            {
> +                error = -EINVAL;
> +                goto Unpause;
> +            }
> +            pd = d;
> +        }
>  
>      pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n",
>          acpi_states[state]);
> @@ -133,13 +141,22 @@ int enter_state(u32 state)
>   Done:
>      local_irq_restore(flags);
>  
> -    for_each_domain(d)
> -       if (d->domain_id!=0)
> -           domain_unpause(d);
> + Unpause:
> +    if (pd)
> +    {
> +       for_each_domain(d)
> +       {
> +           /* Unpause until recorded last paused domain */
> +           if (d == pd)
> +               break;
> +
> +           if (d->domain_id != 0)
> +               domain_unpause(d);
> +       }
> +    }
>  
>      spin_unlock(&pm_lock);
>      return error;
> -
>  }
>  
>  /*
> diff -r f217aafc1c17 xen/arch/x86/acpi/suspend.c
> --- a/xen/arch/x86/acpi/suspend.c Mon Jun 25 13:28:41 2007 -0400
> +++ b/xen/arch/x86/acpi/suspend.c Mon Jun 25 14:52:17 2007 -0400
> @@ -82,4 +82,6 @@ void restore_rest_processor_state(void)
>  
>      mtrr_ap_init();
>      mcheck_init(&boot_cpu_data);
> +    if (hvm_enabled)
> +        hvm_resume_cpu();
>  }
> diff -r f217aafc1c17 xen/arch/x86/hvm/vmx/vmcs.c
> --- a/xen/arch/x86/hvm/vmx/vmcs.c Mon Jun 25 13:28:41 2007 -0400
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c Mon Jun 25 14:03:32 2007 -0400
> @@ -178,7 +178,7 @@ static void __vmx_clear_vmcs(void *info)
>      v->arch.hvm_vmx.launched   = 0;
>  }
>  
> -static void vmx_clear_vmcs(struct vcpu *v)
> +void vmx_clear_vmcs(struct vcpu *v)
>  {
>      int cpu = v->arch.hvm_vmx.active_cpu;
>  
> diff -r f217aafc1c17 xen/arch/x86/hvm/vmx/vmx.c
> --- a/xen/arch/x86/hvm/vmx/vmx.c Mon Jun 25 13:28:41 2007 -0400
> +++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Jun 25 17:38:49 2007 -0400
> @@ -53,6 +53,11 @@
>  
>  char *vmx_msr_bitmap;
>  
> +static DEFINE_PER_CPU(struct vmcs_struct*, host_vmcs);
> +
> +static int vmx_suspend_domain(struct domain *d);
> +static int vmx_resume_cpu(void);
> +
>  static void vmx_ctxt_switch_from(struct vcpu *v);
>  static void vmx_ctxt_switch_to(struct vcpu *v);
>  
> @@ -1211,7 +1216,9 @@ static struct hvm_function_table vmx_fun
>      .inject_exception     = vmx_inject_exception,
>      .init_ap_context      = vmx_init_ap_context,
>      .init_hypercall_page  = vmx_init_hypercall_page,
> -    .event_injection_faulted = vmx_event_injection_faulted
> +    .event_injection_faulted = vmx_event_injection_faulted,
> +    .suspend_domain       = vmx_suspend_domain,
> +    .resume_cpu           = vmx_resume_cpu,
>  };
>  
>  int start_vmx(void)
> @@ -1265,6 +1272,8 @@ int start_vmx(void)
>          vmx_free_host_vmcs(vmcs);
>          return 0;
>      }
> +
> +    this_cpu(host_vmcs) = vmcs;
>  
>      vmx_save_host_msrs();
>  
> @@ -3013,6 +3022,42 @@ asmlinkage void vmx_trace_vmentry(void)
>      HVMTRACE_0D(VMENTRY, v);
>  }
>  
> +/* Suspend target domain with its VMCSs synced (flushed to memory) */
> +static int vmx_suspend_domain(struct domain *d)
> +{
> +    struct vcpu *v;
> +
> +    if (!is_hvm_domain(d))
> +        return 1;
> +
> +    if (!atomic_read(&d->pause_count))
> +        return 0;
> +
> +    for_each_vcpu(d, v)
> +    {
> +        spin_lock(&v->arch.hvm_vmx.vmcs_lock);
> +        vmx_clear_vmcs(v);
> +        spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
> +    }
> +
> +    return 1;
> +}
> +
> +/* Re-enter VMX operation (VMXON) on the current cpu after resume */
> +static int vmx_resume_cpu(void)
> +{
> +    struct vmcs_struct *vmcs = this_cpu(host_vmcs);
> +
> +    if ( __vmxon(virt_to_maddr(vmcs)) )
> +    {
> +        clear_in_cr4(X86_CR4_VMXE);
> +        printk("VMXON failed\n");
> +        vmx_free_host_vmcs(vmcs);
> +        return 0;
> +    }
> +
> +    printk("VMXON is done\n");
> +    return 1;
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff -r f217aafc1c17 xen/include/asm-x86/hvm/hvm.h
> --- a/xen/include/asm-x86/hvm/hvm.h Mon Jun 25 13:28:41 2007 -0400
> +++ b/xen/include/asm-x86/hvm/hvm.h Mon Jun 25 15:27:34 2007 -0400
> @@ -145,6 +145,10 @@ struct hvm_function_table {
>      void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
>  
>      int  (*event_injection_faulted)(struct vcpu *v);
> +
> +    int  (*suspend_domain)(struct domain *d);
> +
> +    int  (*resume_cpu)(void);
>  };
>  
>  extern struct hvm_function_table hvm_funcs;
> @@ -279,4 +283,26 @@ static inline int hvm_event_injection_fa
>      return hvm_funcs.event_injection_faulted(v);
>  }
>  
> +static inline int
> +hvm_suspend_domain(struct domain* d)
> +{
> +    int ret = 1;
> +
> +    if (hvm_funcs.suspend_domain)
> +        ret = hvm_funcs.suspend_domain(d);
> +
> +    return ret;
> +}
> +
> +static inline int
> +hvm_resume_cpu(void)
> +{
> +    int ret = 1;
> +
> +    if (hvm_funcs.resume_cpu)
> +        ret = hvm_funcs.resume_cpu();
> +    
> +    return ret;
> +}
> +
>  #endif /* __ASM_X86_HVM_HVM_H__ */
> diff -r f217aafc1c17 xen/include/asm-x86/hvm/vmx/vmcs.h
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h Mon Jun 25 13:28:41 2007 -0400
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Mon Jun 25 14:03:32 2007 -0400
> @@ -28,6 +28,7 @@ extern void vmcs_dump_vcpu(void);
>  extern void vmcs_dump_vcpu(void);
>  extern void vmx_init_vmcs_config(void);
>  extern void setup_vmcs_dump(void);
> +extern void vmx_clear_vmcs(struct vcpu *v);
>  
>  struct vmcs_struct {
>      u32 vmcs_revision_id;


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

