Re: [Xen-devel] [PATCH] xen viridian: cleanup



On 13/10/2011 12:39, "Tim Deegan" <tim@xxxxxxx> wrote:

> At 15:43 +0200 on 11 Oct (1318347830), Christoph Egger wrote:
>> 
>> Simplify accesses into viridian structures.
> 
> I'm afraid this looks like a bunch of code churn for not much benefit;
> unless anyone else wants it, I don't think I'll apply it.

Well, it looks like a reasonable cleanup to me, but if we're doing it then I
think we should use the shorthand forms idiomatically in every function in
viridian.c, even where, for example, only a single field access occurs.

 -- Keir

> Cheers,
> 
> Tim.
> 
>> Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
>> 
>> -- 
>> ---to satisfy European Law for business letters:
>> Advanced Micro Devices GmbH
>> Einsteinring 24, 85689 Dornach b. Muenchen
>> Geschaeftsfuehrer: Alberto Bozzo, Andrew Bowd
>> Sitz: Dornach, Gemeinde Aschheim, Landkreis Muenchen
>> Registergericht Muenchen, HRB Nr. 43632
> 
> Content-Description: xen_viridian.diff
>> diff -r 04fbcc0c1ec5 xen/arch/x86/hvm/viridian.c
>> --- a/xen/arch/x86/hvm/viridian.c Wed Sep 28 12:09:13 2011 +0200
>> +++ b/xen/arch/x86/hvm/viridian.c Wed Sep 28 12:45:39 2011 +0200
>> @@ -98,37 +98,43 @@ int cpuid_viridian_leaves(unsigned int l
>>  
>>  void dump_guest_os_id(struct domain *d)
>>  {
>> +    struct viridian_domain *vd = &d->arch.hvm_domain.viridian;
>> +
>>      gdprintk(XENLOG_INFO, "GUEST_OS_ID:\n");
>>      gdprintk(XENLOG_INFO, "\tvendor: %x\n",
>> -            d->arch.hvm_domain.viridian.guest_os_id.fields.vendor);
>> +            vd->guest_os_id.fields.vendor);
>>      gdprintk(XENLOG_INFO, "\tos: %x\n",
>> -            d->arch.hvm_domain.viridian.guest_os_id.fields.os);
>> +            vd->guest_os_id.fields.os);
>>      gdprintk(XENLOG_INFO, "\tmajor: %x\n",
>> -            d->arch.hvm_domain.viridian.guest_os_id.fields.major);
>> +            vd->guest_os_id.fields.major);
>>      gdprintk(XENLOG_INFO, "\tminor: %x\n",
>> -            d->arch.hvm_domain.viridian.guest_os_id.fields.minor);
>> +            vd->guest_os_id.fields.minor);
>>      gdprintk(XENLOG_INFO, "\tsp: %x\n",
>> -            d->arch.hvm_domain.viridian.guest_os_id.fields.service_pack);
>> +            vd->guest_os_id.fields.service_pack);
>>      gdprintk(XENLOG_INFO, "\tbuild: %x\n",
>> -            d->arch.hvm_domain.viridian.guest_os_id.fields.build_number);
>> +            vd->guest_os_id.fields.build_number);
>>  }
>>  
>>  void dump_hypercall(struct domain *d)
>>  {
>> +    struct viridian_domain *vd = &d->arch.hvm_domain.viridian;
>> +
>>      gdprintk(XENLOG_INFO, "HYPERCALL:\n");
>>      gdprintk(XENLOG_INFO, "\tenabled: %x\n",
>> -            d->arch.hvm_domain.viridian.hypercall_gpa.fields.enabled);
>> +            vd->hypercall_gpa.fields.enabled);
>>      gdprintk(XENLOG_INFO, "\tpfn: %lx\n",
>> -            (unsigned long)d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn);
>> +            (unsigned long)vd->hypercall_gpa.fields.pfn);
>>  }
>>  
>>  void dump_apic_assist(struct vcpu *v)
>>  {
>> +    struct viridian_vcpu *vv = &v->arch.hvm_vcpu.viridian;
>> +
>>      gdprintk(XENLOG_INFO, "APIC_ASSIST[%d]:\n", v->vcpu_id);
>>      gdprintk(XENLOG_INFO, "\tenabled: %x\n",
>> -            v->arch.hvm_vcpu.viridian.apic_assist.fields.enabled);
>> +            vv->apic_assist.fields.enabled);
>>      gdprintk(XENLOG_INFO, "\tpfn: %lx\n",
>> -            (unsigned long)v->arch.hvm_vcpu.viridian.apic_assist.fields.pfn);
>> +            (unsigned long)vv->apic_assist.fields.pfn);
>>  }
>>  
>>  static void enable_hypercall_page(struct domain *d)
>> @@ -201,6 +207,8 @@ int wrmsr_viridian_regs(uint32_t idx, ui
>>  {
>>      struct vcpu *v = current;
>>      struct domain *d = v->domain;
>> +    struct viridian_domain *vd = &d->arch.hvm_domain.viridian;
>> +    struct viridian_vcpu *vv = &v->arch.hvm_vcpu.viridian;
>>  
>>      if ( !is_viridian_domain(d) )
>>          return 0;
>> @@ -209,15 +217,15 @@ int wrmsr_viridian_regs(uint32_t idx, ui
>>      {
>>      case VIRIDIAN_MSR_GUEST_OS_ID:
>>          perfc_incr(mshv_wrmsr_osid);
>> -        d->arch.hvm_domain.viridian.guest_os_id.raw = val;
>> +        vd->guest_os_id.raw = val;
>>          dump_guest_os_id(d);
>>          break;
>>  
>>      case VIRIDIAN_MSR_HYPERCALL:
>>          perfc_incr(mshv_wrmsr_hc_page);
>> -        d->arch.hvm_domain.viridian.hypercall_gpa.raw = val;
>> +        vd->hypercall_gpa.raw = val;
>>          dump_hypercall(d);
>> -        if ( d->arch.hvm_domain.viridian.hypercall_gpa.fields.enabled )
>> +        if ( vd->hypercall_gpa.fields.enabled )
>>              enable_hypercall_page(d);
>>          break;
>>  
>> @@ -249,9 +257,9 @@ int wrmsr_viridian_regs(uint32_t idx, ui
>>  
>>      case VIRIDIAN_MSR_APIC_ASSIST:
>>          perfc_incr(mshv_wrmsr_apic_msr);
>> -        v->arch.hvm_vcpu.viridian.apic_assist.raw = val;
>> +        vv->apic_assist.raw = val;
>>          dump_apic_assist(v);
>> -        if (v->arch.hvm_vcpu.viridian.apic_assist.fields.enabled)
>> +        if (vv->apic_assist.fields.enabled)
>>              initialize_apic_assist(v);
>>          break;
>>  
>> @@ -266,6 +274,8 @@ int rdmsr_viridian_regs(uint32_t idx, ui
>>  {
>>      struct vcpu *v = current;
>>      struct domain *d = v->domain;
>> +    struct viridian_domain *vd = &d->arch.hvm_domain.viridian;
>> +    struct viridian_vcpu *vv = &v->arch.hvm_vcpu.viridian;
>>      
>>      if ( !is_viridian_domain(d) )
>>          return 0;
>> @@ -274,12 +284,12 @@ int rdmsr_viridian_regs(uint32_t idx, ui
>>      {
>>      case VIRIDIAN_MSR_GUEST_OS_ID:
>>          perfc_incr(mshv_rdmsr_osid);
>> -        *val = d->arch.hvm_domain.viridian.guest_os_id.raw;
>> +        *val = vd->guest_os_id.raw;
>>          break;
>>  
>>      case VIRIDIAN_MSR_HYPERCALL:
>>          perfc_incr(mshv_rdmsr_hc_page);
>> -        *val = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
>> +        *val = vd->hypercall_gpa.raw;
>>          break;
>>  
>>      case VIRIDIAN_MSR_VP_INDEX:
>> @@ -300,7 +310,7 @@ int rdmsr_viridian_regs(uint32_t idx, ui
>>  
>>      case VIRIDIAN_MSR_APIC_ASSIST:
>>          perfc_incr(mshv_rdmsr_apic_msr);
>> -        *val = v->arch.hvm_vcpu.viridian.apic_assist.raw;
>> +        *val = vv->apic_assist.raw;
>>          break;
>>  
>>      default:
>> @@ -390,12 +400,13 @@ out:
>>  static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
>>  {
>>      struct hvm_viridian_domain_context ctxt;
>> +    struct viridian_domain *vd = &d->arch.hvm_domain.viridian;
>>  
>>      if ( !is_viridian_domain(d) )
>>          return 0;
>>  
>> -    ctxt.hypercall_gpa = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
>> -    ctxt.guest_os_id   = d->arch.hvm_domain.viridian.guest_os_id.raw;
>> +    ctxt.hypercall_gpa = vd->hypercall_gpa.raw;
>> +    ctxt.guest_os_id   = vd->guest_os_id.raw;
>>  
>>      return (hvm_save_entry(VIRIDIAN_DOMAIN, 0, h, &ctxt) != 0);
>>  }
>> @@ -403,12 +414,13 @@ static int viridian_save_domain_ctxt(str
>>  static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
>>  {
>>      struct hvm_viridian_domain_context ctxt;
>> +    struct viridian_domain *vd = &d->arch.hvm_domain.viridian;
>>  
>>      if ( hvm_load_entry(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
>>          return -EINVAL;
>>  
>> -    d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
>> -    d->arch.hvm_domain.viridian.guest_os_id.raw   = ctxt.guest_os_id;
>> +    vd->hypercall_gpa.raw = ctxt.hypercall_gpa;
>> +    vd->guest_os_id.raw   = ctxt.guest_os_id;
>>  
>>      return 0;
>>  }
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

