[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 2/6] x86: prefer is_..._domain() over is_..._vcpu()



... when the domain pointer is already available or such operations
occur frequently in a function.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -481,7 +481,7 @@ int vcpu_initialise(struct vcpu *v)
 
     v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
 
-    rc = is_pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0;
+    rc = is_pv_32on64_domain(d) ? setup_compat_l4(v) : 0;
  done:
     if ( rc )
     {
@@ -722,7 +722,7 @@ int arch_set_info_guest(
 #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
     flags = c(flags);
 
-    if ( is_pv_vcpu(v) )
+    if ( is_pv_domain(d) )
     {
         if ( !compat )
         {
@@ -763,7 +763,7 @@ int arch_set_info_guest(
              (c(ldt_ents) > 8192) )
             return -EINVAL;
     }
-    else if ( is_pvh_vcpu(v) )
+    else if ( is_pvh_domain(d) )
     {
         /* PVH 32bitfixme */
         ASSERT(!compat);
@@ -781,7 +781,7 @@ int arch_set_info_guest(
     v->fpu_initialised = !!(flags & VGCF_I387_VALID);
 
     v->arch.flags &= ~TF_kernel_mode;
-    if ( (flags & VGCF_in_kernel) || has_hvm_container_vcpu(v)/*???*/ )
+    if ( (flags & VGCF_in_kernel) || has_hvm_container_domain(d)/*???*/ )
         v->arch.flags |= TF_kernel_mode;
 
     v->arch.vgc_flags = flags;
@@ -796,7 +796,7 @@ int arch_set_info_guest(
     if ( !compat )
     {
        memcpy(&v->arch.user_regs, &c.nat->user_regs, sizeof(c.nat->user_regs));
-        if ( is_pv_vcpu(v) )
+        if ( is_pv_domain(d) )
             memcpy(v->arch.pv_vcpu.trap_ctxt, c.nat->trap_ctxt,
                    sizeof(c.nat->trap_ctxt));
     }
@@ -808,14 +808,14 @@ int arch_set_info_guest(
                            c.cmp->trap_ctxt + i);
     }
 
-    if ( has_hvm_container_vcpu(v) )
+    if ( has_hvm_container_domain(d) )
     {
         for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
             v->arch.debugreg[i] = c(debugreg[i]);
 
         hvm_set_info_guest(v);
 
-        if ( is_hvm_vcpu(v) || v->is_initialised )
+        if ( is_hvm_domain(d) || v->is_initialised )
             goto out;
 
         /* NB: No need to use PV cr3 un-pickling macros */
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1150,10 +1150,11 @@ CHECK_FIELD_(struct, vcpu_guest_context,
 void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
 {
     unsigned int i;
-    bool_t compat = is_pv_32on64_domain(v->domain);
+    const struct domain *d = v->domain;
+    bool_t compat = is_pv_32on64_domain(d);
 #define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
 
-    if ( !is_pv_vcpu(v) )
+    if ( !is_pv_domain(d) )
         memset(c.nat, 0, sizeof(*c.nat));
     memcpy(&c.nat->fpu_ctxt, v->arch.fpu_ctxt, sizeof(c.nat->fpu_ctxt));
     c(flags = v->arch.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel));
@@ -1164,7 +1165,7 @@ void arch_get_info_guest(struct vcpu *v,
     if ( !compat )
     {
        memcpy(&c.nat->user_regs, &v->arch.user_regs, sizeof(c.nat->user_regs));
-        if ( is_pv_vcpu(v) )
+        if ( is_pv_domain(d) )
             memcpy(c.nat->trap_ctxt, v->arch.pv_vcpu.trap_ctxt,
                    sizeof(c.nat->trap_ctxt));
     }
@@ -1179,7 +1180,7 @@ void arch_get_info_guest(struct vcpu *v,
     for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
         c(debugreg[i] = v->arch.debugreg[i]);
 
-    if ( has_hvm_container_vcpu(v) )
+    if ( has_hvm_container_domain(d) )
     {
         struct segment_register sreg;
 
@@ -1245,7 +1246,7 @@ void arch_get_info_guest(struct vcpu *v,
         BUG_ON((c(user_regs.eflags) & X86_EFLAGS_IOPL) != 0);
         c(user_regs.eflags |= v->arch.pv_vcpu.iopl << 12);
 
-        if ( !is_pv_32on64_domain(v->domain) )
+        if ( !compat )
         {
             c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                 pagetable_get_pfn(v->arch.guest_table));
@@ -1274,7 +1275,7 @@ void arch_get_info_guest(struct vcpu *v,
             c(flags |= VGCF_in_kernel);
     }
 
-    c(vm_assist = v->domain->vm_assist);
+    c(vm_assist = d->vm_assist);
 #undef c
 }
 
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -798,7 +798,7 @@ int cpuid_hypervisor_leaves( uint32_t id
             *ebx = 0x40000200;
         *ecx = 0;          /* Features 1 */
         *edx = 0;          /* Features 2 */
-        if ( is_pv_vcpu(current) )
+        if ( is_pv_domain(d) )
             *ecx |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
         break;
 
@@ -822,18 +822,19 @@ void pv_cpuid(struct cpu_user_regs *regs
 {
     uint32_t a, b, c, d;
     struct vcpu *curr = current;
+    struct domain *currd = curr->domain;
 
     a = regs->eax;
     b = regs->ebx;
     c = regs->ecx;
     d = regs->edx;
 
-    if ( !is_control_domain(curr->domain) && !is_hardware_domain(curr->domain) )
+    if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
     {
         unsigned int cpuid_leaf = a, sub_leaf = c;
 
         if ( !cpuid_hypervisor_leaves(a, c, &a, &b, &c, &d) )
-            domain_cpuid(curr->domain, a, c, &a, &b, &c, &d);
+            domain_cpuid(currd, a, c, &a, &b, &c, &d);
 
         switch ( cpuid_leaf )
         {
@@ -849,7 +850,7 @@ void pv_cpuid(struct cpu_user_regs *regs
                 {
                     if ( !(curr->arch.xcr0 & (1ULL << sub_leaf)) )
                         continue;
-                    domain_cpuid(curr->domain, cpuid_leaf, sub_leaf,
+                    domain_cpuid(currd, cpuid_leaf, sub_leaf,
                                  &_eax, &_ebx, &_ecx, &_edx);
                     if ( (_eax + _ebx) > b )
                         b = _eax + _ebx;
@@ -869,7 +870,7 @@ void pv_cpuid(struct cpu_user_regs *regs
         if ( !cpu_has_apic )
             __clear_bit(X86_FEATURE_APIC, &d);
 
-        if ( !is_pvh_vcpu(curr) )
+        if ( !is_pvh_domain(currd) )
         {
             __clear_bit(X86_FEATURE_PSE, &d);
             __clear_bit(X86_FEATURE_PGE, &d);
@@ -887,7 +888,7 @@ void pv_cpuid(struct cpu_user_regs *regs
         __clear_bit(X86_FEATURE_DS, &d);
         __clear_bit(X86_FEATURE_ACC, &d);
         __clear_bit(X86_FEATURE_PBE, &d);
-        if ( is_pvh_vcpu(curr) )
+        if ( is_pvh_domain(currd) )
             __clear_bit(X86_FEATURE_MTRR, &d);
 
         __clear_bit(X86_FEATURE_DTES64 % 32, &c);
@@ -896,7 +897,7 @@ void pv_cpuid(struct cpu_user_regs *regs
         __clear_bit(X86_FEATURE_VMXE % 32, &c);
         __clear_bit(X86_FEATURE_SMXE % 32, &c);
         __clear_bit(X86_FEATURE_TM2 % 32, &c);
-        if ( is_pv_32bit_vcpu(curr) )
+        if ( is_pv_32bit_domain(currd) )
             __clear_bit(X86_FEATURE_CX16 % 32, &c);
         __clear_bit(X86_FEATURE_XTPR % 32, &c);
         __clear_bit(X86_FEATURE_PDCM % 32, &c);
@@ -945,12 +946,12 @@ void pv_cpuid(struct cpu_user_regs *regs
 
     case 0x80000001:
         /* Modify Feature Information. */
-        if ( is_pv_32bit_vcpu(curr) )
+        if ( is_pv_32bit_domain(currd) )
         {
             __clear_bit(X86_FEATURE_LM % 32, &d);
             __clear_bit(X86_FEATURE_LAHF_LM % 32, &c);
         }
-        if ( is_pv_32on64_vcpu(curr) &&
+        if ( is_pv_32on64_domain(currd) &&
              boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
             __clear_bit(X86_FEATURE_SYSCALL % 32, &d);
         __clear_bit(X86_FEATURE_PAGE1GB % 32, &d);


Attachment: use-is_X_domain-x86.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.