|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] x86/pv: Rename d->arch.pv_domain to d->arch.pv
commit 091fcec52b427cec0e165750386139130c9beae6
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Aug 28 15:49:09 2018 +0000
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Aug 30 10:36:00 2018 +0100
x86/pv: Rename d->arch.pv_domain to d->arch.pv
The trailing _domain suffix is redundant, but adds to code volume. Drop it.
Reflow lines as appropriate, and switch to using the new XFREE/etc wrappers
where applicable.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/arch/x86/cpu/amd.c | 4 ++--
xen/arch/x86/cpu/intel.c | 4 ++--
xen/arch/x86/domain.c | 2 +-
xen/arch/x86/domain_page.c | 8 ++++----
xen/arch/x86/domctl.c | 10 +++++-----
xen/arch/x86/mm.c | 14 +++++++-------
xen/arch/x86/pv/dom0_build.c | 4 ++--
xen/arch/x86/pv/domain.c | 35 ++++++++++++++++-------------------
xen/include/asm-x86/domain.h | 4 ++--
xen/include/asm-x86/flushtlb.h | 2 +-
xen/include/asm-x86/shadow.h | 4 ++--
11 files changed, 44 insertions(+), 47 deletions(-)
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index a7afa2fa7a..e0ee11419b 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -208,8 +208,8 @@ static void amd_ctxt_switch_masking(const struct vcpu *next)
struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
const struct domain *nextd = next ? next->domain : NULL;
const struct cpuidmasks *masks =
- (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
- ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
+ (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
+ ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
uint64_t val = masks->_1cd;
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 377beef9ba..8c375c80f3 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -119,8 +119,8 @@ static void intel_ctxt_switch_masking(const struct vcpu *next)
struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
const struct domain *nextd = next ? next->domain : NULL;
const struct cpuidmasks *masks =
- (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
- ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
+ (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
+ ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
if (msr_basic) {
uint64_t val = masks->_1cd;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index eb1e93fc59..8c7ddf55f5 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -970,7 +970,7 @@ int arch_set_info_guest(
if ( d != current->domain && !VM_ASSIST(d, m2p_strict) &&
is_pv_domain(d) && !is_pv_32bit_domain(d) &&
test_bit(VMASST_TYPE_m2p_strict, &c.nat->vm_assist) &&
- atomic_read(&d->arch.pv_domain.nr_l4_pages) )
+ atomic_read(&d->arch.pv.nr_l4_pages) )
{
bool done = false;
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index aee9a80720..735f65ada7 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -85,7 +85,7 @@ void *map_domain_page(mfn_t mfn)
if ( !v || !is_pv_vcpu(v) )
return mfn_to_virt(mfn_x(mfn));
- dcache = &v->domain->arch.pv_domain.mapcache;
+ dcache = &v->domain->arch.pv.mapcache;
vcache = &v->arch.pv_vcpu.mapcache;
if ( !dcache->inuse )
return mfn_to_virt(mfn_x(mfn));
@@ -189,7 +189,7 @@ void unmap_domain_page(const void *ptr)
v = mapcache_current_vcpu();
ASSERT(v && is_pv_vcpu(v));
- dcache = &v->domain->arch.pv_domain.mapcache;
+ dcache = &v->domain->arch.pv.mapcache;
ASSERT(dcache->inuse);
idx = PFN_DOWN(va - MAPCACHE_VIRT_START);
@@ -233,7 +233,7 @@ void unmap_domain_page(const void *ptr)
int mapcache_domain_init(struct domain *d)
{
- struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
+ struct mapcache_domain *dcache = &d->arch.pv.mapcache;
unsigned int bitmap_pages;
ASSERT(is_pv_domain(d));
@@ -261,7 +261,7 @@ int mapcache_domain_init(struct domain *d)
int mapcache_vcpu_init(struct vcpu *v)
{
struct domain *d = v->domain;
- struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
+ struct mapcache_domain *dcache = &d->arch.pv.mapcache;
unsigned long i;
unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 6f1c43e251..e27e971a6d 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -194,7 +194,7 @@ static int update_domain_cpuid_info(struct domain *d,
break;
}
- d->arch.pv_domain.cpuidmasks->_1cd = mask;
+ d->arch.pv.cpuidmasks->_1cd = mask;
}
break;
@@ -206,7 +206,7 @@ static int update_domain_cpuid_info(struct domain *d,
if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
mask &= (~0ULL << 32) | ctl->ecx;
- d->arch.pv_domain.cpuidmasks->_6c = mask;
+ d->arch.pv.cpuidmasks->_6c = mask;
}
break;
@@ -223,7 +223,7 @@ static int update_domain_cpuid_info(struct domain *d,
if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
mask &= ((uint64_t)eax << 32) | ebx;
- d->arch.pv_domain.cpuidmasks->_7ab0 = mask;
+ d->arch.pv.cpuidmasks->_7ab0 = mask;
}
/*
@@ -262,7 +262,7 @@ static int update_domain_cpuid_info(struct domain *d,
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
mask &= (~0ULL << 32) | eax;
- d->arch.pv_domain.cpuidmasks->Da1 = mask;
+ d->arch.pv.cpuidmasks->Da1 = mask;
}
break;
@@ -305,7 +305,7 @@ static int update_domain_cpuid_info(struct domain *d,
break;
}
- d->arch.pv_domain.cpuidmasks->e1cd = mask;
+ d->arch.pv.cpuidmasks->e1cd = mask;
}
break;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 44ff7a6b76..cb0fb570c5 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -501,7 +501,7 @@ void make_cr3(struct vcpu *v, mfn_t mfn)
struct domain *d = v->domain;
v->arch.cr3 = mfn_x(mfn) << PAGE_SHIFT;
- if ( is_pv_domain(d) && d->arch.pv_domain.pcid )
+ if ( is_pv_domain(d) && d->arch.pv.pcid )
v->arch.cr3 |= get_pcid_bits(v, false);
}
@@ -514,9 +514,9 @@ unsigned long pv_guest_cr4_to_real_cr4(const struct vcpu *v)
cr4 |= mmu_cr4_features & (X86_CR4_PSE | X86_CR4_SMEP | X86_CR4_SMAP |
X86_CR4_OSXSAVE | X86_CR4_FSGSBASE);
- if ( d->arch.pv_domain.pcid )
+ if ( d->arch.pv.pcid )
cr4 |= X86_CR4_PCIDE;
- else if ( !d->arch.pv_domain.xpti )
+ else if ( !d->arch.pv.xpti )
cr4 |= X86_CR4_PGE;
cr4 |= d->arch.vtsc ? X86_CR4_TSD : 0;
@@ -533,7 +533,7 @@ void write_ptbase(struct vcpu *v)
? pv_guest_cr4_to_real_cr4(v)
: ((read_cr4() & ~(X86_CR4_PCIDE | X86_CR4_TSD)) | X86_CR4_PGE);
- if ( is_pv_vcpu(v) && v->domain->arch.pv_domain.xpti )
+ if ( is_pv_vcpu(v) && v->domain->arch.pv.xpti )
{
cpu_info->root_pgt_changed = true;
cpu_info->pv_cr3 = __pa(this_cpu(root_pgt));
@@ -1757,7 +1757,7 @@ static int alloc_l4_table(struct page_info *page)
{
init_xen_l4_slots(pl4e, _mfn(pfn),
d, INVALID_MFN, VM_ASSIST(d, m2p_strict));
- atomic_inc(&d->arch.pv_domain.nr_l4_pages);
+ atomic_inc(&d->arch.pv.nr_l4_pages);
}
unmap_domain_page(pl4e);
@@ -1876,7 +1876,7 @@ static int free_l4_table(struct page_info *page)
if ( rc >= 0 )
{
- atomic_dec(&d->arch.pv_domain.nr_l4_pages);
+ atomic_dec(&d->arch.pv.nr_l4_pages);
rc = 0;
}
@@ -3787,7 +3787,7 @@ long do_mmu_update(
break;
rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
- if ( !rc && pt_owner->arch.pv_domain.xpti )
+ if ( !rc && pt_owner->arch.pv.xpti )
{
bool local_in_use = false;
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 34c77bcbe4..078288bd4f 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -387,8 +387,8 @@ int __init dom0_construct_pv(struct domain *d,
if ( compat32 )
{
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
- d->arch.pv_domain.xpti = false;
- d->arch.pv_domain.pcid = false;
+ d->arch.pv.xpti = false;
+ d->arch.pv.pcid = false;
v->vcpu_info = (void *)&d->shared_info->compat.vcpu_info[0];
if ( setup_compat_arg_xlat(v) != 0 )
BUG();
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 301e25078e..022831a7b0 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -122,8 +122,8 @@ int switch_compat(struct domain *d)
d->arch.x87_fip_width = 4;
- d->arch.pv_domain.xpti = false;
- d->arch.pv_domain.pcid = false;
+ d->arch.pv.xpti = false;
+ d->arch.pv.pcid = false;
return 0;
@@ -142,7 +142,7 @@ static int pv_create_gdt_ldt_l1tab(struct vcpu *v)
{
return create_perdomain_mapping(v->domain, GDT_VIRT_START(v),
1U << GDT_LDT_VCPU_SHIFT,
- v->domain->arch.pv_domain.gdt_ldt_l1tab,
+ v->domain->arch.pv.gdt_ldt_l1tab,
NULL);
}
@@ -215,11 +215,9 @@ void pv_domain_destroy(struct domain *d)
destroy_perdomain_mapping(d, GDT_LDT_VIRT_START,
GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
- xfree(d->arch.pv_domain.cpuidmasks);
- d->arch.pv_domain.cpuidmasks = NULL;
+ XFREE(d->arch.pv.cpuidmasks);
- free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
- d->arch.pv_domain.gdt_ldt_l1tab = NULL;
+ FREE_XENHEAP_PAGE(d->arch.pv.gdt_ldt_l1tab);
}
@@ -234,14 +232,14 @@ int pv_domain_initialise(struct domain *d)
pv_l1tf_domain_init(d);
- d->arch.pv_domain.gdt_ldt_l1tab =
+ d->arch.pv.gdt_ldt_l1tab =
alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
- if ( !d->arch.pv_domain.gdt_ldt_l1tab )
+ if ( !d->arch.pv.gdt_ldt_l1tab )
goto fail;
- clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
+ clear_page(d->arch.pv.gdt_ldt_l1tab);
if ( levelling_caps & ~LCAP_faulting &&
- (d->arch.pv_domain.cpuidmasks = xmemdup(&cpuidmask_defaults)) == NULL )
+ (d->arch.pv.cpuidmasks = xmemdup(&cpuidmask_defaults)) == NULL )
goto fail;
rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
@@ -255,8 +253,8 @@ int pv_domain_initialise(struct domain *d)
/* 64-bit PV guest by default. */
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
- d->arch.pv_domain.xpti = opt_xpti & (is_hardware_domain(d)
- ? OPT_XPTI_DOM0 : OPT_XPTI_DOMU);
+ d->arch.pv.xpti = opt_xpti & (is_hardware_domain(d)
+ ? OPT_XPTI_DOM0 : OPT_XPTI_DOMU);
if ( !is_pv_32bit_domain(d) && use_invpcid && cpu_has_pcid )
switch ( opt_pcid )
@@ -265,15 +263,15 @@ int pv_domain_initialise(struct domain *d)
break;
case PCID_ALL:
- d->arch.pv_domain.pcid = true;
+ d->arch.pv.pcid = true;
break;
case PCID_XPTI:
- d->arch.pv_domain.pcid = d->arch.pv_domain.xpti;
+ d->arch.pv.pcid = d->arch.pv.xpti;
break;
case PCID_NOXPTI:
- d->arch.pv_domain.pcid = !d->arch.pv_domain.xpti;
+ d->arch.pv.pcid = !d->arch.pv.xpti;
break;
default:
@@ -301,14 +299,13 @@ static void _toggle_guest_pt(struct vcpu *v)
v->arch.flags ^= TF_kernel_mode;
update_cr3(v);
- if ( d->arch.pv_domain.xpti )
+ if ( d->arch.pv.xpti )
{
struct cpu_info *cpu_info = get_cpu_info();
cpu_info->root_pgt_changed = true;
cpu_info->pv_cr3 = __pa(this_cpu(root_pgt)) |
- (d->arch.pv_domain.pcid
- ? get_pcid_bits(v, true) : 0);
+ (d->arch.pv.pcid ? get_pcid_bits(v, true) : 0);
}
/* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 3da2c68073..2f029eeeb1 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -305,7 +305,7 @@ struct arch_domain
struct list_head pdev_list;
union {
- struct pv_domain pv_domain;
+ struct pv_domain pv;
struct hvm_domain hvm_domain;
};
@@ -458,7 +458,7 @@ struct arch_domain
#define gdt_ldt_pt_idx(v) \
((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT))
#define pv_gdt_ptes(v) \
- ((v)->domain->arch.pv_domain.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
+ ((v)->domain->arch.pv.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
(((v)->vcpu_id << GDT_LDT_VCPU_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)))
#define pv_ldt_ptes(v) (pv_gdt_ptes(v) + 16)
diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
index ed5f45e806..434821aaf3 100644
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -138,7 +138,7 @@ void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
#define flush_root_pgtbl_domain(d) \
{ \
- if ( is_pv_domain(d) && (d)->arch.pv_domain.xpti ) \
+ if ( is_pv_domain(d) && (d)->arch.pv.xpti ) \
flush_mask((d)->dirty_cpumask, FLUSH_ROOT_PGTBL); \
}
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index f40f411871..b3ebe56ab0 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -169,7 +169,7 @@ static inline bool pv_l1tf_check_pte(struct domain *d, unsigned int level,
ASSERT(is_pv_domain(d));
ASSERT(!(pte & _PAGE_PRESENT));
- if ( d->arch.pv_domain.check_l1tf && !paging_mode_sh_forced(d) &&
+ if ( d->arch.pv.check_l1tf && !paging_mode_sh_forced(d) &&
(((level > 1) && (pte & _PAGE_PSE)) || !is_l1tf_safe_maddr(pte)) )
{
#ifdef CONFIG_SHADOW_PAGING
@@ -224,7 +224,7 @@ void pv_l1tf_tasklet(unsigned long data);
static inline void pv_l1tf_domain_init(struct domain *d)
{
- d->arch.pv_domain.check_l1tf =
+ d->arch.pv.check_l1tf =
opt_pv_l1tf & (is_hardware_domain(d)
? OPT_PV_L1TF_DOM0 : OPT_PV_L1TF_DOMU);
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |