[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 3/3] drop "domain_" prefix from struct domain's dirty CPU mask



Hi Jan,

On 23/01/18 10:16, Jan Beulich wrote:
It being a field of struct domain is sufficient to recognize its
purpose.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Julien Grall <julien.grall@xxxxxxxxxx>

Cheers,

---
v2: White space changes (consolidate split line statements into single
     line ones). Re-base.

--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -470,7 +470,7 @@ void startup_cpu_idle_loop(void)
ASSERT(is_idle_vcpu(v));
      /* TODO
-       cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
+       cpumask_set_cpu(v->processor, v->domain->dirty_cpumask);
         v->dirty_cpu = v->processor;
      */
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -145,7 +145,7 @@ void startup_cpu_idle_loop(void)
      struct vcpu *v = current;
ASSERT(is_idle_vcpu(v));
-    cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
+    cpumask_set_cpu(v->processor, v->domain->dirty_cpumask);
      v->dirty_cpu = v->processor;
reset_stack_and_jump(idle_loop);
@@ -1617,7 +1617,7 @@ static void __context_switch(void)
       * which is synchronised on that function.
       */
      if ( pd != nd )
-        cpumask_set_cpu(cpu, nd->domain_dirty_cpumask);
+        cpumask_set_cpu(cpu, nd->dirty_cpumask);
      n->dirty_cpu = cpu;
if ( !is_idle_domain(nd) )
@@ -1673,7 +1673,7 @@ static void __context_switch(void)
      }
if ( pd != nd )
-        cpumask_clear_cpu(cpu, pd->domain_dirty_cpumask);
+        cpumask_clear_cpu(cpu, pd->dirty_cpumask);
      p->dirty_cpu = VCPU_CPU_CLEAN;
per_cpu(curr_vcpu, cpu) = n;
@@ -1922,7 +1922,7 @@ int domain_relinquish_resources(struct d
      int ret;
      struct vcpu *v;
-    BUG_ON(!cpumask_empty(d->domain_dirty_cpumask));
+    BUG_ON(!cpumask_empty(d->dirty_cpumask));
switch ( d->arch.relmem )
      {
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4045,7 +4045,7 @@ static int hvmop_flush_tlb_all(void)
          paging_update_cr3(v);
/* Flush all dirty TLBs. */
-    flush_tlb_mask(d->domain_dirty_cpumask);
+    flush_tlb_mask(d->dirty_cpumask);
/* Done. */
      for_each_vcpu ( d, v )
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2322,7 +2322,7 @@ static int svm_is_erratum_383(struct cpu
      wrmsrl(MSR_IA32_MCG_STATUS, msr_content & ~(1ULL << 2));
/* flush TLB */
-    flush_tlb_mask(v->domain->domain_dirty_cpumask);
+    flush_tlb_mask(v->domain->dirty_cpumask);
return 1;
  }
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2546,7 +2546,7 @@ static int _get_page_type(struct page_in
                  cpumask_t *mask = this_cpu(scratch_cpumask);
BUG_ON(in_irq());
-                cpumask_copy(mask, d->domain_dirty_cpumask);
+                cpumask_copy(mask, d->dirty_cpumask);
/* Don't flush if the timestamp is old enough */
                  tlbflush_filter(mask, page->tlbflush_timestamp);
@@ -3277,7 +3277,7 @@ long do_mmuext_op(
case MMUEXT_TLB_FLUSH_ALL:
              if ( likely(currd == pg_owner) )
-                flush_tlb_mask(currd->domain_dirty_cpumask);
+                flush_tlb_mask(currd->dirty_cpumask);
              else
                  rc = -EPERM;
              break;
@@ -3286,8 +3286,7 @@ long do_mmuext_op(
              if ( unlikely(currd != pg_owner) )
                  rc = -EPERM;
              else if ( __addr_ok(op.arg1.linear_addr) )
-                flush_tlb_one_mask(currd->domain_dirty_cpumask,
-                                   op.arg1.linear_addr);
+                flush_tlb_one_mask(currd->dirty_cpumask, op.arg1.linear_addr);
              break;
case MMUEXT_FLUSH_CACHE:
@@ -3772,7 +3771,7 @@ long do_mmu_update(
          unsigned int cpu = smp_processor_id();
          cpumask_t *mask = per_cpu(scratch_cpumask, cpu);
-        cpumask_andnot(mask, pt_owner->domain_dirty_cpumask, cpumask_of(cpu));
+        cpumask_andnot(mask, pt_owner->dirty_cpumask, cpumask_of(cpu));
          if ( !cpumask_empty(mask) )
              flush_area_mask(mask, ZERO_BLOCK_PTR, FLUSH_VA_VALID);
      }
@@ -3955,7 +3954,7 @@ static int __do_update_va_mapping(
              flush_tlb_local();
              break;
          case UVMF_ALL:
-            mask = d->domain_dirty_cpumask;
+            mask = d->dirty_cpumask;
              break;
          default:
              mask = this_cpu(scratch_cpumask);
@@ -3975,7 +3974,7 @@ static int __do_update_va_mapping(
              paging_invlpg(v, va);
              break;
          case UVMF_ALL:
-            mask = d->domain_dirty_cpumask;
+            mask = d->dirty_cpumask;
              break;
          default:
              mask = this_cpu(scratch_cpumask);
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -124,7 +124,7 @@ int hap_track_dirty_vram(struct domain *
              p2m_change_type_range(d, begin_pfn, begin_pfn + nr,
                                    p2m_ram_rw, p2m_ram_logdirty);
-            flush_tlb_mask(d->domain_dirty_cpumask);
+            flush_tlb_mask(d->dirty_cpumask);
memset(dirty_bitmap, 0xff, size); /* consider all pages dirty */
          }
@@ -211,7 +211,7 @@ static int hap_enable_log_dirty(struct d
           * to be read-only, or via hardware-assisted log-dirty.
           */
          p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
      }
      return 0;
  }
@@ -240,7 +240,7 @@ static void hap_clean_dirty_bitmap(struc
       * be read-only, or via hardware-assisted log-dirty.
       */
      p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
-    flush_tlb_mask(d->domain_dirty_cpumask);
+    flush_tlb_mask(d->dirty_cpumask);
  }
/************************************************/
@@ -741,7 +741,7 @@ hap_write_p2m_entry(struct domain *d, un
safe_write_pte(p, new);
      if ( old_flags & _PAGE_PRESENT )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
    paging_unlock(d);

--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1195,12 +1195,12 @@ void ept_sync_domain(struct p2m_domain *
          return;
      }
-    ept_sync_domain_mask(p2m, d->domain_dirty_cpumask);
+    ept_sync_domain_mask(p2m, d->dirty_cpumask);
  }
static void ept_tlb_flush(struct p2m_domain *p2m)
  {
-    ept_sync_domain_mask(p2m, p2m->domain->domain_dirty_cpumask);
+    ept_sync_domain_mask(p2m, p2m->domain->dirty_cpumask);
  }
static void ept_enable_pml(struct p2m_domain *p2m)
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -929,7 +929,7 @@ static void p2m_pt_change_entry_type_glo
      unmap_domain_page(tab);
if ( changed )
-         flush_tlb_mask(p2m->domain->domain_dirty_cpumask);
+         flush_tlb_mask(p2m->domain->dirty_cpumask);
  }
static int p2m_pt_change_entry_type_range(struct p2m_domain *p2m,
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -619,7 +619,7 @@ void paging_log_dirty_range(struct domai
    p2m_unlock(p2m);

-    flush_tlb_mask(d->domain_dirty_cpumask);
+    flush_tlb_mask(d->dirty_cpumask);
  }
/*
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -637,7 +637,7 @@ static int oos_remove_write_access(struc
      }
if ( ftlb )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
return 0;
  }
@@ -1064,7 +1064,7 @@ sh_validate_guest_pt_write(struct vcpu *
      rc = sh_validate_guest_entry(v, gmfn, entry, size);
      if ( rc & SHADOW_SET_FLUSH )
          /* Need to flush TLBs to pick up shadow PT changes */
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
      if ( rc & SHADOW_SET_ERROR )
      {
          /* This page is probably not a pagetable any more: tear it out of the
@@ -1227,7 +1227,7 @@ static void _shadow_prealloc(struct doma
                  /* See if that freed up enough space */
                  if ( d->arch.paging.shadow.free_pages >= pages )
                  {
-                    flush_tlb_mask(d->domain_dirty_cpumask);
+                    flush_tlb_mask(d->dirty_cpumask);
                      return;
                  }
              }
@@ -1281,7 +1281,7 @@ static void shadow_blow_tables(struct do
                                 pagetable_get_mfn(v->arch.shadow_table[i]), 0);
/* Make sure everyone sees the unshadowings */
-    flush_tlb_mask(d->domain_dirty_cpumask);
+    flush_tlb_mask(d->dirty_cpumask);
  }
void shadow_blow_tables_per_domain(struct domain *d)
@@ -1385,7 +1385,7 @@ mfn_t shadow_alloc(struct domain *d,
          sp = page_list_remove_head(&d->arch.paging.shadow.freelist);
          /* Before we overwrite the old contents of this page,
           * we need to be sure that no TLB holds a pointer to it. */
-        cpumask_copy(&mask, d->domain_dirty_cpumask);
+        cpumask_copy(&mask, d->dirty_cpumask);
          tlbflush_filter(&mask, sp->tlbflush_timestamp);
          if ( unlikely(!cpumask_empty(&mask)) )
          {
@@ -2797,7 +2797,7 @@ void sh_remove_shadows(struct domain *d,
/* Need to flush TLBs now, so that linear maps are safe next time we
       * take a fault. */
-    flush_tlb_mask(d->domain_dirty_cpumask);
+    flush_tlb_mask(d->dirty_cpumask);
paging_unlock(d);
  }
@@ -3481,7 +3481,7 @@ static void sh_unshadow_for_p2m_change(s
          {
              sh_remove_all_shadows_and_parents(d, mfn);
              if ( sh_remove_all_mappings(d, mfn, _gfn(gfn)) )
-                flush_tlb_mask(d->domain_dirty_cpumask);
+                flush_tlb_mask(d->dirty_cpumask);
          }
      }
@@ -3517,8 +3517,7 @@ static void sh_unshadow_for_p2m_change(s
                      sh_remove_all_shadows_and_parents(d, omfn);
                    if ( sh_remove_all_mappings(d, omfn,
                                                _gfn(gfn + (i << PAGE_SHIFT))) )
-                        cpumask_or(&flushmask, &flushmask,
-                                   d->domain_dirty_cpumask);
+                        cpumask_or(&flushmask, &flushmask, d->dirty_cpumask);
                  }
                  omfn = _mfn(mfn_x(omfn) + 1);
              }
@@ -3795,7 +3794,7 @@ int shadow_track_dirty_vram(struct domai
          }
      }
      if ( flush_tlb )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
      goto out;
out_sl1ma:
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3134,7 +3134,7 @@ static int sh_page_fault(struct vcpu *v,
          perfc_incr(shadow_rm_write_flush_tlb);
          smp_wmb();
          atomic_inc(&d->arch.paging.shadow.gtable_dirty_version);
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
      }
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -4114,7 +4114,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
       * (old) shadow linear maps in the writeable mapping heuristics. */
  #if GUEST_PAGING_LEVELS == 2
      if ( sh_remove_write_access(d, gmfn, 2, 0) != 0 )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
      sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
  #elif GUEST_PAGING_LEVELS == 3
      /* PAE guests have four shadow_table entries, based on the
@@ -4137,7 +4137,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
              }
          }
          if ( flush )
-            flush_tlb_mask(d->domain_dirty_cpumask);
+            flush_tlb_mask(d->dirty_cpumask);
          /* Now install the new shadows. */
          for ( i = 0; i < 4; i++ )
          {
@@ -4158,7 +4158,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
      }
  #elif GUEST_PAGING_LEVELS == 4
      if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
      sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
      if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
      {
@@ -4605,7 +4605,7 @@ static void sh_pagetable_dying(struct vc
          }
      }
      if ( flush )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
/* Remember that we've seen the guest use this interface, so we
       * can rely on it using it in future, instead of guessing at
@@ -4641,7 +4641,7 @@ static void sh_pagetable_dying(struct vc
          mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
          shadow_unhook_mappings(d, smfn, 1/* user pages only */);
          /* Now flush the TLB: we removed toplevel mappings. */
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
      }
/* Remember that we've seen the guest use this interface, so we
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -297,7 +297,7 @@ struct domain *domain_create(domid_t dom
      rwlock_init(&d->vnuma_rwlock);
err = -ENOMEM;
-    if ( !zalloc_cpumask_var(&d->domain_dirty_cpumask) )
+    if ( !zalloc_cpumask_var(&d->dirty_cpumask) )
          goto fail;
if ( domcr_flags & DOMCRF_hvm )
@@ -415,7 +415,7 @@ struct domain *domain_create(domid_t dom
          watchdog_domain_destroy(d);
      if ( init_status & INIT_xsm )
          xsm_free_security_domain(d);
-    free_cpumask_var(d->domain_dirty_cpumask);
+    free_cpumask_var(d->dirty_cpumask);
      free_domain_struct(d);
      return ERR_PTR(err);
  }
@@ -851,7 +851,7 @@ static void complete_domain_destroy(stru
      radix_tree_destroy(&d->pirq_tree, free_pirq_struct);
xsm_free_security_domain(d);
-    free_cpumask_var(d->domain_dirty_cpumask);
+    free_cpumask_var(d->dirty_cpumask);
      xfree(d->vcpu);
      free_domain_struct(d);
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -276,7 +276,7 @@ static inline void grant_write_unlock(st
  static inline void gnttab_flush_tlb(const struct domain *d)
  {
      if ( !paging_mode_external(d) )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
  }
static inline unsigned int
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -298,7 +298,7 @@ static void dump_domains(unsigned char k
          process_pending_softirqs();
printk("General information for domain %u:\n", d->domain_id);
-        cpuset_print(tmpstr, sizeof(tmpstr), d->domain_dirty_cpumask);
+        cpuset_print(tmpstr, sizeof(tmpstr), d->dirty_cpumask);
          printk("    refcnt=%d dying=%d pause_count=%d\n",
                 atomic_read(&d->refcnt), d->is_dying,
                 atomic_read(&d->pause_count));
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -193,7 +193,8 @@ struct p2m_domain {
      /* Shadow translated domain: p2m mapping */
      pagetable_t        phys_table;
-    /* Same as domain_dirty_cpumask but limited to
+    /*
+     * Same as a domain's dirty_cpumask but limited to
       * this p2m and those physical cpus whose vcpu's are in
       * guestmode.
       */
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -417,7 +417,7 @@ struct domain
      unsigned long    vm_assist;
/* Bitmask of CPUs which are holding onto this domain's state. */
-    cpumask_var_t    domain_dirty_cpumask;
+    cpumask_var_t    dirty_cpumask;
struct arch_domain arch;


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel


--
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.