
Re: [Xen-devel] [PATCHv8] x86/ept: defer the invalidation until the p2m lock is released



Hi Wei,

This patch has all the required acks now.  Can you consider it for 4.7?

It's a significant scalability improvement (see the cover letter for
details).

v7 has been in XenServer's upcoming release for a while now, so it has
been tested with many guests and many life-cycle operations, including
plenty of PoD use.

Thanks.

David

On 12/04/2016 17:19, David Vrabel wrote:
> Holding the p2m lock while calling ept_sync_domain() is very expensive
> since it does an on_selected_cpus() call.  IPIs on many-socket
> machines can be very slow and on_selected_cpus() is serialized.
> 
> It is safe to defer the invalidate until the p2m lock is released
> except for two cases:
> 
> 1. When freeing a page table page (since partial translations may be
>    cached).
> 2. When reclaiming a zero page as part of PoD.
> 
> For these cases, add p2m_tlb_flush_sync() calls which will immediately
> perform the invalidate before the page is freed or reclaimed.
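(Aside for reviewers skimming the diff below: a minimal sketch of the
resulting lock/flush pattern, using the patch's names but with the bodies
simplified; the altp2m lock selection, nested-p2m handling and the
invalidate-mask bookkeeping are omitted, so see the hunks for the real
code.)

    /* p2m_lock() records that TLB flushes may now be deferred. */
    #define p2m_lock(p)                         \
        do {                                    \
            mm_write_lock(p2m, &(p)->lock);     \
            (p)->defer_flush++;                 \
        } while ( 0 )

    /* With the lock held, ept_sync_domain() no longer IPIs; it only
     * marks the p2m as needing a flush. */
    void ept_sync_domain(struct p2m_domain *p2m)
    {
        ept_sync_domain_prepare(p2m);

        if ( p2m->defer_flush )
        {
            p2m->need_flush = 1;
            return;
        }

        ept_sync_domain_mask(p2m, p2m->domain->domain_dirty_cpumask);
    }

    /* The deferred flush is issued by p2m_unlock(), via
     * p2m_unlock_and_tlb_flush(), after the lock has been dropped, so
     * the expensive on_selected_cpus() IPI no longer serialises other
     * p2m updates. */
    void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m)
    {
        bool_t flush = p2m->need_flush;

        p2m->need_flush = 0;
        mm_write_unlock(&p2m->lock);
        if ( flush )
            p2m->tlb_flush(p2m);
    }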
> 
> Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
> ---
> v8:
> - p2m_tlb_flush_and_unlock() -> p2m_unlock_and_tlb_flush().
> - p2m_unlock_and_tlb_flush() now does the unlock and the p2m
>   implementation need only provide a tlb_flush() op.
> 
> v7:
> - Add some more p2m_tlb_flush_sync() calls to PoD.
> - More comments.
> 
> v6:
> - Move p2m_tlb_flush_sync() to immediately before p2m_free_ptp().  It was
>   called all the time otherwise.
> 
> v5:
> - add p2m_tlb_flush_sync() and call it before freeing page table pages
>   and reclaiming zeroed PoD pages.
> 
> v2:
> - use per-p2m list for deferred pages.
> - update synced_mask while holding write lock.
> ---
>  xen/arch/x86/mm/mm-locks.h | 23 +++++++++++++++--------
>  xen/arch/x86/mm/p2m-ept.c  | 39 +++++++++++++++++++++++++++++++--------
>  xen/arch/x86/mm/p2m-pod.c  |  4 ++++
>  xen/arch/x86/mm/p2m.c      | 26 ++++++++++++++++++++++++++
>  xen/include/asm-x86/p2m.h  | 22 ++++++++++++++++++++++
>  5 files changed, 98 insertions(+), 16 deletions(-)
> 
> diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
> index 8a40986..086c8bb 100644
> --- a/xen/arch/x86/mm/mm-locks.h
> +++ b/xen/arch/x86/mm/mm-locks.h
> @@ -265,14 +265,21 @@ declare_mm_lock(altp2mlist)
>   */
>  
>  declare_mm_rwlock(altp2m);
> -#define p2m_lock(p)                         \
> -{                                           \
> -    if ( p2m_is_altp2m(p) )                 \
> -        mm_write_lock(altp2m, &(p)->lock);  \
> -    else                                    \
> -        mm_write_lock(p2m, &(p)->lock);     \
> -}
> -#define p2m_unlock(p)         mm_write_unlock(&(p)->lock);
> +#define p2m_lock(p)                             \
> +    do {                                        \
> +        if ( p2m_is_altp2m(p) )                 \
> +            mm_write_lock(altp2m, &(p)->lock);  \
> +        else                                    \
> +            mm_write_lock(p2m, &(p)->lock);     \
> +        (p)->defer_flush++;                     \
> +    } while (0)
> +#define p2m_unlock(p)                           \
> +    do {                                        \
> +        if ( --(p)->defer_flush == 0 )          \
> +            p2m_unlock_and_tlb_flush(p);        \
> +        else                                    \
> +            mm_write_unlock(&(p)->lock);        \
> +    } while (0)
>  #define gfn_lock(p,g,o)       p2m_lock(p)
>  #define gfn_unlock(p,g,o)     p2m_unlock(p)
>  #define p2m_read_lock(p)      mm_read_lock(p2m, &(p)->lock)
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index 3cb6868..1ed5b47 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -264,6 +264,7 @@ static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int l
>          unmap_domain_page(epte);
>      }
>      
> +    p2m_tlb_flush_sync(p2m);
>      p2m_free_ptp(p2m, mfn_to_page(ept_entry->mfn));
>  }
>  
> @@ -1096,15 +1097,10 @@ static void __ept_sync_domain(void *info)
>       */
>  }
>  
> -void ept_sync_domain(struct p2m_domain *p2m)
> +static void ept_sync_domain_prepare(struct p2m_domain *p2m)
>  {
>      struct domain *d = p2m->domain;
>      struct ept_data *ept = &p2m->ept;
> -    /* Only if using EPT and this domain has some VCPUs to dirty. */
> -    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
> -        return;
> -
> -    ASSERT(local_irq_is_enabled());
>  
>      if ( nestedhvm_enabled(d) && !p2m_is_nestedp2m(p2m) )
>          p2m_flush_nestedp2m(d);
> @@ -1117,9 +1113,35 @@ void ept_sync_domain(struct p2m_domain *p2m)
>       *    of an EP4TA reuse is still needed.
>       */
>      cpumask_setall(ept->invalidate);
> +}
> +
> +static void ept_sync_domain_mask(struct p2m_domain *p2m, const cpumask_t *mask)
> +{
> +    on_selected_cpus(mask, __ept_sync_domain, p2m, 1);
> +}
> +
> +void ept_sync_domain(struct p2m_domain *p2m)
> +{
> +    struct domain *d = p2m->domain;
>  
> -    on_selected_cpus(d->domain_dirty_cpumask,
> -                     __ept_sync_domain, p2m, 1);
> +    /* Only if using EPT and this domain has some VCPUs to dirty. */
> +    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
> +        return;
> +
> +    ept_sync_domain_prepare(p2m);
> +
> +    if ( p2m->defer_flush )
> +    {
> +        p2m->need_flush = 1;
> +        return;
> +    }
> +
> +    ept_sync_domain_mask(p2m, d->domain_dirty_cpumask);
> +}
> +
> +static void ept_tlb_flush(struct p2m_domain *p2m)
> +{
> +    ept_sync_domain_mask(p2m, p2m->domain->domain_dirty_cpumask);
>  }
>  
>  static void ept_enable_pml(struct p2m_domain *p2m)
> @@ -1170,6 +1192,7 @@ int ept_p2m_init(struct p2m_domain *p2m)
>      p2m->change_entry_type_range = ept_change_entry_type_range;
>      p2m->memory_type_changed = ept_memory_type_changed;
>      p2m->audit_p2m = NULL;
> +    p2m->tlb_flush = ept_tlb_flush;
>  
>      /* Set the memory type used when accessing EPT paging structures. */
>      ept->ept_mt = EPT_DEFAULT_MT;
> diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
> index ea16d3e..35835d1 100644
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -626,6 +626,7 @@ p2m_pod_decrease_reservation(struct domain *d,
>  
>              p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
>                            p2m_invalid, p2m->default_access);
> +            p2m_tlb_flush_sync(p2m);
>              for ( j = 0; j < n; ++j )
>                  set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
>              p2m_pod_cache_add(p2m, page, cur_order);
> @@ -755,6 +756,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
>      /* Try to remove the page, restoring old mapping if it fails. */
>      p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
>                    p2m_populate_on_demand, p2m->default_access);
> +    p2m_tlb_flush_sync(p2m);
>  
>      /* Make sure none of the MFNs are used elsewhere... for example, mapped
>       * via the grant table interface, or by qemu.  Allow one refcount for
> @@ -886,6 +888,8 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
>          }
>      }
>  
> +    p2m_tlb_flush_sync(p2m);
> +
>      /* Now check each page for real */
>      for ( i=0; i < count; i++ )
>      {
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index b3fce1b..491deac 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -325,6 +325,32 @@ void p2m_flush_hardware_cached_dirty(struct domain *d)
>      }
>  }
>  
> +/*
> + * Force a synchronous P2M TLB flush if a deferred flush is pending.
> + *
> + * Must be called with the p2m lock held.
> + */
> +void p2m_tlb_flush_sync(struct p2m_domain *p2m)
> +{
> +    if ( p2m->need_flush ) {
> +        p2m->need_flush = 0;
> +        p2m->tlb_flush(p2m);
> +    }
> +}
> +
> +/*
> + * Unlock the p2m lock and do a P2M TLB flush if needed.
> + */
> +void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m)
> +{
> +    if ( p2m->need_flush ) {
> +        p2m->need_flush = 0;
> +        mm_write_unlock(&p2m->lock);
> +        p2m->tlb_flush(p2m);
> +    } else
> +        mm_write_unlock(&p2m->lock);
> +}
> +
>  mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
>                      p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
>                      unsigned int *page_order, bool_t locked)
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 5392eb0..65675a2 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -262,6 +262,22 @@ struct p2m_domain {
>                                            l1_pgentry_t new, unsigned int level);
>      long               (*audit_p2m)(struct p2m_domain *p2m);
>  
> +    /*
> +     * P2M updates may require TLBs to be flushed (invalidated).
> +     *
> +     * If 'defer_flush' is set, flushes may be deferred by setting
> +     * 'need_flush' and then flushing in 'tlb_flush()'.
> +     *
> +     * 'tlb_flush()' is only called if 'need_flush' was set.
> +     *
> +     * If a flush may be being deferred but an immediate flush is
> +     * required (e.g., if a page is being freed to a pool other than the
> +     * domheap), call p2m_tlb_flush_sync().
> +     */
> +    void (*tlb_flush)(struct p2m_domain *p2m);
> +    unsigned int defer_flush;
> +    bool_t need_flush;
> +
>      /* Default P2M access type for each page in the domain: new pages,
>       * swapped in pages, cleared pages, and pages that are ambiguously
>       * retyped get this access type.  See definition of p2m_access_t. */
> @@ -353,6 +369,12 @@ static inline bool_t p2m_is_altp2m(const struct p2m_domain *p2m)
>  
>  #define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
>  
> +/*
> + * Ensure any deferred p2m TLB flush has been completed on all VCPUs.
> + */
> +void p2m_tlb_flush_sync(struct p2m_domain *p2m);
> +void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m);
> +
>  /**** p2m query accessors. They lock p2m_lock, and thus serialize
>   * lookups wrt modifications. They _do not_ release the lock on exit.
>   * After calling any of the variants below, caller needs to use
> 
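
(For reference, the two cases where the flush cannot be deferred, recapped
from the hunks above with illustrative comments; the comments here are not
part of the patch:)

    /* 1. Freeing an intermediate page table page: the flush must complete
     *    before p2m_free_ptp(), because other CPUs may still hold cached
     *    partial translations referencing the page about to be freed. */
    p2m_tlb_flush_sync(p2m);
    p2m_free_ptp(p2m, mfn_to_page(ept_entry->mfn));

    /* 2. Reclaiming a zeroed page for PoD: the mapping is removed, the
     *    flush is forced, and only then are the candidate pages checked
     *    and returned to the PoD cache, so no vCPU can still write
     *    through a stale translation. */
    p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
                  p2m_populate_on_demand, p2m->default_access);
    p2m_tlb_flush_sync(p2m);
    /* ... check the pages are still zeroed, then p2m_pod_cache_add() ... */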

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel