[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

RE: [PATCH 07/16] x86/P2M: p2m_{alloc,free}_ptp() and p2m_alloc_table() are HVM-only


  • To: Jan Beulich <jbeulich@xxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: "Tian, Kevin" <kevin.tian@xxxxxxxxx>
  • Date: Wed, 7 Jul 2021 01:35:30 +0000
  • Accept-language: en-US
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=intel.com; dmarc=pass action=none header.from=intel.com; dkim=pass header.d=intel.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck; bh=gvXAQNkeq/K1e4VNMs1Mat8zbNFyJKAE0QW1J9na4t4=; b=fFAl3YHfetIB+n30OdX2ViEQTXXBf6yR31NaQfl2PYbQutGB9qxqm7eVvb3rLCSrwleCyHq5x8wTQLY14hTKjcYebg6G+BvOX+p3zW7WuOcnxU2nEO2zFlXyGaocML0OiROICbFFsm+hGMPyiDobf8+cls+G8NQx+idvSymR1lH0vw0cuEvTnxUBiXz2nfc/nyMZJU3VONIaxUAirVF3cpYJclP0bndXqHlkvFLtidMCDbiLX+wNKP9vJyg/gQV6gA1b1XdB4UgNb8CNnUoocvGquQu/jHL6Hw1GqpwUGHs1IZ4zzmyUgwxfJWWjyMD9MHCxfqTidG/QwM7y6DD5Lw==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; b=OhfE0jGlZ5HKB6x3RLetdkTLNpNkdYqQnBmfDlGXr0Z+z4XsdPVa2JzoEkVjmQ3lrtZg2PtsVcAOOaQAwjt8GYGEpgb1+2RBUyp61zPuCXoTxw3tzzSy+IFB5hGfDPczi6vVGdWTZ1f0Ke228U+TVFJeJu5Lj7HC87rurtRfKFLwReMhw+HKMr07JhSDKkCYsk5RheJZ0Ac+VlQAV7cSOJDyWPMYiSzF+5yqkMnJdBTMfSJRuARusjQBpjDwOkBfomIXQlQBUdrQAuxjHbqG/f12iPO6XbelZN6fti0edKBjzBDZij9OiPT6lrqoU+ki9iGc3nTVz4IRYPDcPkooVA==
  • Authentication-results: suse.com; dkim=none (message not signed) header.d=none;suse.com; dmarc=none action=none header.from=intel.com;
  • Cc: "Cooper, Andrew" <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>
  • Delivery-date: Wed, 07 Jul 2021 01:35:39 +0000
  • Dlp-product: dlpe-windows
  • Dlp-reaction: no-action
  • Dlp-version: 11.5.1.3
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHXcbgpk0b+361jIk+Tsj/Wl7GPf6s2vTRg
  • Thread-topic: [PATCH 07/16] x86/P2M: p2m_{alloc,free}_ptp() and p2m_alloc_table() are HVM-only

> From: Jan Beulich <jbeulich@xxxxxxxx>
> Sent: Tuesday, July 6, 2021 12:09 AM
> 
> This also includes the two p2m related fields.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>

> 
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -94,7 +94,9 @@ static int p2m_initialise(struct domain
>      int ret = 0;
> 
>      mm_rwlock_init(&p2m->lock);
> +#ifdef CONFIG_HVM
>      INIT_PAGE_LIST_HEAD(&p2m->pages);
> +#endif
> 
>      p2m->domain = d;
>      p2m->default_access = p2m_access_rwx;
> @@ -628,6 +630,7 @@ struct page_info *p2m_get_page_from_gfn(
>  }
> 
>  #ifdef CONFIG_HVM
> +
>  /* Returns: 0 for success, -errno for failure */
>  int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
>                    unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
> @@ -667,7 +670,6 @@ int p2m_set_entry(struct p2m_domain *p2m
> 
>      return rc;
>  }
> -#endif
> 
>  mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level)
>  {
> @@ -746,6 +748,8 @@ int p2m_alloc_table(struct p2m_domain *p
>      return 0;
>  }
> 
> +#endif /* CONFIG_HVM */
> +
>  /*
>   * hvm fixme: when adding support for pvh non-hardware domains, this path must
>   * cleanup any foreign p2m types (release refcnts on them).
> @@ -754,7 +758,9 @@ void p2m_teardown(struct p2m_domain *p2m
>  /* Return all the p2m pages to Xen.
>   * We know we don't have any extra mappings to these pages */
>  {
> +#ifdef CONFIG_HVM
>      struct page_info *pg;
> +#endif
>      struct domain *d;
> 
>      if (p2m == NULL)
> @@ -763,11 +769,16 @@ void p2m_teardown(struct p2m_domain *p2m
>      d = p2m->domain;
> 
>      p2m_lock(p2m);
> +
>      ASSERT(atomic_read(&d->shr_pages) == 0);
> +
> +#ifdef CONFIG_HVM
>      p2m->phys_table = pagetable_null();
> 
>      while ( (pg = page_list_remove_head(&p2m->pages)) )
>          d->arch.paging.free_page(d, pg);
> +#endif
> +
>      p2m_unlock(p2m);
>  }
> 
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -2700,8 +2700,10 @@ int shadow_enable(struct domain *d, u32
>   out_locked:
>      paging_unlock(d);
>   out_unlocked:
> +#ifdef CONFIG_HVM
>      if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
>          p2m_teardown(p2m);
> +#endif
>      if ( rv != 0 && pg != NULL )
>      {
>          pg->count_info &= ~PGC_count_mask;
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -339,12 +339,14 @@ static uint64_t domain_pgd_maddr(struct
> 
>      ASSERT(spin_is_locked(&hd->arch.mapping_lock));
> 
> +#ifdef CONFIG_HVM
>      if ( iommu_use_hap_pt(d) )
>      {
>          pagetable_t pgt = p2m_get_pagetable(p2m_get_hostp2m(d));
> 
>          return pagetable_get_paddr(pgt);
>      }
> +#endif
> 
>      if ( !hd->arch.vtd.pgd_maddr )
>      {
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -202,9 +202,6 @@ struct p2m_domain {
>      /* Lock that protects updates to the p2m */
>      mm_rwlock_t           lock;
> 
> -    /* Shadow translated domain: p2m mapping */
> -    pagetable_t        phys_table;
> -
>      /*
>       * Same as a domain's dirty_cpumask but limited to
>       * this p2m and those physical cpus whose vcpu's are in
> @@ -223,9 +220,6 @@ struct p2m_domain {
>       */
>      p2m_access_t default_access;
> 
> -    /* Pages used to construct the p2m */
> -    struct page_list_head pages;
> -
>      /* Host p2m: Log-dirty ranges registered for the domain. */
>      struct rangeset   *logdirty_ranges;
> 
> @@ -233,6 +227,12 @@ struct p2m_domain {
>      bool               global_logdirty;
> 
>  #ifdef CONFIG_HVM
> +    /* Translated domain: p2m mapping */
> +    pagetable_t        phys_table;
> +
> +    /* Pages used to construct the p2m */
> +    struct page_list_head pages;
> +
>      /* Alternate p2m: count of vcpu's currently using this p2m. */
>      atomic_t           active_vcpus;
> 


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.