[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH V2 1/3] HACK: arm: initial XENMAPSPACE_gmfn_foreign



On Mon, 2012-07-23 at 16:31 +0100, Ian Campbell wrote:
> Should use same interface as hybrid x86.

Mukesh,

Did you settle on an interface on the x86 side for hybrid?

Could you post just that bit of the hypervisor and kernel patches so we can
confirm that the interface works for ARM too and potentially take them
in advance of the full hybrid stuff on x86?

Ian.

> ---
>  xen/arch/arm/mm.c             |   32 ++++++++++++++++++++++++++------
>  xen/arch/x86/mm.c             |    2 ++
>  xen/include/public/arch-arm.h |    1 +
>  xen/include/public/memory.h   |   12 +++++++-----
>  4 files changed, 36 insertions(+), 11 deletions(-)
> 
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 40ac176..d369ee3 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -470,12 +470,32 @@ static int xenmem_add_to_physmap_once(
>  
>      switch ( xatp->space )
>      {
> -        case XENMAPSPACE_shared_info:
> -            if ( xatp->idx == 0 )
> -                mfn = virt_to_mfn(d->shared_info);
> -            break;
> -        default:
> -            return -ENOSYS;
> +    case XENMAPSPACE_shared_info:
> +        if ( xatp->idx == 0 )
> +            mfn = virt_to_mfn(d->shared_info);
> +        break;
> +    case XENMAPSPACE_gmfn_foreign:
> +    {
> +        paddr_t maddr;
> +        struct domain *od;
> +
> +        rc = rcu_lock_target_domain_by_id(xatp->foreign_domid, &od);
> +        if ( rc < 0 )
> +            return rc;
> +        maddr = p2m_lookup(od, xatp->idx << PAGE_SHIFT);
> +        if ( maddr == INVALID_PADDR )
> +        {
> +            printk("bad p2m lookup\n");
> +            dump_p2m_lookup(od, xatp->idx << PAGE_SHIFT);
> +            rcu_unlock_domain(od);
> +            return -EINVAL;
> +        }
> +        mfn = maddr >> PAGE_SHIFT;
> +        rcu_unlock_domain(od);
> +        break;
> +    }
> +    default:
> +        return -ENOSYS;
>      }
>  
>      domain_lock(d);
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 9338575..9f63974 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -4561,6 +4561,8 @@ static int xenmem_add_to_physmap_once(
>              mfn = idx;
>              page = mfn_to_page(mfn);
>              break;
> +        case XENMAPSPACE_gmfn_foreign:
> +            return -ENOSYS;
>          }
>          default:
>              break;
> diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
> index eb1add9..7ebe966 100644
> --- a/xen/include/public/arch-arm.h
> +++ b/xen/include/public/arch-arm.h
> @@ -121,6 +121,7 @@ typedef uint64_t xen_pfn_t;
>  #define XEN_LEGACY_MAX_VCPUS 1
>  
>  typedef uint32_t xen_ulong_t;
> +#define PRI_xen_ulong PRIx32
>  
>  struct vcpu_guest_context {
>      struct cpu_user_regs user_regs;         /* User-level CPU registers     
> */
> diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
> index 86d02c8..b2adfbe 100644
> --- a/xen/include/public/memory.h
> +++ b/xen/include/public/memory.h
> @@ -212,11 +212,13 @@ struct xen_add_to_physmap {
>      uint16_t    size;
>  
>      /* Source mapping space. */
> -#define XENMAPSPACE_shared_info 0 /* shared info page */
> -#define XENMAPSPACE_grant_table 1 /* grant table page */
> -#define XENMAPSPACE_gmfn        2 /* GMFN */
> -#define XENMAPSPACE_gmfn_range  3 /* GMFN range */
> -    unsigned int space;
> +#define XENMAPSPACE_shared_info  0 /* shared info page */
> +#define XENMAPSPACE_grant_table  1 /* grant table page */
> +#define XENMAPSPACE_gmfn         2 /* GMFN */
> +#define XENMAPSPACE_gmfn_range   3 /* GMFN range */
> +#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another guest */
> +    uint16_t space;
> +    domid_t foreign_domid; /* IFF gmfn_foreign */
>  
>  #define XENMAPIDX_grant_table_status 0x80000000
>  



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.