
Re: [Xen-devel] [PATCH v2 10/12] x86/altp2m: define and implement alternate p2m HVMOP types.



On 22/06/15 19:56, Ed White wrote:
> Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
> ---
>  xen/arch/x86/hvm/hvm.c          | 216 ++++++++++++++++++++++++++++++++++++++++
>  xen/include/public/hvm/hvm_op.h |  69 +++++++++++++
>  2 files changed, 285 insertions(+)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index b758ee1..b3e74ce 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -6424,6 +6424,222 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
>          break;
>      }
>  
> +    case HVMOP_altp2m_get_domain_state:
> +    {
> +        struct xen_hvm_altp2m_domain_state a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() )
> +            goto param_fail9;
> +
> +        a.state = altp2mhvm_active(d);
> +        rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
> +
> +    param_fail9:
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
> +    case HVMOP_altp2m_set_domain_state:
> +    {
> +        struct xen_hvm_altp2m_domain_state a;
> +        struct domain *d;
> +        struct vcpu *v;
> +        bool_t ostate;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
> +             nestedhvm_enabled(d) )
> +            goto param_fail10;
> +
> +        ostate = d->arch.altp2m_active;
> +        d->arch.altp2m_active = !!a.state;
> +
> +        /* If the alternate p2m state has changed, handle appropriately */
> +        if ( d->arch.altp2m_active != ostate )
> +        {
> +            if ( !ostate && !p2m_init_altp2m_by_id(d, 0) )
> +                    goto param_fail10;

Indentation: the goto is indented one level too deep.
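
i.e. (same code as in the patch, just reindented):

    if ( !ostate && !p2m_init_altp2m_by_id(d, 0) )
        goto param_fail10;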

> +
> +            for_each_vcpu( d, v )
> +                if (!ostate)
> +                    altp2mhvm_vcpu_initialise(v);
> +                else
> +                    altp2mhvm_vcpu_destroy(v);

Although strictly speaking this is (almost) ok by the style guidelines,
it would probably be better to have braces around the for_each_vcpu()
loop body.  Also, the brackets around !ostate need spaces inside them.
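
Something along these lines (same calls as in the patch, just with braces
and the spacing fixed):

    for_each_vcpu( d, v )
    {
        if ( !ostate )
            altp2mhvm_vcpu_initialise(v);
        else
            altp2mhvm_vcpu_destroy(v);
    }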

> +
> +            if ( ostate )
> +                p2m_flush_altp2m(d);
> +        }
> +
> +        rc = 0;
> +
> +    param_fail10:
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
> +    case HVMOP_altp2m_vcpu_enable_notify:
> +    {
> +        struct domain *curr_d = current->domain;
> +        struct vcpu *curr = current;
> +        struct xen_hvm_altp2m_vcpu_enable_notify a;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        if ( !is_hvm_domain(curr_d) || !hvm_altp2m_supported() ||
> +             !curr_d->arch.altp2m_active || vcpu_altp2mhvm(curr).veinfo_gfn )
> +            return -EINVAL;
> +
> +        vcpu_altp2mhvm(curr).veinfo_gfn = a.pfn;
> +        ahvm_vcpu_update_vmfunc_ve(curr);

You need a gfn bounds check against the host p2m here.
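
Something like the below would do (untested sketch; whether max_mapped_pfn
is the right bound, or whether the gfn should instead be looked up to
confirm it is actually mapped, I leave to you):

    /* Sketch only: reject a #VE info gfn beyond the host p2m's range. */
    if ( a.pfn > p2m_get_hostp2m(curr_d)->max_mapped_pfn )
        return -EINVAL;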

> +        rc = 0;
> +
> +        break;
> +    }
> +
> +    case HVMOP_altp2m_create_p2m:
> +    {
> +        struct xen_hvm_altp2m_view a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
> +             !d->arch.altp2m_active )
> +            goto param_fail11;
> +
> +        if ( !p2m_init_next_altp2m(d, &a.view) )
> +            goto param_fail11;
> +
> +        rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
> +
> +    param_fail11:
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
> +    case HVMOP_altp2m_destroy_p2m:
> +    {
> +        struct xen_hvm_altp2m_view a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
> +             !d->arch.altp2m_active )
> +            goto param_fail12;
> +
> +        if ( p2m_destroy_altp2m_by_id(d, a.view) )
> +            rc = 0;
> +
> +    param_fail12:
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
> +    case HVMOP_altp2m_switch_p2m:
> +    {
> +        struct xen_hvm_altp2m_view a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
> +             !d->arch.altp2m_active )
> +            goto param_fail13;
> +
> +        if ( p2m_switch_domain_altp2m_by_id(d, a.view) )
> +            rc = 0;
> +
> +    param_fail13:
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
> +    case HVMOP_altp2m_set_mem_access:
> +    {
> +        struct xen_hvm_altp2m_set_mem_access a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
> +             !d->arch.altp2m_active )
> +            goto param_fail14;
> +
> +        if ( p2m_set_altp2m_mem_access(d, a.view, a.pfn, a.hvmmem_access) )
> +            rc = 0;
> +
> +    param_fail14:
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
> +    case HVMOP_altp2m_change_pfn:
> +    {
> +        struct xen_hvm_altp2m_change_pfn a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
> +             !d->arch.altp2m_active )
> +            goto param_fail15;
> +
> +        if ( p2m_change_altp2m_pfn(d, a.view, a.old_pfn, a.new_pfn) )
> +            rc = 0;
> +
> +    param_fail15:
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
>      default:
>      {
>          gdprintk(XENLOG_DEBUG, "Bad HVM op %ld.\n", op);
> diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
> index cde3571..f6abce9 100644
> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -389,6 +389,75 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
>  
>  #endif /* defined(__i386__) || defined(__x86_64__) */
>  

We have an upper ABI limit of 255 HVMOPs.  As such, I would recommend
having a single HVMOP_altp2m and a subop which lives as the first
parameter in any structure.
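
Roughly this shape (sketch only; the struct and field names here are
illustrative, not a concrete proposal):

    #define HVMOP_altp2m 24

    struct xen_hvm_altp2m_op {
        uint32_t cmd;       /* HVMOP_altp2m_* sub-operation */
        domid_t  domid;     /* domain to be updated or queried */
        uint16_t pad;       /* explicit padding for a stable ABI */
        /* ... sub-op specific fields follow ... */
    };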

~Andrew

> +/* Set/get the altp2m state for a domain */
> +#define HVMOP_altp2m_set_domain_state     24
> +#define HVMOP_altp2m_get_domain_state     25
> +struct xen_hvm_altp2m_domain_state {
> +    /* Domain to be updated or queried */
> +    domid_t domid;
> +    /* IN or OUT variable on/off */
> +    uint8_t state;
> +};
> +typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);
> +
> +/* Set the current VCPU to receive altp2m event notifications */
> +#define HVMOP_altp2m_vcpu_enable_notify   26
> +struct xen_hvm_altp2m_vcpu_enable_notify {
> +    /* #VE info area pfn */
> +    uint64_t pfn;
> +};
> +typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
> +
> +/* Create a new view */
> +#define HVMOP_altp2m_create_p2m   27
> +/* Destroy a view */
> +#define HVMOP_altp2m_destroy_p2m  28
> +/* Switch view for an entire domain */
> +#define HVMOP_altp2m_switch_p2m   29
> +struct xen_hvm_altp2m_view {
> +    /* Domain to be updated */
> +    domid_t domid;
> +    /* IN/OUT variable */
> +    uint16_t view;
> +    /* Create view only: default access type
> +     * NOTE: currently ignored */
> +    uint16_t hvmmem_default_access; /* xenmem_access_t */
> +};
> +typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
> +
> +/* Notify that a page of memory is to have specific access types */
> +#define HVMOP_altp2m_set_mem_access 30
> +struct xen_hvm_altp2m_set_mem_access {
> +    /* Domain to be updated. */
> +    domid_t domid;
> +    /* view */
> +    uint16_t view;
> +    /* Memory type */
> +    uint16_t hvmmem_access; /* xenmem_access_t */
> +    /* pfn */
> +    uint64_t pfn;
> +};
> +typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
> +
> +/* Change a p2m entry to map a different pfn */
> +#define HVMOP_altp2m_change_pfn 31
> +struct xen_hvm_altp2m_change_pfn {
> +    /* Domain to be updated. */
> +    domid_t domid;
> +    /* view */
> +    uint16_t view;
> +    /* old pfn */
> +    uint64_t old_pfn;
> +    /* new pfn, -1 means revert */
> +    uint64_t new_pfn;
> +};
> +typedef struct xen_hvm_altp2m_change_pfn xen_hvm_altp2m_change_pfn_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_pfn_t);
> +
>  #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
>  
>  /*

