Re: [Xen-devel] [PATCH RESEND 1/4] xen/xsm: add hooks for claim
On Fri, May 03, 2013 at 10:09:13AM -0400, Daniel De Graaf wrote:
> Adds XSM hooks for the recently introduced XENMEM_claim_pages and
> XENMEM_get_outstanding_pages operations, and adds FLASK access vectors
> for them. This makes the access control decisions for these operations
> match those in the rest of the hypervisor.
I am not that familiar with this, but it looks OK. However, I am
going to post a patch soon that will eliminate one of these subops
(XENMEM_get_outstanding_pages).

Do you want to wait for that, or would it be better if I posted the
patch along with the XSM hook changes to delete it, which you could
then Ack?
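
To restate my understanding, the conversion follows the pattern used
elsewhere in the hypervisor: the open-coded IS_PRIV() test at each
call site becomes an XSM hook that defaults to the equivalent
privileged-domain check but can be overridden by FLASK policy.
Roughly (an illustrative sketch, not part of the patch):

    /* Before: hard-coded privilege check at the call site. */
    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    /* After: defer to XSM. XSM_PRIV names the default policy, which
     * the dummy module implements as the same IS_PRIV() test. */
    rc = xsm_claim_pages(XSM_PRIV, d);
    if ( rc )
        return rc;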
>
> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
> Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx> (for 4.3 release)
> Cc: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
> Cc: Keir Fraser <keir@xxxxxxx>
> ---
> tools/flask/policy/policy/modules/xen/xen.if | 2 +-
> xen/common/memory.c | 15 ++++++++-------
> xen/include/xsm/dummy.h | 12 ++++++++++++
> xen/include/xsm/xsm.h | 12 ++++++++++++
> xen/xsm/dummy.c | 2 ++
> xen/xsm/flask/hooks.c | 13 +++++++++++++
> xen/xsm/flask/policy/access_vectors | 4 +++-
> 7 files changed, 51 insertions(+), 9 deletions(-)
>
> diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if
> index 3a59f38..c86a618 100644
> --- a/tools/flask/policy/policy/modules/xen/xen.if
> +++ b/tools/flask/policy/policy/modules/xen/xen.if
> @@ -49,7 +49,7 @@ define(`create_domain_common', `
> getdomaininfo hypercall setvcpucontext setextvcpucontext
> getscheduler getvcpuinfo getvcpuextstate getaddrsize
> getaffinity setaffinity };
> - allow $1 $2:domain2 { set_cpuid settsc setscheduler };
> + allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim };
> allow $1 $2:security check_context;
> allow $1 $2:shadow enable;
> allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op };
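
Any policy built on create_domain_common picks up setclaim
automatically; presumably the shipped example policy gets it through
the create_domain interface that wraps this macro, i.e. something
like (illustrative, assuming the stock xen.te usage):

    # create_domain() wraps create_domain_common(), so dom0_t gains
    # setclaim over domU_t along with the other creation permissions.
    create_domain(dom0_t, domU_t)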
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index 68501d1..3239d53 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -712,9 +712,6 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
> }
>
> case XENMEM_claim_pages:
> - if ( !IS_PRIV(current->domain) )
> - return -EPERM;
> -
> if ( copy_from_guest(&reservation, arg, 1) )
> return -EFAULT;
>
> @@ -731,17 +728,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
> if ( d == NULL )
> return -EINVAL;
>
> - rc = domain_set_outstanding_pages(d, reservation.nr_extents);
> + rc = xsm_claim_pages(XSM_PRIV, d);
> +
> + if ( !rc )
> + rc = domain_set_outstanding_pages(d, reservation.nr_extents);
>
> rcu_unlock_domain(d);
>
> break;
>
> case XENMEM_get_outstanding_pages:
> - if ( !IS_PRIV(current->domain) )
> - return -EPERM;
> + rc = xsm_xenmem_get_outstanding_pages(XSM_PRIV);
> +
> + if ( !rc )
> + rc = get_outstanding_claims();
>
> - rc = get_outstanding_claims();
> break;
>
> default:
> diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
> index 9cae61c..9bfe596 100644
> --- a/xen/include/xsm/dummy.h
> +++ b/xen/include/xsm/dummy.h
> @@ -247,6 +247,18 @@ static XSM_INLINE int xsm_memory_pin_page(XSM_DEFAULT_ARG struct domain *d1, str
> return xsm_default_action(action, d1, d2);
> }
>
> +static XSM_INLINE int xsm_claim_pages(XSM_DEFAULT_ARG struct domain *d)
> +{
> + XSM_ASSERT_ACTION(XSM_PRIV);
> + return xsm_default_action(action, current->domain, d);
> +}
> +
> +static XSM_INLINE int xsm_xenmem_get_outstanding_pages(XSM_DEFAULT_VOID)
> +{
> + XSM_ASSERT_ACTION(XSM_PRIV);
> + return xsm_default_action(action, current->domain, NULL);
> +}
> +
> static XSM_INLINE int xsm_evtchn_unbound(XSM_DEFAULT_ARG struct domain *d, struct evtchn *chn,
> domid_t id2)
> {
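
For readers unfamiliar with the dummy hooks: xsm_default_action() is
what makes XSM_PRIV equivalent to the removed IS_PRIV() tests. A
paraphrase of the relevant branch (simplified; the real function
handles more action types, and the name here is mine):

    static int default_action_sketch(xsm_default_t action,
                                     struct domain *src,
                                     struct domain *target)
    {
        switch ( action )
        {
        case XSM_HOOK:                        /* no restriction */
            return 0;
        case XSM_PRIV:                        /* privileged callers only */
            return IS_PRIV(src) ? 0 : -EPERM;
        default:
            return -EPERM;
        }
    }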
> diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
> index 5103070..69fe64a 100644
> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -92,6 +92,8 @@ struct xsm_operations {
> int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page);
> int (*add_to_physmap) (struct domain *d1, struct domain *d2);
> int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
> + int (*claim_pages) (struct domain *d);
> + int (*xenmem_get_outstanding_pages) (void);
>
> int (*console_io) (struct domain *d, int cmd);
>
> @@ -350,6 +352,16 @@ static inline int xsm_remove_from_physmap(xsm_default_t def, struct domain *d1,
> return xsm_ops->remove_from_physmap(d1, d2);
> }
>
> +static inline int xsm_claim_pages(xsm_default_t def, struct domain *d)
> +{
> + return xsm_ops->claim_pages(d);
> +}
> +
> +static inline int xsm_xenmem_get_outstanding_pages(xsm_default_t def)
> +{
> + return xsm_ops->xenmem_get_outstanding_pages();
> +}
> +
> static inline int xsm_console_io (xsm_default_t def, struct domain *d, int cmd)
> {
> return xsm_ops->console_io(d, cmd);
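
The unused "def" argument in these wrappers is deliberate: it lets
the same call sites compile whether or not XSM is enabled.
Paraphrasing the structure of xsm.h (the exact guard names may
differ):

    #ifdef XSM_ENABLE
    /* ... the xsm_ops-> dispatch wrappers above ... */
    #else
    #include <xsm/dummy.h>  /* inline versions; 'def' supplies the action */
    #endif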
> diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
> index f7b0399..3d84e73 100644
> --- a/xen/xsm/dummy.c
> +++ b/xen/xsm/dummy.c
> @@ -66,6 +66,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
> set_to_dummy_if_null(ops, memory_adjust_reservation);
> set_to_dummy_if_null(ops, memory_stat_reservation);
> set_to_dummy_if_null(ops, memory_pin_page);
> + set_to_dummy_if_null(ops, claim_pages);
> + set_to_dummy_if_null(ops, xenmem_get_outstanding_pages);
>
> set_to_dummy_if_null(ops, console_io);
>
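
set_to_dummy_if_null() back-fills any hook a module leaves NULL with
the default implementation, so FLASK only needs to define the hooks
it overrides. The macro is roughly (paraphrased, not the exact text
from xen/xsm/dummy.c):

    #define set_to_dummy_if_null(ops, function)     \
        do {                                        \
            if ( ops->function == NULL )            \
                ops->function = xsm_##function;     \
        } while ( 0 )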
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index 04c8a39..3291aa2 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -417,6 +417,17 @@ static int flask_memory_pin_page(struct domain *d1, struct domain *d2,
> return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PINPAGE);
> }
>
> +static int flask_claim_pages(struct domain *d)
> +{
> + return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SETCLAIM);
> +}
> +
> +static int flask_xenmem_get_outstanding_pages(void)
> +{
> + return avc_current_has_perm(SECINITSID_XEN, SECCLASS_XEN,
> + XEN__HEAP, NULL);
> +}
> +
> static int flask_console_io(struct domain *d, int cmd)
> {
> u32 perm;
> @@ -1485,6 +1496,8 @@ static struct xsm_operations flask_ops = {
> .memory_adjust_reservation = flask_memory_adjust_reservation,
> .memory_stat_reservation = flask_memory_stat_reservation,
> .memory_pin_page = flask_memory_pin_page,
> + .claim_pages = flask_claim_pages,
> + .xenmem_get_outstanding_pages = flask_xenmem_get_outstanding_pages,
>
> .console_io = flask_console_io,
>
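
With the hooks registered, a policy grants the two new checks using
the vectors defined below. For example (hypothetical rules using the
stock example-policy types dom0_t/domU_t/xen_t):

    # Hypothetical: let dom0 stake claims for domU and read the
    # outstanding-claims total.
    allow dom0_t domU_t:domain2 setclaim;
    allow dom0_t xen_t:xen heap;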
> diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
> index c8ae806..544c3ba 100644
> --- a/xen/xsm/flask/policy/access_vectors
> +++ b/xen/xsm/flask/policy/access_vectors
> @@ -54,7 +54,7 @@ class xen
> debug
> # XEN_SYSCTL_getcpuinfo, XENPF_get_cpu_version, XENPF_get_cpuinfo
> getcpuinfo
> -# XEN_SYSCTL_availheap
> +# XEN_SYSCTL_availheap, XENMEM_get_outstanding_pages
> heap
> # XEN_SYSCTL_get_pmstat, XEN_SYSCTL_pm_op, XENPF_set_processor_pminfo,
> # XENPF_core_parking
> @@ -192,6 +192,8 @@ class domain2
> settsc
> # XEN_DOMCTL_scheduler_op with XEN_DOMCTL_SCHEDOP_putinfo
> setscheduler
> +# XENMEM_claim_pages
> + setclaim
> }
>
> # Similar to class domain, but primarily contains domctls related to HVM domains
> --
> 1.8.1.4
>
>
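
P.S. For completeness, the toolstack reaches these paths through
libxc; from memory (please double-check against xenctrl.h) the
wrappers look like this:

    /* Both end up in do_memory_op() and now hit the new XSM checks. */
    int xc_domain_claim_pages(xc_interface *xch, uint32_t domid,
                              unsigned long nr_pages);
    long xc_domain_get_outstanding_pages(xc_interface *xch);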
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel