
[xen staging] x86/hvm: refactor set param



commit 336fbbdf61562e5ae1112f24bc90c1164adf2144
Author:     Norbert Manthey <nmanthey@xxxxxxxxx>
AuthorDate: Fri Feb 19 17:24:03 2021 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Feb 19 17:24:03 2021 +0100

    x86/hvm: refactor set param
    
    To prevent leaking HVM params via L1TF and similar issues on a
    hyperthread pair, load a domain's parameter values only after
    performing all relevant checks and blocking speculative execution.
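
    (Editor's illustration, not part of the original patch: a minimal,
    standalone C sketch of the ordering this change enforces. The stub
    block_speculation() only stands in for Xen's barrier of the same
    name; NR_PARAMS, params and allow_set_param are invented for the
    example.)

        #include <stdbool.h>
        #include <stdint.h>

        #define NR_PARAMS 64

        static uint64_t params[NR_PARAMS]; /* stand-in for d->arch.hvm.params */

        static void block_speculation(void)
        {
            /* In Xen this expands to a speculation barrier (e.g. LFENCE). */
        }

        static int allow_set_param(bool permitted, uint32_t index, uint64_t *out)
        {
            if ( index >= NR_PARAMS || !permitted ) /* 1. checks first          */
                return -1;

            block_speculation();                    /* 2. fence off speculation */

            *out = params[index];                   /* 3. only then load data   */
            return 0;
        }

        int main(void)
        {
            uint64_t v;
            return allow_set_param(true, 3, &v);
        }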
    
    For both get and set, the index has already been range-checked in the
    outer calling function. The block_speculation calls in hvmop_get_param
    and hvmop_set_param are removed, because is_hvm_domain already blocks
    speculation.
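
    (Editor's note for context, simplified and assumed rather than quoted:
    at the time of this series is_hvm_domain is built on evaluate_nospec,
    which on x86 acts as a speculation barrier, conceptually along the
    lines of the sketch below, so an extra block_speculation after it is
    redundant.)

        /* Conceptual shape only, not the literal Xen definitions. */
        #define evaluate_nospec(cond) \
            ({ bool cond_ = (cond); block_speculation(); cond_; })

        #define is_hvm_domain(d) \
            evaluate_nospec((d)->guest_type == guest_type_hvm)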
    
    Furthermore, speculation barriers are rearranged to make sure guests
    running on co-located vCPUs cannot leak HVM parameter values of other
    domains.
    
    To improve symmetry between the get and set operations, function
    hvmop_set_param is made static.
    
    This is part of the speculative hardening effort.
    
    Reported-by: Hongyan Xia <hongyxia@xxxxxxxxxxxx>
    Signed-off-by: Norbert Manthey <nmanthey@xxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Release-Acked-by: Ian Jackson <iwj@xxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index a2ecd54ab7..e7bcffebc4 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4060,7 +4060,7 @@ static int hvm_allow_set_param(struct domain *d,
                                uint32_t index,
                                uint64_t new_value)
 {
-    uint64_t value = d->arch.hvm.params[index];
+    uint64_t value;
     int rc;
 
     rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_set_param);
@@ -4108,6 +4108,10 @@ static int hvm_allow_set_param(struct domain *d,
     if ( rc )
         return rc;
 
+    /* Make sure we evaluate permissions before loading data of domains. */
+    block_speculation();
+
+    value = d->arch.hvm.params[index];
     switch ( index )
     {
     /* The following parameters should only be changed once. */
@@ -4134,13 +4138,13 @@ static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
     struct vcpu *v;
     int rc;
 
-    if ( index >= HVM_NR_PARAMS )
-        return -EINVAL;
-
     rc = hvm_allow_set_param(d, index, value);
     if ( rc )
         return rc;
 
+    /* Make sure we evaluate permissions before loading data of domains. */
+    block_speculation();
+
     switch ( index )
     {
     case HVM_PARAM_CALLBACK_IRQ:
@@ -4305,7 +4309,7 @@ static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
     return rc;
 }
 
-int hvmop_set_param(
+static int hvmop_set_param(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
 {
     struct xen_hvm_param a;
@@ -4318,9 +4322,6 @@ int hvmop_set_param(
     if ( a.index >= HVM_NR_PARAMS )
         return -EINVAL;
 
-    /* Make sure the above bound check is not bypassed during speculation. */
-    block_speculation();
-
     d = rcu_lock_domain_by_any_id(a.domid);
     if ( d == NULL )
         return -ESRCH;
@@ -4388,6 +4389,9 @@ int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value)
     if ( rc )
         return rc;
 
+    /* Make sure the above domain permissions check is respected. */
+    block_speculation();
+
     switch ( index )
     {
     case HVM_PARAM_ACPI_S_STATE:
@@ -4428,9 +4432,6 @@ static int hvmop_get_param(
     if ( a.index >= HVM_NR_PARAMS )
         return -EINVAL;
 
-    /* Make sure the above bound check is not bypassed during speculation. */
-    block_speculation();
-
     d = rcu_lock_domain_by_any_id(a.domid);
     if ( d == NULL )
         return -ESRCH;
--
generated by git-patchbot for /home/xen/git/xen.git#staging