
Re: [Xen-devel] [PATCH v2 3/6] hvm/mtrr: use the hardware number of variable ranges for Dom0



>>> On 15.05.18 at 16:36, <roger.pau@xxxxxxxxxx> wrote:
> --- a/xen/arch/x86/hvm/mtrr.c
> +++ b/xen/arch/x86/hvm/mtrr.c
> @@ -154,14 +154,26 @@ uint8_t pat_type_2_pte_flags(uint8_t pat_type)
>  int hvm_vcpu_cacheattr_init(struct vcpu *v)
>  {
>      struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;
> +    unsigned int num_var_ranges =
> +        is_hardware_domain(v->domain) ? MASK_EXTR(mtrr_state.mtrr_cap,
> +                                                  MTRRcap_VCNT)
> +                                      : MTRR_VCNT;
> +
> +    if ( num_var_ranges > MTRR_VCNT_MAX )
> +    {
> +        ASSERT(is_hardware_domain(v->domain));
> +        printk("WARNING: limited Dom0 variable range MTRRs from %u to %u\n",
> +               num_var_ranges, MTRR_VCNT_MAX);

Please log v->domain->domain_id here instead of the hard-coded "Dom0", to
cope with the hardware domain being different from Dom0.
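
I.e. something along these lines (untested; the exact wording and the use
of XENLOG_WARNING are just suggestions):

    printk(XENLOG_WARNING
           "d%d: limiting variable range MTRRs from %u to %u\n",
           v->domain->domain_id, num_var_ranges, MTRR_VCNT_MAX);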

> @@ -675,6 +690,8 @@ static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
>      /* save mtrr&pat */
>      for_each_vcpu(d, v)
>      {
> +        unsigned int num_var_ranges;
> +
>          mtrr_state = &v->arch.hvm_vcpu.mtrr;
>  
>          hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);
> @@ -683,7 +700,11 @@ static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
>                                  | (mtrr_state->enabled << 10);
>          hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;
>  
> -        for ( i = 0; i < MTRR_VCNT; i++ )
> +        num_var_ranges = MASK_EXTR(mtrr_state->mtrr_cap, MTRRcap_VCNT);
> +        if ( num_var_ranges > MTRR_VCNT )
> +            return -EINVAL;

This would be better accompanied by a dprintk().
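
For example (untested; the message text is just a suggestion):

    if ( num_var_ranges > MTRR_VCNT )
    {
        dprintk(XENLOG_G_ERR,
                "d%d: unexpected MTRR variable range count %u (limit %u)\n",
                d->domain_id, num_var_ranges, MTRR_VCNT);
        return -EINVAL;
    }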

> +        for ( i = 0; i < num_var_ranges; i++ )

Following your v1 I had already put together a patch changing just the
save and load functions here, as the adjustments are necessary
independently of the Dom0 aspect. Should num_var_ranges indeed be
below MTRR_VCNT, there's an information leak here (of hypervisor stack
data) unless hw_mtrr gets pre-initialized. Here's the hunk from my patch,
in case you care to re-use parts of it:

@@ -676,22 +676,22 @@ int hvm_set_mem_pinned_cacheattr(struct
 
 static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
 {
-    int i;
     struct vcpu *v;
-    struct hvm_hw_mtrr hw_mtrr;
-    struct mtrr_state *mtrr_state;
+
     /* save mtrr&pat */
     for_each_vcpu(d, v)
     {
-        mtrr_state = &v->arch.hvm_vcpu.mtrr;
+        const struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr;
+        struct hvm_hw_mtrr hw_mtrr = {
+            .msr_mtrr_def_type = mtrr_state->def_type |
+                                 (mtrr_state->enabled << 10),
+            .msr_mtrr_cap      = mtrr_state->mtrr_cap,
+        };
+        unsigned int i;
 
         hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);
 
-        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
-                                | (mtrr_state->enabled << 10);
-        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;
-
-        for ( i = 0; i < MTRR_VCNT; i++ )
+        for ( i = 0; i < (uint8_t)hw_mtrr.msr_mtrr_cap; i++ )
         {
             /* save physbase */
             hw_mtrr.msr_mtrr_var[i*2] =
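
(Note the designated initializer: all members not named there, in
particular msr_mtrr_var[], start out zeroed, which is what closes the
leak when fewer than MTRR_VCNT ranges get written by the loop.)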

(I didn't send it out yet, as I'm generally of the opinion that prior to
branching the focus should be on the code to be released rather than on
the next version.)

> --- a/xen/include/asm-x86/mtrr.h
> +++ b/xen/include/asm-x86/mtrr.h
> @@ -39,6 +39,8 @@ typedef u8 mtrr_type;
>  #define MTRR_PHYSBASE_SHIFT      12
>  /* Number of variable range MSR pairs we emulate for HVM guests: */
>  #define MTRR_VCNT                8
> +/* Maximum number of variable range MSR pairs if FE is supported. */
> +#define MTRR_VCNT_MAX            40

((MSR_MTRRfix64K_00000 - MSR_IA32_MTRR_PHYSBASE(0)) / 2)

or some such please (to make obvious where the value is coming from).
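
I.e. something like (untested; the comment wording is up to you):

    /* Maximum number of variable range MSR pairs (dictated by the MSR layout). */
    #define MTRR_VCNT_MAX ((MSR_MTRRfix64K_00000 - \
                            MSR_IA32_MTRR_PHYSBASE(0)) / 2)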

Jan


