
Re: [Xen-devel] [PATCH 1/1] x86/hvm: prevent hvm_free_ioreq_gmfn() clobber of arbitrary memory



At 14:17 +0100 on 14 Apr (1429021061), Andrew Cooper wrote:
> On 14/04/15 12:47, Jan Beulich wrote:
> >>>> On 13.04.15 at 18:01, <dslutz@xxxxxxxxxxx> wrote:
> >> --- a/xen/arch/x86/hvm/hvm.c
> >> +++ b/xen/arch/x86/hvm/hvm.c
> >> @@ -536,8 +536,9 @@ static int hvm_alloc_ioreq_gmfn(struct domain *d, unsigned long *gmfn)
> >>  
> >>  static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
> >>  {
> >> -    unsigned int i = gmfn - d->arch.hvm_domain.ioreq_gmfn.base;
> >> +    unsigned long i = gmfn - d->arch.hvm_domain.ioreq_gmfn.base;
> >>  
> >> +    BUG_ON(i >= sizeof(d->arch.hvm_domain.ioreq_gmfn.mask) * 8);
> >>      clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
> >>  }
> > I'd be happier with an ASSERT() - Andrew?
> 
> If I recall correctly, this is a follow-on from the compiler error, where
> gmfn now gets initialised to ~0 to avoid a build failure.
> 
> If gcc is correct and there is a way for gmfn to be used, then the
> clear_bit() here clobbers memory.  The BUG_ON() serves as protection
> against the clobbering.
> 
> If however gcc was actually wrong, then the code here is fine, and a
> BUG_ON() or ASSERT() will never trigger.
> 
> In addition, this is not a hotpath in the slightest, so performance isn't
> a concern.
> 
> 
> I have still not managed to conclusively work out whether gcc is correct
> or wrong.  As a result, I would lean in the direction of BUG_ON() rather
> than ASSERT(), out of paranoia.  However, I would prefer even more a
> solution where we were certain that gmfn isn't bogus.

AFAICT GCC is wrong, though the code is confusing enough to me that I
don't blame GCC for being confused too. :)
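
To make the shape of the problem concrete, here is a minimal, standalone
sketch of the pattern gcc is looking at (not Xen code; alloc_pfn() and
map_pfn() are made-up stand-ins for hvm_alloc_ioreq_gmfn() and
hvm_map_ioreq_page()):

/* Trivial stubs so the sketch compiles on its own. */
static int alloc_pfn(unsigned long *pfn) { *pfn = 0x1000; return 0; }
static int map_pfn(unsigned long pfn) { return pfn ? 0 : -1; }

int demo(int is_default, int handle_bufioreq)
{
    unsigned long pfn;              /* deliberately no initialiser */
    int rc = 0;

    if ( is_default )
        pfn = 0xfeffd;              /* arbitrary value, standing in for the HVM param */
    else if ( handle_bufioreq )
    {
        rc = alloc_pfn(&pfn);
        if ( rc )
            return rc;              /* bail out before any use of pfn */
    }

    /*
     * Every path that reaches this read has gone through one of the
     * writes above, but whether gcc can prove that depends on version,
     * optimisation level and inlining -- hence the ~0UL workaround in
     * the current code.
     */
    if ( handle_bufioreq )
        rc = map_pfn(pfn);

    return rc;
}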

I would be inclined to use a bigger hammer here.  IMO refactoring like
this makes the code easier to reason about (compile tested only):

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 21cf744..bdc2457 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -495,7 +495,8 @@ static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
 {
     unsigned int i = gmfn - d->arch.hvm_domain.ioreq_gmfn.base;
 
-    clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
+    if ( gmfn != INVALID_GFN )
+        clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
 }
 
 static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
@@ -728,64 +729,61 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
     spin_unlock(&s->lock);
 }
 
+
 static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
-                                      bool_t is_default, bool_t handle_bufioreq)
+                                      unsigned long ioreq_pfn,
+                                      unsigned long bufioreq_pfn)
+{
+    int rc;
+
+    rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
+    if ( rc )
+        return rc;
+
+    if ( bufioreq_pfn != INVALID_GFN )
+        rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
+
+    if ( rc )
+        hvm_unmap_ioreq_page(s, 0);
+
+    return rc;
+}
+
+static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
+                                        bool_t is_default,
+                                        bool_t handle_bufioreq)
 {
     struct domain *d = s->domain;
-    unsigned long ioreq_pfn;
-    unsigned long bufioreq_pfn = ~0UL; /* gcc uninitialised var warning */
+    unsigned long ioreq_pfn = INVALID_GFN;
+    unsigned long bufioreq_pfn = INVALID_GFN;
     int rc;
 
     if ( is_default )
     {
-        ioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
-
         /*
          * The default ioreq server must handle buffered ioreqs, for
          * backwards compatibility.
          */
         ASSERT(handle_bufioreq);
-        bufioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
+        return hvm_ioreq_server_map_pages(s,
+                   d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN],
+                   d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
     }
-    else
-    {
-        rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
-        if ( rc )
-            goto fail1;
 
-        if ( handle_bufioreq )
-        {
-            rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
-            if ( rc )
-                goto fail2;
-        }
-    }
+    rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
 
-    rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
-    if ( rc )
-        goto fail3;
+    if ( !rc && handle_bufioreq )
+        rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
 
-    if ( handle_bufioreq )
-    {
-        rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
-        if ( rc )
-            goto fail4;
-    }
+    if ( !rc )
+        rc = hvm_ioreq_server_map_pages(s, ioreq_pfn, bufioreq_pfn);
 
-    return 0;
-
-fail4:
-    hvm_unmap_ioreq_page(s, 0);
-
-fail3:
-    if ( !is_default && handle_bufioreq )
-        hvm_free_ioreq_gmfn(d, bufioreq_pfn);
-
-fail2:
-    if ( !is_default )
+    if ( rc )
+    {
         hvm_free_ioreq_gmfn(d, ioreq_pfn);
+        hvm_free_ioreq_gmfn(d, bufioreq_pfn);
+    }
 
-fail1:
     return rc;
 }
 
@@ -938,7 +936,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
     if ( rc )
         return rc;
 
-    rc = hvm_ioreq_server_map_pages(s, is_default, handle_bufioreq);
+    rc = hvm_ioreq_server_setup_pages(s, is_default, handle_bufioreq);
     if ( rc )
         goto fail_map;
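
For completeness, if we also want the range check Jan asked for on top of
the above, the guarded free could end up looking something like this
(untested sketch, on top of the diff; with the INVALID_GFN case filtered
out first, an ASSERT() seems sufficient):

static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
{
    unsigned long i = gmfn - d->arch.hvm_domain.ioreq_gmfn.base;

    if ( gmfn == INVALID_GFN )
        return;

    /* Catch out-of-range indexes before clear_bit() can touch anything
     * outside the mask; per Jan, an ASSERT() rather than a BUG_ON(). */
    ASSERT(i < sizeof(d->arch.hvm_domain.ioreq_gmfn.mask) * 8);
    clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
}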
 

