
Re: [Xen-devel] [PATCH] tighten guest memory accesses


  • To: Jan Beulich <JBeulich@xxxxxxxx>, xen-devel <xen-devel@xxxxxxxxxxxxx>
  • From: Keir Fraser <keir@xxxxxxx>
  • Date: Thu, 06 Dec 2012 13:05:24 +0000
  • Delivery-date: Thu, 06 Dec 2012 13:05:52 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>
  • Thread-index: Ac3TslqGP4kXYI6sOU237qn8exWewg==
  • Thread-topic: [Xen-devel] [PATCH] tighten guest memory accesses

On 06/12/2012 12:49, "Jan Beulich" <JBeulich@xxxxxxxx> wrote:

> Failure should always be detected and handled.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Acked-by: Keir Fraser <keir@xxxxxxx>
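
For anyone following along, the pattern the patch applies throughout is roughly the
following. This is a minimal standalone sketch, not Xen code: copy_to_guest_one() and
the STATUS_* values are hypothetical stand-ins for __copy_to_guest_offset() /
__copy_to_compat_offset() and GNTST_bad_virt_addr.

/*
 * Sketch of the checked-copy pattern: a failed per-element copy is no
 * longer cast to void and ignored, but reported back to the caller.
 */
#include <stdint.h>
#include <string.h>

#define STATUS_OKAY            0
#define STATUS_BAD_VIRT_ADDR  (-1)

/* Stand-in for a guest copy; non-zero return means the access faulted. */
static int copy_to_guest_one(uint32_t *guest_list, unsigned int i,
                             const uint32_t *frame)
{
    if ( guest_list == NULL )
        return 1;
    memcpy(&guest_list[i], frame, sizeof(*frame));
    return 0;
}

static int fill_frame_list(uint32_t *guest_list, const uint32_t *frames,
                           unsigned int nr_frames)
{
    int status = STATUS_OKAY;
    unsigned int i;

    for ( i = 0; i < nr_frames; ++i )
    {
        uint32_t frame = frames[i];

        /* Record a faulting copy instead of silently dropping it. */
        if ( copy_to_guest_one(guest_list, i, &frame) )
            status = STATUS_BAD_VIRT_ADDR;
    }

    return status;
}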

> --- a/xen/common/compat/grant_table.c
> +++ b/xen/common/compat/grant_table.c
> @@ -173,7 +173,9 @@ int compat_grant_table_op(unsigned int c
>                          for ( i = 0; i < (_s_)->nr_frames; ++i ) \
>                          { \
>                              unsigned int frame = (_s_)->frame_list.p[i]; \
> -                            (void)__copy_to_compat_offset((_d_)->frame_list, i, &frame, 1); \
> +                            if ( __copy_to_compat_offset((_d_)->frame_list, \
> +                                                         i, &frame, 1) ) \
> +                                (_s_)->status = GNTST_bad_virt_addr; \
>                          } \
>                      } \
>                  } while (0)
> @@ -310,7 +312,9 @@ int compat_grant_table_op(unsigned int c
>                          for ( i = 0; i < (_s_)->nr_frames; ++i ) \
>                          { \
>                              uint64_t frame = (_s_)->frame_list.p[i]; \
> -                            (void)__copy_to_compat_offset((_d_)->frame_list, i, &frame, 1); \
> +                            if ( __copy_to_compat_offset((_d_)->frame_list, \
> +                                                         i, &frame, 1) ) \
> +                                (_s_)->status = GNTST_bad_virt_addr; \
>                          } \
>                      } \
>                  } while (0)
> --- a/xen/common/compat/memory.c
> +++ b/xen/common/compat/memory.c
> @@ -283,18 +283,25 @@ int compat_memory_op(unsigned int cmd, X
>                  compat_pfn_t pfn = nat.xchg->out.extent_start.p[start_extent];
>  
>                  BUG_ON(pfn != nat.xchg->out.extent_start.p[start_extent]);
> -                /* Note that we ignore errors accessing the output extent list. */
> -                __copy_to_compat_offset(cmp.xchg.out.extent_start, start_extent, &pfn, 1);
> +                if ( __copy_to_compat_offset(cmp.xchg.out.extent_start,
> +                                             start_extent, &pfn, 1) )
> +                {
> +                    rc = -EFAULT;
> +                    break;
> +                }
>              }
>  
>              cmp.xchg.nr_exchanged = nat.xchg->nr_exchanged;
>              if ( copy_field_to_guest(guest_handle_cast(compat, compat_memory_exchange_t),
>                                       &cmp.xchg, nr_exchanged) )
> +                rc = -EFAULT;
> +
> +            if ( rc < 0 )
>              {
>                  if ( split < 0 )
>                      /* Cannot cancel the continuation... */
>                      domain_crash(current->domain);
> -                return -EFAULT;
> +                return rc;
>              }
>              break;
>          }
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -1347,6 +1347,9 @@ gnttab_setup_table(
>          goto out1;
>      }
>  
> +    if ( !guest_handle_okay(op.frame_list, op.nr_frames) )
> +        return -EFAULT;
> +
>      d = gt_lock_target_domain_by_id(op.dom);
>      if ( IS_ERR(d) )
>      {
> @@ -1384,7 +1387,8 @@ gnttab_setup_table(
>          gmfn = gnttab_shared_gmfn(d, gt, i);
>          /* Grant tables cannot be shared */
>          BUG_ON(SHARED_M2P(gmfn));
> -        (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
> +        if ( __copy_to_guest_offset(op.frame_list, i, &gmfn, 1) )
> +            op.status = GNTST_bad_virt_addr;
>      }
>  
>   out3:
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -445,8 +445,7 @@ static long memory_exchange(XEN_GUEST_HA
>          }
>  
>          /* Assign each output page to the domain. */
> -        j = 0;
> -        while ( (page = page_list_remove_head(&out_chunk_list)) )
> +        for ( j = 0; (page = page_list_remove_head(&out_chunk_list)); ++j )
>          {
>              if ( assign_pages(d, page, exch.out.extent_order,
>                                MEMF_no_refcount) )
> @@ -477,9 +476,12 @@ static long memory_exchange(XEN_GUEST_HA
>                  goto dying;
>              }
>  
> -            /* Note that we ignore errors accessing the output extent list. */
> -            (void)__copy_from_guest_offset(
> -                &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);
> +            if ( __copy_from_guest_offset(&gpfn, exch.out.extent_start,
> +                                          (i << out_chunk_order) + j, 1) )
> +            {
> +                rc = -EFAULT;
> +                continue;
> +            }
>  
>              mfn = page_to_mfn(page);
>              guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
> @@ -488,10 +490,11 @@ static long memory_exchange(XEN_GUEST_HA
>              {
>                  for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
>                      set_gpfn_from_mfn(mfn + k, gpfn + k);
> -                (void)__copy_to_guest_offset(
> -                    exch.out.extent_start, (i<<out_chunk_order)+j, &mfn, 1);
> +                if ( __copy_to_guest_offset(exch.out.extent_start,
> +                                            (i << out_chunk_order) + j,
> +                                            &mfn, 1) )
> +                    rc = -EFAULT;
>              }
> -            j++;
>          }
>          BUG_ON( !(d->is_dying) && (j != (1UL << out_chunk_order)) );
>      }
> 
> 
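
One more note on the memory_exchange() hunk: a faulting __copy_from_guest_offset()
now sets rc = -EFAULT but lets the loop continue, so the remaining output pages of
the chunk are still handed over rather than dropped mid-chunk. A rough standalone
sketch of that shape; read_target_gpfn() and map_page_at() are purely illustrative
stand-ins for the real guest-access and physmap calls.

#include <errno.h>
#include <stddef.h>

/* Stand-in guest read; non-zero return means the access faulted. */
static int read_target_gpfn(const unsigned long *guest_list, size_t idx,
                            unsigned long *gpfn)
{
    if ( guest_list == NULL )
        return 1;
    *gpfn = guest_list[idx];
    return 0;
}

/* Placeholder for mapping mfn at gpfn in the guest physmap. */
static void map_page_at(unsigned long gpfn, unsigned long mfn)
{
    (void)gpfn;
    (void)mfn;
}

static long assign_output_pages(const unsigned long *guest_list,
                                const unsigned long *mfns, size_t nr)
{
    long rc = 0;
    size_t j;

    for ( j = 0; j < nr; ++j )
    {
        unsigned long gpfn;

        /* A faulting lookup marks the whole call -EFAULT but does not
         * abort the loop: the remaining pages still get processed. */
        if ( read_target_gpfn(guest_list, j, &gpfn) )
        {
            rc = -EFAULT;
            continue;
        }

        map_page_at(gpfn, mfns[j]);
    }

    return rc;
}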



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

