
Re: [Xen-devel] [PATCH] dm_op: Add xendevicemodel_modified_memory_bulk.



>>> On 16.03.17 at 15:44, <jennifer.herbert@xxxxxxxxxx> wrote:
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -119,56 +119,96 @@ static int set_isa_irq_level(struct domain *d, uint8_t isa_irq,
>  }
>  
>  static int modified_memory(struct domain *d,
> -                           struct xen_dm_op_modified_memory *data)
> +                           struct xen_dm_op_modified_memory *header,
> +                           xen_dm_op_buf_t* buf)
>  {
> -    xen_pfn_t last_pfn = data->first_pfn + data->nr - 1;
> -    unsigned int iter = 0;
> -    int rc = 0;
> +    /* Process maximum of 255 pfns before checking for continuation */
> +    const uint32_t max_pfns = 0xff;
>  
> -    if ( (data->first_pfn > last_pfn) ||
> -         (last_pfn > domain_get_maximum_gpfn(d)) )
> -        return -EINVAL;
> +    xen_pfn_t last_pfn;
> +    int rc = 0;
> +    uint32_t offset = header->offset;
> +    unsigned long pfn;
> +    unsigned long max_pfn;
> +    unsigned max_nr;
> +    uint32_t rem_pfns = max_pfns;

Please avoid fixed-width types when you don't really need them. Also,
please use "unsigned int" instead of plain "unsigned".

>      if ( !paging_mode_log_dirty(d) )
>          return 0;
>  
> -    while ( iter < data->nr )
> +    if ( (buf->size / sizeof(struct xen_dm_op_modified_memory_extent)) <
> +        header->nr_extents )

Indentation.

> +        return -EINVAL;
> +
> +    while ( offset < header->nr_extents )
>      {
> -        unsigned long pfn = data->first_pfn + iter;
> -        struct page_info *page;
> +        struct xen_dm_op_modified_memory_extent extent;
>  
> -        page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
> -        if ( page )
> +        if ( copy_from_guest_offset(&extent, buf->h, offset, 1) )
> +            return -EFAULT;
> +
> +        last_pfn = extent.first_pfn + extent.nr - 1;
> +
> +        if ( last_pfn > domain_get_maximum_gpfn(d) )

Where did the original "(data->first_pfn > last_pfn)" go?
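
I'd expect it to survive the conversion, i.e. (re-using the original
check, which also guards against first_pfn + nr wrapping):

    last_pfn = extent.first_pfn + extent.nr - 1;

    if ( (extent.first_pfn > last_pfn) ||
         (last_pfn > domain_get_maximum_gpfn(d)) )
        return -EINVAL;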

> +            return -EINVAL;
> +
> +        if ( extent.nr > rem_pfns )
> +           max_nr = rem_pfns;
> +        else
>          {
> -            mfn_t gmfn = _mfn(page_to_mfn(page));
> -
> -            paging_mark_dirty(d, gmfn);
> -            /*
> -             * These are most probably not page tables any more
> -             * don't take a long time and don't die either.
> -             */
> -            sh_remove_shadows(d, gmfn, 1, 0);
> -            put_page(page);
> +            max_nr = extent.nr;
> +            offset++;
>          }
>  
> -        iter++;
> +        rem_pfns -= max_nr;
> +        max_pfn = extent.first_pfn + max_nr;
>  
> -        /*
> -         * Check for continuation every 256th iteration and if the
> -         * iteration is not the last.
> -         */
> -        if ( (iter < data->nr) && ((iter & 0xff) == 0) &&
> -             hypercall_preempt_check() )
> +        pfn = extent.first_pfn;
> +        while ( pfn < max_pfn )

The max_ prefixes chosen are somewhat problematic with a loop
condition like this: "max" commonly means the maximum valid one
rather than the first following item. Perhaps "end_pfn" and just
"nr"?

>          {
> -            data->first_pfn += iter;
> -            data->nr -= iter;
> +            struct page_info *page;
> +            page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
> +
> +            if ( page )
> +            {
> +                mfn_t gmfn = _mfn(page_to_mfn(page));
> +
> +                paging_mark_dirty(d, gmfn);
> +                /*
> +                 * These are most probably not page tables any more
> +                 * don't take a long time and don't die either.
> +                 */
> +                sh_remove_shadows(d, gmfn, 1, 0);
> +                put_page(page);
> +            }
> +            pfn++;
> +        }
>  
> -            rc = -ERESTART;
> -            break;
> +        if ( max_nr < extent.nr )
> +        {
> +            extent.first_pfn += max_nr;
> +            extent.nr-= max_nr;
> +            if ( copy_to_guest_offset(buf->h, offset, &extent, 1) )
> +                return -EFAULT;

Is this copying back needed when not returning -ERESTART below?
I realize that with the current code structure it's needed for the
copy_from above to read the value back, but even though this doesn't
look to be a double-fetch vulnerability, I think it would be better if
the value propagation occurred without going through guest memory.
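
Something along these lines, perhaps (an untested sketch: the
partially processed extent lives in the local variable, and is written
back to guest memory only on the -ERESTART path):

    struct xen_dm_op_modified_memory_extent extent = { .nr = 0 };

    while ( offset < header->nr_extents )
    {
        unsigned long pfn, end_pfn;
        unsigned int nr;

        if ( !extent.nr )
        {
            if ( copy_from_guest_offset(&extent, buf->h, offset, 1) )
                return -EFAULT;
            /* first_pfn/nr sanity checks, as discussed above */
        }

        nr = extent.nr < rem_pfns ? extent.nr : rem_pfns;
        end_pfn = extent.first_pfn + nr;

        for ( pfn = extent.first_pfn; pfn < end_pfn; pfn++ )
            /* mark the page dirty and remove shadows, as in the patch */;

        extent.first_pfn = end_pfn;
        extent.nr -= nr;
        rem_pfns -= nr;

        if ( !extent.nr )
            offset++;

        if ( !rem_pfns && offset < header->nr_extents )
        {
            if ( hypercall_preempt_check() )
            {
                /* Only a partially processed extent needs writing back. */
                if ( extent.nr &&
                     copy_to_guest_offset(buf->h, offset, &extent, 1) )
                    return -EFAULT;
                header->offset = offset;
                return -ERESTART;
            }
            rem_pfns = max_pfns;
        }
    }

    return 0;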

>          }
> -    }
>  
> -    return rc;
> +        /*
> +         * Check for continuation every 256th pfn and if the
> +         * pfn is not the last.
> +         */
> +
> +        if ( (rem_pfns == 0) && (offset <= header->nr_extents) )

DYM < instead of <= here?

> +        {
> +            if ( hypercall_preempt_check() )
> +            {
> +                header->offset = offset;
> +                rc = -ERESTART;
> +                break;
> +            }
> +            rem_pfns += max_pfns;

rem_pfns is zero prior to this anyway: Please use = instead of += .
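
I.e., with this and the <= remark above both taken care of (a sketch):

    if ( (rem_pfns == 0) && (offset < header->nr_extents) )
    {
        if ( hypercall_preempt_check() )
        {
            header->offset = offset;
            rc = -ERESTART;
            break;
        }
        rem_pfns = max_pfns;
    }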

> --- a/xen/include/public/hvm/dm_op.h
> +++ b/xen/include/public/hvm/dm_op.h
> @@ -237,13 +237,19 @@ struct xen_dm_op_set_pci_link_route {
>   * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
>   *                           an emulator.
>   *
> - * NOTE: In the event of a continuation, the @first_pfn is set to the
> - *       value of the pfn of the remaining set of pages and @nr reduced
> - *       to the size of the remaining set.
> + * DMOP buf 1 contains an array of xen_dm_op_modified_memory_extent with
> + * nr_extent entries.
> + *
> + * On continuation, @offset is set to the next extent to be processed.
>   */
>  #define XEN_DMOP_modified_memory 11
>  
>  struct xen_dm_op_modified_memory {
> +    uint32_t nr_extents; /* IN - number of extents. */
> +    uint32_t offset;     /* Caller must set to 0. */

I think you should try to get away without this extra field: For the
purpose here, incrementing the handle value together with
decrementing nr_extents should be sufficient (just like done with
various other batchable hypercalls). And if the field was to stay,
"offset" is a bad name imo, as this leaves open whether this is
byte- or extent-granular. "cur_extent" perhaps?
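
To illustrate the field-less variant (a sketch only; the "pad" field
and its placement are merely illustrative):

    struct xen_dm_op_modified_memory {
        /*
         * IN - number of extents. On continuation this would be
         * decremented by the number of extents already processed, with
         * the buffer handle advanced correspondingly.
         */
        uint32_t nr_extents;
        uint32_t pad; /* must be zero */
    };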

Jan
