[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] Re: user/hypervisor address space solution




hollisb@xxxxxxxxxxxxxxxxxxxxxxx wrote on 02/13/2006 05:56:36 PM:

> I think I've solved this problem to my satisfaction. The solution I
> implemented was not registering buffers and running an allocator out of
> it, since that got a lot too complicated for my taste.
>
> The code I've just checked in to the PPC trees
> (http://xenbits.xensource.com/ext/linux-ppc-2.6.hg and
> http://xenbits.xensource.com/ext/xenppc-unstable.hg) basically creates a
> scatter/gather list for communication, and requires no hcall-user
> modifications (e.g. libxc). In summary, all pointers in the hcall data
> structures are replaced with pointers to scatter/gather structures.
> Xen's copy_to/from_user now works only with these structures.
>
> Userspace allocates memory for buffers and for the dom0_op itself from
> anywhere it likes. When it calls into the kernel via privcmd_ioctl(),
> the kernel records the virtually contiguous buffers with scatter/gather
> structures containing physical addresses, and replaces all nested
> virtual pointers with physical pointers to these structures.


Would the following be a simpler solution for PPC for the privcmd interface:

  - force copying of all structures into arrays allocated with __get_free_pages() using a wrapper function (for contig. memory to avoid scatter-gather)
    function returns the physical address

If the same code was to also be used for x86 to re-write the user-space arrays:
  - the wrapper function that is used to copy into contiguous memory on PPC becomes a no-op on x86 and returns the address passed to it


The code could look something like this:

#ifdef PPC

unsigned long xencomm_copy(unsigned long addr, unsigned long len, struct collect *c)
{
        struct collect *coll = malloc(*c);

        if (coll) {
                coll->order = get_order(len);

                coll->addr = __get_free_pages(order);
                copy_from_user(coll->addr, addr, len);

                coll->next = c->next;
                c->next = coll;
                return coll->addr;
        }
        return 0;
}

/*
 * PPC: copy the user buffer into contiguous kernel memory and return the
 * *physical* address of the copy, suitable for handing to the hypervisor.
 *
 * Returns 0 if the copy failed — the original unconditionally applied
 * __pa() even to a 0 (failure) result from xencomm_copy(), which would
 * hand a bogus physical address to the hypervisor.
 */
unsigned long xencomm_txcopy(unsigned long addr, unsigned long len, struct collect *c)
{
        unsigned long kaddr = xencomm_copy(addr, len, c);

        return kaddr ? __pa(kaddr) : 0;
}

#else
/*
 * x86: user buffers are reachable directly by the privcmd path, so the
 * "transfer copy" is a no-op that hands the address back unchanged;
 * nothing is copied and nothing is added to the collection list.
 */
unsigned long xencomm_txcopy(unsigned long addr, unsigned long len, struct collect *c)
{
        (void)len;      /* unused on x86 */
        (void)c;        /* unused on x86 */

        return addr;
}
/*
 * x86: no contiguous bounce buffer is needed, so this is a no-op that
 * returns the original address untouched.
 *
 * BUG FIX: the original body was `return __pa(xencomm_copy(addr,len,c));`
 * — the function called itself unconditionally, recursing forever. Per
 * the stated intent ("results in a no-op on x86 and returns the address
 * passed to it"), it now simply returns addr, mirroring the x86
 * xencomm_txcopy().
 */
unsigned long xencomm_copy(unsigned long addr, unsigned long len, struct collect *c)
{
        (void)len;
        (void)c;

        return addr;
}


#endif

/*
 * Release every bounce buffer recorded on the collection list: free the
 * copied pages first, then the list node itself. Safe to call with an
 * empty (NULL) list.
 *
 * FIXES: the original had no return type (implicit int), and freed
 * 'c->dest' although the allocating function stores the pages in
 * 'c->addr' — a field-name mismatch that would not compile.
 */
void xencomm_free(struct collect *c)
{
        struct collect *next;

        while (c) {
                free_pages(c->addr, c->order);  /* was: c->dest */
                next = c->next;
                free(c);
                c = next;
        }
}


For the PPC-tree:


                 struct collect *coll;
                [...]

                 if (copy_from_user(&kern_op, user_op, sizeof(dom0_op_t)))
                                 return -EFAULT;

                if (kern_op.interface_version != DOM0_INTERFACE_VERSION)
                                 return -EACCES;

                switch (kern_op.cmd) {
                                 case DOM0_GETMEMLIST:

                                                 kern_op.u.getmemlist.buffer =
                                                         xencomm_txcopy(kern_op.u.getmemlist.buffer,
                                                                        kern_op.u.getmemlist.max_pfns * sizeof(long),
                                                                        coll);
                                                  break;

                [...]

                 ret = plpar_hcall_norets(XEN_MARK(hypercall->op), __pa(&kern_op),
                                                  0, 0, 0, 0);

                xencomm_free(coll);
               


This should not require changes to the copy_from_user function in the HV but does not get around copying of the arrays / rewriting of the data structures.

 Stefan

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.