
Re: [Xen-devel] [XenARM] XEN tools for ARM with Virtualization Extensions



On Tue, 2013-07-09 at 17:10 +0000, Eric Trudeau wrote:
> Now, I am looking into how to enable IRQs in my guest domains.
> Would I implement xc_domain_bind_pt_irq/XEN_DOMCTL_bind_pt_irq in a similar
> way as xc_domain_memory_mapping?  Or will the existing 
> xc_domain_irq_permission calls work?

I think in principle either should work. I'm not 100% familiar with this
stuff, but I think xc_domain_irq_permission just lets you expose an IRQ
to the guest, with a 1:1 mapping from host to guest IRQs. The
bind_pt_irq stuff is more flexible: it supports non-1:1 mappings and
handles some of the more complex variants.

I think for your use case you can probably get away with the simple
version; a sketch of what the toolstack side might look like is below.
When we come to do proper device passthrough we will likely need to
build upon the bind_pt functionality.

> What functions should I call to implement XEN_DOMCTL_bind_pt_irq on ARM?

There's a function like route_irq_to_guest which we use to route IRQs
to dom0 during boot. In principle that could also be used to reroute an
IRQ to a guest, but I'm not sure how it will interact with the
reassignment, since in your case the IRQ starts off bound to dom0.
Hopefully it's just a small change to make it work for this case; a
rough sketch of where such a call might slot in follows.
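
Purely as an untested sketch (the domctl field names and the
route_irq_to_guest() signature are my assumptions here, not checked
against the tree), the arch_do_domctl() switch in your patch below
could grow a case along these lines:

    case XEN_DOMCTL_irq_permission:
    {
        unsigned int irq = domctl->u.irq_permission.pirq;

        /* Revoking or rebinding an IRQ that is already routed (e.g. to
         * dom0) is the interaction I'm unsure about; not handled here. */
        if ( !domctl->u.irq_permission.allow_access )
        {
            ret = -ENOSYS;
            break;
        }

        /* 1:1 routing: the guest sees the same IRQ number as the host. */
        ret = route_irq_to_guest(d, irq, "passthrough");
        break;
    }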

> 
> Thanks,
> Eric
> 
> ------------------------------------------------------------------
> 
> diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
> index 0c32d0b..4196c0c 100644
> --- a/tools/libxl/libxl_create.c
> +++ b/tools/libxl/libxl_create.c
> @@ -970,8 +970,9 @@ static void domcreate_launch_dm(libxl__egc *egc, libxl__multidev *multidev,
>          LOG(DEBUG, "dom%d iomem %"PRIx64"-%"PRIx64,
>              domid, io->start, io->start + io->number - 1);
> 
> -        ret = xc_domain_iomem_permission(CTX->xch, domid,
> -                                          io->start, io->number, 1);
> +        ret = xc_domain_memory_mapping(CTX->xch, domid,
> +                                       io->start, io->start,
> +                                       io->number, 1);
>          if (ret < 0) {
>              LOGE(ERROR,
>                   "failed give dom%d access to iomem range %"PRIx64"-%"PRIx64,
> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
> index 851ee40..222aac9 100644
> --- a/xen/arch/arm/domctl.c
> +++ b/xen/arch/arm/domctl.c
> @@ -10,11 +10,83 @@
>  #include <xen/errno.h>
>  #include <xen/sched.h>
>  #include <public/domctl.h>
> +#include <xen/iocap.h>
> +#include <xsm/xsm.h>
> +#include <xen/paging.h>
> +#include <xen/guest_access.h>
> 
>  long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>                      XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>  {
> -    return -ENOSYS;
> +    long ret = 0;
> +    bool_t copyback = 0;
> +
> +    switch ( domctl->cmd )
> +    {
> +    case XEN_DOMCTL_memory_mapping:
> +    {
> +        unsigned long gfn = domctl->u.memory_mapping.first_gfn;
> +        unsigned long mfn = domctl->u.memory_mapping.first_mfn;
> +        unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
> +        int add = domctl->u.memory_mapping.add_mapping;
> +
> +        /* removing i/o memory is not implemented yet */
> +        if (!add) {
> +            ret = -ENOSYS;
> +            break;
> +        }
> +        ret = -EINVAL;
> +        if ( (mfn + nr_mfns - 1) < mfn || /* wrap? */
> +             /* x86 checks wrap based on paddr_bits which is not implemented on ARM? */
> +             /* ((mfn | (mfn + nr_mfns - 1)) >> (paddr_bits - PAGE_SHIFT)) || */
> +             (gfn + nr_mfns - 1) < gfn ) /* wrap? */
> +            break;
> +
> +        ret = -EPERM;
> +        if ( current->domain->domain_id != 0 )
> +            break;
> +
> +        ret = xsm_iomem_mapping(XSM_HOOK, d, mfn, mfn + nr_mfns - 1, add);
> +        if ( ret )
> +            break;
> +
> +        if ( add )
> +        {
> +            printk(XENLOG_G_INFO
> +                   "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> +                   d->domain_id, gfn, mfn, nr_mfns);
> +
> +            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
> +            if ( !ret && paging_mode_translate(d) )
> +            {
> +                ret = map_mmio_regions(d, gfn << PAGE_SHIFT,
> +                                       (gfn + nr_mfns - 1) << PAGE_SHIFT,
> +                                       mfn << PAGE_SHIFT);
> +                if ( ret )
> +                {
> +                    printk(XENLOG_G_WARNING
> +                           "memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> +                           d->domain_id, gfn, mfn, nr_mfns);
> +                    if ( iomem_deny_access(d, mfn, mfn + nr_mfns - 1) &&
> +                         is_hardware_domain(current->domain) )
> +                        printk(XENLOG_ERR
> +                               "memory_map: failed to deny dom%d access to [%lx,%lx]\n",
> +                               d->domain_id, mfn, mfn + nr_mfns - 1);
> +                }
> +            }
> +        }
> +    }
> +    break;
> +
> +    default:
> +        ret = -ENOSYS;
> +        break;
> +    }
> +
> +    if ( copyback && __copy_to_guest(u_domctl, domctl, 1) )
> +        ret = -EFAULT;
> +
> +    return ret;
>  }
> 
>  void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
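
As an aside, the libxc call your first hunk switches to has, at least
in my tree, the shape used below. A minimal usage sketch, where the
MMIO base address and size are made-up example values:

    #include <xenctrl.h>

    /* Sketch: identity-map a device's MMIO region into a guest.  The
     * 0x4a000000 base and 16-page size are hypothetical examples. */
    static int map_device_mmio(xc_interface *xch, uint32_t domid)
    {
        unsigned long mfn = 0x4a000000UL >> XC_PAGE_SHIFT; /* host frame */
        unsigned long nr  = 16;                            /* page count */

        /* first_gfn == first_mfn gives the 1:1 layout your patch uses. */
        return xc_domain_memory_mapping(xch, domid,
                                        mfn /* first_gfn */,
                                        mfn /* first_mfn */,
                                        nr, 1 /* add_mapping */);
    }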



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

