[Xen-changelog] [linux-2.6.18-xen] [IA64] Work around for xencomm memory reservation op.
# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1187791760 21600
# Node ID ec6f71d1b3351faebbfb0345ca0ccfc3a52b5d7c
# Parent  b5fdf02c38f4765697196f5fad5d1262f2c157f4
[IA64] Work around for xencomm memory reservation op.

- Xencomm has a single-page size limit, imposed by
  xencomm_alloc()/xencomm_free(), so we have to repeat the hypercall.
  Repeating the hypercall allows us to create domains larger than ~63G.
  This limitation could also be removed by allowing xencomm calls to
  cross pages.
- Even if the above limitation is removed, a hypercall with a large
  number of extents may trigger the soft lockup warning.  To avoid the
  warning, we limit the number of extents per call and repeat the
  hypercall.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Signed-off-by: Alex Williamson <alex.williamson@xxxxxx>
---
 arch/ia64/xen/xcom_privcmd.c |  139 ++++++++++++++++++++++++++++++-------
 1 files changed, 97 insertions(+), 42 deletions(-)

diff -r b5fdf02c38f4 -r ec6f71d1b335 arch/ia64/xen/xcom_privcmd.c
--- a/arch/ia64/xen/xcom_privcmd.c      Thu Aug 16 13:44:51 2007 -0600
+++ b/arch/ia64/xen/xcom_privcmd.c      Wed Aug 22 08:09:20 2007 -0600
@@ -377,6 +377,102 @@ xencomm_privcmd_acm_op(privcmd_hypercall
 }
 
 static int
+xencomm_privcmd_memory_reservation_op(privcmd_hypercall_t *hypercall)
+{
+        const unsigned long cmd = hypercall->arg[0];
+        int ret = 0;
+        xen_memory_reservation_t kern_op;
+        xen_memory_reservation_t __user *user_op;
+        struct xencomm_handle *desc = NULL;
+        struct xencomm_handle *desc_op;
+
+        user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
+        if (copy_from_user(&kern_op, user_op,
+                           sizeof(xen_memory_reservation_t)))
+                return -EFAULT;
+        desc_op = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
+
+        if (!xen_guest_handle(kern_op.extent_start)) {
+                ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
+                if (ret < 0)
+                        return ret;
+        } else {
+                xen_ulong_t nr_done = 0;
+                xen_ulong_t nr_extents = kern_op.nr_extents;
+                void *addr = xen_guest_handle(kern_op.extent_start);
+
+                /*
+                 * Workaround: xencomm has a single-page size limit,
+                 * imposed by xencomm_alloc()/xencomm_free(), so we
+                 * have to repeat the hypercall.
+                 * This limitation can be removed.
+                 */
+#define MEMORYOP_XENCOMM_LIMIT                                          \
+                (((((PAGE_SIZE - sizeof(struct xencomm_desc)) /         \
+                    sizeof(uint64_t)) - 2) * PAGE_SIZE) /               \
+                 sizeof(*xen_guest_handle(kern_op.extent_start)))
+
+                /*
+                 * Workaround: even if the above limitation is removed,
+                 * a hypercall with a large number of extents may
+                 * trigger the soft lockup warning.  To avoid the
+                 * warning, we limit the number of extents and repeat
+                 * the hypercall.
+                 * The following value was determined by experimentation.
+                 * If this limit still causes the soft lockup warning,
+                 * we should decrease it.
+                 *
+                 * Another way would be to start with a small value and
+                 * increase it adaptively by measuring hypercall time.
+                 * That might be overkill.
+                 */
+#define MEMORYOP_MAX_EXTENTS    (MEMORYOP_XENCOMM_LIMIT / 4)
+
+                while (nr_extents > 0) {
+                        xen_ulong_t nr_tmp = nr_extents;
+                        if (nr_tmp > MEMORYOP_MAX_EXTENTS)
+                                nr_tmp = MEMORYOP_MAX_EXTENTS;
+
+                        kern_op.nr_extents = nr_tmp;
+                        desc = xencomm_map
+                                (addr + nr_done * sizeof(*xen_guest_handle(kern_op.extent_start)),
+                                 nr_tmp * sizeof(*xen_guest_handle(kern_op.extent_start)));
+                        if (addr != NULL && nr_tmp > 0 && desc == NULL)
+                                return nr_done > 0 ? nr_done : -ENOMEM;
+
+                        set_xen_guest_handle(kern_op.extent_start,
+                                             (void *)desc);
+
+                        ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
+                        xencomm_free(desc);
+                        if (ret < 0)
+                                return nr_done > 0 ? nr_done : ret;
+
+                        nr_done += ret;
+                        nr_extents -= ret;
+                        if (ret < nr_tmp)
+                                break;
+
+                        /*
+                         * Prevent the soft lockup message:
+                         * give the CPU to the soft lockup kernel thread.
+                         */
+                        if (nr_extents > 0)
+                                schedule();
+                }
+                ret = nr_done;
+                set_xen_guest_handle(kern_op.extent_start, addr);
+        }
+
+        if (copy_to_user(user_op, &kern_op, sizeof(xen_memory_reservation_t)))
+                return -EFAULT;
+
+        return ret;
+}
+
+static int
 xencomm_privcmd_memory_op(privcmd_hypercall_t *hypercall)
 {
         const unsigned long cmd = hypercall->arg[0];
@@ -386,48 +482,7 @@ xencomm_privcmd_memory_op(privcmd_hyperc
         case XENMEM_increase_reservation:
         case XENMEM_decrease_reservation:
         case XENMEM_populate_physmap:
-        {
-                xen_memory_reservation_t kern_op;
-                xen_memory_reservation_t __user *user_op;
-                struct xencomm_handle *desc = NULL;
-                struct xencomm_handle *desc_op;
-
-                user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
-                if (copy_from_user(&kern_op, user_op,
-                                   sizeof(xen_memory_reservation_t)))
-                        return -EFAULT;
-                desc_op = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
-
-                if (xen_guest_handle(kern_op.extent_start)) {
-                        void * addr;
-
-                        addr = xen_guest_handle(kern_op.extent_start);
-                        desc = xencomm_map
-                                (addr,
-                                 kern_op.nr_extents *
-                                 sizeof(*xen_guest_handle
-                                        (kern_op.extent_start)));
-                        if (addr != NULL && kern_op.nr_extents > 0 &&
-                            desc == NULL)
-                                return -ENOMEM;
-
-                        set_xen_guest_handle(kern_op.extent_start,
-                                             (void *)desc);
-                }
-
-                ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
-
-                xencomm_free(desc);
-
-                if (ret != 0)
-                        return ret;
-
-                if (copy_to_user(user_op, &kern_op,
-                                 sizeof(xen_memory_reservation_t)))
-                        return -EFAULT;
-
-                return ret;
-        }
+                return xencomm_privcmd_memory_reservation_op(hypercall);
         case XENMEM_maximum_gpfn:
         {
                 domid_t kern_domid;
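Editor's note: for readers following along outside the kernel tree, below
is a minimal, compilable sketch of the chunked-hypercall pattern the patch
introduces.  do_memory_op() and MAX_EXTENTS_PER_CALL are hypothetical
stand-ins for xencomm_arch_hypercall_memory_op() and MEMORYOP_MAX_EXTENTS
(the latter bounds each call to a quarter of what one xencomm descriptor
page can address: a page minus the descriptor header holds uint64_t
address slots, two are kept as slack, and each remaining slot covers one
page of extent payload).  The stub simply reports every extent of a chunk
as completed, which is enough to exercise the loop's partial-completion
bookkeeping; it is a sketch, not the kernel's real interface.

    /*
     * Stand-alone sketch of the chunked-hypercall pattern above.
     * do_memory_op() and MAX_EXTENTS_PER_CALL are illustrative
     * stand-ins, not the kernel's real interface.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define MAX_EXTENTS_PER_CALL 8  /* stand-in for MEMORYOP_MAX_EXTENTS */

    /* Stub hypervisor call: report every extent in the chunk as done. */
    static long do_memory_op(uint64_t *extents, unsigned long nr)
    {
            (void)extents;
            return (long)nr;
    }

    /* Return the number of extents completed, mirroring the patch's loop. */
    static long process_extents(uint64_t *extents, unsigned long nr_extents)
    {
            unsigned long nr_done = 0;

            while (nr_extents > 0) {
                    unsigned long nr_tmp = nr_extents;
                    long ret;

                    if (nr_tmp > MAX_EXTENTS_PER_CALL)
                            nr_tmp = MAX_EXTENTS_PER_CALL; /* bound each call */

                    ret = do_memory_op(extents + nr_done, nr_tmp);
                    if (ret < 0)    /* report partial progress, not failure */
                            return nr_done > 0 ? (long)nr_done : ret;

                    nr_done += ret;
                    nr_extents -= ret;
                    if ((unsigned long)ret < nr_tmp)
                            break;  /* hypervisor stopped early */

                    /* The kernel version calls schedule() here so the
                     * soft lockup watchdog thread gets a chance to run. */
            }
            return (long)nr_done;
    }

    int main(void)
    {
            uint64_t extents[20] = { 0 };

            /* 20 extents, chunks of at most 8: three calls (8 + 8 + 4). */
            printf("completed %ld of 20 extents\n",
                   process_extents(extents, 20));
            return 0;
    }

As in the patch, a short return from one chunk (ret < nr_tmp) ends the
loop, and the caller sees the number of extents completed so far rather
than an error, so partial progress is never silently discarded.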