Re: [Xen-devel] [PATCH 10/13] xen: introduce xen_event_channel_register_3level
>>> On 31.01.13 at 15:47, Wei Liu <wei.liu2@xxxxxxxxxx> wrote:
> @@ -2123,6 +2126,97 @@ void xen_callback_vector(void)
> void xen_callback_vector(void) {}
> #endif
>
> +static int xen_event_channel_register_3level(void)
> +{
> + evtchn_register_nlevel_t reg;
> + int i, cpu;
> + unsigned long *_evtchn_pending = NULL;
> + unsigned long *_evtchn_mask = NULL;
> + unsigned long *l2sel_mfns = NULL;
> + unsigned long *l2sel_offsets = NULL;
> + int rc;
> +
> + /* If we come from restore path, we don't need to allocate
> + * pages.
> + */
> + if (!evtchn_pending && !evtchn_mask) {
> + evtchn_pending =
> + (unsigned long *)__get_free_pages(GFP_KERNEL, BITMAP_PG_ORDER);
> + evtchn_mask =
> + (unsigned long *)__get_free_pages(GFP_KERNEL, BITMAP_PG_ORDER);
> + if (!evtchn_pending || !evtchn_mask) {
> + free_pages((unsigned long)evtchn_pending, BITMAP_PG_ORDER);
> + free_pages((unsigned long)evtchn_mask, BITMAP_NR_PAGES);
free_pages() takes an order just like __get_free_pages() does.
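For illustration only, a minimal sketch of what the corrected cleanup might
look like (assuming BITMAP_PG_ORDER is the order that was passed to
__get_free_pages() above):

	/* free_pages() wants the allocation order, not the page count */
	free_pages((unsigned long)evtchn_pending, BITMAP_PG_ORDER);
	free_pages((unsigned long)evtchn_mask, BITMAP_PG_ORDER);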
> + evtchn_pending = NULL;
> + evtchn_mask = NULL;
> + rc = -ENOMEM;
> + goto err;
> + }
> + }
> +
> + rc = -ENOMEM; /* Common error code for following operations */
> +#define __ALLOC_ARRAY(_ptr, _nr) \
> + do { \
> + (_ptr) = kzalloc(sizeof(unsigned long) * (_nr), \
> + GFP_KERNEL); \
> + if (!(_ptr)) \
> + goto out; \
> + } while (0)
> +
> + __ALLOC_ARRAY(_evtchn_pending, BITMAP_NR_PAGES);
> + __ALLOC_ARRAY(_evtchn_mask, BITMAP_NR_PAGES);
> + __ALLOC_ARRAY(l2sel_mfns, nr_cpu_ids);
> + __ALLOC_ARRAY(l2sel_offsets, nr_cpu_ids);
> +#undef __ALLOC_ARRAY
> +
> + memset(&reg, 0, sizeof(reg));
> +
> + for (i = 0; i < BITMAP_NR_PAGES; i++) {
> + unsigned long offset = PAGE_SIZE * i;
> + _evtchn_pending[i] =
> + arbitrary_virt_to_mfn(
> + (void *)((unsigned long)evtchn_pending+offset));
> + _evtchn_mask[i] =
> + arbitrary_virt_to_mfn(
> + (void *)((unsigned long)evtchn_mask+offset));
> + }
> +
> + for_each_possible_cpu(cpu) {
> + l2sel_mfns[cpu] =
> + arbitrary_virt_to_mfn(&per_cpu(evtchn_sel_l2, cpu));
> + l2sel_offsets[cpu] =
> + offset_in_page(&per_cpu(evtchn_sel_l2, cpu));
> + }
> +
> + reg.u.l3.nr_pages = BITMAP_NR_PAGES;
> + reg.u.l3.evtchn_pending = _evtchn_pending;
> + reg.u.l3.evtchn_mask = _evtchn_mask;
> +
> + reg.u.l3.nr_vcpus = nr_cpu_ids;
> + reg.u.l3.l2sel_mfns = l2sel_mfns;
> + reg.u.l3.l2sel_offsets = l2sel_offsets;
> +
> + reg.level = 3;
> +
> + rc = HYPERVISOR_event_channel_op(EVTCHNOP_register_nlevel, &reg);
> + if (rc) {
> + free_pages((unsigned long)evtchn_pending, BITMAP_NR_PAGES);
> + free_pages((unsigned long)evtchn_mask, BITMAP_NR_PAGES);
Same here.
Jan
> + evtchn_pending = NULL;
> + evtchn_mask = NULL;
> + }
> +
> +out:
> + kfree(_evtchn_pending);
> + kfree(_evtchn_mask);
> + kfree(l2sel_mfns);
> + kfree(l2sel_offsets);
> +err:
> + return rc;
> +}
> +
> void __init xen_init_IRQ(void)
> {
> int i, rc;
> --
> 1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel