|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [PATCH v5 3/3] x86/ioreq: Extend ioreq server to support multiple ioreq pages
On 16.03.2026 12:17, Julian Vetter wrote:
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -163,6 +163,14 @@ static int hvm_map_ioreq_gfn(struct ioreq_server *s,
> bool buf)
> if ( d->is_dying )
> return -EINVAL;
>
> + /*
> + * The legacy GFN path supports only a single ioreq page. Guests
> requiring
> + * more ioreq slots must use the resource mapping interface
> + * (XENMEM_acquire_resource).
> + */
> + if ( !buf && nr_ioreq_pages(d) > 1 )
> + return -EOPNOTSUPP;
Nit towards the comment: It's not guests using this themselves, but their
device models. This wants to be worded accurately.
> --- a/xen/common/ioreq.c
> +++ b/xen/common/ioreq.c
> @@ -261,8 +261,9 @@ bool vcpu_ioreq_handle_completion(struct vcpu *v)
> static int ioreq_server_alloc_mfn(struct ioreq_server *s, bool buf)
> {
> struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> - struct page_info *page;
> - mfn_t mfn;
> + unsigned int i, nr_pages = buf ? 1 : nr_ioreq_pages(s->target);
> + mfn_t *mfns;
> + int rc;
>
> if ( iorp->va )
> {
> @@ -277,11 +278,20 @@ static int ioreq_server_alloc_mfn(struct ioreq_server
> *s, bool buf)
> return 0;
> }
>
> + mfns = xmalloc_array(mfn_t, nr_pages);
> + if ( !mfns )
> + return -ENOMEM;
It would be nice to avoid this allocation for guests requiring only a single
page, perhaps even for ones requiring only a few. (Ideally we'd avoid such a
runtime allocation altogether, as they hinder certification aiui.)
> + for ( i = 0; i < nr_pages; i++ )
> {
> - page = alloc_domheap_page(s->target, MEMF_no_refcount);
> + struct page_info *page = alloc_domheap_page(s->target,
> + MEMF_no_refcount);
To limit churn in this patch as much as possible, this scope reduction of
"page" may want to also move into the earlier, purely mechanical change.
> if ( !page )
> - return -ENOMEM;
> + {
> + rc = -ENOMEM;
> + goto fail;
> + }
This and ...
> if ( !get_page_and_type(page, s->target, PGT_writable_page) )
> {
> @@ -290,41 +300,60 @@ static int ioreq_server_alloc_mfn(struct ioreq_server
> *s, bool buf)
> * here is a clear indication of something fishy going on.
> */
> domain_crash(s->emulator);
> - return -ENODATA;
> + rc = -ENODATA;
> + goto fail;
> }
... this error paths have different cleanup needs, yet they both simply
"goto fail".
> - mfn = page_to_mfn(page);
> + mfns[i] = page_to_mfn(page);
> }
> - iorp->va = vmap(&mfn, 1);
> +
> + iorp->va = vmap(mfns, nr_pages);
> if ( !iorp->va )
> + {
> + rc = -ENOMEM;
> goto fail;
> + }
> +
> + xfree(mfns);
> +
> + for ( i = 0; i < nr_pages; i++ )
> + clear_page((char *)iorp->va + i * PAGE_SIZE);
As before - please prefer casts to void * when no particular type is needed.
Plus you don't need a cast here at all, do you?
I wonder though - might this not better be a single memset()? (Else we may
want to introduce clear_pages().)
> - clear_page(iorp->va);
> return 0;
>
> fail:
> - put_page_alloc_ref(page);
> - put_page_and_type(page);
> + while ( i-- )
> + {
> + struct page_info *page = mfn_to_page(mfns[i]);
> +
> + put_page_alloc_ref(page);
> + put_page_and_type(page);
> + }
> + xfree(mfns);
>
> - return -ENOMEM;
> + return rc;
> }
>
> static void ioreq_server_free_mfn(struct ioreq_server *s, bool buf)
> {
> struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> - struct page_info *page;
> + unsigned int i, nr_pages;
>
> if ( !iorp->va )
> return;
>
> + nr_pages = vmap_size(iorp->va);
Why would you need to query this? You should be able to calculate this the
same as ioreq_server_alloc_mfn() does, I suppose.
> + for ( i = 0; i < nr_pages; i++ )
> {
> - page = vmap_to_page(iorp->va);
> - vunmap(iorp->va);
> - iorp->va = NULL;
> + struct page_info *page = vmap_to_page(iorp->va + i * PAGE_SIZE);
>
> put_page_alloc_ref(page);
> put_page_and_type(page);
> }
> +
> + vunmap(iorp->va);
> + iorp->va = NULL;
> }
This re-ordering isn't nice. At least iorp->va ought to be cleared before
the references on the pages are dropped. I fear, though, that even the
mapping needs dropping ahead of time: By (likely) freeing the pages, they
can be reused for another purpose. Speculative accesses through the
mapping would better be excluded as a possible source of a security issue.
> @@ -337,12 +366,28 @@ bool is_ioreq_server_page(struct domain *d, const
> struct page_info *page)
>
> FOR_EACH_IOREQ_SERVER(d, id, s)
> {
> - if ( (s->ioreq.va && vmap_to_page(s->ioreq.va) == page) ||
> - (s->bufioreq.va && vmap_to_page(s->bufioreq.va) == page) )
> + if ( s->bufioreq.va && vmap_to_page(s->bufioreq.va) == page )
> {
> found = true;
> break;
> }
> +
> + if ( s->ioreq.va )
> + {
> + unsigned int i;
> +
> + for ( i = 0; i < vmap_size(s->ioreq.va); i++ )
> + {
> + if ( vmap_to_page(s->ioreq.va + i * PAGE_SIZE) == page )
> + {
> + found = true;
> + break;
> + }
> + }
This raises the question whether storing the array of MFNs permanently
wouldn't be better. This would then also eliminate other concerns voiced
above.
> @@ -818,26 +863,30 @@ int ioreq_server_get_frame(struct domain *d, ioservid_t
> id,
> if ( rc )
> goto out;
>
> - switch ( idx )
> + if ( idx == XENMEM_resource_ioreq_server_frame_bufioreq )
> {
> - case XENMEM_resource_ioreq_server_frame_bufioreq:
> rc = -ENOENT;
> if ( !HANDLE_BUFIOREQ(s) )
> goto out;
>
> *mfn = page_to_mfn(vmap_to_page(s->bufioreq.va));
> rc = 0;
> - break;
> + }
> + else if ( idx >= XENMEM_resource_ioreq_server_frame_ioreq(0) &&
> + idx <
> XENMEM_resource_ioreq_server_frame_ioreq(nr_ioreq_pages(d)) )
> + {
> + unsigned int page_idx = idx -
> XENMEM_resource_ioreq_server_frame_ioreq(0);
> + if ( page_idx >= vmap_size(s->ioreq.va) )
This ought to be impossible, so may at best want to be an ASSERT().
Also, nit: Blank line please between declaration(s) and statement(s).
Jan
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |