# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1164685924 -32400
# Node ID ec4428278e416371c6ddfaaa3bcc10419e55dff2
# Parent  84c0f49de1b1e241a4b409a48192c110b01788a0
[XENOPROFILE] fix share_xenoprof_page_with_guest() and clean up.

- share_xenoprof_page_with_guest() must check the page reference count
  manually and clear the page owner in order to keep page_info
  consistent.
  free_domheap_pages(), called by put_page(), doesn't clear the page
  owner; that is intentional.
  On the other hand, share_xen_page_with_guest() first checks whether
  the page owner is already the requested one.  If so, it simply
  returns without doing anything, because share_xen_page_with_guest()
  expects freshly allocated pages or already shared ones; it doesn't
  expect pages freed via put_page()-and-free_domheap_pages() or pages
  shared with other domains.
- clean up: remove the unused argument gmaddr of alloc_xenoprof_buf()
  and alloc_xenoprof_struct().

PATCHNAME: xenoprof_common_allocation

Signed-off-by: Isaku Yamahata

diff -r 84c0f49de1b1 -r ec4428278e41 xen/common/xenoprof.c
--- a/xen/common/xenoprof.c	Mon Nov 27 10:06:41 2006 +0000
+++ b/xen/common/xenoprof.c	Tue Nov 28 12:52:04 2006 +0900
@@ -92,13 +92,42 @@ static void xenoprof_reset_buf(struct do
     }
 }

-static void
+static int
 share_xenoprof_page_with_guest(struct domain* d, unsigned long mfn, int npages)
 {
     int i;

+    /*
+     * Kludge:
+     * free_domheap_pages(), called by put_page(), doesn't clear the
+     * page owner.  That is intentional.
+     * On the other hand, share_xen_page_with_guest() first checks
+     * whether the page owner is already the requested one.
+     * If so, it simply returns without doing anything, because
+     * share_xen_page_with_guest() expects freshly allocated pages or
+     * already shared ones; it doesn't expect pages freed via
+     * put_page()-and-free_domheap_pages() or pages shared with
+     * other domains.
+     * So check manually and clear the page owner here in order to
+     * keep page_info consistent.
+     */
+    for ( i = 0; i < npages; i++ )
+    {
+        struct page_info* page = mfn_to_page(mfn + i);
+        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
+        {
+            gdprintk(XENLOG_INFO, "%s: mfn 0x%lx page->count_info 0x%x\n",
+                     __func__, mfn + i, page->count_info);
+            return -EBUSY;
+        }
+        page_set_owner(page, NULL);
+    }
+
+    /* Share the pages so that the kernel can map them. */
     for ( i = 0; i < npages; i++ )
         share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);
+
+    return 0;
 }

 static void
@@ -128,7 +157,7 @@ xenoprof_shared_gmfn_with_guest(
     }
 }

-static char *alloc_xenoprof_buf(struct domain *d, int npages, uint64_t gmaddr)
+static char *alloc_xenoprof_buf(struct domain *d, int npages)
 {
     char *rawbuf;
     int order;
@@ -146,7 +175,7 @@ static char *alloc_xenoprof_buf(struct d
 }

 static int alloc_xenoprof_struct(
-    struct domain *d, int max_samples, int is_passive, uint64_t gmaddr)
+    struct domain *d, int max_samples, int is_passive)
 {
     struct vcpu *v;
     int nvcpu, npages, bufsize, max_bufsize;
@@ -179,8 +208,7 @@ static int alloc_xenoprof_struct(
                (max_samples - 1) * sizeof(struct event_log);
     npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

-    d->xenoprof->rawbuf = alloc_xenoprof_buf(is_passive ? dom0 : d, npages,
-                                             gmaddr);
+    d->xenoprof->rawbuf = alloc_xenoprof_buf(is_passive ? dom0 : d, npages);

     if ( d->xenoprof->rawbuf == NULL )
     {
@@ -368,8 +396,7 @@ static int add_passive_list(XEN_GUEST_HA

     if ( d->xenoprof == NULL )
     {
-        ret = alloc_xenoprof_struct(
-            d, passive.max_samples, 1, passive.buf_gmaddr);
+        ret = alloc_xenoprof_struct(d, passive.max_samples, 1);
         if ( ret < 0 )
         {
             put_domain(d);
@@ -377,9 +404,14 @@ static int add_passive_list(XEN_GUEST_HA
         }
     }

-    share_xenoprof_page_with_guest(
+    ret = share_xenoprof_page_with_guest(
         current->domain, virt_to_mfn(d->xenoprof->rawbuf),
         d->xenoprof->npages);
+    if ( ret < 0 )
+    {
+        put_domain(d);
+        return ret;
+    }

     d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
     passive.nbuf = d->xenoprof->nbuf;
@@ -509,15 +541,15 @@ static int xenoprof_op_get_buffer(XEN_GU
      */
     if ( d->xenoprof == NULL )
     {
-        ret = alloc_xenoprof_struct(
-            d, xenoprof_get_buffer.max_samples, 0,
-            xenoprof_get_buffer.buf_gmaddr);
+        ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0);
         if ( ret < 0 )
             return ret;
     }

-    share_xenoprof_page_with_guest(
+    ret = share_xenoprof_page_with_guest(
         d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
+    if ( ret < 0 )
+        return ret;

     xenoprof_reset_buf(d);
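Illustrative sketch (not part of the patch): a minimal, self-contained C
model of the invariant the description above relies on, namely that a
buffer page may only be handed to a guest once its reference count has
dropped to zero and any stale owner left behind by
put_page()/free_domheap_pages() has been cleared.  The names fake_page,
PGC_ALLOCATED, PGC_COUNT_MASK, check_and_disown() and give_to_guest()
are simplified stand-ins for Xen's struct page_info, PGC_allocated,
PGC_count_mask, the new check loop and share_xen_page_with_guest();
they are not real Xen APIs.

#include <stdio.h>

#define PGC_ALLOCATED  (1u << 31)           /* stand-in for PGC_allocated  */
#define PGC_COUNT_MASK ((1u << 31) - 1)     /* stand-in for PGC_count_mask */

struct fake_page {
    unsigned int count_info;   /* allocation flag + reference count */
    const char  *owner;        /* owning domain, NULL if none       */
};

/*
 * Phase 1 (mirrors the loop added to share_xenoprof_page_with_guest()):
 * refuse pages that are still allocated or referenced, and clear any
 * stale owner left behind by a previous free.
 */
static int check_and_disown(struct fake_page *pg)
{
    if ( (pg->count_info & (PGC_ALLOCATED | PGC_COUNT_MASK)) != 0 )
        return -1;             /* -EBUSY in the real code */
    pg->owner = NULL;          /* page_set_owner(page, NULL) */
    return 0;
}

/* Phase 2 (mirrors share_xen_page_with_guest()): hand the page over. */
static void give_to_guest(struct fake_page *pg, const char *domain)
{
    pg->owner = domain;
}

int main(void)
{
    /* A freed page whose owner field was never cleared: must succeed. */
    struct fake_page freed = { .count_info = 0, .owner = "stale-domU" };
    /* A page still allocated and referenced: sharing must fail. */
    struct fake_page busy  = { .count_info = PGC_ALLOCATED | 1, .owner = "domU" };

    if ( check_and_disown(&freed) == 0 )
    {
        give_to_guest(&freed, "dom0");
        printf("freed page now owned by %s\n", freed.owner);
    }
    if ( check_and_disown(&busy) != 0 )
        printf("busy page correctly rejected\n");
    return 0;
}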