Re: [Xen-devel] Re: how to handle paged hypercall args?
>>> On 02.12.10 at 11:11, Olaf Hering <olaf@xxxxxxxxx> wrote:
> On Thu, Nov 18, Keir Fraser wrote:
>
>> I've done something along these lines now as xen-unstable:22402. It actually
>> seems to work okay! So you can go ahead and use waitqueues in __hvm_copy()
>> now.
>
> This is my first attempt to do it.

I didn't look in detail whether that's being done in a non-intuitive way
elsewhere, but I can't see how the event you're waiting on would ever get
signaled - wouldn't you need to pass it into __hvm_copy() and further down
from there?

Jan

> It crashed Xen on the very first try in a spectacular way. But it
> happened only once for some reason.
> See my other mail.
>
>
> Olaf
>
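To make the question concrete: for the sleeping vcpu ever to run again,
__hvm_copy() would have to be told which queue the caller is waiting on, and
the paging path would have to signal that queue once the gfn is back in. A
rough sketch of the idea (the extra wq argument and the two waiter helpers
are invented here for illustration and do not exist in the tree; only the
p2m_mem_paging_populate() call and the HVMCOPY_gfn_paged_out return match
what __hvm_copy() does today):

static enum hvm_copy_result __hvm_copy(
    void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
    struct waitqueue_head *wq /* hypothetical extra argument */)
{
    /* ... */
    if ( p2m_is_paging(p2mt) )
    {
        p2m_mem_paging_populate(p2m, gfn);  /* kick the pager, as today */
        register_paging_waiter(gfn, wq);    /* hypothetical: remember the queue */
        return HVMCOPY_gfn_paged_out;       /* caller's wait_event() now sleeps */
    }
    /* ... */
}

/* ... and the page-in completion path would need the matching wake-up: */
void paging_page_in_done(struct domain *d, unsigned long gfn)
{
    struct waitqueue_head *wq = find_paging_waiter(d, gfn); /* hypothetical */

    if ( wq != NULL )
        wake_up(wq);
}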
> --- xen-unstable.hg-4.1.22447.orig/xen/arch/x86/hvm/hvm.c
> +++ xen-unstable.hg-4.1.22447/xen/arch/x86/hvm/hvm.c
> @@ -1986,69 +1986,117 @@ static enum hvm_copy_result __hvm_copy(
>  enum hvm_copy_result hvm_copy_to_guest_phys(
>      paddr_t paddr, void *buf, int size)
>  {
> -    return __hvm_copy(buf, paddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +        res = __hvm_copy(buf, paddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys,
> -                      0);
> +                      0)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>
>  enum hvm_copy_result hvm_copy_from_guest_phys(
>      void *buf, paddr_t paddr, int size)
>  {
> -    return __hvm_copy(buf, paddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +        res = __hvm_copy(buf, paddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys,
> -                      0);
> +                      0)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>
>  enum hvm_copy_result hvm_copy_to_guest_virt(
>      unsigned long vaddr, void *buf, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +        res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | PFEC_write_access | pfec);
> +                      PFEC_page_present | PFEC_write_access | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>
>  enum hvm_copy_result hvm_copy_from_guest_virt(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +        res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>
>  enum hvm_copy_result hvm_fetch_from_guest_virt(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
>      if ( hvm_nx_enabled(current) )
>          pfec |= PFEC_insn_fetch;
> -    return __hvm_copy(buf, vaddr, size,
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +        res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>
>  enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
>      unsigned long vaddr, void *buf, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +        res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | PFEC_write_access | pfec);
> +                      PFEC_page_present | PFEC_write_access | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>
>  enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +        res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>
>  enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
>      if ( hvm_nx_enabled(current) )
>          pfec |= PFEC_insn_fetch;
> -    return __hvm_copy(buf, vaddr, size,
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +        res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>
>  unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
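For reference, the wait_event() added in xen-unstable:22402 is essentially a
check-sleep-recheck loop, roughly of this shape (paraphrased from
xen/include/xen/wait.h, not verbatim):

#define wait_event(wq, condition)                             \
do {                                                          \
    if ( condition )                                          \
        break;                                                \
    for ( ; ; )                                               \
    {                                                         \
        prepare_to_wait(&wq);                                 \
        if ( condition ) /* re-checked after each wake-up */  \
            break;                                            \
        wait();          /* sleeps until wake_up() on wq */   \
    }                                                         \
    finish_wait(&wq);                                         \
} while ( 0 )

So the condition, i.e. another __hvm_copy() attempt, is only re-evaluated
after somebody calls wake_up() on the same queue. With a freshly initialised
local wq that nothing ever signals, the first HVMCOPY_gfn_paged_out return
parks the vcpu for good.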