[Xen-devel] [PATCH 23/23] xen/arch/*: add struct domain parameter to arch_do_domctl
Since the arch-independent do_domctl function now RCU locks the domain
specified by op->domain, pass the struct domain to the arch-specific
domctl function and remove the duplicate per-subfunction locking. This
also removes two get_domain/put_domain call pairs (in
XEN_DOMCTL_assign_device and XEN_DOMCTL_deassign_device), replacing
them with RCU locking.

Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/arm/domctl.c           |   2 +-
 xen/arch/x86/domctl.c           | 434 +++++++---------------------------------
 xen/common/domctl.c             |   2 +-
 xen/drivers/passthrough/iommu.c |  31 +--
 xen/include/xen/hypercall.h     |   2 +-
 xen/include/xen/iommu.h         |   3 +-
 6 files changed, 76 insertions(+), 398 deletions(-)

diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index 1a5f79f..12531d1 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -10,7 +10,7 @@
 #include <xen/errno.h>
 #include <public/domctl.h>
 
-long arch_do_domctl(struct xen_domctl *domctl,
+long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
                     XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
 {
     return -ENOSYS;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 7062f02..734b2f2 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -47,7 +47,7 @@ static int gdbsx_guest_mem_io(
 }
 
 long arch_do_domctl(
-    struct xen_domctl *domctl,
+    struct xen_domctl *domctl, struct domain *d,
     XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
 {
     long ret = 0;
@@ -57,23 +57,15 @@ long arch_do_domctl(
     case XEN_DOMCTL_shadow_op:
     {
-        struct domain *d;
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d != NULL )
-        {
-            ret = paging_domctl(d,
-                                &domctl->u.shadow_op,
-                                guest_handle_cast(u_domctl, void));
-            rcu_unlock_domain(d);
-            copy_to_guest(u_domctl, domctl, 1);
-        }
+        ret = paging_domctl(d,
+                            &domctl->u.shadow_op,
+                            guest_handle_cast(u_domctl, void));
+        copy_to_guest(u_domctl, domctl, 1);
     }
     break;
 
     case XEN_DOMCTL_ioport_permission:
     {
-        struct domain *d;
         unsigned int fp = domctl->u.ioport_permission.first_port;
         unsigned int np = domctl->u.ioport_permission.nr_ports;
         int allow = domctl->u.ioport_permission.allow_access;
@@ -82,10 +74,6 @@ long arch_do_domctl(
         if ( (fp + np) > 65536 )
             break;
 
-        ret = -ESRCH;
-        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
-            break;
-
         if ( np == 0 )
             ret = 0;
         else if ( xsm_ioport_permission(d, fp, fp + np - 1, allow) )
@@ -94,8 +82,6 @@ long arch_do_domctl(
             ret = ioports_permit_access(d, fp, fp + np - 1);
         else
             ret = ioports_deny_access(d, fp, fp + np - 1);
-
-        rcu_unlock_domain(d);
     }
     break;
 
@@ -103,23 +89,16 @@ long arch_do_domctl(
     {
         struct page_info *page;
         unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
-        domid_t dom = domctl->domain;
-        struct domain *d;
 
         ret = -EINVAL;
-
-        if ( unlikely(!mfn_valid(mfn)) ||
-             unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
+        if ( unlikely(!mfn_valid(mfn)) )
             break;
 
         page = mfn_to_page(mfn);
 
         ret = xsm_getpageframeinfo(d);
         if ( ret )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
 
         if ( likely(get_page(page, d)) )
         {
@@ -149,8 +128,6 @@ long arch_do_domctl(
             put_page(page);
         }
 
-        rcu_unlock_domain(d);
-
         copy_to_guest(u_domctl, domctl, 1);
     }
     break;
 
@@ -160,27 +137,17 @@ long arch_do_domctl(
     {
         unsigned int n, j;
         unsigned int num = domctl->u.getpageframeinfo3.num;
-        domid_t dom = domctl->domain;
-        struct domain *d;
         struct page_info *page;
         xen_pfn_t *arr;
 
-        ret = -ESRCH;
-        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
-            break;
-
         ret = xsm_getpageframeinfo(d);
         if ( ret )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
 
         if ( unlikely(num > 1024) ||
              unlikely(num != domctl->u.getpageframeinfo3.num) )
         {
             ret = -E2BIG;
-            rcu_unlock_domain(d);
             break;
         }
@@ -188,7 +155,6 @@ long arch_do_domctl(
         if ( !page )
         {
             ret = -ENOMEM;
-            rcu_unlock_domain(d);
             break;
         }
         arr = page_to_virt(page);
@@ -254,7 +220,6 @@ long arch_do_domctl(
 
         free_domheap_page(virt_to_page(arr));
 
-        rcu_unlock_domain(d);
         break;
     }
     /* fall thru */
@@ -262,25 +227,15 @@ long arch_do_domctl(
     {
         int n,j;
         int num = domctl->u.getpageframeinfo2.num;
-        domid_t dom = domctl->domain;
-        struct domain *d;
         uint32_t *arr32;
-        ret = -ESRCH;
-
-        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
-            break;
 
         ret = xsm_getpageframeinfo(d);
         if ( ret )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
 
         if ( unlikely(num > 1024) )
         {
             ret = -E2BIG;
-            rcu_unlock_domain(d);
             break;
         }
@@ -288,7 +243,6 @@ long arch_do_domctl(
         if ( !arr32 )
         {
             ret = -ENOMEM;
-            rcu_unlock_domain(d);
             break;
         }
 
@@ -360,78 +314,58 @@ long arch_do_domctl(
         }
 
         free_xenheap_page(arr32);
-
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_getmemlist:
     {
         int i;
-        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
         unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
         uint64_t mfn;
         struct page_info *page;
 
-        ret = -EINVAL;
-        if ( d != NULL )
-        {
-            ret = xsm_getmemlist(d);
-            if ( ret )
-            {
-                rcu_unlock_domain(d);
-                break;
-            }
+        ret = xsm_getmemlist(d);
+        if ( ret )
+            break;
 
-            spin_lock(&d->page_alloc_lock);
+        if ( unlikely(d->is_dying) ) {
+            ret = -EINVAL;
+            break;
+        }
 
-            if ( unlikely(d->is_dying) ) {
-                spin_unlock(&d->page_alloc_lock);
-                goto getmemlist_out;
-            }
+        spin_lock(&d->page_alloc_lock);
 
-            ret = i = 0;
-            page_list_for_each(page, &d->page_list)
+        ret = i = 0;
+        page_list_for_each(page, &d->page_list)
+        {
+            if ( i >= max_pfns )
+                break;
+            mfn = page_to_mfn(page);
+            if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
+                                      i, &mfn, 1) )
             {
-                if ( i >= max_pfns )
-                    break;
-                mfn = page_to_mfn(page);
-                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
-                                          i, &mfn, 1) )
-                {
-                    ret = -EFAULT;
-                    break;
-                }
-                ++i;
+                ret = -EFAULT;
+                break;
             }
-
-            spin_unlock(&d->page_alloc_lock);
-
-            domctl->u.getmemlist.num_pfns = i;
-            copy_to_guest(u_domctl, domctl, 1);
-        getmemlist_out:
-            rcu_unlock_domain(d);
+            ++i;
         }
+
+        spin_unlock(&d->page_alloc_lock);
+
+        domctl->u.getmemlist.num_pfns = i;
+        copy_to_guest(u_domctl, domctl, 1);
     }
     break;
 
     case XEN_DOMCTL_hypercall_init:
     {
-        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
         unsigned long gmfn = domctl->u.hypercall_init.gmfn;
         struct page_info *page;
         void *hypercall_page;
 
-        ret = -ESRCH;
-        if ( unlikely(d == NULL) )
-            break;
-
         ret = xsm_hypercall_init(d);
         if ( ret )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
 
         page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
@@ -440,7 +374,6 @@ long arch_do_domctl(
         {
             if ( page )
                 put_page(page);
-            rcu_unlock_domain(d);
             break;
         }
 
@@ -451,19 +384,12 @@ long arch_do_domctl(
         unmap_domain_page(hypercall_page);
 
         put_page_and_type(page);
-
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_sethvmcontext:
     {
         struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size };
-        struct domain *d;
-
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
 
         ret = xsm_hvmcontext(d, domctl->cmd);
         if ( ret )
@@ -488,19 +414,12 @@ long arch_do_domctl(
     sethvmcontext_out:
         if ( c.data != NULL )
             xfree(c.data);
-
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_gethvmcontext:
     {
         struct hvm_domain_context c = { 0 };
-        struct domain *d;
-
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
 
         ret = xsm_hvmcontext(d, domctl->cmd);
         if ( ret )
@@ -544,53 +463,33 @@ long arch_do_domctl(
         if ( c.data != NULL )
             xfree(c.data);
-
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_gethvmcontext_partial:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         ret = xsm_hvmcontext(d, domctl->cmd);
         if ( ret )
-            goto gethvmcontext_partial_out;
+            break;
 
         ret = -EINVAL;
         if ( !is_hvm_domain(d) )
-            goto gethvmcontext_partial_out;
+            break;
 
         domain_pause(d);
         ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
                            domctl->u.hvmcontext_partial.instance,
                            domctl->u.hvmcontext_partial.buffer);
         domain_unpause(d);
-
-    gethvmcontext_partial_out:
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_set_address_size:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         ret = xsm_address_size(d, domctl->cmd);
         if ( ret )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
 
         switch ( domctl->u.address_size.size )
         {
@@ -604,31 +503,19 @@ long arch_do_domctl(
             ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
             break;
         }
-
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_get_address_size:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         ret = xsm_address_size(d, domctl->cmd);
         if ( ret )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
 
         domctl->u.address_size.size =
             is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
 
         ret = 0;
-        rcu_unlock_domain(d);
 
         if ( copy_to_guest(u_domctl, domctl, 1) )
             ret = -EFAULT;
@@ -637,76 +524,51 @@ long arch_do_domctl(
 
     case XEN_DOMCTL_set_machine_address_size:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         ret = xsm_machine_address_size(d, domctl->cmd);
         if ( ret )
-            goto set_machine_address_size_out;
+            break;
 
         ret = -EBUSY;
         if ( d->tot_pages > 0 )
-            goto set_machine_address_size_out;
+            break;
 
         d->arch.physaddr_bitsize = domctl->u.address_size.size;
 
         ret = 0;
-    set_machine_address_size_out:
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_get_machine_address_size:
    {
-        struct domain *d;
-
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         ret = xsm_machine_address_size(d, domctl->cmd);
         if ( ret )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
 
         domctl->u.address_size.size = d->arch.physaddr_bitsize;
 
         ret = 0;
-        rcu_unlock_domain(d);
 
         if ( copy_to_guest(u_domctl, domctl, 1) )
             ret = -EFAULT;
-
-
     }
     break;
 
     case XEN_DOMCTL_sendtrigger:
     {
-        struct domain *d;
         struct vcpu *v;
 
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         ret = xsm_sendtrigger(d);
         if ( ret )
-            goto sendtrigger_out;
+            break;
 
         ret = -EINVAL;
         if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
-            goto sendtrigger_out;
+            break;
 
         ret = -ESRCH;
         if ( domctl->u.sendtrigger.vcpu >= d->max_vcpus ||
             (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
-            goto sendtrigger_out;
+            break;
 
         switch ( domctl->u.sendtrigger.trigger )
         {
@@ -743,34 +605,27 @@ long arch_do_domctl(
         default:
             ret = -ENOSYS;
         }
-
-    sendtrigger_out:
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_bind_pt_irq:
     {
-        struct domain * d;
         xen_domctl_bind_pt_irq_t * bind;
 
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
         bind = &(domctl->u.bind_pt_irq);
 
         ret = -EINVAL;
         if ( !is_hvm_domain(d) )
-            goto bind_out;
+            break;
 
         ret = xsm_bind_pt_irq(d, bind);
         if ( ret )
-            goto bind_out;
+            break;
 
         ret = -EPERM;
         if ( !IS_PRIV(current->domain) &&
             !irq_access_permitted(current->domain, bind->machine_irq) )
-            goto bind_out;
+            break;
 
         ret = -ESRCH;
         if ( iommu_enabled )
@@ -782,30 +637,23 @@ long arch_do_domctl(
         if ( ret < 0 )
             printk(XENLOG_G_ERR
                    "pt_irq_create_bind failed (%ld) for dom%d\n",
                    ret, d->domain_id);
-
-    bind_out:
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_unbind_pt_irq:
     {
-        struct domain * d;
         xen_domctl_bind_pt_irq_t * bind;
 
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
         bind = &(domctl->u.bind_pt_irq);
 
         ret = -EPERM;
         if ( !IS_PRIV(current->domain) &&
             !irq_access_permitted(current->domain, bind->machine_irq) )
-            goto unbind_out;
+            break;
 
         ret = xsm_unbind_pt_irq(d, bind);
         if ( ret )
-            goto unbind_out;
+            break;
 
         if ( iommu_enabled )
         {
@@ -816,15 +664,11 @@ long arch_do_domctl(
         if ( ret < 0 )
             printk(XENLOG_G_ERR
                    "pt_irq_destroy_bind failed (%ld) for dom%d\n",
                    ret, d->domain_id);
-
-    unbind_out:
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_memory_mapping:
     {
-        struct domain *d;
         unsigned long gfn = domctl->u.memory_mapping.first_gfn;
         unsigned long mfn = domctl->u.memory_mapping.first_mfn;
         unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
@@ -840,15 +684,9 @@ long arch_do_domctl(
              !iomem_access_permitted(current->domain, mfn, mfn + nr_mfns - 1) )
             break;
 
-        ret = -ESRCH;
-        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
-            break;
-
         ret = xsm_iomem_mapping(d, mfn, mfn + nr_mfns - 1, add);
-        if ( ret ) {
-            rcu_unlock_domain(d);
+        if ( ret )
             break;
-        }
 
         if ( add )
         {
@@ -870,15 +708,12 @@ long arch_do_domctl(
                 clear_mmio_p2m_entry(d, gfn+i);
             ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
         }
-
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_ioport_mapping:
     {
 #define MAX_IOPORTS 0x10000
-        struct domain *d;
         struct hvm_iommu *hd;
         unsigned int fgp = domctl->u.ioport_mapping.first_gport;
         unsigned int fmp = domctl->u.ioport_mapping.first_mport;
@@ -902,15 +737,9 @@ long arch_do_domctl(
              !ioports_access_permitted(current->domain, fmp, fmp + np - 1) )
             break;
 
-        ret = -ESRCH;
-        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
-            break;
-
         ret = xsm_ioport_mapping(d, fmp, fmp + np - 1, add);
-        if ( ret ) {
-            rcu_unlock_domain(d);
+        if ( ret )
             break;
-        }
 
         hd = domain_hvm_iommu(d);
         if ( add )
@@ -951,30 +780,19 @@ long arch_do_domctl(
             }
             ret = ioports_deny_access(d, fmp, fmp + np - 1);
         }
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_pin_mem_cacheattr:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d == NULL )
-            break;
-
         ret = xsm_pin_mem_cacheattr(d);
         if ( ret )
-            goto pin_out;
+            break;
 
         ret = hvm_set_mem_pinned_cacheattr(
             d, domctl->u.pin_mem_cacheattr.start,
             domctl->u.pin_mem_cacheattr.end,
             domctl->u.pin_mem_cacheattr.type);
-
-    pin_out:
-        rcu_unlock_domain(d);
     }
     break;
 
@@ -982,19 +800,13 @@ long arch_do_domctl(
     case XEN_DOMCTL_get_ext_vcpucontext:
     {
         struct xen_domctl_ext_vcpucontext *evc;
-        struct domain *d;
         struct vcpu *v;
 
         evc = &domctl->u.ext_vcpucontext;
 
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d == NULL )
-            break;
-
         ret = xsm_ext_vcpucontext(d, domctl->cmd);
         if ( ret )
-            goto ext_vcpucontext_out;
+            break;
 
         ret = -ESRCH;
         if ( (evc->vcpu >= d->max_vcpus) ||
@@ -1071,7 +883,6 @@ long arch_do_domctl(
         ret = 0;
 
     ext_vcpucontext_out:
-        rcu_unlock_domain(d);
         if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
             copy_to_guest(u_domctl, domctl, 1) )
             ret = -EFAULT;
@@ -1080,16 +891,10 @@ long arch_do_domctl(
 
     case XEN_DOMCTL_set_cpuid:
     {
-        struct domain *d;
         xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
         cpuid_input_t *cpuid = NULL;
         int i;
 
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d == NULL )
-            break;
-
         for ( i = 0; i < MAX_CPUID_INPUT; i++ )
         {
             cpuid = &d->arch.cpuids[i];
@@ -1112,21 +917,13 @@ long arch_do_domctl(
             memcpy(cpuid, ctl, sizeof(cpuid_input_t));
             ret = 0;
         }
-
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_gettscinfo:
     {
-        struct domain *d;
         xen_guest_tsc_info_t info;
 
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d == NULL )
-            break;
-
         domain_pause(d);
         tsc_get_info(d, &info.tsc_mode,
                         &info.elapsed_nsec,
@@ -1137,20 +934,11 @@ long arch_do_domctl(
         else
             ret = 0;
         domain_unpause(d);
-
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_settscinfo:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d == NULL )
-            break;
-
         domain_pause(d);
         tsc_set_info(d, domctl->u.tsc_info.info.tsc_mode,
                      domctl->u.tsc_info.info.elapsed_nsec,
@@ -1158,138 +946,83 @@ long arch_do_domctl(
                      domctl->u.tsc_info.info.incarnation);
         domain_unpause(d);
 
-        rcu_unlock_domain(d);
         ret = 0;
     }
     break;
 
     case XEN_DOMCTL_suppress_spurious_page_faults:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d != NULL )
-        {
-            d->arch.suppress_spurious_page_faults = 1;
-            rcu_unlock_domain(d);
-            ret = 0;
-        }
+        d->arch.suppress_spurious_page_faults = 1;
+        ret = 0;
     }
     break;
 
     case XEN_DOMCTL_debug_op:
     {
-        struct domain *d;
         struct vcpu *v;
 
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d == NULL )
-            break;
-
         ret = -EINVAL;
         if ( (domctl->u.debug_op.vcpu >= d->max_vcpus) ||
             ((v = d->vcpu[domctl->u.debug_op.vcpu]) == NULL) )
-            goto debug_op_out;
+            break;
 
         ret = -EINVAL;
         if ( !is_hvm_domain(d))
-            goto debug_op_out;
+            break;
 
         ret = hvm_debug_op(v, domctl->u.debug_op.op);
-
-    debug_op_out:
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_gdbsx_guestmemio:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         domctl->u.gdbsx_guest_memio.remain =
             domctl->u.gdbsx_guest_memio.len;
 
         ret = gdbsx_guest_mem_io(domctl->domain, &domctl->u.gdbsx_guest_memio);
         if ( !ret && copy_to_guest(u_domctl, domctl, 1) )
             ret = -EFAULT;
-
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_gdbsx_pausevcpu:
     {
-        struct domain *d;
         struct vcpu *v;
 
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         ret = -EBUSY;
         if ( !d->is_paused_by_controller )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
         ret = -EINVAL;
         if ( domctl->u.gdbsx_pauseunp_vcpu.vcpu >= MAX_VIRT_CPUS ||
             (v = d->vcpu[domctl->u.gdbsx_pauseunp_vcpu.vcpu]) == NULL )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
         vcpu_pause(v);
         ret = 0;
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_gdbsx_unpausevcpu:
     {
-        struct domain *d;
         struct vcpu *v;
 
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         ret = -EBUSY;
         if ( !d->is_paused_by_controller )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
         ret = -EINVAL;
         if ( domctl->u.gdbsx_pauseunp_vcpu.vcpu >= MAX_VIRT_CPUS ||
             (v = d->vcpu[domctl->u.gdbsx_pauseunp_vcpu.vcpu]) == NULL )
-        {
-            rcu_unlock_domain(d);
             break;
-        }
         if ( !atomic_read(&v->pause_count) )
             printk("WARN: Unpausing vcpu:%d which is not paused\n", v->vcpu_id);
         vcpu_unpause(v);
         ret = 0;
-        rcu_unlock_domain(d);
     }
     break;
 
     case XEN_DOMCTL_gdbsx_domstatus:
     {
-        struct domain *d;
         struct vcpu *v;
 
-        ret = -ESRCH;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         domctl->u.gdbsx_domstatus.vcpu_id = -1;
         domctl->u.gdbsx_domstatus.paused = d->is_paused_by_controller;
         if ( domctl->u.gdbsx_domstatus.paused )
@@ -1309,7 +1042,6 @@ long arch_do_domctl(
         ret = 0;
         if ( copy_to_guest(u_domctl, domctl, 1) )
             ret = -EFAULT;
-        rcu_unlock_domain(d);
     }
     break;
 
@@ -1317,7 +1049,6 @@ long arch_do_domctl(
     case XEN_DOMCTL_getvcpuextstate:
     {
         struct xen_domctl_vcpuextstate *evc;
-        struct domain *d;
         struct vcpu *v;
         uint32_t offset = 0;
         uint64_t _xfeature_mask = 0;
@@ -1328,12 +1059,6 @@ long arch_do_domctl(
 
         evc = &domctl->u.vcpuextstate;
 
-        ret = -ESRCH;
-
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d == NULL )
-            break;
-
         ret = xsm_vcpuextstate(d, domctl->cmd);
         if ( ret )
             goto vcpuextstate_out;
@@ -1432,7 +1157,6 @@ long arch_do_domctl(
         ret = 0;
 
     vcpuextstate_out:
-        rcu_unlock_domain(d);
         if ( (domctl->cmd == XEN_DOMCTL_getvcpuextstate) &&
             copy_to_guest(u_domctl, domctl, 1) )
             ret = -EFAULT;
@@ -1441,50 +1165,33 @@ long arch_do_domctl(
 
     case XEN_DOMCTL_mem_event_op:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d != NULL )
-        {
-            ret = mem_event_domctl(d, &domctl->u.mem_event_op,
-                                   guest_handle_cast(u_domctl, void));
-            rcu_unlock_domain(d);
-            copy_to_guest(u_domctl, domctl, 1);
-        }
+        ret = mem_event_domctl(d, &domctl->u.mem_event_op,
+                               guest_handle_cast(u_domctl, void));
+        copy_to_guest(u_domctl, domctl, 1);
     }
     break;
 
     case XEN_DOMCTL_mem_sharing_op:
     {
-        struct domain *d;
-
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d != NULL )
-        {
-            ret = xsm_mem_sharing(d);
-            if ( !ret )
-                ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
-            rcu_unlock_domain(d);
-        }
+        ret = xsm_mem_sharing(d);
+        if ( !ret )
+            ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
     }
     break;
 
 #if P2M_AUDIT
     case XEN_DOMCTL_audit_p2m:
     {
-        struct domain *d;
-
-        ret = rcu_lock_remote_domain_by_id(domctl->domain, &d);
-        if ( ret != 0 )
+        if ( d == current->domain )
+        {
+            ret = -EPERM;
             break;
+        }
 
         audit_p2m(d,
                   &domctl->u.audit_p2m.orphans,
                   &domctl->u.audit_p2m.m2p_bad,
                   &domctl->u.audit_p2m.p2m_bad);
-        rcu_unlock_domain(d);
         if ( copy_to_guest(u_domctl, domctl, 1) )
             ret = -EFAULT;
     }
@@ -1493,29 +1200,22 @@ long arch_do_domctl(
 
     case XEN_DOMCTL_set_access_required:
     {
-        struct domain *d;
         struct p2m_domain* p2m;
 
         ret = -EPERM;
-        if ( current->domain->domain_id == domctl->domain )
+        if ( current->domain == d )
             break;
 
-        ret = -ESRCH;
-        d = rcu_lock_domain_by_id(domctl->domain);
-        if ( d != NULL )
-        {
-            ret = xsm_mem_event_setup(d);
-            if ( !ret ) {
-                p2m = p2m_get_hostp2m(d);
-                p2m->access_required = domctl->u.access_required.access_required;
-            }
-            rcu_unlock_domain(d);
-        }
+        ret = xsm_mem_event_setup(d);
+        if ( !ret ) {
+            p2m = p2m_get_hostp2m(d);
+            p2m->access_required = domctl->u.access_required.access_required;
+        }
     }
     break;
 
     default:
-        ret = iommu_do_domctl(domctl, u_domctl);
+        ret = iommu_do_domctl(domctl, d, u_domctl);
         break;
     }
 
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 76f0f90..07c95a3 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -861,7 +861,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
         break;
 
     default:
-        ret = arch_do_domctl(op, u_domctl);
+        ret = arch_do_domctl(op, d, u_domctl);
         break;
     }
 
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index b4cf16c..b3c66f8 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -526,10 +526,9 @@ void iommu_crash_shutdown(void)
 }
 
 int iommu_do_domctl(
-    struct xen_domctl *domctl,
+    struct xen_domctl *domctl, struct domain *d,
     XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
 {
-    struct domain *d;
     u16 seg;
     u8 bus, devfn;
     int ret = 0;
@@ -548,10 +547,6 @@ int iommu_do_domctl(
         if ( ret )
             break;
 
-        ret = -EINVAL;
-        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
-            break;
-
         seg = domctl->u.get_device_group.machine_sbdf >> 16;
         bus = (domctl->u.get_device_group.machine_sbdf >> 8) & 0xff;
         devfn = domctl->u.get_device_group.machine_sbdf & 0xff;
@@ -572,7 +567,6 @@ int iommu_do_domctl(
         }
         if ( copy_to_guest(u_domctl, domctl, 1) )
             ret = -EFAULT;
-        rcu_unlock_domain(d);
     }
     break;
 
@@ -595,20 +589,15 @@ int iommu_do_domctl(
         break;
 
     case XEN_DOMCTL_assign_device:
-        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) ||
-             unlikely(d->is_dying) )
+        if ( unlikely(d->is_dying) )
         {
-            printk(XENLOG_G_ERR
-                   "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
             ret = -EINVAL;
-            if ( d )
-                goto assign_device_out;
             break;
         }
 
         ret = xsm_assign_device(d, domctl->u.assign_device.machine_sbdf);
         if ( ret )
-            goto assign_device_out;
+            break;
 
         seg = domctl->u.get_device_group.machine_sbdf >> 16;
         bus = (domctl->u.assign_device.machine_sbdf >> 8) & 0xff;
@@ -622,22 +611,12 @@ int iommu_do_domctl(
                    seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                    d->domain_id, ret);
 
-    assign_device_out:
-        put_domain(d);
         break;
 
     case XEN_DOMCTL_deassign_device:
-        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
-        {
-            printk(XENLOG_G_ERR
-                   "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
-            ret = -EINVAL;
-            break;
-        }
-
         ret = xsm_deassign_device(d, domctl->u.assign_device.machine_sbdf);
         if ( ret )
-            goto deassign_device_out;
+            break;
 
         seg = domctl->u.get_device_group.machine_sbdf >> 16;
         bus = (domctl->u.assign_device.machine_sbdf >> 8) & 0xff;
@@ -652,8 +631,6 @@ int iommu_do_domctl(
                    seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                    d->domain_id, ret);
 
-    deassign_device_out:
-        put_domain(d);
         break;
 
     default:
diff --git a/xen/include/xen/hypercall.h b/xen/include/xen/hypercall.h
index 1b71071..36796d2 100644
--- a/xen/include/xen/hypercall.h
+++ b/xen/include/xen/hypercall.h
@@ -37,7 +37,7 @@ do_domctl(
 
 extern long
 arch_do_domctl(
-    struct xen_domctl *domctl,
+    struct xen_domctl *domctl, struct domain *d,
     XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
 
 extern long
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 605c7b3..c2951a5 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -156,7 +156,8 @@ void iommu_crash_shutdown(void);
 void iommu_set_dom0_mapping(struct domain *d);
 void iommu_share_p2m_table(struct domain *d);
 
-int iommu_do_domctl(struct xen_domctl *, XEN_GUEST_HANDLE(xen_domctl_t));
+int iommu_do_domctl(struct xen_domctl *, struct domain *d,
+                    XEN_GUEST_HANDLE(xen_domctl_t));
 
 void iommu_iotlb_flush(struct domain *d, unsigned long gfn,
                        unsigned int page_count);
 void iommu_iotlb_flush_all(struct domain *d);
-- 
1.7.11.4
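
[Editor's note, not part of the submitted patch] The change above only
makes sense next to the caller-side pattern the commit message refers
to: do_domctl() in xen/common/domctl.c now looks up and RCU-locks the
target domain once and hands the locked "struct domain *d" to
arch_do_domctl()/iommu_do_domctl(), which is why every per-subfunction
rcu_lock_domain_by_id()/rcu_unlock_domain() pair can be dropped. The
following is only an illustrative sketch of that calling convention;
the real do_domctl() additionally handles domctls that target no
domain, XSM checks, and the global domctl lock, all omitted here.

    /* Sketch only: simplified shape of the common do_domctl() caller. */
    long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
    {
        long ret;
        struct xen_domctl curop, *op = &curop;
        struct domain *d;

        if ( copy_from_guest(op, u_domctl, 1) )
            return -EFAULT;

        /* The target domain is looked up and RCU-locked exactly once. */
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            return -ESRCH;

        switch ( op->cmd )
        {
            /* ... arch-independent cases operate on 'd' directly ... */
        default:
            /* Arch code now receives the already-locked domain. */
            ret = arch_do_domctl(op, d, u_domctl);
            break;
        }

        rcu_unlock_domain(d);
        return ret;
    }

With the lock held across the whole hypercall, each arch subfunction
can simply "break" out on error instead of jumping to a per-case
unlock label, which is where most of the 398 deleted lines come from.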