[xen master] xen/ioreq: Move x86's ioreq_server to struct domain
commit 1d9c45bd6382a0e823a3f2411c988df12921f2fb
Author:     Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>
AuthorDate: Fri Jan 29 03:48:36 2021 +0200
Commit:     Julien Grall <jgrall@xxxxxxxxxx>
CommitDate: Fri Jan 29 16:18:15 2021 +0000

    xen/ioreq: Move x86's ioreq_server to struct domain

    The IOREQ is a common feature now and this struct will be used
    on Arm as is. Move it to common struct domain. This also
    significantly reduces the layering violation in the common code
    (*arch.hvm* usage).

    We don't move ioreq_gfn since it is not used in the common code
    (the "legacy" mechanism is x86 specific).

    Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
    Reviewed-by: Paul Durrant <paul@xxxxxxx>
    Reviewed-by: Alex Bennée <alex.bennee@xxxxxxxxxx>
    CC: Julien Grall <julien.grall@xxxxxxx>

    [On Arm only]
    Tested-by: Wei Chen <Wei.Chen@xxxxxxx>
---
 xen/common/ioreq.c               | 60 ++++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/domain.h |  8 ------
 xen/include/xen/sched.h          | 10 +++++++
 3 files changed, 40 insertions(+), 38 deletions(-)

diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 7320f23dfb..4cb26e6889 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -38,13 +38,13 @@ static void set_ioreq_server(struct domain *d, unsigned int id,
                              struct ioreq_server *s)
 {
     ASSERT(id < MAX_NR_IOREQ_SERVERS);
-    ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
+    ASSERT(!s || !d->ioreq_server.server[id]);

-    d->arch.hvm.ioreq_server.server[id] = s;
+    d->ioreq_server.server[id] = s;
 }

 #define GET_IOREQ_SERVER(d, id) \
-    (d)->arch.hvm.ioreq_server.server[id]
+    (d)->ioreq_server.server[id]

 static struct ioreq_server *get_ioreq_server(const struct domain *d,
                                              unsigned int id)
@@ -285,7 +285,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
     unsigned int id;
     bool found = false;

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -296,7 +296,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
         }
     }

-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);

     return found;
 }
@@ -606,7 +606,7 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
         return -ENOMEM;

     domain_pause(d);
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
     {
@@ -634,13 +634,13 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
     if ( id )
         *id = i;

-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     domain_unpause(d);

     return 0;

  fail:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     domain_unpause(d);

     xfree(s);
@@ -652,7 +652,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     struct ioreq_server *s;
     int rc;

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     s = get_ioreq_server(d, id);

@@ -684,7 +684,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     rc = 0;

  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);

     return rc;
 }
@@ -697,7 +697,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     struct ioreq_server *s;
     int rc;

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     s = get_ioreq_server(d, id);

@@ -731,7 +731,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     rc = 0;

  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);

     return rc;
 }
@@ -744,7 +744,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,

     ASSERT(is_hvm_domain(d));

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     s = get_ioreq_server(d, id);

@@ -782,7 +782,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
     }

  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);

     return rc;
 }
@@ -798,7 +798,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     s = get_ioreq_server(d, id);

@@ -834,7 +834,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     rc = rangeset_add_range(r, start, end);

  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);

     return rc;
 }
@@ -850,7 +850,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     s = get_ioreq_server(d, id);

@@ -886,7 +886,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     rc = rangeset_remove_range(r, start, end);

  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);

     return rc;
 }
@@ -911,7 +911,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
         return -EINVAL;

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     s = get_ioreq_server(d, id);

@@ -926,7 +926,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     rc = arch_ioreq_server_map_mem_type(d, s, flags);

  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);

     if ( rc == 0 )
         arch_ioreq_server_map_mem_type_completed(d, s, flags);
@@ -940,7 +940,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     struct ioreq_server *s;
     int rc;

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     s = get_ioreq_server(d, id);

@@ -964,7 +964,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     rc = 0;

  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);

     return rc;
 }
@@ -974,7 +974,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
     unsigned int id;
     int rc;

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -983,7 +983,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
             goto fail;
     }

-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);

     return 0;

@@ -998,7 +998,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
         hvm_ioreq_server_remove_vcpu(s, v);
     }

-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);

     return rc;
 }
@@ -1008,12 +1008,12 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
     struct ioreq_server *s;
     unsigned int id;

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     FOR_EACH_IOREQ_SERVER(d, id, s)
         hvm_ioreq_server_remove_vcpu(s, v);

-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 }

 void hvm_destroy_all_ioreq_servers(struct domain *d)
@@ -1024,7 +1024,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
     if ( !arch_ioreq_server_destroy_all(d) )
         return;

-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);

     /* No need to domain_pause() as the domain is being torn down */

@@ -1042,7 +1042,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
         xfree(s);
     }

-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 }

 struct ioreq_server *hvm_select_ioreq_server(struct domain *d,
@@ -1274,7 +1274,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)

 void hvm_ioreq_init(struct domain *d)
 {
-    spin_lock_init(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_init(&d->ioreq_server.lock);

     arch_ioreq_domain_init(d);
 }
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index f26c1a2e2d..25af518e4a 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -61,8 +61,6 @@ struct hvm_pi_ops {
     void (*vcpu_block)(struct vcpu *);
 };

-#define MAX_NR_IOREQ_SERVERS 8
-
 struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
     struct {
@@ -71,12 +69,6 @@ struct hvm_domain {
         unsigned long legacy_mask; /* indexed by HVM param number */
     } ioreq_gfn;

-    /* Lock protects all other values in the sub-struct and the default */
-    struct {
-        spinlock_t lock;
-        struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
-    } ioreq_server;
-
     /* Cached CF8 for guest PCI config cycles */
     uint32_t pci_cf8;

diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index da19f4e9f6..f437ee31b2 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -318,6 +318,8 @@ struct sched_unit {

 struct evtchn_port_ops;

+#define MAX_NR_IOREQ_SERVERS 8
+
 struct domain
 {
     domid_t domain_id;
@@ -534,6 +536,14 @@ struct domain
         unsigned int val;
         struct vcpu *vcpu;
     } teardown;
+
+#ifdef CONFIG_IOREQ_SERVER
+    /* Lock protects all other values in the sub-struct */
+    struct {
+        spinlock_t lock;
+        struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
+    } ioreq_server;
+#endif
 };

 static inline struct page_list_head *page_to_list(
--
generated by git-patchbot for /home/xen/git/xen.git#master
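
To see what the move buys common code, here is a minimal, self-contained C
sketch of the resulting layout. It is an illustration only, not Xen code:
spinlock_t and struct ioreq_server are trimmed-down stand-ins, and main() is
just a driver. The point is that the per-domain ioreq server table and its
lock now hang directly off struct domain, so a lookup helper modelled on the
patched get_ioreq_server()/GET_IOREQ_SERVER() no longer reaches through
d->arch.hvm.

#include <stdio.h>
#include <stddef.h>

#define MAX_NR_IOREQ_SERVERS 8

typedef int spinlock_t;                 /* stand-in for Xen's spinlock_t */

struct ioreq_server { int id; };        /* trimmed-down stand-in */

struct domain {
    /* ... other common fields ... */
    struct {
        spinlock_t lock;                /* protects server[] */
        struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
    } ioreq_server;                     /* was: d->arch.hvm.ioreq_server */
};

/* Mirrors the patched GET_IOREQ_SERVER()/get_ioreq_server() pattern. */
#define GET_IOREQ_SERVER(d, id) ((d)->ioreq_server.server[id])

static struct ioreq_server *get_ioreq_server(const struct domain *d,
                                             unsigned int id)
{
    if ( id >= MAX_NR_IOREQ_SERVERS )
        return NULL;

    return GET_IOREQ_SERVER(d, id);
}

int main(void)
{
    static struct ioreq_server s = { .id = 0 };
    struct domain d = { .ioreq_server.server[0] = &s };

    printf("server[0] %s\n", get_ioreq_server(&d, 0) ? "present" : "absent");
    return 0;
}

Compiled standalone this prints "server[0] present"; in Xen itself the
sub-struct is only present when CONFIG_IOREQ_SERVER is enabled, as the
sched.h hunk above shows.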