From 28dfe010a6c1b791d933a3cbd41fcbdfcfc009c1 Mon Sep 17 00:00:00 2001 From: Don Slutz Date: Thu, 25 Sep 2014 09:07:39 -0400 Subject: [PATCH 16/19] Add IOREQ_TYPE_VMWARE_PORT This adds synchronization of the 6 vcpu registers (only 32bits of them) that vmport.c needs between Xen and QEMU. This is to avoid a 2nd and 3rd exchange between QEMU and Xen to fetch and put these 6 vcpu registers used by the code in vmport.c and vmmouse.c In the tools, enable usage of QEMU's vmport code. The currently most useful VMware port support that QEMU has is the VMware mouse support. Xorg included a VMware mouse support that uses absolute mode. This makes using a mouse in X11 much nicer. Signed-off-by: Don Slutz --- tools/libxc/xc_hvm_build_x86.c | 5 +- tools/libxl/libxl_dm.c | 4 + xen/arch/x86/hvm/emulate.c | 52 +++++++++++- xen/arch/x86/hvm/hvm.c | 170 ++++++++++++++++++++++++++++++++++----- xen/arch/x86/hvm/io.c | 16 ++++ xen/arch/x86/hvm/vmware/vmport.c | 9 ++- xen/include/asm-x86/hvm/domain.h | 3 +- xen/include/asm-x86/hvm/hvm.h | 1 + xen/include/public/hvm/hvm_op.h | 5 ++ xen/include/public/hvm/ioreq.h | 17 ++++ xen/include/public/hvm/params.h | 4 +- 11 files changed, 259 insertions(+), 27 deletions(-) diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c index c81a25b..e338667 100644 --- a/tools/libxc/xc_hvm_build_x86.c +++ b/tools/libxc/xc_hvm_build_x86.c @@ -46,7 +46,8 @@ #define SPECIALPAGE_IOREQ 5 #define SPECIALPAGE_IDENT_PT 6 #define SPECIALPAGE_CONSOLE 7 -#define NR_SPECIAL_PAGES 8 +#define SPECIALPAGE_VMPORT_REGS 8 +#define NR_SPECIAL_PAGES 9 #define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x)) #define NR_IOREQ_SERVER_PAGES 8 @@ -493,6 +494,8 @@ static int setup_guest(xc_interface *xch, special_pfn(SPECIALPAGE_BUFIOREQ)); xc_hvm_param_set(xch, dom, HVM_PARAM_IOREQ_PFN, special_pfn(SPECIALPAGE_IOREQ)); + xc_hvm_param_set(xch, dom, HVM_PARAM_VMPORT_REGS_PFN, + special_pfn(SPECIALPAGE_VMPORT_REGS)); xc_hvm_param_set(xch, dom, 
HVM_PARAM_CONSOLE_PFN, special_pfn(SPECIALPAGE_CONSOLE)); xc_hvm_param_set(xch, dom, HVM_PARAM_PAGING_RING_PFN, diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c index c27f9a4..620013c 100644 --- a/tools/libxl/libxl_dm.c +++ b/tools/libxl/libxl_dm.c @@ -765,6 +765,10 @@ static char ** libxl__build_device_model_args_new(libxl__gc *gc, machinearg, max_ram_below_4g); } } + if (libxl_defbool_val(c_info->vmware_port)) { + machinearg = libxl__sprintf(gc, "%s,vmport=on", + machinearg); + } flexarray_append(dm_args, machinearg); for (i = 0; b_info->extra_hvm && b_info->extra_hvm[i] != NULL; i++) flexarray_append(dm_args, b_info->extra_hvm[i]); diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index f271dfc..eeaa3ba 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -219,6 +219,7 @@ static int hvmemul_do_io( vio->io_state = HVMIO_handle_mmio_awaiting_completion; break; case X86EMUL_UNHANDLEABLE: + if ( vmport_check_port(p.addr) ) { struct hvm_ioreq_server *s = hvm_select_ioreq_server(curr->domain, &p); @@ -238,8 +239,50 @@ static int hvmemul_do_io( else if ( p_data == NULL ) rc = X86EMUL_OKAY; } - break; } + else + { + struct hvm_ioreq_server *s; + vmware_regs_t *vr; + + BUILD_BUG_ON(sizeof(ioreq_t) < sizeof(vmware_regs_t)); + + p.type = IOREQ_TYPE_VMWARE_PORT; + s = hvm_select_ioreq_server(curr->domain, &p); + vr = get_vmport_regs_any(s, curr); + + /* + * If there is no suitable backing DM, just ignore accesses. If + * we do not have access to registers to pass to QEMU, just + * ignore access. 
+ */ + if ( !s || !vr ) + { + hvm_complete_assist_req(&p); + rc = X86EMUL_OKAY; + vio->io_state = HVMIO_none; + } + else + { + struct cpu_user_regs *regs = guest_cpu_user_regs(); + + p.data = regs->rax; + vr->ebx = regs->_ebx; + vr->ecx = regs->_ecx; + vr->edx = regs->_edx; + vr->esi = regs->_esi; + vr->edi = regs->_edi; + + vio->io_state = HVMIO_handle_pio_awaiting_completion; + if ( !hvm_send_assist_req(s, &p) ) + { + rc = X86EMUL_RETRY; + vio->io_state = HVMIO_none; + } + /* else leave rc as X86EMUL_UNHANDLEABLE for below. */ + } + } + break; default: BUG(); } @@ -248,6 +291,13 @@ static int hvmemul_do_io( { if ( ram_page ) put_page(ram_page); + /* + * If rc is still X86EMUL_UNHANDLEABLE, then we are of + * type IOREQ_TYPE_VMWARE_PORT, so completion in + * hvm_io_assist() with no re-emulation required + */ + if ( rc == X86EMUL_UNHANDLEABLE ) + rc = X86EMUL_OKAY; return rc; } diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 1000a4f..59267dd 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -391,6 +391,47 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v) return &p->vcpu_ioreq[v->vcpu_id]; } +static vmware_regs_t *get_vmport_regs_one(struct hvm_ioreq_server *s, + struct vcpu *v) +{ + struct hvm_ioreq_vcpu *sv; + + list_for_each_entry ( sv, + &s->ioreq_vcpu_list, + list_entry ) + { + if ( sv->vcpu == v ) + { + shared_vmport_iopage_t *p = s->vmport_ioreq.va; + if ( !p ) + return NULL; + return &p->vcpu_vmport_regs[v->vcpu_id]; + } + } + return NULL; +} + +vmware_regs_t *get_vmport_regs_any(struct hvm_ioreq_server *s, struct vcpu *v) +{ + struct domain *d = v->domain; + + ASSERT((v == current) || !vcpu_runnable(v)); + + if ( s ) + return get_vmport_regs_one(s, v); + + list_for_each_entry ( s, + &d->arch.hvm_domain.ioreq_server.list, + list_entry ) + { + vmware_regs_t *ret = get_vmport_regs_one(s, v); + + if ( ret ) + return ret; + } + return NULL; +} + bool_t hvm_io_pending(struct vcpu *v) { struct domain *d 
= v->domain; @@ -500,22 +541,56 @@ static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn) clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask); } -static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf) +typedef enum { + IOREQ_PAGE_TYPE_IOREQ, + IOREQ_PAGE_TYPE_BUFIOREQ, + IOREQ_PAGE_TYPE_VMPORT, +} ioreq_page_type_t; + +static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, ioreq_page_type_t buf) { - struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq; + struct hvm_ioreq_page *iorp = NULL; + + switch ( buf ) + { + case IOREQ_PAGE_TYPE_IOREQ: + iorp = &s->ioreq; + break; + case IOREQ_PAGE_TYPE_BUFIOREQ: + iorp = &s->bufioreq; + break; + case IOREQ_PAGE_TYPE_VMPORT: + iorp = &s->vmport_ioreq; + break; + } + ASSERT(iorp); destroy_ring_for_helper(&iorp->va, iorp->page); } static int hvm_map_ioreq_page( - struct hvm_ioreq_server *s, bool_t buf, unsigned long gmfn) + struct hvm_ioreq_server *s, ioreq_page_type_t buf, unsigned long gmfn) { struct domain *d = s->domain; - struct hvm_ioreq_page *iorp = buf ? 
&s->bufioreq : &s->ioreq; + struct hvm_ioreq_page *iorp = NULL; struct page_info *page; void *va; int rc; + switch ( buf ) + { + case IOREQ_PAGE_TYPE_IOREQ: + iorp = &s->ioreq; + break; + case IOREQ_PAGE_TYPE_BUFIOREQ: + iorp = &s->bufioreq; + break; + case IOREQ_PAGE_TYPE_VMPORT: + iorp = &s->vmport_ioreq; + break; + } + ASSERT(iorp); + if ( (rc = prepare_ring_for_helper(d, gmfn, &page, &va)) ) return rc; @@ -734,7 +809,8 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s, bool_t is_default, bool_t handle_bufioreq) { struct domain *d = s->domain; - unsigned long ioreq_pfn, bufioreq_pfn; + unsigned long ioreq_pfn, bufioreq_pfn = 0; + unsigned long vmport_ioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_VMPORT_REGS_PFN]; int rc; if ( is_default ) @@ -762,21 +838,29 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s, } } - rc = hvm_map_ioreq_page(s, 0, ioreq_pfn); + rc = hvm_map_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ, ioreq_pfn); if ( rc ) goto fail3; if ( handle_bufioreq ) { - rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn); + rc = hvm_map_ioreq_page(s, IOREQ_PAGE_TYPE_BUFIOREQ, bufioreq_pfn); if ( rc ) goto fail4; } + rc = hvm_map_ioreq_page(s, IOREQ_PAGE_TYPE_VMPORT, vmport_ioreq_pfn); + if ( rc ) + goto fail5; + return 0; +fail5: + if ( handle_bufioreq ) + hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_BUFIOREQ); + fail4: - hvm_unmap_ioreq_page(s, 0); + hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ); fail3: if ( !is_default && handle_bufioreq ) @@ -795,11 +879,15 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s, { struct domain *d = s->domain; bool_t handle_bufioreq = ( s->bufioreq.va != NULL ); + bool_t handle_vmport_ioreq = ( s->vmport_ioreq.va != NULL ); + + if ( handle_vmport_ioreq ) + hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_VMPORT); if ( handle_bufioreq ) - hvm_unmap_ioreq_page(s, 1); + hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_BUFIOREQ); - hvm_unmap_ioreq_page(s, 0); + hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ); if 
( !is_default ) { @@ -834,12 +922,38 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s, for ( i = 0; i < NR_IO_RANGE_TYPES; i++ ) { char *name; + char *type_name = NULL; + unsigned int limit; + + switch ( i ) + { + case HVMOP_IO_RANGE_PORT: + type_name = "port"; + limit = MAX_NR_IO_RANGES; + break; + case HVMOP_IO_RANGE_MEMORY: + type_name = "memory"; + limit = MAX_NR_IO_RANGES; + break; + case HVMOP_IO_RANGE_PCI: + type_name = "pci"; + limit = MAX_NR_IO_RANGES; + break; + case HVMOP_IO_RANGE_VMWARE_PORT: + type_name = "VMware port"; + limit = 1; + break; + case HVMOP_IO_RANGE_TIMEOFFSET: + type_name = "timeoffset"; + limit = 1; + break; + default: + break; + } + if ( !type_name ) + continue; - rc = asprintf(&name, "ioreq_server %d %s", s->id, - (i == HVMOP_IO_RANGE_PORT) ? "port" : - (i == HVMOP_IO_RANGE_MEMORY) ? "memory" : - (i == HVMOP_IO_RANGE_PCI) ? "pci" : - ""); + rc = asprintf(&name, "ioreq_server %d %s", s->id, type_name); if ( rc ) goto fail; @@ -852,7 +966,12 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s, if ( !s->range[i] ) goto fail; - rangeset_limit(s->range[i], MAX_NR_IO_RANGES); + rangeset_limit(s->range[i], limit); + + /* VMware port */ + if ( i == HVMOP_IO_RANGE_VMWARE_PORT && + s->domain->arch.hvm_domain.is_vmware_port_enabled ) + rc = rangeset_add_range(s->range[i], 1, 1); } done: @@ -1154,6 +1273,8 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id, case HVMOP_IO_RANGE_PORT: case HVMOP_IO_RANGE_MEMORY: case HVMOP_IO_RANGE_PCI: + case HVMOP_IO_RANGE_VMWARE_PORT: + case HVMOP_IO_RANGE_TIMEOFFSET: r = s->range[type]; break; @@ -1205,6 +1326,8 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id, case HVMOP_IO_RANGE_PORT: case HVMOP_IO_RANGE_MEMORY: case HVMOP_IO_RANGE_PCI: + case HVMOP_IO_RANGE_VMWARE_PORT: + case HVMOP_IO_RANGE_TIMEOFFSET: r = s->range[type]; break; @@ -2428,9 +2551,6 @@ struct hvm_ioreq_server 
*hvm_select_ioreq_server(struct domain *d, if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) ) return NULL; - if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO ) - return d->arch.hvm_domain.default_ioreq_server; - cf8 = d->arch.hvm_domain.pci_cf8; if ( p->type == IOREQ_TYPE_PIO && @@ -2473,7 +2593,11 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d, BUILD_BUG_ON(IOREQ_TYPE_PIO != HVMOP_IO_RANGE_PORT); BUILD_BUG_ON(IOREQ_TYPE_COPY != HVMOP_IO_RANGE_MEMORY); BUILD_BUG_ON(IOREQ_TYPE_PCI_CONFIG != HVMOP_IO_RANGE_PCI); + BUILD_BUG_ON(IOREQ_TYPE_VMWARE_PORT != HVMOP_IO_RANGE_VMWARE_PORT); + BUILD_BUG_ON(IOREQ_TYPE_TIMEOFFSET != HVMOP_IO_RANGE_TIMEOFFSET); r = s->range[type]; + if ( !r ) + break; switch ( type ) { @@ -2500,6 +2624,13 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d, } break; + case IOREQ_TYPE_VMWARE_PORT: + case IOREQ_TYPE_TIMEOFFSET: + /* The 'special' range of [1,1] is checked for being enabled */ + if ( rangeset_contains_singleton(r, 1) ) + return s; + + break; } } @@ -2661,6 +2792,7 @@ void hvm_complete_assist_req(ioreq_t *p) case IOREQ_TYPE_PCI_CONFIG: ASSERT_UNREACHABLE(); break; + case IOREQ_TYPE_VMWARE_PORT: case IOREQ_TYPE_COPY: case IOREQ_TYPE_PIO: if ( p->dir == IOREQ_READ ) diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c index 68fb890..7684cf0 100644 --- a/xen/arch/x86/hvm/io.c +++ b/xen/arch/x86/hvm/io.c @@ -192,6 +192,22 @@ void hvm_io_assist(ioreq_t *p) (void)handle_mmio(); break; case HVMIO_handle_pio_awaiting_completion: + if ( p->type == IOREQ_TYPE_VMWARE_PORT ) + { + vmware_regs_t *vr = get_vmport_regs_any(NULL, curr); + + if ( vr ) + { + struct cpu_user_regs *regs = guest_cpu_user_regs(); + + /* Only change the 32bit part of the register */ + regs->_ebx = vr->ebx; + regs->_ecx = vr->ecx; + regs->_edx = vr->edx; + regs->_esi = vr->esi; + regs->_edi = vr->edi; + } + } if ( vio->io_size == 4 ) /* Needs zero extension. 
*/ guest_cpu_user_regs()->rax = (uint32_t)p->data; else diff --git a/xen/arch/x86/hvm/vmware/vmport.c b/xen/arch/x86/hvm/vmware/vmport.c index 8a05c4b..f3867d6 100644 --- a/xen/arch/x86/hvm/vmware/vmport.c +++ b/xen/arch/x86/hvm/vmware/vmport.c @@ -115,7 +115,8 @@ static int vmport_ioport(int dir, uint32_t port, uint32_t bytes, uint32_t *val) regs->_ecx = 1000000; break; default: - break; + /* Let backing DM handle */ + return X86EMUL_UNHANDLEABLE; } if ( dir == IOREQ_READ ) *val = new_eax; @@ -134,10 +135,10 @@ void vmport_register(struct domain *d) int vmport_check_port(unsigned int port) { struct vcpu *curr = current; - struct domain *d = curr->domain; + struct domain *currd = curr->domain; - if ( is_hvm_domain(d) && d->arch.hvm_domain.is_vmware_port_enabled && - port == BDOOR_PORT ) + if ( port == BDOOR_PORT && is_hvm_domain(currd) && + currd->arch.hvm_domain.is_vmware_port_enabled ) return 0; return 1; } diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h index ab0e4cf..248dcee 100644 --- a/xen/include/asm-x86/hvm/domain.h +++ b/xen/include/asm-x86/hvm/domain.h @@ -48,7 +48,7 @@ struct hvm_ioreq_vcpu { evtchn_port_t ioreq_evtchn; }; -#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1) +#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_VMWARE_PORT + 1) #define MAX_NR_IO_RANGES 256 struct hvm_ioreq_server { @@ -63,6 +63,7 @@ struct hvm_ioreq_server { ioservid_t id; struct hvm_ioreq_page ioreq; struct list_head ioreq_vcpu_list; + struct hvm_ioreq_page vmport_ioreq; struct hvm_ioreq_page bufioreq; /* Lock to serialize access to buffered ioreq ring */ diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index 3cccfff..71d934d 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -536,6 +536,7 @@ extern bool_t opt_hvm_fep; void vmport_register(struct domain *d); int vmport_check_port(unsigned int port); +vmware_regs_t *get_vmport_regs_any(struct hvm_ioreq_server *s, struct vcpu *v); #endif /* 
__ASM_X86_HVM_HVM_H__ */ diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h index cde3571..2dcafc3 100644 --- a/xen/include/public/hvm/hvm_op.h +++ b/xen/include/public/hvm/hvm_op.h @@ -314,6 +314,9 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t); * * NOTE: unless an emulation request falls entirely within a range mapped * by a secondary emulator, it will not be passed to that emulator. + * + * NOTE: The 'special' range of [1,1] is what is checked for on + * TIMEOFFSET and VMWARE_PORT. */ #define HVMOP_map_io_range_to_ioreq_server 19 #define HVMOP_unmap_io_range_from_ioreq_server 20 @@ -324,6 +327,8 @@ struct xen_hvm_io_range { # define HVMOP_IO_RANGE_PORT 0 /* I/O port range */ # define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */ # define HVMOP_IO_RANGE_PCI 2 /* PCI segment/bus/dev/func range */ +# define HVMOP_IO_RANGE_TIMEOFFSET 7 /* TIMEOFFSET special range */ +# define HVMOP_IO_RANGE_VMWARE_PORT 9 /* VMware port special range */ uint64_aligned_t start, end; /* IN - inclusive start and end of range */ }; typedef struct xen_hvm_io_range xen_hvm_io_range_t; diff --git a/xen/include/public/hvm/ioreq.h b/xen/include/public/hvm/ioreq.h index 5b5fedf..2d9dcbe 100644 --- a/xen/include/public/hvm/ioreq.h +++ b/xen/include/public/hvm/ioreq.h @@ -37,6 +37,7 @@ #define IOREQ_TYPE_PCI_CONFIG 2 #define IOREQ_TYPE_TIMEOFFSET 7 #define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ +#define IOREQ_TYPE_VMWARE_PORT 9 /* pio + vmport registers */ /* * VMExit dispatcher should cooperate with instruction decoder to @@ -48,6 +49,8 @@ * * 63....48|47..40|39..35|34..32|31........0 * SEGMENT |BUS |DEV |FN |OFFSET + * + * For I/O type IOREQ_TYPE_VMWARE_PORT also use the vmware_regs. 
*/ struct ioreq { uint64_t addr; /* physical address */ @@ -66,11 +69,25 @@ struct ioreq { }; typedef struct ioreq ioreq_t; +struct vmware_regs { + uint32_t esi; + uint32_t edi; + uint32_t ebx; + uint32_t ecx; + uint32_t edx; +}; +typedef struct vmware_regs vmware_regs_t; + struct shared_iopage { struct ioreq vcpu_ioreq[1]; }; typedef struct shared_iopage shared_iopage_t; +struct shared_vmport_iopage { + struct vmware_regs vcpu_vmport_regs[1]; +}; +typedef struct shared_vmport_iopage shared_vmport_iopage_t; + struct buf_ioreq { uint8_t type; /* I/O type */ uint8_t pad:1; diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h index 974d3a4..2f6ccf4 100644 --- a/xen/include/public/hvm/params.h +++ b/xen/include/public/hvm/params.h @@ -50,6 +50,8 @@ #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 +/* Extra vmport PFN. */ +#define HVM_PARAM_VMPORT_REGS_PFN 36 #define HVM_PARAM_BUFIOREQ_PFN 6 #define HVM_PARAM_BUFIOREQ_EVTCHN 26 @@ -197,6 +199,6 @@ /* emulated VMware Hardware Version */ #define HVM_PARAM_VMWARE_HWVER 35 -#define HVM_NR_PARAMS 36 +#define HVM_NR_PARAMS 37 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ -- 1.7.11.7