
Re: [Xen-devel] [PATCH v3] xen/HVM: atomically access pointers in bufioreq handling



On Fri, 24 Jul 2015, Jan Beulich wrote:
> The number of slots per page being 511 (i.e. not a power of two) means
> that operation will likely be disturbed once the (32-bit) read and write
> indexes wrap around at 2^32. The hypervisor side gets I/O req server creation
> extended so we can indicate that we're using suitable atomic accesses
> where needed, allowing it to atomically canonicalize both pointers when
> both have gone through at least one cycle.
> 
> The Xen side counterpart (which is not a functional prereq to this
> change, albeit a build one) went in already (commit b7007bc6f9).
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

I have queued it up for QEMU 2.5. I'll backport it to Xen 4.6.
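
For anyone following along, the wrap problem is easy to demonstrate.
Here is a standalone sketch (not part of the patch): because 2^32 is not
a multiple of 511, the slot computed as index % IOREQ_BUFFER_SLOT_NUM is
discontinuous across the wrap of a 32-bit index.

  /* Standalone illustration, not patch code.  2^9 = 512 is congruent to
   * 1 mod 511, so 2^32 = (2^9)^3 * 2^5 is congruent to 32, not 0. */
  #include <stdint.h>
  #include <stdio.h>

  #define IOREQ_BUFFER_SLOT_NUM 511

  int main(void)
  {
      uint32_t before = UINT32_MAX;  /* last index value before the wrap */
      uint32_t after = before + 1;   /* wraps around to 0 */

      printf("%u\n", before % IOREQ_BUFFER_SLOT_NUM); /* 31 */
      printf("%u\n", after % IOREQ_BUFFER_SLOT_NUM);  /* 0, yet the ring
                                                         expects slot 32 */
      return 0;
  }

With 512 slots the slot mapping would survive the wrap unchanged; with
511 the consumer would suddenly read from the wrong slot.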


> v3: Check for Xen 4.6 (configure and build time).
> v2: Adjust description.
> 
> --- a/configure
> +++ b/configure
> @@ -1882,6 +1882,33 @@ int main(void) {
>    xc_gnttab_open(NULL, 0);
>    xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0);
>    xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000);
> +  xc_hvm_create_ioreq_server(xc, 0, HVM_IOREQSRV_BUFIOREQ_ATOMIC, NULL);
> +  return 0;
> +}
> +EOF
> +      compile_prog "" "$xen_libs"
> +    then
> +    xen_ctrl_version=460
> +    xen=yes
> +
> +  # Xen 4.5
> +  elif
> +      cat > $TMPC <<EOF &&
> +#include <xenctrl.h>
> +#include <xenstore.h>
> +#include <stdint.h>
> +#include <xen/hvm/hvm_info_table.h>
> +#if !defined(HVM_MAX_VCPUS)
> +# error HVM_MAX_VCPUS not defined
> +#endif
> +int main(void) {
> +  xc_interface *xc;
> +  xs_daemon_open();
> +  xc = xc_interface_open(0, 0, 0);
> +  xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0);
> +  xc_gnttab_open(NULL, 0);
> +  xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0);
> +  xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000);
>    xc_hvm_create_ioreq_server(xc, 0, 0, NULL);
>    return 0;
>  }
> --- a/xen-hvm.c
> +++ b/xen-hvm.c
> @@ -963,19 +963,30 @@ static void handle_ioreq(XenIOState *sta
>  
>  static int handle_buffered_iopage(XenIOState *state)
>  {
> +    buffered_iopage_t *buf_page = state->buffered_io_page;
>      buf_ioreq_t *buf_req = NULL;
>      ioreq_t req;
>      int qw;
>  
> -    if (!state->buffered_io_page) {
> +    if (!buf_page) {
>          return 0;
>      }
>  
>      memset(&req, 0x00, sizeof(req));
>  
> -    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
> -        buf_req = &state->buffered_io_page->buf_ioreq[
> -            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
> +    for (;;) {
> +        uint32_t rdptr = buf_page->read_pointer, wrptr;
> +
> +        xen_rmb();
> +        wrptr = buf_page->write_pointer;
> +        xen_rmb();
> +        if (rdptr != buf_page->read_pointer) {
> +            continue;
> +        }
> +        if (rdptr == wrptr) {
> +            break;
> +        }
> +        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
>          req.size = 1UL << buf_req->size;
>          req.count = 1;
>          req.addr = buf_req->addr;
> @@ -987,15 +998,14 @@ static int handle_buffered_iopage(XenIOS
>          req.data_is_ptr = 0;
>          qw = (req.size == 8);
>          if (qw) {
> -            buf_req = &state->buffered_io_page->buf_ioreq[
> -                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
> +            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
> +                                           IOREQ_BUFFER_SLOT_NUM];
>              req.data |= ((uint64_t)buf_req->data) << 32;
>          }
>  
>          handle_ioreq(state, &req);
>  
> -        xen_mb();
> -        state->buffered_io_page->read_pointer += qw ? 2 : 1;
> +        atomic_add(&buf_page->read_pointer, qw + 1);
>      }
>  
>      return req.count;
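
A note for other reviewers on the loop above: read_pointer is re-read
after the barriers because, as the description says, the hypervisor may
concurrently canonicalize both pointers (subtract a whole number of
cycles from each) once both have completed at least one cycle; without
the re-check a stale rdptr could be paired with a fresh wrptr.  The
atomic_add() is what makes that canonicalization safe.  A minimal
sketch of the hypervisor-side idea follows -- names and layout are
illustrative, not the exact Xen code:

  #include <stdatomic.h>
  #include <stdint.h>

  #define IOREQ_BUFFER_SLOT_NUM 511

  union bufioreq_ptrs {                /* hypothetical layout: the two */
      uint64_t full;                   /* 32-bit pointers share one    */
      struct { uint32_t rd, wr; } p;   /* 64-bit word                  */
  };

  static void canonicalize(_Atomic uint64_t *ptrs)
  {
      union bufioreq_ptrs old, upd;

      old.full = atomic_load(ptrs);
      while (old.p.rd >= IOREQ_BUFFER_SLOT_NUM &&
             old.p.wr >= IOREQ_BUFFER_SLOT_NUM) {
          uint32_t n = old.p.rd / IOREQ_BUFFER_SLOT_NUM;

          upd = old;
          upd.p.rd -= n * IOREQ_BUFFER_SLOT_NUM;
          upd.p.wr -= n * IOREQ_BUFFER_SLOT_NUM;
          /* On failure old.full is reloaded with the current value and
           * the loop re-evaluates, e.g. after the guest-side
           * atomic_add() won the race. */
          if (atomic_compare_exchange_weak(ptrs, &old.full, upd.full))
              break;
      }
  }

This also shows why the read side must be a single atomic add rather
than a plain read-modify-write: a plain store could overwrite the
hypervisor's subtraction and push read_pointer a full cycle ahead.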
> --- a/include/hw/xen/xen_common.h
> +++ b/include/hw/xen/xen_common.h
> @@ -186,6 +186,15 @@ static inline int xen_get_vmport_regs_pf
>  }
>  #endif
>  
> +/* Xen before 4.6 */
> +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
> +
> +#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
> +#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
> +#endif
> +
> +#endif
> +
>  /* Xen before 4.5 */
>  #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
>  
> @@ -370,7 +379,8 @@ static inline void xen_unmap_pcidev(XenX
>  static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
>                                            ioservid_t *ioservid)
>  {
> -    int rc = xc_hvm_create_ioreq_server(xc, dom, 1, ioservid);
> +    int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
> +                                        ioservid);
>  
>      if (rc == 0) {
>          trace_xen_ioreq_server_create(*ioservid);
> 
> 
> 
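
One more note for readers without the Xen tree at hand: the third
argument to xc_hvm_create_ioreq_server() selects how the buffered
ioreq ring is handled.  As far as I recall, the Xen 4.6 public
hvm_op.h defines the modes below (the ATOMIC fallback in the
xen_common.h hunk above matches) -- check your headers:

  #define HVM_IOREQSRV_BUFIOREQ_OFF    0  /* no buffered ring */
  #define HVM_IOREQSRV_BUFIOREQ_LEGACY 1  /* the old literal 1 above */
  #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2  /* pointer pair may be
                                             canonicalized atomically */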
