
Re: [Xen-devel] [RFC PATCH 2/5] ioreq-server: create basic ioreq server abstraction.


  • To: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>
  • From: Paul Durrant <Paul.Durrant@xxxxxxxxxx>
  • Date: Thu, 30 Jan 2014 15:17:48 +0000
  • Accept-language: en-GB, en-US
  • Cc: "xen-devel@xxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxx>
  • Delivery-date: Thu, 30 Jan 2014 15:18:12 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>
  • Thread-index: AQHPHcZaQkXrYw5nnkO937nxcase55qdTKOAgAARfhA=
  • Thread-topic: [Xen-devel] [RFC PATCH 2/5] ioreq-server: create basic ioreq server abstraction.

> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 30 January 2014 15:04
> To: Paul Durrant
> Cc: xen-devel@xxxxxxxxxxxxx
> Subject: Re: [Xen-devel] [RFC PATCH 2/5] ioreq-server: create basic ioreq
> server abstraction.
> 
> On 30/01/14 14:19, Paul Durrant wrote:
> > Collect together data structures concerning device emulation into
> > a new struct hvm_ioreq_server.
> >
> > Code that deals with the shared and buffered ioreq pages is extracted from
> > functions such as hvm_domain_initialise, hvm_vcpu_initialise and do_hvm_op
> > and consolidated into a set of hvm_ioreq_server_XXX functions.
> >
> > Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> > ---
> >  xen/arch/x86/hvm/hvm.c           |  318 ++++++++++++++++++++++++++------------
> >  xen/include/asm-x86/hvm/domain.h |    9 +-
> >  xen/include/asm-x86/hvm/vcpu.h   |    2 +-
> >  3 files changed, 229 insertions(+), 100 deletions(-)
> >
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index 71a44db..a0eaadb 100644
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -345,16 +345,16 @@ void hvm_migrate_pirqs(struct vcpu *v)
> >      spin_unlock(&d->event_lock);
> >  }
> >
> > -static ioreq_t *get_ioreq(struct vcpu *v)
> > +static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, int id)
> >  {
> > -    struct domain *d = v->domain;
> > -    shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
> > -    ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
> > -    return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
> > +    shared_iopage_t *p = s->ioreq.va;
> > +    ASSERT(p != NULL);
> > +    return &p->vcpu_ioreq[id];
> >  }
> >
> >  void hvm_do_resume(struct vcpu *v)
> >  {
> > +    struct hvm_ioreq_server *s;
> >      ioreq_t *p;
> >
> >      check_wakeup_from_wait();
> > @@ -362,10 +362,14 @@ void hvm_do_resume(struct vcpu *v)
> >      if ( is_hvm_vcpu(v) )
> >          pt_restore_timer(v);
> >
> > -    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
> > -    if ( !(p = get_ioreq(v)) )
> > +    s = v->arch.hvm_vcpu.ioreq_server;
> 
> This assignment can be part of the declaration of 's' (and likewise in
> most later examples).
> 

Whilst that's true, it would make the subsequent patch, where we move to using 
lists, less obvious.

> > +    v->arch.hvm_vcpu.ioreq_server = NULL;
> > +
> > +    if ( !s )
> >          goto check_inject_trap;
> >
> > +    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
> > +    p = get_ioreq(s, v->vcpu_id);
> >      while ( p->state != STATE_IOREQ_NONE )
> >      {
> >          switch ( p->state )
> > @@ -375,7 +379,7 @@ void hvm_do_resume(struct vcpu *v)
> >              break;
> >          case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
> >          case STATE_IOREQ_INPROCESS:
> > -            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
> > +            wait_on_xen_event_channel(p->vp_eport,
> >                                        (p->state != STATE_IOREQ_READY) &&
> >                                        (p->state != STATE_IOREQ_INPROCESS));
> >              break;
> > @@ -398,7 +402,6 @@ void hvm_do_resume(struct vcpu *v)
> >  static void hvm_init_ioreq_page(
> >      struct domain *d, struct hvm_ioreq_page *iorp)
> >  {
> > -    memset(iorp, 0, sizeof(*iorp));
> 
> Is it worth keeping this function?  The two back-to-back
> domain_pause()'s from the callers are redundant.
> 

It actually becomes just the spin_lock_init() in a subsequent patch, roughly as 
sketched below. I left it this way as I did not want to make too many changes 
in one go.
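
For illustration only - a hedged sketch of what the helper ends up looking like 
once the domain_pause() is dropped; the exact signature in the later patch may 
differ:

    static void hvm_init_ioreq_page(
        struct domain *d, struct hvm_ioreq_page *iorp)
    {
        spin_lock_init(&iorp->lock);
    }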

> >      spin_lock_init(&iorp->lock);
> >      domain_pause(d);
> >  }
> > @@ -541,6 +544,167 @@ static int handle_pvh_io(
> >      return X86EMUL_OKAY;
> >  }
> >
> > +static int hvm_init_ioreq_server(struct domain *d)
> > +{
> > +    struct hvm_ioreq_server *s;
> > +    int i;
> > +
> > +    s = xzalloc(struct hvm_ioreq_server);
> > +    if ( !s )
> > +        return -ENOMEM;
> > +
> > +    s->domain = d;
> > +
> > +    for ( i = 0; i < MAX_HVM_VCPUS; i++ )
> > +        s->ioreq_evtchn[i] = -1;
> > +    s->buf_ioreq_evtchn = -1;
> > +
> > +    hvm_init_ioreq_page(d, &s->ioreq);
> > +    hvm_init_ioreq_page(d, &s->buf_ioreq);
> > +
> > +    d->arch.hvm_domain.ioreq_server = s;
> > +    return 0;
> > +}
> > +
> > +static void hvm_deinit_ioreq_server(struct domain *d)
> > +{
> > +    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
> > +
> > +    hvm_destroy_ioreq_page(d, &s->ioreq);
> > +    hvm_destroy_ioreq_page(d, &s->buf_ioreq);
> > +
> > +    xfree(s);
> > +}
> > +
> > +static void hvm_update_ioreq_server_evtchn(struct hvm_ioreq_server *s)
> > +{
> > +    struct domain *d = s->domain;
> > +
> > +    if ( s->ioreq.va != NULL )
> > +    {
> > +        shared_iopage_t *p = s->ioreq.va;
> > +        struct vcpu *v;
> > +
> > +        for_each_vcpu ( d, v )
> > +            p->vcpu_ioreq[v->vcpu_id].vp_eport = s->ioreq_evtchn[v->vcpu_id];
> > +    }
> > +}
> > +
> > +static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s, struct vcpu *v)
> > +{
> > +    int rc;
> > +
> > +    /* Create ioreq event channel. */
> > +    rc = alloc_unbound_xen_event_channel(v, s->domid, NULL);
> > +    if ( rc < 0 )
> > +        goto done;
> > +
> > +    /* Register ioreq event channel. */
> > +    s->ioreq_evtchn[v->vcpu_id] = rc;
> > +
> > +    if ( v->vcpu_id == 0 )
> > +    {
> > +        /* Create bufioreq event channel. */
> > +        rc = alloc_unbound_xen_event_channel(v, s->domid, NULL);
> > +        if ( rc < 0 )
> > +            goto done;
> 
> Skipping hvm_update_ioreq_server_evtchn() even in the case of a
> successfully created ioreq event channel?
> 

Yes, because the error propagates back up to hvm_vcpu_initialise(), so the vcpu 
creation will fail and the skipped update is never observed (see the condensed 
path below).
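
For reference, the relevant path, condensed from the patch above (the comments 
are mine):

    /* hvm_ioreq_server_add_vcpu() */
    rc = alloc_unbound_xen_event_channel(v, s->domid, NULL); /* bufioreq */
    if ( rc < 0 )
        goto done;    /* returns rc < 0 without touching the shared page */

    /* hvm_vcpu_initialise() */
    rc = hvm_ioreq_server_add_vcpu(s, v);
    if ( rc < 0 )
        goto fail6;   /* vcpu creation fails */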

> > +
> > +        s->buf_ioreq_evtchn = rc;
> > +    }
> > +
> > +    hvm_update_ioreq_server_evtchn(s);
> > +    rc = 0;
> > +
> > +done:
> > +    return rc;
> > +}
> > +
> > +static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s, struct vcpu *v)
> > +{
> > +    if ( v->vcpu_id == 0 )
> > +    {
> > +        if ( s->buf_ioreq_evtchn >= 0 )
> > +        {
> > +            free_xen_event_channel(v, s->buf_ioreq_evtchn);
> > +            s->buf_ioreq_evtchn = -1;
> > +        }
> > +    }
> > +
> > +    if ( s->ioreq_evtchn[v->vcpu_id] >= 0 )
> > +    {
> > +        free_xen_event_channel(v, s->ioreq_evtchn[v->vcpu_id]);
> > +        s->ioreq_evtchn[v->vcpu_id] = -1;
> > +    }
> > +}
> > +
> > +static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
> > +                                     int *p_port)
> > +{
> > +    int old_port, new_port;
> > +
> > +    new_port = alloc_unbound_xen_event_channel(v, remote_domid, NULL);
> > +    if ( new_port < 0 )
> > +        return new_port;
> > +
> > +    /* xchg() ensures that only we call free_xen_event_channel(). */
> > +    old_port = xchg(p_port, new_port);
> > +    free_xen_event_channel(v, old_port);
> > +    return 0;
> > +}
> > +
> > +static int hvm_set_ioreq_server_domid(struct hvm_ioreq_server *s, domid_t domid)
> > +{
> > +    struct domain *d = s->domain;
> > +    struct vcpu *v;
> > +    int rc = 0;
> > +
> > +    domain_pause(d);
> > +
> > +    if ( d->vcpu[0] )
> > +    {
> > +        rc = hvm_replace_event_channel(d->vcpu[0], domid, &s->buf_ioreq_evtchn);
> > +        if ( rc < 0 )
> > +            goto done;
> > +    }
> > +
> > +    for_each_vcpu ( d, v )
> > +    {
> > +        rc = hvm_replace_event_channel(v, domid, &s->ioreq_evtchn[v->vcpu_id]);
> > +        if ( rc < 0 )
> > +            goto done;
> > +    }
> > +
> > +    hvm_update_ioreq_server_evtchn(s);
> > +
> > +    s->domid = domid;
> > +
> > +done:
> > +    domain_unpause(d);
> > +
> > +    return rc;
> > +}
> > +
> > +static int hvm_set_ioreq_server_pfn(struct hvm_ioreq_server *s, unsigned long pfn)
> > +{
> > +    struct domain *d = s->domain;
> > +    int rc;
> > +
> > +    rc = hvm_set_ioreq_page(d, &s->ioreq, pfn);
> > +    if ( rc < 0 )
> > +        return rc;
> > +
> > +    hvm_update_ioreq_server_evtchn(s);
> > +
> > +    return 0;
> > +}
> > +
> > +static int hvm_set_ioreq_server_buf_pfn(struct hvm_ioreq_server *s, unsigned long pfn)
> > +{
> > +    struct domain *d = s->domain;
> > +
> > +    return  hvm_set_ioreq_page(d, &s->buf_ioreq, pfn);
> 
> Double space.
> 
> > +}
> > +
> >  int hvm_domain_initialise(struct domain *d)
> >  {
> >      int rc;
> > @@ -608,17 +772,20 @@ int hvm_domain_initialise(struct domain *d)
> >
> >      rtc_init(d);
> >
> > -    hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
> > -    hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
> > +    rc = hvm_init_ioreq_server(d);
> > +    if ( rc != 0 )
> > +        goto fail2;
> >
> >      register_portio_handler(d, 0xe9, 1, hvm_print_line);
> >
> >      rc = hvm_funcs.domain_initialise(d);
> >      if ( rc != 0 )
> > -        goto fail2;
> > +        goto fail3;
> >
> >      return 0;
> >
> > + fail3:
> > +    hvm_deinit_ioreq_server(d);
> >   fail2:
> >      rtc_deinit(d);
> >      stdvga_deinit(d);
> > @@ -642,8 +809,7 @@ void hvm_domain_relinquish_resources(struct domain *d)
> >      if ( hvm_funcs.nhvm_domain_relinquish_resources )
> >          hvm_funcs.nhvm_domain_relinquish_resources(d);
> >
> > -    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
> > -    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
> > +    hvm_deinit_ioreq_server(d);
> >
> >      msixtbl_pt_cleanup(d);
> >
> > @@ -1155,7 +1321,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
> >  {
> >      int rc;
> >      struct domain *d = v->domain;
> > -    domid_t dm_domid;
> > +    struct hvm_ioreq_server *s;
> >
> >      hvm_asid_flush_vcpu(v);
> >
> > @@ -1198,30 +1364,12 @@ int hvm_vcpu_initialise(struct vcpu *v)
> >           && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
> >          goto fail5;
> >
> > -    dm_domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
> > +    s = d->arch.hvm_domain.ioreq_server;
> >
> > -    /* Create ioreq event channel. */
> > -    rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL); /* teardown: none */
> > +    rc = hvm_ioreq_server_add_vcpu(s, v);
> >      if ( rc < 0 )
> >          goto fail6;
> >
> > -    /* Register ioreq event channel. */
> > -    v->arch.hvm_vcpu.xen_port = rc;
> > -
> > -    if ( v->vcpu_id == 0 )
> > -    {
> > -        /* Create bufioreq event channel. */
> > -        rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL); /* teardown: none */
> > -        if ( rc < 0 )
> > -            goto fail6;
> > -        d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] = rc;
> > -    }
> > -
> > -    spin_lock(&d->arch.hvm_domain.ioreq.lock);
> > -    if ( d->arch.hvm_domain.ioreq.va != NULL )
> > -        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
> > -    spin_unlock(&d->arch.hvm_domain.ioreq.lock);
> > -
> >      if ( v->vcpu_id == 0 )
> >      {
> >          /* NB. All these really belong in hvm_domain_initialise(). */
> > @@ -1255,6 +1403,11 @@ int hvm_vcpu_initialise(struct vcpu *v)
> >
> >  void hvm_vcpu_destroy(struct vcpu *v)
> >  {
> > +    struct domain *d = v->domain;
> > +    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
> > +
> > +    hvm_ioreq_server_remove_vcpu(s, v);
> > +
> >      nestedhvm_vcpu_destroy(v);
> >
> >      free_compat_arg_xlat(v);
> > @@ -1266,9 +1419,6 @@ void hvm_vcpu_destroy(struct vcpu *v)
> >          vlapic_destroy(v);
> >
> >      hvm_funcs.vcpu_destroy(v);
> > -
> > -    /* Event channel is already freed by evtchn_destroy(). */
> > -    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
> >  }
> >
> >  void hvm_vcpu_down(struct vcpu *v)
> > @@ -1298,8 +1448,10 @@ void hvm_vcpu_down(struct vcpu *v)
> >  int hvm_buffered_io_send(ioreq_t *p)
> >  {
> >      struct vcpu *v = current;
> > -    struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
> > -    buffered_iopage_t *pg = iorp->va;
> > +    struct domain *d = v->domain;
> > +    struct hvm_ioreq_server *s;
> > +    struct hvm_ioreq_page *iorp;
> > +    buffered_iopage_t *pg;
> >      buf_ioreq_t bp;
> >      /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
> >      int qw = 0;
> > @@ -1307,6 +1459,13 @@ int hvm_buffered_io_send(ioreq_t *p)
> >      /* Ensure buffered_iopage fits in a page */
> >      BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
> >
> > +    s = d->arch.hvm_domain.ioreq_server;
> > +    if ( !s )
> > +        return 0;
> > +
> > +    iorp = &s->buf_ioreq;
> > +    pg = iorp->va;
> > +
> >      /*
> >       * Return 0 for the cases we can't deal with:
> >       *  - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
> > @@ -1367,8 +1526,7 @@ int hvm_buffered_io_send(ioreq_t *p)
> >      wmb();
> >      pg->write_pointer += qw ? 2 : 1;
> >
> > -    notify_via_xen_event_channel(v->domain,
> > -            v->domain->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN]);
> > +    notify_via_xen_event_channel(d, s->buf_ioreq_evtchn);
> >      spin_unlock(&iorp->lock);
> >
> >      return 1;
> > @@ -1376,22 +1534,29 @@ int hvm_buffered_io_send(ioreq_t *p)
> >
> >  bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *proto_p)
> >  {
> > +    struct domain *d = v->domain;
> > +    struct hvm_ioreq_server *s;
> >      ioreq_t *p;
> >
> >      if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
> >          return 0; /* implicitly bins the i/o operation */
> >
> > -    if ( !(p = get_ioreq(v)) )
> > +    s = d->arch.hvm_domain.ioreq_server;
> > +    if ( !s )
> >          return 0;
> >
> > +    p = get_ioreq(s, v->vcpu_id);
> > +
> >      if ( unlikely(p->state != STATE_IOREQ_NONE) )
> >      {
> >          /* This indicates a bug in the device model. Crash the domain. */
> >          gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
> > -        domain_crash(v->domain);
> > +        domain_crash(d);
> >          return 0;
> >      }
> >
> > +    v->arch.hvm_vcpu.ioreq_server = s;
> > +
> >      p->dir = proto_p->dir;
> >      p->data_is_ptr = proto_p->data_is_ptr;
> >      p->type = proto_p->type;
> > @@ -1401,14 +1566,14 @@ bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *proto_p)
> >      p->df = proto_p->df;
> >      p->data = proto_p->data;
> >
> > -    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
> > +    prepare_wait_on_xen_event_channel(p->vp_eport);
> >
> >      /*
> >       * Following happens /after/ blocking and setting up ioreq contents.
> >       * prepare_wait_on_xen_event_channel() is an implicit barrier.
> >       */
> >      p->state = STATE_IOREQ_READY;
> > -    notify_via_xen_event_channel(v->domain, v->arch.hvm_vcpu.xen_port);
> > +    notify_via_xen_event_channel(d, p->vp_eport);
> >
> >      return 1;
> >  }
> > @@ -3995,21 +4160,6 @@ static int hvmop_flush_tlb_all(void)
> >      return 0;
> >  }
> >
> > -static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
> > -                                     int *p_port)
> > -{
> > -    int old_port, new_port;
> > -
> > -    new_port = alloc_unbound_xen_event_channel(v, remote_domid, NULL);
> > -    if ( new_port < 0 )
> > -        return new_port;
> > -
> > -    /* xchg() ensures that only we call free_xen_event_channel(). */
> > -    old_port = xchg(p_port, new_port);
> > -    free_xen_event_channel(v, old_port);
> > -    return 0;
> > -}
> > -
> >  long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> >
> >  {
> > @@ -4022,7 +4172,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> >      case HVMOP_get_param:
> >      {
> >          struct xen_hvm_param a;
> > -        struct hvm_ioreq_page *iorp;
> > +        struct hvm_ioreq_server *s;
> >          struct domain *d;
> >          struct vcpu *v;
> >
> > @@ -4048,6 +4198,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> >          if ( rc )
> >              goto param_fail;
> >
> > +        s = d->arch.hvm_domain.ioreq_server;
> > +
> 
> This should be reduced in lexical scope, and I would have said that it
> can just be 'inlined' into each of the 4 uses later.
>

Again, it's done this way to make the patch sequencing work better.
 
> >          if ( op == HVMOP_set_param )
> >          {
> >              rc = 0;
> > @@ -4055,19 +4207,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> >              switch ( a.index )
> >              {
> >              case HVM_PARAM_IOREQ_PFN:
> > -                iorp = &d->arch.hvm_domain.ioreq;
> > -                if ( (rc = hvm_set_ioreq_page(d, iorp, a.value)) != 0 )
> > -                    break;
> > -                spin_lock(&iorp->lock);
> > -                if ( iorp->va != NULL )
> > -                    /* Initialise evtchn port info if VCPUs already created. */
> > -                    for_each_vcpu ( d, v )
> > -                        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
> > -                spin_unlock(&iorp->lock);
> > +                rc = hvm_set_ioreq_server_pfn(s, a.value);
> >                  break;
> >              case HVM_PARAM_BUFIOREQ_PFN:
> > -                iorp = &d->arch.hvm_domain.buf_ioreq;
> > -                rc = hvm_set_ioreq_page(d, iorp, a.value);
> > +                rc = hvm_set_ioreq_server_buf_pfn(s, a.value);
> >                  break;
> >              case HVM_PARAM_CALLBACK_IRQ:
> >                  hvm_set_callback_via(d, a.value);
> > @@ -4122,31 +4265,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> >                  if ( a.value == DOMID_SELF )
> >                      a.value = curr_d->domain_id;
> >
> > -                rc = 0;
> > -                domain_pause(d); /* safe to change per-vcpu xen_port */
> > -                if ( d->vcpu[0] )
> > -                    rc = hvm_replace_event_channel(d->vcpu[0], a.value,
> > -                             (int 
> > *)&d->vcpu[0]->domain->arch.hvm_domain.params
> > -                                     [HVM_PARAM_BUFIOREQ_EVTCHN]);
> > -                if ( rc )
> > -                {
> > -                    domain_unpause(d);
> > -                    break;
> > -                }
> > -                iorp = &d->arch.hvm_domain.ioreq;
> > -                for_each_vcpu ( d, v )
> > -                {
> > -                    rc = hvm_replace_event_channel(v, a.value,
> > -                                                   &v->arch.hvm_vcpu.xen_port);
> > -                    if ( rc )
> > -                        break;
> > -
> > -                    spin_lock(&iorp->lock);
> > -                    if ( iorp->va != NULL )
> > -                        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
> > -                    spin_unlock(&iorp->lock);
> > -                }
> > -                domain_unpause(d);
> > +                rc = hvm_set_ioreq_server_domid(s, a.value);
> >                  break;
> >              case HVM_PARAM_ACPI_S_STATE:
> >                  /* Not reflexive, as we must domain_pause(). */
> > @@ -4241,6 +4360,9 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> >          {
> >              switch ( a.index )
> >              {
> > +            case HVM_PARAM_BUFIOREQ_EVTCHN:
> > +                a.value = s->buf_ioreq_evtchn;
> > +                break;
> >              case HVM_PARAM_ACPI_S_STATE:
> >                  a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
> >                  break;
> > diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> > index b1e3187..4c039f8 100644
> > --- a/xen/include/asm-x86/hvm/domain.h
> > +++ b/xen/include/asm-x86/hvm/domain.h
> > @@ -41,10 +41,17 @@ struct hvm_ioreq_page {
> >      void *va;
> >  };
> >
> > -struct hvm_domain {
> > +struct hvm_ioreq_server {
> > +    struct domain          *domain;
> > +    domid_t                domid;
> >      struct hvm_ioreq_page  ioreq;
> > +    int                    ioreq_evtchn[MAX_HVM_VCPUS];
> >      struct hvm_ioreq_page  buf_ioreq;
> > +    int                    buf_ioreq_evtchn;
> > +};
> >
> > +struct hvm_domain {
> > +    struct hvm_ioreq_server *ioreq_server;
> >      struct pl_time         pl_time;
> >
> >      struct hvm_io_handler *io_handler;
> > diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
> > index 122ab0d..4c9d7ee 100644
> > --- a/xen/include/asm-x86/hvm/vcpu.h
> > +++ b/xen/include/asm-x86/hvm/vcpu.h
> > @@ -138,7 +138,7 @@ struct hvm_vcpu {
> >      spinlock_t          tm_lock;
> >      struct list_head    tm_list;
> >
> > -    int                 xen_port;
> > +    struct hvm_ioreq_server *ioreq_server;
> >
> 
> Why do both hvm_vcpu and hvm_domain need ioreq_server pointers?  I can't
> spot anything which actually uses the vcpu one.
> 

Your first comment is about one of those uses!

To explain... The reference is copied into the vcpu struct when the ioreq is 
sent to the emulator and removed when the response comes back. This is not 
strictly necessary when dealing with a single emulator per domain, but once we 
move to a list of emulators only one of them is usually in use for a particular 
vcpu at a time (the exception being a broadcast ioreq, used for mapcache 
invalidation). This is why we need to track ioreq servers per vcpu as well as 
per domain, as spelled out below.
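
Condensed from hvm_send_assist_req() and hvm_do_resume() in the patch above 
(the comments are mine):

    /* hvm_send_assist_req(): remember which server this vcpu is waiting on */
    v->arch.hvm_vcpu.ioreq_server = s;

    /* hvm_do_resume(): consume and clear the reference once the response is back */
    s = v->arch.hvm_vcpu.ioreq_server;
    v->arch.hvm_vcpu.ioreq_server = NULL;

Once there is more than one server, the per-domain pointer alone cannot say 
which emulator a given vcpu is currently blocked on.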

  Paul

> ~Andrew
> 
> >      bool_t              flag_dr_dirty;
> >      bool_t              debug_state_latch;


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

