
Re: [Xen-devel] [PATCH v3 4/6] ioreq-server: on-demand creation of ioreq server


  • To: George Dunlap <George.Dunlap@xxxxxxxxxx>
  • From: Paul Durrant <Paul.Durrant@xxxxxxxxxx>
  • Date: Tue, 11 Mar 2014 10:54:11 +0000
  • Accept-language: en-GB, en-US
  • Cc: "xen-devel@xxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxx>
  • Delivery-date: Tue, 11 Mar 2014 10:54:25 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>
  • Thread-index: AQHPOIH8qggMy4p3D0GAUBfPFp2GDpraj20AgAEuhOA=
  • Thread-topic: [Xen-devel] [PATCH v3 4/6] ioreq-server: on-demand creation of ioreq server

> -----Original Message-----
> From: dunlapg@xxxxxxxxx [mailto:dunlapg@xxxxxxxxx] On Behalf Of
> George Dunlap
> Sent: 10 March 2014 17:46
> To: Paul Durrant
> Cc: xen-devel@xxxxxxxxxxxxx
> Subject: Re: [Xen-devel] [PATCH v3 4/6] ioreq-server: on-demand creation of
> ioreq server
> 
> On Wed, Mar 5, 2014 at 2:47 PM, Paul Durrant <paul.durrant@xxxxxxxxxx>
> wrote:
> > This patch only creates the ioreq server when the legacy HVM parameters
> > are touched by an emulator. It also lays some groundwork for supporting
> > multiple IOREQ servers. For instance, it introduces ioreq server reference
> > counting which is not strictly necessary at this stage but will become so
> > when ioreq servers can be destroyed prior to the domain dying.
> >
> > There is a significant change in the layout of the special pages reserved
> > in xc_hvm_build_x86.c. This is so that we can 'grow' them downwards without
> > moving pages such as the xenstore page when building a domain that can
> > support more than one emulator.
> >
> > Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> > ---
> >  tools/libxc/xc_hvm_build_x86.c |   40 +++++--
> >  xen/arch/x86/hvm/hvm.c         |  240 +++++++++++++++++++++++++---------------
> >  2 files changed, 176 insertions(+), 104 deletions(-)
> >
> > diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
> > index dd3b522..b65e702 100644
> > --- a/tools/libxc/xc_hvm_build_x86.c
> > +++ b/tools/libxc/xc_hvm_build_x86.c
> > @@ -41,13 +41,12 @@
> >  #define SPECIALPAGE_PAGING   0
> >  #define SPECIALPAGE_ACCESS   1
> >  #define SPECIALPAGE_SHARING  2
> > -#define SPECIALPAGE_BUFIOREQ 3
> > -#define SPECIALPAGE_XENSTORE 4
> > -#define SPECIALPAGE_IOREQ    5
> > -#define SPECIALPAGE_IDENT_PT 6
> > -#define SPECIALPAGE_CONSOLE  7
> > -#define NR_SPECIAL_PAGES     8
> > -#define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
> > +#define SPECIALPAGE_XENSTORE 3
> > +#define SPECIALPAGE_IDENT_PT 4
> > +#define SPECIALPAGE_CONSOLE  5
> > +#define SPECIALPAGE_IOREQ    6
> > +#define NR_SPECIAL_PAGES     SPECIALPAGE_IOREQ + 2 /* ioreq server needs 2 pages */
> > +#define special_pfn(x) (0xff000u - 1 - (x))
> >
> >  #define VGA_HOLE_SIZE (0x20)
> >
> > @@ -114,7 +113,7 @@ static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
> >      /* Memory parameters. */
> >      hvm_info->low_mem_pgend = lowmem_end >> PAGE_SHIFT;
> >      hvm_info->high_mem_pgend = highmem_end >> PAGE_SHIFT;
> > -    hvm_info->reserved_mem_pgstart = special_pfn(0);
> > +    hvm_info->reserved_mem_pgstart = special_pfn(0) - NR_SPECIAL_PAGES;
> 
> It might be better to #define this up above, just below special_pfn()?
>  If we do that, and make the change below, then the "direction of
> growth" will be encoded all in one place.
> 

Sounds like a good idea. I'll do that.
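
Something along these lines, perhaps (SPECIAL_PFN_RESERVED_BASE is just an
illustrative name, not what's in the patch):

  #define SPECIALPAGE_XENSTORE 3
  #define SPECIALPAGE_IDENT_PT 4
  #define SPECIALPAGE_CONSOLE  5
  #define SPECIALPAGE_IOREQ    6
  #define NR_SPECIAL_PAGES     (SPECIALPAGE_IOREQ + 2) /* ioreq server needs 2 pages */
  /* Special pages are allocated downwards from 0xfefff... */
  #define special_pfn(x)       (0xff000u - 1 - (x))
  /* ...so the start of the reserved region is derived in the same place. */
  #define SPECIAL_PFN_RESERVED_BASE (special_pfn(0) - NR_SPECIAL_PAGES)

and then build_hvm_info() would just have:

      hvm_info->reserved_mem_pgstart = SPECIAL_PFN_RESERVED_BASE;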

> >
> >      /* Finish with the checksum. */
> >      for ( i = 0, sum = 0; i < hvm_info->length; i++ )
> > @@ -473,6 +472,23 @@ static int setup_guest(xc_interface *xch,
> >      munmap(hvm_info_page, PAGE_SIZE);
> >
> >      /* Allocate and clear special pages. */
> > +
> > +    DPRINTF("%d SPECIAL PAGES:\n", NR_SPECIAL_PAGES);
> > +    DPRINTF("  PAGING:    %"PRI_xen_pfn"\n",
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_PAGING));
> > +    DPRINTF("  ACCESS:    %"PRI_xen_pfn"\n",
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_ACCESS));
> > +    DPRINTF("  SHARING:   %"PRI_xen_pfn"\n",
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_SHARING));
> > +    DPRINTF("  STORE:     %"PRI_xen_pfn"\n",
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_XENSTORE));
> > +    DPRINTF("  IDENT_PT:  %"PRI_xen_pfn"\n",
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_IDENT_PT));
> > +    DPRINTF("  CONSOLE:   %"PRI_xen_pfn"\n",
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_CONSOLE));
> > +    DPRINTF("  IOREQ:     %"PRI_xen_pfn"\n",
> > +            (xen_pfn_t)special_pfn(SPECIALPAGE_IOREQ));
> > +
> >      for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
> >      {
> >          xen_pfn_t pfn = special_pfn(i);
> > @@ -488,10 +504,6 @@ static int setup_guest(xc_interface *xch,
> >
> >      xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_PFN,
> >                       special_pfn(SPECIALPAGE_XENSTORE));
> > -    xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
> > -                     special_pfn(SPECIALPAGE_BUFIOREQ));
> > -    xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
> > -                     special_pfn(SPECIALPAGE_IOREQ));
> >      xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
> >                       special_pfn(SPECIALPAGE_CONSOLE));
> >      xc_set_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN,
> > @@ -500,6 +512,10 @@ static int setup_guest(xc_interface *xch,
> >                       special_pfn(SPECIALPAGE_ACCESS));
> >      xc_set_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN,
> >                       special_pfn(SPECIALPAGE_SHARING));
> > +    xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
> > +                     special_pfn(SPECIALPAGE_IOREQ));
> > +    xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
> > +                     special_pfn(SPECIALPAGE_IOREQ) - 1);
> 
> If we say "special_pfn(SPECIALPAGE_IOREQ+1)", it doesn't assume that
> things grow in a specific direction.
> 

Ok. 
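
i.e. in setup_guest() it would look something like this (sketch only,
mirroring the hunk above):

      xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
                       special_pfn(SPECIALPAGE_IOREQ));
      xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
                       special_pfn(SPECIALPAGE_IOREQ + 1));

which works whichever way special_pfn() counts, since the buffered ioreq
page is simply the next special page after the ioreq page.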

> >
> >      /*
> >       * Identity-map page table is required for running with CR0.PG=0 when
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index bbf9577..22b2a2c 100644
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -366,22 +366,9 @@ bool_t hvm_io_pending(struct vcpu *v)
> >      return ( p->state != STATE_IOREQ_NONE );
> >  }
> >
> > -void hvm_do_resume(struct vcpu *v)
> > +static void hvm_wait_on_io(struct domain *d, ioreq_t *p)
> >  {
> > -    struct domain *d = v->domain;
> > -    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
> > -    ioreq_t *p;
> > -
> > -    check_wakeup_from_wait();
> > -
> > -    if ( is_hvm_vcpu(v) )
> > -        pt_restore_timer(v);
> > -
> > -    if ( !s )
> > -        goto check_inject_trap;
> > -
> >      /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
> > -    p = get_ioreq(s, v->vcpu_id);
> >      while ( p->state != STATE_IOREQ_NONE )
> >      {
> >          switch ( p->state )
> > @@ -397,12 +384,29 @@ void hvm_do_resume(struct vcpu *v)
> >              break;
> >          default:
> >              gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
> > -            domain_crash(v->domain);
> > +            domain_crash(d);
> >              return; /* bail */
> >          }
> >      }
> > +}
> > +
> > +void hvm_do_resume(struct vcpu *v)
> > +{
> > +    struct domain *d = v->domain;
> > +    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
> > +
> > +    check_wakeup_from_wait();
> > +
> > +    if ( is_hvm_vcpu(v) )
> > +        pt_restore_timer(v);
> > +
> > +    if ( s )
> > +    {
> > +        ioreq_t *p = get_ioreq(s, v->vcpu_id);
> > +
> > +        hvm_wait_on_io(d, p);
> > +    }
> >
> > - check_inject_trap:
> >      /* Inject pending hw/sw trap */
> >      if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
> >      {
> > @@ -411,11 +415,13 @@ void hvm_do_resume(struct vcpu *v)
> >      }
> >  }
> >
> > -static void hvm_init_ioreq_page(
> > -    struct domain *d, struct hvm_ioreq_page *iorp)
> > +static void hvm_init_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
> >  {
> > +    struct hvm_ioreq_page *iorp;
> > +
> > +    iorp = buf ? &s->buf_ioreq : &s->ioreq;
> > +
> >      spin_lock_init(&iorp->lock);
> > -    domain_pause(d);
> >  }
> >
> >  void destroy_ring_for_helper(
> > @@ -431,16 +437,13 @@ void destroy_ring_for_helper(
> >      }
> >  }
> >
> > -static void hvm_destroy_ioreq_page(
> > -    struct domain *d, struct hvm_ioreq_page *iorp)
> > -static void hvm_destroy_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
> >  {
> > -    spin_lock(&iorp->lock);
> > +    struct hvm_ioreq_page *iorp;
> >
> > -    ASSERT(d->is_dying);
> > +    iorp = buf ? &s->buf_ioreq : &s->ioreq;
> >
> >      destroy_ring_for_helper(&iorp->va, iorp->page);
> > -
> > -    spin_unlock(&iorp->lock);
> 
> BTW, is there a reason you're getting rid of the locks here?
> 

Yes: I don't believe the locking is needed there.

  Paul

>  -George

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

