
Re: [Xen-devel] [xen-unstable test] 9593: regressions - trouble: broken/fail/pass



>>> On 25.10.11 at 10:23, Ian Campbell <Ian.Campbell@xxxxxxxxxx> wrote:
> This doesn't appear to be a quirk of the test system's log generation;
> the patch appears to have ended up in the changelog. There is some stuff
> actually applied too, so perhaps it is OK, but someone who knows what was
> supposed to be in there should probably double-check!

Yeah, I somehow managed to leave the patch body in the file that was
to become the commit message. I'm sorry for that, and since I realized
it only after pushing, I didn't know how to rectify it.

Jan

>  On Mon, 2011-10-24 at 20:40 +0100, xen.org wrote:
>> 
>> 
>> changeset:   23987:2682094bc243
>> user:        Jan Beulich <jbeulich@xxxxxxxx>
>> date:        Fri Oct 21 09:42:47 2011 +0200
>> 
>>     x86/p2m: allocate CPU masks dynamically
>> 
>>     Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
>>     Acked-by: Tim Deegan <tim@xxxxxxx>
>>     Acked-by: Keir Fraser <keir@xxxxxxx>
>> 
>>     --- 2011-10-18.orig/xen/arch/x86/hvm/nestedhvm.c    2011-10-11 17:24:46.000000000 +0200
>>     +++ 2011-10-18/xen/arch/x86/hvm/nestedhvm.c 2011-10-18 16:45:02.000000000 +0200
>>     @@ -114,9 +114,9 @@ nestedhvm_flushtlb_ipi(void *info)
>>      void
>>      nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m)
>>      {
>>     -    on_selected_cpus(&p2m->p2m_dirty_cpumask, nestedhvm_flushtlb_ipi,
>>     +    on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi,
>>              p2m->domain, 1);
>>     -    cpumask_clear(&p2m->p2m_dirty_cpumask);
>>     +    cpumask_clear(p2m->dirty_cpumask);
>>      }
>> 
>>      bool_t
>>     --- 2011-10-18.orig/xen/arch/x86/mm/hap/nested_hap.c    2011-10-21 09:24:51.000000000 +0200
>>     +++ 2011-10-18/xen/arch/x86/mm/hap/nested_hap.c     2011-10-18 16:44:35.000000000 +0200
>>     @@ -88,7 +88,7 @@ nestedp2m_write_p2m_entry(struct p2m_dom
>>          safe_write_pte(p, new);
>> 
>>          if (old_flags & _PAGE_PRESENT)
>>     -        flush_tlb_mask(&p2m->p2m_dirty_cpumask);
>>     +        flush_tlb_mask(p2m->dirty_cpumask);
>> 
>>          paging_unlock(d);
>>      }
>>     --- 2011-10-18.orig/xen/arch/x86/mm/p2m.c   2011-10-14 09:47:46.000000000 +0200
>>     +++ 2011-10-18/xen/arch/x86/mm/p2m.c        2011-10-21 09:28:33.000000000 +0200
>>     @@ -81,7 +81,6 @@ static void p2m_initialise(struct domain
>>          p2m->default_access = p2m_access_rwx;
>> 
>>          p2m->cr3 = CR3_EADDR;
>>     -    cpumask_clear(&p2m->p2m_dirty_cpumask);
>> 
>>          if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
>>              ept_p2m_init(p2m);
>>     @@ -102,6 +101,8 @@ p2m_init_nestedp2m(struct domain *d)
>>              d->arch.nested_p2m[i] = p2m = xzalloc(struct p2m_domain);
>>              if (p2m == NULL)
>>                  return -ENOMEM;
>>     +        if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
>>     +            return -ENOMEM;
>>              p2m_initialise(d, p2m);
>>              p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
>>              list_add(&p2m->np2m_list, &p2m_get_hostp2m(d)->np2m_list);
>>     @@ -118,6 +119,11 @@ int p2m_init(struct domain *d)
>>          p2m_get_hostp2m(d) = p2m = xzalloc(struct p2m_domain);
>>          if ( p2m == NULL )
>>              return -ENOMEM;
>>     +    if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
>>     +    {
>>     +        xfree(p2m);
>>     +        return -ENOMEM;
>>     +    }
>>          p2m_initialise(d, p2m);
>> 
>>          /* Must initialise nestedp2m unconditionally
>>     @@ -333,6 +339,9 @@ static void p2m_teardown_nestedp2m(struc
>>          uint8_t i;
>> 
>>          for (i = 0; i < MAX_NESTEDP2M; i++) {
>>     +        if ( !d->arch.nested_p2m[i] )
>>     +            continue;
>>     +        free_cpumask_var(d->arch.nested_p2m[i]->dirty_cpumask);
>>              xfree(d->arch.nested_p2m[i]);
>>              d->arch.nested_p2m[i] = NULL;
>>          }
>>     @@ -341,8 +350,12 @@ static void p2m_teardown_nestedp2m(struc
>>      void p2m_final_teardown(struct domain *d)
>>      {
>>          /* Iterate over all p2m tables per domain */
>>     -    xfree(d->arch.p2m);
>>     -    d->arch.p2m = NULL;
>>     +    if ( d->arch.p2m )
>>     +    {
>>     +        free_cpumask_var(d->arch.p2m->dirty_cpumask);
>>     +        xfree(d->arch.p2m);
>>     +        d->arch.p2m = NULL;
>>     +    }
>> 
>>          /* We must teardown unconditionally because
>>           * we initialise them unconditionally.
>>     @@ -1200,7 +1213,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
>>                  if (p2m->cr3 == CR3_EADDR)
>>                      hvm_asid_flush_vcpu(v);
>>                  p2m->cr3 = cr3;
>>     -            cpu_set(v->processor, p2m->p2m_dirty_cpumask);
>>     +            cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
>>                  p2m_unlock(p2m);
>>                  nestedp2m_unlock(d);
>>                  return p2m;
>>     @@ -1217,7 +1230,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
>>          p2m->cr3 = cr3;
>>          nv->nv_flushp2m = 0;
>>          hvm_asid_flush_vcpu(v);
>>     -    cpu_set(v->processor, p2m->p2m_dirty_cpumask);
>>     +    cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
>>          p2m_unlock(p2m);
>>          nestedp2m_unlock(d);
>> 
>>     --- 2011-10-18.orig/xen/include/asm-x86/p2m.h       2011-10-21 09:24:51.000000000 +0200
>>     +++ 2011-10-18/xen/include/asm-x86/p2m.h    2011-10-18 16:39:34.000000000 +0200
>>     @@ -198,7 +198,7 @@ struct p2m_domain {
>>           * this p2m and those physical cpus whose vcpu's are in
>>           * guestmode.
>>           */
>>     -    cpumask_t          p2m_dirty_cpumask;
>>     +    cpumask_var_t      dirty_cpumask;
>> 
>>          struct domain     *domain;   /* back pointer to domain */
>> 
>> 
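
[Editor's note] For anyone skimming the quoted patch: it converts the embedded
cpumask_t p2m_dirty_cpumask into a dynamically allocated cpumask_var_t
dirty_cpumask. Below is a minimal sketch of the resulting allocate/use/free
lifecycle. The example_* wrapper functions are hypothetical and not part of
the patch; every helper they call, however, appears in the diff above.

    /*
     * Sketch only: hypothetical wrappers illustrating the cpumask_var_t
     * lifecycle the patch introduces for struct p2m_domain.
     */
    static int example_dirty_mask_init(struct p2m_domain *p2m)
    {
        /* Allocate and zero the mask; this can fail now that the mask is
         * no longer embedded in the structure, hence the -ENOMEM paths
         * added by the patch. */
        if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
            return -ENOMEM;
        return 0;
    }

    static void example_dirty_mask_flush(struct p2m_domain *p2m,
                                         unsigned int cpu)
    {
        /* The member is now pointer-like, so it is passed directly rather
         * than by taking its address as with the old cpumask_t field. */
        cpumask_set_cpu(cpu, p2m->dirty_cpumask);
        flush_tlb_mask(p2m->dirty_cpumask);
        cpumask_clear(p2m->dirty_cpumask);
    }

    static void example_dirty_mask_teardown(struct p2m_domain *p2m)
    {
        /* Release the dynamically allocated mask before freeing the p2m,
         * mirroring the teardown hunks in the patch. */
        free_cpumask_var(p2m->dirty_cpumask);
    }

The point of the change, as the changeset title says, is to allocate the
CPU mask dynamically, presumably so that struct p2m_domain stays small when
the hypervisor is built with a large NR_CPUS.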
>> 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

