Re: [Xen-devel] [PATCH 2/6] x86/mm: use optional cache in guest_walk_tables()
>>> On 19.07.18 at 15:22, <Paul.Durrant@xxxxxxxxxx> wrote:
>> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
>> Sent: 19 July 2018 11:47
>>
>> @@ -134,7 +135,15 @@ guest_walk_tables(struct vcpu *v, struct
>>      /* Get the l4e from the top level table and check its flags*/
>>      gw->l4mfn = top_mfn;
>>      l4p = (guest_l4e_t *) top_map;
>> -    gw->l4e = l4p[guest_l4_table_offset(gla)];
>> +    gpa = gfn_to_gaddr(top_gfn) +
>> +          guest_l4_table_offset(gla) * sizeof(guest_l4e_t);
>> +    if ( !cache ||
>> +         !hvmemul_read_cache(cache, gpa, 4, &gw->l4e, sizeof(gw->l4e)) )
>> +    {
>> +        gw->l4e = l4p[guest_l4_table_offset(gla)];
>> +        if ( cache )
>> +            hvmemul_write_cache(cache, gpa, 4, &gw->l4e, sizeof(gw->l4e));
>> +    }
>>      gflags = guest_l4e_get_flags(gw->l4e);
>>      if ( !(gflags & _PAGE_PRESENT) )
>>          goto out;
>> @@ -164,7 +173,15 @@ guest_walk_tables(struct vcpu *v, struct
>>      }
>>
>>      /* Get the l3e and check its flags*/
>> -    gw->l3e = l3p[guest_l3_table_offset(gla)];
>> +    gpa = gfn_to_gaddr(guest_l4e_get_gfn(gw->l4e)) +
>> +          guest_l3_table_offset(gla) * sizeof(guest_l3e_t);
>> +    if ( !cache ||
>> +         !hvmemul_read_cache(cache, gpa, 3, &gw->l3e, sizeof(gw->l3e)) )
>> +    {
>> +        gw->l3e = l3p[guest_l3_table_offset(gla)];
>> +        if ( cache )
>> +            hvmemul_write_cache(cache, gpa, 3, &gw->l3e, sizeof(gw->l3e));
>> +    }
>>      gflags = guest_l3e_get_flags(gw->l3e);
>>      if ( !(gflags & _PAGE_PRESENT) )
>>          goto out;
>> @@ -216,7 +233,16 @@ guest_walk_tables(struct vcpu *v, struct
>>  #else /* PAE only... */
>>
>>      /* Get the l3e and check its flag */
>> -    gw->l3e = ((guest_l3e_t *)top_map)[guest_l3_table_offset(gla)];
>> +    gpa = gfn_to_gaddr(top_gfn) + ((unsigned long)top_map & ~PAGE_MASK) +
>> +          guest_l3_table_offset(gla) * sizeof(guest_l3e_t);
>> +    if ( !cache ||
>> +         !hvmemul_read_cache(cache, gpa, 3, &gw->l3e, sizeof(gw->l3e)) )
>> +    {
>> +        gw->l3e = ((guest_l3e_t *)top_map)[guest_l3_table_offset(gla)];
>> +        if ( cache )
>> +            hvmemul_write_cache(cache, gpa, 3, &gw->l3e, sizeof(gw->l3e));
>> +    }
>> +
>>      gflags = guest_l3e_get_flags(gw->l3e);
>>      if ( !(gflags & _PAGE_PRESENT) )
>>          goto out;
>> @@ -242,18 +268,24 @@ guest_walk_tables(struct vcpu *v, struct
>>          goto out;
>>      }
>>
>> -    /* Get the l2e */
>> -    gw->l2e = l2p[guest_l2_table_offset(gla)];
>> -
>>  #else /* 32-bit only... */
>>
>> -    /* Get l2e from the top level table */
>>      gw->l2mfn = top_mfn;
>>      l2p = (guest_l2e_t *) top_map;
>> -    gw->l2e = l2p[guest_l2_table_offset(gla)];
>>
>>  #endif /* All levels... */
>>
>> +    /* Get the l2e */
>> +    gpa = gfn_to_gaddr(top_gfn) +
>> +          guest_l2_table_offset(gla) * sizeof(guest_l2e_t);
>> +    if ( !cache ||
>> +         !hvmemul_read_cache(cache, gpa, 2, &gw->l2e, sizeof(gw->l2e)) )
>> +    {
>> +        gw->l2e = l2p[guest_l2_table_offset(gla)];
>> +        if ( cache )
>> +            hvmemul_write_cache(cache, gpa, 2, &gw->l2e, sizeof(gw->l2e));
>> +    }
>> +
>>      /* Check the l2e flags. */
>>      gflags = guest_l2e_get_flags(gw->l2e);
>>      if ( !(gflags & _PAGE_PRESENT) )
>> @@ -335,7 +367,17 @@ guest_walk_tables(struct vcpu *v, struct
>>          gw->pfec |= rc & PFEC_synth_mask;
>>          goto out;
>>      }
>> -    gw->l1e = l1p[guest_l1_table_offset(gla)];
>> +
>> +    gpa = gfn_to_gaddr(top_gfn) +
>> +          guest_l1_table_offset(gla) * sizeof(guest_l1e_t);
>> +    if ( !cache ||
>> +         !hvmemul_read_cache(cache, gpa, 1, &gw->l1e, sizeof(gw->l1e)) )
>> +    {
>> +        gw->l1e = l1p[guest_l1_table_offset(gla)];
>> +        if ( cache )
>> +            hvmemul_write_cache(cache, gpa, 1, &gw->l1e, sizeof(gw->l1e));
>> +    }
>> +
>>      gflags = guest_l1e_get_flags(gw->l1e);
>>      if ( !(gflags & _PAGE_PRESENT) )
>>          goto out;

I've just now realized that the cache writes should either be repeated
after having set A/D bits, or the setting of A/D bits should be
suppressed when a read has hit the cache. The latter option would be
somewhat difficult with our late setting of the bits, but I still
wanted to ask: Do you have a preference either way?

Jan
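P.S. To make option 1 concrete, below is a minimal, standalone C model of
"repeat the cache write after setting A/D bits". It is only an
illustration, not Xen code: the real hvmemul_read_cache() /
hvmemul_write_cache() and the walk itself are stood in for by a toy
one-entry cache and a single fake PTE, so all names and structure here
are made up for the sketch.

/*
 * Toy, standalone model of option 1: repeat the cache write once the
 * A/D bits have been set, so the cached copy of the PTE cannot go
 * stale.  NOT Xen code; purely to show the ordering of the operations.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT  (1u << 0)  /* stand-in for _PAGE_PRESENT */
#define PTE_ACCESSED (1u << 5)  /* stand-in for _PAGE_ACCESSED */

struct walk_cache {
    uint64_t gpa;
    uint64_t pte;
    bool     valid;
};

static bool cache_read(const struct walk_cache *c, uint64_t gpa, uint64_t *pte)
{
    if ( !c->valid || c->gpa != gpa )
        return false;
    *pte = c->pte;
    return true;
}

static void cache_write(struct walk_cache *c, uint64_t gpa, uint64_t pte)
{
    c->gpa = gpa;
    c->pte = pte;
    c->valid = true;
}

int main(void)
{
    uint64_t guest_pte = 0x1000 | PTE_PRESENT;  /* the PTE "in guest memory" */
    uint64_t gpa = 0x2008;                      /* its guest-physical address */
    struct walk_cache cache = { 0 };
    uint64_t e;

    /* Walk step: try the cache first, fall back to memory and fill it. */
    if ( !cache_read(&cache, gpa, &e) )
    {
        e = guest_pte;
        cache_write(&cache, gpa, e);
    }

    /* Later in the walk, the A (and possibly D) bit is set in memory... */
    guest_pte |= PTE_ACCESSED;

    /*
     * ...and, per option 1, the cache write is repeated so the cached
     * entry matches guest memory again.  Skipping this step leaves the
     * cache holding the stale, pre-A/D-bit value.
     */
    cache_write(&cache, gpa, guest_pte);

    if ( cache_read(&cache, gpa, &e) && e == guest_pte )
        printf("cache and memory agree: %#" PRIx64 "\n", e);

    return 0;
}

Option 2 would avoid the second cache write altogether, but as said it
requires knowing, at the point where the A/D bits get set, whether the
corresponding read was satisfied from the cache, which is awkward with
the bits being set late in the walk.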