Re: [PATCH v2 4/4] x86/shadow: refactor shadow_vram_{get,put}_l1e()
On 08/10/2020 16:15, Roger Pau Monné wrote:
> On Wed, Sep 16, 2020 at 03:08:40PM +0200, Jan Beulich wrote:
>> By passing the functions an MFN and flags, only a single instance of
> ^ a
'an' is correct:

    an MFN
    a Machine Frame Number

The article changes because the pronunciation changes: "an" precedes
anything with a vowel sound, not just vowels themselves. (Isn't English
great...)
>> each is needed; they were pretty large for being inline functions
>> anyway.
>>
>> While moving the code, also adjust coding style and add const where
>> sensible / possible.
>>
>> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
>> ---
>> v2: New.
>>
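(The call sites themselves aren't in this hunk. Presumably, assuming the
existing shadow_l1e_get_mfn()/shadow_l1e_get_flags() accessors, they end up
looking something like the sketch below, where new_sl1e stands for whatever
shadow L1e the caller is about to install:

    shadow_vram_get_mfn(shadow_l1e_get_mfn(new_sl1e),
                        shadow_l1e_get_flags(new_sl1e),
                        sl1mfn, sl1e, d);

i.e. the MFN and flags get extracted once at the call site instead of inside
per-level inline copies.)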
>> --- a/xen/arch/x86/mm/shadow/hvm.c
>> +++ b/xen/arch/x86/mm/shadow/hvm.c
>> @@ -903,6 +903,104 @@ int shadow_track_dirty_vram(struct domai
>>      return rc;
>>  }
>>
>> +void shadow_vram_get_mfn(mfn_t mfn, unsigned int l1f,
>> +                         mfn_t sl1mfn, const void *sl1e,
>> +                         const struct domain *d)
>> +{
>> +    unsigned long gfn;
>> +    struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
>> +
>> +    ASSERT(is_hvm_domain(d));
>> +
>> +    if ( !dirty_vram /* tracking disabled? */ ||
>> +         !(l1f & _PAGE_RW) /* read-only mapping? */ ||
>> +         !mfn_valid(mfn) /* mfn can be invalid in mmio_direct */)
>> +        return;
>> +
>> +    gfn = gfn_x(mfn_to_gfn(d, mfn));
>> +    /* Page sharing not supported on shadow PTs */
>> +    BUG_ON(SHARED_M2P(gfn));
>> +
>> +    if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
>> +    {
>> +        unsigned long i = gfn - dirty_vram->begin_pfn;
>> +        const struct page_info *page = mfn_to_page(mfn);
>> +
>> +        if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
>> +            /* Initial guest reference, record it */
>> +            dirty_vram->sl1ma[i] = mfn_to_maddr(sl1mfn) |
>> +                                   PAGE_OFFSET(sl1e);
>> +    }
>> +}
>> +
>> +void shadow_vram_put_mfn(mfn_t mfn, unsigned int l1f,
>> +                         mfn_t sl1mfn, const void *sl1e,
>> +                         const struct domain *d)
>> +{
>> +    unsigned long gfn;
>> +    struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
>> +
>> +    ASSERT(is_hvm_domain(d));
>> +
>> +    if ( !dirty_vram /* tracking disabled? */ ||
>> +         !(l1f & _PAGE_RW) /* read-only mapping? */ ||
>> +         !mfn_valid(mfn) /* mfn can be invalid in mmio_direct */)
>> +        return;
>> +
>> +    gfn = gfn_x(mfn_to_gfn(d, mfn));
>> +    /* Page sharing not supported on shadow PTs */
>> +    BUG_ON(SHARED_M2P(gfn));
>> +
>> +    if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
>> +    {
>> +        unsigned long i = gfn - dirty_vram->begin_pfn;
>> +        const struct page_info *page = mfn_to_page(mfn);
>> +        bool dirty = false;
>> +        paddr_t sl1ma = mfn_to_maddr(sl1mfn) | PAGE_OFFSET(sl1e);
>> +
>> +        if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
>> +        {
>> +            /* Last reference */
>> +            if ( dirty_vram->sl1ma[i] == INVALID_PADDR )
>> +            {
>> +                /* We didn't know it was that one, let's say it is dirty */
>> +                dirty = true;
>> +            }
>> +            else
>> +            {
>> +                ASSERT(dirty_vram->sl1ma[i] == sl1ma);
>> +                dirty_vram->sl1ma[i] = INVALID_PADDR;
>> +                if ( l1f & _PAGE_DIRTY )
>> +                    dirty = true;
>> +            }
>> +        }
>> +        else
>> +        {
>> +            /* We had more than one reference, just consider the page dirty. */
>> +            dirty = true;
>> +            /* Check that it's not the one we recorded. */
>> +            if ( dirty_vram->sl1ma[i] == sl1ma )
>> +            {
>> +                /* Too bad, we remembered the wrong one... */
>> +                dirty_vram->sl1ma[i] = INVALID_PADDR;
>> +            }
>> +            else
>> +            {
>> +                /*
>> +                 * Ok, our recorded sl1e is still pointing to this page, let's
>> +                 * just hope it will remain.
>> +                 */
>> +            }
>> +        }
>> +
>> +        if ( dirty )
>> +        {
>> +            dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8);
> Could you use _set_bit here?
__set_bit() uses 4-byte accesses. This uses 1-byte accesses.
Last I checked, there is a boundary issue at the end of the dirty_bitmap.
Both Julien and I have considered changing our bit infrastructure to use
byte accesses, which would make them more generally useful.
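
To illustrate with a sketch (simplified stand-ins, not the actual Xen
helpers; only the access width matters here):

    #include <stdint.h>

    /* Byte-granular RMW, as the patch does: touches exactly one byte. */
    static inline void set_bit_byte(uint8_t *bm, unsigned long bit)
    {
        bm[bit / 8] |= 1U << (bit % 8);
    }

    /*
     * Word-granular RMW, in the style of __set_bit(): the access covers a
     * whole 32-bit word, so on a bitmap whose size isn't a multiple of 4
     * bytes, setting a bit in the final partial word reads and writes
     * bytes beyond the end of the allocation.
     */
    static inline void set_bit_word(uint32_t *bm, unsigned long bit)
    {
        bm[bit / 32] |= 1U << (bit % 32);
    }

E.g. for a 5-byte bitmap covering 40 pages, setting bit 39 byte-wise writes
bm[4] only, whereas the word-granular version would read-modify-write
bm[4..7].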
~Andrew