Re: [Xen-devel] [PATCH v3 3/3] x86/HVM: split page straddling emulated accesses in more cases
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: 06 September 2018 14:04
> To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
> Cc: Olaf Hering <olaf@xxxxxxxxx>; Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>
> Subject: [PATCH v3 3/3] x86/HVM: split page straddling emulated accesses in more cases
>
> Assuming consecutive linear addresses map to all RAM or all MMIO is not
> correct. Nor is assuming that a page straddling MMIO access will access
> the same emulating component for both parts of the access. If a guest
> RAM read fails with HVMTRANS_bad_gfn_to_mfn and if the access straddles
> a page boundary, issue accesses separately for both parts.
>
> The extra call to known_gla() from hvmemul_write() is just to preserve
> original behavior; for consistency the check also gets added to
> hvmemul_rmw() (albeit I continue to be unsure whether we wouldn't better
> drop both).
>
> Note that the correctness of this depends on the MMIO caching used
> elsewhere in the emulation code.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> Tested-by: Olaf Hering <olaf@xxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> ---
> v3: Move introduction of known_gla() to a prereq patch. Mirror check
>     using the function into hvmemul_rmw().
> v2: Also handle hvmemul_{write,rmw}().
>
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1058,7 +1058,91 @@ static bool known_gla(unsigned long addr
>      else if ( !vio->mmio_access.read_access )
>          return false;
>
> -    return vio->mmio_gla == (addr & PAGE_MASK);
> +    return (vio->mmio_gla == (addr & PAGE_MASK) &&
> +            (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
> +}
> +
> +static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
> +                       uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
> +{
> +    pagefault_info_t pfinfo;
> +    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
> +
> +    switch ( rc )
> +    {
> +        unsigned int offset, part1;
> +
> +    case HVMTRANS_okay:
> +        return X86EMUL_OKAY;
> +
> +    case HVMTRANS_bad_linear_to_gfn:
> +        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> +        return X86EMUL_EXCEPTION;
> +
> +    case HVMTRANS_bad_gfn_to_mfn:
> +        if ( pfec & PFEC_insn_fetch )
> +            return X86EMUL_UNHANDLEABLE;
> +
> +        offset = addr & ~PAGE_MASK;
> +        if ( offset + bytes <= PAGE_SIZE )
> +            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> +                                            hvmemul_ctxt,
> +                                            known_gla(addr, bytes, pfec));
> +
> +        /* Split the access at the page boundary. */
> +        part1 = PAGE_SIZE - offset;
> +        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
> +        if ( rc == X86EMUL_OKAY )
> +            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
> +                             pfec, hvmemul_ctxt);
> +        return rc;
> +
> +    case HVMTRANS_gfn_paged_out:
> +    case HVMTRANS_gfn_shared:
> +        return X86EMUL_RETRY;
> +    }
> +
> +    return X86EMUL_UNHANDLEABLE;
> +}
> +
> +static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
> +                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
> +{
> +    pagefault_info_t pfinfo;
> +    int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
> +
> +    switch ( rc )
> +    {
> +        unsigned int offset, part1;
> +
> +    case HVMTRANS_okay:
> +        return X86EMUL_OKAY;
> +
> +    case HVMTRANS_bad_linear_to_gfn:
> +        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> +        return X86EMUL_EXCEPTION;
> +
> +    case HVMTRANS_bad_gfn_to_mfn:
> +        offset = addr & ~PAGE_MASK;
> +        if ( offset + bytes <= PAGE_SIZE )
> +            return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> +                                             hvmemul_ctxt,
> +                                             known_gla(addr, bytes, pfec));
> +
> +        /* Split the access at the page boundary. */
> +        part1 = PAGE_SIZE - offset;
> +        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
> +        if ( rc == X86EMUL_OKAY )
> +            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
> +                              pfec, hvmemul_ctxt);
> +        return rc;
> +
> +    case HVMTRANS_gfn_paged_out:
> +    case HVMTRANS_gfn_shared:
> +        return X86EMUL_RETRY;
> +    }
> +
> +    return X86EMUL_UNHANDLEABLE;
>  }
>
>  static int __hvmemul_read(
> @@ -1069,7 +1153,6 @@ static int __hvmemul_read(
>      enum hvm_access_type access_type,
>      struct hvm_emulate_ctxt *hvmemul_ctxt)
>  {
> -    pagefault_info_t pfinfo;
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present;
>      int rc;
> @@ -1085,31 +1168,8 @@ static int __hvmemul_read(
>          seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
>      if ( rc != X86EMUL_OKAY || !bytes )
>          return rc;
> -    if ( known_gla(addr, bytes, pfec) )
> -        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
>
> -    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
> -
> -    switch ( rc )
> -    {
> -    case HVMTRANS_okay:
> -        break;
> -    case HVMTRANS_bad_linear_to_gfn:
> -        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> -        return X86EMUL_EXCEPTION;
> -    case HVMTRANS_bad_gfn_to_mfn:
> -        if ( access_type == hvm_access_insn_fetch )
> -            return X86EMUL_UNHANDLEABLE;
> -
> -        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
> -    case HVMTRANS_gfn_paged_out:
> -    case HVMTRANS_gfn_shared:
> -        return X86EMUL_RETRY;
> -    default:
> -        return X86EMUL_UNHANDLEABLE;
> -    }
> -
> -    return X86EMUL_OKAY;
> +    return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
>  }
>
>  static int hvmemul_read(
> @@ -1189,7 +1249,7 @@ static int hvmemul_write(
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
>      int rc;
> -    void *mapping;
> +    void *mapping = NULL;
>
>      if ( is_x86_system_segment(seg) )
>          pfec |= PFEC_implicit;
> @@ -1201,15 +1261,15 @@ static int hvmemul_write(
>      if ( rc != X86EMUL_OKAY || !bytes )
>          return rc;
>
> -    if ( known_gla(addr, bytes, pfec) )
> -        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
> -
> -    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
> -    if ( IS_ERR(mapping) )
> -        return ~PTR_ERR(mapping);
> +    if ( !known_gla(addr, bytes, pfec) )
> +    {
> +        mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
> +        if ( IS_ERR(mapping) )
> +            return ~PTR_ERR(mapping);
> +    }
>
>      if ( !mapping )
> -        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
> +        return linear_write(addr, bytes, p_data, pfec, hvmemul_ctxt);
>
>      memcpy(mapping, p_data, bytes);
>
> @@ -1231,7 +1291,7 @@ static int hvmemul_rmw(
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
>      int rc;
> -    void *mapping;
> +    void *mapping = NULL;
>
>      rc = hvmemul_virtual_to_linear(
>          seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
> @@ -1243,9 +1303,12 @@
>      else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
>          pfec |= PFEC_user_mode;
>
> -    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
> -    if ( IS_ERR(mapping) )
> -        return ~PTR_ERR(mapping);
> +    if ( !known_gla(addr, bytes, pfec) )
> +    {
> +        mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
> +        if ( IS_ERR(mapping) )
> +            return ~PTR_ERR(mapping);
> +    }
>
>      if ( mapping )
>      {
> @@ -1255,17 +1318,14 @@
>      else
>      {
>          unsigned long data = 0;
> -        bool known_gpfn = known_gla(addr, bytes, pfec);
>
>          if ( bytes > sizeof(data) )
>              return X86EMUL_UNHANDLEABLE;
> -        rc = hvmemul_linear_mmio_read(addr, bytes, &data, pfec, hvmemul_ctxt,
> -                                      known_gpfn);
> +        rc = linear_read(addr, bytes, &data, pfec, hvmemul_ctxt);
>          if ( rc == X86EMUL_OKAY )
>              rc = x86_emul_rmw(&data, bytes, eflags, state, ctxt);
>          if ( rc == X86EMUL_OKAY )
> -            rc = hvmemul_linear_mmio_write(addr, bytes, &data, pfec,
> -                                           hvmemul_ctxt, known_gpfn);
> +            rc = linear_write(addr, bytes, &data, pfec, hvmemul_ctxt);
>      }
>
>      return rc;
>

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel