Re: [Xen-devel] [PATCH 2/5] xen/vm_access: Support for memory-content hiding
>>> On 06.05.15 at 19:12, <rcojocaru@xxxxxxxxxxxxxxx> wrote:
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -578,6 +578,25 @@ static int hvmemul_read(
>          container_of(ctxt, struct hvm_emulate_ctxt, ctxt));
>  }
>
> +static int hvmemul_read_set_context(
> +    enum x86_segment seg,
> +    unsigned long offset,
> +    void *p_data,
> +    unsigned int bytes,
> +    struct x86_emulate_ctxt *ctxt)
> +{
> +    struct vcpu *curr = current;
> +    unsigned int len =
> +        (bytes > curr->arch.vm_event.emul_read_data.size ?
> +         curr->arch.vm_event.emul_read_data.size : bytes);

min()

> +    if ( len )
> +        memcpy(p_data, curr->arch.vm_event.emul_read_data.data,
> +               curr->arch.vm_event.emul_read_data.size);

Not len? And what happens to the portion of the read beyond "bytes"
(in case that got reduced above)?

> @@ -970,20 +990,38 @@ static int hvmemul_rep_movs(
>      if ( df )
>          dgpa -= bytes - bytes_per_rep;
>
> -    /* Allocate temporary buffer. Fall back to slow emulation if this fails. */
> -    buf = xmalloc_bytes(bytes);
> -    if ( buf == NULL )
> -        return X86EMUL_UNHANDLEABLE;
> +    if ( unlikely(set_context) )
> +    {
> +        struct vcpu* curr = current;

* and blank need to switch places.

> -    /*
> -     * We do a modicum of checking here, just for paranoia's sake and to
> -     * definitely avoid copying an unitialised buffer into guest address space.
> -     */
> -    rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
> -    if ( rc == HVMCOPY_okay )
> -        rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
> +        bytes = bytes < curr->arch.vm_event.emul_read_data.size ?
> +            bytes : curr->arch.vm_event.emul_read_data.size;

min() again.

> -    xfree(buf);
> +        rc = hvm_copy_to_guest_phys(dgpa,
> +                                    curr->arch.vm_event.emul_read_data.data,
> +                                    bytes);

Again - what happens to the portion of the MOVS beyond "bytes" (in case
that got reduced above)? I think you need to carefully update *reps
(without losing any bytes), or otherwise make this fall through into
what is the "else" branch below.

> @@ -1000,6 +1038,32 @@ static int hvmemul_rep_movs(
>      return X86EMUL_OKAY;
>  }
>
> +static int hvmemul_rep_movs(
> +    enum x86_segment src_seg,
> +    unsigned long src_offset,
> +    enum x86_segment dst_seg,
> +    unsigned long dst_offset,
> +    unsigned int bytes_per_rep,
> +    unsigned long *reps,
> +    struct x86_emulate_ctxt *ctxt)
> +{
> +    return _hvmemul_rep_movs(src_seg, src_offset, dst_seg, dst_offset,
> +                             bytes_per_rep, reps, ctxt, 0);
> +}
> +
> +static int hvmemul_rep_movs_set_context(
> +    enum x86_segment src_seg,
> +    unsigned long src_offset,
> +    enum x86_segment dst_seg,
> +    unsigned long dst_offset,
> +    unsigned int bytes_per_rep,
> +    unsigned long *reps,
> +    struct x86_emulate_ctxt *ctxt)
> +{
> +    return _hvmemul_rep_movs(src_seg, src_offset, dst_seg, dst_offset,
> +                             bytes_per_rep, reps, ctxt, 1);
> +}

To be honest, these kinds of wrappers for very special, niche purposes
worry me.

> @@ -1107,6 +1171,22 @@ static int hvmemul_rep_stos(
>      }
>  }
>
> +static int hvmemul_rep_stos_set_context(
> +    void *p_data,
> +    enum x86_segment seg,
> +    unsigned long offset,
> +    unsigned int bytes_per_rep,
> +    unsigned long *reps,
> +    struct x86_emulate_ctxt *ctxt)
> +{
> +    struct vcpu *curr = current;
> +    unsigned long local_reps = 1;
> +
> +    return hvmemul_rep_stos(curr->arch.vm_event.emul_read_data.data, seg,
> +                            offset, curr->arch.vm_event.emul_read_data.size,
> +                            &local_reps, ctxt);
> +}

How come here you can get away by simply reducing *reps to 1? And
considering this patch is about supplying read values - why is fiddling
with STOS emulation necessary in the first place?
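For the hvmemul_read_set_context() remarks above, a minimal sketch of
what is being asked for could look like the below - min_t() instead of
the open-coded comparison, copying only len bytes, and (as an assumed
policy here, not necessarily what the author intends) zero-filling the
part of the read that the supplied buffer doesn't cover:

static int hvmemul_read_set_context(
    enum x86_segment seg,
    unsigned long offset,
    void *p_data,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;
    /* Clamp to the amount of data the introspection agent supplied. */
    unsigned int len = min_t(unsigned int, bytes,
                             curr->arch.vm_event.emul_read_data.size);

    if ( len )
        memcpy(p_data, curr->arch.vm_event.emul_read_data.data, len);

    /*
     * Assumed policy: don't leave the tail of the read uninitialised -
     * zero whatever the supplied buffer doesn't cover.
     */
    if ( bytes > len )
        memset((uint8_t *)p_data + len, 0, bytes - len);

    return X86EMUL_OKAY;
}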
> +static const struct x86_emulate_ops hvm_emulate_ops_set_context = {
> +    .read          = hvmemul_read_set_context,
> +    .insn_fetch    = hvmemul_insn_fetch,
> +    .write         = hvmemul_write,
> +    .cmpxchg       = hvmemul_cmpxchg,

How about this one?

> +    .rep_ins       = hvmemul_rep_ins,
> +    .rep_outs      = hvmemul_rep_outs,

And this?

> +    .rep_movs      = hvmemul_rep_movs_set_context,
> +    .rep_stos      = hvmemul_rep_stos_set_context,
> +    .read_segment  = hvmemul_read_segment,
> +    .write_segment = hvmemul_write_segment,
> +    .read_io       = hvmemul_read_io,

And this?

> +void hvm_mem_access_emulate_one(enum emul_kind kind, unsigned int trapnr,
> +                                unsigned int errcode)
> {
>      struct hvm_emulate_ctxt ctx = {{ 0 }};
> -    int rc;
> +    int rc = X86EMUL_UNHANDLEABLE;
>
>      hvm_emulate_prepare(&ctx, guest_cpu_user_regs());
>
> -    if ( nowrite )
> +    switch ( kind ) {
> +    case EMUL_KIND_NOWRITE:
>          rc = hvm_emulate_one_no_write(&ctx);
> -    else
> +        break;
> +    case EMUL_KIND_NORMAL:
>          rc = hvm_emulate_one(&ctx);

Mind having "NORMAL" as the first case in the switch()?

> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -512,6 +513,7 @@ struct arch_vcpu
>      uint32_t emulate_flags;
>      unsigned long gpa;
>      unsigned long eip;
> +    struct vm_event_emul_read_data emul_read_data;

Considering the size of this structure I don't think this should be
there for all vCPU-s of all guests. IOW - please allocate this
dynamically only on domains where it's needed.

> @@ -193,6 +200,11 @@ struct vm_event_xsetbv {
>      uint64_t value;
> };
>
> +struct vm_event_emul_read_data {
> +    uint32_t size;
> +    uint8_t data[164];

This number needs an explanation.
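To illustrate the dynamic-allocation request, one possible shape - the
helper names below are made up, and the arch_vcpu member would become a
pointer; this is a sketch, not a required interface:

/* In struct arch_vcpu, the member would become a pointer: */
struct vm_event_emul_read_data *emul_read_data;

/*
 * Hypothetical helpers, called only when vm_event is enabled for a
 * domain and on its teardown (xzalloc()/xfree() from xen/xmalloc.h).
 */
static int vm_event_alloc_emul_read_data(struct vcpu *v)
{
    if ( !v->arch.vm_event.emul_read_data )
    {
        v->arch.vm_event.emul_read_data =
            xzalloc(struct vm_event_emul_read_data);
        if ( !v->arch.vm_event.emul_read_data )
            return -ENOMEM;
    }

    return 0;
}

static void vm_event_free_emul_read_data(struct vcpu *v)
{
    xfree(v->arch.vm_event.emul_read_data);
    v->arch.vm_event.emul_read_data = NULL;
}

The emulation callbacks would then of course need to check the pointer
for NULL before dereferencing it, or the set_context paths be made
unreachable when it hasn't been allocated.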
Jan