Re: [Xen-devel] [PATCH] x86/HVM: handle_{mmio*,pio}() return value adjustments
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: 16 December 2016 09:31
> To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant
> <Paul.Durrant@xxxxxxxxxx>
> Subject: [PATCH] x86/HVM: handle_{mmio*,pio}() return value adjustments
>
> Don't ignore their return values. Don't indicate success to callers of
> handle_pio() when in fact the domain has been crashed.
>
> Make all three functions return bool. Adjust formatting of switch()
> statements being touched anyway.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
>
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -78,7 +78,7 @@ void send_invalidate_req(void)
>          gprintk(XENLOG_ERR, "Unsuccessful map-cache invalidate\n");
>  }
>
> -int handle_mmio(void)
> +bool handle_mmio(void)
>  {
>      struct hvm_emulate_ctxt ctxt;
>      struct vcpu *curr = current;
> @@ -100,22 +100,21 @@ int handle_mmio(void)
>      {
>      case X86EMUL_UNHANDLEABLE:
>          hvm_dump_emulation_state(XENLOG_G_WARNING "MMIO", &ctxt);
> -        return 0;
> +        return false;
> +
>      case X86EMUL_EXCEPTION:
>          if ( ctxt.ctxt.event_pending )
>              hvm_inject_event(&ctxt.ctxt.event);
>          break;
> -    default:
> -        break;

Should there not be some sort of default case, even if it's simply to
assert that it's not reachable? (A sketch of what I mean follows below
the quoted patch.)

>      }
>
>      hvm_emulate_writeback(&ctxt);
>
> -    return 1;
> +    return true;
>  }
>
> -int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
> -                                 struct npfec access)
> +bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
> +                                  struct npfec access)
>  {
>      struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
>
> @@ -127,7 +126,7 @@ int handle_mmio_with_translation(unsigne
>      return handle_mmio();
>  }
>
> -int handle_pio(uint16_t port, unsigned int size, int dir)
> +bool handle_pio(uint16_t port, unsigned int size, int dir)
>  {
>      struct vcpu *curr = current;
>      struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
> @@ -155,19 +154,20 @@ int handle_pio(uint16_t port, unsigned i
>              memcpy(&guest_cpu_user_regs()->rax, &data, size);
>          }
>          break;
> +
>      case X86EMUL_RETRY:
>          /* We should not advance RIP/EIP if the domain is shutting down */
>          if ( curr->domain->is_shutting_down )
> -            return 0;
> -
> +            return false;
>          break;
> +
>      default:
>          gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
>          domain_crash(curr->domain);
> -        break;
> +        return false;
>      }
>
> -    return 1;
> +    return true;
>  }
>
>  static bool_t dpci_portio_accept(const struct hvm_io_handler *handler,
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -156,13 +156,14 @@ bool_t handle_hvm_io_completion(struct v

Do you not want to change this from bool_t to bool while you're at it?
(A sketch of that follows below as well.)

>      {
>      case HVMIO_no_completion:
>          break;
> +
>      case HVMIO_mmio_completion:
> -        handle_mmio();
> -        break;
> +        return handle_mmio();
> +
>      case HVMIO_pio_completion:
> -        (void)handle_pio(vio->io_req.addr, vio->io_req.size,
> -                         vio->io_req.dir);
> -        break;
> +        return handle_pio(vio->io_req.addr, vio->io_req.size,
> +                          vio->io_req.dir);
> +
>      case HVMIO_realmode_completion:
>      {
>          struct hvm_emulate_ctxt ctxt;
> --- a/xen/include/asm-x86/hvm/io.h
> +++ b/xen/include/asm-x86/hvm/io.h
> @@ -118,10 +118,10 @@ void relocate_portio_handler(
>
>  void send_timeoffset_req(unsigned long timeoff);
>  void send_invalidate_req(void);
> -int handle_mmio(void);
> -int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
> -                                 struct npfec);
> -int handle_pio(uint16_t port, unsigned int size, int dir);
> +bool handle_mmio(void);
> +bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
> +                                  struct npfec);
> +bool handle_pio(uint16_t port, unsigned int size, int dir);
>  void hvm_interrupt_post(struct vcpu *v, int vector, int type);
>  void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
>                    const union vioapic_redir_entry *ent);
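On the default-case question: in handle_mmio() the default path is still
reached for X86EMUL_OKAY and X86EMUL_RETRY, both of which are meant to
simply fall out of the switch into hvm_emulate_writeback(), so an
unconditional ASSERT_UNREACHABLE() there would fire on normal emulation.
A minimal sketch of an explicit default instead, assuming OKAY and RETRY
are the only other statuses hvm_emulate_one() can yield at this point
(illustrative only, not part of the posted patch):

    switch ( rc )
    {
    case X86EMUL_UNHANDLEABLE:
        hvm_dump_emulation_state(XENLOG_G_WARNING "MMIO", &ctxt);
        return false;

    case X86EMUL_EXCEPTION:
        if ( ctxt.ctxt.event_pending )
            hvm_inject_event(&ctxt.ctxt.event);
        break;

    default:
        /*
         * Assumption: X86EMUL_OKAY and X86EMUL_RETRY are the only other
         * statuses that can reach this point; assert that rather than
         * falling through silently.
         */
        ASSERT(rc == X86EMUL_OKAY || rc == X86EMUL_RETRY);
        break;
    }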
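On the bool_t point: the follow-up Paul suggests would presumably be
little more than the declaration change below, plus the matching header
declaration and any remaining 0/1 returns in the function body switched
to false/true (again a sketch, not something the posted patch contains):

    -bool_t handle_hvm_io_completion(struct vcpu *v)
    +bool handle_hvm_io_completion(struct vcpu *v)

With this patch already turning the HVMIO_mmio_completion and
HVMIO_pio_completion cases into plain "return handle_mmio();" /
"return handle_pio(...);", the caller's and callees' return types would
then line up throughout.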