Re: [Xen-devel] [PATCH v6 05/16] x86/hvm: unify internal portio and mmio intercepts
>>> On 03.07.15 at 18:25, <paul.durrant@xxxxxxxxxx> wrote:
> @@ -1465,11 +1462,12 @@ int hvm_domain_initialise(struct domain *d)
> goto fail0;
>
> d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
> - d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
> + d->arch.hvm_domain.io_handler = xzalloc_array(struct hvm_io_handler,
> + NR_IO_HANDLERS);
> rc = -ENOMEM;
> if ( !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
> goto fail1;
> - d->arch.hvm_domain.io_handler->num_slot = 0;
> + d->arch.hvm_domain.io_handler_count = 0;
I don't think this is needed - the field starts out as zero anyway.
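For illustration (struct domain is cleared when it gets allocated, and
xzalloc_array() hands back zero-filled memory anyway):

    d->arch.hvm_domain.io_handler = xzalloc_array(struct hvm_io_handler,
                                                  NR_IO_HANDLERS);
    /* d->arch.hvm_domain.io_handler_count is already 0 at this point. */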
> --- a/xen/arch/x86/hvm/intercept.c
> +++ b/xen/arch/x86/hvm/intercept.c
> @@ -32,205 +32,94 @@
> #include <xen/event.h>
> #include <xen/iommu.h>
>
> -static const struct hvm_mmio_ops *const
> -hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
> +static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler,
> + const ioreq_t *p)
> {
> - &hpet_mmio_ops,
> - &vlapic_mmio_ops,
> - &vioapic_mmio_ops,
> - &msixtbl_mmio_ops,
> - &iommu_mmio_ops
> -};
> + BUG_ON(handler->type != IOREQ_TYPE_COPY);
> +
> + return handler->mmio.ops->check(current, p->addr);
> +}
>
> -static int hvm_mmio_access(struct vcpu *v,
> - ioreq_t *p,
> - hvm_mmio_read_t read,
> - hvm_mmio_write_t write)
> +static int hvm_mmio_read(const struct hvm_io_handler *handler,
> + uint64_t addr,
> + uint32_t size,
> + uint64_t *data)
These should really go on one line (provided this doesn't make the
line longer than 80 chars).
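The full declaration wouldn't fit in 80 columns here, but the trailing
parameters could at least be combined, e.g.:

    static int hvm_mmio_read(const struct hvm_io_handler *handler,
                             uint64_t addr, uint32_t size, uint64_t *data)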
> @@ -324,78 +230,122 @@ static int process_portio_intercept(portio_action_t action, ioreq_t *p)
> return rc;
> }
>
> -/*
> - * Check if the request is handled inside xen
> - * return value: 0 --not handled; 1 --handled
> - */
> -int hvm_io_intercept(ioreq_t *p, int type)
> +const struct hvm_io_handler *hvm_find_io_handler(ioreq_t *p)
> +{
> + struct domain *curr_d = current->domain;
> + const struct hvm_io_ops *ops =
> + (p->type == IOREQ_TYPE_COPY) ?
> + &mmio_ops :
> + &portio_ops;
Similarly (and there were other cases earlier on) I don't see why this
needs to be four lines when two suffice:

    const struct hvm_io_ops *ops = p->type == IOREQ_TYPE_COPY ?
                                   &mmio_ops : &portio_ops;
(arguably the ? is better placed on the second line, even if not fully
in line with our general operator placement).
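I.e. (untested, just to illustrate that alternative operator placement):

    const struct hvm_io_ops *ops = p->type == IOREQ_TYPE_COPY
                                   ? &mmio_ops : &portio_ops;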
> +bool_t hvm_mmio_internal(paddr_t gpa)
> +{
> + ioreq_t p = {
> + .type = IOREQ_TYPE_COPY,
> + .addr = gpa
> + };
> +
> + return (hvm_find_io_handler(&p) != NULL);
Pointless parentheses.
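I.e. simply

    return hvm_find_io_handler(&p) != NULL;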
> --- a/xen/arch/x86/hvm/stdvga.c
> +++ b/xen/arch/x86/hvm/stdvga.c
> @@ -547,12 +547,27 @@ static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
> return 1;
> }
>
> -static int stdvga_intercept_mmio(ioreq_t *p)
> +int stdvga_intercept_mmio(ioreq_t *p)
> {
> struct domain *d = current->domain;
> struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
> + uint64_t start, end, count = p->count, size = p->size;
> int buf = 0, rc;
>
> + if ( p->df )
> + {
> + start = (p->addr - (count - 1) * size);
> + end = p->addr + size;
> + }
> + else
> + {
> + start = p->addr;
> + end = p->addr + count * size;
> + }
As I stumble here not for the first time, I now finally need to make
the remark: I find it quite odd for p->count and p->size to be latched
into local (and even 64-bit) variables, yet for the more heavily used
p->addr not to be treated the same way.
Also I suppose the if and else branches should be swapped (with
the condition inverted), or unlikely() be added to the condition, as
I think the vast majority of operations will be with EFLAGS.DF clear.
> + if ( (start < VGA_MEM_BASE) || (end > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
> + return X86EMUL_UNHANDLEABLE;
> +
> if ( p->size > 8 )
Nor do I see why this wouldn't then use the local variable.
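Taken together the above would amount to something like this (untested
sketch, with types kept as in the hunk):

    uint64_t addr = p->addr, size = p->size, count = p->count;
    uint64_t start, end;

    if ( likely(!p->df) )
    {
        start = addr;
        end = addr + count * size;
    }
    else
    {
        start = addr - (count - 1) * size;
        end = addr + size;
    }

    if ( (start < VGA_MEM_BASE) || (end > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
        return X86EMUL_UNHANDLEABLE;

    if ( size > 8 )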
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel