Re: [Xen-devel] [PATCH v2 2/2] x86/mm: merge ptwr and mmio_ro page fault handlers
>>> On 31.08.17 at 13:22, <wei.liu2@xxxxxxxxxx> wrote:
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5176,91 +5176,6 @@ static const struct x86_emulate_ops ptwr_emulate_ops =
> {
> .cpuid = pv_emul_cpuid,
> };
>
> -/* Write page fault handler: check if guest is trying to modify a PTE. */
> -int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
> - struct cpu_user_regs *regs)
> -{
> - struct domain *d = v->domain;
> - struct page_info *page;
> - l1_pgentry_t pte;
> - struct ptwr_emulate_ctxt ptwr_ctxt;
> - struct x86_emulate_ctxt ctxt = {
> - .regs = regs,
> - .vendor = d->arch.cpuid->x86_vendor,
> - .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
> - .sp_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
> - .lma = !is_pv_32bit_domain(d),
> - .data = &ptwr_ctxt,
> - };
> - int rc;
> -
> - /* Attempt to read the PTE that maps the VA being accessed. */
> - pte = guest_get_eff_l1e(addr);
> -
> - /* We are looking only for read-only mappings of p.t. pages. */
> - if ( ((l1e_get_flags(pte) & (_PAGE_PRESENT|_PAGE_RW)) != _PAGE_PRESENT) ||
> - rangeset_contains_singleton(mmio_ro_ranges, l1e_get_pfn(pte)) ||
> - !get_page_from_mfn(l1e_get_mfn(pte), d) )
> - goto bail;
> -
> - page = l1e_get_page(pte);
> - if ( !page_lock(page) )
> - {
> - put_page(page);
> - goto bail;
> - }
> -
> - if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
> - {
> - page_unlock(page);
> - put_page(page);
> - goto bail;
> - }
> -
> - ptwr_ctxt = (struct ptwr_emulate_ctxt) {
> - .cr2 = addr,
> - .pte = pte,
> - };
> -
> - rc = x86_emulate(&ctxt, &ptwr_emulate_ops);
> -
> - page_unlock(page);
> - put_page(page);
> -
> - switch ( rc )
> - {
> - case X86EMUL_EXCEPTION:
> - /*
> - * This emulation only covers writes to pagetables which are marked
> - * read-only by Xen. We tolerate #PF (in case a concurrent pagetable
> - * update has succeeded on a different vcpu). Anything else is an
> - * emulation bug, or a guest playing with the instruction stream under
> - * Xen's feet.
> - */
> - if ( ctxt.event.type == X86_EVENTTYPE_HW_EXCEPTION &&
> - ctxt.event.vector == TRAP_page_fault )
> - pv_inject_event(&ctxt.event);
> - else
> - gdprintk(XENLOG_WARNING,
> - "Unexpected event (type %u, vector %#x) from
> emulation\n",
> - ctxt.event.type, ctxt.event.vector);
> -
> - /* Fallthrough */
> - case X86EMUL_OKAY:
> -
> - if ( ctxt.retire.singlestep )
> - pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
> -
> - /* Fallthrough */
> - case X86EMUL_RETRY:
> - perfc_incr(ptwr_emulations);
> - return EXCRET_fault_fixed;
> - }
> -
> - bail:
> - return 0;
> -}
> -
> /*************************
> * fault handling for read-only MMIO pages
> */
Note how you move the remains of the function above to below this
comment, which isn't really correct.
> @@ -6441,6 +6279,134 @@ void write_32bit_pse_identmap(uint32_t *l2)
> _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
> }
>
> +/* Check if guest is trying to modify a r/o MMIO page. */
> +static int mmio_ro_do_page_fault(struct x86_emulate_ctxt *ctxt,
> + unsigned long addr, l1_pgentry_t pte)
> +{
> + struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = addr };
> + mfn_t mfn = l1e_get_mfn(pte);
> +
> + if ( mfn_valid(mfn) )
> + {
> + struct page_info *page = mfn_to_page(mfn);
> + struct domain *owner = page_get_owner_and_reference(page);
Please add const as you move this; see the sketch following this hunk.
> + if ( owner )
> + put_page(page);
> + if ( owner != dom_io )
> + return X86EMUL_UNHANDLEABLE;
> + }
> +
> + ctxt->data = &mmio_ro_ctxt;
> + if ( pci_ro_mmcfg_decode(mfn_x(mfn), &mmio_ro_ctxt.seg, &mmio_ro_ctxt.bdf) )
> + return x86_emulate(ctxt, &mmcfg_intercept_ops);
> + else
> + return x86_emulate(ctxt, &mmio_ro_emulate_ops);
> +}
> +
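A minimal sketch of how the owner check could look with the requested const,
assuming the rest of the hunk stays as quoted above (untested):

if ( mfn_valid(mfn) )
{
    struct page_info *page = mfn_to_page(mfn);
    /* Only compared against dom_io below, so the pointer can be const. */
    const struct domain *owner = page_get_owner_and_reference(page);

    if ( owner )
        put_page(page);
    if ( owner != dom_io )
        return X86EMUL_UNHANDLEABLE;
}
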
> +/* Write page fault handler: check if guest is trying to modify a PTE. */
> +static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt, struct domain *d,
> + unsigned long addr, l1_pgentry_t pte)
> +{
> + struct ptwr_emulate_ctxt ptwr_ctxt = {
> + .cr2 = addr,
> + .pte = pte,
> + };
> + struct page_info *page;
> + int rc = X86EMUL_UNHANDLEABLE;
> +
> + if ( !get_page_from_mfn(l1e_get_mfn(pte), d) )
> + goto out;
> +
> + page = l1e_get_page(pte);
> + if ( !page_lock(page) )
> + {
> + put_page(page);
> + goto out;
> + }
> +
> + if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
> + {
> + page_unlock(page);
> + put_page(page);
> + goto out;
> + }
> +
> + ctxt->data = &ptwr_ctxt;
> + rc = x86_emulate(ctxt, &ptwr_emulate_ops);
> +
> + page_unlock(page);
> + put_page(page);
> +
> + out:
> + return rc;
> +}
Could you please avoid goto when the exit path is trivial?
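For example, with direct returns at each bail-out point the function might
read as below; this is only a sketch keeping the signature and semantics of
the hunk above, not a tested change:

static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt, struct domain *d,
                              unsigned long addr, l1_pgentry_t pte)
{
    struct ptwr_emulate_ctxt ptwr_ctxt = {
        .cr2 = addr,
        .pte = pte,
    };
    struct page_info *page;
    int rc;

    /* Bail out directly instead of jumping to a trivial exit label. */
    if ( !get_page_from_mfn(l1e_get_mfn(pte), d) )
        return X86EMUL_UNHANDLEABLE;

    page = l1e_get_page(pte);
    if ( !page_lock(page) )
    {
        put_page(page);
        return X86EMUL_UNHANDLEABLE;
    }

    if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
    {
        page_unlock(page);
        put_page(page);
        return X86EMUL_UNHANDLEABLE;
    }

    ctxt->data = &ptwr_ctxt;
    rc = x86_emulate(ctxt, &ptwr_emulate_ops);

    page_unlock(page);
    put_page(page);

    return rc;
}
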
> +int pv_ro_page_fault(struct vcpu *v, unsigned long addr,
Since you alter this and the caller anyway, the first parameter
could as well become const struct domain *, simplifying ...
> + struct cpu_user_regs *regs)
> +{
> + l1_pgentry_t pte;
> + struct domain *d = v->domain;
> + unsigned int addr_size = is_pv_32bit_vcpu(v) ? 32 : BITS_PER_LONG;
> + struct x86_emulate_ctxt ctxt = {
> + .regs = regs,
> + .vendor = d->arch.cpuid->x86_vendor,
> + .addr_size = addr_size,
> + .sp_size = addr_size,
> + .lma = !is_pv_32bit_vcpu(v),
... both is_pv_32bit_vcpu() accesses here (which internally use
v->domain). In fact I wonder whether it wouldn't yield more
consistent code if you didn't pass in the domain at all, as this
must strictly be current->domain, or invoking x86_emulate() and
various other functions is bogus. You'd then latch this into currd
here.
Furthermore I think this second use could become "addr_size > 32".
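Put together, the prologue might then look roughly as below, with the caller
adjusted to no longer pass the vcpu/domain; a sketch only, assuming
is_pv_32bit_domain() takes a const domain pointer (untested):

int pv_ro_page_fault(unsigned long addr, struct cpu_user_regs *regs)
{
    /* Must strictly be current->domain, or x86_emulate() etc. would be bogus. */
    const struct domain *currd = current->domain;
    unsigned int addr_size = is_pv_32bit_domain(currd) ? 32 : BITS_PER_LONG;
    struct x86_emulate_ctxt ctxt = {
        .regs = regs,
        .vendor = currd->arch.cpuid->x86_vendor,
        .addr_size = addr_size,
        .sp_size = addr_size,
        .lma = addr_size > 32,
    };

    /* ... read the effective L1e and dispatch to the two helpers as in the patch ... */
}
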
Jan