Re: [Xen-devel] [PATCH] mem_access: Use monitor_traps instead of mem_access_send_req
On Jul 29, 2016 01:27, "Razvan Cojocaru" <rcojocaru@xxxxxxxxxxxxxxx> wrote:
>
> On 07/28/2016 10:35 PM, Tamas K Lengyel wrote:
> > The two functions monitor_traps and mem_access_send_req duplicate
> > some of the same functionality. mem_access_send_req, however, leaves
> > many of the standard vm_event fields to be filled in by other functions.
> >
> > Since mem_access events go on the monitor ring, in this patch we
> > consolidate all paths to use monitor_traps to place events on the ring
> > and to fill in the common parts of the requests.
> >
> > Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
> > ---
> > Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> > Cc: Julien Grall <julien.grall@xxxxxxx>
> > Cc: Jan Beulich <jbeulich@xxxxxxxx>
> > Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> > Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
> > Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> > ---
> > xen/arch/arm/p2m.c | 69 +++++++++++++++++++--------------------
> > xen/arch/x86/hvm/hvm.c | 16 ++++++---
> > xen/arch/x86/hvm/monitor.c | 6 ++++
> > xen/arch/x86/mm/p2m.c | 24 ++------------
> > xen/common/mem_access.c | 11 -------
> > xen/include/asm-x86/hvm/monitor.h | 2 ++
> > xen/include/asm-x86/p2m.h | 13 +++++---
> > xen/include/xen/mem_access.h | 7 ----
> > 8 files changed, 63 insertions(+), 85 deletions(-)
> >
> > diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> > index d82349c..df898a3 100644
> > --- a/xen/arch/arm/p2m.c
> > +++ b/xen/arch/arm/p2m.c
> > @@ -5,7 +5,7 @@
> > #include <xen/domain_page.h>
> > #include <xen/bitops.h>
> > #include <xen/vm_event.h>
> > -#include <xen/mem_access.h>
> > +#include <xen/monitor.h>
> > #include <xen/iocap.h>
> > #include <public/vm_event.h>
> > #include <asm/flushtlb.h>
> > @@ -1642,12 +1642,41 @@ void __init setup_virt_paging(void)
> > smp_call_function(setup_virt_paging_one, (void *)val, 1);
> > }
> >
> > +static int
> > +__p2m_mem_access_send_req(paddr_t gpa, vaddr_t gla, const struct npfec npfec,
> > + xenmem_access_t xma)
> > +{
> > + struct vcpu *v = current;
> > + vm_event_request_t req = {};
> > + bool_t sync = (xma == XENMEM_access_n2rwx) ? 0 : 1;
> > +
> > + req.reason = VM_EVENT_REASON_MEM_ACCESS;
> > +
> > + /* Send request to mem access subscriber */
> > + req.u.mem_access.gfn = gpa >> PAGE_SHIFT;
> > + req.u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
> > + if ( npfec.gla_valid )
> > + {
> > + req.u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
> > + req.u.mem_access.gla = gla;
> > +
> > + if ( npfec.kind == npfec_kind_with_gla )
> > + req.u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
> > + else if ( npfec.kind == npfec_kind_in_gpt )
> > + req.u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
> > + }
> > + req.u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0;
> > + req.u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0;
> > + req.u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0;
> > +
> > + return monitor_traps(v, sync, &req);
> > +}
> > +
> > bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
> > {
> > int rc;
> > bool_t violation;
> > xenmem_access_t xma;
> > - vm_event_request_t *req;
> > struct vcpu *v = current;
> > struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
> >
> > @@ -1734,40 +1763,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
> > return false;
> > }
> >
> > - req = xzalloc(vm_event_request_t);
> > - if ( req )
> > - {
> > - req->reason = VM_EVENT_REASON_MEM_ACCESS;
> > -
> > - /* Pause the current VCPU */
> > - if ( xma != XENMEM_access_n2rwx )
> > - req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
> > -
> > - /* Send request to mem access subscriber */
> > - req->u.mem_access.gfn = gpa >> PAGE_SHIFT;
> > - req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
> > - if ( npfec.gla_valid )
> > - {
> > - req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
> > - req->u.mem_access.gla = gla;
> > -
> > - if ( npfec.kind == npfec_kind_with_gla )
> > - req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
> > - else if ( npfec.kind == npfec_kind_in_gpt )
> > - req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
> > - }
> > - req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0;
> > - req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0;
> > - req->u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0;
> > - req->vcpu_id = v->vcpu_id;
>
> The line setting req->vcpu_id has been removed here ...
>
> > -
> > - mem_access_send_req(v->domain, req);
> > - xfree(req);
> > - }
> > -
> > - /* Pause the current VCPU */
> > - if ( xma != XENMEM_access_n2rwx )
> > - vm_event_vcpu_pause(v);
> > + if ( __p2m_mem_access_send_req(gpa, gla, npfec, xma) < 0 )
> > + domain_crash(v->domain);
> >
> > return false;
> > }
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index daaee1d..688370d 100644
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -1707,7 +1707,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
> > int rc, fall_through = 0, paged = 0;
> > int sharing_enomem = 0;
> > vm_event_request_t *req_ptr = NULL;
> > - bool_t ap2m_active;
> > + bool_t ap2m_active, sync = 0;
> >
> > /* On Nested Virtualization, walk the guest page table.
> > * If this succeeds, all is fine.
> > @@ -1846,11 +1846,12 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
> > }
> > }
> >
> > - if ( p2m_mem_access_check(gpa, gla, npfec, &req_ptr) )
> > - {
> > + sync = p2m_mem_access_check(gpa, gla, npfec, &req_ptr);
> > +
> > + if ( !sync ) {
> > fall_through = 1;
> > } else {
> > - /* Rights not promoted, vcpu paused, work here is done */
> > + /* Rights not promoted (aka. sync event), work here is done */
> > rc = 1;
> > goto out_put_gfn;
> > }
> > @@ -1956,7 +1957,12 @@ out:
> > }
> > if ( req_ptr )
> > {
> > - mem_access_send_req(currd, req_ptr);
> > + if ( hvm_monitor_mem_access(curr, sync, req_ptr) < 0 )
> > + {
> > + /* Crash the domain */
> > + rc = 0;
> > + }
> > +
> > xfree(req_ptr);
> > }
> > return rc;
> > diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c
> > index 7277c12..c7285c6 100644
> > --- a/xen/arch/x86/hvm/monitor.c
> > +++ b/xen/arch/x86/hvm/monitor.c
> > @@ -152,6 +152,12 @@ int hvm_monitor_cpuid(unsigned long insn_length)
> > return monitor_traps(curr, 1, &req);
> > }
> >
> > +int hvm_monitor_mem_access(struct vcpu* v, bool_t sync,
> > + vm_event_request_t *req)
> > +{
> > + return monitor_traps(v, sync, req);
> > +}
> > +
> > /*
> > * Local variables:
> > * mode: C
> > diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> > index 812dbf6..27f9d26 100644
> > --- a/xen/arch/x86/mm/p2m.c
> > +++ b/xen/arch/x86/mm/p2m.c
> > @@ -1728,13 +1728,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> > if ( req )
> > {
> > *req_ptr = req;
> > - req->reason = VM_EVENT_REASON_MEM_ACCESS;
> > -
> > - /* Pause the current VCPU */
> > - if ( p2ma != p2m_access_n2rwx )
> > - req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
> >
> > - /* Send request to mem event */
> > + req->reason = VM_EVENT_REASON_MEM_ACCESS;
> > req->u.mem_access.gfn = gfn;
> > req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
> > if ( npfec.gla_valid )
> > @@ -1750,23 +1745,10 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> > req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0;
> > req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0;
> > req->u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0;
> > - req->vcpu_id = v->vcpu_id;
>
> ... and here, and as such it doesn't seem to get set anywhere else now.
> Am I missing a code path outside of this patch where req->vcpu_id is
> being correctly set, so that this has become unnecessary?
>
Ah yes, I forgot to add that to monitor_traps. The idea is that the common parts of all requests should be set there, including the vcpu_id, so we can cut down on code duplication.
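
For v2 I'll have monitor_traps fill those in itself. Roughly along these lines (an untested sketch of the direction only; the exact error handling and the vm_event_monitor_traps call are assumptions about how the common code ends up looking):

    int monitor_traps(struct vcpu *v, bool_t sync, vm_event_request_t *req)
    {
        struct domain *d = v->domain;
        int rc = vm_event_claim_slot(d, &d->vm_event->monitor);

        if ( rc < 0 )
            return rc;

        /* Fill in the parts common to all monitor requests. */
        req->vcpu_id = v->vcpu_id;

        if ( sync )
        {
            /* Sync events pause the vCPU until the subscriber replies. */
            req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
            vm_event_vcpu_pause(v);
            rc = 1;
        }

        vm_event_monitor_traps(v, sync, req);

        return rc;
    }

That way callers like __p2m_mem_access_send_req only need to fill in the event-specific u.mem_access fields.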
Tamas