[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] two new files for VTI patch



ChangeSet 1.1713.2.2, 2005/06/17 09:26:47-06:00, djm@xxxxxxxxxxxxxxx

        two new files for VTI patch
        
        Signed-off-by: Anthony Xu      <Anthony.xu@xxxxxxxxx>
        Signed-off-by: Eddie Dong        <Eddie.dong@xxxxxxxxx>
        Signed-off-by: Kevin Tian        <Kevin.tian@xxxxxxxxx>



 mm.c            |  141 ++++++++++++++++++++++++++++++++++++++++++
 vmx_hypercall.c |  186 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 327 insertions(+)


diff -Nru a/xen/arch/ia64/mm.c b/xen/arch/ia64/mm.c
--- /dev/null   Wed Dec 31 16:00:00 1969
+++ b/xen/arch/ia64/mm.c        2005-06-23 07:02:47 -04:00
@@ -0,0 +1,141 @@
+/******************************************************************************
+ * arch/ia64/mm.c
+ * 
+ * Copyright (c) 2002-2005 K A Fraser
+ * Copyright (c) 2004 Christian Limpach
+ * Copyright (c) 2005, Intel Corporation.
+ *  Xuefei Xu (Anthony Xu) (Anthony.xu@xxxxxxxxx)
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * A description of the x86 page table API (note: this comment block was
+ * carried over from the x86 mm.c and describes x86 semantics; it does
+ * not fully apply to this ia64 file):
+ * 
+ * Domains trap to do_mmu_update with a list of update requests.
+ * This is a list of (ptr, val) pairs, where the requested operation
+ * is *ptr = val.
+ * 
+ * Reference counting of pages:
+ * ----------------------------
+ * Each page has two refcounts: tot_count and type_count.
+ * 
+ * TOT_COUNT is the obvious reference count. It counts all uses of a
+ * physical page frame by a domain, including uses as a page directory,
+ * a page table, or simple mappings via a PTE. This count prevents a
+ * domain from releasing a frame back to the free pool when it still holds
+ * a reference to it.
+ * 
+ * TYPE_COUNT is more subtle. A frame can be put to one of three
+ * mutually-exclusive uses: it might be used as a page directory, or a
+ * page table, or it may be mapped writable by the domain [of course, a
+ * frame may not be used in any of these three ways!].
+ * So, type_count is a count of the number of times a frame is being 
+ * referred to in its current incarnation. Therefore, a page can only
+ * change its type when its type count is zero.
+ * 
+ * Pinning the page type:
+ * ----------------------
+ * The type of a page can be pinned/unpinned with the commands
+ * MMUEXT_[UN]PIN_L?_TABLE. Each page can be pinned exactly once (that is,
+ * pinning is not reference counted, so it can't be nested).
+ * This is useful to prevent a page's type count falling to zero, at which
+ * point safety checks would need to be carried out next time the count
+ * is increased again.
+ * 
+ * A further note on writable page mappings:
+ * -----------------------------------------
+ * For simplicity, the count of writable mappings for a page may not
+ * correspond to reality. The 'writable count' is incremented for every
+ * PTE which maps the page with the _PAGE_RW flag set. However, for
+ * write access to be possible the page directory entry must also have
+ * its _PAGE_RW bit set. We do not check this as it complicates the 
+ * reference counting considerably [consider the case of multiple
+ * directory entries referencing a single page table, some with the RW
+ * bit set, others not -- it starts getting a bit messy].
+ * In normal use, this simplification shouldn't be a problem.
+ * However, the logic can be added if required.
+ * 
+ * One more note on read-only page mappings:
+ * -----------------------------------------
+ * We want domains to be able to map pages for read-only access. The
+ * main reason is that page tables and directories should be readable
+ * by a domain, but it would not be safe for them to be writable.
+ * However, domains have free access to rings 1 & 2 of the Intel
+ * privilege model. In terms of page protection, these are considered
+ * to be part of 'supervisor mode'. The WP bit in CR0 controls whether
+ * read-only restrictions are respected in supervisor mode -- if the 
+ * bit is clear then any mapped page is writable.
+ * 
+ * We get round this by always setting the WP bit and disallowing 
+ * updates to it. This is very unlikely to cause a problem for guest
+ * OS's, which will generally use the WP bit to simplify copy-on-write
+ * implementation (in that case, OS wants a fault when it writes to
+ * an application-supplied buffer).
+ */
+
+#include <xen/config.h>
+#include <public/xen.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/mm.h>
+#include <xen/errno.h>
+#include <asm/vmx_vcpu.h>
+#include <asm/vmmu.h>
+#include <asm/regionreg.h>
+
+/*
+        ureqs->ptr is virtual address
+        ureqs->val is pte value
+ */
+#ifdef CONFIG_VTI
+/*
+ * Process 'count' mmu_update requests read from the guest buffer 'ureqs'.
+ * The command is encoded in the low two bits of each request's ->ptr:
+ *   MMU_NORMAL_PT_UPDATE  -- insert a locked data-side TC entry for the
+ *                            virtual address ->ptr with pte value ->val,
+ *                            taking page size and rid from the vcpu's
+ *                            region register for that address.
+ *   MMU_MACHPHYS_UPDATE   -- record the machine-to-guest-physical mapping
+ *                            (mfn = ->ptr >> PAGE_SHIFT, gpfn = ->val).
+ * Any other command logs the request and hangs (while(1)).
+ * 'pdone' and 'foreigndom' are accepted for interface compatibility but
+ * are currently unused.  Always returns 0.
+ * NOTE(review): the return value of copy_from_user() is not checked, so a
+ * faulting guest pointer silently processes stale/partial data.
+ */
+int do_mmu_update(mmu_update_t *ureqs,u64 count,u64 *pdone,u64 foreigndom)
+{
+    int i,cmd;
+    u64 mfn, gpfn;
+    VCPU *vcpu;
+    mmu_update_t req;
+    ia64_rr rr;
+    thash_cb_t *hcb;
+    thash_data_t entry={0};
+    vcpu = current;
+    hcb = vmx_vcpu_get_vtlb(vcpu);
+    for ( i = 0; i < count; i++ )
+    {
+        copy_from_user(&req, ureqs, sizeof(req));
+        cmd = req.ptr&3;        /* command lives in the low 2 bits */
+        req.ptr &= ~3;          /* strip it to recover the address  */
+        if(cmd ==MMU_NORMAL_PT_UPDATE){
+            entry.page_flags = req.val;
+            entry.locked = 1;
+            entry.tc = 1;
+            entry.cl = DSIDE_TLB;
+            rr = vmx_vcpu_rr(vcpu, req.ptr);
+            entry.ps = rr.ps;
+            entry.rid = rr.rid;
+            vtlb_insert(hcb, &entry, req.ptr);
+        }else if(cmd == MMU_MACHPHYS_UPDATE){
+            mfn = req.ptr >>PAGE_SHIFT;
+            gpfn = req.val;
+            set_machinetophys(mfn,gpfn);
+        }else{
+            /* Fixed: the format string previously contained a raw newline
+             * (mail-wrap damage), which is not valid C; also "Unkown". */
+            printf("Unknown command of mmu_update:ptr: %lx,val: %lx\n",req.ptr,req.val);
+            while(1);
+        }
+        ureqs ++;
+    }
+    return 0;
+}
+#endif
diff -Nru a/xen/arch/ia64/vmx_hypercall.c b/xen/arch/ia64/vmx_hypercall.c
--- /dev/null   Wed Dec 31 16:00:00 1969
+++ b/xen/arch/ia64/vmx_hypercall.c     2005-06-23 07:02:47 -04:00
@@ -0,0 +1,186 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+/*
+ * vmx_hypercall.c: handling hypercall from domain
+ * Copyright (c) 2005, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ *  Xuefei Xu (Anthony Xu) (Anthony.xu@xxxxxxxxx)
+ */
+
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <asm/vmx_vcpu.h>
+#include <public/xen.h>
+#include <public/event_channel.h>
+#include <asm/vmmu.h>
+#include <asm/tlb.h>
+#include <asm/regionreg.h>
+#include <asm/page.h>
+#include <xen/mm.h>
+
+
+/*
+ * Handler for an unimplemented hypercall number: place -1 (failure) in
+ * the guest's r8 result register and advance the guest iip so execution
+ * resumes after the hypercall instruction.
+ */
+void hyper_not_support(void)
+{
+    VCPU *vcpu=current;
+    vmx_vcpu_set_gr(vcpu, 8, -1, 0);    /* r8 = -1: "not supported" */
+    vmx_vcpu_increment_iip(vcpu);
+}
+
+/*
+ * Hypercall wrapper for do_mmu_update(): fetch the four arguments from
+ * guest registers r16-r19 (ureqs pointer, count, pdone pointer,
+ * foreigndom), invoke the handler, return its result in r8, and advance
+ * the guest iip.
+ */
+void hyper_mmu_update(void)
+{
+    VCPU *vcpu=current;
+    u64 r32,r33,r34,r35,ret;
+    vmx_vcpu_get_gr(vcpu,16,&r32);
+    vmx_vcpu_get_gr(vcpu,17,&r33);
+    vmx_vcpu_get_gr(vcpu,18,&r34);
+    vmx_vcpu_get_gr(vcpu,19,&r35);
+    ret=do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
+    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+    vmx_vcpu_increment_iip(vcpu);
+}
+
+/*
+ * Hypercall wrapper for do_dom_mem_op(): fetch the five arguments from
+ * guest registers r16-r20, invoke the handler, return its result in r8,
+ * and advance the guest iip.
+ * NOTE(review): the printf below looks like debug output left enabled;
+ * it will fire on every call.
+ */
+void hyper_dom_mem_op(void)
+{
+    VCPU *vcpu=current;
+    u64 r32,r33,r34,r35,r36;
+    u64 ret;
+    vmx_vcpu_get_gr(vcpu,16,&r32);
+    vmx_vcpu_get_gr(vcpu,17,&r33);
+    vmx_vcpu_get_gr(vcpu,18,&r34);
+    vmx_vcpu_get_gr(vcpu,19,&r35);
+    vmx_vcpu_get_gr(vcpu,20,&r36);
+    ret=do_dom_mem_op(r32,(u64 *)r33,r34,r35,r36);
+    printf("do_dom_mem return value: %lx\n", ret);
+    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+
+    vmx_vcpu_increment_iip(vcpu);
+}
+
+
+/*
+ * Hypercall wrapper for do_sched_op(): single argument from guest
+ * register r16, result returned in r8, guest iip advanced.
+ */
+void hyper_sched_op(void)
+{
+    VCPU *vcpu=current;
+    u64 r32,ret;
+    vmx_vcpu_get_gr(vcpu,16,&r32);
+    ret=do_sched_op(r32);
+    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+
+    vmx_vcpu_increment_iip(vcpu);
+}
+
+/*
+ * Hypercall wrapper for do_dom0_op(): guest register r16 carries the
+ * guest pointer to a dom0_op_t; result returned in r8, guest iip
+ * advanced.
+ */
+void hyper_dom0_op(void)
+{
+    VCPU *vcpu=current;
+    u64 r32,ret;
+    vmx_vcpu_get_gr(vcpu,16,&r32);
+    ret=do_dom0_op((dom0_op_t *)r32);
+    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+
+    vmx_vcpu_increment_iip(vcpu);
+}
+
+/*
+ * Hypercall wrapper for do_event_channel_op(): guest register r16
+ * carries the guest pointer to an evtchn_op_t; result returned in r8,
+ * guest iip advanced.
+ */
+void hyper_event_channel_op(void)
+{
+    VCPU *vcpu=current;
+    u64 r32,ret;
+    vmx_vcpu_get_gr(vcpu,16,&r32);
+    ret=do_event_channel_op((evtchn_op_t *)r32);
+    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+    vmx_vcpu_increment_iip(vcpu);
+}
+
+/*
+ * Hypercall wrapper for do_xen_version(): the command in guest register
+ * r16 is narrowed to int; result returned in r8, guest iip advanced.
+ */
+void hyper_xen_version(void)
+{
+    VCPU *vcpu=current;
+    u64 r32,ret;
+    vmx_vcpu_get_gr(vcpu,16,&r32);
+    ret=do_xen_version((int )r32);
+    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+    vmx_vcpu_increment_iip(vcpu);
+}
+
+/*
+ * Lock or unlock (per 'lock') the vTLB entry covering guest virtual
+ * address 'va' on 'vcpu'.  The entry is identified by the page size and
+ * rid taken from the region register in effect for 'va'.  Returns the
+ * result of thash_lock_tc() -- presumably 0 on success; confirm against
+ * its definition.
+ * (Removed an unused local 'int i;' from the original.)
+ */
+static int do_lock_page(VCPU *vcpu, u64 va, u64 lock)
+{
+    ia64_rr rr;
+    thash_cb_t *hcb;
+    hcb = vmx_vcpu_get_vtlb(vcpu);
+    rr = vmx_vcpu_rr(vcpu, va);
+    return thash_lock_tc(hcb, va ,1U<<rr.ps, rr.rid, DSIDE_TLB, lock);
+}
+
+/*
+ * Lock guest page in vTLB, so that it's not relinquished by recycle
+ * session when HV is servicing that hypercall.
+ */
+/*
+ * Hypercall wrapper for do_lock_page(): guest register r16 carries the
+ * virtual address, r17 the lock/unlock flag (semantics defined by
+ * thash_lock_tc -- verify).  Result returned in r8, guest iip advanced.
+ */
+void hyper_lock_page(void)
+{
+//TODO:
+    VCPU *vcpu=current;
+    u64 va,lock, ret;
+    vmx_vcpu_get_gr(vcpu,16,&va);
+    vmx_vcpu_get_gr(vcpu,17,&lock);
+    ret=do_lock_page(vcpu, va, lock);
+    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+
+    vmx_vcpu_increment_iip(vcpu);
+}
+
+/*
+ * Re-point the domain's shared_info page at the guest physical address
+ * 'gpa'.  Only dom0 may issue this call.  If a shared_info page already
+ * existed, its contents are copied into the new page and the old page is
+ * returned to the xen heap when it came from there (otherwise the domain
+ * keeps ownership of it).
+ * Returns 0 on success, -EPERM for any domain other than dom0.
+ */
+static int do_set_shared_page(VCPU *vcpu, u64 gpa)
+{
+    u64 shared_info, o_info;
+    if(vcpu->domain!=dom0)
+        return -EPERM;
+    shared_info = __gpa_to_mpa(vcpu->domain, gpa);
+    o_info = (u64)vcpu->domain->shared_info;
+    vcpu->domain->shared_info= (shared_info_t *)__va(shared_info);
+
+    /* Copy existing shared info into new page.
+     * BUGFIX: the original tested (!o_info), i.e. it copied from a NULL
+     * source exactly when no old page existed and never preserved a live
+     * one; the copy/free must happen only when o_info is non-NULL. */
+    if (o_info) {
+       memcpy((void*)vcpu->domain->shared_info, (void*)o_info, PAGE_SIZE);
+       /* If original page belongs to xen heap, then relinquish back
+        * to xen heap. Or else, leave to domain itself to decide.
+        */
+       if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
+               free_xenheap_page(o_info);
+    }
+    return 0;
+}
+
+/*
+ * Hypercall wrapper for do_set_shared_page(): guest register r16 carries
+ * the guest physical address of the new shared_info page.  Result
+ * returned in r8, guest iip advanced.
+ */
+void hyper_set_shared_page(void)
+{
+    VCPU *vcpu=current;
+    u64 gpa,ret;
+    vmx_vcpu_get_gr(vcpu,16,&gpa);
+
+    ret=do_set_shared_page(vcpu, gpa);
+    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+
+    vmx_vcpu_increment_iip(vcpu);
+}
+
+/*
+void hyper_grant_table_op(void)
+{
+    VCPU *vcpu=current;
+    u64 r32,r33,r34,ret;
+    vmx_vcpu_get_gr(vcpu,16,&r32);
+    vmx_vcpu_get_gr(vcpu,17,&r33);
+    vmx_vcpu_get_gr(vcpu,18,&r34);
+
+    ret=do_grant_table_op((unsigned int)r32, (void *)r33, (unsigned int)r34);
+    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+}
+*/

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.