
[Xen-changelog] [xen-unstable] [IA64] Remove unused code (vmx_uaccess).



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID aed7ef54fbfe31c257b54c7e6d17f480f27e2630
# Parent  fffb36174ddb70ddcda84d66b6389722dc37e1bd
[IA64] Remove unused code (vmx_uaccess).

Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
---
 xen/include/asm-ia64/vmx_uaccess.h |  156 -------------------------------------
 xen/arch/ia64/vmx/vmmu.c           |   49 -----------
 2 files changed, 205 deletions(-)

diff -r fffb36174ddb -r aed7ef54fbfe xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Wed Oct 18 22:06:38 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c  Wed Oct 18 22:06:49 2006 -0600
@@ -735,52 +735,3 @@ IA64FAULT vmx_vcpu_tak(VCPU *vcpu, u64 v
     }
     return IA64_NO_FAULT;
 }
-
-/*
- * [FIXME] Is there an effective way to move this routine
- * into vmx_uaccess.h? struct exec_domain is an incomplete
- * type there...
- *
- * This is the interface to look up the virtual TLB and
- * return the corresponding machine address in the 2nd
- * parameter. The 3rd parameter returns how many bytes are
- * mapped by the matched vTLB entry, so the caller can copy
- * more than once.
- *
- * If the lookup fails, -EFAULT is returned; otherwise 0.
- * All upper-level domain access utilities rely on this
- * routine to determine the real machine address.
- *
- * Yes, put_user and get_user are somewhat slow on top of it.
- * However, these steps are necessary for any vmx domain
- * virtual address, since that is a different address space
- * from the HV's. A short-circuit may be added later for
- * special cases.
- */
-long
-__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
-{
-    unsigned long  mpfn, gpfn, m, n = *len;
-    unsigned long  end;   /* end of the area mapped by current entry */
-    thash_data_t   *entry;
-    struct vcpu *v = current;
-
-    entry = vtlb_lookup(v, va, DSIDE_TLB);
-    if (entry == NULL)
-        return -EFAULT;
-
-    gpfn = (entry->ppn >> (PAGE_SHIFT - 12));
-    gpfn = PAGEALIGN(gpfn, (entry->ps - PAGE_SHIFT));
-    gpfn = gpfn | POFFSET(va >> PAGE_SHIFT, (entry->ps - PAGE_SHIFT));
-
-    mpfn = gmfn_to_mfn(v->domain, gpfn);
-    m = (mpfn<<PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
-    /* The machine address range may not be contiguous */
-    end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
-    /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
-    /* The current entry can't map the whole requested area */
-    if ((m + n) > end)
-        n = end - m;
-
-    *ma = m;
-    *len = n;
-    return 0;
-}
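
For context, the deleted helper's guest-virtual to machine-address
arithmetic boils down to the following minimal, self-contained C sketch.
The 16KB PAGE_SHIFT, the identity gmfn_to_mfn, and the example values are
assumptions for illustration only; PAGEALIGN and POFFSET are re-declared
here as the usual align-down / low-bits helpers mirroring the Xen macros.

    #include <stdio.h>

    #define PAGE_SHIFT      14UL                   /* assumed: 16KB pages */
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGEALIGN(x, s) ((x) & ~((1UL << (s)) - 1UL)) /* align down */
    #define POFFSET(x, s)   ((x) &  ((1UL << (s)) - 1UL)) /* low bits */

    int main(void)
    {
        unsigned long ppn = 0x12345;   /* vTLB entry ppn, in 4KB units */
        unsigned long ps  = 16UL;      /* vTLB entry maps a 64KB page */
        unsigned long va  = 0x4000a3c8UL;

        /* 4KB-unit ppn -> PAGE_SHIFT-sized guest frame number */
        unsigned long gpfn = ppn >> (PAGE_SHIFT - 12);
        /* keep the frame bits the large mapping fixes, take the rest
         * from the guest virtual address */
        gpfn = PAGEALIGN(gpfn, ps - PAGE_SHIFT)
             | POFFSET(va >> PAGE_SHIFT, ps - PAGE_SHIFT);

        unsigned long mpfn = gpfn;     /* identity gmfn_to_mfn assumed */
        unsigned long ma = (mpfn << PAGE_SHIFT) | (va & (PAGE_SIZE - 1));

        printf("gpfn=%#lx ma=%#lx\n", gpfn, ma);
        return 0;
    }
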
diff -r fffb36174ddb -r aed7ef54fbfe xen/include/asm-ia64/vmx_uaccess.h
--- a/xen/include/asm-ia64/vmx_uaccess.h        Wed Oct 18 22:06:38 2006 -0600
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,156 +0,0 @@
-/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * vmx_uaccess.h: Defines vmx specific macros to transfer memory areas
- * across the domain/hypervisor boundary.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Note:  For a vmx-enabled environment, the poor man's policy is
- * actually useless, since the HV resides in a completely different
- * address space from the domain. So the only way to do the access is
- * to search the vTLB first and, on a hit, access the identity-mapped
- * address.
- *
- * Copyright (c) 2004, Intel Corporation.
- *     Kun Tian (Kevin Tian) (kevin.tian@xxxxxxxxx)
- */
-
-#ifndef __ASM_IA64_VMX_UACCESS_H__
-#define __ASM_IA64_VMX_UACCESS_H__
-
-#include <xen/compiler.h>
-#include <xen/errno.h>
-#include <xen/sched.h>
-
-#include <asm/intrinsics.h>
-#include <asm/vmmu.h>
-
-/* Since the HV never accesses domain space directly, most security
- * checks can be dummies for now
- */
-asm (".section \"__ex_table\", \"a\"\n\t.previous");
-
-/* For backward compatibility */
-#define __access_ok(addr, size, segment)       1
-#define access_ok(addr, size, segment) __access_ok((addr), (size), (segment))
-
-/*
- * These are the main single-value transfer routines.  They automatically
- * use the right size if we just have the right pointer type.
- *
- * Careful to not
- * (a) re-use the arguments for side effects (sizeof/typeof is ok)
- * (b) require any knowledge of processes at this stage
- */
-#define put_user(x, ptr)       __put_user((x), (ptr))
-#define get_user(x, ptr)       __get_user((x), (ptr))
-
-#define __put_user(x, ptr)     __do_put_user((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
-#define __get_user(x, ptr)     __do_get_user((x), (ptr), sizeof(*(ptr)))
-
-/* TODO: add specific unaligned access support later. Assuming accesses
- * are aligned at 1, 2, 4 or 8 bytes for now, an operand cannot span
- * two vTLB entries.
- */
-extern long
-__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len);
-
-#define __do_put_user(x, ptr, size)                                    \
-({                                                                     \
-    __typeof__ (x) __pu_x = (x);                                       \
-    __typeof__ (*(ptr)) __user *__pu_ptr = (ptr);                      \
-    __typeof__ (size) __pu_size = (size);                              \
-    unsigned long __pu_ma;                                             \
-    long __pu_err;                                                     \
-                                                                       \
-    __pu_err = __domain_va_to_ma((unsigned long)__pu_ptr,              \
-                               &__pu_ma, &__pu_size);                  \
-    __pu_err ? (__pu_err = -EFAULT) :                                  \
-       (*((__typeof__ (*(ptr)) *)__va(__pu_ma)) = __pu_x);             \
-    __pu_err;                                                          \
-})
-
-#define __do_get_user(x, ptr, size)                                    \
-({                                                                     \
-    __typeof__ (x) __gu_x = (x);                                       \
-    __typeof__ (*(ptr)) __user *__gu_ptr = (ptr);                      \
-    __typeof__ (size) __gu_size = (size);                              \
-    unsigned long __gu_ma;                                             \
-    long __gu_err;                                                     \
-                                                                       \
-    __gu_err = __domain_va_to_ma((unsigned long)__gu_ptr,              \
-                               &__gu_ma, &__gu_size);                  \
-    __gu_err ? (__gu_err = -EFAULT) :                                  \
-       (x = *((__typeof__ (*(ptr)) *)__va(__gu_ma)));                  \
-    __gu_err;                                                          \
-})
-
-/* More complex copy from domain */
-#define copy_from_user(to, from, n)    __copy_from_user((to), (from), (n))
-#define copy_to_user(to, from, n)      __copy_to_user((to), (from), (n))
-#define clear_user(to, n)              __clear_user((to), (n))
-
-static inline unsigned long
-__copy_from_user(void *to, void *from, unsigned long n)
-{
-    unsigned long ma, i;
-
-    i = n;
-    while (!__domain_va_to_ma((unsigned long)from, &ma, &i)) {
-        memcpy(to, (void *)__va(ma), i);
-        n -= i;
-        if (!n)
-            break;
-        from += i;
-        to += i;
-        i = n;
-    }
-    return n;
-}
-
-static inline unsigned long
-__copy_to_user(void *to, void *from, unsigned long n)
-{
-    unsigned long ma, i;
-
-    i = n;
-    while (!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
-        memcpy((void *)__va(ma), from, i);
-        n -= i;
-        if (!n)
-            break;
-        from += i;
-        to += i;
-        i = n;
-    }
-    return n;
-}
-
-static inline unsigned long
-__clear_user(void *to, unsigned long n)
-{
-    unsigned long ma, i;
-
-    i = n;
-    while (!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
-        memset((void *)__va(ma), 0, i);
-        n -= i;
-        if (!n)
-            break;
-        to += i;
-        i = n;
-    }
-    }
-    return n;
-}
-
-#endif // __ASM_IA64_VMX_UACCESS_H__
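
For context on the interface being deleted: a hypervisor-side caller used
this header like the ordinary Linux-style uaccess API. A minimal
hypothetical sketch follows (fetch_guest_word is invented for
illustration; it is not code from the tree):

    /* Hypothetical caller of the removed API (not from the tree). */
    static long fetch_guest_word(unsigned long guest_va, unsigned long *out)
    {
        unsigned long val;

        /* get_user() translated guest_va through the vTLB and
         * returned -EFAULT when no entry covered it. */
        if (get_user(val, (unsigned long *)guest_va))
            return -EFAULT;

        *out = val;
        return 0;
    }

On success get_user() returned 0 and the copied value landed in val; the
copy_{to,from}_user() helpers above looped over __domain_va_to_ma() in
the same way for multi-byte ranges.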
