
[Xen-changelog] Hand merge...



ChangeSet 1.1389.1.57, 2005/05/13 12:21:14+01:00, mafetter@xxxxxxxxxxxxxxxx

        Hand merge...



 b/xen/arch/x86/mm.c   | 2978 ++++++++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/memory.c | 2400 ----------------------------------------
 2 files changed, 2978 insertions(+), 2400 deletions(-)


diff -Nru a/xen/arch/x86/memory.c b/xen/arch/x86/memory.c
--- a/xen/arch/x86/memory.c     2005-05-13 16:06:54 -04:00
+++ /dev/null   Wed Dec 31 16:00:00 1969
@@ -1,2400 +0,0 @@
-/******************************************************************************
- * arch/x86/memory.c
- * 
- * Copyright (c) 2002-2004 K A Fraser
- * Copyright (c) 2004 Christian Limpach
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-/*
- * A description of the x86 page table API:
- * 
- * Domains trap to do_mmu_update with a list of update requests.
- * This is a list of (ptr, val) pairs, where the requested operation
- * is *ptr = val.
- * 
- * Reference counting of pages:
- * ----------------------------
- * Each page has two refcounts: tot_count and type_count.
- * 
- * TOT_COUNT is the obvious reference count. It counts all uses of a
- * physical page frame by a domain, including uses as a page directory,
- * a page table, or simple mappings via a PTE. This count prevents a
- * domain from releasing a frame back to the free pool when it still holds
- * a reference to it.
- * 
- * TYPE_COUNT is more subtle. A frame can be put to one of three
- * mutually-exclusive uses: it might be used as a page directory, or a
- * page table, or it may be mapped writable by the domain [of course, a
- * frame may also be used in none of these three ways!].
- * So, type_count is a count of the number of times a frame is being 
- * referred to in its current incarnation. Therefore, a page can only
- * change its type when its type count is zero.
- * 
- * Pinning the page type:
- * ----------------------
- * The type of a page can be pinned/unpinned with the commands
- * MMUEXT_[UN]PIN_L?_TABLE. Each page can be pinned exactly once (that is,
- * pinning is not reference counted, so it can't be nested).
- * This is useful to prevent a page's type count falling to zero, at which
- * point safety checks would need to be carried out next time the count
- * is increased again.
- * 
- * A further note on writable page mappings:
- * -----------------------------------------
- * For simplicity, the count of writable mappings for a page may not
- * correspond to reality. The 'writable count' is incremented for every
- * PTE which maps the page with the _PAGE_RW flag set. However, for
- * write access to be possible the page directory entry must also have
- * its _PAGE_RW bit set. We do not check this as it complicates the 
- * reference counting considerably [consider the case of multiple
- * directory entries referencing a single page table, some with the RW
- * bit set, others not -- it starts getting a bit messy].
- * In normal use, this simplification shouldn't be a problem.
- * However, the logic can be added if required.
- * 
- * One more note on read-only page mappings:
- * -----------------------------------------
- * We want domains to be able to map pages for read-only access. The
- * main reason is that page tables and directories should be readable
- * by a domain, but it would not be safe for them to be writable.
- * However, domains have free access to rings 1 & 2 of the Intel
- * privilege model. In terms of page protection, these are considered
- * to be part of 'supervisor mode'. The WP bit in CR0 controls whether
- * read-only restrictions are respected in supervisor mode -- if the 
- * bit is clear then any mapped page is writable.
- * 
- * We get round this by always setting the WP bit and disallowing 
- * updates to it. This is very unlikely to cause a problem for guest
- * OSes, which will generally use the WP bit to simplify their copy-on-write
- * implementation (in that case, the OS wants a fault when it writes to
- * an application-supplied buffer).
- */
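
For illustration, the (ptr, val) interface described in the header comment above
could be driven from a guest roughly as follows. This is only a sketch:
struct mmu_update_req and the xen_mmu_update() wrapper are assumed names
standing in for the real public interface and hypercall wrapper, not code
taken from this tree.

/* Guest-side view of the (ptr, val) update interface described above. */
#include <stdint.h>

struct mmu_update_req {
    uint64_t ptr;   /* machine address of the PTE to update      */
    uint64_t val;   /* new PTE contents; Xen performs *ptr = val */
};

/* Hypothetical wrapper around the HYPERVISOR_mmu_update hypercall. */
extern int xen_mmu_update(struct mmu_update_req *reqs,
                          unsigned int count, unsigned int *done);

/* Batch two PTE rewrites into a single trap to the hypervisor. */
static int remap_two_ptes(uint64_t pte_maddr_a, uint64_t new_pte_a,
                          uint64_t pte_maddr_b, uint64_t new_pte_b)
{
    struct mmu_update_req reqs[2];
    unsigned int done = 0;

    reqs[0].ptr = pte_maddr_a;
    reqs[0].val = new_pte_a;
    reqs[1].ptr = pte_maddr_b;
    reqs[1].val = new_pte_b;

    /* Xen validates each request before applying it.  Pinning commands
     * (MMUEXT_[UN]PIN_L?_TABLE, mentioned above) are issued through a
     * related extended-command path that is omitted here. */
    return xen_mmu_update(reqs, 2, &done);
}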
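
The tot_count/type_count scheme can likewise be sketched as a toy model.
This is not the code being removed below; the frame struct and the
get_frame_type()/put_frame_type() helpers are hypothetical and only
illustrate the rule that a type change requires a zero type count.

#include <stdbool.h>
#include <stdint.h>

enum frame_type { TYPE_NONE, TYPE_L1_PAGETABLE, TYPE_L2_PAGETABLE, TYPE_WRITABLE };

struct frame {
    uint32_t tot_count;    /* every use of the frame by the domain */
    uint32_t type_count;   /* uses of the frame in its current type */
    enum frame_type type;  /* current, mutually-exclusive use */
};

/* Take a typed reference; re-typing is only legal when type_count == 0. */
static bool get_frame_type(struct frame *f, enum frame_type wanted)
{
    if (f->type != wanted) {
        if (f->type_count != 0)
            return false;   /* still referenced under the old type */
        /* The real code revalidates the frame's contents at this point. */
        f->type = wanted;
    }
    f->type_count++;
    return true;
}

/* Drop a typed reference; at zero the frame becomes re-typeable. */
static void put_frame_type(struct frame *f)
{
    if (f->type_count != 0 && --f->type_count == 0)
        f->type = TYPE_NONE;
}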
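
Finally, a minimal sketch of what "always setting the WP bit" amounts to.
The constant and the read_cr0/write_cr0 helpers are written out here for
self-containedness; they are not taken from this file.

#define X86_CR0_WP (1UL << 16)

static inline unsigned long read_cr0(void)
{
    unsigned long cr0;
    __asm__ __volatile__ ( "mov %%cr0,%0" : "=r" (cr0) );
    return cr0;
}

static inline void write_cr0(unsigned long cr0)
{
    __asm__ __volatile__ ( "mov %0,%%cr0" : : "r" (cr0) );
}

static void enforce_wp_bit(void)
{
    /* Keep WP set so read-only mappings are honoured in supervisor mode;
     * guest attempts to clear it are disallowed. */
    write_cr0(read_cr0() | X86_CR0_WP);
}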
-
-#include <xen/config.h>
-#include <xen/init.h>
-#include <xen/kernel.h>
-#include <xen/lib.h>
-#include <xen/mm.h>
-#include <xen/sched.h>
-#include <xen/errno.h>
-#include <xen/perfc.h>
-#include <xen/irq.h>
-#include <xen/softirq.h>
-#include <asm/shadow.h>
-#include <asm/page.h>
-#include <asm/flushtlb.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/domain_page.h>
-#include <asm/ldt.h>
-#include <asm/e820.h>
-
-#ifdef VERBOSE
-#define MEM_LOG(_f, _a...)                           \
-  printk("DOM%u: (file=memory.c, line=%d) " _f "\n", \
-         current->id , __LINE__ , ## _a )
-#else
-#define MEM_LOG(_f, _a...) ((void)0)
-#endif
-
-static int alloc_l2_table(struct pfn_info *page);
-static int alloc_l1_table(struct pfn_info *page);
-static int get_page_from_pagenr(unsigned long page_nr, struct domain *d);
-static int get_page_and_type_from_pagenr(unsigned long page_nr, 
-                                         u32 type,
-                                         struct domain *d);
-
-static void free_l2_table(struct pfn_info *page);
-static void free_l1_table(struct pfn_info *page);
-
-static int mod_l2_entry(l2_pgentry_t *, l2_pgentry_t, unsigned long);
-static int mod_l1_entry(l1_pgentry_t *, l1_pgentry_t);
-
-/* Used to defer flushing of memory structures. */
-static struct {
-#define DOP_FLUSH_TLB   (1<<0) /* Flush the TLB.                 */
-#define DOP_RELOAD_LDT  (1<<1) /* Reload the LDT shadow mapping. */
-    unsigned long  deferred_ops;
-    /* If non-NULL, specifies a foreign subject domain for some operations. */
-    struct domain *foreign;
-} __cacheline_aligned percpu_info[NR_CPUS];
-
-/*
- * Returns the current foreign domain; defaults to the currently-executing
- * domain if a foreign override hasn't been specified.
- */
-#define FOREIGNDOM (percpu_info[smp_processor_id()].foreign ? : current)
-
-/* Private domain structs for DOMID_XEN and DOMID_IO. */
-static struct domain *dom_xen, *dom_io;
-
-/* Frame table and its size in pages. */
-struct pfn_info *frame_table;
-unsigned long frame_table_size;
-unsigned long max_page;
-
-void __init init_frametable(void)
-{
-    unsigned long i, p;
-
-    frame_table      = (struct pfn_info *)FRAMETABLE_VIRT_START;
-    frame_table_size = max_page * sizeof(struct pfn_info);
-    frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
-
-    for ( i = 0; i < frame_table_size; i += (4UL << 20) )
-    {
-        p = alloc_boot_pages(min(frame_table_size - i, 4UL << 20), 4UL << 20);
-        if ( p == 0 )
-            panic("Not enough memory for frame table\n");
-        idle_pg_table[(FRAMETABLE_VIRT_START + i) >> L2_PAGETABLE_SHIFT] =
-            mk_l2_pgentry(p | __PAGE_HYPERVISOR | _PAGE_PSE);
-    }
-
-    memset(frame_table, 0, frame_table_size);
-}
-
-void arch_init_memory(void)
-{
-    unsigned long i, j, pfn, nr_pfns;
-    struct pfn_info *page;
-
-    /*
-     * We are rather picky about the layout of 'struct pfn_info'. The
-     * count_info and domain fields must be adjacent, as we perform atomic
-     * 64-bit operations on them. Also, just for sanity, we assert the size
-     * of the structure here.
-     */
-    if ( (offsetof(struct pfn_info, u.inuse.domain) != 
-          (offsetof(struct pfn_info, count_info) + sizeof(u32))) ||
-         (sizeof(struct pfn_info) != 24) )
-    {
-        printk("Weird pfn_info layout (%ld,%ld,%d)\n",
-               offsetof(struct pfn_info, count_info),
-               offsetof(struct pfn_info, u.inuse.domain),
-               sizeof(struct pfn_info));
-        for ( ; ; ) ;
-    }
-
-    memset(percpu_info, 0, sizeof(percpu_info));
-
-    /* Initialise to a magic of 0x55555555 so easier to spot bugs later. */
-    memset(machine_to_phys_mapping, 0x55, 4<<20);
-
-    /*
-     * Initialise our DOMID_XEN domain.
-     * Any Xen-heap pages that we will allow to be mapped will have
-     * their domain field set to dom_xen.
-     */
-    dom_xen = alloc_domain_struct();
-    atomic_set(&dom_xen->refcnt, 1);
-    dom_xen->id = DOMID_XEN;
-
-    /*
-     * Initialise our DOMID_IO domain.
-     * This domain owns I/O pages that are within the range of the pfn_info
- * array. Mappings occur at the privilege level of the caller.
-     */
-    dom_io = alloc_domain_struct();
-    atomic_set(&dom_io->refcnt, 1);
-    dom_io->id = DOMID_IO;
-
-    /* M2P table is mappable read-only by privileged domains. */
-    for ( i = 0; i < 1024; i++ )
-    {
-       /* Ensure it's mapped read-only by guests (use GDT type). */
-        page = &frame_table[m2p_start_mfn+i];
-        page->count_info        = PGC_allocated | 1;
-        page->u.inuse.type_info = PGT_gdt_page | PGT_validated | 1;
-        page->u.inuse.domain    = dom_xen;
-    }
-
-    /* First 1MB of RAM is historically marked as I/O. */
-    for ( i = 0; i < 0x100; i++ )
-    {
-        page = &frame_table[i];
-        page->count_info        = PGC_allocated | 1;
-        page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
-        page->u.inuse.domain    = dom_io;
-    }
- 
-    /* Any non-RAM areas in the e820 map are considered to be for I/O. */
-    for ( i = 0; i < e820.nr_map; i++ )
-    {
-        if ( e820.map[i].type == E820_RAM )
-            continue;
-        pfn = e820.map[i].addr >> PAGE_SHIFT;
-        nr_pfns = (e820.map[i].size +
-                   (e820.map[i].addr & ~PAGE_MASK) +
-                   ~PAGE_MASK) >> PAGE_SHIFT;
-        for ( j = 0; j < nr_pfns; j++ )
-        {
-            if ( !pfn_valid(pfn+j) )
-                continue;
