[Xen-changelog] [xen-3.0.3-testing] merge
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1160902561 -3600
# Node ID b2f2b7738aa2b5059be6a3251e8db38b8ddf5355
# Parent  5fdaf8842e797ba324551e8e1e5e0eab76b23bd0
# Parent  6ed4368b4a9e1924c983774c4b1a2b6baf8e98a6
merge
---
 tools/python/xen/xend/image.py   |    5 ++
 xen/arch/x86/hvm/i8259.c         |   45 +++++++++----------
 xen/arch/x86/hvm/platform.c      |   20 ++++----
 xen/arch/x86/mm/shadow/common.c  |    4 -
 xen/arch/x86/oprofile/xenoprof.c |   90 ++++++++++++++++++++++++---------------
 5 files changed, 97 insertions(+), 67 deletions(-)

diff -r 5fdaf8842e79 -r b2f2b7738aa2 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Fri Oct 13 18:08:25 2006 +0100
+++ b/tools/python/xen/xend/image.py    Sun Oct 15 09:56:01 2006 +0100
@@ -312,6 +312,11 @@ class HVMImageHandler(ImageHandler):
                 if v:
                     ret.append("-%s" % a)
                     ret.append("%s" % v)
+
+            if a in ['fda', 'fdb' ]:
+                if v:
+                    if not os.path.isfile(v):
+                        raise VmError("Floppy file %s does not exist." % v)
             log.debug("args: %s, val: %s" % (a,v))
 
         # Handle disk/network related options
diff -r 5fdaf8842e79 -r b2f2b7738aa2 xen/arch/x86/hvm/i8259.c
--- a/xen/arch/x86/hvm/i8259.c    Fri Oct 13 18:08:25 2006 +0100
+++ b/xen/arch/x86/hvm/i8259.c    Sun Oct 15 09:56:01 2006 +0100
@@ -498,19 +498,19 @@ void pic_init(struct hvm_virpic *s, void
 
 static int intercept_pic_io(ioreq_t *p)
 {
-    struct hvm_virpic  *pic;
-    struct vcpu *v = current;
+    struct hvm_virpic *pic;
     uint32_t data;
     unsigned long flags;
-    
-    if ( p->size != 1 || p->count != 1) {
+
+    if ( p->size != 1 || p->count != 1 ) {
         printk("PIC_IO wrong access size %d!\n", (int)p->size);
         return 1;
     }
-    pic = &v->domain->arch.hvm_domain.vpic;
-    if ( p->dir == 0 ) {
-        if (p->pdata_valid)
-            (void)hvm_copy_from_guest_virt(
+
+    pic = &current->domain->arch.hvm_domain.vpic;
+    if ( p->dir == IOREQ_WRITE ) {
+        if ( p->pdata_valid )
+            (void)hvm_copy_from_guest_phys(
                 &data, (unsigned long)p->u.pdata, p->size);
         else
             data = p->u.data;
@@ -524,10 +524,10 @@ static int intercept_pic_io(ioreq_t *p)
         data = pic_ioport_read(
             (void*)&pic->pics[p->addr>>7], (uint32_t) p->addr);
         spin_unlock_irqrestore(&pic->lock, flags);
-        if (p->pdata_valid)
-            (void)hvm_copy_to_guest_virt(
+        if ( p->pdata_valid )
+            (void)hvm_copy_to_guest_phys(
                 (unsigned long)p->u.pdata, &data, p->size);
-        else 
+        else
             p->u.data = (u64)data;
     }
     return 1;
@@ -535,42 +535,41 @@ static int intercept_pic_io(ioreq_t *p)
 
 static int intercept_elcr_io(ioreq_t *p)
 {
-    struct hvm_virpic  *s;
-    struct vcpu *v = current;
+    struct hvm_virpic *s;
     uint32_t data;
     unsigned long flags;
-    
+
     if ( p->size != 1 || p->count != 1 ) {
         printk("PIC_IO wrong access size %d!\n", (int)p->size);
         return 1;
     }
-    s = &v->domain->arch.hvm_domain.vpic;
-    if ( p->dir == 0 ) {
-        if (p->pdata_valid)
-            (void)hvm_copy_from_guest_virt(
+    s = &current->domain->arch.hvm_domain.vpic;
+    if ( p->dir == IOREQ_WRITE ) {
+        if ( p->pdata_valid )
+            (void)hvm_copy_from_guest_phys(
                 &data, (unsigned long)p->u.pdata, p->size);
         else
             data = p->u.data;
         spin_lock_irqsave(&s->lock, flags);
         elcr_ioport_write((void*)&s->pics[p->addr&1],
                 (uint32_t) p->addr, (uint32_t)( data & 0xff));
-        get_sp(current->domain)->sp_global.pic_elcr = 
+        get_sp(current->domain)->sp_global.pic_elcr =
             s->pics[0].elcr | ((u16)s->pics[1].elcr << 8);
         spin_unlock_irqrestore(&s->lock, flags);
     }
     else {
         data = (u64) elcr_ioport_read(
                 (void*)&s->pics[p->addr&1], (uint32_t) p->addr);
-        if (p->pdata_valid)
-            (void)hvm_copy_to_guest_virt(
+        if ( p->pdata_valid )
+            (void)hvm_copy_to_guest_phys(
                 (unsigned long)p->u.pdata, &data, p->size);
-        else 
+        else
             p->u.data = (u64)data;
-        
     }
     return 1;
 }
+
 void register_pic_io_hook (void)
 {
     register_portio_handler(0x20, 2, intercept_pic_io);
diff -r 5fdaf8842e79 -r b2f2b7738aa2 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c    Fri Oct 13 18:08:25 2006 +0100
+++ b/xen/arch/x86/hvm/platform.c    Sun Oct 15 09:56:01 2006 +0100
@@ -730,13 +730,13 @@ void send_pio_req(struct cpu_user_regs *
     vcpu_iodata_t *vio;
     ioreq_t *p;
 
-    if (size == 0 || count == 0) {
+    if ( size == 0 || count == 0 ) {
         printf("null pio request? port %lx, count %lx, size %d, value %lx, dir %d, pvalid %d.\n",
                port, count, size, value, dir, pvalid);
     }
 
     vio = get_vio(v->domain, v->vcpu_id);
-    if (vio == NULL) {
+    if ( vio == NULL ) {
         printk("bad shared page: %lx\n", (unsigned long) vio);
         domain_crash_synchronous();
     }
@@ -745,6 +745,7 @@ void send_pio_req(struct cpu_user_regs *
     if ( p->state != STATE_INVALID )
         printk("WARNING: send pio with something already pending (%d)?\n",
                p->state);
+
     p->dir = dir;
     p->pdata_valid = pvalid;
 
@@ -752,19 +753,20 @@ void send_pio_req(struct cpu_user_regs *
     p->size = size;
     p->addr = port;
     p->count = count;
-    p->df = regs->eflags & EF_DF ? 1 : 0;
+    p->df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
 
     p->io_count++;
 
-    if (pvalid) {
-        if (hvm_paging_enabled(current))
-            p->u.data = shadow_gva_to_gpa(current, value);
+    if ( pvalid ) /* get physical address of data */
+    {
+        if ( hvm_paging_enabled(current) )
+            p->u.pdata = (void *)shadow_gva_to_gpa(current, value);
         else
-            p->u.pdata = (void *) value; /* guest VA == guest PA */
-    } else
+            p->u.pdata = (void *)value; /* guest VA == guest PA */
+    } else if ( dir == IOREQ_WRITE )
         p->u.data = value;
 
-    if (hvm_portio_intercept(p)) {
+    if ( hvm_portio_intercept(p) ) {
         p->state = STATE_IORESP_READY;
         hvm_io_assist(v);
         return;
diff -r 5fdaf8842e79 -r b2f2b7738aa2 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c    Fri Oct 13 18:08:25 2006 +0100
+++ b/xen/arch/x86/mm/shadow/common.c    Sun Oct 15 09:56:01 2006 +0100
@@ -2681,7 +2681,7 @@ int shadow_test_enable(struct domain *d)
     if ( shadow_mode_enabled(d) )
     {
         SHADOW_ERROR("Don't support enabling test mode"
-                     "on already shadowed doms\n");
+                     " on already shadowed doms\n");
         ret = -EINVAL;
         goto out;
     }
@@ -2754,7 +2754,7 @@ static int shadow_log_dirty_enable(struc
     if ( shadow_mode_enabled(d) )
    {
         SHADOW_ERROR("Don't (yet) support enabling log-dirty"
-                     "on already shadowed doms\n");
+                     " on already shadowed doms\n");
         ret = -EINVAL;
         goto out;
     }
diff -r 5fdaf8842e79 -r b2f2b7738aa2 xen/arch/x86/oprofile/xenoprof.c
--- a/xen/arch/x86/oprofile/xenoprof.c    Fri Oct 13 18:08:25 2006 +0100
+++ b/xen/arch/x86/oprofile/xenoprof.c    Sun Oct 15 09:56:01 2006 +0100
@@ -12,6 +12,9 @@
 
 /* Limit amount of pages used for shared buffer (per domain) */
 #define MAX_OPROF_SHARED_PAGES 32
+
+/* Lock protecting the following global state */
+static spinlock_t xenoprof_lock = SPIN_LOCK_UNLOCKED;
 
 struct domain *active_domains[MAX_OPROF_DOMAINS];
 int active_ready[MAX_OPROF_DOMAINS];
@@ -122,6 +125,7 @@ int alloc_xenoprof_struct(struct domain
 {
     struct vcpu *v;
     int nvcpu, npages, bufsize, max_bufsize;
+    unsigned max_max_samples;
     int i;
 
     d->xenoprof = xmalloc(struct xenoprof);
@@ -139,17 +143,15 @@ int alloc_xenoprof_struct(struct domain
     for_each_vcpu ( d, v )
         nvcpu++;
 
-    /* reduce buffer size if necessary to limit pages allocated */
+    /* reduce max_samples if necessary to limit pages allocated */
+    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
+    max_max_samples = ( (max_bufsize - sizeof(struct xenoprof_buf)) /
+                        sizeof(struct event_log) ) + 1;
+    if ( (unsigned)max_samples > max_max_samples )
+        max_samples = max_max_samples;
+
     bufsize = sizeof(struct xenoprof_buf) +
         (max_samples - 1) * sizeof(struct event_log);
-    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
-    if ( bufsize > max_bufsize )
-    {
-        bufsize = max_bufsize;
-        max_samples = ( (max_bufsize - sizeof(struct xenoprof_buf)) /
-                        sizeof(struct event_log) ) + 1;
-    }
-
     npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;
 
     d->xenoprof->rawbuf = alloc_xenoprof_buf(is_passive ? dom0 : d, npages);
@@ -515,6 +517,8 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
         return -EPERM;
     }
 
+    spin_lock(&xenoprof_lock);
+
     switch ( op )
     {
     case XENOPROF_init:
@@ -540,23 +544,31 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
     case XENOPROF_set_active:
     {
         domid_t domid;
-        if ( xenoprof_state != XENOPROF_IDLE )
-            return -EPERM;
-        if ( copy_from_guest(&domid, arg, 1) )
-            return -EFAULT;
+        if ( xenoprof_state != XENOPROF_IDLE ) {
+            ret = -EPERM;
+            break;
+        }
+        if ( copy_from_guest(&domid, arg, 1) ) {
+            ret = -EFAULT;
+            break;
+        }
         ret = add_active_list(domid);
         break;
     }
     case XENOPROF_set_passive:
     {
-        if ( xenoprof_state != XENOPROF_IDLE )
-            return -EPERM;
+        if ( xenoprof_state != XENOPROF_IDLE ) {
+            ret = -EPERM;
+            break;
+        }
         ret = add_passive_list(arg);
         break;
     }
     case XENOPROF_reserve_counters:
-        if ( xenoprof_state != XENOPROF_IDLE )
-            return -EPERM;
+        if ( xenoprof_state != XENOPROF_IDLE ) {
+            ret = -EPERM;
+            break;
+        }
         ret = nmi_reserve_counters();
         if ( !ret )
             xenoprof_state = XENOPROF_COUNTERS_RESERVED;
@@ -565,16 +577,20 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
     case XENOPROF_counter:
     {
         struct xenoprof_counter counter;
-        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
-            return -EPERM;
-        if ( adomains == 0 )
-            return -EPERM;
-
-        if ( copy_from_guest(&counter, arg, 1) )
-            return -EFAULT;
-
-        if ( counter.ind > OP_MAX_COUNTER )
-            return -E2BIG;
+        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED || adomains == 0) {
+            ret = -EPERM;
+            break;
+        }
+
+        if ( copy_from_guest(&counter, arg, 1) ) {
+            ret = -EFAULT;
+            break;
+        }
+
+        if ( counter.ind > OP_MAX_COUNTER ) {
+            ret = -E2BIG;
+            break;
+        }
 
         counter_config[counter.ind].count = (unsigned long) counter.count;
         counter_config[counter.ind].enabled = (unsigned long) counter.enabled;
@@ -588,8 +604,10 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
     }
 
     case XENOPROF_setup_events:
-        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
-            return -EPERM;
+        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED ) {
+            ret = -EPERM;
+            break;
+        }
         ret = nmi_setup_events();
         if ( !ret )
             xenoprof_state = XENOPROF_READY;
@@ -622,16 +640,20 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
         break;
 
     case XENOPROF_stop:
-        if ( xenoprof_state != XENOPROF_PROFILING )
-            return -EPERM;
+        if ( xenoprof_state != XENOPROF_PROFILING ) {
+            ret = -EPERM;
+            break;
+        }
         nmi_stop();
         xenoprof_state = XENOPROF_READY;
         break;
 
     case XENOPROF_disable_virq:
         if ( (xenoprof_state == XENOPROF_PROFILING) &&
-             (is_active(current->domain)) )
-            return -EPERM;
+             (is_active(current->domain)) ) {
+            ret = -EPERM;
+            break;
+        }
         ret = reset_active(current->domain);
         break;
 
@@ -663,6 +685,8 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
         ret = -EINVAL;
     }
 
+    spin_unlock(&xenoprof_lock);
+
    if ( ret < 0 )
        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
               op, current->domain->domain_id, ret);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
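
For context on the xenoprof.c sizing hunk above: the new code clamps the caller-supplied max_samples before computing the per-vcpu buffer size, so the total allocation never exceeds MAX_OPROF_SHARED_PAGES pages. A standalone sketch of that arithmetic only (the struct sizes, vcpu count and requested sample count below are illustrative placeholders, not the real Xen definitions):

    #include <stdio.h>

    #define PAGE_SIZE               4096
    #define MAX_OPROF_SHARED_PAGES  32

    /* Placeholder sizes -- the real struct xenoprof_buf and struct event_log
     * in Xen differ; only the clamping arithmetic is illustrated here. */
    #define SIZEOF_XENOPROF_BUF     64
    #define SIZEOF_EVENT_LOG        16

    int main(void)
    {
        unsigned int nvcpu = 4;            /* vcpus in the domain (example) */
        unsigned int max_samples = 65536;  /* requested sample count (example) */

        /* Largest per-vcpu buffer that still fits in the shared-page budget. */
        unsigned int max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
        unsigned int max_max_samples =
            (max_bufsize - SIZEOF_XENOPROF_BUF) / SIZEOF_EVENT_LOG + 1;

        if ( max_samples > max_max_samples )
            max_samples = max_max_samples;   /* clamp the request first */

        unsigned int bufsize = SIZEOF_XENOPROF_BUF +
                               (max_samples - 1) * SIZEOF_EVENT_LOG;
        unsigned int npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

        printf("max_samples clamped to %u, bufsize %u, npages %u (<= %u)\n",
               max_samples, bufsize, npages, MAX_OPROF_SHARED_PAGES);
        return 0;
    }

With these example numbers (4 vcpus, 4 KiB pages, a 32-page budget), max_samples is clamped to 2045 and npages comes out at exactly 32, so the page limit holds by construction rather than by shrinking an already-computed bufsize.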
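
The do_xenoprof_op() hunks follow from the newly added xenoprof_lock: each early "return -EPERM/-EFAULT/-E2BIG" becomes "ret = ...; break;" so control always reaches the single spin_unlock() at the bottom of the function. A minimal standalone sketch of that single-exit locking pattern, using a pthread mutex as a stand-in for the hypervisor's spinlock (the function and state names here are illustrative, not Xen's):

    #include <errno.h>
    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t op_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for xenoprof_lock */
    static int op_state;                                        /* stand-in for xenoprof_state */

    /* Every failure path sets ret and breaks out of the switch, so the
     * function always falls through to the single unlock before returning. */
    static int do_op(int op)
    {
        int ret = 0;

        pthread_mutex_lock(&op_lock);

        switch ( op )
        {
        case 0:                      /* e.g. "reserve" */
            if ( op_state != 0 ) {
                ret = -EPERM;        /* was "return -EPERM" before the lock existed */
                break;
            }
            op_state = 1;
            break;
        case 1:                      /* e.g. "release" */
            op_state = 0;
            break;
        default:
            ret = -EINVAL;
        }

        pthread_mutex_unlock(&op_lock);  /* single exit point: lock always released */
        return ret;
    }

    int main(void)
    {
        printf("first reserve:  %d\n", do_op(0));   /* 0 */
        printf("second reserve: %d\n", do_op(0));   /* -EPERM */
        printf("release:        %d\n", do_op(1));   /* 0 */
        return 0;
    }

Returning directly from inside the switch, as the old code did, would leave the lock held and block the next caller, which is why the patch rewrites every early return in that function.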