[Xen-changelog] Clean up xen-internal representation of per-vcpu physical cpu affinity
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 84cf56328ce0ced88cbcb5bae31039ec3b801fbf
# Parent  07306e35a5fc3db72937dff75a0546540603dcbc
Clean up xen-internal representation of per-vcpu physical cpu affinity.
Rename idle_task variables and macros to idle_domain.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/ia64/linux-xen/process-linux-xen.c
--- a/xen/arch/ia64/linux-xen/process-linux-xen.c      Thu Jan 5 11:19:12 2006
+++ b/xen/arch/ia64/linux-xen/process-linux-xen.c      Fri Jan 6 11:25:47 2006
@@ -241,7 +241,7 @@
     max_xtp();
     local_irq_disable();
-    idle_task_exit();
+    idle_domain_exit();
     ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
     /*
      * The above is a point of no-return, the processor is
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c       Thu Jan 5 11:19:12 2006
+++ b/xen/arch/ia64/vmx/vlsapic.c       Fri Jan 6 11:25:47 2006
@@ -218,7 +218,7 @@
  */
 void vtm_domain_out(VCPU *vcpu)
 {
-    if(!is_idle_task(vcpu->domain))
+    if(!is_idle_domain(vcpu->domain))
         rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
 }
@@ -230,7 +230,7 @@
 {
     vtime_t *vtm;
 
-    if(!is_idle_task(vcpu->domain)) {
+    if(!is_idle_domain(vcpu->domain)) {
         vtm=&(vcpu->arch.arch_vmx.vtm);
         vtm_interruption_update(vcpu, vtm);
     }
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Thu Jan 5 11:19:12 2006
+++ b/xen/arch/ia64/vmx/vmx_process.c   Fri Jan 6 11:25:47 2006
@@ -231,7 +231,7 @@
     struct domain *d = current->domain;
     struct vcpu *v = current;
     // FIXME: Will this work properly if doing an RFI???
-    if (!is_idle_task(d) ) { // always comes from guest
+    if (!is_idle_domain(d) ) { // always comes from guest
         extern void vmx_dorfirfi(void);
         struct pt_regs *user_regs = vcpu_regs(current);
         if (local_softirq_pending())
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c       Thu Jan 5 11:19:12 2006
+++ b/xen/arch/ia64/xen/process.c       Fri Jan 6 11:25:47 2006
@@ -252,7 +252,7 @@
     struct domain *d = current->domain;
     struct vcpu *v = current;
     // FIXME: Will this work properly if doing an RFI???
-    if (!is_idle_task(d) && user_mode(regs)) {
+    if (!is_idle_domain(d) && user_mode(regs)) {
         //vcpu_poke_timer(v);
         if (vcpu_deliverable_interrupts(v))
             reflect_extint(regs);
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Thu Jan 5 11:19:12 2006
+++ b/xen/arch/ia64/xen/vcpu.c  Fri Jan 6 11:25:47 2006
@@ -1085,7 +1085,7 @@
     /* gloss over the wraparound problem for now... we know it exists
      * but it doesn't matter right now */
-    if (is_idle_task(vcpu->domain)) {
+    if (is_idle_domain(vcpu->domain)) {
 //        printf("****** vcpu_set_next_timer called during idle!!\n");
         vcpu_safe_set_itm(s);
         return;
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c       Thu Jan 5 11:19:12 2006
+++ b/xen/arch/ia64/xen/xenmisc.c       Fri Jan 6 11:25:47 2006
@@ -320,7 +320,7 @@
     ia64_set_iva(&ia64_ivt);
     ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                  VHPT_ENABLED);
-    if (!is_idle_task(current->domain)) {
+    if (!is_idle_domain(current->domain)) {
         load_region_regs(current);
         vcpu_load_kernel_regs(current);
         if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Thu Jan 5 11:19:12 2006
+++ b/xen/arch/ia64/xen/xensetup.c      Fri Jan 6 11:25:47 2006
@@ -26,7 +26,7 @@
 char saved_command_line[COMMAND_LINE_SIZE];
 
-struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };
+struct vcpu *idle_domain[NR_CPUS] = { &idle0_vcpu };
 
 cpumask_t cpu_present_map;
@@ -382,8 +382,7 @@
         panic("Could not set up DOM0 guest OS\n");
 
     /* PIN domain0 on CPU 0. */
-    dom0->vcpu[0]->cpumap=1;
-    set_bit(_VCPUF_cpu_pinned, &dom0->vcpu[0]->vcpu_flags);
+    dom0->vcpu[0]->cpumask = cpumask_of_cpu(0);
 
 #ifdef CLONE_DOMAIN0
     {
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/ia64/xen/xentime.c
--- a/xen/arch/ia64/xen/xentime.c       Thu Jan 5 11:19:12 2006
+++ b/xen/arch/ia64/xen/xentime.c       Fri Jan 6 11:25:47 2006
@@ -127,7 +127,7 @@
             vcpu_wake(dom0->vcpu[0]);
         }
     }
-    if (!is_idle_task(current->domain)) {
+    if (!is_idle_domain(current->domain)) {
         if (vcpu_timer_expired(current)) {
             vcpu_pend_timer(current);
             // ensure another timer interrupt happens even if domain doesn't
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu Jan 5 11:19:12 2006
+++ b/xen/arch/x86/domain.c     Fri Jan 6 11:25:47 2006
@@ -51,12 +51,12 @@
 } __cacheline_aligned;
 static struct percpu_ctxt percpu_ctxt[NR_CPUS];
 
-static void continue_idle_task(struct vcpu *v)
+static void continue_idle_domain(struct vcpu *v)
 {
     reset_stack_and_jump(idle_loop);
 }
 
-static void continue_nonidle_task(struct vcpu *v)
+static void continue_nonidle_domain(struct vcpu *v)
 {
     reset_stack_and_jump(ret_from_intr);
 }
@@ -92,10 +92,10 @@
 {
     struct vcpu *v = current;
 
-    ASSERT(is_idle_task(v->domain));
+    ASSERT(is_idle_domain(v->domain));
     percpu_ctxt[smp_processor_id()].curr_vcpu = v;
     cpu_set(smp_processor_id(), v->domain->cpumask);
-    v->arch.schedule_tail = continue_idle_task;
+    v->arch.schedule_tail = continue_idle_domain;
 
     reset_stack_and_jump(idle_loop);
 }
@@ -259,7 +259,7 @@
     int i;
 #endif
 
-    if ( is_idle_task(d) )
+    if ( is_idle_domain(d) )
         return 0;
 
     d->arch.ioport_caps =
@@ -276,11 +276,10 @@
             return rc;
     }
 
-    v->arch.schedule_tail = continue_nonidle_task;
+    v->arch.schedule_tail = continue_nonidle_domain;
 
     memset(d->shared_info, 0, PAGE_SIZE);
     v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
-    v->cpumap = CPUMAP_RUNANYWHERE;
     SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
 
     pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
@@ -705,7 +704,7 @@
     struct vcpu *p = percpu_ctxt[cpu].curr_vcpu;
     struct vcpu *n = current;
 
-    if ( !is_idle_task(p->domain) )
+    if ( !is_idle_domain(p->domain) )
     {
         memcpy(&p->arch.guest_context.user_regs,
                stack_regs,
@@ -714,7 +713,7 @@
         save_segments(p);
     }
 
-    if ( !is_idle_task(n->domain) )
+    if ( !is_idle_domain(n->domain) )
     {
         memcpy(stack_regs,
               &n->arch.guest_context.user_regs,
@@ -767,7 +766,8 @@
 
     set_current(next);
 
-    if ( (percpu_ctxt[cpu].curr_vcpu != next) && !is_idle_task(next->domain) )
+    if ( (percpu_ctxt[cpu].curr_vcpu != next) &&
+         !is_idle_domain(next->domain) )
     {
         __context_switch();
         percpu_ctxt[cpu].context_not_finalised = 1;
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/x86/idle0_task.c
--- a/xen/arch/x86/idle0_task.c Thu Jan 5 11:19:12 2006
+++ b/xen/arch/x86/idle0_task.c Fri Jan 6 11:25:47 2006
@@ -11,6 +11,7 @@
 
 struct vcpu idle0_vcpu = {
     processor:   0,
+    cpu_affinity:CPU_MASK_CPU0,
     domain:      &idle0_domain
 };
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Thu Jan 5 11:19:12 2006
+++ b/xen/arch/x86/setup.c      Fri Jan 6 11:25:47 2006
@@ -92,7 +92,7 @@
 #endif
 EXPORT_SYMBOL(mmu_cr4_features);
 
-struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };
+struct vcpu *idle_domain[NR_CPUS] = { &idle0_vcpu };
 
 int acpi_disabled;
diff -r 07306e35a5fc -r 84cf56328ce0 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Thu Jan 5 11:19:12 2006
+++ b/xen/arch/x86/smpboot.c    Fri Jan 6 11:25:47 2006
@@ -435,7 +435,7 @@
     extern void percpu_traps_init(void);
 
-    set_current(idle_task[cpu]);
+    set_current(idle_domain[cpu]);
     set_processor_id(cpu);
 
     percpu_traps_init();
@@ -773,7 +773,7 @@
     if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
         panic("failed 'createdomain' for CPU %d", cpu);
 
-    v = idle_task[cpu] = idle->vcpu[0];
+    v = idle_domain[cpu] = idle->vcpu[0];
 
     set_bit(_DOMF_idle_domain, &idle->domain_flags);
diff -r 07306e35a5fc -r 84cf56328ce0 xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c     Thu Jan 5 11:19:12 2006
+++ b/xen/common/dom0_ops.c     Fri Jan 6 11:25:47 2006
@@ -319,22 +319,14 @@
             break;
         }
 
-        v->cpumap = op->u.pincpudomain.cpumap;
-
-        if ( v->cpumap == CPUMAP_RUNANYWHERE )
-        {
-            clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
-        }
-        else
-        {
-            /* pick a new cpu from the usable map */
-            int new_cpu;
-            new_cpu = (int)find_first_set_bit(v->cpumap) % num_online_cpus();
-            vcpu_pause(v);
-            vcpu_migrate_cpu(v, new_cpu);
-            set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
-            vcpu_unpause(v);
-        }
+        memcpy(cpus_addr(v->cpu_affinity),
+               &op->u.pincpudomain.cpumap,
+               min((int)BITS_TO_LONGS(NR_CPUS),
+                   (int)sizeof(op->u.pincpudomain.cpumap)));
+
+        vcpu_pause(v);
+        vcpu_migrate_cpu(v, first_cpu(v->cpu_affinity));
+        vcpu_unpause(v);
 
         put_domain(d);
     }
@@ -506,7 +498,11 @@
         op->u.getvcpuinfo.running  = test_bit(_VCPUF_running, &v->vcpu_flags);
         op->u.getvcpuinfo.cpu_time = v->cpu_time;
         op->u.getvcpuinfo.cpu      = v->processor;
-        op->u.getvcpuinfo.cpumap   = v->cpumap;
+        op->u.getvcpuinfo.cpumap   = 0;
+        memcpy(&op->u.getvcpuinfo.cpumap,
+               cpus_addr(v->cpu_affinity),
+               min((int)BITS_TO_LONGS(NR_CPUS),
+                   (int)sizeof(op->u.getvcpuinfo.cpumap)));
         ret = 0;
         if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
diff -r 07306e35a5fc -r 84cf56328ce0 xen/common/domain.c
--- a/xen/common/domain.c       Thu Jan 5 11:19:12 2006
+++ b/xen/common/domain.c       Fri Jan 6 11:25:47 2006
@@ -51,7 +51,7 @@
     else
         set_bit(_DOMF_ctrl_pause, &d->domain_flags);
 
-    if ( !is_idle_task(d) &&
+    if ( !is_idle_domain(d) &&
          ((evtchn_init(d) != 0) || (grant_table_create(d) != 0)) )
         goto fail1;
@@ -68,7 +68,7 @@
          (arch_do_createdomain(v) != 0) )
         goto fail3;
 
-    if ( !is_idle_task(d) )
+    if ( !is_idle_domain(d) )
     {
         write_lock(&domlist_lock);
         pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
diff -r 07306e35a5fc -r 84cf56328ce0 xen/common/sched_bvt.c
--- a/xen/common/sched_bvt.c    Thu Jan 5 11:19:12 2006
+++ b/xen/common/sched_bvt.c    Fri Jan 6 11:25:47 2006
@@ -219,7 +219,7 @@
     einf->vcpu = v;
 
-    if ( is_idle_task(v->domain) )
+    if ( is_idle_domain(v->domain) )
     {
         einf->avt = einf->evt = ~0U;
         BUG_ON(__task_on_runqueue(v));
@@ -265,7 +265,7 @@
         ((einf->evt - curr_evt) / BVT_INFO(curr->domain)->mcu_advance) +
         ctx_allow;
 
-    if ( is_idle_task(curr->domain) || (einf->evt <= curr_evt) )
+    if ( is_idle_domain(curr->domain) || (einf->evt <= curr_evt) )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     else if ( schedule_data[cpu].s_timer.expires > r_time )
         set_ac_timer(&schedule_data[cpu].s_timer, r_time);
@@ -380,7 +380,7 @@
     ASSERT(prev_einf != NULL);
     ASSERT(__task_on_runqueue(prev));
 
-    if ( likely(!is_idle_task(prev->domain)) )
+    if ( likely(!is_idle_domain(prev->domain)) )
     {
         prev_einf->avt = calc_avt(prev, now);
         prev_einf->evt = calc_evt(prev, prev_einf->avt);
@@ -471,13 +471,13 @@
     }
 
     /* work out time for next run through scheduler */
-    if ( is_idle_task(next->domain) )
+    if ( is_idle_domain(next->domain) )
     {
         r_time = ctx_allow;
         goto sched_done;
     }
 
-    if ( (next_prime == NULL) || is_idle_task(next_prime->domain) )
+    if ( (next_prime == NULL) || is_idle_domain(next_prime->domain) )
     {
         /* We have only one runnable task besides the idle task. */
         r_time = 10 * ctx_allow; /* RN: random constant */
diff -r 07306e35a5fc -r 84cf56328ce0 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Thu Jan 5 11:19:12 2006
+++ b/xen/common/sched_sedf.c   Fri Jan 6 11:25:47 2006
@@ -384,7 +384,7 @@
     INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q]));
     INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q]));
 
-    if (!is_idle_task(d->domain)) {
+    if (!is_idle_domain(d->domain)) {
         extraq_check(d);
     } else {
         EDOM_INFO(d)->deadl_abs = 0;
@@ -711,7 +711,7 @@
     struct task_slice ret;
 
     /*idle tasks don't need any of the following stuf*/
-    if (is_idle_task(current->domain))
+    if (is_idle_domain(current->domain))
         goto check_waitq;
 
     /* create local state of the status of the domain, in order to avoid
@@ -797,7 +797,7 @@
 static void sedf_sleep(struct vcpu *d) {
     PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",d->domain->domain_id, d->vcpu_id);
 
-    if (is_idle_task(d->domain))
+    if (is_idle_domain(d->domain))
         return;
 
     EDOM_INFO(d)->status |= SEDF_ASLEEP;
@@ -1068,7 +1068,7 @@
 #define DOMAIN_IDLE      4
 static inline int get_run_type(struct vcpu* d) {
     struct sedf_vcpu_info* inf = EDOM_INFO(d);
-    if (is_idle_task(d->domain))
+    if (is_idle_domain(d->domain))
         return DOMAIN_IDLE;
     if (inf->status & EXTRA_RUN_PEN)
         return DOMAIN_EXTRA_PEN;
@@ -1126,7 +1126,7 @@
     PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->domain_id, d->vcpu_id);
 
-    if (unlikely(is_idle_task(d->domain)))
+    if (unlikely(is_idle_domain(d->domain)))
         return;
 
     if ( unlikely(__task_on_queue(d)) ) {
diff -r 07306e35a5fc -r 84cf56328ce0 xen/common/schedule.c
--- a/xen/common/schedule.c     Thu Jan 5 11:19:12 2006
+++ b/xen/common/schedule.c     Fri Jan 6 11:25:47 2006
@@ -100,7 +100,9 @@
     v->vcpu_id = vcpu_id;
     v->processor = cpu_id;
     atomic_set(&v->pausecnt, 0);
-    v->cpumap = CPUMAP_RUNANYWHERE;
+
+    v->cpu_affinity = is_idle_domain(d) ?
+        cpumask_of_cpu(cpu_id) : CPU_MASK_ALL;
 
     d->vcpu[vcpu_id] = v;
@@ -143,7 +145,7 @@
     /* Initialise the per-domain timer. */
     init_ac_timer(&v->timer, dom_timer_fn, v, v->processor);
 
-    if ( is_idle_task(d) )
+    if ( is_idle_domain(d) )
     {
         schedule_data[v->processor].curr = v;
         schedule_data[v->processor].idle = v;
@@ -428,7 +430,7 @@
     prev->wokenup = NOW();
 
 #if defined(WAKE_HISTO)
-    if ( !is_idle_task(next->domain) && next->wokenup )
+    if ( !is_idle_domain(next->domain) && next->wokenup )
     {
         ulong diff = (ulong)(now - next->wokenup);
         diff /= (ulong)MILLISECS(1);
@@ -438,7 +440,7 @@
     next->wokenup = (s_time_t)0;
 #elif defined(BLOCKTIME_HISTO)
     prev->lastdeschd = now;
-    if ( !is_idle_task(next->domain) )
+    if ( !is_idle_domain(next->domain) )
     {
         ulong diff = (ulong)((now - next->lastdeschd) / MILLISECS(10));
         if (diff <= BUCKETS-2) schedule_data[cpu].hist[diff]++;
@@ -449,7 +451,7 @@
     prev->sleep_tick = schedule_data[cpu].tick;
 
     /* Ensure that the domain has an up-to-date time base. */
-    if ( !is_idle_task(next->domain) )
+    if ( !is_idle_domain(next->domain) )
     {
         update_dom_time(next);
         if ( next->sleep_tick != schedule_data[cpu].tick )
@@ -471,7 +473,7 @@
 int idle_cpu(int cpu)
 {
     struct vcpu *p = schedule_data[cpu].curr;
-    return p == idle_task[cpu];
+    return p == idle_domain[cpu];
 }
@@ -497,7 +499,7 @@
     schedule_data[cpu].tick++;
 
-    if ( !is_idle_task(v->domain) )
+    if ( !is_idle_domain(v->domain) )
     {
         update_dom_time(v);
         send_guest_virq(v, VIRQ_TIMER);
@@ -531,8 +533,8 @@
         init_ac_timer(&t_timer[i], t_timer_fn, NULL, i);
     }
 
-    schedule_data[0].curr = idle_task[0];
-    schedule_data[0].idle = idle_task[0];
+    schedule_data[0].curr = idle_domain[0];
+    schedule_data[0].idle = idle_domain[0];
 
     for ( i = 0; schedulers[i] != NULL; i++ )
     {
@@ -546,10 +548,10 @@
 
     printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
 
-    rc = SCHED_OP(alloc_task, idle_task[0]);
+    rc = SCHED_OP(alloc_task, idle_domain[0]);
     BUG_ON(rc < 0);
 
-    sched_add_domain(idle_task[0]);
+    sched_add_domain(idle_domain[0]);
 }
diff -r 07306e35a5fc -r 84cf56328ce0 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Thu Jan 5 11:19:12 2006
+++ b/xen/include/xen/sched.h   Fri Jan 6 11:25:47 2006
@@ -51,8 +51,6 @@
 int  evtchn_init(struct domain *d);
 void evtchn_destroy(struct domain *d);
 
-#define CPUMAP_RUNANYWHERE 0xFFFFFFFF
-
 struct vcpu
 {
     int vcpu_id;
@@ -80,7 +78,7 @@
     atomic_t pausecnt;
 
-    cpumap_t cpumap; /* which cpus this domain can run on */
+    cpumask_t cpu_affinity;
 
     struct arch_vcpu arch;
 };
@@ -173,9 +171,9 @@
 extern struct domain idle0_domain;
 extern struct vcpu   idle0_vcpu;
 
-extern struct vcpu *idle_task[NR_CPUS];
+extern struct vcpu *idle_domain[NR_CPUS];
 #define IDLE_DOMAIN_ID   (0x7FFFU)
-#define is_idle_task(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
+#define is_idle_domain(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
 
 struct vcpu *alloc_vcpu(
     struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
@@ -364,17 +362,14 @@
  /* Currently running on a CPU? */
 #define _VCPUF_running         3
 #define VCPUF_running          (1UL<<_VCPUF_running)
- /* Disables auto-migration between CPUs. */
-#define _VCPUF_cpu_pinned      4
-#define VCPUF_cpu_pinned       (1UL<<_VCPUF_cpu_pinned)
  /* Domain migrated between CPUs. */
-#define _VCPUF_cpu_migrated    5
+#define _VCPUF_cpu_migrated    4
 #define VCPUF_cpu_migrated     (1UL<<_VCPUF_cpu_migrated)
  /* Initialization completed. */
-#define _VCPUF_initialised     6
+#define _VCPUF_initialised     5
 #define VCPUF_initialised      (1UL<<_VCPUF_initialised)
  /* VCPU is not-runnable */
-#define _VCPUF_down            7
+#define _VCPUF_down            6
 #define VCPUF_down             (1UL<<_VCPUF_down)
 
 /*
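
Editor's note on the affinity change above: the patch replaces the flat 32-bit cpumap_t (with its CPUMAP_RUNANYWHERE sentinel and the separate _VCPUF_cpu_pinned flag) with a cpumask_t cpu_affinity that is filled from the tool-supplied bitmap and consulted directly. The following is a minimal standalone sketch of that conversion, not Xen source: MY_NR_CPUS, my_cpumask_t and my_first_cpu() are illustrative stand-ins for Xen's NR_CPUS, cpumask_t and first_cpu(), and a little-endian bit layout is assumed (as on x86), so treat it as an approximation of what the new DOM0_PINCPUDOMAIN handler does rather than the hypervisor code itself.

/*
 * Standalone sketch: copy a narrow 32-bit cpumap into a wider CPU mask
 * and pick the first permitted CPU, mirroring the new pin-cpu logic.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MY_NR_CPUS 64                 /* assumed CPU count for the sketch */

typedef struct {
    uint64_t bits[(MY_NR_CPUS + 63) / 64];
} my_cpumask_t;

/* Return the lowest-numbered CPU in the mask, or MY_NR_CPUS if empty. */
static int my_first_cpu(const my_cpumask_t *m)
{
    for (int cpu = 0; cpu < MY_NR_CPUS; cpu++)
        if (m->bits[cpu / 64] & (1ULL << (cpu % 64)))
            return cpu;
    return MY_NR_CPUS;
}

int main(void)
{
    /* The tools still hand over a flat 32-bit map, e.g. "CPU 2 only". */
    uint32_t cpumap = 1u << 2;

    /* Copy the narrow map into the wider mask (bounded by the smaller of
     * the two sizes, much like the min()-bounded memcpy() in dom0_ops.c),
     * then "migrate" to the first permitted CPU.  A mask of all ones
     * simply means "run anywhere", so no separate pinned flag is needed. */
    my_cpumask_t affinity;
    size_t n = sizeof(cpumap) < sizeof(affinity.bits)
                   ? sizeof(cpumap) : sizeof(affinity.bits);

    memset(&affinity, 0, sizeof(affinity));
    memcpy(affinity.bits, &cpumap, n);

    printf("vcpu would be migrated to CPU %d\n", my_first_cpu(&affinity));
    return 0;
}

With this representation, pinning becomes a property of the mask itself: the DOM0_PINCPUDOMAIN handler no longer toggles _VCPUF_cpu_pinned, and the idle vcpus simply start with their affinity fixed to their own CPU via cpumask_of_cpu(cpu_id), exactly as the schedule.c hunk above sets up.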