
[Xen-changelog] Add a new timer operation kill_timer()



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID cc2f35c83b4cbc48e540ddb2e36571ed05917b93
# Parent  d3279720963294702936ecb45d49cd4db8b5caaf
Add a new timer operation kill_timer(). Effectively the
'opposite' of init_timer(), it marks the end of a timer's
lifetime. After this call the timer will not be pending,
its callback handler will not be running, and future calls
to set_timer() will silently fail.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
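
For context, here is a minimal lifecycle sketch of the new call (not part of
the patch). The struct my_state, my_timer_fn and the NOW()/MILLISECS() expiry
arithmetic are assumptions for illustration only; the init_timer()/set_timer()
signatures follow the calls used elsewhere in this changeset.

    #include <xen/timer.h>
    #include <xen/time.h>
    #include <xen/lib.h>

    struct my_state {
        struct timer t;
        /* ... other per-object state ... */
    };

    static void my_timer_fn(void *data)
    {
        struct my_state *s = data;
        /* Handle expiry; may re-arm itself with set_timer(&s->t, ...). */
        (void)s;
    }

    static void my_state_start(struct my_state *s, unsigned int cpu)
    {
        init_timer(&s->t, my_timer_fn, s, cpu);   /* start of the timer's lifetime */
        set_timer(&s->t, NOW() + MILLISECS(10));  /* arm it                        */
    }

    static void my_state_stop(struct my_state *s)
    {
        kill_timer(&s->t);  /* end of lifetime: no longer pending, handler not
                               running on any CPU, and any later
                               set_timer(&s->t, ...) silently does nothing */
    }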

diff -r d32797209632 -r cc2f35c83b4c xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Thu Jan 12 23:16:07 2006
+++ b/xen/arch/x86/vmx.c        Fri Jan 13 00:03:44 2006
@@ -105,13 +105,11 @@
     destroy_vmcs(&v->arch.arch_vmx);
     free_monitor_pagetable(v);
     vpit = &v->domain->arch.vmx_platform.vmx_pit;
-    if ( active_timer(&(vpit->pit_timer)) )
-        stop_timer(&vpit->pit_timer);
-    if ( active_timer(&v->arch.arch_vmx.hlt_timer) )
-        stop_timer(&v->arch.arch_vmx.hlt_timer);
+    kill_timer(&vpit->pit_timer);
+    kill_timer(&v->arch.arch_vmx.hlt_timer);
     if ( vmx_apic_support(v->domain) && (VLAPIC(v) != NULL) )
     {
-        stop_timer(&VLAPIC(v)->vlapic_timer);
+        kill_timer(&VLAPIC(v)->vlapic_timer);
         xfree(VLAPIC(v));
     }
 }
diff -r d32797209632 -r cc2f35c83b4c xen/common/sched_bvt.c
--- a/xen/common/sched_bvt.c    Thu Jan 12 23:16:07 2006
+++ b/xen/common/sched_bvt.c    Fri Jan 13 00:03:44 2006
@@ -45,9 +45,9 @@
                                              limits*/
     s32                 warp_value;       /* virtual time warp */
     s_time_t            warpl;            /* warp limit */
-    struct timer     warp_timer;       /* deals with warpl */
+    struct timer        warp_timer;       /* deals with warpl */
     s_time_t            warpu;            /* unwarp time requirement */
-    struct timer     unwarp_timer;     /* deals with warpu */
+    struct timer        unwarp_timer;     /* deals with warpu */
 
     struct bvt_vcpu_info vcpu_inf[MAX_VIRT_CPUS];
 };
@@ -168,6 +168,7 @@
 static int bvt_alloc_task(struct vcpu *v)
 {
     struct domain *d = v->domain;
+    struct bvt_dom_info *inf;
 
     if ( (d->sched_priv == NULL) )
     {
@@ -176,32 +177,12 @@
         memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
     }
 
-    v->sched_priv = &BVT_INFO(d)->vcpu_inf[v->vcpu_id];
-
-    BVT_INFO(d)->vcpu_inf[v->vcpu_id].inf = BVT_INFO(d);
-    BVT_INFO(d)->vcpu_inf[v->vcpu_id].vcpu = v;
-
-    return 0;
-}
-
-/*
- * Add and remove a domain
- */
-static void bvt_add_task(struct vcpu *v) 
-{
-    struct bvt_dom_info *inf = BVT_INFO(v->domain);
-    struct bvt_vcpu_info *einf = EBVT_INFO(v);
-    ASSERT(inf != NULL);
-    ASSERT(v   != NULL);
-
-    /* Allocate per-CPU context if this is the first domain to be added. */
-    if ( CPU_INFO(v->processor) == NULL )
-    {
-        schedule_data[v->processor].sched_priv = xmalloc(struct bvt_cpu_info);
-        BUG_ON(CPU_INFO(v->processor) == NULL);
-        INIT_LIST_HEAD(RUNQUEUE(v->processor));
-        CPU_SVT(v->processor) = 0;
-    }
+    inf = BVT_INFO(d);
+
+    v->sched_priv = &inf->vcpu_inf[v->vcpu_id];
+
+    inf->vcpu_inf[v->vcpu_id].inf  = BVT_INFO(d);
+    inf->vcpu_inf[v->vcpu_id].vcpu = v;
 
     if ( v->vcpu_id == 0 )
     {
@@ -218,7 +199,28 @@
         init_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, v->processor);
     }
 
-    einf->vcpu = v;
+    return 0;
+}
+
+/*
+ * Add and remove a domain
+ */
+static void bvt_add_task(struct vcpu *v) 
+{
+    struct bvt_dom_info *inf = BVT_INFO(v->domain);
+    struct bvt_vcpu_info *einf = EBVT_INFO(v);
+
+    ASSERT(inf != NULL);
+    ASSERT(v   != NULL);
+
+    /* Allocate per-CPU context if this is the first domain to be added. */
+    if ( CPU_INFO(v->processor) == NULL )
+    {
+        schedule_data[v->processor].sched_priv = xmalloc(struct bvt_cpu_info);
+        BUG_ON(CPU_INFO(v->processor) == NULL);
+        INIT_LIST_HEAD(RUNQUEUE(v->processor));
+        CPU_SVT(v->processor) = 0;
+    }
 
     if ( is_idle_vcpu(v) )
     {
@@ -305,8 +307,14 @@
  */
 static void bvt_free_task(struct domain *d)
 {
-    ASSERT(d->sched_priv != NULL);
-    xfree(d->sched_priv);
+    struct bvt_dom_info *inf = BVT_INFO(d);
+
+    ASSERT(inf != NULL);
+
+    kill_timer(&inf->warp_timer);
+    kill_timer(&inf->unwarp_timer);
+
+    xfree(inf);
 }
 
 /* Control the scheduler. */
diff -r d32797209632 -r cc2f35c83b4c xen/common/schedule.c
--- a/xen/common/schedule.c     Thu Jan 12 23:16:07 2006
+++ b/xen/common/schedule.c     Fri Jan 13 00:03:44 2006
@@ -156,7 +156,7 @@
 
 void sched_rem_domain(struct vcpu *v) 
 {
-    stop_timer(&v->timer);
+    kill_timer(&v->timer);
     SCHED_OP(rem_task, v);
     TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
 }
@@ -462,7 +462,7 @@
 /* Periodic tick timer: send timer event to current domain */
 static void t_timer_fn(void *unused)
 {
-    struct vcpu  *v  = current;
+    struct vcpu  *v   = current;
     unsigned int  cpu = smp_processor_id();
 
     schedule_data[cpu].tick++;
diff -r d32797209632 -r cc2f35c83b4c xen/common/timer.c
--- a/xen/common/timer.c        Thu Jan 12 23:16:07 2006
+++ b/xen/common/timer.c        Fri Jan 13 00:03:44 2006
@@ -29,6 +29,7 @@
 struct timers {
     spinlock_t     lock;
     struct timer **heap;
+    struct timer  *running;
 } __cacheline_aligned;
 
 struct timers timers[NR_CPUS];
@@ -167,11 +168,11 @@
     unsigned long flags;
 
     spin_lock_irqsave(&timers[cpu].lock, flags);
-    ASSERT(timer != NULL);
     if ( active_timer(timer) )
         __stop_timer(timer);
     timer->expires = expires;
-    __add_timer(timer);
+    if ( likely(!timer->killed) )
+        __add_timer(timer);
     spin_unlock_irqrestore(&timers[cpu].lock, flags);
 }
 
@@ -182,10 +183,28 @@
     unsigned long flags;
 
     spin_lock_irqsave(&timers[cpu].lock, flags);
-    ASSERT(timer != NULL);
     if ( active_timer(timer) )
         __stop_timer(timer);
     spin_unlock_irqrestore(&timers[cpu].lock, flags);
+}
+
+
+void kill_timer(struct timer *timer)
+{
+    int           cpu = timer->cpu;
+    unsigned long flags;
+
+    BUG_ON(timers[cpu].running == timer);
+
+    spin_lock_irqsave(&timers[cpu].lock, flags);
+    if ( active_timer(timer) )
+        __stop_timer(timer);
+    timer->killed = 1;
+    spin_unlock_irqrestore(&timers[cpu].lock, flags);
+
+    for_each_online_cpu ( cpu )
+        while ( timers[cpu].running == timer )
+            cpu_relax();
 }
 
 
@@ -208,19 +227,20 @@
         {
             remove_entry(heap, t);
 
+            timers[cpu].running = t;
+
             fn   = t->function;
             data = t->data;
 
-            if ( fn != NULL )
-            {
-                spin_unlock_irq(&timers[cpu].lock);
-                (*fn)(data);
-                spin_lock_irq(&timers[cpu].lock);
-            }
+            spin_unlock_irq(&timers[cpu].lock);
+            (*fn)(data);
+            spin_lock_irq(&timers[cpu].lock);
 
             /* Heap may have grown while the lock was released. */
             heap = timers[cpu].heap;
         }
+
+        timers[cpu].running = NULL;
     }
     while ( !reprogram_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );
 
diff -r d32797209632 -r cc2f35c83b4c xen/include/xen/timer.h
--- a/xen/include/xen/timer.h   Thu Jan 12 23:16:07 2006
+++ b/xen/include/xen/timer.h   Fri Jan 13 00:03:44 2006
@@ -22,6 +22,8 @@
     void         *data;
     /* Timer-heap offset. */
     unsigned int  heap_offset;
+    /* Has this timer been killed (cannot be activated)? */
+    int           killed;
 };
 
 /*
@@ -64,6 +66,13 @@
 extern void stop_timer(struct timer *timer);
 
 /*
+ * Deactivate a timer and prevent it from being re-set (future calls to
+ * set_timer will silently fail). When this function returns it is guaranteed
+ * that the timer callback handler is not running on any CPU.
+ */
+extern void kill_timer(struct timer *timer);
+
+/*
  * Initialisation. Must be called before any other timer function.
  */
 extern void timer_init(void);
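
A note on the guarantee documented in the new timer.h comment: the per-CPU
'running' field added in timer.c is what lets kill_timer() spin until no CPU
is still inside the timer's handler, so the caller may free the memory that
contains the timer as soon as kill_timer() returns (exactly what
bvt_free_task() now does). The BUG_ON(timers[cpu].running == timer) also
implies the one forbidden usage: killing a timer from inside its own handler.
A hedged caller-side sketch, reusing the hypothetical my_state/my_timer_fn
names from the sketch above:

    static void my_state_destroy(struct my_state *s)
    {
        /* kill_timer() returns only once my_timer_fn is guaranteed not to be
         * running on any CPU, so freeing the enclosing structure is safe. */
        kill_timer(&s->t);
        xfree(s);
    }

    /* Do NOT call kill_timer(&s->t) from within my_timer_fn itself: the
     * handler would be waiting for itself to finish, and the same-CPU case
     * trips the BUG_ON in kill_timer(). */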
