
Re: [Xen-devel] [Qemu-devel] [PATCH v3 6/6] qemu_calculate_timeout: increase minimum timeout to 1h



On Fri, 10 Feb 2012, Paul Brook wrote:
> > +#ifdef CONFIG_SLIRP
> > +static inline void slirp_update_timeout(uint32_t *timeout)
> > +{
> > +    *timeout = MIN(1000, *timeout);
> > +}
> > +#else
> > +static inline void slirp_update_timeout(uint32_t *timeout) { }
> > +#endif
> 
> Shouldn't we be testing whether slirp is actually in use? I doubt many people 
> go to the effort of rebuilding without SLIRP support.
 
Yes, you are right. Also, since we only call slirp_update_timeout when
CONFIG_SLIRP is defined, there is no need for the !CONFIG_SLIRP dummy
version of the function.
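
For illustration, here is roughly what the call site looks like after
the patch below (a sketch, not the exact hunk): the call is already
wrapped in #ifdef CONFIG_SLIRP, so only the real definition in
slirp/slirp.c is required.

    #ifdef CONFIG_SLIRP
        /* only compiled when slirp support is built in, so no empty
         * !CONFIG_SLIRP fallback of slirp_update_timeout() is needed */
        slirp_update_timeout(&timeout);
        slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
    #endif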

---

commit 3a89477edc7e551c93b016940d2fdad9ebc22a84
Author: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Date:   Mon Feb 13 11:25:03 2012 +0000

    main_loop_wait: block indefinitely
    
    - remove qemu_calculate_timeout;
    
    - explicitly size timeout to uint32_t;
    
    - introduce slirp_update_timeout;
    
    - pass NULL as the timeout argument to select when timeout is the
    maximum value;
    
    Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
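
As a minimal sketch of the behavioural change (not part of the patch;
the helper name timeout_to_timeval is made up for illustration): the
timeout is now a uint32_t where UINT32_MAX means "no deadline", so
select() is given a NULL timeval and blocks until a file descriptor
becomes ready, while any smaller value is converted from milliseconds
to a struct timeval as before.

    #include <stdint.h>
    #include <sys/select.h>

    /* Hypothetical helper, for illustration only: map a millisecond
     * timeout onto the (struct timeval *) argument of select().
     * UINT32_MAX means "no timeout", i.e. block indefinitely. */
    static struct timeval *timeout_to_timeval(uint32_t timeout_ms,
                                              struct timeval *tv)
    {
        if (timeout_ms == UINT32_MAX) {
            return NULL;                          /* select() blocks forever */
        }
        tv->tv_sec = timeout_ms / 1000;           /* whole seconds */
        tv->tv_usec = (timeout_ms % 1000) * 1000; /* remainder in usec */
        return tv;
    }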

diff --git a/async.c b/async.c
index 332d511..ecdaf15 100644
--- a/async.c
+++ b/async.c
@@ -120,7 +120,7 @@ void qemu_bh_delete(QEMUBH *bh)
     bh->deleted = 1;
 }
 
-void qemu_bh_update_timeout(int *timeout)
+void qemu_bh_update_timeout(uint32_t *timeout)
 {
     QEMUBH *bh;
 
diff --git a/main-loop.c b/main-loop.c
index 692381c..4a105e9 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -366,7 +366,7 @@ void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
     }
 }
 
-static void os_host_main_loop_wait(int *timeout)
+static void os_host_main_loop_wait(uint32_t *timeout)
 {
     int ret, ret2, i;
     PollingEntry *pe;
@@ -410,7 +410,7 @@ static void os_host_main_loop_wait(int *timeout)
     *timeout = 0;
 }
 #else
-static inline void os_host_main_loop_wait(int *timeout)
+static inline void os_host_main_loop_wait(uint32_t *timeout)
 {
 }
 #endif
@@ -419,21 +419,17 @@ int main_loop_wait(int nonblocking)
 {
     fd_set rfds, wfds, xfds;
     int ret, nfds;
-    struct timeval tv;
-    int timeout;
+    struct timeval tv, *tvarg = NULL;
+    uint32_t timeout = UINT32_MAX;
 
     if (nonblocking) {
         timeout = 0;
     } else {
-        timeout = qemu_calculate_timeout();
         qemu_bh_update_timeout(&timeout);
     }
 
     os_host_main_loop_wait(&timeout);
 
-    tv.tv_sec = timeout / 1000;
-    tv.tv_usec = (timeout % 1000) * 1000;
-
     /* poll any events */
     /* XXX: separate device handlers from system ones */
     nfds = -1;
@@ -442,16 +438,23 @@ int main_loop_wait(int nonblocking)
     FD_ZERO(&xfds);
 
 #ifdef CONFIG_SLIRP
+    slirp_update_timeout(&timeout);
     slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
 #endif
     qemu_iohandler_fill(&nfds, &rfds, &wfds, &xfds);
     glib_select_fill(&nfds, &rfds, &wfds, &xfds, &tv);
 
+    if (timeout < UINT32_MAX) {
+        tvarg = &tv;
+        tv.tv_sec = timeout / 1000;
+        tv.tv_usec = (timeout % 1000) * 1000;
+    }
+
     if (timeout > 0) {
         qemu_mutex_unlock_iothread();
     }
 
-    ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
+    ret = select(nfds + 1, &rfds, &wfds, &xfds, tvarg);
 
     if (timeout > 0) {
         qemu_mutex_lock_iothread();
diff --git a/main-loop.h b/main-loop.h
index f971013..22c0dc9 100644
--- a/main-loop.h
+++ b/main-loop.h
@@ -352,6 +352,6 @@ void qemu_iohandler_poll(fd_set *readfds, fd_set *writefds, fd_set *xfds, int rc
 
 void qemu_bh_schedule_idle(QEMUBH *bh);
 int qemu_bh_poll(void);
-void qemu_bh_update_timeout(int *timeout);
+void qemu_bh_update_timeout(uint32_t *timeout);
 
 #endif
diff --git a/qemu-timer.c b/qemu-timer.c
index de20852..3e1ce08 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -821,8 +821,3 @@ fail:
     return err;
 }
 
-int qemu_calculate_timeout(void)
-{
-    return 1000;
-}
-
diff --git a/qemu-timer.h b/qemu-timer.h
index de17f3b..f1386d5 100644
--- a/qemu-timer.h
+++ b/qemu-timer.h
@@ -62,7 +62,6 @@ uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts);
 void qemu_run_all_timers(void);
 int qemu_alarm_pending(void);
 void configure_alarms(char const *opt);
-int qemu_calculate_timeout(void);
 void init_clocks(void);
 int init_timer_alarm(void);
 
diff --git a/qemu-tool.c b/qemu-tool.c
index 6b69668..76830b7 100644
--- a/qemu-tool.c
+++ b/qemu-tool.c
@@ -90,6 +90,10 @@ static void __attribute__((constructor)) init_main_loop(void)
     qemu_clock_enable(vm_clock, false);
 }
 
+void slirp_update_timeout(uint32_t *timeout)
+{
+}
+
 void slirp_select_fill(int *pnfds, fd_set *readfds,
                        fd_set *writefds, fd_set *xfds)
 {
diff --git a/slirp/libslirp.h b/slirp/libslirp.h
index 890fd86..77527ad 100644
--- a/slirp/libslirp.h
+++ b/slirp/libslirp.h
@@ -15,6 +15,7 @@ Slirp *slirp_init(int restricted, struct in_addr vnetwork,
                   struct in_addr vnameserver, void *opaque);
 void slirp_cleanup(Slirp *slirp);
 
+void slirp_update_timeout(uint32_t *timeout);
 void slirp_select_fill(int *pnfds,
                        fd_set *readfds, fd_set *writefds, fd_set *xfds);
 
diff --git a/slirp/slirp.c b/slirp/slirp.c
index 19d69eb..418f8d0 100644
--- a/slirp/slirp.c
+++ b/slirp/slirp.c
@@ -255,6 +255,13 @@ void slirp_cleanup(Slirp *slirp)
 #define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
 #define UPD_NFDS(x) if (nfds < (x)) nfds = (x)
 
+void slirp_update_timeout(uint32_t *timeout)
+{
+    if (!QTAILQ_EMPTY(&slirp_instances)) {
+        *timeout = MIN(1000, *timeout);
+    }
+}
+
 void slirp_select_fill(int *pnfds,
                        fd_set *readfds, fd_set *writefds, fd_set *xfds)
 {
