[Xen-devel] [PATCH RFC 22/59] Use tsc for time rather than rumpkernel clock_gettime()
From: George Dunlap <george.dunlap@xxxxxxxxxx>

The rumpkernel clock_gettime() drifts drastically when the system is
overloaded.  Switch to using the TSC as a timesource instead:

* Use tsc_mode='native' in the guest config
* Read the host TSC frequency from /proc/cpuinfo when starting a run
* Pass it as an argument to the worker
* Implement now() with rdtsc

The results appear to remain accurate even under heavy overcommitment.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
 benchmark.go |  8 ++++++++
 run.go       | 45 +++++++++++++++++++++++++++++++++++++++++++++
 xenworker.go |  1 +
 3 files changed, 54 insertions(+)

diff --git a/benchmark.go b/benchmark.go
index b0b55c7..7ea9aaa 100644
--- a/benchmark.go
+++ b/benchmark.go
@@ -48,6 +48,14 @@ type WorkerParams struct {
 	Args []string
 }
 
+func (l *WorkerParams) SetkHZ(kHZ uint64) {
+	if l.Args[0] == "kHZ" {
+		l.Args[1] = fmt.Sprintf("%d", kHZ)
+	} else {
+		l.Args = append([]string{"kHZ", fmt.Sprintf("%d", kHZ)}, l.Args...)
+	}
+}
+
 type WorkerConfig struct {
 	Pool string
 }

diff --git a/run.go b/run.go
index 2a93405..4222001 100644
--- a/run.go
+++ b/run.go
@@ -23,6 +23,9 @@ import (
 	"os"
 	"os/signal"
 	"time"
+	"regexp"
+	"strconv"
+	"bufio"
 )
 
 type WorkerState struct {
@@ -106,9 +109,45 @@ func NewWorkerList(WorkerSets []WorkerSet, workerType int) (wl WorkerList, err e
 	return
 }
 
+var CpukHZ uint64
+
+func getCpuHz() (err error) {
+	if CpukHZ == 0 {
+		var cpuinfo *os.File
+		cpuinfo, err = os.Open("/proc/cpuinfo")
+		if err != nil {
+			return
+		}
+		re := regexp.MustCompile("^cpu MHz\\s*: ([0-9.]+)$")
+		scanner := bufio.NewScanner(cpuinfo)
+		for scanner.Scan() {
+			s := scanner.Text()
+			m := re.FindStringSubmatch(s)
+			if m != nil {
+				var MHZ float64
+				MHZ, err = strconv.ParseFloat(m[1], 64)
+				if err != nil {
+					return
+				}
+				CpukHZ = uint64(MHZ*1000)
+				break
+			}
+		}
+		if CpukHZ == 0 {
+			err = fmt.Errorf("Couldn't find cpu MHz")
+			return
+		} else {
+			fmt.Println("CpukHZ: ", CpukHZ)
+
+		}
+	}
+	return
+}
+
 func (run *BenchmarkRun) Run() (err error) {
 	for wsi := range run.WorkerSets {
 		run.WorkerSets[wsi].Config.PropagateFrom(run.WorkerConfig)
+		run.WorkerSets[wsi].Params.SetkHZ(CpukHZ)
 	}
 
 	Workers, err := NewWorkerList(run.WorkerSets, WorkerXen)
@@ -164,6 +203,12 @@
 }
 
 func (plan *BenchmarkPlan) Run() (err error) {
+
+	err = getCpuHz()
+	if err != nil {
+		return
+	}
+
 	for i := range plan.Runs {
 		r := &plan.Runs[i];
 		if ! r.Completed {

diff --git a/xenworker.go b/xenworker.go
index 4077e77..e98c970 100644
--- a/xenworker.go
+++ b/xenworker.go
@@ -95,6 +95,7 @@ func (w *XenWorker) Init(p WorkerParams, g WorkerConfig) (err error) {
 	fmt.Fprintf(cfg, "memory = 32\n")
 	fmt.Fprintf(cfg, "vcpus = 1\n")
 	fmt.Fprintf(cfg, "on_crash = 'destroy'\n")
+	fmt.Fprintf(cfg, "tsc_mode = 'native'\n")
 
 	if g.Pool != "" {
 		fmt.Fprintf(cfg, "pool = '%s'\n", g.Pool)
-- 
2.7.4
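[Editorial sketch: the commit message says the worker implements now() with
rdtsc, but that worker-side change is not part of this diff.  As a rough
illustration only -- rdtsc(), parsekHZ(), and now() below are hypothetical
names, not code from the actual worker; the only interface taken from the
patch is the leading "kHZ <value>" argument pair produced by
WorkerParams.SetkHZ() -- a Go implementation might look like this, assuming
a small amd64 assembly stub for RDTSC:]

package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// rdtsc returns the raw 64-bit timestamp counter.  Go has no RDTSC
// intrinsic, so the body must live in a companion assembly file
// (e.g. rdtsc_amd64.s); without it, this declaration will not link:
//
//	#include "textflag.h"
//	TEXT ·rdtsc(SB), NOSPLIT, $0-8
//		RDTSC
//		SHLQ	$32, DX
//		ORQ	DX, AX
//		MOVQ	AX, ret+0(FP)
//		RET
func rdtsc() uint64

// CpukHZ is the host TSC frequency in kHz, as delivered through the
// leading "kHZ <value>" arguments that SetkHZ() prepends.
var CpukHZ uint64

// parsekHZ strips a leading "kHZ <value>" pair from the argument list
// and records the frequency.
func parsekHZ(args []string) ([]string, error) {
	if len(args) < 2 || args[0] != "kHZ" {
		return args, fmt.Errorf("expected leading kHZ argument")
	}
	khz, err := strconv.ParseUint(args[1], 10, 64)
	if err != nil {
		return args, err
	}
	CpukHZ = khz
	return args[2:], nil
}

// now converts the TSC reading into a Duration since an arbitrary
// epoch.  Whole seconds and the sub-second remainder are converted
// separately so the intermediate multiplication cannot overflow uint64.
func now() time.Duration {
	tsc := rdtsc()
	hz := CpukHZ * 1000 // cycles per second
	s := tsc / hz
	ns := (tsc % hz) * 1000000 / CpukHZ
	return time.Duration(s)*time.Second + time.Duration(ns)
}

func main() {
	if _, err := parsekHZ(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	t0 := now()
	time.Sleep(100 * time.Millisecond)
	fmt.Println("elapsed:", now()-t0)
}

[Two caveats about a scheme like this: it depends on a constant-rate
(invariant) TSC, and now() yields only an arbitrary-epoch timestamp, so
just the difference between two readings is meaningful -- which is all a
benchmark needs.  With tsc_mode='native' the guest executes RDTSC directly
against the host TSC, which is presumably why the host frequency read from
/proc/cpuinfo can be passed straight into the guest worker.]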