[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [OSSTest PATCH v2 1/2] ts-cpupools: new test script



for smoke testing cpupools a bit. It tries to partition
a live host into two cpupools, trying out the following 3
schedulers for the new cpupool (one after the other):
 credit, credit2 and RTDS.

It also tries migrating a domain to the new cpupool
and then back to Pool-0.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: Juergen Gross <jgross@xxxxxxxx>
---
 ts-cpupools |  124 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 124 insertions(+)
 create mode 100755 ts-cpupools

diff --git a/ts-cpupools b/ts-cpupools
new file mode 100755
index 0000000..fe612e1
--- /dev/null
+++ b/ts-cpupools
@@ -0,0 +1,124 @@
+#!/usr/bin/perl -w
+# This is part of "osstest", an automated testing framework for Xen.
+# Copyright (C) 2009-2014 Citrix Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+use strict qw(vars);
+use DBI;
+use Osstest;
+use Osstest::TestSupport;
+
+tsreadconfig();
+
+# Arguments: host ident (default 'host') and guest ident (default 'debian').
+our ($whhost,$gn)= @ARGV;
+$whhost ||= 'host';
+$gn ||= 'debian';
+
+# $ho/$gho: host and guest handles used by all the target_cmd_* calls below.
+our ($ho,$gho) = ts_get_host_guest($whhost,$gn);
+#our $ho= selecthost(@ARGV);
+
+our $default_cpupool= "Pool-0";
+# Schedulers tried, one after the other, for the new cpupool.
+our @schedulers= ("credit","credit2","rtds");
+our @cpulist;   # pCPUs to put in the test cpupool (filled by prep_cpulist)
+our $nr_cpus;   # total pCPUs on the host (filled by check)
+
+# Sanity-check the host before testing: record its pCPU count and make
+# sure the only existing cpupool is the default one. Dies on any problem.
+sub check () {
+  my $out;
+
+  # Figure out the number of pCPUs of the host. We need to know
+  # that in order to decide with what pCPUs we want to create
+  # cpupools
+  my $xlinfo= target_cmd_output_root($ho, "xl info");
+  # Check the match succeeded before trusting $1.
+  $xlinfo =~ /nr_cpus\s*:\s([0-9]*)/
+    or die "Could not find nr_cpus in 'xl info' output";
+  $nr_cpus= $1;
+  logm("Found $nr_cpus pCPUs");
+  die "Too few pCPUs to test cpupools: $nr_cpus" if $nr_cpus < 2;
+
+  # We want only 1 cpupool to exist. 'xl cpupool-list' prints a header
+  # line plus one line per pool, so (after the trailing newline is
+  # stripped) the newline count equals the number of pools.
+  my $cppinfo= target_cmd_output_root($ho, "xl cpupool-list");
+  my $nr_cpupools= $cppinfo =~ tr/\n//;
+  logm("Found $nr_cpupools cpupools");
+  die "There already is more than one cpu pool!" if $nr_cpupools > 1;
+  # The one pool present must be the default one. Note: this needs
+  # 'unless' (dying when Pool-0 is *absent*) and /m so ^ can match at
+  # the start of the line after the header.
+  die "Non-default cpupool configuration detected"
+    unless $cppinfo =~ /^$default_cpupool\b/m;
+
+  $out= target_cmd_output_root($ho, "xl cpupool-list"); logm("$out");
+  $out= target_cmd_output_root($ho, "xl cpupool-list -c"); logm("$out");
+}
+
+# Populate @cpulist (if not already done) with the odd-numbered pCPUs;
+# these will be moved out of the default pool into the test cpupool.
+sub prep_cpulist () {
+  # 'defined @array' is deprecated (and a fatal error in modern perls);
+  # testing the array in boolean context is the correct emptiness check.
+  if (!@cpulist) {
+    # Valid pCPU indexes are 0 .. $nr_cpus-1 ('0..$nr_cpus' would add a
+    # non-existent pCPU whenever $nr_cpus is odd).
+    foreach my $cpu (0..$nr_cpus-1) {
+      next unless $cpu % 2;
+      push @cpulist, $cpu;
+    }
+  }
+}
+
+# Free the pCPUs in @cpulist from the default cpupool and write an xl
+# cpupool config file for a new pool using the given scheduler.
+sub prep ($) {
+  my ($sched)= @_;
+
+  # Remove the pCPUs from in $cpulist from the default cpupool
+  # (a pCPU must be free before it can be assigned to a new pool),
+  # building at the same time the cpus= list for the config file,
+  # e.g. ["1", "3", ] (xl tolerates the trailing comma).
+  my $cpustr= "[";
+  foreach my $cpu (@cpulist) {
+    target_cmd_root($ho,"xl cpupool-cpu-remove $default_cpupool $cpu");
+    $cpustr.= "\"$cpu\", ";
+  }
+  $cpustr.= "]";
+
+  logm("Creating config file for cpupool-osstest-$sched with cpus=$cpustr");
+  
target_putfilecontents_stash($ho,100,<<"END","/etc/xen/cpupool-osstest-$sched");
+name = "cpupool-osstest-$sched"
+sched=\"$sched\"
+cpus=$cpustr
+END
+}
+
+check();
+prep_cpulist();
+# Exercise each scheduler in turn with the same create/migrate/teardown
+# sequence; the host is back in its initial state after every iteration.
+foreach my $sched (@schedulers) {
+  my $out;
+
+  prep("$sched");
+
+  # For each cpupool:
+  #  * create it
+  #  * rename it
+  #  * move a domain in it
+  #  * move back a domain out of it
+  #  * add back the pcpus from it to the default pool
+  #  * destroy it
+  target_cmd_root($ho, "xl cpupool-create /etc/xen/cpupool-osstest-$sched");
+  target_cmd_output_root($ho, "xl cpupool-rename cpupool-osstest-$sched 
cpupool-test");
+  $out= target_cmd_output_root($ho, "xl cpupool-list -c"); logm("$out");
+  $out= target_cmd_output_root($ho, "xl cpupool-list"); logm("$out");
+
+  # Migrate the guest into the new pool and show where its vcpus ended up.
+  target_cmd_root($ho, "xl cpupool-migrate $gho->{Name} cpupool-test");
+  $out= target_cmd_output_root($ho, "xl cpupool-list"); logm("$out");
+  $out= target_cmd_output_root($ho, "xl vcpu-list"); logm("$out");
+
+  target_cmd_root($ho, "xl cpupool-migrate $gho->{Name} Pool-0");
+  $out= target_cmd_output_root($ho, "xl cpupool-list"); logm("$out");
+
+  # Give the pCPUs back to the default pool so the next scheduler's
+  # iteration (and later test steps) find the host fully repopulated.
+  foreach my $cpu (@cpulist) {
+    target_cmd_root($ho,"xl cpupool-cpu-remove cpupool-test $cpu");
+    target_cmd_root($ho,"xl cpupool-cpu-add $default_cpupool $cpu");
+  }
+  $out= target_cmd_output_root($ho, "xl cpupool-list -c"); logm("$out");
+
+  target_cmd_root($ho, "xl cpupool-destroy cpupool-test");
+  $out= target_cmd_output_root($ho, "xl cpupool-list"); logm("$out");
+}
+
+


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.