summaryrefslogtreecommitdiff
path: root/bin
diff options
authorSylvain Pineau <sylvain.pineau@canonical.com>2017-09-26 10:27:20 +0200
committerSylvain Pineau <sylvain.pineau@canonical.com>2017-09-26 10:27:20 +0200
commit78012795b553ba0b5ad0c2977d80d8f16831ae00 (patch)
tree72bc986d7b32d1de0d6c34fc66c30dab133a8de1 /bin
parent2515d92d9abd6f8fb7d13b6647e9d1f9a9a88061 (diff)
Import plainbox-provider-checkbox_0.40.0~rc2.orig.tar.gzupstream-0.40.0_rc2patched-0.40.0_rc2-1
Diffstat (limited to 'bin')
-rwxr-xr-xbin/disk_smart61
-rwxr-xr-xbin/network29
-rwxr-xr-xbin/network_device_info2
-rwxr-xr-xbin/network_info4
-rwxr-xr-xbin/pm_test12
-rwxr-xr-xbin/virtualization3
-rwxr-xr-xbin/xrandr_cycle26
7 files changed, 93 insertions, 44 deletions
diff --git a/bin/disk_smart b/bin/disk_smart
index 804b11f..a9b294e 100755
--- a/bin/disk_smart
+++ b/bin/disk_smart
@@ -206,22 +206,23 @@ def initiate_smart_test(disk, raid_element, raid_type, type='short'):
return smart_proc.returncode
-def get_smart_entries(disk, raid_element, raid_type, type='selftest'):
+def get_smart_entries(disk, raid_element, raid_type, verbose=False):
entries = []
+ returncode = 0
try:
if raid_type == 'none':
- stdout = check_output(['smartctl', '-l', type, disk],
+ stdout = check_output(['smartctl', '-l', 'selftest', disk],
universal_newlines=True)
else:
- stdout = check_output(['smartctl', '-l', type, disk,
+ stdout = check_output(['smartctl', '-l', 'selftest', disk,
'-d', '{},{}'.
format(raid_type, raid_element)],
universal_newlines=True)
- returncode = 0
except CalledProcessError as err:
- logging.error("Error encountered checking SMART Log")
- logging.error("\tsmartctl returned code: {}".format(err.returncode))
- logging.error("\tSee 'man smartctl' for info on return code meanings")
+ if verbose:
+ logging.error("Error encountered checking SMART Log")
+ logging.error("\tsmartctl returned: {}".format(err.returncode))
+ logging.error("\tSee 'man smartctl' for info on return codes")
stdout = err.output
returncode = err.returncode
@@ -239,7 +240,7 @@ def get_smart_entries(disk, raid_element, raid_type, type='selftest'):
logging.info('No entries found in log')
if not line.startswith('Num'):
entries.append('No entries found in log yet')
- return entries, returncode
+ return entries, stdout, returncode
columns = ['number', 'description', 'status',
'remaining', 'lifetime', 'lba']
lengths = [line.index(i) for i in line.split()]
@@ -258,7 +259,7 @@ def get_smart_entries(disk, raid_element, raid_type, type='selftest'):
entry['number'] = int(entry['number'][1:])
entries.append(entry)
- return entries, returncode
+ return entries, stdout, returncode
def in_progress(current_entries):
@@ -316,11 +317,12 @@ def poll_for_status(args, disk, raid_element, raid_type, previous_entries):
keep_going = True
while keep_going:
- # Poll every sleep seconds until test is complete$
+ # Poll every sleep seconds until test is complete
time.sleep(args.sleep)
- current_entries, returncode = get_smart_entries(disk, raid_element,
- raid_type)
+ current_entries, output, returncode = get_smart_entries(disk,
+ raid_element,
+ raid_type)
if current_entries != previous_entries:
if not in_progress(current_entries):
logging.debug("Current log entries differ from starting log"
@@ -354,8 +356,9 @@ def run_smart_test(args, disk, raid_element, raid_type):
:returns:
True for success, False for failure
"""
- previous_entries, returncode = get_smart_entries(disk, raid_element,
- raid_type)
+ previous_entries, output, returncode = get_smart_entries(disk,
+ raid_element,
+ raid_type)
if raid_type == 'none':
logging.info("Starting SMART self-test on {}".format(disk))
else:
@@ -375,32 +378,26 @@ def run_smart_test(args, disk, raid_element, raid_type):
logging.debug("Log is 20+ entries long. Restarting test to add an"
" abort message to make the log diff easier")
initiate_smart_test(disk, raid_element, raid_type)
- previous_entries, returncode = get_smart_entries(disk, raid_element,
- raid_type)
+ previous_entries, output, returncode = get_smart_entries(disk,
+ raid_element,
+ raid_type)
status, returncode = poll_for_status(args, disk, raid_element, raid_type,
previous_entries)
if returncode != 0:
- log, returncode = get_smart_entries(disk, raid_element, raid_type)
+ log, output, returncode = get_smart_entries(disk, raid_element,
+ raid_type, True)
+ logging.error("FAIL: SMART Self-Test appears to have failed "
+ "for some reason.")
+ logging.error("\tLast smartctl return code: %d", returncode)
+ logging.error("\tLast smartctl run status: %s", status)
if raid_type == 'none':
- logging.error("FAIL: SMART Self-Test appears to have failed "
- "for some reason. Run 'sudo smartctl -l selftest "
- "{}' to see the SMART log".format(disk))
+ logging.error("\t'smartctl -l selftest {}' output:".format(disk))
else:
- logging.error("FAIL: SMART Self-Test appears to have failed "
- "for some reason. Run 'sudo smartctl -l selftest "
- "{} -d {},{}' to see the SMART log".
+ logging.error("\t'smartctl -l selftest {} -d {},{}' output:".
format(disk, raid_type, raid_element))
- logging.error("\tLast smartctl return code: %d", returncode)
- logging.error("\tLast smartctl run status: %s", status)
- logging.debug("\tMost Recent SMART LOG Entry:")
- for log_entry in log:
- if log_entry['number'] == 1:
- logging.debug("\t# {}\t{}\t{}\t{}\t{}\t{}".format(
- log_entry['number'], log_entry['description'],
- log_entry['status'], log_entry['remaining'],
- log_entry['lifetime'], log_entry['lba']))
+ logging.error("\n%s", output)
return False
else:
if raid_type == 'none':
diff --git a/bin/network b/bin/network
index c855f0a..aaee226 100755
--- a/bin/network
+++ b/bin/network
@@ -28,6 +28,7 @@ import datetime
import fcntl
import ipaddress
import logging
+import math
import os
import re
import shlex
@@ -57,6 +58,7 @@ class IPerfPerformanceTest(object):
fail_threshold,
cpu_load_fail_threshold,
iperf3,
+ num_threads,
protocol="tcp",
data_size="1",
run_time=None,
@@ -68,6 +70,7 @@ class IPerfPerformanceTest(object):
self.fail_threshold = fail_threshold
self.cpu_load_fail_threshold = cpu_load_fail_threshold
self.iperf3 = iperf3
+ self.num_threads = num_threads
self.data_size = data_size
self.run_time = run_time
self.scan_timeout = scan_timeout
@@ -84,10 +87,21 @@ class IPerfPerformanceTest(object):
else:
self.executable = "iperf"
+ # Determine number of parallel threads
+ if self.num_threads == -1:
+ # Below is a really crude guesstimate based on our
+ # initial testing. It's likely possible to improve
+ # this method of setting the number of threads.
+ threads = math.ceil(self.iface.link_speed / 10000)
+ else:
+ threads = self.num_threads
+
+ logging.debug("Using {} threads.".format(threads))
+
# If we set run_time, use that instead to build the command.
if self.run_time is not None:
- cmd = "{} -c {} -t {} -i 1 -f m".format(
- self.executable, self.target, self.run_time)
+ cmd = "{} -c {} -t {} -i 1 -f m -P {}".format(
+ self.executable, self.target, self.run_time, threads)
else:
# Because we can vary the data size, we need to vary the timeout as
# well. It takes an estimated 15 minutes to send 1GB over 10Mb/s.
@@ -96,8 +110,9 @@ class IPerfPerformanceTest(object):
# time without timeout to catch devices that slow down, and also
# not prematurely end iperf on low-bandwidth devices.
self.timeout = 1080*int(self.data_size)
- cmd = "timeout {} {} -c {} -n {}G -i 1 -f -m".format(
- self.timeout, self.executable, self.target, self.data_size)
+ cmd = "timeout {} {} -c {} -n {}G -i 1 -f -m -P {}".format(
+ self.timeout, self.executable, self.target, self.data_size,
+ threads)
logging.debug("Executing command {}".format(cmd))
logging.debug("Starting iperf against {}, this could take a while...".
@@ -389,7 +404,7 @@ def run_test(args, test_target):
iperf_benchmark = IPerfPerformanceTest(args.interface, test_target,
args.fail_threshold,
args.cpu_load_fail_threshold,
- args.iperf3)
+ args.iperf3, args.num_threads)
if args.datasize:
iperf_benchmark.data_size = args.datasize
if args.runtime:
@@ -709,6 +724,10 @@ TEST_TARGET_IPERF = iperf-server.example.com
test_parser.add_argument(
'--underspeed-ok', default=False, action="store_true",
help="Run test even if an underspeed link is detected")
+ test_parser.add_argument(
+ '--num-threads', type=int, default=-1,
+ help=("Number of threads to use in the test. "
+ "(Default is computed based on network speed.)"))
# Sub info options
info_parser.add_argument(
diff --git a/bin/network_device_info b/bin/network_device_info
index da2f5a2..174fbe8 100755
--- a/bin/network_device_info
+++ b/bin/network_device_info
@@ -58,7 +58,7 @@ nm_devices = []
class UdevResult:
def addDevice(self, device):
- if device.category == 'NETWORK':
+ if device.category == 'NETWORK' and device.interface != "UNKNOWN":
udev_devices.append(device)
diff --git a/bin/network_info b/bin/network_info
index 38d5ada..7dbe609 100755
--- a/bin/network_info
+++ b/bin/network_info
@@ -84,8 +84,8 @@ def main(args):
print("IPv6: n/a")
except:
print("IPv6: n/a")
- print("MAC: %s\n" % get_mac_address(interface))
- print("Connect Speed: %s" % get_speed(interface))
+ print("MAC: %s" % get_mac_address(interface))
+ print("Connect Speed: %s\n" % get_speed(interface))
return 0
diff --git a/bin/pm_test b/bin/pm_test
index ce50a51..395498c 100755
--- a/bin/pm_test
+++ b/bin/pm_test
@@ -200,6 +200,10 @@ class PowerManagementOperation(object):
else:
title = '{0} test'.format(self.args.pm_operation.capitalize())
MessageDialog(title, message).run()
+ if self.args.checkbox_respawn_cmd:
+ subprocess.run(
+ r'DISPLAY=:0 x-terminal-emulator -e "bash -c \"source {}; exec bash\""'.format(
+ self.args.checkbox_respawn_cmd), shell=True)
def teardown(self):
"""
@@ -633,7 +637,7 @@ class AutoStartFile(object):
[Desktop Entry]
Name={pm_operation} test
Comment=Verify {pm_operation} works properly
-Exec=sudo /usr/bin/python3 {script} -r {repetitions} -w {wakeup} --hardware-delay {hardware_delay} --pm-delay {pm_delay} --min-pm-time {min_pm_time} --max-pm-time {max_pm_time} --append --total {total} --start {start} --pm-timestamp {pm_timestamp} {silent} --log-level={log_level} --log-dir={log_dir} --suspends-before-reboot={suspend_cycles} {fwts} {pm_operation}
+Exec=sudo /usr/bin/python3 {script} -r {repetitions} -w {wakeup} --hardware-delay {hardware_delay} --pm-delay {pm_delay} --min-pm-time {min_pm_time} --max-pm-time {max_pm_time} --append --total {total} --start {start} --pm-timestamp {pm_timestamp} {silent} --log-level={log_level} --log-dir={log_dir} --suspends-before-reboot={suspend_cycles} --checkbox-respawn-cmd={checkbox_respawn} {fwts} {pm_operation}
Type=Application
X-GNOME-Autostart-enabled=true
Hidden=false
@@ -686,7 +690,8 @@ Hidden=false
log_dir=self.args.log_dir,
fwts='--fwts' if self.args.fwts else '',
suspend_cycles=self.args.suspends_before_reboot,
- pm_operation=self.args.pm_operation))
+ pm_operation=self.args.pm_operation,
+ checkbox_respawn=self.args.checkbox_respawn_cmd))
logging.debug(contents)
with open(self.desktop_filename, 'w') as f:
@@ -824,6 +829,9 @@ class MyArgumentParser(object):
# use fwts for suspend tests
parser.add_argument('--fwts', action='store_true', help=('Use fwts '
'when doing the suspend tests'))
+ parser.add_argument('--checkbox-respawn-cmd', type=str, help=(
+ 'path to a file telling how to return to checkbox after the'
+ ' test is done'), default='')
self.parser = parser
def parse(self):
diff --git a/bin/virtualization b/bin/virtualization
index b67a5fa..17ab9b7 100755
--- a/bin/virtualization
+++ b/bin/virtualization
@@ -292,7 +292,8 @@ class KVMTest(object):
if (
url.path.endswith('/') or
url.path == '' or
- not url.path.endswith(".img")
+ not (url.path.endswith(".img") or
+ url.path.endswith(".tar.gz"))
):
# If we have a relative URL (local copies of official images)
# http://192.168.0.1/ or http://192.168.0.1/images/
diff --git a/bin/xrandr_cycle b/bin/xrandr_cycle
index 2a5e915..b9f615b 100755
--- a/bin/xrandr_cycle
+++ b/bin/xrandr_cycle
@@ -10,6 +10,9 @@ import sys
import tarfile
import time
+from fractions import Fraction
+from collections import OrderedDict
+
parser = argparse.ArgumentParser()
parser.add_argument('--keyword', default='',
help=('A keyword to distinguish the screenshots '
@@ -23,6 +26,7 @@ args = parser.parse_args()
device_context = '' # track what device's modes we are looking at
modes = [] # keep track of all the devices and modes discovered
+highest_modes = [] # list of highest-res modes for each aspect ratio
current_modes = [] # remember the user's current settings for cleanup later
failures = 0 # count the number of failed modesets
failure_messages = [] # remember which modes failed
@@ -65,6 +69,26 @@ for line in output:
# we found them at the end:
if foo[1].find('*') != -1:
current_modes.append((device_context, foo[0]))
+# let's create a dict of aspect_ratio:largest_width for each display
+# (width, because it's easier to compare simple ints when looking for the
+# highest value).
+top_res_per_aspect = OrderedDict()
+for adapter, mode in modes:
+ try:
+ width, height = [int(x) for x in mode.split('x')]
+ aspect = Fraction(width, height)
+ if adapter not in top_res_per_aspect:
+ top_res_per_aspect[adapter] = OrderedDict()
+ cur_max = top_res_per_aspect[adapter].get(aspect, 0)
+ top_res_per_aspect[adapter][aspect] = max(cur_max, width)
+ except Exception as exc:
+ print("Error parsing %s: %s" % (mode, exc))
+
+highest_modes = []
+for adapter, params in top_res_per_aspect.items():
+ for aspect, width in params.items():
+ mode = '{}x{}'.format(width, width/aspect)
+ highest_modes.append((adapter, mode))
# Now we have a list of the modes we need to test. So let's do just that.
profile_path = os.environ['HOME'] + '/.shutter/profiles/'
@@ -131,7 +155,7 @@ except:
raise SystemExit("ERROR: While updating folder name "
"in shutter profile: {}".format(sys.exc_info()))
-for mode in modes:
+for mode in highest_modes:
cmd = 'xrandr --output ' + mode[0] + ' --mode ' + mode[1]
retval = subprocess.call(cmd, shell=True)
if retval != 0: