-rwxr-xr-x  bin/cpufreq_test.py              | 761
-rwxr-xr-x  bin/cpuid.py                     |   2
-rwxr-xr-x  bin/gateway_ping_test.py         |   3
-rwxr-xr-x  bin/pactl_list.sh                |  20
-rwxr-xr-x  bin/roundtrip_qr.py              |   6
-rwxr-xr-x  bin/snap_tests.py                |  38
-rwxr-xr-x  bin/socketcan_test.py            |   3
-rw-r--r--  units/audio/jobs.pxu             |  25
-rw-r--r--  units/cpu/jobs.pxu               |  12
-rw-r--r--  units/cpu/test-plan.pxu          |   2
-rw-r--r--  units/kernel-snap/jobs.pxu       |   1
-rw-r--r--  units/miscellanea/test-plan.pxu  |   2
-rw-r--r--  units/monitor/jobs.pxu           |  15
-rw-r--r--  units/monitor/test-plan.pxu      |   2
-rw-r--r--  units/power-management/jobs.pxu  |  44
-rw-r--r--  units/snappy/snappy.pxu          |  19
-rw-r--r--  units/stress/jobs.pxu            |   1
-rw-r--r--  units/thunderbolt/jobs.pxu       |   5
-rw-r--r--  units/watchdog/jobs.pxu          |   4
-rw-r--r--  units/watchdog/manifest.pxu      |   5
20 files changed, 925 insertions(+), 45 deletions(-)
diff --git a/bin/cpufreq_test.py b/bin/cpufreq_test.py
new file mode 100755
index 0000000..81ee276
--- /dev/null
+++ b/bin/cpufreq_test.py
@@ -0,0 +1,761 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Authors
+# Adrian Lane <adrian.lane@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3,
+# as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Test and validate SUT CPU scaling capabilities via CPUFreq."""
+
+
+from os import path
+import multiprocessing
+import collections
+import threading
+import argparse
+import logging
+import pprint
+import random
+import signal
+import copy
+import math
+import time
+import sys
+import psutil
+
+
+class CpuFreqTestError(Exception):
+ """Exception handling."""
+ def __init__(self, message):
+ super().__init__()
+ if 'scaling_driver' in message:
+ logging.error(
+                '%s\n## Fatal: scaling via cpufreq unsupported ##', message)
+ # exempt systems unable to change intel_pstate driver mode
+ elif 'intel_pstate/status' in message:
+ pass
+ else:
+ logging.error(message)
+
+
+class CpuFreqTest:
+ """ Test cpufreq scaling capabilities."""
+    # duration to stay at each frequency (sec); must exceed observe_interval
+    scale_duration = 8
+    # frequency sampling interval (sec); must be less than scale_duration
+    observe_interval = .4
+    # max, min percentage of median freq allowed to pass
+    # values relative to target freq
+    # ex: max = 110, min = 90 is a 20% passing tolerance
+ max_freq_pct = 150
+ min_freq_pct = 90
+
+ def __init__(self):
+ def append_max_min():
+ """ Create scaling table from max_freq,
+ min_freq cpufreq files.
+ """
+ freq_table = []
+ path_max = path.join('cpu0', 'cpufreq',
+ 'scaling_max_freq')
+ path_min = path.join('cpu0', 'cpufreq',
+ 'scaling_min_freq')
+ freq_table.append(
+ self._read_sysfs(path_max).rstrip('\n'))
+ freq_table.append(
+ self._read_sysfs(path_min).rstrip('\n'))
+ return freq_table
+
+ self.fail_count = 0
+ self.path_root = '/sys/devices/system/cpu'
+ self.__proc_list = [] # track spawned processes
+ # catalog known cpufreq driver types
+ # used to determine logic flow control
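+        # matched as substrings, e.g. 'acpi-cpufreq', 'cpufreq-dt'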
+ self.driver_types = (
+ '-cpufreq',
+ 'cpufreq-',
+ 'arm-big-little'
+ )
+ # chainmap object for dict of dicts
+ self.freq_chainmap = collections.ChainMap()
+ # cpufreq driver
+ path_scaling_driver = path.join('cpu0', 'cpufreq',
+ 'scaling_driver')
+ self.scaling_driver = self._read_sysfs(
+ path_scaling_driver).rstrip('\n')
+ path_scaling_gvrnrs = path.join('cpu0', 'cpufreq',
+ 'scaling_available_governors')
+ path_startup_governor = path.join('cpu0', 'cpufreq',
+ 'scaling_governor')
+ self.scaling_gvrnrs = self._read_sysfs(
+ path_scaling_gvrnrs).rstrip('\n').split()
+ self.startup_governor = self._read_sysfs(
+ path_startup_governor).rstrip('\n')
+
+ # ensure the correct freq table is populated
+ if any(drvr in self.scaling_driver for drvr in self.driver_types):
+ path_scaling_freqs = path.join('cpu0', 'cpufreq',
+ 'scaling_available_frequencies')
+ scaling_freqs = self._read_sysfs(
+ path_scaling_freqs).rstrip('\n').split()
+ self.scaling_freqs = list(
+ map(int, scaling_freqs))
+ # test freqs in ascending order
+ self.scaling_freqs.sort()
+ else:
+ # setup path and status for intel pstate directives
+ if 'intel_' in self.scaling_driver:
+ # /sys/devices/system/cpu/intel_pstate/status
+ self.path_ipst_status = path.join('intel_pstate', 'status')
+ self.startup_ipst_status = self._read_sysfs(
+ self.path_ipst_status).rstrip('\n')
+ # use max, min freq for scaling table
+ self.scaling_freqs = list(
+ map(int, append_max_min()))
+ self.scaling_freqs.sort()
+ self.startup_max_freq = self.scaling_freqs[1]
+ self.startup_min_freq = self.scaling_freqs[0]
+
+ def _read_sysfs(self, fpath):
+ """Read sysfs/cpufreq file."""
+ abs_path = path.join(self.path_root, fpath)
+ try:
+ with open(abs_path, 'r') as _file:
+ data = _file.read()
+ except OSError:
+ raise CpuFreqTestError(
+ 'Unable to read file: %s' % abs_path)
+ return data
+
+ def _write_sysfs(self, fpath, data):
+ """Write sysfs/cpufreq file, data type agnostic."""
+ def return_bytes_utf(_data):
+ """Data type conversion to bytes utf."""
+ try:
+ data_enc = _data.encode()
+ except (AttributeError, TypeError):
+ data_enc = str(_data).encode()
+ return bytes(data_enc)
+
+ if not isinstance(data, bytes):
+ data_utf = return_bytes_utf(data)
+ else:
+ # do not convert bytes()
+ data_utf = data
+
+ abs_path = path.join(self.path_root, fpath)
+ try:
+ with open(abs_path, 'wb') as _file:
+ _file.write(data_utf)
+ except OSError:
+ raise CpuFreqTestError(
+ 'Unable to write file: %s' % abs_path)
+
+ def _get_cores(self, fpath):
+ """Get various core ranges, convert to list."""
+ def list_core_range(_core_range):
+ """ Method to convert core range to list prior
+ to iteration.
+ """
+ _core_list = []
+            # expand each comma-separated group
+ for core in _core_range.split(','):
+ first_last = core.split('-')
+ if len(first_last) == 2:
+ _core_list += list(
+ range(
+ int(first_last[0]), int(first_last[1]) + 1))
+ else:
+ _core_list += [int(first_last[0])]
+ return _core_list
+
+ core_range = self._read_sysfs(fpath).strip('\n').strip()
+ core_list = list_core_range(core_range)
+ return core_list
+
+ def _process_results(self):
+ """Process results from CpuFreqCoreTest."""
+ def comp_freq_dict(_inner_key, _inner_val):
+ """Transpose and append results from subclass."""
+ if _inner_val:
+ # calc freq_median/freq_target %
+ result_pct = int((_inner_val / _inner_key) * 100)
+ if CpuFreqTest.min_freq_pct <= result_pct <= (
+ CpuFreqTest.max_freq_pct):
+ # append result pass/fail
+ new_inner_val = [str(result_pct) + '%', 'Pass']
+ else:
+ new_inner_val = [str(result_pct) + '%', 'Fail']
+ # increment fail bit
+ self.fail_count += 1
+ # append raw freq_median value
+ new_inner_val.append(int(_inner_val))
+ else:
+ new_inner_val = ['<=0%', 'Fail', _inner_val]
+ self.fail_count += 1
+ return new_inner_val
+
+ # create master result table with dict comprehension
+ freq_result_map = {
+ outer_key: {
+ inner_key: comp_freq_dict(inner_key, inner_val)
+ for inner_key, inner_val in outer_val.items()
+ }
+ for outer_key, outer_val in self.freq_chainmap.items()
+ }
+ return freq_result_map
+
+ def disable_thread_siblings(self):
+ """Disable thread_siblings (aka hyperthreading)
+ on all cores.
+ """
+ def get_thread_siblings():
+ """Get hyperthread cores to offline."""
+ thread_siblings = []
+ online_cores = self._get_cores('online')
+ for _core in online_cores:
+ _fpath = path.join('cpu%i' % _core,
+ 'topology', 'thread_siblings_list')
+ # second core is sibling
+ thread_siblings += self._get_cores(_fpath)[1:]
+
+ if thread_siblings:
+ _to_disable = set(thread_siblings) & set(online_cores)
+ logging.info(
+ '* disabling thread siblings (hyperthreading):')
+ logging.info(
+ ' - disabling cores: %s', _to_disable)
+ else:
+ _to_disable = False
+ return _to_disable
+
+ to_disable = get_thread_siblings()
+ if to_disable:
+ for core in to_disable:
+ fpath = path.join('cpu%i' % core, 'online')
+ self._write_sysfs(fpath, 0)
+
+ def set_governors(self, governor):
+ """Set/change CpuFreq scaling governor; global on all cores."""
+ logging.info(' - setting governor: %s', governor)
+ online_cores = self._get_cores('online')
+ for core in online_cores:
+ fpath = path.join('cpu%i' % core,
+ 'cpufreq', 'scaling_governor')
+ self._write_sysfs(fpath, governor)
+
+ def reset(self):
+ """Enable all offline cpus,
+        and reset the max and min frequency files.
+ """
+ def reset_intel_driver():
+ """ Reset fn for pstate driver."""
+ try:
+ self._write_sysfs(
+ self.path_ipst_status, 'off')
+ # if kernel/bios limitations present
+ except CpuFreqTestError:
+ # then reset via max, min freq files
+ set_max_min()
+ return
+
+            logging.info('* resetting intel_pstate cpufreq driver')
+ # wait 300ms between setting driver modes
+ time.sleep(.3)
+ logging.info(
+ ' - setting driver mode: %s', self.startup_ipst_status)
+ self._write_sysfs(
+ self.path_ipst_status, self.startup_ipst_status)
+
+ def enable_off_cores():
+ """Enable all present and offline cores."""
+ present_cores = self._get_cores('present')
+ try:
+ offline_cores = self._get_cores('offline')
+            # for -r (reset) arg invocation
+ except ValueError:
+ return
+
+ to_enable = set(present_cores) & set(offline_cores)
+ logging.info('* enabling thread siblings/hyperthreading:')
+ logging.info(' - enabling cores: %s', to_enable)
+ for core in to_enable:
+ fpath = path.join('cpu%i' % core,
+ 'online')
+ self._write_sysfs(fpath, 1)
+
+ def set_max_min():
+ """Set max_frequency and min_frequency cpufreq files."""
+ logging.info('* restoring max, min freq files')
+ present_cores = self._get_cores('present')
+ for core in present_cores:
+ path_max = path.join('cpu%i' % core,
+ 'cpufreq', 'scaling_max_freq')
+ path_min = path.join('cpu%i' % core,
+ 'cpufreq', 'scaling_min_freq')
+ # reset max freq
+ self._write_sysfs(
+ path_max, self.startup_max_freq)
+ # reset min freq
+ self._write_sysfs(
+ path_min, self.startup_min_freq)
+
+ logging.info('* restoring startup governor:')
+ self.set_governors(self.startup_governor)
+
+ # enable offline cores
+ enable_off_cores()
+
+ # reset sysfs for non-acpi_cpufreq systems
+ if not any(drvr in self.scaling_driver for drvr in self.driver_types):
+ if 'intel_' in self.scaling_driver:
+ reset_intel_driver()
+ else:
+ set_max_min()
+
+ def execute_test(self):
+ """Execute cpufreq test, process results and return
+ appropriate exit code.
+ """
+ def init_intel_driver():
+ """Initialize Intel driver for testing.
+            Some modes are unavailable for certain processor/kernel/BIOS configs.
+ """
+ try:
+ self._write_sysfs(
+ self.path_ipst_status, 'off')
+ # exempt systems unable to change intel_pstate driver mode
+ except CpuFreqTestError:
+ return
+
+ logging.info(
+ '* initializing intel_cpufreq driver:')
+ # wait 300ms between changing driver modes
+ time.sleep(.3)
+ # prefer the intel_cpufreq driver (passive mode)
+ self._write_sysfs(self.path_ipst_status, 'passive')
+ cur_ipst_status = self._read_sysfs(
+ self.path_ipst_status).rstrip('\n')
+ logging.info(' - driver mode: %s', cur_ipst_status)
+
+ logging.info('---------------------\n'
+ '| CpuFreqTest Begin |\n'
+ '---------------------')
+ start_time = time.time()
+ # disable hyperthreading
+ self.disable_thread_siblings()
+
+ # if intel, reset and set best compatible driver
+ if 'intel_' in self.scaling_driver:
+ init_intel_driver()
+
+ logging.info('* configuring cpu governors:')
+ # userspace governor required for scaling_setspeed
+ if any(drvr in self.scaling_driver for drvr in self.driver_types):
+ self.set_governors('userspace')
+ else:
+ self.set_governors('performance')
+
+ # spawn core_tests concurrently
+ logging.info('---------------------')
+ self.spawn_core_test()
+ # wrap up test
+ logging.info('\n-----------------\n'
+ '| Test Complete |\n'
+ '-----------------\n')
+ # reset state and cleanup
+ logging.info('[Reset & Cleanup]')
+ self.reset()
+
+ # facilitate house cleaning
+ if self.__proc_list:
+ logging.info('* terminating dangling pids')
+ for proc in self.__proc_list:
+ # terminate dangling processes
+ proc.terminate()
+ # prove that we are single-threaded again
+ logging.info('* active threads: %i\n', threading.active_count())
+
+ # display results
+ logging.warning('[CpuFreqTest Results]') # for --quiet mode
+ logging.info(
+ ' - legend:\n'
+ ' {core: {target_freq:'
+ '[sampled_med_%, P/F, sampled_median],:.\n')
+ # format result dict for human consumption
+ logging.info(
+ pprint.pformat(self._process_results()))
+ # provide time under test for debug/verbose output
+ end_time = time.time() - start_time
+ logging.debug('[Test Took: %.3fs]', end_time)
+ if self.fail_count:
+ print('\n[Test Failed]\n'
+ '* core fail_count =', self.fail_count)
+ return 1
+
+ print('\n[Test Passed]')
+ return 0
+
+ def spawn_core_test(self):
+ """Spawn concurrent scale testing on all online cores."""
+ def run_worker_process(_result_queue, affinity):
+ """ Subclass instantiation & constructor for
+ individual core.
+ """
+ _worker = psutil.Process()
+ # assign affinity, pin to core
+ _worker.cpu_affinity(affinity)
+            # instantiate core_test
+ cpu_freq_ctest = CpuFreqCoreTest(
+ affinity[0], _worker.pid)
+ # execute freq scaling
+ cpu_freq_ctest.scale_all_freq()
+ # get results
+ res_freq_map = cpu_freq_ctest.__call__()
+ # place in result_queue
+ _result_queue.put(res_freq_map)
+
+ def process_rqueue(queue_depth, _result_queue):
+ """Get and process core_test result_queue."""
+ # get queued core_test results
+ for _ in range(queue_depth):
+ # pipe results from core_test
+ worker_queue = _result_queue.get()
+ # append to chainmap object
+ self.freq_chainmap = self.freq_chainmap.new_child(
+ worker_queue)
+ # signal processing complete
+ _result_queue.task_done()
+ logging.info('----------------------------')
+ logging.info('* joining and closing queues')
+ # nicely join and close queue
+ try:
+ _result_queue.join()
+ finally:
+ _result_queue.close()
+
+ worker_list = [] # track spawned multiproc processes
+ pid_list = [] # track spawned multiproc pids
+ online_cores = self._get_cores('online')
+ # delegate & spawn tests on other cores first
+ # then run core 0 last (main() thread)
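+        # e.g. [0, 1, 2, 3] -> [1, 2, 3, 0]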
+ online_cores.append(online_cores.pop(0))
+ # create queue for piping results
+ result_queue = multiprocessing.JoinableQueue()
+
+ # assign affinity and spawn core_test
+ for core in online_cores:
+ affinity = [int(core)]
+ affinity_dict = dict(affinity=affinity)
+ worker = multiprocessing.Process(target=run_worker_process,
+ args=(result_queue,),
+ kwargs=affinity_dict)
+ # start core_test
+ worker.start()
+ worker_list.append(worker)
+ # track and log active child pids
+ pid_list.append(worker.pid)
+
+ # get, process queues
+ process_rqueue(len(worker_list), result_queue)
+
+ # cleanup core_test pids
+ logging.info('* joining worker processes:')
+ for idx, worker in enumerate(worker_list):
+ # join worker processes
+ worker_return = worker.join()
+ time.sleep(.1)
+ if worker_return is None:
+ logging.info(
+ ' - PID %s joined parent', pid_list[idx])
+ else:
+ # can cleanup in reset subroutine
+ continue
+ # update attribute for a 2nd pass terminate
+ self.__proc_list = worker_list
+
+
+class CpuFreqCoreTest(CpuFreqTest):
+ """Subclass to facilitate concurrent frequency scaling."""
+ class ObserveFreq:
+ """Class for instantiating observation thread.
+ Non-blocking and locked to system time to prevent
+ linear timer drift as frequency scaling ramps up.
+ """
+ __slots__ = ('interval',
+ 'callback',
+ 'thread_timer',
+ 'timer_running',
+ 'next_call')
+
+ def __init__(self, interval, callback):
+ """Execute start_timer on class instantiation."""
+ self.interval = interval
+ self.callback = callback
+ self.thread_timer = None
+ self.timer_running = False
+ self.next_call = time.time()
+ # start event loop
+ self.start_timer()
+
+ def start_timer(self):
+ """Facilitate callbacks at specified interval,
+ accounts and corrects for drift.
+ """
+ if not self.timer_running:
+ # offset interval
+ self.next_call += self.interval
+ # create time delta for consistent timing
+ time_delta = self.next_call - time.time()
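+                # a late callback shrinks the next time_delta,
+                # keeping samples on the ideal schedule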
+ # call self.observe() at end of time_delta
+ self.thread_timer = threading.Timer(time_delta,
+ self.observe)
+ # cleanup spawned timer threads on exit
+ self.thread_timer.daemon = True
+ self.thread_timer.start()
+ self.timer_running = True
+
+ def observe(self):
+ """Trigger callback to sample frequency."""
+ # reset timer_running
+ self.timer_running = False
+ # callback to outer scope
+ self.callback()
+            # start another thread-timer cycle
+ self.start_timer()
+
+ def stop(self):
+ """Called when frequency scaling completed."""
+ if self.thread_timer:
+ # event loop end
+ self.thread_timer.cancel()
+                # ensure state reflects the cancelled timer
+ self.timer_running = False
+
+ # as we may instantiate many instances
+ __slots__ = ('core',
+ 'pid',
+ '__instance_core',
+ '__instance_cpu',
+ '__instance_pid',
+ '__stop_scaling',
+ '__observed_freqs',
+ '__observed_freqs_dict',
+ '__read_sysfs',
+ '__write_sysfs')
+
+ def __init__(self, core, pid):
+ # perform base class inheritance
+ super().__init__()
+ # mangle instance attributes
+ self.__instance_core = int(core)
+ self.__instance_cpu = 'cpu%i' % core # future call speedup
+ self.__instance_pid = pid # worker pid
+ self.__stop_scaling = False # signal.alarm semaphore
+ self.__observed_freqs = [] # recorded freqs
+ self.__observed_freqs_dict = {} # core: recorded freqs
+ # private _r/_w_sysfs methods for concurrent access w/o locks
+ self.__read_sysfs = copy.deepcopy(self._read_sysfs)
+ self.__write_sysfs = copy.deepcopy(self._write_sysfs)
+
+ def __call__(self):
+ """Have subclass return dict '{core: {trgt_f: med_f,}}'
+ when called.
+ """
+ freq_map = {
+ self.__instance_core: self.__observed_freqs_dict
+ }
+ return freq_map
+
+ def _observefreq_callback(self):
+ """Callback method to sample frequency."""
+ def get_cur_freq():
+ """ Get current frequency.
+ """
+ fpath = path.join(self.__instance_cpu,
+ 'cpufreq', 'scaling_cur_freq')
+ freqs = self.__read_sysfs(fpath).rstrip('\n').split()[0]
+ return int(freqs)
+
+ self.__observed_freqs.append(get_cur_freq())
+        # debug: stream sampled freqs
+ logging.debug(self.__observed_freqs)
+
+ def scale_all_freq(self):
+ """Primary method to scale full range of freqs."""
+ def calc_freq_median(obs_freqs):
+ """ Calculate the median value of observed freqs.
+ """
+ n_samples = len(obs_freqs)
+ c_index = n_samples // 2
+ # odd number of samples
+ if n_samples % 2:
+ freq_median = sorted(obs_freqs)[c_index]
+ # even number of samples
+ else:
+ freq_median = sum(
+ sorted(obs_freqs)[
+ (c_index - 1):(c_index + 1)
+ ]) / 2
+ return freq_median
+
+ def map_observed_freqs(target_freq):
+ """Align freq key/values and split result lists
+ for grouping.
+ """
+ # get median of observed freqs
+ freq_median = calc_freq_median(self.__observed_freqs)
+ # target_freq = key, freq_median = value
+ self.__observed_freqs_dict.update(
+ {target_freq: freq_median})
+
+ def handle_alarm(*args):
+ """Alarm trigger callback, unload core."""
+ # *args req to call signal.signal()
+ del args # args unused
+ # stop workload loop
+ self.__stop_scaling = True
+
+ def execute_workload(workload_n):
+ """Perform maths to load core."""
+ # compartmentalized for future development
+ while not self.__stop_scaling:
+ math.factorial(workload_n)
+
+ def log_freq_scaling(_freq, workload_n):
+ """Provide feedback via logging."""
+ logging.info('* testing: %s || target freq: %i ||'
+ ' work: fact(%i) || worker pid: %i',
+ self.__instance_cpu, _freq,
+ workload_n, self.__instance_pid)
+
+ def load_observe_map(_freq):
+ """Proxy fn to scale core to freq."""
+ # gen randint for workload factorial calcs
+ workload_n = random.randint(37512, 39845)
+ # setup async alarm to kill load gen
+ signal.signal(signal.SIGALRM, handle_alarm)
+ # time to gen load
+ signal.alarm(CpuFreqTest.scale_duration)
+ # instantiate ObserveFreq and start data sampling
+ observe_freq = self.ObserveFreq(
+ interval=CpuFreqTest.observe_interval,
+ callback=self._observefreq_callback)
+ # provide feedback on test status
+ log_freq_scaling(_freq, workload_n)
+ # start loading core
+ execute_workload(workload_n)
+ # stop sampling
+ observe_freq.stop()
+ # map freq results to core
+ map_observed_freqs(_freq)
+
+ # cpufreq class driver (non-intel) supports full freq table scaling
+ if any(drvr in self.scaling_driver for drvr in self.driver_types):
+ fpath = path.join(self.__instance_cpu,
+ 'cpufreq', 'scaling_setspeed')
+ # others support max, min freq scaling
+ else:
+ fpath = path.join(self.__instance_cpu,
+ 'cpufreq', 'scaling_max_freq')
+
+ # iterate over supported frequency scaling table
+ for idx, freq in enumerate(self.scaling_freqs):
+ # re-init some attributes after 1st pass
+ if idx:
+ # reset freq list
+ self.__observed_freqs = []
+ # reset signal.signal() event loop bit
+ self.__stop_scaling = False
+
+ self.__write_sysfs(fpath, freq)
+ # load core, observe freqs, map to obs_freq_dict
+ load_observe_map(freq)
+
+
+def parse_args_logging():
+ """ Ingest arguments and init logging."""
+ def init_logging(_user_arg):
+ """ Pass user arg and configure logging module."""
+ # logging optimizations; per python logging lib docs
+ logging._srcfile = None # pylint: disable=protected-access
+ # "%(processName)s prefix
+ logging.logMultiprocessing = False
+ # "%(process)d" prefix
+ logging.logProcesses = False
+ # "%(thread)d" & "%(threadName)s" prefixes
+ logging.logThreads = False
+
+ # log to stdout for argparsed logging lvls
+ stdout_handler = logging.StreamHandler(sys.stdout)
+ stdout_handler.setLevel(_user_arg.log_level)
+
+ # log to stderr for exceptions
+ stderr_formatter = logging.Formatter(
+ '%(levelname)s: %(message)s')
+ stderr_handler = logging.StreamHandler(sys.stderr)
+ stderr_handler.setLevel(logging.ERROR)
+ stderr_handler.setFormatter(stderr_formatter)
+
+ # setup base/root logger
+ root_logger = logging.getLogger()
+ # set root logging level
+ root_logger.setLevel(logging.NOTSET)
+ # add handlers for out, err
+ root_logger.addHandler(stdout_handler)
+ root_logger.addHandler(stderr_handler)
+
+ parser = argparse.ArgumentParser()
+ # only allow one arg to be passed
+ parser_mutex_grp = parser.add_mutually_exclusive_group()
+ parser_mutex_grp.add_argument(
+ '-d', '-D', '--debug',
+ dest='log_level',
+ action='store_const',
+ const=logging.DEBUG,
+ # default logging level
+ default=logging.INFO,
+ help='debug/verbose output')
+ parser_mutex_grp.add_argument(
+ '-q', '-Q', '--quiet',
+ dest='log_level',
+ action='store_const',
+ # repurpose built-in logging level
+ const=logging.WARNING,
+ help='suppress output')
+ parser_mutex_grp.add_argument(
+ '-r', '-R', '--reset',
+ action='store_true',
+ help='reset cpufreq sysfs parameters (all cores):'
+ ' (governor, thread siblings, max/min freqs, pstate)')
+ user_arg = parser.parse_args()
+ init_logging(user_arg)
+ return user_arg
+
+
+def main():
+ # configure and start logging
+ user_arg = parse_args_logging()
+ # instantiate CpuFreqTest as cpu_freq_test
+ cpu_freq_test = CpuFreqTest()
+ # provide access to reset() method
+ if user_arg.reset:
+ print('[Reset CpuFreq Sysfs]')
+ return cpu_freq_test.reset()
+ return cpu_freq_test.execute_test()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/bin/cpuid.py b/bin/cpuid.py
index 36d5b46..3eebf1e 100755
--- a/bin/cpuid.py
+++ b/bin/cpuid.py
@@ -88,12 +88,14 @@ CPUIDS = {
"AMD EPYC": ['0x800f12'],
"AMD Lisbon": ['0x100f81'],
"AMD Magny-Cours": ['0x100f91'],
+ "AMD Milan": ['0xa00f11'],
"AMD ROME": ['0x830f10'],
"Broadwell": ['0x4067', '0x306d4', '0x5066', '0x406f'],
"Canon Lake": ['0x6066'],
"Cascade Lake": ['0x50655', '0x50656', '0x50657'],
"Coffee Lake": [
'0x806ea', '0x906ea', '0x906eb', '0x906ec', '0x906ed'],
+ "Cooper Lake": ['0x5065a', '0x5065b'],
"Haswell": ['0x306c', '0x4065', '0x4066', '0x306f'],
"Ice Lake": ['0x706e'],
"Ivy Bridge": ['0x306a', '0x306e'],
diff --git a/bin/gateway_ping_test.py b/bin/gateway_ping_test.py
index b6126cc..dc2e165 100755
--- a/bin/gateway_ping_test.py
+++ b/bin/gateway_ping_test.py
@@ -185,7 +185,8 @@ def ping(host, interface, count, deadline, verbose=False):
if interface:
command.append("-I{}".format(interface))
reg = re.compile(
- r"(\d+) packets transmitted, (\d+) received, (\d+)% packet loss")
+ r"(\d+) packets transmitted, (\d+) received,"
+ r".*([0-9]*\.?[0-9]*.)% packet loss")
ping_summary = {'transmitted': 0, 'received': 0, 'pct_loss': 0}
try:
output = subprocess.check_output(command, universal_newlines=True)
diff --git a/bin/pactl_list.sh b/bin/pactl_list.sh
new file mode 100755
index 0000000..a7abb87
--- /dev/null
+++ b/bin/pactl_list.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+EXIT_CODE=0
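+# exit code encodes what is missing: 1 = no sources, 2 = no sinks, 3 = both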
+
+for device in "sources" "sinks"
+do
+ if ! pactl list $device short | grep -v -E "monitor|auto_null"
+ then
+ echo "No available $device found"
+ case $device in
+ "sources")
+ EXIT_CODE=$(( EXIT_CODE+1 ))
+ ;;
+ "sinks")
+ EXIT_CODE=$(( EXIT_CODE+2 ))
+ esac
+ fi
+done
+
+exit $EXIT_CODE
\ No newline at end of file
diff --git a/bin/roundtrip_qr.py b/bin/roundtrip_qr.py
index e2eb5b5..4cb6592 100755
--- a/bin/roundtrip_qr.py
+++ b/bin/roundtrip_qr.py
@@ -54,7 +54,7 @@ def capture_webcam(name):
def generate_data():
- return ''.join(random.choice(string.ascii_letters) for i in range(20))
+ return ''.join(random.choice(string.ascii_letters) for i in range(10))
def generate_qr_code(data):
@@ -62,11 +62,11 @@ def generate_qr_code(data):
def display_code(qr):
- with open('/dev/tty1', 'wb+', buffering=0) as term:
+ with open('/dev/tty0', 'wb+', buffering=0) as term:
    # clear the tty so the qr is always printed at the top of the screen
term.write(str.encode('\033c'))
# print the qr code
- term.write(qr.terminal(quiet_zone=5).encode())
+ term.write(qr.terminal(quiet_zone=1).encode())
def decode_image(filename):
diff --git a/bin/snap_tests.py b/bin/snap_tests.py
index fe80288..e6ea23b 100755
--- a/bin/snap_tests.py
+++ b/bin/snap_tests.py
@@ -12,7 +12,6 @@ import sys
from checkbox_support.snap_utils.snapd import Snapd
# Requirements for the test snap:
-# - the snap must not be installed at the start of the nested test plan
# - the snap must be strictly confined (no classic or devmode flags)
# - there must be different revisions on the stable & edge channels
try:
@@ -37,7 +36,7 @@ class SnapList():
"""snap list should show the core package is installed."""
data = Snapd().list()
for snap in data:
- if snap['name'] in ('core', 'core16', 'core18'):
+ if snap['name'] in ('core', 'core16', 'core18', 'core20'):
print("Found a core snap")
print(snap['name'], snap['version'], snap['revision'])
return 0
@@ -70,6 +69,9 @@ class SnapInstall():
args = parser.parse_args(sys.argv[2:])
print('Install {}...'.format(TEST_SNAP))
s = Snapd(SNAPD_TASK_TIMEOUT, SNAPD_POLL_INTERVAL, verbose=True)
+ if s.list(TEST_SNAP):
+ print('{} already installed. Removing'.format(TEST_SNAP))
+ s.remove(TEST_SNAP)
s.install(TEST_SNAP, args.channel)
print('Confirm in snap list...')
data = s.list()
@@ -87,11 +89,12 @@ class SnapRefresh():
def invoked(self):
"""Test refresh of test-snapd-tools snap."""
def get_rev():
- data = Snapd().list()
- for snap in data:
- if snap['name'] == TEST_SNAP:
- return snap['revision']
- print('Get starting revision...')
+ return Snapd().list(TEST_SNAP)['revision']
+ if Snapd().list(TEST_SNAP):
+ print('Remove previously installed revision')
+ Snapd().remove(TEST_SNAP)
+ print('Install starting revision...')
+ Snapd().install(TEST_SNAP, 'stable')
start_rev = get_rev()
print(' revision:', start_rev)
print('Refresh to edge...')
@@ -112,10 +115,15 @@ class SnapRevert():
def invoked(self):
"""Test revert of test-snapd-tools snap."""
s = Snapd(SNAPD_TASK_TIMEOUT, SNAPD_POLL_INTERVAL)
+ if s.list(TEST_SNAP):
+ s.remove(TEST_SNAP)
+ print('Install stable revision')
+ s.install(TEST_SNAP)
+ print('Refresh to edge')
+ s.refresh(TEST_SNAP, 'edge')
print('Get stable channel revision from store...')
r = s.info(TEST_SNAP)
stable_rev = r['channels']['latest/stable']['revision']
- print('Get current installed revision...')
r = s.list(TEST_SNAP)
installed_rev = r['revision'] # should be edge revision
print('Reverting snap {}...'.format(TEST_SNAP))
@@ -140,6 +148,11 @@ class SnapReupdate():
"""Test re-update of test-snapd-tools snap."""
s = Snapd(SNAPD_TASK_TIMEOUT, SNAPD_POLL_INTERVAL)
print('Get edge channel revision from store...')
+ if s.list(TEST_SNAP):
+ s.remove(TEST_SNAP)
+ s.install(TEST_SNAP)
+ s.refresh(TEST_SNAP, 'edge')
+ s.revert(TEST_SNAP)
r = s.info(TEST_SNAP)
edge_rev = r['channels']['latest/edge']['revision']
print('Remove edge revision...')
@@ -160,8 +173,11 @@ class SnapRemove():
def invoked(self):
"""Test remove of test-snapd-tools snap."""
- print('Install {}...'.format(TEST_SNAP))
+ print('Remove {}...'.format(TEST_SNAP))
s = Snapd(SNAPD_TASK_TIMEOUT, SNAPD_POLL_INTERVAL)
+ if not s.list(TEST_SNAP):
+ print('{} not found. Installing'.format(TEST_SNAP))
+ s.install(TEST_SNAP)
s.remove(TEST_SNAP)
print('Check not in snap list')
data = s.list()
@@ -189,8 +205,8 @@ class Snap():
parser = argparse.ArgumentParser()
parser.add_argument('subcommand', type=str, choices=sub_commands)
args = parser.parse_args(sys.argv[1:2])
- sub_commands[args.subcommand]().invoked()
+ return sub_commands[args.subcommand]().invoked()
if __name__ == '__main__':
- Snap().main()
+ sys.exit(Snap().main())
diff --git a/bin/socketcan_test.py b/bin/socketcan_test.py
index 25c79b2..5468b3a 100755
--- a/bin/socketcan_test.py
+++ b/bin/socketcan_test.py
@@ -18,6 +18,7 @@
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
import argparse
+import ctypes
import os
import socket
import struct
@@ -112,7 +113,7 @@ def echo_test(args):
id_flags = 0
if args.effid:
print('Setting EFF CAN ID flag')
- id_flags = socket.CAN_EFF_FLAG
+ id_flags = ctypes.c_ulong(socket.CAN_EFF_FLAG).value
# Whether to enable local loopback, required for local only test
# but only want to parse packets from other end if remote
diff --git a/units/audio/jobs.pxu b/units/audio/jobs.pxu
index 538d213..1fad964 100644
--- a/units/audio/jobs.pxu
+++ b/units/audio/jobs.pxu
@@ -341,6 +341,7 @@ _description:
plugin: shell
category_id: com.canonical.plainbox::audio
id: audio/alsa_record_playback_automated
+depends: audio/detect_sinks_sources
estimated_duration: 10.0
requires:
package.name == 'python3-gi'
@@ -357,6 +358,17 @@ _description:
plugin: shell
category_id: com.canonical.plainbox::audio
+id: audio/detect_sinks_sources
+estimated_duration: 1.0
+requires:
+ package.name == 'pulseaudio-utils'
+command:
+ pactl_list.sh
+_description:
+ Test to detect whether there are available sources and sinks.
+
+plugin: shell
+category_id: com.canonical.plainbox::audio
id: audio/alsa_info_collect
estimated_duration: 2.0
command: alsa_info --no-dialog --no-upload --output "${PLAINBOX_SESSION_SHARE}"/alsa_info.log
@@ -590,7 +602,7 @@ plugin: shell
category_id: com.canonical.plainbox::audio
id: audio/alsa_record_playback_automated_after_suspend_30_cycles
estimated_duration: 10.0
-depends: power-management/suspend_30_cycles
+depends: power-management/suspend_30_cycles audio/detect_sinks_sources_after_suspend_30_cycles
requires:
package.name == 'python3-gi'
package.name == 'gir1.2-gstreamer-1.0'
@@ -606,6 +618,17 @@ _description:
plugin: shell
category_id: com.canonical.plainbox::audio
+id: audio/detect_sinks_sources_after_suspend_30_cycles
+estimated_duration: 1.0
+requires:
+ package.name == 'pulseaudio-utils'
+command:
+ pactl_list.sh
+_description:
+ Test to detect whether there are available sources and sinks after suspending 30 times.
+
+plugin: shell
+category_id: com.canonical.plainbox::audio
id: audio/check_volume_after_suspend_30_cycles
estimated_duration: 1.0
depends: power-management/suspend_30_cycles
diff --git a/units/cpu/jobs.pxu b/units/cpu/jobs.pxu
index c442bee..afdd6ea 100644
--- a/units/cpu/jobs.pxu
+++ b/units/cpu/jobs.pxu
@@ -86,7 +86,7 @@ plugin: shell
category_id: com.canonical.plainbox::cpu
id: cpu/topology
estimated_duration: 1.0
-requires: int(cpuinfo.count) > 1 and (cpuinfo.platform == 'i386' or cpuinfo.platform == 'x86_64' or cpuinfo.platform == 's390x')
+requires: int(cpuinfo.count) > 1 and (cpuinfo.platform == 'i386' or cpuinfo.platform == 'x86_64')
command: cpu_topology.py
_summary:
Check CPU topology for accuracy between proc and sysfs
@@ -177,3 +177,13 @@ _siblings: [
"command": "cpuinfo_resource.py | diff $PLAINBOX_SESSION_SHARE/cpuinfo_before_suspend -",
"depends": "com.canonical.certification::suspend/suspend_advanced_auto"}
]
+
+plugin: shell
+category_id: com.canonical.plainbox::cpu
+id: cpu/cpufreq_test-server
+user: root
+command: cpufreq_test.py -q
+_summary:
+ cpufreq scaling test
+_description:
+ Comprehensive testing of cpu scaling capabilities and directives via cpufreq.
diff --git a/units/cpu/test-plan.pxu b/units/cpu/test-plan.pxu
index 5d06090..eb2dabb 100644
--- a/units/cpu/test-plan.pxu
+++ b/units/cpu/test-plan.pxu
@@ -85,7 +85,7 @@ _name: CPU Tests (Server)
_description: CPU Tests (Server)
include:
cpu/clocktest certification-status=blocker
- cpu/frequency_governors certification-status=blocker
+ cpu/cpufreq_test-server certification-status=blocker
cpu/maxfreq_test certification-status=blocker
cpu/maxfreq_test-log-attach certification-status=non-blocker
cpu/topology certification-status=blocker
diff --git a/units/kernel-snap/jobs.pxu b/units/kernel-snap/jobs.pxu
index 151fd98..950d1a6 100644
--- a/units/kernel-snap/jobs.pxu
+++ b/units/kernel-snap/jobs.pxu
@@ -16,6 +16,7 @@ requires:
unit: template
template-resource: bootloader
+template-filter: bootloader.booted_kernel_path != 'unknown'
id: kernel-snap/booted-kernel-matches-current-{name}
category_id: kernel-snap
_summary: The booted kernel image matches image in current kernel snap
diff --git a/units/miscellanea/test-plan.pxu b/units/miscellanea/test-plan.pxu
index 6c2d582..60741b6 100644
--- a/units/miscellanea/test-plan.pxu
+++ b/units/miscellanea/test-plan.pxu
@@ -87,9 +87,7 @@ mandatory_include:
miscellanea/submission-resources
miscellanea/cpuid
miscellanea/efi_boot_mode certification-status=blocker
- miscellanea/reboot_firmware
miscellanea/efi_pxeboot
- miscellanea/kernel_taint_test
miscellanea/cpus_are_not_samples
miscellanea/ipmi_test certification-status=blocker
miscellanea/bmc_info
diff --git a/units/monitor/jobs.pxu b/units/monitor/jobs.pxu
index 4a6ae9a..f5e4974 100644
--- a/units/monitor/jobs.pxu
+++ b/units/monitor/jobs.pxu
@@ -395,3 +395,18 @@ _steps:
_verification:
Was the interface displayed correctly on the screen?
flags: also-after-suspend
+
+id: monitor/vga
+_summary: Monitor works (VGA)
+_purpose:
+ Check output to display through VGA port
+_steps:
+ 1. Connect display to VGA port
+ 2. Check the display
+_verification:
+ Output to display works
+plugin: manual
+category_id: com.canonical.plainbox::monitor
+estimated_duration: 300
+flags: also-after-suspend
+
diff --git a/units/monitor/test-plan.pxu b/units/monitor/test-plan.pxu
index b893acb..21b16cb 100644
--- a/units/monitor/test-plan.pxu
+++ b/units/monitor/test-plan.pxu
@@ -273,6 +273,7 @@ include:
monitor/dvi-to-vga
monitor/hdmi-to-vga
monitor/displayport_hotplug
+ monitor/vga
id: after-suspend-monitor-full
unit: test plan
@@ -292,3 +293,4 @@ include:
after-suspend-monitor/dvi-to-vga
after-suspend-monitor/hdmi-to-vga
after-suspend-monitor/displayport_hotplug
+ after-suspend-monitor/vga
diff --git a/units/power-management/jobs.pxu b/units/power-management/jobs.pxu
index 37aef99..9dc6851 100644
--- a/units/power-management/jobs.pxu
+++ b/units/power-management/jobs.pxu
@@ -345,25 +345,34 @@ requires:
command:
cpu_lpi_file=$(cpuinfo_resource.py | grep cpu_lpi_file | awk '{ print $2 }')
if [ "$cpu_lpi_file" == "low_power_idle_cpu_residency_us" ]; then
- echo "check /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us"
+ before=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us)
xset dpms force off
sleep 20
xset dpms force on
- residency=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us)
+ after=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us)
+ residency=$((after-before))
+ echo "/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us after/before screen off is $residency"
else
- echo "check /sys/kernel/debug/pmc_core/package_cstate_show"
+ echo "The system doesn't have hardware-based residency counter."
+ echo "please check https://www.uefi.org/sites/default/files/resources/Intel_ACPI_Low_Power_S0_Idle.pdf"
before=$(cat < /sys/kernel/debug/pmc_core/package_cstate_show | grep C10 | awk '{ print $4 }')
xset dpms force off
sleep 20
xset dpms force on
after=$(cat < /sys/kernel/debug/pmc_core/package_cstate_show | grep C10 | awk '{ print $4 }')
residency=$((after-before))
+ echo "/sys/kernel/debug/pmc_core/package_cstate_show after/before screen off is $residency"
+ fi
+ if [ $residency -eq 0 ]; then
+ echo "The cpu can't enter low power idle when screen off."
+ echo "please refer to https://www.kernel.org/doc/html/latest/firmware-guide/acpi/lpit.html."
+ exit 1
fi
- [ $residency -gt 0 ] || exit 1
user: root
estimated_duration: 25
id: power-management/system-low-power-idle
+after: suspend/suspend_advanced_auto
category_id: com.canonical.plainbox::power-management
_summary: System low power idle residency check
_description:
@@ -379,16 +388,31 @@ command:
dmesg | grep ACPI | grep supports | sed 's/\[.*ACPI/ACPI/'
echo "Content of /etc/default/grub:"
cat /etc/default/grub
- rtcwake --mode freeze -s 10
if [ "$sys_lpi_file" == "low_power_idle_system_residency_us" ]; then
- echo "check /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us"
- residency=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us)
+ before=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us)
+ rtcwake --mode freeze -s 10
+ after=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us)
+ residency=$((after-before))
+ echo "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us after/before suspend is $residency"
else
- echo "check /sys/kernel/debug/pmc_core/slp_s0_residency_usec"
- residency=$(cat /sys/kernel/debug/pmc_core/slp_s0_residency_usec)
+ echo "The system doesn't have hardware-based residency counter."
+ echo "please check https://www.uefi.org/sites/default/files/resources/Intel_ACPI_Low_Power_S0_Idle.pdf"
+ before=$(cat /sys/kernel/debug/pmc_core/slp_s0_residency_usec)
+ rtcwake --mode freeze -s 10
+ after=$(cat /sys/kernel/debug/pmc_core/slp_s0_residency_usec)
+ residency=$((after-before))
+ echo "/sys/kernel/debug/pmc_core/slp_s0_residency_usec after/before suspend is $residency"
fi
# shellcheck disable=SC2086
- [ $residency -gt 0 ] || exit 1
+ if [ $residency -eq 0 ]; then
+ echo "The system can't enter s0 when suspended."
+ echo "please refer to https://www.kernel.org/doc/html/latest/firmware-guide/acpi/lpit.html."
+ if [ -f /sys/kernel/debug/suspend_stats ]; then
+ echo "cat /sys/kernel/debug/suspend_stats"
+ cat /sys/kernel/debug/suspend_stats
+ fi
+ exit 1
+ fi
user: root
estimated_duration: 15
diff --git a/units/snappy/snappy.pxu b/units/snappy/snappy.pxu
index fed3f76..45ffc1b 100644
--- a/units/snappy/snappy.pxu
+++ b/units/snappy/snappy.pxu
@@ -46,7 +46,6 @@ plugin: shell
command: snap_tests.py remove
category_id: snappy
estimated_duration: 10s
-depends: snappy/snap-install
flags: preserve-locale
user: root
environ: TEST_SNAP SNAPD_TASK_TIMEOUT SNAPD_POLL_INTERVAL
@@ -83,7 +82,6 @@ _steps:
_verification:
Check hello version is back to its stable version
plugin: manual
-depends: snappy/snap-refresh
category_id: snappy
estimated_duration: 60
@@ -101,7 +99,6 @@ _steps:
_verification:
Check hello version is again the one from the beta channel
plugin: manual
-depends: snappy/snap-revert
category_id: snappy
estimated_duration: 60
@@ -109,12 +106,12 @@ id: snappy/snap-refresh-automated
template-engine: jinja2
_summary: Test the snap refresh command is working.
_description:
- The snap {{ __checkbox_env__.get("TEST_SNAP", "test-snapd-tools") }} should
- be installed from the stable channel prior to starting the test. The job
- refreshes to edge and compares the revision before and after.
+ The test will install the
+ {{ __checkbox_env__.get("TEST_SNAP", "test-snapd-tools") }} snap from the
+ stable channel and then refreshes it to edge and compares the revision before
+ and after the refresh.
plugin: shell
command: snap_tests.py refresh
-depends: snappy/snap-install
category_id: snappy
estimated_duration: 10s
user: root
@@ -124,12 +121,11 @@ id: snappy/snap-revert-automated
template-engine: jinja2
_summary: Test the snap revert command is working.
_description:
- Runs after snap-refresh-automated and should revert the installed edge channel
- snap {{ __checkbox_env__.get("TEST_SNAP", "test-snapd-tools") }} leftover
- from that test to the one from stable.
+ Checks if the edge channel
+ {{ __checkbox_env__.get("TEST_SNAP", "test-snapd-tools") }} snap is reverted
+ back to the one from stable.
plugin: shell
command: snap_tests.py revert
-depends: snappy/snap-refresh-automated
category_id: snappy
estimated_duration: 10s
user: root
@@ -143,7 +139,6 @@ _description:
snap can be refreshed after removal of the blacklisted revision.
plugin: shell
command: snap_tests.py reupdate
-depends: snappy/snap-revert-automated
category_id: snappy
estimated_duration: 10s
user: root
diff --git a/units/stress/jobs.pxu b/units/stress/jobs.pxu
index 2114b39..a541e3a 100644
--- a/units/stress/jobs.pxu
+++ b/units/stress/jobs.pxu
@@ -18,6 +18,7 @@ estimated_duration: 7200.0
requires:
executable.name == 'stress-ng'
user: root
+environ: STRESS_NG_CPU_TIME
command:
if [ -n "$STRESS_NG_CPU_TIME" ]
then
diff --git a/units/thunderbolt/jobs.pxu b/units/thunderbolt/jobs.pxu
index 73fadb5..031c2ad 100644
--- a/units/thunderbolt/jobs.pxu
+++ b/units/thunderbolt/jobs.pxu
@@ -105,7 +105,7 @@ _siblings: [
_summary: Storage insert detection on Thunderbolt 3 port
_description:
PURPOSE:
- This test will check if the insertion of a Thunderbolt HDD could be detected
+     This test will check if the insertion of a Thunderbolt 3 HDD can be detected
STEPS:
1. Click 'Test' to begin the test. This test will
timeout and fail if the insertion has not been detected within 40 seconds.
@@ -164,7 +164,7 @@ _siblings: [
_summary: Storage removal detection on Thunderbolt 3 port
_description:
PURPOSE:
- This test will check the system can detect the removal of a Thunderbolt HDD
+     This test will check that the system can detect the removal of a Thunderbolt 3 HDD
STEPS:
1. Click 'Test' to begin the test. This test will timeout and fail if
the removal has not been detected within 20 seconds.
@@ -176,6 +176,7 @@ _description:
plugin: user-interact-verify
category_id: com.canonical.plainbox::disk
id: thunderbolt3/daisy-chain
+user: root
imports: from com.canonical.plainbox import manifest
requires: manifest.has_thunderbolt3 == 'True'
flags: also-after-suspend-manual
diff --git a/units/watchdog/jobs.pxu b/units/watchdog/jobs.pxu
index 3247941..f02460f 100644
--- a/units/watchdog/jobs.pxu
+++ b/units/watchdog/jobs.pxu
@@ -29,6 +29,8 @@ command:
{% endif -%}
category_id: com.canonical.plainbox::power-management
flags: simple
+imports: from com.canonical.plainbox import manifest
+requires: manifest.has_hardware_watchdog == 'True'
id: watchdog/trigger-system-reset
depends: watchdog/systemd-config
@@ -75,3 +77,5 @@ unit: job
plugin: shell
command: failed_service_check.sh
estimated_duration: 1.0
+imports: from com.canonical.plainbox import manifest
+requires: manifest.has_hardware_watchdog == 'True'
diff --git a/units/watchdog/manifest.pxu b/units/watchdog/manifest.pxu
new file mode 100644
index 0000000..d80d7e1
--- /dev/null
+++ b/units/watchdog/manifest.pxu
@@ -0,0 +1,5 @@
+
+unit: manifest entry
+id: has_hardware_watchdog
+_name: Hardware Watchdog
+value-type: bool