-rwxr-xr-xbin/audio_test.py25
-rwxr-xr-xbin/boot_mode_test.py6
-rwxr-xr-xbin/booted_kernel_tests.py10
-rwxr-xr-xbin/cpufreq_test.py765
-rwxr-xr-xbin/cpuid.py2
-rwxr-xr-xbin/disk_read_performance_test.sh4
-rwxr-xr-xbin/fan_reaction_test.py31
-rwxr-xr-xbin/gateway_ping_test.py3
-rwxr-xr-xbin/pactl_list.sh20
-rwxr-xr-xbin/roundtrip_qr.py6
-rwxr-xr-xbin/snap_tests.py38
-rwxr-xr-xbin/socketcan_test.py3
-rwxr-xr-xbin/wifi_nmcli_test.py22
-rwxr-xr-xmanage.py2
-rw-r--r--src/clocktest.c4
-rw-r--r--units/audio/jobs.pxu25
-rw-r--r--units/camera/jobs.pxu9
-rw-r--r--units/camera/manifest.pxu7
-rw-r--r--units/cpu/jobs.pxu12
-rw-r--r--units/cpu/test-plan.pxu2
-rw-r--r--units/firmware/jobs.pxu44
-rw-r--r--units/firmware/test-plan.pxu27
-rw-r--r--units/info/jobs.pxu6
-rw-r--r--units/kernel-snap/jobs.pxu1
-rw-r--r--units/keys/jobs.pxu24
-rw-r--r--units/keys/test-plan.pxu2
-rw-r--r--units/memory/jobs.pxu2
-rw-r--r--units/miscellanea/test-plan.pxu2
-rw-r--r--units/monitor/jobs.pxu15
-rw-r--r--units/monitor/test-plan.pxu2
-rw-r--r--units/networking/jobs.pxu7
-rw-r--r--units/power-management/jobs.pxu44
-rw-r--r--units/snappy/snappy.pxu19
-rw-r--r--units/snappy/test-plan.pxu6
-rw-r--r--units/stress/jobs.pxu5
-rw-r--r--units/stress/s3s4.pxu28
-rw-r--r--units/submission/jobs.pxu4
-rw-r--r--units/submission/test-plan.pxu20
-rw-r--r--units/suspend/suspend-graphics.pxu2
-rw-r--r--units/suspend/suspend.pxu26
-rw-r--r--units/thunderbolt/jobs.pxu5
-rw-r--r--units/touchpad/jobs.pxu30
-rw-r--r--units/touchpad/test-plan.pxu1
-rw-r--r--units/ubuntucore/category.pxu3
-rw-r--r--units/ubuntucore/jobs.pxu53
-rw-r--r--units/ubuntucore/test-plan.pxu22
-rw-r--r--units/usb/usb.pxu4
-rw-r--r--units/watchdog/jobs.pxu12
-rw-r--r--units/watchdog/manifest.pxu5
-rw-r--r--units/watchdog/test-plan.pxu1
-rw-r--r--units/wireless/jobs.pxu26
-rw-r--r--units/wireless/test-plan.pxu91
-rw-r--r--units/wireless/wireless-connection-netplan.pxu24
-rw-r--r--units/wwan/jobs.pxu4
54 files changed, 1352 insertions, 211 deletions
diff --git a/bin/audio_test.py b/bin/audio_test.py
index 2a56df4..4b3f89c 100755
--- a/bin/audio_test.py
+++ b/bin/audio_test.py
@@ -24,10 +24,6 @@ except ImportError:
print((sys.version), file=sys.stderr)
sys.exit(127)
-try:
- from collections.abc import Callable
-except ImportError:
- from collections import Callable # backward compatible
# Frequency bands for FFT
BINS = 256
@@ -115,22 +111,16 @@ class PIDController(object):
class PAVolumeController(object):
pa_types = {'input': 'source', 'output': 'sink'}
- def __init__(self, type, method=None, logger=None):
+ def __init__(self, type, logger=None):
"""Initializes the volume controller.
Arguments:
type: either input or output
- method: a method that will run a command and return pulseaudio
- information in the described format, as a single string with
- line breaks (to be processed with str.splitlines())
"""
self.type = type
self._volume = None
self.identifier = None
- self.method = method
- if not isinstance(method, Callable):
- self.method = self._pactl_output
self.logger = logger
def set_volume(self, volume):
@@ -142,8 +132,7 @@ class PAVolumeController(object):
'set-%s-volume' % (self.pa_types[self.type]),
str(self.identifier[0]),
str(int(volume)) + "%"]
- if not self.method(command):
- return False
+ self._pactl_output(command)
self._volume = volume
return True
@@ -160,8 +149,7 @@ class PAVolumeController(object):
'set-%s-mute' % (self.pa_types[self.type]),
str(self.identifier[0]),
mute]
- if not self.method(command):
- return False
+ self._pactl_output(command)
return True
def get_identifier(self):
@@ -192,7 +180,7 @@ class PAVolumeController(object):
# <ID>\t<NAME>\t<MODULE>\t<SAMPLE_SPEC_WITH_SPACES>\t<STATE>
# What we need to return is the ID for the first element on this list
# that does not contain auto_null or monitor.
- pa_info = self.method(command)
+ pa_info = self._pactl_output(command)
valid_elements = None
if pa_info:
@@ -221,7 +209,8 @@ class PAVolumeController(object):
universal_newlines=True)
except (subprocess.CalledProcessError):
time.sleep(5)
- return False
+            self.logger.error("Failed to execute: {}".format(command))
+ sys.exit(1)
class FileDumper(object):
@@ -369,7 +358,7 @@ class GStreamerMessageHandler(object):
# we can't control it :(
current_volume = volume_controller.get_volume()
if current_volume is None:
- self.logger.error("Unable to control recording volume."
+ self.logger.error("Unable to control recording volume. "
"Test results may be wrong")
return
self.current_level = level
diff --git a/bin/boot_mode_test.py b/bin/boot_mode_test.py
index e51a099..7d7299a 100755
--- a/bin/boot_mode_test.py
+++ b/bin/boot_mode_test.py
@@ -68,7 +68,8 @@ def reboot_to_firmware_check():
"OsIndicationsSupported-8be4df61-93ca-11d2-aa0d-00e098032b8c"
if os.path.isdir(osis_dir):
if os.path.isfile(osis_var):
- fw_info = open(osis_var).read()
+ with open(osis_var) as fh:
+ fw_info = fh.read()
if ord(fw_info[4]) & 1:
logging.info("PASS: Reboot-to-firmware feature is present.")
return 0
@@ -95,7 +96,8 @@ def secure_boot_check():
sb_var = sb_dir + "SecureBoot-8be4df61-93ca-11d2-aa0d-00e098032b8c"
if os.path.isdir(sb_dir):
if os.path.isfile(sb_var):
- sb_info = open(sb_var).read()
+ with open(sb_var) as fh:
+ sb_info = fh.read()
if ord(sb_info[4]) == 1:
logging.info("PASS: System booted with Secure Boot active.")
return 0
diff --git a/bin/booted_kernel_tests.py b/bin/booted_kernel_tests.py
index f190d8d..7caf583 100755
--- a/bin/booted_kernel_tests.py
+++ b/bin/booted_kernel_tests.py
@@ -9,8 +9,9 @@ import hashlib
import os
import sys
-from checkbox_support.snap_utils.system import (
- get_kernel_snap, add_hostfs_prefix)
+from checkbox_support.snap_utils.system import get_kernel_snap
+from checkbox_support.snap_utils.system import get_series
+from checkbox_support.snap_utils.system import add_hostfs_prefix
# 64kb buffer, hopefully suitable for all devices that might run this test
BUF_SIZE = 65536
@@ -20,7 +21,10 @@ def get_snap_kernel_path():
kernel = get_kernel_snap()
if kernel is None:
raise SystemExit('ERROR: failed to get kernel snap')
- path = '/snap/{}/current/kernel.img'.format(kernel)
+ if int(get_series()) >= 20:
+ path = '/snap/{}/current/kernel.efi'.format(kernel)
+ else:
+ path = '/snap/{}/current/kernel.img'.format(kernel)
return path
diff --git a/bin/cpufreq_test.py b/bin/cpufreq_test.py
new file mode 100755
index 0000000..ec3cf6c
--- /dev/null
+++ b/bin/cpufreq_test.py
@@ -0,0 +1,765 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Authors
+# Adrian Lane <adrian.lane@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3,
+# as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Test and validate SUT CPU scaling capabilities via CPUFreq."""
+
+
+from os import path
+import multiprocessing
+import collections
+import threading
+import argparse
+import logging
+import pprint
+import random
+import signal
+import copy
+import math
+import time
+import sys
+import psutil
+
+
+class CpuFreqTestError(Exception):
+ """Exception handling."""
+ def __init__(self, message):
+ super().__init__()
+ # warn and exit if cpufreq scaling non-supported
+ if 'scaling_driver' in message:
+ logging.warning(
+ '## Warning: scaling via CpuFreq non-supported ##')
+ sys.exit()
+ # exempt systems unable to change intel_pstate driver mode
+ elif 'intel_pstate/status' in message:
+ pass
+ else:
+ logging.error(message)
+
+
+class CpuFreqTest:
+ """ Test cpufreq scaling capabilities."""
+ # duration to stay at frequency (sec) (gt observe_interval)
+ scale_duration = 8
+ # frequency sampling interval (sec) (lt scale_duration)
+ observe_interval = .4
+ # max, min percentage of avg freq allowed to pass
+ # values relative to target freq
+ # ex: max = 110, min = 90 is 20% passing tolerance
+ max_freq_pct = 150
+ min_freq_pct = 90
+
+ def __init__(self):
+ def append_max_min():
+ """ Create scaling table from max_freq,
+ min_freq cpufreq files.
+ """
+ freq_table = []
+ path_max = path.join('cpu0', 'cpufreq',
+ 'scaling_max_freq')
+ path_min = path.join('cpu0', 'cpufreq',
+ 'scaling_min_freq')
+ freq_table.append(
+ self._read_sysfs(path_max).rstrip('\n'))
+ freq_table.append(
+ self._read_sysfs(path_min).rstrip('\n'))
+ return freq_table
+
+ self.fail_count = 0
+ self.path_root = '/sys/devices/system/cpu'
+ self.__proc_list = [] # track spawned processes
+ # catalog known cpufreq driver types
+ # used to determine logic flow control
+ self.driver_types = (
+ '-cpufreq',
+ 'cpufreq-',
+ 'arm-big-little'
+ )
+ # chainmap object for dict of dicts
+ self.freq_chainmap = collections.ChainMap()
+ # cpufreq driver
+ path_scaling_driver = path.join('cpu0', 'cpufreq',
+ 'scaling_driver')
+ self.scaling_driver = self._read_sysfs(
+ path_scaling_driver).rstrip('\n')
+ path_scaling_gvrnrs = path.join('cpu0', 'cpufreq',
+ 'scaling_available_governors')
+ path_startup_governor = path.join('cpu0', 'cpufreq',
+ 'scaling_governor')
+ self.scaling_gvrnrs = self._read_sysfs(
+ path_scaling_gvrnrs).rstrip('\n').split()
+ self.startup_governor = self._read_sysfs(
+ path_startup_governor).rstrip('\n')
+
+ # ensure the correct freq table is populated
+ if any(drvr in self.scaling_driver for drvr in self.driver_types):
+ path_scaling_freqs = path.join('cpu0', 'cpufreq',
+ 'scaling_available_frequencies')
+ scaling_freqs = self._read_sysfs(
+ path_scaling_freqs).rstrip('\n').split()
+ self.scaling_freqs = list(
+ map(int, scaling_freqs))
+ # test freqs in ascending order
+ self.scaling_freqs.sort()
+ else:
+ # setup path and status for intel pstate directives
+ if 'intel_' in self.scaling_driver:
+ # /sys/devices/system/cpu/intel_pstate/status
+ self.path_ipst_status = path.join('intel_pstate', 'status')
+ self.startup_ipst_status = self._read_sysfs(
+ self.path_ipst_status).rstrip('\n')
+ # use max, min freq for scaling table
+ self.scaling_freqs = list(
+ map(int, append_max_min()))
+ self.scaling_freqs.sort()
+ self.startup_max_freq = self.scaling_freqs[1]
+ self.startup_min_freq = self.scaling_freqs[0]
+
+ def _read_sysfs(self, fpath):
+ """Read sysfs/cpufreq file."""
+ abs_path = path.join(self.path_root, fpath)
+ try:
+ with open(abs_path, 'r') as _file:
+ data = _file.read()
+ except OSError:
+ raise CpuFreqTestError(
+ 'Unable to read file: %s' % abs_path)
+ return data
+
+ def _write_sysfs(self, fpath, data):
+ """Write sysfs/cpufreq file, data type agnostic."""
+ def return_bytes_utf(_data):
+ """Data type conversion to bytes utf."""
+ try:
+ data_enc = _data.encode()
+ except (AttributeError, TypeError):
+ data_enc = str(_data).encode()
+ return bytes(data_enc)
+
+ if not isinstance(data, bytes):
+ data_utf = return_bytes_utf(data)
+ else:
+ # do not convert bytes()
+ data_utf = data
+
+ abs_path = path.join(self.path_root, fpath)
+ try:
+ with open(abs_path, 'wb') as _file:
+ _file.write(data_utf)
+ except OSError:
+ raise CpuFreqTestError(
+ 'Unable to write file: %s' % abs_path)
+
+ def _get_cores(self, fpath):
+ """Get various core ranges, convert to list."""
+ def list_core_range(_core_range):
+ """ Method to convert core range to list prior
+ to iteration.
+ """
+ _core_list = []
+ # allow iteration over range: rng
+ for core in _core_range.split(','):
+ first_last = core.split('-')
+ if len(first_last) == 2:
+ _core_list += list(
+ range(
+ int(first_last[0]), int(first_last[1]) + 1))
+ else:
+ _core_list += [int(first_last[0])]
+ return _core_list
+
+ core_range = self._read_sysfs(fpath).strip('\n').strip()
+ core_list = list_core_range(core_range)
+ return core_list
+
+ def _process_results(self):
+ """Process results from CpuFreqCoreTest."""
+ def comp_freq_dict(_inner_key, _inner_val):
+ """Transpose and append results from subclass."""
+ if _inner_val:
+ # calc freq_median/freq_target %
+ result_pct = int((_inner_val / _inner_key) * 100)
+ if CpuFreqTest.min_freq_pct <= result_pct <= (
+ CpuFreqTest.max_freq_pct):
+ # append result pass/fail
+ new_inner_val = [str(result_pct) + '%', 'Pass']
+ else:
+ new_inner_val = [str(result_pct) + '%', 'Fail']
+ # increment fail bit
+ self.fail_count += 1
+ # append raw freq_median value
+ new_inner_val.append(int(_inner_val))
+ else:
+ new_inner_val = ['<=0%', 'Fail', _inner_val]
+ self.fail_count += 1
+ return new_inner_val
+
+ # create master result table with dict comprehension
+ freq_result_map = {
+ outer_key: {
+ inner_key: comp_freq_dict(inner_key, inner_val)
+ for inner_key, inner_val in outer_val.items()
+ }
+ for outer_key, outer_val in self.freq_chainmap.items()
+ }
+ return freq_result_map
+
+ def disable_thread_siblings(self):
+ """Disable thread_siblings (aka hyperthreading)
+ on all cores.
+ """
+ def get_thread_siblings():
+ """Get hyperthread cores to offline."""
+ thread_siblings = []
+ online_cores = self._get_cores('online')
+ for _core in online_cores:
+ _fpath = path.join('cpu%i' % _core,
+ 'topology', 'thread_siblings_list')
+ # second core is sibling
+ thread_siblings += self._get_cores(_fpath)[1:]
+
+ if thread_siblings:
+ _to_disable = set(thread_siblings) & set(online_cores)
+ logging.info(
+ '* disabling thread siblings (hyperthreading):')
+ logging.info(
+ ' - disabling cores: %s', _to_disable)
+ else:
+ _to_disable = False
+ return _to_disable
+
+ to_disable = get_thread_siblings()
+ if to_disable:
+ for core in to_disable:
+ fpath = path.join('cpu%i' % core, 'online')
+ self._write_sysfs(fpath, 0)
+
+ def set_governors(self, governor):
+ """Set/change CpuFreq scaling governor; global on all cores."""
+ logging.info(' - setting governor: %s', governor)
+ online_cores = self._get_cores('online')
+ for core in online_cores:
+ fpath = path.join('cpu%i' % core,
+ 'cpufreq', 'scaling_governor')
+ self._write_sysfs(fpath, governor)
+
+ def reset(self):
+ """Enable all offline cpus,
+ and reset max and min frequencies files.
+ """
+ def reset_intel_driver():
+ """ Reset fn for pstate driver."""
+ try:
+ self._write_sysfs(
+ self.path_ipst_status, 'off')
+ # if kernel/bios limitations present
+ except CpuFreqTestError:
+ # then reset via max, min freq files
+ set_max_min()
+ return
+
+ logging.info('* resetting intel p_state cpufreq driver')
+ # wait 300ms between setting driver modes
+ time.sleep(.3)
+ logging.info(
+ ' - setting driver mode: %s', self.startup_ipst_status)
+ self._write_sysfs(
+ self.path_ipst_status, self.startup_ipst_status)
+
+ def enable_off_cores():
+ """Enable all present and offline cores."""
+ present_cores = self._get_cores('present')
+ try:
+ offline_cores = self._get_cores('offline')
+            # for -r (reset) arg invocation
+ except ValueError:
+ return
+
+ to_enable = set(present_cores) & set(offline_cores)
+ logging.info('* enabling thread siblings/hyperthreading:')
+ logging.info(' - enabling cores: %s', to_enable)
+ for core in to_enable:
+ fpath = path.join('cpu%i' % core,
+ 'online')
+ self._write_sysfs(fpath, 1)
+
+ def set_max_min():
+ """Set max_frequency and min_frequency cpufreq files."""
+ logging.info('* restoring max, min freq files')
+ present_cores = self._get_cores('present')
+ for core in present_cores:
+ path_max = path.join('cpu%i' % core,
+ 'cpufreq', 'scaling_max_freq')
+ path_min = path.join('cpu%i' % core,
+ 'cpufreq', 'scaling_min_freq')
+ # reset max freq
+ self._write_sysfs(
+ path_max, self.startup_max_freq)
+ # reset min freq
+ self._write_sysfs(
+ path_min, self.startup_min_freq)
+
+ logging.info('* restoring startup governor:')
+ self.set_governors(self.startup_governor)
+
+ # enable offline cores
+ enable_off_cores()
+
+ # reset sysfs for non-acpi_cpufreq systems
+ if not any(drvr in self.scaling_driver for drvr in self.driver_types):
+ if 'intel_' in self.scaling_driver:
+ reset_intel_driver()
+ else:
+ set_max_min()
+
+ def execute_test(self):
+ """Execute cpufreq test, process results and return
+ appropriate exit code.
+ """
+ def init_intel_driver():
+ """Initialize Intel driver for testing.
+ Some modes unavailable for certain processor:kernel/bios configs.
+ """
+ try:
+ self._write_sysfs(
+ self.path_ipst_status, 'off')
+ # exempt systems unable to change intel_pstate driver mode
+ except CpuFreqTestError:
+ return
+
+ logging.info(
+ '* initializing intel_cpufreq driver:')
+ # wait 300ms between changing driver modes
+ time.sleep(.3)
+ # prefer the intel_cpufreq driver (passive mode)
+ self._write_sysfs(self.path_ipst_status, 'passive')
+ cur_ipst_status = self._read_sysfs(
+ self.path_ipst_status).rstrip('\n')
+ logging.info(' - driver mode: %s', cur_ipst_status)
+
+ logging.info('---------------------\n'
+ '| CpuFreqTest Begin |\n'
+ '---------------------')
+ start_time = time.time()
+ # disable hyperthreading
+ self.disable_thread_siblings()
+
+ # if intel, reset and set best compatible driver
+ if 'intel_' in self.scaling_driver:
+ init_intel_driver()
+
+ logging.info('* configuring cpu governors:')
+ # userspace governor required for scaling_setspeed
+ if any(drvr in self.scaling_driver for drvr in self.driver_types):
+ self.set_governors('userspace')
+ else:
+ self.set_governors('performance')
+
+ # spawn core_tests concurrently
+ logging.info('---------------------')
+ self.spawn_core_test()
+ # wrap up test
+ logging.info('\n-----------------\n'
+ '| Test Complete |\n'
+ '-----------------\n')
+ # reset state and cleanup
+ logging.info('[Reset & Cleanup]')
+ self.reset()
+
+ # facilitate house cleaning
+ if self.__proc_list:
+ logging.info('* terminating dangling pids')
+ for proc in self.__proc_list:
+ # terminate dangling processes
+ proc.terminate()
+ # prove that we are single-threaded again
+ logging.info('* active threads: %i\n', threading.active_count())
+
+ # display results
+ logging.warning('[CpuFreqTest Results]') # for --quiet mode
+ logging.info(
+ ' - legend:\n'
+ ' {core: {target_freq:'
+ '[sampled_med_%, P/F, sampled_median],:.\n')
+ # format result dict for human consumption
+ logging.info(
+ pprint.pformat(self._process_results()))
+ # provide time under test for debug/verbose output
+ end_time = time.time() - start_time
+ logging.debug('[Test Took: %.3fs]', end_time)
+ if self.fail_count:
+ print('\n[Test Failed]\n'
+ '* core fail_count =', self.fail_count)
+ return 1
+
+ print('\n[Test Passed]')
+ return 0
+
+ def spawn_core_test(self):
+ """Spawn concurrent scale testing on all online cores."""
+ def run_worker_process(_result_queue, affinity):
+ """ Subclass instantiation & constructor for
+ individual core.
+ """
+ _worker = psutil.Process()
+ # assign affinity, pin to core
+ _worker.cpu_affinity(affinity)
+            # instantiate core_test
+ cpu_freq_ctest = CpuFreqCoreTest(
+ affinity[0], _worker.pid)
+ # execute freq scaling
+ cpu_freq_ctest.scale_all_freq()
+ # get results
+ res_freq_map = cpu_freq_ctest.__call__()
+ # place in result_queue
+ _result_queue.put(res_freq_map)
+
+ def process_rqueue(queue_depth, _result_queue):
+ """Get and process core_test result_queue."""
+ # get queued core_test results
+ for _ in range(queue_depth):
+ # pipe results from core_test
+ worker_queue = _result_queue.get()
+ # append to chainmap object
+ self.freq_chainmap = self.freq_chainmap.new_child(
+ worker_queue)
+ # signal processing complete
+ _result_queue.task_done()
+ logging.info('----------------------------')
+ logging.info('* joining and closing queues')
+ # nicely join and close queue
+ try:
+ _result_queue.join()
+ finally:
+ _result_queue.close()
+
+ worker_list = [] # track spawned multiproc processes
+ pid_list = [] # track spawned multiproc pids
+ online_cores = self._get_cores('online')
+ # delegate & spawn tests on other cores first
+ # then run core 0 last (main() thread)
+ online_cores.append(online_cores.pop(0))
+ # create queue for piping results
+ result_queue = multiprocessing.JoinableQueue()
+
+ # assign affinity and spawn core_test
+ for core in online_cores:
+ affinity = [int(core)]
+ affinity_dict = dict(affinity=affinity)
+ worker = multiprocessing.Process(target=run_worker_process,
+ args=(result_queue,),
+ kwargs=affinity_dict)
+ # start core_test
+ worker.start()
+ worker_list.append(worker)
+ # track and log active child pids
+ pid_list.append(worker.pid)
+
+ # get, process queues
+ process_rqueue(len(worker_list), result_queue)
+
+ # cleanup core_test pids
+ logging.info('* joining worker processes:')
+ for idx, worker in enumerate(worker_list):
+ # join worker processes
+ worker_return = worker.join()
+ time.sleep(.1)
+ if worker_return is None:
+ logging.info(
+ ' - PID %s joined parent', pid_list[idx])
+ else:
+ # can cleanup in reset subroutine
+ continue
+ # update attribute for a 2nd pass terminate
+ self.__proc_list = worker_list
+
+
+class CpuFreqCoreTest(CpuFreqTest):
+ """Subclass to facilitate concurrent frequency scaling."""
+ class ObserveFreq:
+ """Class for instantiating observation thread.
+ Non-blocking and locked to system time to prevent
+ linear timer drift as frequency scaling ramps up.
+ """
+ __slots__ = ('interval',
+ 'callback',
+ 'thread_timer',
+ 'timer_running',
+ 'next_call')
+
+ def __init__(self, interval, callback):
+ """Execute start_timer on class instantiation."""
+ self.interval = interval
+ self.callback = callback
+ self.thread_timer = None
+ self.timer_running = False
+ self.next_call = time.time()
+ # start event loop
+ self.start_timer()
+
+ def start_timer(self):
+ """Facilitate callbacks at specified interval,
+ accounts and corrects for drift.
+ """
+ if not self.timer_running:
+ # offset interval
+ self.next_call += self.interval
+ # create time delta for consistent timing
+ time_delta = self.next_call - time.time()
+ # call self.observe() at end of time_delta
+ self.thread_timer = threading.Timer(time_delta,
+ self.observe)
+ # cleanup spawned timer threads on exit
+ self.thread_timer.daemon = True
+ self.thread_timer.start()
+ self.timer_running = True
+
+ def observe(self):
+ """Trigger callback to sample frequency."""
+ # reset timer_running
+ self.timer_running = False
+ # callback to outer scope
+ self.callback()
+ # start another tt cycle
+ self.start_timer()
+
+ def stop(self):
+ """Called when frequency scaling completed."""
+ if self.thread_timer:
+ # event loop end
+ self.thread_timer.cancel()
+ # logic reinforcement
+ self.timer_running = False
+
+ # as we may instantiate many instances
+ __slots__ = ('core',
+ 'pid',
+ '__instance_core',
+ '__instance_cpu',
+ '__instance_pid',
+ '__stop_scaling',
+ '__observed_freqs',
+ '__observed_freqs_dict',
+ '__read_sysfs',
+ '__write_sysfs')
+
+ def __init__(self, core, pid):
+ # perform base class inheritance
+ super().__init__()
+ # mangle instance attributes
+ self.__instance_core = int(core)
+ self.__instance_cpu = 'cpu%i' % core # future call speedup
+ self.__instance_pid = pid # worker pid
+ self.__stop_scaling = False # signal.alarm semaphore
+ self.__observed_freqs = [] # recorded freqs
+ self.__observed_freqs_dict = {} # core: recorded freqs
+ # private _r/_w_sysfs methods for concurrent access w/o locks
+ self.__read_sysfs = copy.deepcopy(self._read_sysfs)
+ self.__write_sysfs = copy.deepcopy(self._write_sysfs)
+
+ def __call__(self):
+ """Have subclass return dict '{core: {trgt_f: med_f,}}'
+ when called.
+ """
+ freq_map = {
+ self.__instance_core: self.__observed_freqs_dict
+ }
+ return freq_map
+
+ def _observefreq_callback(self):
+ """Callback method to sample frequency."""
+ def get_cur_freq():
+ """ Get current frequency.
+ """
+ fpath = path.join(self.__instance_cpu,
+ 'cpufreq', 'scaling_cur_freq')
+ freqs = self.__read_sysfs(fpath).rstrip('\n').split()[0]
+ return int(freqs)
+
+ self.__observed_freqs.append(get_cur_freq())
+ # matrix mode
+ logging.debug(self.__observed_freqs)
+
+ def scale_all_freq(self):
+ """Primary method to scale full range of freqs."""
+ def calc_freq_median(obs_freqs):
+ """ Calculate the median value of observed freqs.
+ """
+ n_samples = len(obs_freqs)
+ c_index = n_samples // 2
+ # odd number of samples
+ if n_samples % 2:
+ freq_median = sorted(obs_freqs)[c_index]
+ # even number of samples
+ else:
+ freq_median = sum(
+ sorted(obs_freqs)[
+ (c_index - 1):(c_index + 1)
+ ]) / 2
+ return freq_median
+
+ def map_observed_freqs(target_freq):
+ """Align freq key/values and split result lists
+ for grouping.
+ """
+ # get median of observed freqs
+ freq_median = calc_freq_median(self.__observed_freqs)
+ # target_freq = key, freq_median = value
+ self.__observed_freqs_dict.update(
+ {target_freq: freq_median})
+
+ def handle_alarm(*args):
+ """Alarm trigger callback, unload core."""
+ # *args req to call signal.signal()
+ del args # args unused
+ # stop workload loop
+ self.__stop_scaling = True
+
+ def execute_workload(workload_n):
+ """Perform maths to load core."""
+ # compartmentalized for future development
+ while not self.__stop_scaling:
+ math.factorial(workload_n)
+
+ def log_freq_scaling(_freq, workload_n):
+ """Provide feedback via logging."""
+ logging.info('* testing: %s || target freq: %i ||'
+ ' work: fact(%i) || worker pid: %i',
+ self.__instance_cpu, _freq,
+ workload_n, self.__instance_pid)
+
+ def load_observe_map(_freq):
+ """Proxy fn to scale core to freq."""
+ # gen randint for workload factorial calcs
+ workload_n = random.randint(37512, 39845)
+ # setup async alarm to kill load gen
+ signal.signal(signal.SIGALRM, handle_alarm)
+ # time to gen load
+ signal.alarm(CpuFreqTest.scale_duration)
+ # instantiate ObserveFreq and start data sampling
+ observe_freq = self.ObserveFreq(
+ interval=CpuFreqTest.observe_interval,
+ callback=self._observefreq_callback)
+ # provide feedback on test status
+ log_freq_scaling(_freq, workload_n)
+ # start loading core
+ execute_workload(workload_n)
+ # stop sampling
+ observe_freq.stop()
+ # map freq results to core
+ map_observed_freqs(_freq)
+
+ # cpufreq class driver (non-intel) supports full freq table scaling
+ if any(drvr in self.scaling_driver for drvr in self.driver_types):
+ fpath = path.join(self.__instance_cpu,
+ 'cpufreq', 'scaling_setspeed')
+ # others support max, min freq scaling
+ else:
+ fpath = path.join(self.__instance_cpu,
+ 'cpufreq', 'scaling_max_freq')
+
+ # iterate over supported frequency scaling table
+ for idx, freq in enumerate(self.scaling_freqs):
+ # re-init some attributes after 1st pass
+ if idx:
+                # time buffer ensures all prior freq intervals processed
+ time.sleep(1)
+ # reset freq list
+ self.__observed_freqs = []
+ # reset signal.signal() event loop bit
+ self.__stop_scaling = False
+
+ self.__write_sysfs(fpath, freq)
+ # load core, observe freqs, map to obs_freq_dict
+ load_observe_map(freq)
+
+
+def parse_arg_logging():
+ """ Ingest arguments and init logging."""
+ def init_logging(_user_arg):
+ """ Pass user arg and configure logging module."""
+ # logging optimizations; per python logging lib docs
+ logging._srcfile = None # pylint: disable=protected-access
+        # "%(processName)s" prefix
+ logging.logMultiprocessing = False
+ # "%(process)d" prefix
+ logging.logProcesses = False
+ # "%(thread)d" & "%(threadName)s" prefixes
+ logging.logThreads = False
+
+ # log to stdout for argparsed logging lvls
+ stdout_handler = logging.StreamHandler(sys.stdout)
+ stdout_handler.setLevel(_user_arg.log_level)
+
+ # log to stderr for exceptions
+ stderr_formatter = logging.Formatter(
+ '%(levelname)s: %(message)s')
+ stderr_handler = logging.StreamHandler(sys.stderr)
+ stderr_handler.setLevel(logging.ERROR)
+ stderr_handler.setFormatter(stderr_formatter)
+
+ # setup base/root logger
+ root_logger = logging.getLogger()
+ # set root logging level
+ root_logger.setLevel(logging.NOTSET)
+ # add handlers for out, err
+ root_logger.addHandler(stdout_handler)
+ root_logger.addHandler(stderr_handler)
+
+ parser = argparse.ArgumentParser()
+ # only allow one arg to be passed
+ parser_mutex_grp = parser.add_mutually_exclusive_group()
+ parser_mutex_grp.add_argument(
+ '-d', '-D', '--debug',
+ dest='log_level',
+ action='store_const',
+ const=logging.DEBUG,
+ # default logging level
+ default=logging.INFO,
+ help='debug/verbose output')
+ parser_mutex_grp.add_argument(
+ '-q', '-Q', '--quiet',
+ dest='log_level',
+ action='store_const',
+ # allow visible warnings in quiet mode
+ const=logging.WARNING,
+ help='suppress output')
+ parser_mutex_grp.add_argument(
+ '-r', '-R', '--reset',
+ action='store_true',
+ help='reset cpufreq sysfs parameters (all cores):'
+ ' (governor, thread siblings, max/min freqs, pstate)')
+ user_arg = parser.parse_args()
+ init_logging(user_arg)
+ return user_arg
+
+
+def main():
+ # configure and start logging
+ user_arg = parse_arg_logging()
+ # instantiate CpuFreqTest as cpu_freq_test
+ cpu_freq_test = CpuFreqTest()
+ # provide access to reset() method
+ if user_arg.reset:
+ print('[Reset CpuFreq Sysfs]')
+ return cpu_freq_test.reset()
+ return cpu_freq_test.execute_test()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/bin/cpuid.py b/bin/cpuid.py
index 36d5b46..3eebf1e 100755
--- a/bin/cpuid.py
+++ b/bin/cpuid.py
@@ -88,12 +88,14 @@ CPUIDS = {
"AMD EPYC": ['0x800f12'],
"AMD Lisbon": ['0x100f81'],
"AMD Magny-Cours": ['0x100f91'],
+ "AMD Milan": ['0xa00f11'],
"AMD ROME": ['0x830f10'],
"Broadwell": ['0x4067', '0x306d4', '0x5066', '0x406f'],
"Canon Lake": ['0x6066'],
"Cascade Lake": ['0x50655', '0x50656', '0x50657'],
"Coffee Lake": [
'0x806ea', '0x906ea', '0x906eb', '0x906ec', '0x906ed'],
+ "Cooper Lake": ['0x5065a', '0x5065b'],
"Haswell": ['0x306c', '0x4065', '0x4066', '0x306f'],
"Ice Lake": ['0x706e'],
"Ivy Bridge": ['0x306a', '0x306e'],
diff --git a/bin/disk_read_performance_test.sh b/bin/disk_read_performance_test.sh
index 28d0860..d91dc7b 100755
--- a/bin/disk_read_performance_test.sh
+++ b/bin/disk_read_performance_test.sh
@@ -32,6 +32,9 @@ for disk in "$@"; do
if [[ $dev_path =~ pmem ]]; then
disk_type="nvdimm"
fi
+ if [[ $dev_path =~ mtd ]]; then
+ disk_type="mtd"
+ fi
if [[ ($disk_type == "scsi" || $disk_type == "ata") && $rotational == 0 ]]; then
disk_type="ssd"
fi
@@ -58,6 +61,7 @@ for disk in "$@"; do
"devmapper" ) MIN_BUF_READ=$DEFAULT_BUF_READ;;
"ide" ) MIN_BUF_READ=40;;
"mmc" ) MIN_BUF_READ=$DEFAULT_BUF_READ;;
+ "mtd" ) MIN_BUF_READ=1;;
"nvme" ) MIN_BUF_READ=200;;
"nvdimm" ) MIN_BUF_READ=500;;
"mdadm" ) MIN_BUF_READ=80;;
diff --git a/bin/fan_reaction_test.py b/bin/fan_reaction_test.py
index c37054b..3b2257e 100755
--- a/bin/fan_reaction_test.py
+++ b/bin/fan_reaction_test.py
@@ -30,20 +30,45 @@ class FanMonitor:
"""Device that reports fan RPM or something correlating to that."""
def __init__(self):
"""Use heuristics to find something that we can read."""
+ self.hwmons = []
self._fan_paths = glob.glob('/sys/class/hwmon/hwmon*/fan*_input')
- if not self._fan_paths:
+ # All entries (except name) under /sys/class/hwmon/hwmon/* are optional
+ # and should only be created in a given driver if the chip has
+ # the feature.
+        # We use fan*_input because the "thinkpad_hwmon" driver reports
+        # fan_input only. If any driver has a different implementation,
+        # we may need to check other entries in the future.
+ for i in self._fan_paths:
+ device = os.path.join(os.path.dirname(i), 'device')
+ device_path = os.path.realpath(device)
+            # Get the PCI device class of the hwmon to check whether it is a GPU.
+ if "pci" in device_path:
+ pci_class_path = os.path.join(device, 'class')
+ try:
+ with open(pci_class_path, 'r') as _file:
+ pci_class = _file.read().splitlines()
+ pci_device_class = (
+ int(pci_class[0], base=16) >> 16) & 0xff
+                    # Make sure the fan is not on a graphics card
+ if pci_device_class == 3:
+ continue
+ except OSError:
+ print('Not able to access {}'.format(pci_class_path))
+ continue
+ self.hwmons.append(i)
+ if not self.hwmons:
print('Fan monitoring interface not found in SysFS')
raise SystemExit(0)
def get_rpm(self):
result = {}
- for p in self._fan_paths:
+ for p in self.hwmons:
try:
with open(p, 'rt') as f:
fan_mon_name = os.path.relpath(p, '/sys/class/hwmon')
result[fan_mon_name] = int(f.read())
except OSError:
- print('Fan SysFS node dissappeared ({})'.format(p))
+ print('Fan SysFS node disappeared ({})'.format(p))
return result
def get_average_rpm(self, period):
diff --git a/bin/gateway_ping_test.py b/bin/gateway_ping_test.py
index b6126cc..dc2e165 100755
--- a/bin/gateway_ping_test.py
+++ b/bin/gateway_ping_test.py
@@ -185,7 +185,8 @@ def ping(host, interface, count, deadline, verbose=False):
if interface:
command.append("-I{}".format(interface))
reg = re.compile(
- r"(\d+) packets transmitted, (\d+) received, (\d+)% packet loss")
+ r"(\d+) packets transmitted, (\d+) received,"
+ r".*([0-9]*\.?[0-9]*.)% packet loss")
ping_summary = {'transmitted': 0, 'received': 0, 'pct_loss': 0}
try:
output = subprocess.check_output(command, universal_newlines=True)
diff --git a/bin/pactl_list.sh b/bin/pactl_list.sh
new file mode 100755
index 0000000..a7abb87
--- /dev/null
+++ b/bin/pactl_list.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+EXIT_CODE=0
+
+for device in "sources" "sinks"
+do
+ if ! pactl list $device short | grep -v -E "monitor|auto_null"
+ then
+ echo "No available $device found"
+ case $device in
+ "sources")
+ EXIT_CODE=$(( EXIT_CODE+1 ))
+ ;;
+ "sinks")
+ EXIT_CODE=$(( EXIT_CODE+2 ))
+ esac
+ fi
+done
+
+exit $EXIT_CODE
\ No newline at end of file
diff --git a/bin/roundtrip_qr.py b/bin/roundtrip_qr.py
index e2eb5b5..4cb6592 100755
--- a/bin/roundtrip_qr.py
+++ b/bin/roundtrip_qr.py
@@ -54,7 +54,7 @@ def capture_webcam(name):
def generate_data():
- return ''.join(random.choice(string.ascii_letters) for i in range(20))
+ return ''.join(random.choice(string.ascii_letters) for i in range(10))
def generate_qr_code(data):
@@ -62,11 +62,11 @@ def generate_qr_code(data):
def display_code(qr):
- with open('/dev/tty1', 'wb+', buffering=0) as term:
+ with open('/dev/tty0', 'wb+', buffering=0) as term:
        # clear the tty so the qr is always printed at the top of the screen
term.write(str.encode('\033c'))
# print the qr code
- term.write(qr.terminal(quiet_zone=5).encode())
+ term.write(qr.terminal(quiet_zone=1).encode())
def decode_image(filename):
diff --git a/bin/snap_tests.py b/bin/snap_tests.py
index fe80288..e6ea23b 100755
--- a/bin/snap_tests.py
+++ b/bin/snap_tests.py
@@ -12,7 +12,6 @@ import sys
from checkbox_support.snap_utils.snapd import Snapd
# Requirements for the test snap:
-# - the snap must not be installed at the start of the nested test plan
# - the snap must be strictly confined (no classic or devmode flags)
# - there must be different revisions on the stable & edge channels
try:
@@ -37,7 +36,7 @@ class SnapList():
"""snap list should show the core package is installed."""
data = Snapd().list()
for snap in data:
- if snap['name'] in ('core', 'core16', 'core18'):
+ if snap['name'] in ('core', 'core16', 'core18', 'core20'):
print("Found a core snap")
print(snap['name'], snap['version'], snap['revision'])
return 0
@@ -70,6 +69,9 @@ class SnapInstall():
args = parser.parse_args(sys.argv[2:])
print('Install {}...'.format(TEST_SNAP))
s = Snapd(SNAPD_TASK_TIMEOUT, SNAPD_POLL_INTERVAL, verbose=True)
+ if s.list(TEST_SNAP):
+ print('{} already installed. Removing'.format(TEST_SNAP))
+ s.remove(TEST_SNAP)
s.install(TEST_SNAP, args.channel)
print('Confirm in snap list...')
data = s.list()
@@ -87,11 +89,12 @@ class SnapRefresh():
def invoked(self):
"""Test refresh of test-snapd-tools snap."""
def get_rev():
- data = Snapd().list()
- for snap in data:
- if snap['name'] == TEST_SNAP:
- return snap['revision']
- print('Get starting revision...')
+ return Snapd().list(TEST_SNAP)['revision']
+ if Snapd().list(TEST_SNAP):
+ print('Remove previously installed revision')
+ Snapd().remove(TEST_SNAP)
+ print('Install starting revision...')
+ Snapd().install(TEST_SNAP, 'stable')
start_rev = get_rev()
print(' revision:', start_rev)
print('Refresh to edge...')
@@ -112,10 +115,15 @@ class SnapRevert():
def invoked(self):
"""Test revert of test-snapd-tools snap."""
s = Snapd(SNAPD_TASK_TIMEOUT, SNAPD_POLL_INTERVAL)
+ if s.list(TEST_SNAP):
+ s.remove(TEST_SNAP)
+ print('Install stable revision')
+ s.install(TEST_SNAP)
+ print('Refresh to edge')
+ s.refresh(TEST_SNAP, 'edge')
print('Get stable channel revision from store...')
r = s.info(TEST_SNAP)
stable_rev = r['channels']['latest/stable']['revision']
- print('Get current installed revision...')
r = s.list(TEST_SNAP)
installed_rev = r['revision'] # should be edge revision
print('Reverting snap {}...'.format(TEST_SNAP))
@@ -140,6 +148,11 @@ class SnapReupdate():
"""Test re-update of test-snapd-tools snap."""
s = Snapd(SNAPD_TASK_TIMEOUT, SNAPD_POLL_INTERVAL)
print('Get edge channel revision from store...')
+ if s.list(TEST_SNAP):
+ s.remove(TEST_SNAP)
+ s.install(TEST_SNAP)
+ s.refresh(TEST_SNAP, 'edge')
+ s.revert(TEST_SNAP)
r = s.info(TEST_SNAP)
edge_rev = r['channels']['latest/edge']['revision']
print('Remove edge revision...')
@@ -160,8 +173,11 @@ class SnapRemove():
def invoked(self):
"""Test remove of test-snapd-tools snap."""
- print('Install {}...'.format(TEST_SNAP))
+ print('Remove {}...'.format(TEST_SNAP))
s = Snapd(SNAPD_TASK_TIMEOUT, SNAPD_POLL_INTERVAL)
+ if not s.list(TEST_SNAP):
+ print('{} not found. Installing'.format(TEST_SNAP))
+ s.install(TEST_SNAP)
s.remove(TEST_SNAP)
print('Check not in snap list')
data = s.list()
@@ -189,8 +205,8 @@ class Snap():
parser = argparse.ArgumentParser()
parser.add_argument('subcommand', type=str, choices=sub_commands)
args = parser.parse_args(sys.argv[1:2])
- sub_commands[args.subcommand]().invoked()
+ return sub_commands[args.subcommand]().invoked()
if __name__ == '__main__':
- Snap().main()
+ sys.exit(Snap().main())
diff --git a/bin/socketcan_test.py b/bin/socketcan_test.py
index 25c79b2..5468b3a 100755
--- a/bin/socketcan_test.py
+++ b/bin/socketcan_test.py
@@ -18,6 +18,7 @@
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
import argparse
+import ctypes
import os
import socket
import struct
@@ -112,7 +113,7 @@ def echo_test(args):
id_flags = 0
if args.effid:
print('Setting EFF CAN ID flag')
- id_flags = socket.CAN_EFF_FLAG
+ id_flags = ctypes.c_ulong(socket.CAN_EFF_FLAG).value
# Whether to enable local loopback, required for local only test
# but only want to parse packets from other end if remote
diff --git a/bin/wifi_nmcli_test.py b/bin/wifi_nmcli_test.py
index a6bbd8f..6f77376 100755
--- a/bin/wifi_nmcli_test.py
+++ b/bin/wifi_nmcli_test.py
@@ -17,11 +17,23 @@ import subprocess as sp
import sys
import time
+from distutils.version import LooseVersion
from gateway_ping_test import ping
print = functools.partial(print, flush=True)
+def legacy_nmcli():
+ cmd = "nmcli -v"
+ output = sp.check_output(cmd, shell=True)
+ version = LooseVersion(output.strip().split()[-1].decode())
+ # check if using the 16.04 nmcli because of this bug
+ # https://bugs.launchpad.net/plano/+bug/1896806
+ if version < LooseVersion("1.9.9"):
+ return True
+ return False
+
+
def print_head(txt):
print("##", txt)
@@ -159,7 +171,10 @@ def open_connection(args):
# Make sure the connection is brought up
cmd = "nmcli c up TEST_CON"
print_cmd(cmd)
- sp.call(cmd, shell=True)
+ try:
+ sp.call(cmd, shell=True, timeout=200 if legacy_nmcli() else None)
+ except sp.TimeoutExpired:
+ print("Connection activation failed\n")
print()
print_head("Ensure interface is connected")
@@ -206,7 +221,10 @@ def secured_connection(args):
# Make sure the connection is brought up
cmd = "nmcli c up TEST_CON"
print_cmd(cmd)
- sp.call(cmd, shell=True)
+ try:
+ sp.call(cmd, shell=True, timeout=200 if legacy_nmcli() else None)
+ except sp.TimeoutExpired:
+ print("Connection activation failed\n")
print()
print_head("Ensure interface is connected")
diff --git a/manage.py b/manage.py
index 9dea82b..a1d80d4 100755
--- a/manage.py
+++ b/manage.py
@@ -5,7 +5,7 @@ from plainbox.provider_manager import N_
setup(
name='plainbox-provider-checkbox',
namespace='com.canonical.certification',
- version="0.55.0",
+ version="0.56.0rc1",
description=N_("Checkbox provider"),
gettext_domain='plainbox-provider-checkbox',
strict=False, deprecated=False,
diff --git a/src/clocktest.c b/src/clocktest.c
index 139056f..cc82d02 100644
--- a/src/clocktest.c
+++ b/src/clocktest.c
@@ -72,8 +72,8 @@ int test_clock_jitter(){
#endif
if (jitter > MAX_JITTER || jitter < -MAX_JITTER){
- printf ("ERROR, jitter = %f\n",jitter);
- printf ("iter = %u, cpus = %u,%u\n",iter,slow_cpu,fast_cpu);
+ printf ("ERROR: jitter = %f Jitter must be < 0.2 to pass\n",jitter);
+ printf ("ERROR: Failed Iteration = %u, Slowest CPU: %u Fastest CPU: %u\n",iter,slow_cpu,fast_cpu);
failures++;
}
if (jitter > largest_jitter)
diff --git a/units/audio/jobs.pxu b/units/audio/jobs.pxu
index 538d213..1fad964 100644
--- a/units/audio/jobs.pxu
+++ b/units/audio/jobs.pxu
@@ -341,6 +341,7 @@ _description:
plugin: shell
category_id: com.canonical.plainbox::audio
id: audio/alsa_record_playback_automated
+depends: audio/detect_sinks_sources
estimated_duration: 10.0
requires:
package.name == 'python3-gi'
@@ -357,6 +358,17 @@ _description:
plugin: shell
category_id: com.canonical.plainbox::audio
+id: audio/detect_sinks_sources
+estimated_duration: 1.0
+requires:
+ package.name == 'pulseaudio-utils'
+command:
+ pactl_list.sh
+_description:
+  Test to detect if there are available sources and sinks.
+
+plugin: shell
+category_id: com.canonical.plainbox::audio
id: audio/alsa_info_collect
estimated_duration: 2.0
command: alsa_info --no-dialog --no-upload --output "${PLAINBOX_SESSION_SHARE}"/alsa_info.log
@@ -590,7 +602,7 @@ plugin: shell
category_id: com.canonical.plainbox::audio
id: audio/alsa_record_playback_automated_after_suspend_30_cycles
estimated_duration: 10.0
-depends: power-management/suspend_30_cycles
+depends: power-management/suspend_30_cycles audio/detect_sinks_sources_after_suspend_30_cycles
requires:
package.name == 'python3-gi'
package.name == 'gir1.2-gstreamer-1.0'
@@ -606,6 +618,17 @@ _description:
plugin: shell
category_id: com.canonical.plainbox::audio
+id: audio/detect_sinks_sources_after_suspend_30_cycles
+estimated_duration: 1.0
+requires:
+ package.name == 'pulseaudio-utils'
+command:
+ pactl_list.sh
+_description:
+  Test to detect if there are available sources and sinks after suspending 30 times.
+
+plugin: shell
+category_id: com.canonical.plainbox::audio
id: audio/check_volume_after_suspend_30_cycles
estimated_duration: 1.0
depends: power-management/suspend_30_cycles
diff --git a/units/camera/jobs.pxu b/units/camera/jobs.pxu
index 7c0b1d0..285fc2e 100644
--- a/units/camera/jobs.pxu
+++ b/units/camera/jobs.pxu
@@ -2,8 +2,9 @@ plugin: shell
category_id: com.canonical.plainbox::camera
id: camera/detect
estimated_duration: 1.2
+imports: from com.canonical.plainbox import manifest
requires:
- device.category == 'CAPTURE'
+ manifest.has_camera == 'True'
command:
camera_test.py detect
_summary: This Automated test attempts to detect a camera.
@@ -144,6 +145,12 @@ category_id: com.canonical.plainbox::camera
id: camera/roundtrip-qrcode_{{ name }}
_summary: Test video output and camera {{ name }} by displaying and reading a qrcode
estimated_duration: 5.0
+depends:
+ {%- if category == 'MMAL' %}
+ camera/detect-rpi
+ {%- else %}
+ camera/detect
+ {% endif -%}
requires:
{%- if __on_ubuntucore__ %}
lsb.release >= '19.1'
diff --git a/units/camera/manifest.pxu b/units/camera/manifest.pxu
index de12798..ed79336 100644
--- a/units/camera/manifest.pxu
+++ b/units/camera/manifest.pxu
@@ -1,4 +1,9 @@
unit: manifest entry
id: has_rpi_camera
_name: RaspberryPi Camera Module
-value-type: bool
\ No newline at end of file
+value-type: bool
+
+unit: manifest entry
+id: has_camera
+_name: Camera/Capture Device
+value-type: bool
diff --git a/units/cpu/jobs.pxu b/units/cpu/jobs.pxu
index c442bee..afdd6ea 100644
--- a/units/cpu/jobs.pxu
+++ b/units/cpu/jobs.pxu
@@ -86,7 +86,7 @@ plugin: shell
category_id: com.canonical.plainbox::cpu
id: cpu/topology
estimated_duration: 1.0
-requires: int(cpuinfo.count) > 1 and (cpuinfo.platform == 'i386' or cpuinfo.platform == 'x86_64' or cpuinfo.platform == 's390x')
+requires: int(cpuinfo.count) > 1 and (cpuinfo.platform == 'i386' or cpuinfo.platform == 'x86_64')
command: cpu_topology.py
_summary:
Check CPU topology for accuracy between proc and sysfs
@@ -177,3 +177,13 @@ _siblings: [
"command": "cpuinfo_resource.py | diff $PLAINBOX_SESSION_SHARE/cpuinfo_before_suspend -",
"depends": "com.canonical.certification::suspend/suspend_advanced_auto"}
]
+
+plugin: shell
+category_id: com.canonical.plainbox::cpu
+id: cpu/cpufreq_test-server
+user: root
+command: cpufreq_test.py -q
+_summary:
+ cpufreq scaling test
+_description:
+ Comprehensive testing of cpu scaling capabilities and directives via cpufreq.
diff --git a/units/cpu/test-plan.pxu b/units/cpu/test-plan.pxu
index 5d06090..eb2dabb 100644
--- a/units/cpu/test-plan.pxu
+++ b/units/cpu/test-plan.pxu
@@ -85,7 +85,7 @@ _name: CPU Tests (Server)
_description: CPU Tests (Server)
include:
cpu/clocktest certification-status=blocker
- cpu/frequency_governors certification-status=blocker
+ cpu/cpufreq_test-server certification-status=blocker
cpu/maxfreq_test certification-status=blocker
cpu/maxfreq_test-log-attach certification-status=non-blocker
cpu/topology certification-status=blocker
diff --git a/units/firmware/jobs.pxu b/units/firmware/jobs.pxu
index 05c5f04..b0d69cb 100644
--- a/units/firmware/jobs.pxu
+++ b/units/firmware/jobs.pxu
@@ -87,3 +87,47 @@ command:
[ -f "$PLAINBOX_SESSION_SHARE"/fwts_server_results.log ] && gzip -c "$PLAINBOX_SESSION_SHARE"/fwts_server_results.log
_description: Attaches the FWTS Server Cert results log to the submission
_summary: Attach FWTS Server Cert test log to submission
+
+
+id: firmware/tcglog-required-algs-sha256
+category_id: com.canonical.plainbox::firmware
+summary: Test that the SHA256 algorithm is present in the TCG event log
+description:
+ Presence of support for the SHA256 algorithm is a requirement for enabling FDE
+ support in Ubuntu Core 20 systems
+plugin: shell
+user: root
+command: tcglog-check -required-algs sha256
+imports: from com.canonical.plainbox import manifest
+requires:
+ cpuinfo.platform == 'x86_64'
+ manifest.has_tpm2_chip == 'True'
+ executable.name == 'tcglog-check'
+
+id: firmware/tcglog-require-pe-image-digests
+category_id: com.canonical.plainbox::firmware
+summary: Test format of digests for EV_EFI_BOOT_SERVICES_APPLICATION events
+description:
+ Digests for EV_EFI_BOOT_SERVICES_APPLICATION events associated with PE images
+ must be PE image digests rather than file digests. This test is a requirement
+ for enabling FDE support in Ubuntu Core 20 systems
+plugin: shell
+user: root
+command: tcglog-check -require-pe-image-digests
+imports: from com.canonical.plainbox import manifest
+requires:
+ cpuinfo.platform == 'x86_64'
+ manifest.has_tpm2_chip == 'True'
+ executable.name == 'tcglog-check'
+
+id: firmware/tcglog-dump-attachment
+category_id: com.canonical.plainbox::firmware
+summary: Attach a dump of the TCG Event log for debugging
+plugin: attachment
+user: root
+command: tcglog-dump
+imports: from com.canonical.plainbox import manifest
+requires:
+ cpuinfo.platform == 'x86_64'
+ manifest.has_tpm2_chip == 'True'
+ executable.name == 'tcglog-dump'
diff --git a/units/firmware/test-plan.pxu b/units/firmware/test-plan.pxu
index 62b4e05..1765ced 100644
--- a/units/firmware/test-plan.pxu
+++ b/units/firmware/test-plan.pxu
@@ -9,3 +9,30 @@ mandatory_include:
include:
bootstrap_include:
fwts
+
+
+id: firmware-uc20-fde-full
+unit: test plan
+_name: Test firmware compatibility with UC20 FDE
+_description: Test firmware compatibility with UC20 FDE
+include:
+nested_part:
+ firmware-uc20-fde-manual
+ firmware-uc20-fde-automated
+
+
+id: firmware-uc20-fde-manual
+unit: test plan
+_name: Test firmware compatibility with UC20 FDE (manual)
+_description: Test firmware compatibility with UC20 FDE (manual)
+include:
+
+
+id: firmware-uc20-fde-automated
+unit: test plan
+_name: Test firmware compatibility with UC20 FDE (automated)
+_description: Test firmware compatibility with UC20 FDE (automated)
+include:
+ firmware/tcglog-required-algs-sha256
+ firmware/tcglog-require-pe-image-digests
+ firmware/tcglog-dump-attachment
diff --git a/units/info/jobs.pxu b/units/info/jobs.pxu
index a9031ad..ba6b376 100644
--- a/units/info/jobs.pxu
+++ b/units/info/jobs.pxu
@@ -350,8 +350,12 @@ estimated_duration: 0.1
_description: Attaches the buildstamp identifier for the OS
_summary: Attaches the buildstamp identifier for the OS
command:
- if [ -s /etc/buildstamp ]; then
+ if [ -s /var/lib/ubuntu_dist_channel ]; then
+ cat /var/lib/ubuntu_dist_channel
+ elif [ -s /etc/buildstamp ]; then
cat /etc/buildstamp
+ elif [ -s /run/mnt/ubuntu-seed/.disk/info ]; then
+ cat /run/mnt/ubuntu-seed/.disk/info
elif [ -s /etc/media-info ]; then
cat /etc/media-info
elif [ -s /writable/system-data/etc/buildstamp ]; then
diff --git a/units/kernel-snap/jobs.pxu b/units/kernel-snap/jobs.pxu
index 151fd98..950d1a6 100644
--- a/units/kernel-snap/jobs.pxu
+++ b/units/kernel-snap/jobs.pxu
@@ -16,6 +16,7 @@ requires:
unit: template
template-resource: bootloader
+template-filter: bootloader.booted_kernel_path != 'unknown'
id: kernel-snap/booted-kernel-matches-current-{name}
category_id: kernel-snap
_summary: The booted kernel image matches image in current kernel snap
diff --git a/units/keys/jobs.pxu b/units/keys/jobs.pxu
index 8cef3e7..3b42f3e 100644
--- a/units/keys/jobs.pxu
+++ b/units/keys/jobs.pxu
@@ -290,6 +290,30 @@ _steps:
_verification:
Did the power management prompt pop up when press power button?
+plugin: user-interact
+category_id: com.canonical.plainbox::keys
+_summary:
+ Check power button event filtering
+id: keys/power-button-event
+estimated_duration: 15.0
+requires:
+ package.name == 'acpid'
+ package.name == 'libglib2.0-bin'
+command:
+ action=$(gsettings get org.gnome.settings-daemon.plugins.power power-button-action)
+ gsettings set org.gnome.settings-daemon.plugins.power power-button-action nothing
+ acpi_listen -t 10 | tee "$PLAINBOX_SESSION_SHARE"/power-button-event.log
+ gsettings set org.gnome.settings-daemon.plugins.power power-button-action "$action"
+ [[ $(grep -c "PBTN.*00000080" "$PLAINBOX_SESSION_SHARE"/power-button-event.log) -eq 1 ]] || \
+ [[ $(grep -c "PWRB.*00000080" "$PLAINBOX_SESSION_SHARE"/power-button-event.log) -eq 1 ]]
+purpose:
+    This test will check whether the power button event is reported correctly; the listener will
+ wait for 10 seconds.
+steps:
+    1. Press and release the power button once within 10 seconds; some platforms might need a long press
+    to trigger the PBTN or PWRB event
+    2. Check the number of PBTN/PWRB events in the output
+
plugin: manual
category_id: com.canonical.plainbox::keys
id: keys/fn-lock
diff --git a/units/keys/test-plan.pxu b/units/keys/test-plan.pxu
index d4d2fe2..ffc111c 100644
--- a/units/keys/test-plan.pxu
+++ b/units/keys/test-plan.pxu
@@ -25,6 +25,7 @@ include:
keys/keyboard-backlight certification-status=blocker
keys/microphone-mute certification-status=blocker
keys/power-button certification-status=blocker
+ keys/power-button-event certification-status=blocker
keys/fn-lock certification-status=non-blocker
id: keys-cert-automated
@@ -72,6 +73,7 @@ include:
keys/keyboard-backlight certification-status=blocker
keys/microphone-mute certification-status=blocker
keys/power-button certification-status=blocker
+ keys/power-button-event certification-status=blocker
id: after-suspend-keys-cert-blockers
unit: test plan
diff --git a/units/memory/jobs.pxu b/units/memory/jobs.pxu
index 565493a..3cb6b5b 100644
--- a/units/memory/jobs.pxu
+++ b/units/memory/jobs.pxu
@@ -47,7 +47,7 @@ user: root
environ: STRESS_NG_MIN_SWAP_SIZE
requires:
executable.name == 'stress-ng'
-command: stress_ng_test.py memory
+command: systemd-inhibit stress_ng_test.py memory
_summary: Stress test of system memory
_description:
Test to perform some basic stress and exercise of system memory via the
diff --git a/units/miscellanea/test-plan.pxu b/units/miscellanea/test-plan.pxu
index 6c2d582..60741b6 100644
--- a/units/miscellanea/test-plan.pxu
+++ b/units/miscellanea/test-plan.pxu
@@ -87,9 +87,7 @@ mandatory_include:
miscellanea/submission-resources
miscellanea/cpuid
miscellanea/efi_boot_mode certification-status=blocker
- miscellanea/reboot_firmware
miscellanea/efi_pxeboot
- miscellanea/kernel_taint_test
miscellanea/cpus_are_not_samples
miscellanea/ipmi_test certification-status=blocker
miscellanea/bmc_info
diff --git a/units/monitor/jobs.pxu b/units/monitor/jobs.pxu
index 4a6ae9a..f5e4974 100644
--- a/units/monitor/jobs.pxu
+++ b/units/monitor/jobs.pxu
@@ -395,3 +395,18 @@ _steps:
_verification:
Was the interface displayed correctly on the screen?
flags: also-after-suspend
+
+id: monitor/vga
+_summary: Monitor works (VGA)
+_purpose:
+ Check output to display through VGA port
+_steps:
+ 1. Connect display to VGA port
+ 2. Check the display
+_verification:
+ Output to display works
+plugin: manual
+category_id: com.canonical.plainbox::monitor
+estimated_duration: 300
+flags: also-after-suspend
+
diff --git a/units/monitor/test-plan.pxu b/units/monitor/test-plan.pxu
index b893acb..21b16cb 100644
--- a/units/monitor/test-plan.pxu
+++ b/units/monitor/test-plan.pxu
@@ -273,6 +273,7 @@ include:
monitor/dvi-to-vga
monitor/hdmi-to-vga
monitor/displayport_hotplug
+ monitor/vga
id: after-suspend-monitor-full
unit: test plan
@@ -292,3 +293,4 @@ include:
after-suspend-monitor/dvi-to-vga
after-suspend-monitor/hdmi-to-vga
after-suspend-monitor/displayport_hotplug
+ after-suspend-monitor/vga
diff --git a/units/networking/jobs.pxu b/units/networking/jobs.pxu
index e95f3dc..6653eb3 100644
--- a/units/networking/jobs.pxu
+++ b/units/networking/jobs.pxu
@@ -86,4 +86,9 @@ command: network_predictable_names.sh
_summary: Verify that all network interfaces have predictable names.
_description: Verify that all network interfaces have predictable names.
requires:
- {% if __on_ubuntucore__ %}lsb.release >= '20'{% else %}lsb.release >= '18'{% endif %}
+ {%- if __on_ubuntucore__ %}
+ lsb.release >= '20'
+ model_assertion.gadget != "pi"
+ {%- else %}
+ lsb.release >= '18'
+ {% endif -%}
diff --git a/units/power-management/jobs.pxu b/units/power-management/jobs.pxu
index 37aef99..9dc6851 100644
--- a/units/power-management/jobs.pxu
+++ b/units/power-management/jobs.pxu
@@ -345,25 +345,34 @@ requires:
command:
cpu_lpi_file=$(cpuinfo_resource.py | grep cpu_lpi_file | awk '{ print $2 }')
if [ "$cpu_lpi_file" == "low_power_idle_cpu_residency_us" ]; then
- echo "check /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us"
+ before=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us)
xset dpms force off
sleep 20
xset dpms force on
- residency=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us)
+ after=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us)
+ residency=$((after-before))
+ echo "/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us after/before screen off is $residency"
else
- echo "check /sys/kernel/debug/pmc_core/package_cstate_show"
+        echo "The system doesn't have a hardware-based residency counter."
+        echo "Please check https://www.uefi.org/sites/default/files/resources/Intel_ACPI_Low_Power_S0_Idle.pdf"
before=$(cat < /sys/kernel/debug/pmc_core/package_cstate_show | grep C10 | awk '{ print $4 }')
xset dpms force off
sleep 20
xset dpms force on
after=$(cat < /sys/kernel/debug/pmc_core/package_cstate_show | grep C10 | awk '{ print $4 }')
residency=$((after-before))
+ echo "/sys/kernel/debug/pmc_core/package_cstate_show after/before screen off is $residency"
+ fi
+ if [ $residency -eq 0 ]; then
+        echo "The CPU can't enter low power idle when the screen is off."
+        echo "Please refer to https://www.kernel.org/doc/html/latest/firmware-guide/acpi/lpit.html."
+ exit 1
fi
- [ $residency -gt 0 ] || exit 1
user: root
estimated_duration: 25
id: power-management/system-low-power-idle
+after: suspend/suspend_advanced_auto
category_id: com.canonical.plainbox::power-management
_summary: System low power idle residency check
_description:
@@ -379,16 +388,31 @@ command:
dmesg | grep ACPI | grep supports | sed 's/\[.*ACPI/ACPI/'
echo "Content of /etc/default/grub:"
cat /etc/default/grub
- rtcwake --mode freeze -s 10
if [ "$sys_lpi_file" == "low_power_idle_system_residency_us" ]; then
- echo "check /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us"
- residency=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us)
+ before=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us)
+ rtcwake --mode freeze -s 10
+ after=$(cat /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us)
+ residency=$((after-before))
+ echo "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us after/before suspend is $residency"
else
- echo "check /sys/kernel/debug/pmc_core/slp_s0_residency_usec"
- residency=$(cat /sys/kernel/debug/pmc_core/slp_s0_residency_usec)
+ echo "The system doesn't have hardware-based residency counter."
+ echo "please check https://www.uefi.org/sites/default/files/resources/Intel_ACPI_Low_Power_S0_Idle.pdf"
+ before=$(cat /sys/kernel/debug/pmc_core/slp_s0_residency_usec)
+ rtcwake --mode freeze -s 10
+ after=$(cat /sys/kernel/debug/pmc_core/slp_s0_residency_usec)
+ residency=$((after-before))
+ echo "/sys/kernel/debug/pmc_core/slp_s0_residency_usec after/before suspend is $residency"
fi
# shellcheck disable=SC2086
- [ $residency -gt 0 ] || exit 1
+ if [ $residency -eq 0 ]; then
+ echo "The system can't enter s0 when suspended."
+ echo "please refer to https://www.kernel.org/doc/html/latest/firmware-guide/acpi/lpit.html."
+ if [ -f /sys/kernel/debug/suspend_stats ]; then
+ echo "cat /sys/kernel/debug/suspend_stats"
+ cat /sys/kernel/debug/suspend_stats
+ fi
+ exit 1
+ fi
user: root
estimated_duration: 15
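
The two residency checks above share the same pattern: sample a counter, blank the screen or suspend, sample again, and require a positive delta. A minimal Python sketch of the CPU variant, assuming (as the job's shell does) that the low_power_idle_cpu_residency_us counter exists and that xset is available in the session:

#!/usr/bin/env python3
# Minimal sketch of the residency-delta check; the counter path and the xset
# calls mirror the job's shell command and are assumptions about the test
# environment, not a drop-in replacement for the job.
import subprocess
import time

COUNTER = "/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us"

def read_counter(path=COUNTER):
    with open(path) as f:
        return int(f.read().strip())

before = read_counter()
subprocess.run(["xset", "dpms", "force", "off"], check=True)
time.sleep(20)
subprocess.run(["xset", "dpms", "force", "on"], check=True)
delta = read_counter() - before

print(f"{COUNTER} delta across screen off/on is {delta}")
raise SystemExit(0 if delta > 0 else 1)
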
diff --git a/units/snappy/snappy.pxu b/units/snappy/snappy.pxu
index fed3f76..45ffc1b 100644
--- a/units/snappy/snappy.pxu
+++ b/units/snappy/snappy.pxu
@@ -46,7 +46,6 @@ plugin: shell
command: snap_tests.py remove
category_id: snappy
estimated_duration: 10s
-depends: snappy/snap-install
flags: preserve-locale
user: root
environ: TEST_SNAP SNAPD_TASK_TIMEOUT SNAPD_POLL_INTERVAL
@@ -83,7 +82,6 @@ _steps:
_verification:
Check hello version is back to its stable version
plugin: manual
-depends: snappy/snap-refresh
category_id: snappy
estimated_duration: 60
@@ -101,7 +99,6 @@ _steps:
_verification:
Check hello version is again the one from the beta channel
plugin: manual
-depends: snappy/snap-revert
category_id: snappy
estimated_duration: 60
@@ -109,12 +106,12 @@ id: snappy/snap-refresh-automated
template-engine: jinja2
_summary: Test the snap refresh command is working.
_description:
- The snap {{ __checkbox_env__.get("TEST_SNAP", "test-snapd-tools") }} should
- be installed from the stable channel prior to starting the test. The job
- refreshes to edge and compares the revision before and after.
+ The test installs the
+ {{ __checkbox_env__.get("TEST_SNAP", "test-snapd-tools") }} snap from the
+ stable channel, refreshes it to the edge channel, and compares the revision
+ before and after the refresh.
plugin: shell
command: snap_tests.py refresh
-depends: snappy/snap-install
category_id: snappy
estimated_duration: 10s
user: root
@@ -124,12 +121,11 @@ id: snappy/snap-revert-automated
template-engine: jinja2
_summary: Test the snap revert command is working.
_description:
- Runs after snap-refresh-automated and should revert the installed edge channel
- snap {{ __checkbox_env__.get("TEST_SNAP", "test-snapd-tools") }} leftover
- from that test to the one from stable.
+ Checks that the edge-channel
+ {{ __checkbox_env__.get("TEST_SNAP", "test-snapd-tools") }} snap is reverted
+ to the revision from the stable channel.
plugin: shell
command: snap_tests.py revert
-depends: snappy/snap-refresh-automated
category_id: snappy
estimated_duration: 10s
user: root
@@ -143,7 +139,6 @@ _description:
snap can be refreshed after removal of the blacklisted revision.
plugin: shell
command: snap_tests.py reupdate
-depends: snappy/snap-revert-automated
category_id: snappy
estimated_duration: 10s
user: root
diff --git a/units/snappy/test-plan.pxu b/units/snappy/test-plan.pxu
index f688bbf..b476809 100644
--- a/units/snappy/test-plan.pxu
+++ b/units/snappy/test-plan.pxu
@@ -29,9 +29,6 @@ _description:
QA test that includes manual tests for the snap command for Snappy Ubuntu
Core devices.
include:
- snappy/snap-refresh
- snappy/snap-revert
- snappy/snap-reupdate
snappy/os-refresh
snappy/os-revert
snappy/os-fail-boot
@@ -48,9 +45,6 @@ _description:
QA test that includes manual tests for the snap command for Snappy Ubuntu
Core devices.
include:
- snappy/snap-refresh
- snappy/snap-revert
- snappy/snap-reupdate
snappy/os-refresh-with-refresh-control
snappy/os-revert-with-refresh-control
snappy/os-fail-boot-with-refresh-control
diff --git a/units/stress/jobs.pxu b/units/stress/jobs.pxu
index aa12e28..a541e3a 100644
--- a/units/stress/jobs.pxu
+++ b/units/stress/jobs.pxu
@@ -18,14 +18,15 @@ estimated_duration: 7200.0
requires:
executable.name == 'stress-ng'
user: root
+environ: STRESS_NG_CPU_TIME
command:
if [ -n "$STRESS_NG_CPU_TIME" ]
then
echo "Found STRESS_NG_CPU_TIME env var, stress_ng cpu running time is now: $STRESS_NG_CPU_TIME seconds"
- stress_ng_test.py cpu --base-time "$STRESS_NG_CPU_TIME"
+ systemd-inhibit stress_ng_test.py cpu --base-time "$STRESS_NG_CPU_TIME"
else
echo STRESS_NG_CPU_TIME env var is not found, stress_ng cpu running time is default value
- stress_ng_test.py cpu --base-time 7200
+ systemd-inhibit stress_ng_test.py cpu --base-time 7200
fi
_summary:
Stress of CPUs (very long runtime)
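
Wrapping the stress run in systemd-inhibit keeps logind from suspending or powering off the machine partway through the two-hour run. A rough Python equivalent of the wrapped invocation is sketched below; the --who/--why strings are illustrative and not taken from the job:

#!/usr/bin/env python3
# Sketch of running the long stress command under a systemd inhibitor lock.
# stress_ng_test.py and the 7200 s default come from the job above; the
# --who/--why strings are made up for illustration.
import os
import subprocess
import sys

base_time = os.environ.get("STRESS_NG_CPU_TIME", "7200")
cmd = [
    "systemd-inhibit",
    "--what=idle:sleep:shutdown",   # block idle, suspend and shutdown for the duration
    "--who=checkbox", "--why=CPU stress test in progress",
    "stress_ng_test.py", "cpu", "--base-time", base_time,
]
sys.exit(subprocess.run(cmd).returncode)
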
diff --git a/units/stress/s3s4.pxu b/units/stress/s3s4.pxu
index 0e349c3..b8fc25b 100644
--- a/units/stress/s3s4.pxu
+++ b/units/stress/s3s4.pxu
@@ -21,6 +21,7 @@ plugin: resource
environ: STRESS_S3_ITERATIONS
command:
echo "s3_iterations: ${STRESS_S3_ITERATIONS:-30}"
+ python3 -c 'import platform;print("fwts: {}".format("supported" if platform.machine() in ["x86_64", "i386"] else "unsupported"))'
estimated_duration: 1s
flags: preserve-locale
@@ -32,6 +33,7 @@ plugin: resource
environ: STRESS_S4_ITERATIONS
command:
echo "s4_iterations: ${STRESS_S4_ITERATIONS:-30}"
+ python3 -c 'import platform;print("fwts: {}".format("supported" if platform.machine() in ["x86_64", "i386"] else "unsupported"))'
estimated_duration: 1s
flags: preserve-locale
@@ -39,10 +41,11 @@ flags: preserve-locale
unit: template
template-resource: stress_s3_iterations
template-unit: job
+template-engine: jinja2
plugin: shell
flags: preserve-locale
category_id: stress-tests/suspend
-id: stress-tests/suspend_{s3_iterations}_cycles
+id: stress-tests/suspend_{{ s3_iterations }}_cycles
imports:
from com.canonical.certification import sleep
from com.canonical.certification import rtc
@@ -53,11 +56,19 @@ estimated_duration: 2400.0
environ: PLAINBOX_SESSION_SHARE STRESS_S3_SLEEP_DELAY STRESS_S3_WAIT_DELAY LD_LIBRARY_PATH
user: root
command:
+ {%- if fwts == "supported" %}
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$SNAP/usr/lib/fwts"
- set -o pipefail; checkbox-support-fwts_test -l "$PLAINBOX_SESSION_SHARE"/suspend_{s3_iterations}_cycles -f none -s s3 --s3-device-check --s3-device-check-delay="${{STRESS_S3_WAIT_DELAY:-45}}" --s3-sleep-delay="${{STRESS_S3_SLEEP_DELAY:-30}}" --s3-multiple={s3_iterations} -j "$SNAP"/share/fwts | tee "$PLAINBOX_SESSION_SHARE"/suspend_{s3_iterations}_cycles_times.log
+ set -o pipefail; checkbox-support-fwts_test -l "$PLAINBOX_SESSION_SHARE"/suspend_{{ s3_iterations }}_cycles -f none -s s3 --s3-device-check --s3-device-check-delay="${STRESS_S3_WAIT_DELAY:-45}" --s3-sleep-delay="${STRESS_S3_SLEEP_DELAY:-30}" --s3-multiple={{ s3_iterations }} -j "$SNAP"/share/fwts | tee "$PLAINBOX_SESSION_SHARE"/suspend_{{ s3_iterations }}_cycles_times.log
+ {%- else %}
+ for i in {1..{{ s3_iterations }}};
+ do
+ echo "Iteration $i"
+ rtcwake -v -m mem -s "${STRESS_S3_SLEEP_DELAY:-30}"
+ done
+ {% endif -%}
_description:
PURPOSE:
- This is an automated stress test that will force the system to suspend/resume for {s3_iterations} cycles.
+ This is an automated stress test that will force the system to suspend/resume for {{ s3_iterations }} cycles.
unit: template
template-resource: stress_s3_iterations
@@ -67,6 +78,7 @@ flags: preserve-locale
category_id: stress-tests/suspend
id: stress-tests/suspend-{s3_iterations}-cycles-log-check
after: stress-tests/suspend_{s3_iterations}_cycles
+requires: cpuinfo.platform in ("i386", "x86_64")
estimated_duration: 1.0
command: [ -e "$PLAINBOX_SESSION_SHARE"/suspend_{s3_iterations}_cycles.log ] && sleep_test_log_check.py -v s3 "$PLAINBOX_SESSION_SHARE"/suspend_{s3_iterations}_cycles.log
_description:
@@ -81,6 +93,7 @@ category_id: stress-tests/suspend
id: stress-tests/suspend-{s3_iterations}-cycles-log-attach
estimated_duration: 1.0
after: stress-tests/suspend_{s3_iterations}_cycles
+requires: cpuinfo.platform in ("i386", "x86_64")
command: [ -e "$PLAINBOX_SESSION_SHARE"/suspend_{s3_iterations}_cycles.log ] && cat "$PLAINBOX_SESSION_SHARE"/suspend_{s3_iterations}_cycles.log
_description:
Attaches the log from the {s3_iterations} cycles Suspend/Resume test if it exists
@@ -89,10 +102,11 @@ _description:
unit: template
template-resource: stress_s4_iterations
template-unit: job
+template-engine: jinja2
plugin: shell
flags: preserve-locale
category_id: stress-tests/hibernate
-id: stress-tests/hibernate_{s4_iterations}_cycles
+id: stress-tests/hibernate_{{ s4_iterations }}_cycles
imports:
from com.canonical.certification import sleep
from com.canonical.certification import rtc
@@ -104,10 +118,10 @@ environ: PLAINBOX_SESSION_SHARE STRESS_S4_SLEEP_DELAY STRESS_S4_WAIT_DELAY LD_LI
user: root
command:
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$SNAP/usr/lib/fwts"
- checkbox-support-fwts_test -l "$PLAINBOX_SESSION_SHARE"/hibernate_{s4_iterations}_cycles -f none -s s4 --s4-device-check --s4-device-check-delay="${{STRESS_S4_WAIT_DELAY:-45}}" --s4-sleep-delay="${{STRESS_S4_SLEEP_DELAY:-120}}" --s4-multiple={s4_iterations} -j "$SNAP"/share/fwts
+ checkbox-support-fwts_test -l "$PLAINBOX_SESSION_SHARE"/hibernate_{{ s4_iterations }}_cycles -f none -s s4 --s4-device-check --s4-device-check-delay="${STRESS_S4_WAIT_DELAY:-45}" --s4-sleep-delay="${STRESS_S4_SLEEP_DELAY:-120}" --s4-multiple={{ s4_iterations }} -j "$SNAP"/share/fwts
_description:
PURPOSE:
- This is an automated stress test that will force the system to hibernate/resume for {s4_iterations} cycles
+ This is an automated stress test that will force the system to hibernate/resume for {{ s4_iterations }} cycles
unit: template
template-resource: stress_s4_iterations
@@ -117,6 +131,7 @@ flags: preserve-locale
category_id: stress-tests/hibernate
id: stress-tests/hibernate-{s4_iterations}-cycles-log-check
after: stress-tests/hibernate_{s4_iterations}_cycles
+requires: cpuinfo.platform in ("i386", "x86_64")
estimated_duration: 1.0
command: [ -e "$PLAINBOX_SESSION_SHARE"/hibernate_{s4_iterations}_cycles.log ] && sleep_test_log_check.py -v s4 "$PLAINBOX_SESSION_SHARE"/hibernate_{s4_iterations}_cycles.log
_description:
@@ -131,6 +146,7 @@ category_id: stress-tests/hibernate
id: stress-tests/hibernate-{s4_iterations}-cycles-log-attach
estimated_duration: 1.0
after: stress-tests/hibernate_{s4_iterations}_cycles
+requires: cpuinfo.platform in ("i386", "x86_64")
command: [ -e "$PLAINBOX_SESSION_SHARE"/hibernate_{s4_iterations}_cycles.log ] && cat "$PLAINBOX_SESSION_SHARE"/hibernate_{s4_iterations}_cycles.log
_description:
Attaches the log from the {s4_iterations} cycles Hibernate/Resume test if it exists
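
The fwts line added to the resource output is what the newly Jinja2-templated suspend job branches on: fwts-driven S3 cycling on x86, a plain rtcwake loop elsewhere. A small sketch of how one resource record drives that template, using jinja2 directly with a simplified command body (the record values are hypothetical):

#!/usr/bin/env python3
# Sketch: render a simplified version of the suspend-cycles command for one
# stress_s3_iterations record. The record below is hypothetical; a real one
# comes from the resource job's "s3_iterations:"/"fwts:" output.
import platform
from jinja2 import Template

record = {
    "s3_iterations": "30",
    "fwts": "supported" if platform.machine() in ["x86_64", "i386"] else "unsupported",
}

command = Template("""\
{%- if fwts == "supported" %}
checkbox-support-fwts_test -s s3 --s3-multiple={{ s3_iterations }}
{%- else %}
for i in $(seq {{ s3_iterations }}); do rtcwake -v -m mem -s 30; done
{%- endif %}
""")
print(command.render(**record).strip())
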
diff --git a/units/submission/jobs.pxu b/units/submission/jobs.pxu
index 892d4e6..a8c8c7f 100644
--- a/units/submission/jobs.pxu
+++ b/units/submission/jobs.pxu
@@ -22,8 +22,10 @@ requires:
dmi_present.state == 'supported'
user: root
command:
+ BOOT_MODE=$(inxi_snapshot -M --output json --output-file print | grep -oP '(?<=\d#)(UEFI|BIOS)(\s+\[Legacy\])?')
+ # shellcheck disable=SC2016
dmidecode | python3 -m plainbox dev parse dmidecode | \
- jq '[.[] | ._attributes + {"category": .category}]'
+ jq --arg BOOT_MODE "$BOOT_MODE" '[.[] | ._attributes + {"category": .category} + (if .category == "BIOS" then {boot_mode: $BOOT_MODE} else {} end)]'
estimated_duration: 1
_description: Attaches dmidecode output
_summary: Attaches json dumps of raw dmi devices
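
The jq filter flattens each parsed dmidecode record and, for the BIOS record only, annotates it with the detected boot mode. The same transformation in Python on a made-up input whose shape mirrors what the jq expression expects (a list of objects with _attributes and category):

#!/usr/bin/env python3
# Sketch of the jq transformation: merge each record's _attributes with its
# category and tag the BIOS record with the detected boot mode. The sample
# records are made up; real input comes from
# "dmidecode | python3 -m plainbox dev parse dmidecode".
import json

boot_mode = "UEFI"  # in the job this is extracted from inxi_snapshot output
records = [
    {"category": "BIOS", "_attributes": {"vendor": "ACME", "version": "1.0"}},
    {"category": "SYSTEM", "_attributes": {"product": "Widget 9000"}},
]

merged = [
    {**rec["_attributes"], "category": rec["category"],
     **({"boot_mode": boot_mode} if rec["category"] == "BIOS" else {})}
    for rec in records
]
print(json.dumps(merged, indent=2))
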
diff --git a/units/submission/test-plan.pxu b/units/submission/test-plan.pxu
index 5ae692f..5f3d9f6 100644
--- a/units/submission/test-plan.pxu
+++ b/units/submission/test-plan.pxu
@@ -1,10 +1,26 @@
id: submission-cert-full
unit: test plan
-_name: Submission resources
-_description: Submission resources
+_name: Full submission resources
+_description: Full submission resources
+include:
+nested_part:
+ submission-cert-manual
+ submission-cert-automated
+
+id: submission-cert-manual
+unit: test plan
+_name: Manual submission resources
+_description: Manual submission resources
include:
mandatory_include:
miscellanea/device_check
+
+id: submission-cert-automated
+unit: test plan
+_name: Automated submission resources
+_description: Automated submission resources
+include:
+mandatory_include:
# Meta-job to include required resources, don't remove.
miscellanea/submission-resources
info/systemd-analyze
diff --git a/units/suspend/suspend-graphics.pxu b/units/suspend/suspend-graphics.pxu
index 84bfab8..9a52fbb 100644
--- a/units/suspend/suspend-graphics.pxu
+++ b/units/suspend/suspend-graphics.pxu
@@ -109,7 +109,7 @@ plugin: attachment
category_id: com.canonical.plainbox::suspend
id: suspend/{index}_xrandr_screens_after_suspend.tar.gz_auto
depends: suspend/{index}_cycle_resolutions_after_suspend_{product_slug}_graphics
-command: [ -f "$PLAINBOX_SESSION_SHARE"/{index}_xrandr_screens_after_suspend.tgz ] && cat "$PLAINBOX_SESSION_SHARE"/{index}_xrandr_screens_after_suspend.tgz
+command: [ -f "$PLAINBOX_SESSION_SHARE"/xrandr_screens_{index}_after_suspend.tgz ] && cat "$PLAINBOX_SESSION_SHARE"/xrandr_screens_{index}_after_suspend.tgz
_description: This attaches screenshots from the suspend/cycle_resolutions_after_suspend test to the results submission.
unit: template
diff --git a/units/suspend/suspend.pxu b/units/suspend/suspend.pxu
index 034f97b..a2830d4 100644
--- a/units/suspend/suspend.pxu
+++ b/units/suspend/suspend.pxu
@@ -1636,32 +1636,6 @@ _description:
plugin: shell
category_id: com.canonical.plainbox::suspend
-id: suspend/usb_storage_preinserted_after_suspend
-estimated_duration: 1.2
-user: root
-depends: suspend/suspend_advanced_auto
-command: removable_storage_test.py -l usb && removable_storage_test.py -s 268400000 usb
-_description:
- This is an automated version of usb/storage-automated and assumes that the
- server has usb storage devices plugged in prior to checkbox execution. It
- is intended for servers and SRU automated testing.
-
-plugin: shell
-category_id: com.canonical.plainbox::suspend
-id: suspend/usb3_storage_preinserted_after_suspend
-estimated_duration: 1.2
-user: root
-requires:
- usb.usb3 == 'supported'
-depends: suspend/suspend_advanced_auto
-command: removable_storage_test.py -l usb && removable_storage_test.py -s 268400000 -m 500000000 usb --driver xhci_hcd
-_description:
- This is an automated version of usb3/storage-automated and assumes that the
- server has usb 3.0 storage devices plugged in prior to checkbox execution. It
- is intended for servers and SRU automated testing.
-
-plugin: shell
-category_id: com.canonical.plainbox::suspend
id: suspend/usb_performance_after_suspend
depends: suspend/usb_insert_after_suspend
user: root
diff --git a/units/thunderbolt/jobs.pxu b/units/thunderbolt/jobs.pxu
index 73fadb5..031c2ad 100644
--- a/units/thunderbolt/jobs.pxu
+++ b/units/thunderbolt/jobs.pxu
@@ -105,7 +105,7 @@ _siblings: [
_summary: Storage insert detection on Thunderbolt 3 port
_description:
PURPOSE:
- This test will check if the insertion of a Thunderbolt HDD could be detected
+ This test will check if the insertion of a Thunderbolt 3 HDD can be detected
STEPS:
1. Click 'Test' to begin the test. This test will
timeout and fail if the insertion has not been detected within 40 seconds.
@@ -164,7 +164,7 @@ _siblings: [
_summary: Storage removal detection on Thunderbolt 3 port
_description:
PURPOSE:
- This test will check the system can detect the removal of a Thunderbolt HDD
+ This test will check that the system can detect the removal of a Thunderbolt 3 HDD
STEPS:
1. Click 'Test' to begin the test. This test will timeout and fail if
the removal has not been detected within 20 seconds.
@@ -176,6 +176,7 @@ _description:
plugin: user-interact-verify
category_id: com.canonical.plainbox::disk
id: thunderbolt3/daisy-chain
+user: root
imports: from com.canonical.plainbox import manifest
requires: manifest.has_thunderbolt3 == 'True'
flags: also-after-suspend-manual
diff --git a/units/touchpad/jobs.pxu b/units/touchpad/jobs.pxu
index 2fcdbc3..bb6f596 100644
--- a/units/touchpad/jobs.pxu
+++ b/units/touchpad/jobs.pxu
@@ -218,6 +218,36 @@ _siblings:
[{ "id": "touchpad/continuous-move-after-suspend",
"depends": "suspend/suspend_advanced touchpad/continuous-move" }]
+unit: template
+template-resource: device
+template-filter: device.category == 'TOUCHPAD'
+template-unit: job
+plugin: shell
+category_id: com.canonical.plainbox::touchpad
+id: touchpad/palm-rejection-firmware-labeling_{product_slug}
+requires: device.driver == 'hid-multitouch'
+estimated_duration: 5.0
+command:
+ abs_caps=$(cat </sys{path}/capabilities/abs)
+ abs_caps_hex=$((16#"$abs_caps"))
+ tool_type_bit=$((abs_caps_hex >> 55))
+ support=$((tool_type_bit & 1))
+ if [ $support -eq 1 ]; then
+ exit 0
+ else
+ echo "Touchapd info:"
+ cat </sys{path}/name
+ cat </sys{path}/modalias
+ echo "Touchpad EV_ABS capabilities:"
+ echo "$abs_caps"
+ exit 1
+ fi
+_summary: Touchpad EV_ABS capability check
+_description:
+ Libinput's firmware/labeling palm detection relies on the touchpad's
+ ABS_MT_TOOL_TYPE capability. This test checks the touchpad's EV_ABS
+ capabilities to make sure the firmware/labeling bit is set by the touchpad
+ firmware.
+
id: touchpad/palm-rejection
plugin: user-interact
category_id: com.canonical.plainbox::touchpad
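
The palm-rejection-firmware-labeling check above is plain bit arithmetic: ABS_MT_TOOL_TYPE is event code 0x37 (bit 55), so the command shifts the EV_ABS bitmap right by 55 and tests the low bit. The same check as a standalone Python sketch, with a hypothetical device path in place of the template's {path} and, like the shell, assuming the bitmap fits in a single hex word:

#!/usr/bin/env python3
# Sketch of the ABS_MT_TOOL_TYPE capability check. The device path is
# hypothetical; the template job fills in the real /sys path per touchpad.
import sys

ABS_MT_TOOL_TYPE = 0x37  # bit 55 of the EV_ABS capability bitmap

path = "/sys/class/input/event5/device/capabilities/abs"  # hypothetical
with open(path) as f:
    # Like the job's $((16#...)), this assumes the bitmap is a single hex word.
    abs_caps = int(f.read().strip(), 16)

if abs_caps >> ABS_MT_TOOL_TYPE & 1:
    print("ABS_MT_TOOL_TYPE advertised: firmware/labeling palm detection possible")
    sys.exit(0)
print("ABS_MT_TOOL_TYPE missing from EV_ABS capabilities: 0x%x" % abs_caps)
sys.exit(1)
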
diff --git a/units/touchpad/test-plan.pxu b/units/touchpad/test-plan.pxu
index 188e7f2..39769d0 100644
--- a/units/touchpad/test-plan.pxu
+++ b/units/touchpad/test-plan.pxu
@@ -30,6 +30,7 @@ _description:
Touchpad tests (Automated)
include:
touchpad/detected-as-mouse certification-status=blocker
+ touchpad/palm-rejection-firmware-labeling_.* certification-status=blocker
id: after-suspend-touchpad-cert-full
diff --git a/units/ubuntucore/category.pxu b/units/ubuntucore/category.pxu
new file mode 100644
index 0000000..b6c6461
--- /dev/null
+++ b/units/ubuntucore/category.pxu
@@ -0,0 +1,3 @@
+unit: category
+id: ubuntucore
+_name: Ubuntu Core OS feature tests
diff --git a/units/ubuntucore/jobs.pxu b/units/ubuntucore/jobs.pxu
new file mode 100644
index 0000000..c82c43b
--- /dev/null
+++ b/units/ubuntucore/jobs.pxu
@@ -0,0 +1,53 @@
+id: ubuntucore/os-recovery-mode
+_summary: Reboot into recovery mode and log into the system using prior credentials.
+_purpose:
+ Check if the system will reboot into recovery mode successfully
+requires:
+ lsb.release >= '20'
+_steps:
+ 1. Send a unix socket command to reboot the system into recovery mode
+ $ sudo http --pretty=format POST snapd:///v2/systems/$(ls /run/mnt/ubuntu-seed/systems/) action=do mode=recover
+ If 'http' is not installed, run the following command first
+ $ sudo snap install http
+ 2. The system should respond to the unix socket command and reboot itself
+ 3. Wait until the system completes the reboot process
+ 4. Check that the system is running in recovery mode via the kernel cmdline
+ $ cat /proc/cmdline
+ 5. Reboot the system and check that it goes back to normal run mode
+ $ cat /proc/cmdline
+_verification:
+ Check that the kernel cmdline in recovery mode includes:
+ 'snapd_recovery_mode=recover'
+ Check that the kernel cmdline back in normal run mode includes:
+ 'snapd_recovery_mode=run'
+plugin: manual
+category_id: ubuntucore
+
+id: ubuntucore/os-reinstall-mode
+_summary: Reboot into reinstall mode and trigger a factory reset on the device.
+_purpose:
+ Check if the system will reboot into reinstall mode and reinitialise the device with a fresh factory reset
+requires:
+ lsb.release >= '20'
+_steps:
+ WARNING: ALL EXISTING DATA ON THIS DEVICE WILL BE WIPED!!
+ 1. Check the current serial-assertion device-key
+ $ ls /var/lib/snapd/save/device/private-keys-v1
+ 2. Clear the TPM first if this device has secure boot & FDE enabled
+ For x86-based platforms:
+ $ sudo su
+ $ echo 5 > /sys/class/tpm/tpm0/ppi/request
+ For ARM-based platforms:
+ There is no generic command for ARM-based platforms; please refer to the device's user manual
+ 3. Send a unix socket command to reboot the system into reinstall mode
+ $ sudo http --pretty=format POST snapd:///v2/systems/$(ls /run/mnt/ubuntu-seed/systems/) action=do mode=install
+ If 'http' is not installed, run the following command first
+ $ sudo snap install http
+ 4. The system should respond to the unix socket command and reboot itself
+ 5. Wait until the system completes the installation and initialisation process
+ 6. Check the serial-assertion device-key after the installation completes
+ $ ls /var/lib/snapd/save/device/private-keys-v1
+_verification:
+ Check that a new serial-assertion device-key was generated after the reinstallation completes
+plugin: manual
+category_id: ubuntucore
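
Both jobs drive snapd's systems API over its unix socket; the http snap in the steps is just a convenient client. For reference, a minimal Python sketch of the same POST (run as root; the system label is the directory under /run/mnt/ubuntu-seed/systems/, shown here as a placeholder):

#!/usr/bin/env python3
# Sketch: POST {"action": "do", "mode": "recover"} to snapd's
# /v2/systems/<label> endpoint over /run/snapd.socket, the same request the
# manual steps issue with the http snap. The label below is a placeholder.
import http.client
import json
import socket

class SnapdConnection(http.client.HTTPConnection):
    """HTTP over snapd's unix socket instead of TCP."""
    def __init__(self):
        super().__init__("localhost")
    def connect(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect("/run/snapd.socket")

label = "<seed-system-label>"  # the directory name under /run/mnt/ubuntu-seed/systems/
conn = SnapdConnection()
conn.request("POST", "/v2/systems/" + label,
             json.dumps({"action": "do", "mode": "recover"}),
             {"Content-Type": "application/json"})
print(conn.getresponse().read().decode())
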
diff --git a/units/ubuntucore/test-plan.pxu b/units/ubuntucore/test-plan.pxu
new file mode 100644
index 0000000..cdc03d7
--- /dev/null
+++ b/units/ubuntucore/test-plan.pxu
@@ -0,0 +1,22 @@
+id: ubuntucore-full
+unit: test plan
+_name: Ubuntu Core OS feature tests
+_description: OS feature test for Ubuntu Core devices
+include:
+nested_part:
+ ubuntucore-manual
+ ubuntucore-automated
+
+id: ubuntucore-automated
+unit: test plan
+_name: Automated Ubuntu Core OS feature tests
+_description: Automated OS feature tests for Ubuntu Core devices
+include:
+
+id: ubuntucore-manual
+unit: test plan
+_name: Manual Ubuntu Core OS feature tests
+_description: Manual OS feature tests for Ubuntu Core devices
+include:
+ ubuntucore/os-reinstall-mode
+ ubuntucore/os-recovery-mode
diff --git a/units/usb/usb.pxu b/units/usb/usb.pxu
index 0b923cb..f9e45da 100644
--- a/units/usb/usb.pxu
+++ b/units/usb/usb.pxu
@@ -240,10 +240,12 @@ user: root
estimated_duration: 45.0
command: removable_storage_test.py -l usb && timeout 300 removable_storage_test.py -s 268400000 usb
flags: also-after-suspend preserve-cwd
+imports: from com.canonical.plainbox import manifest
requires:
cpuinfo.platform != 's390x'
package.name == 'udisks2' or snap.name == 'udisks2'
package.name == 'udisks2' or (snap.name == 'core' and int(snap.revision) >= 1804)
+ manifest.has_usb_storage == 'True'
_summary:
Test USB 2.0 or 1.1 ports
_description:
@@ -256,11 +258,13 @@ category_id: com.canonical.plainbox::usb
id: usb3/storage-preinserted
user: root
flags: also-after-suspend
+imports: from com.canonical.plainbox import manifest
requires:
cpuinfo.platform != 's390x'
usb.usb3 == 'supported'
package.name == 'udisks2' or snap.name == 'udisks2'
package.name == 'udisks2' or (snap.name == 'core' and int(snap.revision) >= 1804)
+ manifest.has_usb_storage == 'True'
estimated_duration: 45.0
command: removable_storage_test.py -l usb && timeout 300 removable_storage_test.py -s 268400000 -m 500000000 usb --driver xhci_hcd
_summary:
diff --git a/units/watchdog/jobs.pxu b/units/watchdog/jobs.pxu
index 3247941..0e16f82 100644
--- a/units/watchdog/jobs.pxu
+++ b/units/watchdog/jobs.pxu
@@ -1,3 +1,11 @@
+id: watchdog/detect
+category_id: com.canonical.plainbox::power-management
+_summary: Detect presence of a Hardware Watchdog
+flags: simple
+imports: from com.canonical.plainbox import manifest
+requires: manifest.has_hardware_watchdog == 'True'
+command: udev_resource.py -f WATCHDOG
+
id: watchdog/systemd-config
_summary: Check if the hardware watchdog is properly configured
template-engine: jinja2
@@ -29,6 +37,8 @@ command:
{% endif -%}
category_id: com.canonical.plainbox::power-management
flags: simple
+imports: from com.canonical.plainbox import manifest
+requires: manifest.has_hardware_watchdog == 'True'
id: watchdog/trigger-system-reset
depends: watchdog/systemd-config
@@ -75,3 +85,5 @@ unit: job
plugin: shell
command: failed_service_check.sh
estimated_duration: 1.0
+imports: from com.canonical.plainbox import manifest
+requires: manifest.has_hardware_watchdog == 'True'
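
watchdog/detect delegates to the provider's udev_resource.py, but the underlying signal is simply whether the kernel has bound a watchdog driver. A rough standalone approximation, not the provider script, that lists /sys/class/watchdog and fails when nothing is registered:

#!/usr/bin/env python3
# Rough approximation of a hardware-watchdog presence check; the real job
# runs udev_resource.py -f WATCHDOG. Looks at /sys/class/watchdog, which the
# kernel populates when a watchdog driver is bound.
from pathlib import Path
import sys

devices = sorted(Path("/sys/class/watchdog").glob("watchdog*"))
for dev in devices:
    identity = dev / "identity"
    name = identity.read_text().strip() if identity.exists() else "unknown"
    print(f"{dev.name}: {name}")

sys.exit(0 if devices else 1)
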
diff --git a/units/watchdog/manifest.pxu b/units/watchdog/manifest.pxu
new file mode 100644
index 0000000..d80d7e1
--- /dev/null
+++ b/units/watchdog/manifest.pxu
@@ -0,0 +1,5 @@
+
+unit: manifest entry
+id: has_hardware_watchdog
+_name: Hardware Watchdog
+value-type: bool
diff --git a/units/watchdog/test-plan.pxu b/units/watchdog/test-plan.pxu
index 6de8e71..01057d0 100644
--- a/units/watchdog/test-plan.pxu
+++ b/units/watchdog/test-plan.pxu
@@ -23,6 +23,7 @@ _description:
QA test plan that includes automated watchdog tests
estimated_duration: 1s
include:
+ watchdog/detect
watchdog/systemd-config
watchdog/trigger-system-reset-auto
watchdog/post-trigger-system-reset-auto
diff --git a/units/wireless/jobs.pxu b/units/wireless/jobs.pxu
index 3e50c90..4827af1 100644
--- a/units/wireless/jobs.pxu
+++ b/units/wireless/jobs.pxu
@@ -30,7 +30,7 @@ requires:
{%- if __on_ubuntucore__ %}
connections.slot == 'network-manager:service' and connections.plug == '{{ __system_env__["SNAP_NAME"] }}:network-manager'
{% endif -%}
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
unit: template
template-resource: device
@@ -53,7 +53,7 @@ requires:
{%- if __on_ubuntucore__ %}
connections.slot == 'network-manager:service' and connections.plug == '{{ __system_env__["SNAP_NAME"] }}:network-manager'
{% endif -%}
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
unit: template
template-resource: device
@@ -76,7 +76,7 @@ requires:
{%- if __on_ubuntucore__ %}
connections.slot == 'network-manager:service' and connections.plug == '{{ __system_env__["SNAP_NAME"] }}:network-manager'
{% endif -%}
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
unit: template
template-resource: device
@@ -99,7 +99,7 @@ requires:
{%- if __on_ubuntucore__ %}
connections.slot == 'network-manager:service' and connections.plug == '{{ __system_env__["SNAP_NAME"] }}:network-manager'
{% endif -%}
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
unit: template
template-resource: device
@@ -122,7 +122,7 @@ requires:
{%- if __on_ubuntucore__ %}
connections.slot == 'network-manager:service' and connections.plug == '{{ __system_env__["SNAP_NAME"] }}:network-manager'
{% endif -%}
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
unit: template
template-resource: device
@@ -145,8 +145,8 @@ requires:
wireless_sta_protocol.{{ interface }}_ac == 'supported'
{%- if __on_ubuntucore__ %}
connections.slot == 'network-manager:service' and connections.plug == '{{ __system_env__["SNAP_NAME"] }}:network-manager'
- {% endif -%}
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
+ {%- endif %}
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
unit: template
template-resource: device
@@ -169,8 +169,8 @@ requires:
wireless_sta_protocol.{{ interface }}_ac == 'supported'
{%- if __on_ubuntucore__ %}
connections.slot == 'network-manager:service' and connections.plug == '{{ __system_env__["SNAP_NAME"] }}:network-manager'
- {% endif -%}
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
+ {%- endif %}
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
unit: template
template-resource: device
@@ -193,8 +193,8 @@ requires:
wireless_sta_protocol.{{ interface }}_ax == 'supported'
{%- if __on_ubuntucore__ %}
connections.slot == 'network-manager:service' and connections.plug == '{{ __system_env__["SNAP_NAME"] }}:network-manager'
- {% endif -%}
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
+ {%- endif %}
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
unit: template
template-resource: device
@@ -217,8 +217,8 @@ requires:
wireless_sta_protocol.{{ interface }}_ax == 'supported'
{%- if __on_ubuntucore__ %}
connections.slot == 'network-manager:service' and connections.plug == '{{ __system_env__["SNAP_NAME"] }}:network-manager'
- {% endif -%}
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
+ {%- endif %}
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'NetworkManager'
plugin: user-interact-verify
category_id: com.canonical.plainbox::wireless
diff --git a/units/wireless/test-plan.pxu b/units/wireless/test-plan.pxu
index f6d6fb3..6b2b8c1 100644
--- a/units/wireless/test-plan.pxu
+++ b/units/wireless/test-plan.pxu
@@ -134,17 +134,6 @@ unit: test plan
_name: Manual tests for wireless
_description: Manual tests wireless
include:
- # following matchers may also include some automated jobs, this could be
- # fixed with some regex magic, but the lesser evil seems to be just to
- # include them as well; XXX: the test plan is not really manual-only
- wireless/wireless_connection_open_ax_.*
- wireless/wireless_connection_open_ac_.*
- wireless/wireless_connection_open_bg_.*
- wireless/wireless_connection_open_n_.*
- wireless/wireless_connection_wpa_ax_.*
- wireless/wireless_connection_wpa_ac_.*
- wireless/wireless_connection_wpa_bg_.*
- wireless/wireless_connection_wpa_n_.*
id: wireless-automated
unit: test plan
@@ -163,14 +152,14 @@ include:
wireless/wireless_connection_wpa_ac_nm_.*
wireless/wireless_connection_wpa_bg_nm_.*
wireless/wireless_connection_wpa_n_nm_.*
- # wireless/wireless_connection_open_ax_np_.*
- # wireless/wireless_connection_open_ac_np_.*
- # wireless/wireless_connection_open_bg_np_.*
- # wireless/wireless_connection_open_n_np_.*
- # wireless/wireless_connection_wpa_ax_np_.*
- # wireless/wireless_connection_wpa_ac_np_.*
- # wireless/wireless_connection_wpa_bg_np_.*
- # wireless/wireless_connection_wpa_n_np_.*
+ wireless/wireless_connection_open_ax_np_.*
+ wireless/wireless_connection_open_ac_np_.*
+ wireless/wireless_connection_open_bg_np_.*
+ wireless/wireless_connection_open_n_np_.*
+ wireless/wireless_connection_wpa_ax_np_.*
+ wireless/wireless_connection_wpa_ac_np_.*
+ wireless/wireless_connection_wpa_bg_np_.*
+ wireless/wireless_connection_wpa_n_np_.*
bootstrap_include:
device
@@ -182,15 +171,15 @@ _description:
networks using netplan.
include:
wireless/detect
- # wireless/wireless_scanning_.*
- # wireless/wireless_connection_open_ax_nm_.*
- # wireless/wireless_connection_open_ac_nm_.*
- # wireless/wireless_connection_open_bg_nm_.*
- # wireless/wireless_connection_open_n_nm_.*
- # wireless/wireless_connection_wpa_ax_nm_.*
- # wireless/wireless_connection_wpa_ac_nm_.*
- # wireless/wireless_connection_wpa_bg_nm_.*
- # wireless/wireless_connection_wpa_n_nm_.*
+ wireless/wireless_scanning_.*
+ wireless/wireless_connection_open_ax_nm_.*
+ wireless/wireless_connection_open_ac_nm_.*
+ wireless/wireless_connection_open_bg_nm_.*
+ wireless/wireless_connection_open_n_nm_.*
+ wireless/wireless_connection_wpa_ax_nm_.*
+ wireless/wireless_connection_wpa_ac_nm_.*
+ wireless/wireless_connection_wpa_bg_nm_.*
+ wireless/wireless_connection_wpa_n_nm_.*
wireless/wireless_connection_open_ax_np_.*
wireless/wireless_connection_open_ac_np_.*
wireless/wireless_connection_open_bg_np_.*
@@ -337,18 +326,6 @@ unit: test plan
_name: Manual tests for wireless (after suspend)
_description: Manual tests wireless
include:
- # following matchers may also include some automated jobs, this could be
- # fixed with some regex magic, but the lesser evil seems to be just to
- # include them as well; XXX: the test plan is not really manual-only
- after-suspend-wireless/wireless_connection_open_ax_.*
- after-suspend-wireless/wireless_connection_open_ac_.*
- after-suspend-wireless/wireless_connection_open_bg_.*
- after-suspend-wireless/wireless_connection_open_n_.*
- after-suspend-wireless/wireless_connection_wpa_ax_.*
- after-suspend-wireless/wireless_connection_wpa_ac_.*
- after-suspend-wireless/wireless_connection_wpa_bg_.*
- after-suspend-wireless/wireless_connection_wpa_n_.*
- after-suspend-wireless/wifi_ap_.*
id: after-suspend-wireless-automated
unit: test plan
@@ -366,14 +343,14 @@ include:
after-suspend-wireless/wireless_connection_wpa_ac_nm_.*
after-suspend-wireless/wireless_connection_wpa_bg_nm_.*
after-suspend-wireless/wireless_connection_wpa_n_nm_.*
- # after-suspend-wireless/wireless_connection_open_ax_np_.*
- # after-suspend-wireless/wireless_connection_open_ac_np_.*
- # after-suspend-wireless/wireless_connection_open_bg_np_.*
- # after-suspend-wireless/wireless_connection_open_n_np_.*
- # after-suspend-wireless/wireless_connection_wpa_ax_np_.*
- # after-suspend-wireless/wireless_connection_wpa_ac_np_.*
- # after-suspend-wireless/wireless_connection_wpa_bg_np_.*
- # after-suspend-wireless/wireless_connection_wpa_n_np_.*
+ after-suspend-wireless/wireless_connection_open_ax_np_.*
+ after-suspend-wireless/wireless_connection_open_ac_np_.*
+ after-suspend-wireless/wireless_connection_open_bg_np_.*
+ after-suspend-wireless/wireless_connection_open_n_np_.*
+ after-suspend-wireless/wireless_connection_wpa_ax_np_.*
+ after-suspend-wireless/wireless_connection_wpa_ac_np_.*
+ after-suspend-wireless/wireless_connection_wpa_bg_np_.*
+ after-suspend-wireless/wireless_connection_wpa_n_np_.*
bootstrap_include:
device
@@ -384,15 +361,15 @@ _description:
Automated connection tests for unencrypted or WPA-encrypted 802.11 bg, n, ac, ax
networks using netplan.
include:
- # after-suspend-wireless/wireless_scanning_.*
- # after-suspend-wireless/wireless_connection_open_ax_nm_.*
- # after-suspend-wireless/wireless_connection_open_ac_nm_.*
- # after-suspend-wireless/wireless_connection_open_bg_nm_.*
- # after-suspend-wireless/wireless_connection_open_n_nm_.*
- # after-suspend-wireless/wireless_connection_wpa_ax_nm_.*
- # after-suspend-wireless/wireless_connection_wpa_ac_nm_.*
- # after-suspend-wireless/wireless_connection_wpa_bg_nm_.*
- # after-suspend-wireless/wireless_connection_wpa_n_nm_.*
+ after-suspend-wireless/wireless_scanning_.*
+ after-suspend-wireless/wireless_connection_open_ax_nm_.*
+ after-suspend-wireless/wireless_connection_open_ac_nm_.*
+ after-suspend-wireless/wireless_connection_open_bg_nm_.*
+ after-suspend-wireless/wireless_connection_open_n_nm_.*
+ after-suspend-wireless/wireless_connection_wpa_ax_nm_.*
+ after-suspend-wireless/wireless_connection_wpa_ac_nm_.*
+ after-suspend-wireless/wireless_connection_wpa_bg_nm_.*
+ after-suspend-wireless/wireless_connection_wpa_n_nm_.*
after-suspend-wireless/wireless_connection_open_ax_np_.*
after-suspend-wireless/wireless_connection_open_ac_np_.*
after-suspend-wireless/wireless_connection_open_bg_np_.*
diff --git a/units/wireless/wireless-connection-netplan.pxu b/units/wireless/wireless-connection-netplan.pxu
index 8eaf8b2..44677e2 100644
--- a/units/wireless/wireless-connection-netplan.pxu
+++ b/units/wireless/wireless-connection-netplan.pxu
@@ -19,7 +19,7 @@ estimated_duration: 15
flags: preserve-locale also-after-suspend also-after-suspend-manual
requires:
wireless_sta_protocol.{{ interface }}_ax == 'supported'
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
unit: template
@@ -43,7 +43,7 @@ estimated_duration: 15
flags: preserve-locale also-after-suspend also-after-suspend-manual
requires:
wireless_sta_protocol.{{ interface }}_ac == 'supported'
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
@@ -66,8 +66,8 @@ environ: LD_LIBRARY_PATH OPEN_BG_SSID NET_DRIVER_INFO
category_id: com.canonical.plainbox::wireless
estimated_duration: 15
flags: preserve-locale also-after-suspend also-after-suspend-manual
-#requires:
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
+requires:
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
unit: template
@@ -89,8 +89,8 @@ environ: LD_LIBRARY_PATH OPEN_N_SSID NET_DRIVER_INFO
category_id: com.canonical.plainbox::wireless
estimated_duration: 15
flags: preserve-locale also-after-suspend also-after-suspend-manual
-#requires:
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
+requires:
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
unit: template
@@ -114,7 +114,7 @@ estimated_duration: 15
flags: preserve-locale also-after-suspend also-after-suspend-manual
requires:
wireless_sta_protocol.{{ interface }}_ax == 'supported'
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
unit: template
@@ -138,7 +138,7 @@ estimated_duration: 15
flags: preserve-locale also-after-suspend also-after-suspend-manual
requires:
wireless_sta_protocol.{{ interface }}_ac == 'supported'
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
unit: template
@@ -160,8 +160,8 @@ environ: LD_LIBRARY_PATH WPA_BG_SSID WPA_BG_PSK NET_DRIVER_INFO
category_id: com.canonical.plainbox::wireless
estimated_duration: 15
flags: preserve-locale also-after-suspend also-after-suspend-manual
-#requires:
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
+requires:
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
unit: template
@@ -183,5 +183,5 @@ environ: LD_LIBRARY_PATH WPA_N_SSID WPA_N_PSK NET_DRIVER_INFO
category_id: com.canonical.plainbox::wireless
estimated_duration: 15
flags: preserve-locale also-after-suspend also-after-suspend-manual
-#requires:
-# net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
+requires:
+ net_if_management.device == '{{ interface }}' and net_if_management.managed_by == 'networkd'
diff --git a/units/wwan/jobs.pxu b/units/wwan/jobs.pxu
index aaa2089..de4b1bc 100644
--- a/units/wwan/jobs.pxu
+++ b/units/wwan/jobs.pxu
@@ -24,7 +24,6 @@ flags: preserve-locale also-after-suspend preserve-cwd
imports: from com.canonical.plainbox import manifest
requires:
manifest.has_wwan_module == 'True'
- snap.name == 'core' and int(snap.revision) >= 1804 or package.name == 'modemmanager'
snap.name == 'modem-manager' or package.name == 'modemmanager'
unit: template
@@ -53,7 +52,6 @@ flags: preserve-locale also-after-suspend preserve-cwd
imports: from com.canonical.plainbox import manifest
requires:
manifest.has_wwan_module == 'True'
- snap.name == 'core' and int(snap.revision) >= 1804 or package.name == 'modemmanager'
snap.name == 'modem-manager' or package.name == 'modemmanager'
unit: template
@@ -73,7 +71,6 @@ flags: preserve-locale also-after-suspend preserve-cwd
imports: from com.canonical.plainbox import manifest
requires:
manifest.has_wwan_module == 'True'
- snap.name == 'core' and int(snap.revision) >= 1804 or package.name == 'modemmanager'
snap.name == 'modem-manager' or package.name == 'modemmanager'
unit: template
@@ -99,7 +96,6 @@ flags: preserve-locale also-after-suspend preserve-cwd
imports: from com.canonical.plainbox import manifest
requires:
manifest.has_wwan_module == 'True'
- snap.name == 'core' and int(snap.revision) >= 1804 or package.name == 'modemmanager'
snap.name == 'modem-manager' or package.name == 'modemmanager'
id: wwan/detect-manual