author     Michael Vogt <mvo@ubuntu.com>  2023-07-14 16:37:50 +0200
committer  Michael Vogt <mvo@ubuntu.com>  2023-07-14 16:37:50 +0200
commit     25296a672a180e8b75dbf41fa3bb5c61116db15d (patch)
tree       316807be5211e120cbf84cc006b6a3bfeb1ee077
parent     d1f0efd2e4faee42f519795c4b309b2240eab21c (diff)
parent     0622b612ba264b489fb86b277da8d42a665d9c06 (diff)
Merge remote-tracking branch 'upstream/master' into wait-cli-improvements
-rw-r--r--  .github/workflows/cla-check.yaml | 1
-rw-r--r--  .github/workflows/test.yaml | 4
-rw-r--r--  NEWS.md | 9
-rw-r--r--  advisor/backend_test.go | 76
-rw-r--r--  aspects/aspects.go | 15
-rw-r--r--  aspects/aspects_test.go | 36
-rw-r--r--  aspects/transaction.go | 136
-rw-r--r--  aspects/transaction_test.go | 350
-rw-r--r--  build-aux/snap/patches/apparmor/parser-replace-dynamic_cast-with-is_type-method.patch | 791
-rw-r--r--  build-aux/snap/snapcraft.yaml | 4
-rw-r--r--  client/model.go | 113
-rw-r--r--  client/model_test.go | 90
-rw-r--r--  client/systems.go | 1
-rw-r--r--  client/systems_test.go | 19
-rw-r--r--  cmd/snap-bootstrap/cmd_initramfs_mounts.go | 3
-rw-r--r--  cmd/snap-bootstrap/cmd_initramfs_mounts_test.go | 11
-rw-r--r--  cmd/snap/cmd_aliases.go | 4
-rw-r--r--  cmd/snap/cmd_aliases_test.go | 4
-rw-r--r--  cmd/snap/cmd_help.go | 2
-rw-r--r--  cmd/snap/cmd_remodel.go | 34
-rw-r--r--  cmd/snap/cmd_remodel_test.go | 109
-rw-r--r--  cmd/snapd-apparmor/main_test.go | 1
-rw-r--r--  daemon/api.go | 4
-rw-r--r--  daemon/api_aspects.go | 19
-rw-r--r--  daemon/api_aspects_test.go | 148
-rw-r--r--  daemon/api_model.go | 22
-rw-r--r--  daemon/api_model_test.go | 61
-rw-r--r--  daemon/api_systems_test.go | 3
-rw-r--r--  daemon/daemon.go | 16
-rw-r--r--  daemon/export_test.go | 17
-rw-r--r--  data/preseed.json | 2
-rw-r--r--  dirs/dirs.go | 40
-rw-r--r--  gadget/gadget.go | 61
-rw-r--r--  gadget/gadget_test.go | 281
-rw-r--r--  gadget/gadgettest/examples.go | 70
-rw-r--r--  gadget/gadgettest/gadgettest.go | 10
-rw-r--r--  gadget/install/content.go | 19
-rw-r--r--  gadget/install/content_test.go | 83
-rw-r--r--  gadget/install/install.go | 5
-rw-r--r--  gadget/install/install_test.go | 291
-rw-r--r--  gadget/install/partition.go | 22
-rw-r--r--  gadget/install/partition_test.go | 183
-rw-r--r--  gadget/layout.go | 17
-rw-r--r--  gadget/partial.go | 135
-rw-r--r--  gadget/partial_test.go | 283
-rw-r--r--  gadget/update.go | 157
-rw-r--r--  gadget/update_test.go | 353
-rw-r--r--  go.mod | 4
-rw-r--r--  go.sum | 8
-rw-r--r--  image/image_linux.go | 9
-rw-r--r--  image/preseed/preseed_linux.go | 10
-rw-r--r--  interfaces/apparmor/backend.go | 6
-rw-r--r--  interfaces/apparmor/backend_test.go | 57
-rw-r--r--  interfaces/apparmor/spec.go | 4
-rw-r--r--  interfaces/apparmor/template.go | 4
-rw-r--r--  interfaces/builtin/i2c.go | 2
-rw-r--r--  interfaces/builtin/iio.go | 2
-rw-r--r--  interfaces/builtin/network_control.go | 2
-rw-r--r--  interfaces/builtin/opengl.go | 4
-rw-r--r--  interfaces/builtin/opengl_test.go | 8
-rw-r--r--  interfaces/builtin/shared_memory.go | 2
-rw-r--r--  interfaces/builtin/shared_memory_test.go | 2
-rw-r--r--  interfaces/builtin/shutdown.go | 2
-rw-r--r--  interfaces/builtin/spi.go | 2
-rw-r--r--  interfaces/builtin/system_observe.go | 1
-rw-r--r--  interfaces/ifacetest/backendtest.go | 10
-rw-r--r--  interfaces/mount/backend_test.go | 114
-rw-r--r--  overlord/aspectstate/aspectstate.go | 101
-rw-r--r--  overlord/aspectstate/aspectstate_test.go | 142
-rw-r--r--  overlord/assertstate/assertstate_test.go | 30
-rw-r--r--  overlord/assertstate/bulk.go | 2
-rw-r--r--  overlord/devicestate/devicestate_install_api_test.go | 40
-rw-r--r--  overlord/devicestate/devicestate_remodel_test.go | 5
-rw-r--r--  overlord/devicestate/handlers_install.go | 8
-rw-r--r--  overlord/hookstate/ctlcmd/refresh_test.go | 3
-rw-r--r--  overlord/hookstate/hooks_test.go | 3
-rw-r--r--  overlord/snapstate/autorefresh.go | 6
-rw-r--r--  overlord/snapstate/autorefresh_test.go | 2
-rw-r--r--  overlord/snapstate/backend_test.go | 5
-rw-r--r--  overlord/snapstate/conflict.go | 61
-rw-r--r--  overlord/snapstate/handlers.go | 8
-rw-r--r--  overlord/snapstate/handlers_download_test.go | 4
-rw-r--r--  overlord/snapstate/refreshhints.go | 4
-rw-r--r--  overlord/snapstate/refreshhints_test.go | 9
-rw-r--r--  overlord/snapstate/snapmgr.go | 5
-rw-r--r--  overlord/snapstate/snapstate.go | 49
-rw-r--r--  overlord/snapstate/snapstate_install_test.go | 25
-rw-r--r--  overlord/snapstate/snapstate_test.go | 43
-rw-r--r--  overlord/snapstate/snapstate_update_test.go | 204
-rw-r--r--  overlord/state/change.go | 52
-rw-r--r--  overlord/state/state.go | 65
-rw-r--r--  overlord/state/state_test.go | 158
-rw-r--r--  overlord/state/task.go | 55
-rw-r--r--  packaging/arch/PKGBUILD | 2
-rw-r--r--  packaging/debian-sid/changelog | 15
-rw-r--r--  packaging/fedora/snapd.spec | 14
l---------  packaging/opensuse-15.4 | 2
l---------  packaging/opensuse-15.5 (renamed from packaging/opensuse-15.3) | 0
-rw-r--r--  packaging/opensuse/snapd.changes | 5
-rw-r--r--  packaging/opensuse/snapd.spec | 2
-rw-r--r--  packaging/ubuntu-14.04/changelog | 15
-rw-r--r--  packaging/ubuntu-16.04/changelog | 15
-rw-r--r--  sandbox/apparmor/apparmor.go | 45
-rw-r--r--  sandbox/apparmor/apparmor_test.go | 85
-rw-r--r--  sandbox/apparmor/export_test.go | 4
-rw-r--r--  sandbox/apparmor/profile.go | 9
-rw-r--r--  sandbox/apparmor/profile_test.go | 66
-rw-r--r--  secboot/luks2/cryptsetup.go | 106
-rw-r--r--  secboot/luks2/cryptsetup_test.go | 118
-rw-r--r--  secboot/luks2/export_test.go | 31
-rw-r--r--  secboot/luks2/fifo.go | 10
-rw-r--r--  snap/naming/core_version.go | 56
-rw-r--r--  snap/naming/core_version_test.go | 53
-rw-r--r--  spread.yaml | 59
-rw-r--r--  store/download_test.go | 2
-rw-r--r--  store/store_action.go | 4
-rw-r--r--  store/store_action_test.go | 2
-rw-r--r--  store/store_download.go | 4
-rw-r--r--  store/store_download_test.go | 4
l---------  tests/bin/tests.env | 1
-rw-r--r--  tests/core/basic18/task.yaml | 9
-rw-r--r--  tests/core/basic20plus/task.yaml | 9
-rw-r--r--  tests/lib/external/snapd-testing-tools/spread.yaml | 7
-rw-r--r--  tests/lib/external/snapd-testing-tools/tests/os.query/task.yaml | 32
-rw-r--r--  tests/lib/external/snapd-testing-tools/tests/remote.wait-for/task.yaml | 2
-rw-r--r--  tests/lib/external/snapd-testing-tools/tests/tests.pkgs/task.yaml | 11
-rwxr-xr-x  tests/lib/external/snapd-testing-tools/tools/os.query | 17
-rwxr-xr-x  tests/lib/external/snapd-testing-tools/tools/tests.pkgs | 14
-rw-r--r--  tests/lib/external/snapd-testing-tools/tools/tests.pkgs.apt.sh | 17
-rw-r--r--  tests/lib/external/snapd-testing-tools/tools/tests.pkgs.dnf-yum.sh | 24
-rw-r--r--  tests/lib/external/snapd-testing-tools/tools/tests.pkgs.pacman.sh | 18
-rw-r--r--  tests/lib/external/snapd-testing-tools/tools/tests.pkgs.zypper.sh | 17
-rwxr-xr-x  tests/lib/external/snapd-testing-tools/utils/spread-shellcheck | 5
-rwxr-xr-x  tests/lib/external/snapd-testing-tools/utils/spreadJ | 119
-rw-r--r--  tests/lib/image.sh | 6
-rw-r--r--  tests/lib/muinstaller/go.mod | 8
-rw-r--r--  tests/lib/muinstaller/go.sum | 41
-rw-r--r--  tests/lib/muinstaller/main.go | 104
-rw-r--r--  tests/lib/muinstaller/snapcraft.yaml | 5
-rw-r--r--  tests/lib/nested.sh | 4
-rwxr-xr-x  tests/lib/pkgdb.sh | 13
-rwxr-xr-x  tests/lib/prepare-restore.sh | 14
-rwxr-xr-x  tests/lib/prepare.sh | 60
-rwxr-xr-x  tests/lib/state.sh | 2
-rw-r--r--  tests/lib/tools/suite/tests.env/task.yaml | 59
-rwxr-xr-x  tests/lib/tools/tests.env | 130
-rw-r--r--  tests/main/cgroup-devices-v1/task.yaml | 2
-rw-r--r--  tests/main/cgroup-devices-v2/task.yaml | 1
-rw-r--r--  tests/main/cgroup-freezer/task.yaml | 2
-rw-r--r--  tests/main/debug-sandbox/task.yaml | 2
-rw-r--r--  tests/main/degraded/task.yaml | 5
-rw-r--r--  tests/main/download-timeout/task.yaml | 2
-rw-r--r--  tests/main/fake-netplan-apply/task.yaml | 2
-rw-r--r--  tests/main/interfaces-avahi-observe/task.yaml | 5
-rw-r--r--  tests/main/interfaces-calendar-service/task.yaml | 5
-rw-r--r--  tests/main/interfaces-contacts-service/task.yaml | 5
-rw-r--r--  tests/main/lxd-mount-units/task.yaml | 8
-rw-r--r--  tests/main/lxd-postrm-purge/task.yaml | 4
-rw-r--r--  tests/main/lxd-try/task.yaml | 4
-rw-r--r--  tests/main/lxd/task.yaml | 8
-rw-r--r--  tests/main/microk8s-smoke/task.yaml | 1
-rw-r--r--  tests/main/nfs-support/task.yaml | 2
-rw-r--r--  tests/main/security-seccomp/task.yaml | 4
-rw-r--r--  tests/main/snap-logs-journal/task.yaml | 1
-rw-r--r--  tests/main/snap-quota-journal/task.yaml | 1
-rw-r--r--  tests/main/snap-quota-services/task.yaml | 1
-rw-r--r--  tests/main/snapd-snap/task.yaml | 14
-rw-r--r--  tests/main/store-state/task.yaml | 3
-rw-r--r--  tests/main/system-usernames-illegal/task.yaml | 2
-rw-r--r--  tests/main/system-usernames-install-twice/task.yaml | 2
-rw-r--r--  tests/main/system-usernames-missing-user/task.yaml | 2
-rw-r--r--  tests/main/system-usernames-snap-scoped/task.yaml | 4
-rw-r--r--  tests/main/system-usernames/task.yaml | 2
-rw-r--r--  tests/main/uc20-create-partitions-encrypt/task.yaml | 5
-rw-r--r--  tests/main/uc20-create-partitions/task.yaml | 2
-rw-r--r--  tests/nested/core/core20-factory-reset/task.yaml | 56
-rw-r--r--  tests/nested/manual/core20-new-snapd-does-not-break-old-initrd/task.yaml | 8
-rw-r--r--  tests/nested/manual/muinstaller-real/gadget-partial.yaml | 50
-rw-r--r--  tests/nested/manual/muinstaller-real/task.yaml | 119
-rw-r--r--  tests/regression/lp-1848567/task.yaml | 2
180 files changed, 6225 insertions, 1044 deletions
diff --git a/.github/workflows/cla-check.yaml b/.github/workflows/cla-check.yaml
index bced5041b8..b9102d6852 100644
--- a/.github/workflows/cla-check.yaml
+++ b/.github/workflows/cla-check.yaml
@@ -12,3 +12,4 @@ jobs:
uses: canonical/has-signed-canonical-cla@v1
with:
accept-existing-contributors: true
+ exempted-bots: 'Launchpad Translations on behalf of snappy-dev,dependabot'
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 5d0575dfcc..10dfcae4bb 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -366,12 +366,13 @@ jobs:
- centos-7-64
- centos-8-64
- centos-9-64
- - debian-10-64
- debian-11-64
+ - debian-12-64
- debian-sid-64
- fedora-36-64
- fedora-37-64
- opensuse-15.4-64
+ - opensuse-15.5-64
- opensuse-tumbleweed-64
- ubuntu-14.04-64
- ubuntu-16.04-64
@@ -382,6 +383,7 @@ jobs:
- ubuntu-22.04-64
- ubuntu-22.10-64
- ubuntu-23.04-64
+ - ubuntu-23.10-64
- ubuntu-core-16-64
- ubuntu-core-18-64
- ubuntu-core-20-64
diff --git a/NEWS.md b/NEWS.md
index a366712d88..a4d4c0ae3a 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,3 +1,12 @@
+# New in snapd 2.60.1:
+* Bugfixes
+* Use "aes-cbc-essiv:sha256" in cryptsetup on arm 32bit devices
+ to increase speed on devices with CAAM support
+* Stop using `-O no-expr-simplify` in apparmor_parser to avoid
+ potential exponential memory use. This can lead to slower
+ policy compilation in some cases but it is much safer on
+ low memory devices.
+
# New in snapd 2.60:
* Support for dynamic snapshot data exclusions
* Apparmor userspace is vendored inside the snapd snap
diff --git a/advisor/backend_test.go b/advisor/backend_test.go
new file mode 100644
index 0000000000..25b35ea6d6
--- /dev/null
+++ b/advisor/backend_test.go
@@ -0,0 +1,76 @@
+// -*- Mode: Go; indent-tabs-mode: t -*-
+
+/*
+ * Copyright (C) 2023 Canonical Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 3 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package advisor_test
+
+import (
+ "os"
+
+ . "gopkg.in/check.v1"
+
+ "github.com/snapcore/snapd/advisor"
+ "github.com/snapcore/snapd/dirs"
+)
+
+type backendSuite struct{}
+
+var _ = Suite(&backendSuite{})
+
+func (s *backendSuite) SetUpTest(c *C) {
+ dirs.SetRootDir(c.MkDir())
+ c.Assert(os.MkdirAll(dirs.SnapCacheDir, 0755), IsNil)
+
+ // create an empty DB
+ db, err := advisor.Create()
+ c.Assert(err, IsNil)
+ err = db.Commit()
+ c.Assert(err, IsNil)
+}
+
+func dumpCommands(c *C) map[string]string {
+ cmds, err := advisor.DumpCommands()
+ c.Assert(err, IsNil)
+ return cmds
+}
+
+func (s *backendSuite) TestCreateCommit(c *C) {
+ expectedCommands := map[string]string{
+ "meh": `[{"snap":"foo","version":"1.0"}]`,
+ "foo": `[{"snap":"foo","version":"1.0"}]`,
+ }
+
+ db, err := advisor.Create()
+ c.Assert(err, IsNil)
+ c.Assert(db.AddSnap("foo", "1.0", "foo summary", []string{"foo", "meh"}), IsNil)
+ // adding does not change the DB
+ c.Check(dumpCommands(c), DeepEquals, map[string]string{})
+ // but commit does
+ c.Assert(db.Commit(), IsNil)
+ c.Check(dumpCommands(c), DeepEquals, expectedCommands)
+}
+
+func (s *backendSuite) TestCreateRollback(c *C) {
+ db, err := advisor.Create()
+ c.Assert(err, IsNil)
+ // adding does not change the DB
+ c.Assert(db.AddSnap("foo", "1.0", "foo summary", []string{"foo", "meh"}), IsNil)
+ // and rollback ensures any change is reverted
+ c.Assert(db.Rollback(), IsNil)
+ c.Check(dumpCommands(c), DeepEquals, map[string]string{})
+}
diff --git a/aspects/aspects.go b/aspects/aspects.go
index 694076e7ca..bf186de30f 100644
--- a/aspects/aspects.go
+++ b/aspects/aspects.go
@@ -513,8 +513,7 @@ type JSONDataBag map[string]json.RawMessage
// NewJSONDataBag returns a DataBag implementation that stores data in JSON.
// The top-level of the JSON structure is always a map.
func NewJSONDataBag() JSONDataBag {
- storage := make(map[string]json.RawMessage)
- return storage
+ return JSONDataBag(make(map[string]json.RawMessage))
}
// Get takes a path and a pointer to a variable into which the value referenced
@@ -658,6 +657,18 @@ func (s JSONDataBag) Data() ([]byte, error) {
return json.Marshal(s)
}
+// Copy returns a copy of the databag.
+func (s JSONDataBag) Copy() JSONDataBag {
+ toplevel := map[string]json.RawMessage(s)
+ copy := make(map[string]json.RawMessage, len(toplevel))
+
+ for k, v := range toplevel {
+ copy[k] = v
+ }
+
+ return JSONDataBag(copy)
+}
+
// JSONSchema is the Schema implementation corresponding to JSONDataBag and it's
// able to validate its data.
type JSONSchema struct{}
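
The Copy method added above duplicates only the top-level map; the json.RawMessage values are shared with the original. That is enough for isolation as long as updates replace top-level entries with freshly marshalled values instead of mutating the shared bytes in place, which is the behaviour the TestJSONDataBagCopy test below relies on. A minimal sketch of that shallow-copy idea, using a plain map rather than the real aspects package (copyBag is an illustrative stand-in for JSONDataBag.Copy):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // copyBag mirrors the shape of JSONDataBag.Copy: duplicate the top-level
    // map while sharing the underlying json.RawMessage values.
    func copyBag(src map[string]json.RawMessage) map[string]json.RawMessage {
        dst := make(map[string]json.RawMessage, len(src))
        for k, v := range src {
            dst[k] = v
        }
        return dst
    }

    func main() {
        orig := map[string]json.RawMessage{"foo": json.RawMessage(`"bar"`)}
        cp := copyBag(orig)

        // a Set-style update replaces the entry with a new RawMessage rather
        // than mutating the shared bytes, so the original stays untouched
        cp["foo"] = json.RawMessage(`"baz"`)

        fmt.Println(string(orig["foo"])) // "bar"
        fmt.Println(string(cp["foo"]))   // "baz"
    }
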
diff --git a/aspects/aspects_test.go b/aspects/aspects_test.go
index a3b1c8a624..58bb33aeeb 100644
--- a/aspects/aspects_test.go
+++ b/aspects/aspects_test.go
@@ -281,7 +281,7 @@ type witnessDataBag struct {
getPath, setPath string
}
-func newSpyDataBag(bag aspects.DataBag) *witnessDataBag {
+func newWitnessDataBag(bag aspects.DataBag) *witnessDataBag {
return &witnessDataBag{bag: bag}
}
@@ -359,7 +359,7 @@ func (s *aspectSuite) TestAspectAssertionWithPlaceholder(c *C) {
} {
cmt := Commentf("sub-test %q failed", t.testName)
- databag := newSpyDataBag(aspects.NewJSONDataBag())
+ databag := newWitnessDataBag(aspects.NewJSONDataBag())
err := aspect.Set(databag, t.name, "expectedValue")
c.Assert(err, IsNil, cmt)
@@ -604,3 +604,35 @@ func (s *aspectSuite) TestIsNotFoundHelper(c *C) {
c.Assert(aspects.IsNotFound(&aspects.FieldNotFoundError{}), Equals, true)
c.Assert(aspects.IsNotFound(&aspects.InvalidAccessError{}), Equals, false)
}
+
+func (s *aspectSuite) TestJSONDataBagCopy(c *C) {
+ bag := aspects.NewJSONDataBag()
+ err := bag.Set("foo", "bar")
+ c.Assert(err, IsNil)
+
+ // precondition check
+ data, err := bag.Data()
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, `{"foo":"bar"}`)
+
+ bagCopy := bag.Copy()
+ data, err = bagCopy.Data()
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, `{"foo":"bar"}`)
+
+ // changes in the copied bag don't affect the original
+ err = bagCopy.Set("foo", "baz")
+ c.Assert(err, IsNil)
+
+ data, err = bag.Data()
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, `{"foo":"bar"}`)
+
+ // and vice-versa
+ err = bag.Set("foo", "zab")
+ c.Assert(err, IsNil)
+
+ data, err = bagCopy.Data()
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, `{"foo":"baz"}`)
+}
diff --git a/aspects/transaction.go b/aspects/transaction.go
new file mode 100644
index 0000000000..dde36b2dad
--- /dev/null
+++ b/aspects/transaction.go
@@ -0,0 +1,136 @@
+// -*- Mode: Go; indent-tabs-mode: t -*-
+/*
+ * Copyright (C) 2023 Canonical Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 3 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package aspects
+
+import (
+ "sync"
+)
+
+type DatabagRead func() (JSONDataBag, error)
+type DatabagWrite func(JSONDataBag) error
+
+// Transaction performs read and writes to a databag in an atomic way.
+type Transaction struct {
+ pristine JSONDataBag
+ deltas []map[string]interface{}
+ schema Schema
+
+ readDatabag DatabagRead
+ writeDatabag DatabagWrite
+ mu sync.RWMutex
+}
+
+// NewTransaction takes a getter and setter to read and write the databag.
+func NewTransaction(readDatabag DatabagRead, writeDatabag DatabagWrite, schema Schema) (*Transaction, error) {
+ databag, err := readDatabag()
+ if err != nil {
+ return nil, err
+ }
+
+ return &Transaction{
+ pristine: databag.Copy(),
+ schema: schema,
+ readDatabag: readDatabag,
+ writeDatabag: writeDatabag,
+ }, nil
+}
+
+// Set sets a value in the transaction's databag. The change isn't persisted
+// until Commit returns without errors.
+func (t *Transaction) Set(path string, value interface{}) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ t.deltas = append(t.deltas, map[string]interface{}{path: value})
+ return nil
+}
+
+// Get reads a value from the transaction's databag including uncommitted changes.
+func (t *Transaction) Get(path string, value interface{}) error {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+
+ // if there are changes, create a copy before applying (for isolation)
+ bag := t.pristine
+ if len(t.deltas) != 0 {
+ bag = t.pristine.Copy()
+
+ if err := applyDeltas(bag, t.deltas); err != nil {
+ return err
+ }
+ }
+
+ return bag.Get(path, value)
+}
+
+// Commit applies the previous writes and validates the final databag. If any
+// error occurs, the original databag is kept.
+func (t *Transaction) Commit() error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ pristine, err := t.readDatabag()
+ if err != nil {
+ return err
+ }
+
+ // ensure we're using a different databag, so outside changes can't affect
+ // the transaction
+ pristine = pristine.Copy()
+
+ if err := applyDeltas(pristine, t.deltas); err != nil {
+ return err
+ }
+
+ data, err := pristine.Data()
+ if err != nil {
+ return err
+ }
+
+ if err := t.schema.Validate(data); err != nil {
+ return err
+ }
+
+ // copy the databag before writing to make sure the writer can't modify
+ // it and introduce changes into the transaction
+ if err := t.writeDatabag(pristine.Copy()); err != nil {
+ return err
+ }
+
+ t.pristine = pristine
+ t.deltas = nil
+ return nil
+}
+
+func applyDeltas(bag JSONDataBag, deltas []map[string]interface{}) error {
+ // changes must be applied in the order they were written
+ for _, delta := range deltas {
+ for k, v := range delta {
+ if err := bag.Set(k, v); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// Data returns the transaction's committed data.
+func (t *Transaction) Data() ([]byte, error) {
+ return t.pristine.Data()
+}
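
Taken together, the type above implements a buffered, optimistic transaction: Set only records deltas, Get serves reads from a copy of the pristine bag with the deltas applied, and Commit re-reads the current databag, re-applies the deltas on top, validates the result against the schema and only then writes it back. A small usage sketch against the API added in this file (the in-memory read and write callbacks are stand-ins for whatever persistence layer a caller would wire in):

    package main

    import (
        "fmt"

        "github.com/snapcore/snapd/aspects"
    )

    func main() {
        // stand-in persistence layer: a single in-memory databag
        stored := aspects.NewJSONDataBag()
        read := func() (aspects.JSONDataBag, error) { return stored, nil }
        write := func(bag aspects.JSONDataBag) error { stored = bag; return nil }

        tx, err := aspects.NewTransaction(read, write, aspects.NewJSONSchema())
        if err != nil {
            panic(err)
        }

        // recorded as a delta, nothing is persisted yet
        if err := tx.Set("foo", "bar"); err != nil {
            panic(err)
        }

        // Get sees the uncommitted delta
        var v string
        if err := tx.Get("foo", &v); err != nil {
            panic(err)
        }
        fmt.Println(v) // bar

        // Commit re-reads the bag, applies the deltas, validates and writes
        if err := tx.Commit(); err != nil {
            panic(err)
        }

        data, err := stored.Data()
        if err != nil {
            panic(err)
        }
        fmt.Println(string(data)) // {"foo":"bar"}
    }
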
diff --git a/aspects/transaction_test.go b/aspects/transaction_test.go
new file mode 100644
index 0000000000..b1995900f2
--- /dev/null
+++ b/aspects/transaction_test.go
@@ -0,0 +1,350 @@
+// -*- Mode: Go; indent-tabs-mode: t -*-
+/*
+ * Copyright (C) 2023 Canonical Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 3 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package aspects_test
+
+import (
+ "errors"
+
+ . "gopkg.in/check.v1"
+
+ "github.com/snapcore/snapd/aspects"
+)
+
+type transactionTestSuite struct{}
+
+var _ = Suite(&transactionTestSuite{})
+
+type witnessReadWriter struct {
+ readCalled int
+ writeCalled int
+ bag aspects.JSONDataBag
+ writtenDatabag aspects.JSONDataBag
+}
+
+func (w *witnessReadWriter) read() (aspects.JSONDataBag, error) {
+ w.readCalled++
+ return w.bag, nil
+}
+
+func (w *witnessReadWriter) write(bag aspects.JSONDataBag) error {
+ w.writeCalled++
+ w.writtenDatabag = bag
+ return nil
+}
+
+func (s *transactionTestSuite) TestSet(c *C) {
+ bag := aspects.NewJSONDataBag()
+ witness := &witnessReadWriter{bag: bag}
+ schema := aspects.NewJSONSchema()
+ tx, err := aspects.NewTransaction(witness.read, witness.write, schema)
+ c.Assert(err, IsNil)
+ c.Assert(witness.readCalled, Equals, 1)
+
+ err = tx.Set("foo", "bar")
+ c.Assert(err, IsNil)
+ c.Assert(witness.writeCalled, Equals, 0)
+
+ var value interface{}
+ err = witness.writtenDatabag.Get("foo", &value)
+ c.Assert(err, FitsTypeOf, &aspects.FieldNotFoundError{})
+}
+
+func (s *transactionTestSuite) TestCommit(c *C) {
+ witness := &witnessReadWriter{bag: aspects.NewJSONDataBag()}
+ schema := aspects.NewJSONSchema()
+ tx, err := aspects.NewTransaction(witness.read, witness.write, schema)
+ c.Assert(err, IsNil)
+ c.Assert(witness.readCalled, Equals, 1)
+
+ err = tx.Set("foo", "bar")
+ c.Assert(err, IsNil)
+ c.Assert(witness.readCalled, Equals, 1)
+ c.Assert(witness.writeCalled, Equals, 0)
+ c.Assert(witness.writtenDatabag, IsNil)
+
+ err = tx.Commit()
+ c.Assert(err, IsNil)
+
+ var value interface{}
+ err = witness.writtenDatabag.Get("foo", &value)
+ c.Assert(err, IsNil)
+
+ c.Assert(value, Equals, "bar")
+ c.Assert(witness.writeCalled, Equals, 1)
+}
+
+func (s *transactionTestSuite) TestGetReadsUncommitted(c *C) {
+ databag := aspects.NewJSONDataBag()
+ witness := &witnessReadWriter{bag: databag}
+ schema := aspects.NewJSONSchema()
+ tx, err := aspects.NewTransaction(witness.read, witness.write, schema)
+ c.Assert(err, IsNil)
+
+ err = databag.Set("foo", "bar")
+ c.Assert(err, IsNil)
+
+ err = tx.Set("foo", "baz")
+ c.Assert(err, IsNil)
+ // nothing was committed
+ c.Assert(witness.writeCalled, Equals, 0)
+ c.Assert(txData(c, tx), Equals, "{}")
+
+ var val string
+ err = tx.Get("foo", &val)
+ c.Assert(err, IsNil)
+ c.Assert(val, Equals, "baz")
+}
+
+type failingSchema struct {
+ err error
+}
+
+func (f *failingSchema) Validate([]byte) error {
+ return f.err
+}
+
+func (s *transactionTestSuite) TestRollBackOnCommitError(c *C) {
+ databag := aspects.NewJSONDataBag()
+ witness := &witnessReadWriter{bag: databag}
+ schema := &failingSchema{err: errors.New("expected error")}
+ tx, err := aspects.NewTransaction(witness.read, witness.write, schema)
+ c.Assert(err, IsNil)
+
+ err = tx.Set("foo", "bar")
+ c.Assert(err, IsNil)
+
+ err = tx.Commit()
+ c.Assert(err, ErrorMatches, "expected error")
+
+ // nothing was committed
+ c.Assert(witness.writeCalled, Equals, 0)
+ c.Assert(txData(c, tx), Equals, "{}")
+
+ // but subsequent Gets still read the uncommitted values
+ var val string
+ err = tx.Get("foo", &val)
+ c.Assert(err, IsNil)
+ c.Assert(val, Equals, "bar")
+}
+
+func (s *transactionTestSuite) TestManyWrites(c *C) {
+ databag := aspects.NewJSONDataBag()
+ witness := &witnessReadWriter{bag: databag}
+ schema := aspects.NewJSONSchema()
+ tx, err := aspects.NewTransaction(witness.read, witness.write, schema)
+ c.Assert(err, IsNil)
+
+ err = tx.Set("foo", "bar")
+ c.Assert(err, IsNil)
+ err = tx.Set("foo", "baz")
+ c.Assert(err, IsNil)
+
+ err = tx.Commit()
+ c.Assert(err, IsNil)
+ c.Assert(witness.writeCalled, Equals, 1)
+
+ // writes are applied in chronological order
+ c.Assert(txData(c, tx), Equals, `{"foo":"baz"}`)
+
+ var value interface{}
+ err = witness.writtenDatabag.Get("foo", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "baz")
+}
+
+func (s *transactionTestSuite) TestCommittedIncludesRecentWrites(c *C) {
+ databag := aspects.NewJSONDataBag()
+ witness := &witnessReadWriter{bag: databag}
+ schema := aspects.NewJSONSchema()
+ tx, err := aspects.NewTransaction(witness.read, witness.write, schema)
+ c.Assert(err, IsNil)
+ c.Assert(witness.readCalled, Equals, 1)
+
+ err = tx.Set("foo", "bar")
+ c.Assert(err, IsNil)
+
+ err = databag.Set("bar", "baz")
+ c.Assert(err, IsNil)
+
+ err = tx.Commit()
+ c.Assert(err, IsNil)
+ // databag was read from state before writing
+ c.Assert(witness.readCalled, Equals, 2)
+ c.Assert(witness.writeCalled, Equals, 1)
+
+ // writes are applied in chronological order
+ var value interface{}
+ err = witness.writtenDatabag.Get("foo", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "bar")
+
+ // contains recent values not written by the transaction
+ err = witness.writtenDatabag.Get("bar", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "baz")
+}
+
+func (s *transactionTestSuite) TestCommittedIncludesPreviousCommit(c *C) {
+ var databag aspects.JSONDataBag
+ readBag := func() (aspects.JSONDataBag, error) {
+ if databag == nil {
+ return aspects.NewJSONDataBag(), nil
+ }
+ return databag, nil
+ }
+
+ writeBag := func(bag aspects.JSONDataBag) error {
+ databag = bag
+ return nil
+ }
+
+ schema := aspects.NewJSONSchema()
+ txOne, err := aspects.NewTransaction(readBag, writeBag, schema)
+ c.Assert(err, IsNil)
+
+ txTwo, err := aspects.NewTransaction(readBag, writeBag, schema)
+ c.Assert(err, IsNil)
+
+ err = txOne.Set("foo", "bar")
+ c.Assert(err, IsNil)
+
+ err = txTwo.Set("bar", "baz")
+ c.Assert(err, IsNil)
+
+ err = txOne.Commit()
+ c.Assert(err, IsNil)
+
+ var value interface{}
+ err = databag.Get("foo", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "bar")
+
+ err = databag.Get("bar", &value)
+ c.Assert(err, FitsTypeOf, &aspects.FieldNotFoundError{})
+
+ err = txTwo.Commit()
+ c.Assert(err, IsNil)
+
+ value = nil
+ err = databag.Get("foo", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "bar")
+
+ err = databag.Get("bar", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "baz")
+}
+
+func (s *transactionTestSuite) TestTransactionBagReadError(c *C) {
+ var readErr error
+ readBag := func() (aspects.JSONDataBag, error) {
+ return nil, readErr
+ }
+ writeBag := func(_ aspects.JSONDataBag) error {
+ return nil
+ }
+
+ schema := aspects.NewJSONSchema()
+ txOne, err := aspects.NewTransaction(readBag, writeBag, schema)
+ c.Assert(err, IsNil)
+
+ readErr = errors.New("expected")
+ // Commit()'s databag read fails
+ err = txOne.Commit()
+ c.Assert(err, ErrorMatches, "expected")
+
+ // NewTransaction()'s databag read fails
+ txOne, err = aspects.NewTransaction(readBag, writeBag, schema)
+ c.Assert(err, ErrorMatches, "expected")
+}
+
+func (s *transactionTestSuite) TestTransactionBagWriteError(c *C) {
+ readBag := func() (aspects.JSONDataBag, error) {
+ return nil, nil
+ }
+ var writeErr error
+ writeBag := func(_ aspects.JSONDataBag) error {
+ return writeErr
+ }
+
+ schema := aspects.NewJSONSchema()
+ txOne, err := aspects.NewTransaction(readBag, writeBag, schema)
+ c.Assert(err, IsNil)
+
+ writeErr = errors.New("expected")
+ // Commit()'s databag write fails
+ err = txOne.Commit()
+ c.Assert(err, ErrorMatches, "expected")
+}
+
+func (s *transactionTestSuite) TestTransactionReadsIsolated(c *C) {
+ databag := aspects.NewJSONDataBag()
+ readBag := func() (aspects.JSONDataBag, error) {
+ return databag, nil
+ }
+ writeBag := func(aspects.JSONDataBag) error {
+ return nil
+ }
+
+ schema := aspects.NewJSONSchema()
+ tx, err := aspects.NewTransaction(readBag, writeBag, schema)
+ c.Assert(err, IsNil)
+
+ err = databag.Set("foo", "bar")
+ c.Assert(err, IsNil)
+
+ var value interface{}
+ err = tx.Get("foo", &value)
+ c.Assert(err, FitsTypeOf, &aspects.FieldNotFoundError{})
+}
+
+func (s *transactionTestSuite) TestReadDatabagsAreCopiedForIsolation(c *C) {
+ witness := &witnessReadWriter{bag: aspects.NewJSONDataBag()}
+ schema := &failingSchema{}
+ tx, err := aspects.NewTransaction(witness.read, witness.write, schema)
+ c.Assert(err, IsNil)
+
+ err = tx.Set("foo", "bar")
+ c.Assert(err, IsNil)
+
+ err = tx.Commit()
+ c.Assert(err, IsNil)
+
+ err = tx.Set("foo", "baz")
+ c.Assert(err, IsNil)
+
+ var value interface{}
+ err = witness.writtenDatabag.Get("foo", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "bar")
+
+ schema.err = errors.New("expected error")
+ err = tx.Commit()
+ c.Assert(err, ErrorMatches, "expected error")
+
+ err = witness.writtenDatabag.Get("foo", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "bar")
+}
+
+func txData(c *C, tx *aspects.Transaction) string {
+ data, err := tx.Data()
+ c.Assert(err, IsNil)
+ return string(data)
+}
diff --git a/build-aux/snap/patches/apparmor/parser-replace-dynamic_cast-with-is_type-method.patch b/build-aux/snap/patches/apparmor/parser-replace-dynamic_cast-with-is_type-method.patch
new file mode 100644
index 0000000000..159e8deb66
--- /dev/null
+++ b/build-aux/snap/patches/apparmor/parser-replace-dynamic_cast-with-is_type-method.patch
@@ -0,0 +1,791 @@
+From 5aab543a3b03ecaea356a02928e5bb5b7a0d8fa5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Alfonso=20S=C3=A1nchez-Beato?=
+ <alfonso.sanchez-beato@canonical.com>
+Date: Mon, 15 Feb 2021 16:26:18 +0100
+Subject: [PATCH] parser: replace dynamic_cast with is_type method
+
+The dynamic_cast operator is slow as it needs to look at RTTI
+information and even does some string comparisons, especially in deep
+hierarchies like the one for Node. Profiling with callgrind showed
+that dynamic_cast can eat a huge portion of the running time, as it
+takes most of the time that is spent in the simplify_tree()
+function. For some complex profiles, the number of calls to
+dynamic_cast can be in the range of millions.
+
+This commit replaces the use of dynamic_cast in the Node hierarchy
+with a method called is_type(), which returns true if the pointer can
+be cast to the specified type. It works by looking at a Node object
+field that is an integer with bits set for each type up in the
+hierarchy. Therefore, dynamic_cast is replaced by a simple bitwise
+operation.
+
+This change can reduce the compilation times for some profiles by more
+than 50%, especially on arm/arm64. This opens the door to maybe
+avoiding "-O no-expr-simplify" in the snapd daemon, as now that option
+would make the compilation slower in almost all cases.
+
+This is the example profile used in some of my tests; with this change
+the run-time is around 1/3 of what it was before on an x86 laptop:
+
+profile "test" (attach_disconnected,mediate_deleted) {
+dbus send
+ bus={fcitx,session}
+ path=/inputcontext_[0-9]*
+ interface=org.fcitx.Fcitx.InputContext
+ member="{Close,Destroy,Enable}IC"
+ peer=(label=unconfined),
+dbus send
+ bus={fcitx,session}
+ path=/inputcontext_[0-9]*
+ interface=org.fcitx.Fcitx.InputContext
+ member=Reset
+ peer=(label=unconfined),
+dbus receive
+ bus=fcitx
+ peer=(label=unconfined),
+dbus receive
+ bus=session
+ interface=org.fcitx.Fcitx.*
+ peer=(label=unconfined),
+dbus send
+ bus={fcitx,session}
+ path=/inputcontext_[0-9]*
+ interface=org.fcitx.Fcitx.InputContext
+ member="Focus{In,Out}"
+ peer=(label=unconfined),
+dbus send
+ bus={fcitx,session}
+ path=/inputcontext_[0-9]*
+ interface=org.fcitx.Fcitx.InputContext
+ member="{CommitPreedit,Set*}"
+ peer=(label=unconfined),
+dbus send
+ bus={fcitx,session}
+ path=/inputcontext_[0-9]*
+ interface=org.fcitx.Fcitx.InputContext
+ member="{MouseEvent,ProcessKeyEvent}"
+ peer=(label=unconfined),
+dbus send
+ bus={fcitx,session}
+ path=/inputcontext_[0-9]*
+ interface=org.freedesktop.DBus.Properties
+ member=GetAll
+ peer=(label=unconfined),
+dbus (send)
+ bus=session
+ path=/org/a11y/bus
+ interface=org.a11y.Bus
+ member=GetAddress
+ peer=(label=unconfined),
+dbus (send)
+ bus=session
+ path=/org/a11y/bus
+ interface=org.freedesktop.DBus.Properties
+ member=Get{,All}
+ peer=(label=unconfined),
+dbus (receive, send)
+ bus=accessibility
+ path=/org/a11y/atspi/**
+ peer=(label=unconfined),
+dbus (send)
+ bus=system
+ path=/org/freedesktop/Accounts
+ interface=org.freedesktop.DBus.Introspectable
+ member=Introspect
+ peer=(label=unconfined),
+dbus (send)
+ bus=system
+ path=/org/freedesktop/Accounts
+ interface=org.freedesktop.Accounts
+ member=FindUserById
+ peer=(label=unconfined),
+dbus (receive, send)
+ bus=system
+ path=/org/freedesktop/Accounts/User[0-9]*
+ interface=org.freedesktop.DBus.Properties
+ member={Get,PropertiesChanged}
+ peer=(label=unconfined),
+dbus (send)
+ bus=session
+ interface=org.gtk.Actions
+ member=Changed
+ peer=(name=org.freedesktop.DBus, label=unconfined),
+dbus (receive)
+ bus=session
+ interface=org.gtk.Actions
+ member={Activate,DescribeAll,SetState}
+ peer=(label=unconfined),
+dbus (receive)
+ bus=session
+ interface=org.gtk.Menus
+ member={Start,End}
+ peer=(label=unconfined),
+dbus (send)
+ bus=session
+ interface=org.gtk.Menus
+ member=Changed
+ peer=(name=org.freedesktop.DBus, label=unconfined),
+dbus (send)
+ bus=session
+ path="/com/ubuntu/MenuRegistrar"
+ interface="com.ubuntu.MenuRegistrar"
+ member="{Register,Unregister}{App,Surface}Menu"
+ peer=(label=unconfined),
+}
+---
+ parser/libapparmor_re/aare_rules.cc | 10 +-
+ parser/libapparmor_re/expr-tree.cc | 63 +++++------
+ parser/libapparmor_re/expr-tree.h | 162 +++++++++++++++++++++-------
+ parser/libapparmor_re/hfa.cc | 9 +-
+ 4 files changed, 165 insertions(+), 79 deletions(-)
+
+diff --git a/parser/libapparmor_re/aare_rules.cc b/parser/libapparmor_re/aare_rules.cc
+index 1d56b3cb0..b250e1013 100644
+--- a/parser/libapparmor_re/aare_rules.cc
++++ b/parser/libapparmor_re/aare_rules.cc
+@@ -97,11 +97,11 @@ bool aare_rules::add_rule_vec(int deny, uint32_t perms, uint32_t audit,
+ */
+ exact_match = 1;
+ for (depth_first_traversal i(tree); i && exact_match; i++) {
+- if (dynamic_cast<StarNode *>(*i) ||
+- dynamic_cast<PlusNode *>(*i) ||
+- dynamic_cast<AnyCharNode *>(*i) ||
+- dynamic_cast<CharSetNode *>(*i) ||
+- dynamic_cast<NotCharSetNode *>(*i))
++ if ((*i)->is_type(NODE_TYPE_STAR) ||
++ (*i)->is_type(NODE_TYPE_PLUS) ||
++ (*i)->is_type(NODE_TYPE_ANYCHAR) ||
++ (*i)->is_type(NODE_TYPE_CHARSET) ||
++ (*i)->is_type(NODE_TYPE_NOTCHARSET))
+ exact_match = 0;
+ }
+
+diff --git a/parser/libapparmor_re/expr-tree.cc b/parser/libapparmor_re/expr-tree.cc
+index 28aa35000..7dc18b041 100644
+--- a/parser/libapparmor_re/expr-tree.cc
++++ b/parser/libapparmor_re/expr-tree.cc
+@@ -210,7 +210,7 @@ int TwoChildNode::normalize_eps(int dir)
+ // Test for E | (E | E) and E . (E . E) which will
+ // result in an infinite loop
+ Node *c = child[!dir];
+- if (dynamic_cast<TwoChildNode *>(c) &&
++ if (c->is_type(NODE_TYPE_TWOCHILD) &&
+ &epsnode == c->child[dir] &&
+ &epsnode == c->child[!dir]) {
+ c->release();
+@@ -229,7 +229,7 @@ void CatNode::normalize(int dir)
+ for (;;) {
+ if (normalize_eps(dir)) {
+ continue;
+- } else if (dynamic_cast<CatNode *>(child[dir])) {
++ } else if (child[dir]->is_type(NODE_TYPE_CAT)) {
+ // (ab)c -> a(bc)
+ rotate_node(this, dir);
+ } else {
+@@ -248,11 +248,11 @@ void AltNode::normalize(int dir)
+ for (;;) {
+ if (normalize_eps(dir)) {
+ continue;
+- } else if (dynamic_cast<AltNode *>(child[dir])) {
++ } else if (child[dir]->is_type(NODE_TYPE_ALT)) {
+ // (a | b) | c -> a | (b | c)
+ rotate_node(this, dir);
+- } else if (dynamic_cast<CharSetNode *>(child[dir]) &&
+- dynamic_cast<CharNode *>(child[!dir])) {
++ } else if (child[dir]->is_type(NODE_TYPE_CHARSET) &&
++ child[!dir]->is_type(NODE_TYPE_CHAR)) {
+ // [a] | b -> b | [a]
+ Node *c = child[dir];
+ child[dir] = child[!dir];
+@@ -344,7 +344,7 @@ static Node *alt_to_charsets(Node *t, int dir)
+
+ static Node *basic_alt_factor(Node *t, int dir)
+ {
+- if (!dynamic_cast<AltNode *>(t))
++ if (!t->is_type(NODE_TYPE_ALT))
+ return t;
+
+ if (t->child[dir]->eq(t->child[!dir])) {
+@@ -355,8 +355,8 @@ static Node *basic_alt_factor(Node *t, int dir)
+ return tmp;
+ }
+ // (ab) | (ac) -> a(b|c)
+- if (dynamic_cast<CatNode *>(t->child[dir]) &&
+- dynamic_cast<CatNode *>(t->child[!dir]) &&
++ if (t->child[dir]->is_type(NODE_TYPE_CAT) &&
++ t->child[!dir]->is_type(NODE_TYPE_CAT) &&
+ t->child[dir]->child[dir]->eq(t->child[!dir]->child[dir])) {
+ // (ab) | (ac) -> a(b|c)
+ Node *left = t->child[dir];
+@@ -369,7 +369,7 @@ static Node *basic_alt_factor(Node *t, int dir)
+ return left;
+ }
+ // a | (ab) -> a (E | b) -> a (b | E)
+- if (dynamic_cast<CatNode *>(t->child[!dir]) &&
++ if (t->child[!dir]->is_type(NODE_TYPE_CAT) &&
+ t->child[dir]->eq(t->child[!dir]->child[dir])) {
+ Node *c = t->child[!dir];
+ t->child[dir]->release();
+@@ -379,7 +379,7 @@ static Node *basic_alt_factor(Node *t, int dir)
+ return c;
+ }
+ // ab | (a) -> a (b | E)
+- if (dynamic_cast<CatNode *>(t->child[dir]) &&
++ if (t->child[dir]->is_type(NODE_TYPE_CAT) &&
+ t->child[dir]->child[dir]->eq(t->child[!dir])) {
+ Node *c = t->child[dir];
+ t->child[!dir]->release();
+@@ -394,7 +394,7 @@ static Node *basic_alt_factor(Node *t, int dir)
+
+ static Node *basic_simplify(Node *t, int dir)
+ {
+- if (dynamic_cast<CatNode *>(t) && &epsnode == t->child[!dir]) {
++ if (t->is_type(NODE_TYPE_CAT) && &epsnode == t->child[!dir]) {
+ // aE -> a
+ Node *tmp = t->child[dir];
+ t->child[dir] = NULL;
+@@ -419,7 +419,7 @@ static Node *basic_simplify(Node *t, int dir)
+ */
+ Node *simplify_tree_base(Node *t, int dir, bool &mod)
+ {
+- if (dynamic_cast<ImportantNode *>(t))
++ if (t->is_type(NODE_TYPE_IMPORTANT))
+ return t;
+
+ for (int i = 0; i < 2; i++) {
+@@ -442,15 +442,15 @@ Node *simplify_tree_base(Node *t, int dir, bool &mod)
+ }
+
+ /* all tests after this must meet 2 alt node condition */
+- if (!dynamic_cast<AltNode *>(t) ||
+- !dynamic_cast<AltNode *>(t->child[!dir]))
++ if (!t->is_type(NODE_TYPE_ALT) ||
++ !t->child[!dir]->is_type(NODE_TYPE_ALT))
+ break;
+
+ // a | (a | b) -> (a | b)
+ // a | (b | (c | a)) -> (b | (c | a))
+ Node *p = t;
+ Node *i = t->child[!dir];
+- for (; dynamic_cast<AltNode *>(i); p = i, i = i->child[!dir]) {
++ for (; i->is_type(NODE_TYPE_ALT); p = i, i = i->child[!dir]) {
+ if (t->child[dir]->eq(i->child[dir])) {
+ Node *tmp = t->child[!dir];
+ t->child[!dir] = NULL;
+@@ -475,19 +475,19 @@ Node *simplify_tree_base(Node *t, int dir, bool &mod)
+ int count = 0;
+ Node *subject = t->child[dir];
+ Node *a = subject;
+- if (dynamic_cast<CatNode *>(subject))
++ if (subject->is_type(NODE_TYPE_CAT))
+ a = subject->child[dir];
+
+ for (pp = p = t, i = t->child[!dir];
+- dynamic_cast<AltNode *>(i);) {
+- if ((dynamic_cast<CatNode *>(i->child[dir]) &&
++ i->is_type(NODE_TYPE_ALT);) {
++ if ((i->child[dir]->is_type(NODE_TYPE_CAT) &&
+ a->eq(i->child[dir]->child[dir])) ||
+ (a->eq(i->child[dir]))) {
+ // extract matching alt node
+ p->child[!dir] = i->child[!dir];
+ i->child[!dir] = subject;
+ subject = basic_simplify(i, dir);
+- if (dynamic_cast<CatNode *>(subject))
++ if (subject->is_type(NODE_TYPE_CAT))
+ a = subject->child[dir];
+ else
+ a = subject;
+@@ -502,7 +502,7 @@ Node *simplify_tree_base(Node *t, int dir, bool &mod)
+ }
+
+ // last altnode in chain check other dir as well
+- if ((dynamic_cast<CatNode *>(i) &&
++ if ((i->is_type(NODE_TYPE_CAT) &&
+ a->eq(i->child[dir])) || (a->eq(i))) {
+ count++;
+ if (t == p) {
+@@ -528,7 +528,7 @@ int debug_tree(Node *t)
+ {
+ int nodes = 1;
+
+- if (!dynamic_cast<ImportantNode *>(t)) {
++ if (!t->is_type(NODE_TYPE_IMPORTANT)) {
+ if (t->child[0])
+ nodes += debug_tree(t->child[0]);
+ if (t->child[1])
+@@ -539,30 +539,30 @@ int debug_tree(Node *t)
+
+ static void count_tree_nodes(Node *t, struct node_counts *counts)
+ {
+- if (dynamic_cast<AltNode *>(t)) {
++ if (t->is_type(NODE_TYPE_ALT)) {
+ counts->alt++;
+ count_tree_nodes(t->child[0], counts);
+ count_tree_nodes(t->child[1], counts);
+- } else if (dynamic_cast<CatNode *>(t)) {
++ } else if (t->is_type(NODE_TYPE_CAT)) {
+ counts->cat++;
+ count_tree_nodes(t->child[0], counts);
+ count_tree_nodes(t->child[1], counts);
+- } else if (dynamic_cast<PlusNode *>(t)) {
++ } else if (t->is_type(NODE_TYPE_PLUS)) {
+ counts->plus++;
+ count_tree_nodes(t->child[0], counts);
+- } else if (dynamic_cast<StarNode *>(t)) {
++ } else if (t->is_type(NODE_TYPE_STAR)) {
+ counts->star++;
+ count_tree_nodes(t->child[0], counts);
+- } else if (dynamic_cast<OptionalNode *>(t)) {
++ } else if (t->is_type(NODE_TYPE_OPTIONAL)) {
+ counts->optional++;
+ count_tree_nodes(t->child[0], counts);
+- } else if (dynamic_cast<CharNode *>(t)) {
++ } else if (t->is_type(NODE_TYPE_CHAR)) {
+ counts->charnode++;
+- } else if (dynamic_cast<AnyCharNode *>(t)) {
++ } else if (t->is_type(NODE_TYPE_ANYCHAR)) {
+ counts->any++;
+- } else if (dynamic_cast<CharSetNode *>(t)) {
++ } else if (t->is_type(NODE_TYPE_CHARSET)) {
+ counts->charset++;
+- } else if (dynamic_cast<NotCharSetNode *>(t)) {
++ } else if (t->is_type(NODE_TYPE_NOTCHARSET)) {
+ counts->notcharset++;
+ }
+ }
+@@ -635,7 +635,8 @@ Node *simplify_tree(Node *t, dfaflags_t flags)
+ void flip_tree(Node *node)
+ {
+ for (depth_first_traversal i(node); i; i++) {
+- if (CatNode *cat = dynamic_cast<CatNode *>(*i)) {
++ if ((*i)->is_type(NODE_TYPE_CAT)) {
++ CatNode *cat = static_cast<CatNode *>(*i);
+ swap(cat->child[0], cat->child[1]);
+ }
+ }
+diff --git a/parser/libapparmor_re/expr-tree.h b/parser/libapparmor_re/expr-tree.h
+index 551dd0eeb..8ada4a4a7 100644
+--- a/parser/libapparmor_re/expr-tree.h
++++ b/parser/libapparmor_re/expr-tree.h
+@@ -222,16 +222,43 @@ typedef struct Cases {
+
+ ostream &operator<<(ostream &os, Node &node);
+
++#define NODE_TYPE_NODE 0
++#define NODE_TYPE_INNER (1 << 0)
++#define NODE_TYPE_ONECHILD (1 << 1)
++#define NODE_TYPE_TWOCHILD (1 << 2)
++#define NODE_TYPE_LEAF (1 << 3)
++#define NODE_TYPE_EPS (1 << 4)
++#define NODE_TYPE_IMPORTANT (1 << 5)
++#define NODE_TYPE_C (1 << 6)
++#define NODE_TYPE_CHAR (1 << 7)
++#define NODE_TYPE_CHARSET (1 << 8)
++#define NODE_TYPE_NOTCHARSET (1 << 9)
++#define NODE_TYPE_ANYCHAR (1 << 10)
++#define NODE_TYPE_STAR (1 << 11)
++#define NODE_TYPE_OPTIONAL (1 << 12)
++#define NODE_TYPE_PLUS (1 << 13)
++#define NODE_TYPE_CAT (1 << 14)
++#define NODE_TYPE_ALT (1 << 15)
++#define NODE_TYPE_SHARED (1 << 16)
++#define NODE_TYPE_ACCEPT (1 << 17)
++#define NODE_TYPE_MATCHFLAG (1 << 18)
++#define NODE_TYPE_EXACTMATCHFLAG (1 << 19)
++#define NODE_TYPE_DENYMATCHFLAG (1 << 20)
++
+ /* An abstract node in the syntax tree. */
+ class Node {
+ public:
+- Node(): nullable(false), label(0) { child[0] = child[1] = 0; }
+- Node(Node *left): nullable(false), label(0)
++ Node(): nullable(false), type_flags(NODE_TYPE_NODE), label(0)
++ {
++ child[0] = child[1] = 0;
++ }
++ Node(Node *left): nullable(false), type_flags(NODE_TYPE_NODE), label(0)
+ {
+ child[0] = left;
+ child[1] = 0;
+ }
+- Node(Node *left, Node *right): nullable(false), label(0)
++ Node(Node *left, Node *right): nullable(false),
++ type_flags(NODE_TYPE_NODE), label(0)
+ {
+ child[0] = left;
+ child[1] = right;
+@@ -302,6 +329,13 @@ public:
+ NodeSet firstpos, lastpos, followpos;
+ /* child 0 is left, child 1 is right */
+ Node *child[2];
++ /*
++ * Bitmap that stores supported pointer casts for the Node, composed
++ * by the NODE_TYPE_* flags. This is used by is_type() as a substitute
++ * of costly dynamic_cast calls.
++ */
++ unsigned type_flags;
++ bool is_type(unsigned type) { return type_flags & type; }
+
+ unsigned int label; /* unique number for debug etc */
+ /**
+@@ -315,25 +349,34 @@ public:
+
+ class InnerNode: public Node {
+ public:
+- InnerNode(): Node() { };
+- InnerNode(Node *left): Node(left) { };
+- InnerNode(Node *left, Node *right): Node(left, right) { };
++ InnerNode(): Node() { type_flags |= NODE_TYPE_INNER; };
++ InnerNode(Node *left): Node(left) { type_flags |= NODE_TYPE_INNER; };
++ InnerNode(Node *left, Node *right): Node(left, right)
++ {
++ type_flags |= NODE_TYPE_INNER;
++ };
+ };
+
+ class OneChildNode: public InnerNode {
+ public:
+- OneChildNode(Node *left): InnerNode(left) { };
++ OneChildNode(Node *left): InnerNode(left)
++ {
++ type_flags |= NODE_TYPE_ONECHILD;
++ };
+ };
+
+ class TwoChildNode: public InnerNode {
+ public:
+- TwoChildNode(Node *left, Node *right): InnerNode(left, right) { };
++ TwoChildNode(Node *left, Node *right): InnerNode(left, right)
++ {
++ type_flags |= NODE_TYPE_TWOCHILD;
++ };
+ virtual int normalize_eps(int dir);
+ };
+
+ class LeafNode: public Node {
+ public:
+- LeafNode(): Node() { };
++ LeafNode(): Node() { type_flags |= NODE_TYPE_LEAF; };
+ virtual void normalize(int dir __attribute__((unused))) { return; }
+ };
+
+@@ -342,6 +385,7 @@ class EpsNode: public LeafNode {
+ public:
+ EpsNode(): LeafNode()
+ {
++ type_flags |= NODE_TYPE_EPS;
+ nullable = true;
+ label = 0;
+ }
+@@ -356,7 +400,7 @@ public:
+ void compute_lastpos() { }
+ int eq(Node *other)
+ {
+- if (dynamic_cast<EpsNode *>(other))
++ if (other->is_type(NODE_TYPE_EPS))
+ return 1;
+ return 0;
+ }
+@@ -373,7 +417,7 @@ public:
+ */
+ class ImportantNode: public LeafNode {
+ public:
+- ImportantNode(): LeafNode() { }
++ ImportantNode(): LeafNode() { type_flags |= NODE_TYPE_IMPORTANT; }
+ void compute_firstpos() { firstpos.insert(this); }
+ void compute_lastpos() { lastpos.insert(this); }
+ virtual void follow(Cases &cases) = 0;
+@@ -386,7 +430,7 @@ public:
+ */
+ class CNode: public ImportantNode {
+ public:
+- CNode(): ImportantNode() { }
++ CNode(): ImportantNode() { type_flags |= NODE_TYPE_C; }
+ int is_accept(void) { return false; }
+ int is_postprocess(void) { return false; }
+ };
+@@ -394,7 +438,7 @@ public:
+ /* Match one specific character (/c/). */
+ class CharNode: public CNode {
+ public:
+- CharNode(transchar c): c(c) { }
++ CharNode(transchar c): c(c) { type_flags |= NODE_TYPE_CHAR; }
+ void follow(Cases &cases)
+ {
+ NodeSet **x = &cases.cases[c];
+@@ -408,8 +452,8 @@ public:
+ }
+ int eq(Node *other)
+ {
+- CharNode *o = dynamic_cast<CharNode *>(other);
+- if (o) {
++ if (other->is_type(NODE_TYPE_CHAR)) {
++ CharNode *o = static_cast<CharNode *>(other);
+ return c == o->c;
+ }
+ return 0;
+@@ -439,7 +483,10 @@ public:
+ /* Match a set of characters (/[abc]/). */
+ class CharSetNode: public CNode {
+ public:
+- CharSetNode(Chars &chars): chars(chars) { }
++ CharSetNode(Chars &chars): chars(chars)
++ {
++ type_flags |= NODE_TYPE_CHARSET;
++ }
+ void follow(Cases &cases)
+ {
+ for (Chars::iterator i = chars.begin(); i != chars.end(); i++) {
+@@ -455,8 +502,11 @@ public:
+ }
+ int eq(Node *other)
+ {
+- CharSetNode *o = dynamic_cast<CharSetNode *>(other);
+- if (!o || chars.size() != o->chars.size())
++ if (!other->is_type(NODE_TYPE_CHARSET))
++ return 0;
++
++ CharSetNode *o = static_cast<CharSetNode *>(other);
++ if (chars.size() != o->chars.size())
+ return 0;
+
+ for (Chars::iterator i = chars.begin(), j = o->chars.begin();
+@@ -498,7 +548,10 @@ public:
+ /* Match all except one character (/[^abc]/). */
+ class NotCharSetNode: public CNode {
+ public:
+- NotCharSetNode(Chars &chars): chars(chars) { }
++ NotCharSetNode(Chars &chars): chars(chars)
++ {
++ type_flags |= NODE_TYPE_NOTCHARSET;
++ }
+ void follow(Cases &cases)
+ {
+ if (!cases.otherwise)
+@@ -522,8 +575,11 @@ public:
+ }
+ int eq(Node *other)
+ {
+- NotCharSetNode *o = dynamic_cast<NotCharSetNode *>(other);
+- if (!o || chars.size() != o->chars.size())
++ if (!other->is_type(NODE_TYPE_NOTCHARSET))
++ return 0;
++
++ NotCharSetNode *o = static_cast<NotCharSetNode *>(other);
++ if (chars.size() != o->chars.size())
+ return 0;
+
+ for (Chars::iterator i = chars.begin(), j = o->chars.begin();
+@@ -565,7 +621,7 @@ public:
+ /* Match any character (/./). */
+ class AnyCharNode: public CNode {
+ public:
+- AnyCharNode() { }
++ AnyCharNode() { type_flags |= NODE_TYPE_ANYCHAR; }
+ void follow(Cases &cases)
+ {
+ if (!cases.otherwise)
+@@ -579,7 +635,7 @@ public:
+ }
+ int eq(Node *other)
+ {
+- if (dynamic_cast<AnyCharNode *>(other))
++ if (other->is_type(NODE_TYPE_ANYCHAR))
+ return 1;
+ return 0;
+ }
+@@ -589,7 +645,11 @@ public:
+ /* Match a node zero or more times. (This is a unary operator.) */
+ class StarNode: public OneChildNode {
+ public:
+- StarNode(Node *left): OneChildNode(left) { nullable = true; }
++ StarNode(Node *left): OneChildNode(left)
++ {
++ type_flags |= NODE_TYPE_STAR;
++ nullable = true;
++ }
+ void compute_firstpos() { firstpos = child[0]->firstpos; }
+ void compute_lastpos() { lastpos = child[0]->lastpos; }
+ void compute_followpos()
+@@ -601,7 +661,7 @@ public:
+ }
+ int eq(Node *other)
+ {
+- if (dynamic_cast<StarNode *>(other))
++ if (other->is_type(NODE_TYPE_STAR))
+ return child[0]->eq(other->child[0]);
+ return 0;
+ }
+@@ -618,12 +678,16 @@ public:
+ /* Match a node zero or one times. */
+ class OptionalNode: public OneChildNode {
+ public:
+- OptionalNode(Node *left): OneChildNode(left) { nullable = true; }
++ OptionalNode(Node *left): OneChildNode(left)
++ {
++ type_flags |= NODE_TYPE_OPTIONAL;
++ nullable = true;
++ }
+ void compute_firstpos() { firstpos = child[0]->firstpos; }
+ void compute_lastpos() { lastpos = child[0]->lastpos; }
+ int eq(Node *other)
+ {
+- if (dynamic_cast<OptionalNode *>(other))
++ if (other->is_type(NODE_TYPE_OPTIONAL))
+ return child[0]->eq(other->child[0]);
+ return 0;
+ }
+@@ -638,7 +702,9 @@ public:
+ /* Match a node one or more times. (This is a unary operator.) */
+ class PlusNode: public OneChildNode {
+ public:
+- PlusNode(Node *left): OneChildNode(left) {
++ PlusNode(Node *left): OneChildNode(left)
++ {
++ type_flags |= NODE_TYPE_PLUS;
+ }
+ void compute_nullable() { nullable = child[0]->nullable; }
+ void compute_firstpos() { firstpos = child[0]->firstpos; }
+@@ -651,7 +717,7 @@ public:
+ }
+ }
+ int eq(Node *other) {
+- if (dynamic_cast<PlusNode *>(other))
++ if (other->is_type(NODE_TYPE_PLUS))
+ return child[0]->eq(other->child[0]);
+ return 0;
+ }
+@@ -667,7 +733,10 @@ public:
+ /* Match a pair of consecutive nodes. */
+ class CatNode: public TwoChildNode {
+ public:
+- CatNode(Node *left, Node *right): TwoChildNode(left, right) { }
++ CatNode(Node *left, Node *right): TwoChildNode(left, right)
++ {
++ type_flags |= NODE_TYPE_CAT;
++ }
+ void compute_nullable()
+ {
+ nullable = child[0]->nullable && child[1]->nullable;
+@@ -695,7 +764,7 @@ public:
+ }
+ int eq(Node *other)
+ {
+- if (dynamic_cast<CatNode *>(other)) {
++ if (other->is_type(NODE_TYPE_CAT)) {
+ if (!child[0]->eq(other->child[0]))
+ return 0;
+ return child[1]->eq(other->child[1]);
+@@ -730,7 +799,10 @@ public:
+ /* Match one of two alternative nodes. */
+ class AltNode: public TwoChildNode {
+ public:
+- AltNode(Node *left, Node *right): TwoChildNode(left, right) { }
++ AltNode(Node *left, Node *right): TwoChildNode(left, right)
++ {
++ type_flags |= NODE_TYPE_ALT;
++ }
+ void compute_nullable()
+ {
+ nullable = child[0]->nullable || child[1]->nullable;
+@@ -745,7 +817,7 @@ public:
+ }
+ int eq(Node *other)
+ {
+- if (dynamic_cast<AltNode *>(other)) {
++ if (other->is_type(NODE_TYPE_ALT)) {
+ if (!child[0]->eq(other->child[0]))
+ return 0;
+ return child[1]->eq(other->child[1]);
+@@ -780,7 +852,10 @@ public:
+
+ class SharedNode: public ImportantNode {
+ public:
+- SharedNode() { }
++ SharedNode()
++ {
++ type_flags |= NODE_TYPE_SHARED;
++ }
+ void release(void)
+ {
+ /* don't delete SharedNodes via release as they are shared, and
+@@ -803,14 +878,17 @@ public:
+ */
+ class AcceptNode: public SharedNode {
+ public:
+- AcceptNode() { }
++ AcceptNode() { type_flags |= NODE_TYPE_ACCEPT; }
+ int is_accept(void) { return true; }
+ int is_postprocess(void) { return false; }
+ };
+
+ class MatchFlag: public AcceptNode {
+ public:
+- MatchFlag(uint32_t flag, uint32_t audit): flag(flag), audit(audit) { }
++ MatchFlag(uint32_t flag, uint32_t audit): flag(flag), audit(audit)
++ {
++ type_flags |= NODE_TYPE_MATCHFLAG;
++ }
+ ostream &dump(ostream &os) { return os << "< 0x" << hex << flag << '>'; }
+
+ uint32_t flag;
+@@ -819,12 +897,18 @@ public:
+
+ class ExactMatchFlag: public MatchFlag {
+ public:
+- ExactMatchFlag(uint32_t flag, uint32_t audit): MatchFlag(flag, audit) {}
++ ExactMatchFlag(uint32_t flag, uint32_t audit): MatchFlag(flag, audit)
++ {
++ type_flags |= NODE_TYPE_EXACTMATCHFLAG;
++ }
+ };
+
+ class DenyMatchFlag: public MatchFlag {
+ public:
+- DenyMatchFlag(uint32_t flag, uint32_t quiet): MatchFlag(flag, quiet) {}
++ DenyMatchFlag(uint32_t flag, uint32_t quiet): MatchFlag(flag, quiet)
++ {
++ type_flags |= NODE_TYPE_DENYMATCHFLAG;
++ }
+ };
+
+ /* Traverse the syntax tree depth-first in an iterator-like manner. */
+@@ -833,7 +917,7 @@ class depth_first_traversal {
+ void push_left(Node *node) {
+ pos.push(node);
+
+- while (dynamic_cast<InnerNode *>(node)) {
++ while (node->is_type(NODE_TYPE_INNER)) {
+ pos.push(node->child[0]);
+ node = node->child[0];
+ }
+diff --git a/parser/libapparmor_re/hfa.cc b/parser/libapparmor_re/hfa.cc
+index 9cea4c3fc..e1ef1803b 100644
+--- a/parser/libapparmor_re/hfa.cc
++++ b/parser/libapparmor_re/hfa.cc
+@@ -1352,17 +1352,18 @@ int accept_perms(NodeSet *state, perms_t &perms, bool filedfa)
+ return error;
+
+ for (NodeSet::iterator i = state->begin(); i != state->end(); i++) {
+- MatchFlag *match;
+- if (!(match = dynamic_cast<MatchFlag *>(*i)))
++ if (!(*i)->is_type(NODE_TYPE_MATCHFLAG))
+ continue;
+- if (dynamic_cast<ExactMatchFlag *>(match)) {
++
++ MatchFlag *match = static_cast<MatchFlag *>(*i);
++ if (match->is_type(NODE_TYPE_EXACTMATCHFLAG)) {
+ /* exact match only ever happens with x */
+ if (filedfa && !is_merged_x_consistent(exact_match_allow,
+ match->flag))
+ error = 1;;
+ exact_match_allow |= match->flag;
+ exact_audit |= match->audit;
+- } else if (dynamic_cast<DenyMatchFlag *>(match)) {
++ } else if (match->is_type(NODE_TYPE_DENYMATCHFLAG)) {
+ perms.deny |= match->flag;
+ perms.quiet |= match->audit;
+ } else {
+--
+2.34.1
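
The mechanical core of the patch above: every constructor in the Node hierarchy ORs its NODE_TYPE_* bit into a type_flags field on the base class, and is_type() answers "can this pointer be treated as type X" with a single bitwise AND instead of an RTTI lookup. A rough Go transliteration of the same pattern, with invented names, purely to illustrate the idea (the real implementation is the C++ shown in the patch):

    package main

    import "fmt"

    // one bit per kind in the hierarchy, mirroring the NODE_TYPE_* defines
    const (
        typeInner uint32 = 1 << iota
        typeTwoChild
        typeLeaf
        typeChar
        typeAlt
    )

    // node plays the role of the C++ base class: it carries a bitmap of every
    // type the concrete value may be treated as.
    type node struct {
        typeFlags uint32
    }

    // isType is the analogue of Node::is_type(): one bitwise AND instead of a
    // dynamic_cast/RTTI walk.
    func (n *node) isType(t uint32) bool { return n.typeFlags&t != 0 }

    // altNode stands in for AltNode: an inner, two-child alternation node.
    type altNode struct {
        node
        left, right *node
    }

    func newAltNode(left, right *node) *altNode {
        n := &altNode{left: left, right: right}
        n.typeFlags = typeInner | typeTwoChild | typeAlt
        return n
    }

    // charNode stands in for CharNode: a leaf that matches one character.
    type charNode struct {
        node
        c byte
    }

    func newCharNode(c byte) *charNode {
        n := &charNode{c: c}
        n.typeFlags = typeLeaf | typeChar
        return n
    }

    func main() {
        a, b := newCharNode('a'), newCharNode('b')
        alt := newAltNode(&a.node, &b.node)

        // the equivalent of asking whether dynamic_cast<TwoChildNode*> succeeds
        fmt.Println(alt.isType(typeTwoChild)) // true
        fmt.Println(alt.isType(typeChar))     // false
    }
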
diff --git a/build-aux/snap/snapcraft.yaml b/build-aux/snap/snapcraft.yaml
index ef889a1db5..3596c567bd 100644
--- a/build-aux/snap/snapcraft.yaml
+++ b/build-aux/snap/snapcraft.yaml
@@ -177,6 +177,10 @@ parts:
for feature in mqueue userns; do
wget https://git.launchpad.net/ubuntu/+source/apparmor/plain/debian/patches/ubuntu/add-${feature}-support.patch?h=ubuntu/lunar -O - | patch -p1
done
+ # apply local apparmor patches
+ for patch in $SNAPCRAFT_PROJECT_DIR/build-aux/snap/patches/apparmor/*; do
+ patch -p1 < $patch
+ done
override-build: |
cd $SNAPCRAFT_PART_BUILD/libraries/libapparmor
./autogen.sh
diff --git a/client/model.go b/client/model.go
index 72a307a368..57bc53541e 100644
--- a/client/model.go
+++ b/client/model.go
@@ -24,7 +24,12 @@ import (
"context"
"encoding/json"
"fmt"
+ "io"
+ "mime/multipart"
+ "net/textproto"
"net/url"
+ "os"
+ "path/filepath"
"golang.org/x/xerrors"
@@ -50,6 +55,114 @@ func (client *Client) Remodel(b []byte) (changeID string, err error) {
return client.doAsync("POST", "/v2/model", nil, headers, bytes.NewReader(data))
}
+// RemodelOffline tries to remodel the system with the given model assertion
+// and local snaps and assertion files.
+func (client *Client) RemodelOffline(
+ model []byte, snapPaths, assertPaths []string) (changeID string, err error) {
+
+ // Check if all files exist before starting the goroutine
+ snapFiles, err := checkAndOpenFiles(snapPaths)
+ if err != nil {
+ return "", err
+ }
+ assertsFiles, err := checkAndOpenFiles(assertPaths)
+ if err != nil {
+ return "", err
+ }
+
+ pr, pw := io.Pipe()
+ mw := multipart.NewWriter(pw)
+ go sendRemodelFiles(model, snapPaths, snapFiles, assertsFiles, pw, mw)
+
+ headers := map[string]string{
+ "Content-Type": mw.FormDataContentType(),
+ }
+
+ _, changeID, err = client.doAsyncFull("POST", "/v2/model", nil, headers, pr, doNoTimeoutAndRetry)
+ return changeID, err
+}
+
+func checkAndOpenFiles(paths []string) ([]*os.File, error) {
+ var files []*os.File
+ for _, path := range paths {
+ f, err := os.Open(path)
+ if err != nil {
+ for _, openFile := range files {
+ openFile.Close()
+ }
+ return nil, fmt.Errorf("cannot open %q: %w", path, err)
+ }
+
+ files = append(files, f)
+ }
+
+ return files, nil
+}
+
+func createAssertionPart(name string, mw *multipart.Writer) (io.Writer, error) {
+ h := make(textproto.MIMEHeader)
+ h.Set("Content-Disposition",
+ fmt.Sprintf(`form-data; name="%s"`, name))
+ h.Set("Content-Type", asserts.MediaType)
+ return mw.CreatePart(h)
+}
+
+func sendRemodelFiles(model []byte, paths []string, files, assertFiles []*os.File, pw *io.PipeWriter, mw *multipart.Writer) {
+ defer func() {
+ for _, f := range files {
+ f.Close()
+ }
+ }()
+
+ w, err := createAssertionPart("new-model", mw)
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ _, err = w.Write(model)
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+
+ for _, file := range assertFiles {
+ if err := sendPartFromFile(file,
+ func() (io.Writer, error) {
+ return createAssertionPart("assertion", mw)
+ }); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ }
+
+ for i, file := range files {
+ if err := sendPartFromFile(file,
+ func() (io.Writer, error) {
+ return mw.CreateFormFile("snap", filepath.Base(paths[i]))
+ }); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ }
+
+ mw.Close()
+ pw.Close()
+}
+
+func sendPartFromFile(file *os.File, writeHeader func() (io.Writer, error)) error {
+ fw, err := writeHeader()
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(fw, file)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
// CurrentModelAssertion returns the current model assertion
func (client *Client) CurrentModelAssertion() (*asserts.Model, error) {
assert, err := currentAssertion(client, "/v2/model")
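
For context, this is a minimal, self-contained sketch of the streaming pattern RemodelOffline relies on: the multipart body is produced by a goroutine and fed through an io.Pipe, so large snap files are streamed to the daemon instead of being buffered in memory. The uploadSnap helper and the endpoint argument are illustrative only and not part of snapd.

package example

import (
	"io"
	"mime/multipart"
	"net/http"
	"os"
	"path/filepath"
)

// uploadSnap streams a single file as a multipart form part; the HTTP
// request reads from one end of the pipe while the goroutine writes to
// the other.
func uploadSnap(c *http.Client, endpoint, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	pr, pw := io.Pipe()
	mw := multipart.NewWriter(pw)
	go func() {
		part, err := mw.CreateFormFile("snap", filepath.Base(path))
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		if _, err := io.Copy(part, f); err != nil {
			pw.CloseWithError(err)
			return
		}
		// closing the multipart writer emits the final boundary
		pw.CloseWithError(mw.Close())
	}()

	req, err := http.NewRequest("POST", endpoint, pr)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", mw.FormDataContentType())
	rsp, err := c.Do(req)
	if err != nil {
		return err
	}
	return rsp.Body.Close()
}
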
diff --git a/client/model_test.go b/client/model_test.go
index c9e535123e..2a23c55568 100644
--- a/client/model_test.go
+++ b/client/model_test.go
@@ -24,11 +24,15 @@ import (
"errors"
"io/ioutil"
"net/http"
+ "path/filepath"
+ "regexp"
+ "strings"
"golang.org/x/xerrors"
. "gopkg.in/check.v1"
"github.com/snapcore/snapd/asserts"
+ "github.com/snapcore/snapd/dirs"
)
const happyModelAssertionResponse = `type: model
@@ -173,3 +177,89 @@ func (cs *clientSuite) TestClientCurrentModelAssertionErrIsWrapped(c *C) {
var e xerrors.Wrapper
c.Assert(err, Implements, &e)
}
+
+func (cs *clientSuite) TestClientOfflineRemodel(c *C) {
+ cs.status = 202
+ cs.rsp = `{
+ "type": "async",
+ "status-code": 202,
+ "result": {},
+ "change": "d728"
+ }`
+ rawModel := []byte(`some-model`)
+
+ var err error
+ snapPaths := []string{filepath.Join(dirs.GlobalRootDir, "snap1.snap")}
+ err = ioutil.WriteFile(snapPaths[0], []byte("snap1"), 0644)
+ c.Assert(err, IsNil)
+ assertsPaths := []string{filepath.Join(dirs.GlobalRootDir, "f1.asserts")}
+ err = ioutil.WriteFile(assertsPaths[0], []byte("asserts1"), 0644)
+ c.Assert(err, IsNil)
+
+ id, err := cs.cli.RemodelOffline(rawModel, snapPaths, assertsPaths)
+ c.Assert(err, IsNil)
+ c.Check(id, Equals, "d728")
+ contentTypeReStr := "^multipart/form-data; boundary=([A-Za-z0-9]*)$"
+ contentType := cs.req.Header.Get("Content-Type")
+ c.Assert(contentType, Matches, contentTypeReStr)
+ contentTypeRe := regexp.MustCompile(contentTypeReStr)
+ matches := contentTypeRe.FindStringSubmatch(contentType)
+ c.Assert(len(matches), Equals, 2)
+ boundary := "--" + matches[1]
+
+ body, err := ioutil.ReadAll(cs.req.Body)
+ c.Assert(err, IsNil)
+ expected := boundary + `
+Content-Disposition: form-data; name="new-model"
+Content-Type: application/x.ubuntu.assertion
+
+some-model
+` + boundary + `
+Content-Disposition: form-data; name="assertion"
+Content-Type: application/x.ubuntu.assertion
+
+asserts1
+` + boundary + `
+Content-Disposition: form-data; name="snap"; filename="snap1.snap"
+Content-Type: application/octet-stream
+
+snap1
+` + boundary + `--
+`
+ expected = strings.Replace(expected, "\n", "\r\n", -1)
+ c.Assert(string(body), Equals, expected)
+}
+
+func (cs *clientSuite) TestClientOfflineRemodelServerError(c *C) {
+ cs.status = 404
+ cs.rsp = noSerialAssertionYetResponse
+ rawModel := []byte(`some-model`)
+
+ var err error
+ snapPaths := []string{filepath.Join(dirs.GlobalRootDir, "snap1.snap")}
+ err = ioutil.WriteFile(snapPaths[0], []byte("snap1"), 0644)
+ c.Assert(err, IsNil)
+ assertsPaths := []string{filepath.Join(dirs.GlobalRootDir, "f1.asserts")}
+ err = ioutil.WriteFile(assertsPaths[0], []byte("asserts1"), 0644)
+ c.Assert(err, IsNil)
+
+ id, err := cs.cli.RemodelOffline(rawModel, snapPaths, assertsPaths)
+ c.Assert(err.Error(), Equals, "no serial assertion yet")
+ c.Check(id, Equals, "")
+}
+
+func (cs *clientSuite) TestClientOfflineRemodelNoFile(c *C) {
+ rawModel := []byte(`some-model`)
+
+ paths := []string{filepath.Join(dirs.GlobalRootDir, "snap1.snap")}
+
+ // No snap file
+ id, err := cs.cli.RemodelOffline(rawModel, paths, nil)
+ c.Assert(err, ErrorMatches, `cannot open .*: no such file or directory`)
+ c.Assert(id, Equals, "")
+
+ // No assertions file
+ id, err = cs.cli.RemodelOffline(rawModel, nil, paths)
+ c.Assert(err, ErrorMatches, `cannot open .*: no such file or directory`)
+ c.Assert(id, Equals, "")
+}
diff --git a/client/systems.go b/client/systems.go
index 44af029478..e6485360ae 100644
--- a/client/systems.go
+++ b/client/systems.go
@@ -192,6 +192,7 @@ func (client *Client) SystemDetails(systemLabel string) (*SystemDetails, error)
if _, err := client.doSync("GET", "/v2/systems/"+systemLabel, nil, nil, nil, &rsp); err != nil {
return nil, xerrors.Errorf("cannot get details for system %q: %v", systemLabel, err)
}
+ gadget.SetEnclosingVolumeInStructs(rsp.Volumes)
return &rsp, nil
}
diff --git a/client/systems_test.go b/client/systems_test.go
index 7043140f56..52b6e81eab 100644
--- a/client/systems_test.go
+++ b/client/systems_test.go
@@ -272,6 +272,15 @@ func (cs *clientSuite) TestSystemDetailsHappy(c *check.C) {
c.Assert(err, check.IsNil)
c.Check(cs.req.Method, check.Equals, "GET")
c.Check(cs.req.URL.Path, check.Equals, "/v2/systems/20190102")
+ vols := map[string]*gadget.Volume{
+ "pc": {
+ Schema: "gpt",
+ Bootloader: "grub",
+ Structure: []gadget.VolumeStructure{
+ {Name: "mbr", Type: "mbr", Size: 440},
+ },
+ }}
+ gadget.SetEnclosingVolumeInStructs(vols)
c.Check(sys, check.DeepEquals, &client.SystemDetails{
Current: true,
Label: "20200101",
@@ -295,15 +304,7 @@ func (cs *clientSuite) TestSystemDetailsHappy(c *check.C) {
StorageSafety: "prefer-encrypted",
Type: "cryptsetup",
},
- Volumes: map[string]*gadget.Volume{
- "pc": {
- Schema: "gpt",
- Bootloader: "grub",
- Structure: []gadget.VolumeStructure{
- {Name: "mbr", Type: "mbr", Size: 440},
- },
- },
- },
+ Volumes: vols,
})
}
diff --git a/cmd/snap-bootstrap/cmd_initramfs_mounts.go b/cmd/snap-bootstrap/cmd_initramfs_mounts.go
index ff7077bb3f..9f906d3061 100644
--- a/cmd/snap-bootstrap/cmd_initramfs_mounts.go
+++ b/cmd/snap-bootstrap/cmd_initramfs_mounts.go
@@ -45,6 +45,7 @@ import (
"github.com/snapcore/snapd/logger"
"github.com/snapcore/snapd/osutil"
"github.com/snapcore/snapd/osutil/disks"
+ "github.com/snapcore/snapd/snapdtool"
// to set sysconfig.ApplyFilesystemOnlyDefaultsImpl
_ "github.com/snapcore/snapd/overlord/configstate/configcore"
@@ -79,6 +80,8 @@ func (c *cmdInitramfsMounts) Execute([]string) error {
boot.HasFDESetupHook = hasFDESetupHook
boot.RunFDESetupHook = runFDESetupHook
+ logger.Noticef("snap-bootstrap version %v starting", snapdtool.Version)
+
return generateInitramfsMounts()
}
diff --git a/cmd/snap-bootstrap/cmd_initramfs_mounts_test.go b/cmd/snap-bootstrap/cmd_initramfs_mounts_test.go
index 07e2337f37..8e1764fa1a 100644
--- a/cmd/snap-bootstrap/cmd_initramfs_mounts_test.go
+++ b/cmd/snap-bootstrap/cmd_initramfs_mounts_test.go
@@ -50,6 +50,7 @@ import (
"github.com/snapcore/snapd/seed"
"github.com/snapcore/snapd/seed/seedtest"
"github.com/snapcore/snapd/snap"
+ "github.com/snapcore/snapd/snapdtool"
"github.com/snapcore/snapd/systemd"
"github.com/snapcore/snapd/testutil"
"github.com/snapcore/snapd/timings"
@@ -693,6 +694,12 @@ func (s *baseInitramfsMountsSuite) mockSystemdMountSequence(c *C, mounts []syste
}
func (s *initramfsMountsSuite) TestInitramfsMountsInstallModeHappy(c *C) {
+ logbuf, restore := logger.MockLogger()
+ defer restore()
+
+ restore = snapdtool.MockVersion("1.2.3")
+ defer restore()
+
s.mockProcCmdlineContent(c, "snapd_recovery_mode=install snapd_recovery_system="+s.sysLabel)
// ensure that we check that access to sealed keys were locked
@@ -702,7 +709,7 @@ func (s *initramfsMountsSuite) TestInitramfsMountsInstallModeHappy(c *C) {
return nil
})()
- restore := s.mockSystemdMountSequence(c, []systemdMount{
+ restore = s.mockSystemdMountSequence(c, []systemdMount{
s.ubuntuLabelMount("ubuntu-seed", "install"),
s.makeSeedSnapSystemdMount(snap.TypeSnapd),
s.makeSeedSnapSystemdMount(snap.TypeKernel),
@@ -732,6 +739,8 @@ grade=signed
c.Check(cloudInitDisable, testutil.FilePresent)
c.Check(sealedKeysLocked, Equals, true)
+
+ c.Check(logbuf.String(), testutil.Contains, "snap-bootstrap version 1.2.3 starting\n")
}
func (s *initramfsMountsSuite) TestInitramfsMountsInstallModeBootFlagsSet(c *C) {
diff --git a/cmd/snap/cmd_aliases.go b/cmd/snap/cmd_aliases.go
index d0cd10aaa0..30817b69cf 100644
--- a/cmd/snap/cmd_aliases.go
+++ b/cmd/snap/cmd_aliases.go
@@ -44,10 +44,6 @@ The aliases command lists all aliases available in the system and their status.
$ snap aliases <snap>
Lists only the aliases defined by the specified snap.
-
-An alias noted as undefined means it was explicitly enabled or disabled but is
-not defined in the current revision of the snap, possibly temporarily (e.g.
-because of a revert). This can cleared with 'snap alias --reset'.
`)
func init() {
diff --git a/cmd/snap/cmd_aliases_test.go b/cmd/snap/cmd_aliases_test.go
index 4563c5035f..6e731ab405 100644
--- a/cmd/snap/cmd_aliases_test.go
+++ b/cmd/snap/cmd_aliases_test.go
@@ -38,10 +38,6 @@ The aliases command lists all aliases available in the system and their status.
$ snap aliases <snap>
Lists only the aliases defined by the specified snap.
-
-An alias noted as undefined means it was explicitly enabled or disabled but is
-not defined in the current revision of the snap, possibly temporarily (e.g.
-because of a revert). This can cleared with 'snap alias --reset'.
`
s.testSubCommandHelp(c, "aliases", msg)
}
diff --git a/cmd/snap/cmd_help.go b/cmd/snap/cmd_help.go
index f269db3e8b..942f1a4eaf 100644
--- a/cmd/snap/cmd_help.go
+++ b/cmd/snap/cmd_help.go
@@ -240,7 +240,7 @@ var helpCategories = []helpCategory{
}, {
Label: i18n.G("Device"),
Description: i18n.G("manage device"),
- Commands: []string{"model", "reboot", "recovery"},
+ Commands: []string{"model", "remodel", "reboot", "recovery"},
}, {
Label: i18n.G("Warnings"),
Other: true,
diff --git a/cmd/snap/cmd_remodel.go b/cmd/snap/cmd_remodel.go
index 335abdfab3..efd65eafec 100644
--- a/cmd/snap/cmd_remodel.go
+++ b/cmd/snap/cmd_remodel.go
@@ -36,29 +36,40 @@ revision or a full new model.
In the process it applies any implied changes to the device: new required
snaps, new kernel or gadget etc.
+
+Snaps and assertions are downloaded from the store unless they are provided as
+local files specified by the --snap and --assertion options. When these
+options are used, all the snaps and assertions needed for the remodel must be
+provided locally, otherwise the remodel will fail.
`)
)
type cmdRemodel struct {
waitMixin
+ SnapFiles []string `long:"snap"`
+ AssertionFiles []string `long:"assertion"`
RemodelOptions struct {
NewModelFile flags.Filename
} `positional-args:"true" required:"true"`
}
func init() {
- cmd := addCommand("remodel",
+ addCommand("remodel",
shortRemodelHelp,
longRemodelHelp,
func() flags.Commander {
return &cmdRemodel{}
- }, nil, []argDesc{{
+ },
+ waitDescs.also(map[string]string{
+ "snap": i18n.G("Use one or more locally available snaps."),
+ "assertion": i18n.G("Use one or more locally available assertion files."),
+ }),
+ []argDesc{{
// TRANSLATORS: This needs to begin with < and end with >
name: i18n.G("<new model file>"),
// TRANSLATORS: This should not start with a lowercase letter.
desc: i18n.G("New model file"),
}})
- cmd.hidden = true
}
func (x *cmdRemodel) Execute(args []string) error {
@@ -70,9 +81,20 @@ func (x *cmdRemodel) Execute(args []string) error {
if err != nil {
return err
}
- changeID, err := x.client.Remodel(modelData)
- if err != nil {
- return fmt.Errorf("cannot remodel: %v", err)
+
+ var changeID string
+ if len(x.SnapFiles) > 0 || len(x.AssertionFiles) > 0 {
+ // don't log the request's body as it will be large
+ x.client.SetMayLogBody(false)
+ changeID, err = x.client.RemodelOffline(modelData, x.SnapFiles, x.AssertionFiles)
+ if err != nil {
+ return fmt.Errorf("cannot do offline remodel: %v", err)
+ }
+ } else {
+ changeID, err = x.client.Remodel(modelData)
+ if err != nil {
+ return fmt.Errorf("cannot remodel: %v", err)
+ }
}
if _, err := x.wait(changeID); err != nil {
diff --git a/cmd/snap/cmd_remodel_test.go b/cmd/snap/cmd_remodel_test.go
new file mode 100644
index 0000000000..f55cc51433
--- /dev/null
+++ b/cmd/snap/cmd_remodel_test.go
@@ -0,0 +1,109 @@
+// -*- Mode: Go; indent-tabs-mode: t -*-
+
+/*
+ * Copyright (C) 2023 Canonical Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 3 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package main_test
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "path/filepath"
+
+ . "gopkg.in/check.v1"
+
+ snap "github.com/snapcore/snapd/cmd/snap"
+ "github.com/snapcore/snapd/dirs"
+)
+
+const remodelOk = `{
+ "type": "async",
+ "status-code": 202,
+ "status": "OK",
+ "change": "101"
+}`
+
+const remodelError = `{
+ "type": "error",
+ "result": {
+ "message": "bad snap",
+ "kind": "bad snap"
+ },
+ "status-code": 400
+}`
+
+func (s *SnapSuite) TestRemodelOfflineOk(c *C) {
+ n := 0
+
+ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) {
+ c.Check(r.Method, Equals, "POST")
+ c.Check(r.URL.Path, Equals, "/v2/model")
+ w.WriteHeader(202)
+ fmt.Fprint(w, remodelOk)
+ n++
+ })
+
+ var err error
+ modelPath := filepath.Join(dirs.GlobalRootDir, "new-model")
+ err = os.WriteFile(modelPath, []byte("snap1"), 0644)
+ c.Assert(err, IsNil)
+ snapPath := filepath.Join(dirs.GlobalRootDir, "snap1.snap")
+ err = os.WriteFile(snapPath, []byte("snap1"), 0644)
+ c.Assert(err, IsNil)
+
+ rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"remodel", "--no-wait", "--snap", snapPath, modelPath})
+
+ c.Assert(err, IsNil)
+ c.Assert(rest, DeepEquals, []string{})
+ c.Assert(n, Equals, 1)
+
+ c.Check(s.Stdout(), Matches, "101\n")
+ c.Check(s.Stderr(), Equals, "")
+
+ s.ResetStdStreams()
+}
+
+func (s *SnapSuite) TestRemodelOfflineError(c *C) {
+ n := 0
+
+ s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) {
+ c.Check(r.Method, Equals, "POST")
+ c.Check(r.URL.Path, Equals, "/v2/model")
+ w.WriteHeader(400)
+ fmt.Fprint(w, remodelError)
+ n++
+ })
+
+ var err error
+ modelPath := filepath.Join(dirs.GlobalRootDir, "new-model")
+ err = os.WriteFile(modelPath, []byte("snap1"), 0644)
+ c.Assert(err, IsNil)
+ snapPath := filepath.Join(dirs.GlobalRootDir, "snap1.snap")
+ err = os.WriteFile(snapPath, []byte("snap1"), 0644)
+ c.Assert(err, IsNil)
+
+ _, err = snap.Parser(snap.Client()).ParseArgs([]string{"remodel", "--no-wait", "--snap", snapPath, modelPath})
+
+ c.Assert(err.Error(), Equals, "cannot do offline remodel: bad snap")
+ c.Check(n, Equals, 1)
+
+ c.Check(s.Stdout(), Matches, "")
+ c.Check(s.Stderr(), Equals, "")
+
+ s.ResetStdStreams()
+}
diff --git a/cmd/snapd-apparmor/main_test.go b/cmd/snapd-apparmor/main_test.go
index 447528e977..827cc970be 100644
--- a/cmd/snapd-apparmor/main_test.go
+++ b/cmd/snapd-apparmor/main_test.go
@@ -139,7 +139,6 @@ func (s *mainSuite) TestLoadAppArmorProfiles(c *C) {
// check arguments to the parser are as expected
c.Assert(parserCmd.Calls(), DeepEquals, [][]string{
{"apparmor_parser", "--replace", "--write-cache",
- "-O", "no-expr-simplify",
fmt.Sprintf("--cache-loc=%s/var/cache/apparmor", dirs.GlobalRootDir),
profile}})
diff --git a/daemon/api.go b/daemon/api.go
index d7e4fe0b5b..5f5b9cd5d4 100644
--- a/daemon/api.go
+++ b/daemon/api.go
@@ -158,8 +158,8 @@ var (
assertstateRefreshSnapAssertions = assertstate.RefreshSnapAssertions
assertstateRestoreValidationSetsTracking = assertstate.RestoreValidationSetsTracking
- aspectstateGet = aspectstate.Get
- aspectstateSet = aspectstate.Set
+ aspectstateGetAspect = aspectstate.GetAspect
+ aspectstateSetAspect = aspectstate.SetAspect
)
func ensureStateSoonImpl(st *state.State) {
diff --git a/daemon/api_aspects.go b/daemon/api_aspects.go
index 5003a2b872..74a36c94fd 100644
--- a/daemon/api_aspects.go
+++ b/daemon/api_aspects.go
@@ -25,6 +25,7 @@ import (
"net/http"
"github.com/snapcore/snapd/aspects"
+ "github.com/snapcore/snapd/overlord/aspectstate"
"github.com/snapcore/snapd/overlord/auth"
"github.com/snapcore/snapd/strutil"
)
@@ -53,9 +54,14 @@ func getAspect(c *Command, r *http.Request, _ *auth.UserState) Response {
st.Lock()
defer st.Unlock()
+ tx, err := aspectstate.NewTransaction(st, account, bundleName)
+ if err != nil {
+ return toAPIError(err)
+ }
+
for _, field := range fields {
var value interface{}
- err := aspectstateGet(c.d.state, account, bundleName, aspect, field, &value)
+ err := aspectstateGetAspect(tx, account, bundleName, aspect, field, &value)
if err != nil {
if errors.Is(err, &aspects.FieldNotFoundError{}) {
// keep looking; return partial result, if only some fields are found
@@ -90,13 +96,22 @@ func setAspect(c *Command, r *http.Request, _ *auth.UserState) Response {
st.Lock()
defer st.Unlock()
+ tx, err := aspectstate.NewTransaction(st, account, bundleName)
+ if err != nil {
+ return toAPIError(err)
+ }
+
for field, value := range values {
- err := aspectstateSet(c.d.state, account, bundleName, aspect, field, value)
+ err := aspectstateSetAspect(tx, account, bundleName, aspect, field, value)
if err != nil {
return toAPIError(err)
}
}
+ if err := tx.Commit(); err != nil {
+ return toAPIError(err)
+ }
+
// NOTE: could be sync but this is closer to the final version and the conf API
summary := fmt.Sprintf("Set aspect %s/%s/%s", account, bundleName, aspect)
chg := newChange(st, "set-aspect", summary, nil, nil)
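
As a reading aid, this is a minimal sketch of the read-modify-commit flow the set handler now follows, written only against the signatures visible in this diff (aspectstate.NewTransaction, aspectstate.SetAspect, Transaction.Commit); presumably the accumulated changes only reach the state databags when Commit is called. The setWifiSSID helper is hypothetical and not snapd code.

import (
	"github.com/snapcore/snapd/overlord/aspectstate"
	"github.com/snapcore/snapd/overlord/state"
)

func setWifiSSID(st *state.State, ssid string) error {
	st.Lock()
	defer st.Unlock()

	tx, err := aspectstate.NewTransaction(st, "system", "network")
	if err != nil {
		return err
	}
	// writes go into the transaction, not directly into state
	if err := aspectstate.SetAspect(tx, "system", "network", "wifi-setup", "ssid", ssid); err != nil {
		return err
	}
	// Commit persists the accumulated changes
	return tx.Commit()
}
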
diff --git a/daemon/api_aspects_test.go b/daemon/api_aspects_test.go
index f3d5d691ad..432abe34b4 100644
--- a/daemon/api_aspects_test.go
+++ b/daemon/api_aspects_test.go
@@ -32,6 +32,7 @@ import (
"github.com/snapcore/snapd/aspects"
"github.com/snapcore/snapd/client"
"github.com/snapcore/snapd/daemon"
+ "github.com/snapcore/snapd/overlord"
"github.com/snapcore/snapd/overlord/state"
"github.com/snapcore/snapd/testutil"
)
@@ -47,11 +48,21 @@ func (s *aspectsSuite) SetUpTest(c *C) {
s.expectReadAccess(daemon.AuthenticatedAccess{Polkit: "io.snapcraft.snapd.manage"})
s.expectWriteAccess(daemon.AuthenticatedAccess{Polkit: "io.snapcraft.snapd.manage"})
+
+ st := state.New(nil)
+ o := overlord.MockWithState(st)
+ s.d = daemon.NewWithOverlord(o)
+
+ st.Lock()
+ databags := map[string]map[string]aspects.JSONDataBag{
+ "system": {"network": aspects.NewJSONDataBag()},
+ }
+ st.Set("aspect-databags", databags)
+ st.Unlock()
+
}
func (s *aspectsSuite) TestGetAspect(c *C) {
- s.daemon(c)
-
type test struct {
name string
value interface{}
@@ -64,7 +75,7 @@ func (s *aspectsSuite) TestGetAspect(c *C) {
{name: "map", value: map[string]int{"foo": 123}},
} {
cmt := Commentf("%s test", t.name)
- restore := daemon.MockAspectstateGet(func(_ *state.State, acc, bundleName, aspect, field string, value interface{}) error {
+ restore := daemon.MockAspectstateGet(func(_ aspects.DataBag, acc, bundleName, aspect, field string, value interface{}) error {
c.Check(acc, Equals, "system", cmt)
c.Check(bundleName, Equals, "network", cmt)
c.Check(aspect, Equals, "wifi-setup", cmt)
@@ -86,10 +97,8 @@ func (s *aspectsSuite) TestGetAspect(c *C) {
}
func (s *aspectsSuite) TestAspectGetMany(c *C) {
- s.daemon(c)
-
var calls int
- restore := daemon.MockAspectstateGet(func(_ *state.State, _, _, _, _ string, value interface{}) error {
+ restore := daemon.MockAspectstateGet(func(_ aspects.DataBag, _, _, _, _ string, value interface{}) error {
calls++
switch calls {
case 1:
@@ -115,10 +124,8 @@ func (s *aspectsSuite) TestAspectGetMany(c *C) {
}
func (s *aspectsSuite) TestAspectGetSomeFieldNotFound(c *C) {
- s.daemon(c)
-
var calls int
- restore := daemon.MockAspectstateGet(func(_ *state.State, acc, bundle, aspect, _ string, value interface{}) error {
+ restore := daemon.MockAspectstateGet(func(_ aspects.DataBag, acc, bundle, aspect, _ string, value interface{}) error {
calls++
switch calls {
case 1:
@@ -144,9 +151,7 @@ func (s *aspectsSuite) TestAspectGetSomeFieldNotFound(c *C) {
}
func (s *aspectsSuite) TestGetAspectNoFieldsFound(c *C) {
- s.daemon(c)
-
- restore := daemon.MockAspectstateGet(func(_ *state.State, _, _, _, _ string, _ interface{}) error {
+ restore := daemon.MockAspectstateGet(func(_ aspects.DataBag, _, _, _, _ string, _ interface{}) error {
return &aspects.FieldNotFoundError{}
})
defer restore()
@@ -159,18 +164,66 @@ func (s *aspectsSuite) TestGetAspectNoFieldsFound(c *C) {
c.Check(rspe.Error(), Equals, "no fields were found (api 404)")
}
+func (s *aspectsSuite) TestAspectGetDatabagNotFound(c *C) {
+ restore := daemon.MockAspectstateGet(func(_ aspects.DataBag, _, _, _, _ string, _ interface{}) error {
+ return &aspects.AspectNotFoundError{Account: "foo", BundleName: "network", Aspect: "wifi-setup"}
+ })
+ defer restore()
+
+ req, err := http.NewRequest("GET", "/v2/aspects/foo/network/wifi-setup?fields=ssid", nil)
+ c.Assert(err, IsNil)
+
+ rspe := s.errorReq(c, req, nil)
+ c.Check(rspe.Status, Equals, 404)
+ c.Check(rspe.Message, Equals, "aspect foo/network/wifi-setup not found")
+}
+
+func (s *aspectsSuite) TestAspectSetManyWithExistingState(c *C) {
+ st := s.d.Overlord().State()
+ st.Lock()
+
+ databag := aspects.NewJSONDataBag()
+ err := databag.Set("wifi.ssid", "foo")
+ c.Assert(err, IsNil)
+
+ databags := map[string]map[string]aspects.JSONDataBag{
+ "system": {"network": databag},
+ }
+ st.Set("aspect-databags", databags)
+ st.Unlock()
+
+ s.testAspectSetMany(c)
+}
+
+func (s *aspectsSuite) TestAspectSetManyWithExistingEmptyState(c *C) {
+ st := s.d.Overlord().State()
+ st.Lock()
+
+ databags := map[string]map[string]aspects.JSONDataBag{
+ "system": {"network": aspects.NewJSONDataBag()},
+ }
+ st.Set("aspect-databags", databags)
+ st.Unlock()
+
+ s.testAspectSetMany(c)
+}
+
func (s *aspectsSuite) TestAspectSetMany(c *C) {
- s.daemonWithOverlordMock()
+ s.testAspectSetMany(c)
+}
+func (s *aspectsSuite) testAspectSetMany(c *C) {
var calls int
- restore := daemon.MockAspectstateSet(func(_ *state.State, _, _, _, field string, value interface{}) error {
+ restore := daemon.MockAspectstateSet(func(bag aspects.DataBag, _, _, _, field string, value interface{}) error {
calls++
switch calls {
case 1, 2:
if field == "ssid" {
c.Assert(value, Equals, "foo")
+ return bag.Set("wifi.ssid", value)
} else if field == "password" {
c.Assert(value, IsNil)
+ return bag.Set("wifi.psk", nil)
} else {
c.Errorf("expected field to be \"ssid\" or \"password\" but got %q", field)
}
@@ -199,11 +252,21 @@ func (s *aspectsSuite) TestAspectSetMany(c *C) {
chg := st.Change(rspe.Change)
c.Check(chg.Kind(), check.Equals, "set-aspect")
c.Check(chg.Summary(), check.Equals, `Set aspect system/network/wifi-setup`)
+
+ var databags map[string]map[string]aspects.JSONDataBag
+ err = st.Get("aspect-databags", &databags)
+ c.Assert(err, IsNil)
+
+ var value interface{}
+ err = databags["system"]["network"].Get("wifi.ssid", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "foo")
+
+ err = databags["system"]["network"].Get("wifi.psk", &value)
+ c.Assert(err, FitsTypeOf, &aspects.FieldNotFoundError{})
}
func (s *aspectsSuite) TestGetAspectError(c *C) {
- s.daemon(c)
-
type test struct {
name string
err error
@@ -215,7 +278,7 @@ func (s *aspectsSuite) TestGetAspectError(c *C) {
{name: "internal", err: errors.New("internal"), code: 500},
{name: "invalid access", err: &aspects.InvalidAccessError{RequestedAccess: 1, FieldAccess: 2, Field: "foo"}, code: 403},
} {
- restore := daemon.MockAspectstateGet(func(_ *state.State, _, _, _, _ string, _ interface{}) error {
+ restore := daemon.MockAspectstateGet(func(_ aspects.DataBag, _, _, _, _ string, _ interface{}) error {
return t.err
})
@@ -229,8 +292,6 @@ func (s *aspectsSuite) TestGetAspectError(c *C) {
}
func (s *aspectsSuite) TestGetAspectMissingField(c *C) {
- s.daemon(c)
-
req, err := http.NewRequest("GET", "/v2/aspects/system/network/wifi-setup", nil)
c.Assert(err, IsNil)
@@ -240,10 +301,8 @@ func (s *aspectsSuite) TestGetAspectMissingField(c *C) {
}
func (s *aspectsSuite) TestGetAspectMisshapenQuery(c *C) {
- s.daemon(c)
-
var calls int
- restore := daemon.MockAspectstateGet(func(_ *state.State, _, _, _, field string, value interface{}) error {
+ restore := daemon.MockAspectstateGet(func(_ aspects.DataBag, _, _, _, field string, value interface{}) error {
calls++
switch calls {
case 1:
@@ -270,8 +329,6 @@ func (s *aspectsSuite) TestGetAspectMisshapenQuery(c *C) {
}
func (s *aspectsSuite) TestSetAspect(c *C) {
- s.daemonWithOverlordMock()
-
type test struct {
name string
value interface{}
@@ -284,13 +341,14 @@ func (s *aspectsSuite) TestSetAspect(c *C) {
{name: "map", value: map[string]interface{}{"foo": "bar"}},
} {
cmt := Commentf("%s test", t.name)
- restore := daemon.MockAspectstateSet(func(_ *state.State, acc, bundleName, aspect, field string, value interface{}) error {
+ restore := daemon.MockAspectstateSet(func(bag aspects.DataBag, acc, bundleName, aspect, field string, value interface{}) error {
c.Check(acc, Equals, "system", cmt)
c.Check(bundleName, Equals, "network", cmt)
c.Check(aspect, Equals, "wifi-setup", cmt)
c.Check(field, Equals, "ssid", cmt)
c.Check(value, DeepEquals, t.value, cmt)
- return nil
+ c.Assert(bag, NotNil)
+ return bag.Set("wifi.ssid", value)
})
jsonVal, err := json.Marshal(t.value)
c.Check(err, IsNil, cmt)
@@ -308,16 +366,26 @@ func (s *aspectsSuite) TestSetAspect(c *C) {
chg := st.Change(rspe.Change)
st.Unlock()
- c.Check(chg.Kind(), check.Equals, "set-aspect", cmt)
- c.Check(chg.Summary(), check.Equals, `Set aspect system/network/wifi-setup`, cmt)
+ c.Check(chg.Kind(), Equals, "set-aspect", cmt)
+ c.Check(chg.Summary(), Equals, `Set aspect system/network/wifi-setup`, cmt)
+
+ st.Lock()
+ var databags map[string]map[string]aspects.JSONDataBag
+ err = st.Get("aspect-databags", &databags)
+ st.Unlock()
+ c.Assert(err, IsNil)
+
+ var value interface{}
+ err = databags["system"]["network"].Get("wifi.ssid", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, DeepEquals, t.value)
+
restore()
}
}
func (s *aspectsSuite) TestUnsetAspect(c *C) {
- s.daemonWithOverlordMock()
-
- restore := daemon.MockAspectstateSet(func(_ *state.State, acc, bundleName, aspect, field string, value interface{}) error {
+ restore := daemon.MockAspectstateSet(func(_ aspects.DataBag, acc, bundleName, aspect, field string, value interface{}) error {
c.Check(acc, Equals, "system")
c.Check(bundleName, Equals, "network")
c.Check(aspect, Equals, "wifi-setup")
@@ -345,8 +413,6 @@ func (s *aspectsSuite) TestUnsetAspect(c *C) {
}
func (s *aspectsSuite) TestSetAspectError(c *C) {
- s.daemon(c)
-
type test struct {
name string
err error
@@ -358,7 +424,7 @@ func (s *aspectsSuite) TestSetAspectError(c *C) {
{name: "field not found", err: &aspects.FieldNotFoundError{}, code: 404},
{name: "internal", err: errors.New("internal"), code: 500},
} {
- restore := daemon.MockAspectstateSet(func(*state.State, string, string, string, string, interface{}) error {
+ restore := daemon.MockAspectstateSet(func(aspects.DataBag, string, string, string, string, interface{}) error {
return t.err
})
cmt := Commentf("%s test", t.name)
@@ -375,9 +441,7 @@ func (s *aspectsSuite) TestSetAspectError(c *C) {
}
func (s *aspectsSuite) TestSetAspectEmptyBody(c *C) {
- s.daemon(c)
-
- restore := daemon.MockAspectstateSet(func(*state.State, string, string, string, string, interface{}) error {
+ restore := daemon.MockAspectstateSet(func(aspects.DataBag, string, string, string, string, interface{}) error {
err := errors.New("unexpected call to aspectstate.Set")
c.Error(err)
return err
@@ -393,8 +457,6 @@ func (s *aspectsSuite) TestSetAspectEmptyBody(c *C) {
}
func (s *aspectsSuite) TestSetAspectBadRequest(c *C) {
- s.daemon(c)
-
buf := bytes.NewBufferString(`{`)
req, err := http.NewRequest("PUT", "/v2/aspects/system/network/wifi-setup", buf)
c.Assert(err, IsNil)
@@ -405,9 +467,7 @@ func (s *aspectsSuite) TestSetAspectBadRequest(c *C) {
}
func (s *aspectsSuite) TestSetAspectNotAllowed(c *C) {
- s.daemon(c)
-
- restore := daemon.MockAspectstateSet(func(_ *state.State, acc, bundleName, aspect, field string, val interface{}) error {
+ restore := daemon.MockAspectstateSet(func(_ aspects.DataBag, acc, bundleName, aspect, field string, val interface{}) error {
return &aspects.InvalidAccessError{RequestedAccess: 2, FieldAccess: 1, Field: "foo"}
})
defer restore()
@@ -424,9 +484,7 @@ func (s *aspectsSuite) TestSetAspectNotAllowed(c *C) {
}
func (s *aspectsSuite) TestGetAspectNotAllowed(c *C) {
- s.daemon(c)
-
- restore := daemon.MockAspectstateGet(func(_ *state.State, acc, bundleName, aspect, field string, val interface{}) error {
+ restore := daemon.MockAspectstateGet(func(_ aspects.DataBag, acc, bundleName, aspect, field string, val interface{}) error {
return &aspects.InvalidAccessError{RequestedAccess: 1, FieldAccess: 2, Field: "foo"}
})
defer restore()
diff --git a/daemon/api_model.go b/daemon/api_model.go
index 02bf9e575f..681fad586c 100644
--- a/daemon/api_model.go
+++ b/daemon/api_model.go
@@ -22,6 +22,7 @@ package daemon
import (
"encoding/json"
"errors"
+ "mime"
"net/http"
"github.com/snapcore/snapd/asserts"
@@ -56,12 +57,30 @@ type postModelData struct {
}
func postModel(c *Command, r *http.Request, _ *auth.UserState) Response {
- defer r.Body.Close()
+ contentType := r.Header.Get("Content-Type")
+ mediaType, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ // assume a JSON body, as the content type was not enforced in the past
+ mediaType = "application/json"
+ }
+
+ switch mediaType {
+ case "application/json":
+ return storeRemodel(c, r)
+ case "multipart/form-data":
+ return BadRequest("media type %q not implemented yet", mediaType)
+ default:
+ return BadRequest("unexpected media type %q", mediaType)
+ }
+}
+
+func storeRemodel(c *Command, r *http.Request) Response {
var data postModelData
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&data); err != nil {
return BadRequest("cannot decode request body into remodel operation: %v", err)
}
+
rawNewModel, err := asserts.Decode([]byte(data.NewModel))
if err != nil {
return BadRequest("cannot decode new model assertion: %v", err)
@@ -82,7 +101,6 @@ func postModel(c *Command, r *http.Request, _ *auth.UserState) Response {
ensureStateSoon(st)
return AsyncResponse(nil, chg.ID())
-
}
// getModel gets the current model assertion using the DeviceManager
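
For reference, mime.ParseMediaType returns any parameters (such as the multipart boundary) separately, so the switch in postModel only ever sees the bare media type; a small standalone illustration, not snapd code:

package main

import (
	"fmt"
	"mime"
)

func main() {
	mt, params, err := mime.ParseMediaType("multipart/form-data; boundary=abc123")
	fmt.Println(mt, params["boundary"], err) // multipart/form-data abc123 <nil>

	mt, _, err = mime.ParseMediaType("application/json")
	fmt.Println(mt, err) // application/json <nil>

	// a missing or malformed Content-Type yields an error, which postModel
	// treats as an implicit JSON body for backwards compatibility
	_, _, err = mime.ParseMediaType("")
	fmt.Println(err != nil) // true
}
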
diff --git a/daemon/api_model_test.go b/daemon/api_model_test.go
index 31913008b3..f3a025de62 100644
--- a/daemon/api_model_test.go
+++ b/daemon/api_model_test.go
@@ -134,6 +134,67 @@ func (s *modelSuite) TestPostRemodel(c *check.C) {
c.Assert(soon, check.Equals, 1)
}
+func (s *modelSuite) TestPostRemodelWrongBody(c *check.C) {
+ s.expectRootAccess()
+
+ d := s.daemonWithOverlordMockAndStore()
+ hookMgr, err := hookstate.Manager(d.Overlord().State(), d.Overlord().TaskRunner())
+ c.Assert(err, check.IsNil)
+ deviceMgr, err := devicestate.Manager(d.Overlord().State(), hookMgr, d.Overlord().TaskRunner(), nil)
+ c.Assert(err, check.IsNil)
+ d.Overlord().AddManager(deviceMgr)
+
+ type badBody struct {
+ body, err string
+ }
+ for _, tc := range []badBody{
+ {"", "cannot decode request body into remodel operation: EOF"},
+ {"garbage", `cannot decode request body into remodel operation: invalid character 'g' looking for beginning of value`},
+ {`{ "new-model": "garbage"}`, "cannot decode new model assertion: assertion content/signature separator not found"},
+ } {
+ req, err := http.NewRequest("POST", "/v2/model", bytes.NewBuffer([]byte(tc.body)))
+ req.Header.Set("Content-Type", "application/json")
+ c.Assert(err, check.IsNil)
+
+ rspe := s.errorReq(c, req, nil)
+ c.Assert(rspe.Status, check.Equals, 400)
+ c.Assert(rspe.Kind, check.Equals, client.ErrorKind(""))
+ c.Assert(rspe.Value, check.IsNil)
+ c.Assert(rspe.Message, check.Equals, tc.err)
+ }
+}
+
+func (s *modelSuite) TestPostRemodelWrongContentType(c *check.C) {
+ s.expectRootAccess()
+
+ d := s.daemonWithOverlordMockAndStore()
+ hookMgr, err := hookstate.Manager(d.Overlord().State(), d.Overlord().TaskRunner())
+ c.Assert(err, check.IsNil)
+ deviceMgr, err := devicestate.Manager(d.Overlord().State(), hookMgr, d.Overlord().TaskRunner(), nil)
+ c.Assert(err, check.IsNil)
+ d.Overlord().AddManager(deviceMgr)
+
+ req, err := http.NewRequest("POST", "/v2/model", bytes.NewBuffer([]byte("garbage")))
+ req.Header.Set("Content-Type", "footype")
+ c.Assert(err, check.IsNil)
+
+ rspe := s.errorReq(c, req, nil)
+ c.Assert(rspe.Status, check.Equals, 400)
+ c.Assert(rspe.Kind, check.Equals, client.ErrorKind(""))
+ c.Assert(rspe.Value, check.IsNil)
+ c.Assert(rspe.Message, check.Equals, `unexpected media type "footype"`)
+
+ req, err = http.NewRequest("POST", "/v2/model", bytes.NewBuffer([]byte("garbage")))
+ req.Header.Set("Content-Type", "multipart/form-data")
+ c.Assert(err, check.IsNil)
+
+ rspe = s.errorReq(c, req, nil)
+ c.Assert(rspe.Status, check.Equals, 400)
+ c.Assert(rspe.Kind, check.Equals, client.ErrorKind(""))
+ c.Assert(rspe.Value, check.IsNil)
+ c.Assert(rspe.Message, check.Equals, `media type "multipart/form-data" not implemented yet`)
+}
+
func (s *modelSuite) TestGetModelNoModelAssertion(c *check.C) {
d := s.daemonWithOverlordMockAndStore()
diff --git a/daemon/api_systems_test.go b/daemon/api_systems_test.go
index 70f4955459..4782cdee50 100644
--- a/daemon/api_systems_test.go
+++ b/daemon/api_systems_test.go
@@ -43,7 +43,6 @@ import (
"github.com/snapcore/snapd/daemon"
"github.com/snapcore/snapd/dirs"
"github.com/snapcore/snapd/gadget"
- "github.com/snapcore/snapd/gadget/gadgettest"
"github.com/snapcore/snapd/gadget/quantity"
"github.com/snapcore/snapd/overlord/assertstate/assertstatetest"
"github.com/snapcore/snapd/overlord/devicestate"
@@ -1063,7 +1062,7 @@ func (s *systemsSuite) TestSystemsGetSpecificLabelIntegration(c *check.C) {
},
},
}
- gadgettest.SetEnclosingVolumeInStructs(sd.Volumes)
+ gadget.SetEnclosingVolumeInStructs(sd.Volumes)
c.Assert(sys, check.DeepEquals, sd)
}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index d9b48c3f63..13e57c5bc9 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -76,7 +76,7 @@ type Daemon struct {
router *mux.Router
standbyOpinions *standby.StandbyOpinions
- // set to what kind of restart was requested if any
+ // set to what kind of restart was requested (if any)
requestedRestart restart.RestartType
// reboot info needed to handle reboots
rebootInfo boot.RebootInfo
@@ -94,7 +94,7 @@ type Daemon struct {
// A ResponseFunc handles one of the individual verbs for a method
type ResponseFunc func(*Command, *http.Request, *auth.UserState) Response
-// A Command routes a request to an individual per-verb ResponseFUnc
+// A Command routes a request to an individual per-verb ResponseFunc
type Command struct {
Path string
PathPrefix string
@@ -227,7 +227,7 @@ func (d *Daemon) Init() error {
return err
}
- // The SnapdSocket is required-- without it, die.
+ // The SnapdSocket is required -- without it, die.
if listener, err := netutil.GetListener(dirs.SnapdSocket, listenerMap); err == nil {
d.snapdListener = &ucrednetListener{Listener: listener}
} else {
@@ -249,15 +249,15 @@ func (d *Daemon) Init() error {
return nil
}
-// SetDegradedMode puts the daemon into an degraded mode which will the
-// error given in the "err" argument for commands that are not marked
-// as readonlyOK.
+// SetDegradedMode puts the daemon into a degraded mode. In this mode
+// it will return the error given in the "err" argument for commands
+// that are not pure HTTP GETs.
//
// This is useful to report errors to the client when the daemon
// cannot work because e.g. a snapd squashfs precondition check failed
// or the system is out of diskspace.
//
-// When the system is fine again calling "DegradedMode(nil)" is enough
+// When the system is fine again, calling "SetDegradedMode(nil)" is enough
// to put the daemon into full operation again.
func (d *Daemon) SetDegradedMode(err error) {
d.degradedErr = err
@@ -340,7 +340,7 @@ func (d *Daemon) Start() error {
logger.Noticef("adjusting startup timeout by %v (%s)", to, reasoning)
systemdSdNotify(fmt.Sprintf("EXTEND_TIMEOUT_USEC=%d", us))
}
- // now perform expensive overlord/manages initiliazation
+ // now perform expensive overlord/managers initialization
if err := d.overlord.StartUp(); err != nil {
return err
}
diff --git a/daemon/export_test.go b/daemon/export_test.go
index 301e57d530..27750df8f1 100644
--- a/daemon/export_test.go
+++ b/daemon/export_test.go
@@ -26,6 +26,7 @@ import (
"github.com/gorilla/mux"
+ "github.com/snapcore/snapd/aspects"
"github.com/snapcore/snapd/asserts/snapasserts"
"github.com/snapcore/snapd/boot"
"github.com/snapcore/snapd/overlord"
@@ -313,18 +314,18 @@ var (
MaxReadBuflen = maxReadBuflen
)
-func MockAspectstateGet(f func(st *state.State, account, bundleName, aspect, field string, value interface{}) error) (restore func()) {
- old := aspectstateGet
- aspectstateGet = f
+func MockAspectstateGet(f func(databag aspects.DataBag, account, bundleName, aspect, field string, value interface{}) error) (restore func()) {
+ old := aspectstateGetAspect
+ aspectstateGetAspect = f
return func() {
- aspectstateGet = old
+ aspectstateGetAspect = old
}
}
-func MockAspectstateSet(f func(st *state.State, account, bundleName, aspect, field string, val interface{}) error) (restore func()) {
- old := aspectstateSet
- aspectstateSet = f
+func MockAspectstateSet(f func(databag aspects.DataBag, account, bundleName, aspect, field string, val interface{}) error) (restore func()) {
+ old := aspectstateSetAspect
+ aspectstateSetAspect = f
return func() {
- aspectstateSet = old
+ aspectstateSetAspect = old
}
}
diff --git a/data/preseed.json b/data/preseed.json
index 2c885506b9..1307a0b255 100644
--- a/data/preseed.json
+++ b/data/preseed.json
@@ -3,6 +3,8 @@
"etc/udev/rules.d",
"etc/systemd",
"etc/dbus-1",
+ "etc/modprobe.d",
+ "etc/modules-load.d",
"snap",
"var/lib/extrausers",
"var/lib/snapd/state.json",
diff --git a/dirs/dirs.go b/dirs/dirs.go
index 371b931237..43980a1325 100644
--- a/dirs/dirs.go
+++ b/dirs/dirs.go
@@ -41,26 +41,25 @@ var (
HiddenSnapDataHomeGlob string
- SnapBlobDir string
- SnapDataDir string
- SnapDataHomeGlob string
- SnapDownloadCacheDir string
- SnapAppArmorDir string
- SnapConfineAppArmorDir string
- SnapSeccompBase string
- SnapSeccompDir string
- SnapMountPolicyDir string
- SnapUdevRulesDir string
- SnapKModModulesDir string
- SnapKModModprobeDir string
- LocaleDir string
- SnapdSocket string
- SnapSocket string
- SnapRunDir string
- SnapRunNsDir string
- SnapRunLockDir string
- SnapBootstrapRunDir string
- SnapVoidDir string
+ SnapBlobDir string
+ SnapDataDir string
+ SnapDataHomeGlob string
+ SnapDownloadCacheDir string
+ SnapAppArmorDir string
+ SnapSeccompBase string
+ SnapSeccompDir string
+ SnapMountPolicyDir string
+ SnapUdevRulesDir string
+ SnapKModModulesDir string
+ SnapKModModprobeDir string
+ LocaleDir string
+ SnapdSocket string
+ SnapSocket string
+ SnapRunDir string
+ SnapRunNsDir string
+ SnapRunLockDir string
+ SnapBootstrapRunDir string
+ SnapVoidDir string
SnapdMaintenanceFile string
@@ -365,7 +364,6 @@ func SetRootDir(rootdir string) {
SnapDataHomeGlob = filepath.Join(rootdir, "/home/*/", UserHomeSnapDir)
HiddenSnapDataHomeGlob = filepath.Join(rootdir, "/home/*/", HiddenSnapDataHomeDir)
SnapAppArmorDir = filepath.Join(rootdir, snappyDir, "apparmor", "profiles")
- SnapConfineAppArmorDir = filepath.Join(rootdir, snappyDir, "apparmor", "snap-confine")
SnapDownloadCacheDir = filepath.Join(rootdir, snappyDir, "cache")
SnapSeccompBase = filepath.Join(rootdir, snappyDir, "seccomp")
SnapSeccompDir = filepath.Join(SnapSeccompBase, "bpf")
diff --git a/gadget/gadget.go b/gadget/gadget.go
index c927663059..933628f034 100644
--- a/gadget/gadget.go
+++ b/gadget/gadget.go
@@ -94,6 +94,10 @@ const (
// UnboundedStructureOffset is the maximum effective partition offset
// that we can handle.
UnboundedStructureOffset = quantity.Offset(math.MaxUint64)
+
+ // UnboundedStructureSize is the maximum effective partition size
+ // that we can handle.
+ UnboundedStructureSize = quantity.Size(math.MaxUint64)
)
var (
@@ -255,6 +259,16 @@ type VolumeStructure struct {
EnclosingVolume *Volume `yaml:"-" json:"-"`
}
+// SetEnclosingVolumeInStructs is a helper that sets, in each VolumeStructure,
+// the pointer to the Volume that encloses it.
+func SetEnclosingVolumeInStructs(vv map[string]*Volume) {
+ for _, v := range vv {
+ for sidx := range v.Structure {
+ v.Structure[sidx].EnclosingVolume = v
+ }
+ }
+}
+
// IsRoleMBR tells us if v has MBR role or not.
func (v *VolumeStructure) IsRoleMBR() bool {
return v.Role == schemaMBR
@@ -287,8 +301,12 @@ func (vs *VolumeStructure) HasLabel(label string) bool {
return vs.Label == label
}
-// IsFixedSize tells us if size is fixed or if there is range.
-func (vs *VolumeStructure) IsFixedSize() bool {
+// isFixedSize tells us if the size is fixed or if there is a range.
+func (vs *VolumeStructure) isFixedSize() bool {
+ if vs.hasPartialSize() {
+ return false
+ }
+
return vs.Size == vs.MinSize
}
@@ -343,6 +361,12 @@ func maxStructureOffset(vss []VolumeStructure, idx int) quantity.Offset {
max := quantity.Offset(0)
othersSz := quantity.Size(0)
for i := idx - 1; i >= 0; i-- {
+ if vss[i].hasPartialSize() {
+ // If a previous partition does not have a defined size,
+ // the allowed offset is effectively unbounded.
+ max = UnboundedStructureOffset
+ break
+ }
othersSz += vss[i].Size
if vss[i].Offset != nil {
max = *vss[i].Offset + quantity.Offset(othersSz)
@@ -981,7 +1005,11 @@ func asOffsetPtr(offs quantity.Offset) *quantity.Offset {
func setImplicitForVolume(vol *Volume, model Model) error {
rs := whichVolRuleset(model)
- if vol.Schema == "" && !vol.HasPartial(PartialSchema) {
+ if vol.HasPartial(PartialSchema) {
+ if vol.Schema != "" {
+ return fmt.Errorf("partial schema is set but schema is still specified as %q", vol.Schema)
+ }
+ } else if vol.Schema == "" {
// default for schema is gpt
vol.Schema = schemaGPT
}
@@ -1029,7 +1057,7 @@ func setImplicitForVolume(vol *Volume, model Model) error {
}
// We know the end of the structure only if we could define an offset
// and the size is fixed.
- if vol.Structure[i].Offset != nil && vol.Structure[i].IsFixedSize() {
+ if vol.Structure[i].Offset != nil && vol.Structure[i].isFixedSize() {
previousEnd = asOffsetPtr(*vol.Structure[i].Offset +
quantity.Offset(vol.Structure[i].Size))
} else {
@@ -1158,7 +1186,7 @@ func validateVolume(vol *Volume) error {
if !validVolumeName.MatchString(vol.Name) {
return errors.New("invalid name")
}
- if vol.Schema != "" && vol.Schema != schemaGPT && vol.Schema != schemaMBR {
+ if !vol.HasPartial(PartialSchema) && vol.Schema != schemaGPT && vol.Schema != schemaMBR {
return fmt.Errorf("invalid schema %q", vol.Schema)
}
@@ -1383,9 +1411,6 @@ func validateStructureType(s string, vol *Volume) error {
}
} else {
schema := vol.Schema
- if schema == "" {
- schema = schemaGPT
- }
if schema != schemaGPT && isGPT {
// type: <uuid> is only valid for GPT volumes
return fmt.Errorf("GUID structure type with non-GPT schema %q", vol.Schema)
@@ -1570,16 +1595,28 @@ func IsCompatible(current, new *Info) error {
return err
}
- if currentVol.Schema == "" || newVol.Schema == "" {
- return fmt.Errorf("internal error: unset volume schemas: old: %q new: %q", currentVol.Schema, newVol.Schema)
- }
-
if err := isLayoutCompatible(currentVol, newVol); err != nil {
return fmt.Errorf("incompatible layout change: %v", err)
}
return nil
}
+// checkCompatibleSchema checks whether the schema of the new volume we are
+// updating to is compatible with the schema of the old volume.
+func checkCompatibleSchema(old, new *Volume) error {
+ // If the old schema is partial, any schema in the new volume is fine
+ if !old.HasPartial(PartialSchema) {
+ if new.HasPartial(PartialSchema) {
+ return fmt.Errorf("new schema is partial, while old was not")
+ }
+ if old.Schema != new.Schema {
+ return fmt.Errorf("incompatible schema change from %v to %v",
+ old.Schema, new.Schema)
+ }
+ }
+ return nil
+}
+
// LaidOutVolumesFromGadget takes a gadget rootdir and lays out the partitions
// on all volumes as specified. It returns the specific volume on which system-*
// roles/partitions exist, as well as all volumes mentioned in the gadget.yaml
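
To spell out the transitions checkCompatibleSchema accepts, here is a package-internal sketch (the function is unexported, so this assumes it runs inside the gadget package; the error strings are the ones returned by the implementation above):

func exampleSchemaCompatibility() {
	oldVol := &Volume{Schema: "gpt"}

	// same schema: nil
	_ = checkCompatibleSchema(oldVol, &Volume{Schema: "gpt"})
	// schema change: "incompatible schema change from gpt to mbr"
	_ = checkCompatibleSchema(oldVol, &Volume{Schema: "mbr"})
	// non-partial old, partial new: "new schema is partial, while old was not"
	_ = checkCompatibleSchema(oldVol, &Volume{Partial: []PartialProperty{PartialSchema}})

	// a partial old schema accepts any new schema: nil
	oldPartial := &Volume{Partial: []PartialProperty{PartialSchema}}
	_ = checkCompatibleSchema(oldPartial, &Volume{Schema: "mbr"})
}
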
diff --git a/gadget/gadget_test.go b/gadget/gadget_test.go
index 066edab3b0..8a0a5b1a27 100644
--- a/gadget/gadget_test.go
+++ b/gadget/gadget_test.go
@@ -814,7 +814,6 @@ func checkEnclosingPointsToVolume(c *C, vols map[string]*gadget.Volume) {
c.Assert(v.Structure[sidx].EnclosingVolume, Equals, v)
}
}
-
}
func (s *gadgetYamlTestSuite) TestReadGadgetYamlValid(c *C) {
@@ -866,7 +865,7 @@ func (s *gadgetYamlTestSuite) TestReadGadgetYamlValid(c *C) {
},
},
}
- gadgettest.SetEnclosingVolumeInStructs(expectedgi.Volumes)
+ gadget.SetEnclosingVolumeInStructs(expectedgi.Volumes)
c.Assert(ginfo, DeepEquals, expectedgi)
checkEnclosingPointsToVolume(c, ginfo.Volumes)
}
@@ -938,7 +937,7 @@ func (s *gadgetYamlTestSuite) TestReadMultiVolumeGadgetYamlValid(c *C) {
},
},
}
- gadgettest.SetEnclosingVolumeInStructs(expectedgi.Volumes)
+ gadget.SetEnclosingVolumeInStructs(expectedgi.Volumes)
c.Assert(ginfo, DeepEquals, expectedgi)
checkEnclosingPointsToVolume(c, ginfo.Volumes)
}
@@ -1062,7 +1061,7 @@ func (s *gadgetYamlTestSuite) TestReadGadgetYamlVolumeUpdate(c *C) {
},
},
}
- gadgettest.SetEnclosingVolumeInStructs(expectedgi.Volumes)
+ gadget.SetEnclosingVolumeInStructs(expectedgi.Volumes)
c.Assert(ginfo, DeepEquals, expectedgi)
checkEnclosingPointsToVolume(c, ginfo.Volumes)
}
@@ -1205,7 +1204,7 @@ func (s *gadgetYamlTestSuite) TestValidateStructureType(c *C) {
// hybrid, partially lowercase UUID
{"EF,aa686148-6449-6e6f-744E-656564454649", "", ""},
// GPT UUID, partially lowercase
- {"aa686148-6449-6e6f-744E-656564454649", "", ""},
+ {"aa686148-6449-6e6f-744E-656564454649", "", "gpt"},
// no type specified
{"", `invalid type "": type is not specified`, ""},
// plain MBR type without mbr schema
@@ -1232,7 +1231,8 @@ func (s *gadgetYamlTestSuite) TestValidateStructureType(c *C) {
} {
c.Logf("tc: %v %q", i, tc.s)
- err := gadget.ValidateVolumeStructure(&gadget.VolumeStructure{Type: tc.s, Size: 123, EnclosingVolume: &gadget.Volume{}}, &gadget.Volume{Schema: tc.schema})
+ vol := &gadget.Volume{Schema: tc.schema}
+ err := gadget.ValidateVolumeStructure(&gadget.VolumeStructure{Type: tc.s, Size: 123, EnclosingVolume: vol}, vol)
if tc.err != "" {
c.Check(err, ErrorMatches, tc.err)
} else {
@@ -1319,7 +1319,7 @@ size: 446`
legacyTypeAsMBRTooLarge := `
type: mbr
size: 447`
- vol := &gadget.Volume{}
+ vol := &gadget.Volume{Schema: "gpt"}
mbrVol := &gadget.Volume{Schema: "mbr"}
for i, tc := range []struct {
s *gadget.VolumeStructure
@@ -1362,6 +1362,7 @@ size: 447`
}
func (s *gadgetYamlTestSuite) TestValidateFilesystem(c *C) {
+ vol := &gadget.Volume{Schema: "gpt"}
for i, tc := range []struct {
s string
err string
@@ -1373,7 +1374,7 @@ func (s *gadgetYamlTestSuite) TestValidateFilesystem(c *C) {
} {
c.Logf("tc: %v %+v", i, tc.s)
- err := gadget.ValidateVolumeStructure(&gadget.VolumeStructure{Filesystem: tc.s, Type: "21686148-6449-6E6F-744E-656564454649", Size: 123, EnclosingVolume: &gadget.Volume{}}, &gadget.Volume{})
+ err := gadget.ValidateVolumeStructure(&gadget.VolumeStructure{Filesystem: tc.s, Type: "21686148-6449-6E6F-744E-656564454649", Size: 123, EnclosingVolume: vol}, vol)
if tc.err != "" {
c.Check(err, ErrorMatches, tc.err)
} else {
@@ -1389,9 +1390,9 @@ func (s *gadgetYamlTestSuite) TestValidateVolumeSchema(c *C) {
}{
{"gpt", ""},
{"mbr", ""},
- // implicit GPT
- {"", ""},
// invalid
+ // A bit redundant, as the schema is always set in setImplicitForVolume
+ {"", `invalid schema ""`},
{"some", `invalid schema "some"`},
} {
c.Logf("tc: %v %+v", i, tc.s)
@@ -1405,6 +1406,11 @@ func (s *gadgetYamlTestSuite) TestValidateVolumeSchema(c *C) {
}
}
+func (s *gadgetYamlTestSuite) TestValidateVolumePartialSchema(c *C) {
+ err := gadget.ValidateVolume(&gadget.Volume{Name: "name", Schema: "", Partial: []gadget.PartialProperty{gadget.PartialSchema}})
+ c.Check(err, IsNil)
+}
+
func (s *gadgetYamlTestSuite) TestValidateVolumeSchemaNotOverlapWithGPT(c *C) {
for i, tc := range []struct {
s string
@@ -1469,7 +1475,7 @@ func (s *gadgetYamlTestSuite) TestValidateVolumeName(c *C) {
} {
c.Logf("tc: %v %+v", i, tc.s)
- err := gadget.ValidateVolume(&gadget.Volume{Name: tc.s})
+ err := gadget.ValidateVolume(&gadget.Volume{Name: tc.s, Schema: "gpt"})
if tc.err != "" {
c.Check(err, ErrorMatches, tc.err)
} else {
@@ -1479,13 +1485,16 @@ func (s *gadgetYamlTestSuite) TestValidateVolumeName(c *C) {
}
func (s *gadgetYamlTestSuite) TestValidateVolumeDuplicateStructures(c *C) {
- err := gadget.ValidateVolume(&gadget.Volume{
- Name: "name",
+ vol := &gadget.Volume{
+ Name: "name",
+ Schema: "gpt",
Structure: []gadget.VolumeStructure{
- {Name: "duplicate", Type: "bare", Size: 1024, Offset: asOffsetPtr(24576), EnclosingVolume: &gadget.Volume{}},
- {Name: "duplicate", Type: "21686148-6449-6E6F-744E-656564454649", Size: 2048, Offset: asOffsetPtr(24576), EnclosingVolume: &gadget.Volume{}},
+ {Name: "duplicate", Type: "bare", Size: 1024, Offset: asOffsetPtr(24576)},
+ {Name: "duplicate", Type: "21686148-6449-6E6F-744E-656564454649", Size: 2048, Offset: asOffsetPtr(24576)},
},
- })
+ }
+ gadget.SetEnclosingVolumeInStructs(map[string]*gadget.Volume{"pc": vol})
+ err := gadget.ValidateVolume(vol)
c.Assert(err, ErrorMatches, `structure name "duplicate" is not unique`)
}
@@ -1563,30 +1572,39 @@ volumes:
}
func (s *gadgetYamlTestSuite) TestValidateVolumeErrorsWrapped(c *C) {
- err := gadget.ValidateVolume(&gadget.Volume{
- Name: "name",
+ vol := &gadget.Volume{
+ Name: "name",
+ Schema: "gpt",
Structure: []gadget.VolumeStructure{
- {Type: "bare", Size: 1024, Offset: asOffsetPtr(24576), EnclosingVolume: &gadget.Volume{}},
- {Type: "bogus", Size: 1024, Offset: asOffsetPtr(24576), EnclosingVolume: &gadget.Volume{}},
+ {Type: "bare", Size: 1024, Offset: asOffsetPtr(24576)},
+ {Type: "bogus", Size: 1024, Offset: asOffsetPtr(24576)},
},
- })
+ }
+ gadget.SetEnclosingVolumeInStructs(map[string]*gadget.Volume{"pc": vol})
+ err := gadget.ValidateVolume(vol)
c.Assert(err, ErrorMatches, `invalid structure #1: invalid type "bogus": invalid format`)
- err = gadget.ValidateVolume(&gadget.Volume{
- Name: "name",
+ vol = &gadget.Volume{
+ Name: "name",
+ Schema: "gpt",
Structure: []gadget.VolumeStructure{
- {Type: "bare", Size: 1024, Offset: asOffsetPtr(24576), EnclosingVolume: &gadget.Volume{}},
- {Type: "bogus", Size: 1024, Name: "foo", Offset: asOffsetPtr(24576), EnclosingVolume: &gadget.Volume{}},
+ {Type: "bare", Size: 1024, Offset: asOffsetPtr(24576)},
+ {Type: "bogus", Size: 1024, Name: "foo", Offset: asOffsetPtr(24576)},
},
- })
+ }
+ gadget.SetEnclosingVolumeInStructs(map[string]*gadget.Volume{"pc": vol})
+ err = gadget.ValidateVolume(vol)
c.Assert(err, ErrorMatches, `invalid structure #1 \("foo"\): invalid type "bogus": invalid format`)
- err = gadget.ValidateVolume(&gadget.Volume{
- Name: "name",
+ vol = &gadget.Volume{
+ Name: "name",
+ Schema: "gpt",
Structure: []gadget.VolumeStructure{
- {Type: "bare", Name: "foo", Size: 1024, Offset: asOffsetPtr(24576), Content: []gadget.VolumeContent{{UnresolvedSource: "foo"}}, EnclosingVolume: &gadget.Volume{}},
+ {Type: "bare", Name: "foo", Size: 1024, Offset: asOffsetPtr(24576), Content: []gadget.VolumeContent{{UnresolvedSource: "foo"}}},
},
- })
+ }
+ gadget.SetEnclosingVolumeInStructs(map[string]*gadget.Volume{"pc": vol})
+ err = gadget.ValidateVolume(vol)
c.Assert(err, ErrorMatches, `invalid structure #0 \("foo"\): invalid content #0: cannot use non-image content for bare file system`)
}
@@ -1659,7 +1677,7 @@ content:
} {
c.Logf("tc: %v %+v", i, tc.s)
- err := gadget.ValidateVolumeStructure(tc.s, &gadget.Volume{})
+ err := gadget.ValidateVolumeStructure(tc.s, &gadget.Volume{Schema: "gpt"})
if tc.err != "" {
c.Check(err, ErrorMatches, tc.err)
} else {
@@ -1698,13 +1716,13 @@ volumes:
}
func (s *gadgetYamlTestSuite) TestValidateStructureUpdatePreserveOnlyForFs(c *C) {
- gv := &gadget.Volume{}
+ gv := &gadget.Volume{Schema: "gpt"}
err := gadget.ValidateVolumeStructure(&gadget.VolumeStructure{
Type: "bare",
Update: gadget.VolumeUpdate{Preserve: []string{"foo"}},
Size: 512,
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: gv,
}, gv)
c.Check(err, ErrorMatches, "preserving files during update is not supported for non-filesystem structures")
@@ -1712,7 +1730,7 @@ func (s *gadgetYamlTestSuite) TestValidateStructureUpdatePreserveOnlyForFs(c *C)
Type: "21686148-6449-6E6F-744E-656564454649",
Update: gadget.VolumeUpdate{Preserve: []string{"foo"}},
Size: 512,
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: gv,
}, gv)
c.Check(err, ErrorMatches, "preserving files during update is not supported for non-filesystem structures")
@@ -1721,20 +1739,20 @@ func (s *gadgetYamlTestSuite) TestValidateStructureUpdatePreserveOnlyForFs(c *C)
Filesystem: "vfat",
Update: gadget.VolumeUpdate{Preserve: []string{"foo"}},
Size: 512,
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: gv,
}, gv)
c.Check(err, IsNil)
}
func (s *gadgetYamlTestSuite) TestValidateStructureUpdatePreserveDuplicates(c *C) {
- gv := &gadget.Volume{}
+ gv := &gadget.Volume{Schema: "gpt"}
err := gadget.ValidateVolumeStructure(&gadget.VolumeStructure{
Type: "21686148-6449-6E6F-744E-656564454649",
Filesystem: "vfat",
Update: gadget.VolumeUpdate{Edition: 1, Preserve: []string{"foo", "bar"}},
Size: 512,
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: gv,
}, gv)
c.Check(err, IsNil)
@@ -1743,19 +1761,19 @@ func (s *gadgetYamlTestSuite) TestValidateStructureUpdatePreserveDuplicates(c *C
Filesystem: "vfat",
Update: gadget.VolumeUpdate{Edition: 1, Preserve: []string{"foo", "bar", "foo"}},
Size: 512,
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: gv,
}, gv)
c.Check(err, ErrorMatches, `duplicate "preserve" entry "foo"`)
}
func (s *gadgetYamlTestSuite) TestValidateStructureSizeRequired(c *C) {
- gv := &gadget.Volume{}
+ gv := &gadget.Volume{Schema: "gpt"}
err := gadget.ValidateVolumeStructure(&gadget.VolumeStructure{
Type: "bare",
Update: gadget.VolumeUpdate{Preserve: []string{"foo"}},
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: gv,
}, gv)
c.Check(err, ErrorMatches, "missing size")
@@ -1763,7 +1781,7 @@ func (s *gadgetYamlTestSuite) TestValidateStructureSizeRequired(c *C) {
Type: "21686148-6449-6E6F-744E-656564454649",
Filesystem: "vfat",
Update: gadget.VolumeUpdate{Preserve: []string{"foo"}},
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: gv,
}, gv)
c.Check(err, ErrorMatches, "missing size")
@@ -1772,7 +1790,7 @@ func (s *gadgetYamlTestSuite) TestValidateStructureSizeRequired(c *C) {
Filesystem: "vfat",
Size: mustParseGadgetSize(c, "123M"),
Update: gadget.VolumeUpdate{Preserve: []string{"foo"}},
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: gv,
}, gv)
c.Check(err, IsNil)
@@ -1782,7 +1800,7 @@ func (s *gadgetYamlTestSuite) TestValidateStructureSizeRequired(c *C) {
MinSize: mustParseGadgetSize(c, "10M"),
Size: mustParseGadgetSize(c, "123M"),
Update: gadget.VolumeUpdate{Preserve: []string{"foo"}},
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: gv,
}, gv)
c.Check(err, IsNil)
}
@@ -3598,6 +3616,78 @@ func (s *gadgetYamlTestSuite) TestCompatibilityWithMinSizePartitions(c *C) {
c.Assert(match, IsNil)
}
+const mockPartialGadgetYaml = `volumes:
+ pc:
+ partial: [schema, structure, filesystem, size]
+ bootloader: grub
+ structure:
+ - name: ubuntu-save
+ role: system-save
+ offset: 2M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ - name: ubuntu-data
+ role: system-data
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ size: 100M
+`
+
+func (s *gadgetYamlTestSuite) TestDiskCompatibilityWithPartialGadget(c *C) {
+ var mockDiskVolDataAndSaveParts = gadget.OnDiskVolume{
+ Structure: []gadget.OnDiskStructure{
+ {
+ Node: "/dev/node1",
+ Name: "BIOS Boot",
+ Size: 1 * quantity.SizeMiB,
+ StartOffset: 1 * quantity.OffsetMiB,
+ },
+ {
+ Node: "/dev/node2",
+ Name: "ubuntu-save",
+ Size: 8 * quantity.SizeMiB,
+ PartitionFSType: "ext4",
+ PartitionFSLabel: "ubuntu-save",
+ StartOffset: 2 * quantity.OffsetMiB,
+ },
+ {
+ Node: "/dev/node3",
+ Name: "ubuntu-data",
+ Size: 200 * quantity.SizeMiB,
+ PartitionFSType: "ext4",
+ PartitionFSLabel: "ubuntu-data",
+ StartOffset: 10 * quantity.OffsetMiB,
+ },
+ },
+ ID: "anything",
+ Device: "/dev/node",
+ Schema: "gpt",
+ Size: 2 * quantity.SizeGiB,
+ SectorSize: 512,
+
+ // ( 2 GB / 512 B sector size ) - 33 typical GPT header backup sectors +
+ // 1 sector to get the exclusive end
+ UsableSectorsEnd: uint64((2*quantity.SizeGiB/512)-33) + 1,
+ }
+
+ gadgetVolume, err := gadgettest.VolumeFromYaml(c.MkDir(),
+ mockPartialGadgetYaml, nil)
+ c.Assert(err, IsNil)
+ diskVol := mockDiskVolDataAndSaveParts
+
+ // Compatible, as the gadget volume defines the relevant partial properties
+ match, err := gadget.EnsureVolumeCompatibility(gadgetVolume, &diskVol, nil)
+ c.Assert(err, IsNil)
+ c.Assert(match, DeepEquals, map[int]*gadget.OnDiskStructure{
+ 0: &mockDiskVolDataAndSaveParts.Structure[1],
+ 1: &mockDiskVolDataAndSaveParts.Structure[2],
+ })
+
+ // Not compatible once partial no longer includes structure
+ gadgetVolume.Partial = []gadget.PartialProperty{gadget.PartialSchema, gadget.PartialSize, gadget.PartialFilesystem}
+ match, err = gadget.EnsureVolumeCompatibility(gadgetVolume, &diskVol, nil)
+ c.Assert(match, IsNil)
+ c.Assert(err.Error(), Equals, "cannot find disk partition /dev/node1 (starting at 1048576) in gadget")
+}
+
var multipleUC20DisksDeviceTraitsMap = map[string]gadget.DiskVolumeDeviceTraits{
"foo": gadgettest.VMExtraVolumeDeviceTraits,
"pc": gadgettest.VMSystemVolumeDeviceTraits,
@@ -4350,10 +4440,10 @@ func (s *gadgetYamlTestSuite) TestValidStartOffset(c *C) {
{
vs: gadget.Volume{
Structure: []gadget.VolumeStructure{
- {Offset: asOffsetPtr(0), MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
- {Offset: nil, MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
- {Offset: nil, MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
- {Offset: asOffsetPtr(50), MinSize: 100, Size: 100, EnclosingVolume: &gadget.Volume{}},
+ {Offset: asOffsetPtr(0), MinSize: 10, Size: 20},
+ {Offset: nil, MinSize: 10, Size: 20},
+ {Offset: nil, MinSize: 10, Size: 20},
+ {Offset: asOffsetPtr(50), MinSize: 100, Size: 100},
},
},
votcs: []validOffsetTc{
@@ -4380,9 +4470,9 @@ func (s *gadgetYamlTestSuite) TestValidStartOffset(c *C) {
{
vs: gadget.Volume{
Structure: []gadget.VolumeStructure{
- {Offset: asOffsetPtr(0), MinSize: 10, Size: 100, EnclosingVolume: &gadget.Volume{}},
- {Offset: nil, MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
- {Offset: asOffsetPtr(80), MinSize: 100, Size: 100, EnclosingVolume: &gadget.Volume{}},
+ {Offset: asOffsetPtr(0), MinSize: 10, Size: 100},
+ {Offset: nil, MinSize: 10, Size: 10},
+ {Offset: asOffsetPtr(80), MinSize: 100, Size: 100},
},
},
votcs: []validOffsetTc{
@@ -4403,10 +4493,10 @@ func (s *gadgetYamlTestSuite) TestValidStartOffset(c *C) {
// comments in function).
vs: gadget.Volume{
Structure: []gadget.VolumeStructure{
- {Offset: asOffsetPtr(0), MinSize: 20, Size: 40, EnclosingVolume: &gadget.Volume{}},
- {Offset: nil, MinSize: 20, Size: 40, EnclosingVolume: &gadget.Volume{}},
- {Offset: nil, MinSize: 20, Size: 20, EnclosingVolume: &gadget.Volume{}},
- {Offset: nil, MinSize: 20, Size: 20, EnclosingVolume: &gadget.Volume{}},
+ {Offset: asOffsetPtr(0), MinSize: 20, Size: 40},
+ {Offset: nil, MinSize: 20, Size: 40},
+ {Offset: nil, MinSize: 20, Size: 20},
+ {Offset: nil, MinSize: 20, Size: 20},
{Offset: asOffsetPtr(100), MinSize: 100, Size: 100},
},
},
@@ -4418,7 +4508,26 @@ func (s *gadgetYamlTestSuite) TestValidStartOffset(c *C) {
},
description: "test three",
},
+ {
+ vs: gadget.Volume{
+ Partial: []gadget.PartialProperty{gadget.PartialSize},
+ Structure: []gadget.VolumeStructure{
+ {Offset: asOffsetPtr(0), MinSize: 20, Size: 40},
+ {Offset: nil},
+ {Offset: nil, MinSize: 20, Size: 20},
+ },
+ },
+ votcs: []validOffsetTc{
+ {structIdx: 2, offset: 19, err: gadget.NewInvalidOffsetError(19, 20, gadget.UnboundedStructureOffset)},
+ {structIdx: 2, offset: 1000, err: nil},
+ },
+ description: "test four",
+ },
} {
+ for sidx := range tc.vs.Structure {
+ tc.vs.Structure[sidx].EnclosingVolume = &tc.vs
+ }
+
for _, votc := range tc.votcs {
c.Logf("testing valid offset: %s (%+v)", tc.description, votc)
if votc.err == nil {
@@ -4704,6 +4813,21 @@ volumes:
c.Assert(err.Error(), Equals, `invalid volume "frobinator-image": invalid structure #4 ("data"): invalid type "0FC63DAF-8483-4772-8E79-3D69D8477DE4": both MBR type and GUID structure type needs to be defined on partial schemas`)
}
+func (s *gadgetYamlTestSuite) TestGadgetPartialSchemaButStillSet(c *C) {
+ var yaml = []byte(`
+volumes:
+ frobinator-image:
+ partial: [schema]
+ schema: gpt
+ bootloader: u-boot
+`)
+
+ // Defining the schema while it is marked partial is an error
+ _, err := gadget.InfoFromGadgetYaml(yaml, nil)
+ c.Assert(err.Error(), Equals,
+ `invalid volume "frobinator-image": partial schema is set but schema is still specified as "gpt"`)
+}
+
func (s *gadgetYamlTestSuite) TestGadgetPartialStructure(c *C) {
var yaml = []byte(`
volumes:
@@ -4731,3 +4855,52 @@ volumes:
_, err := gadget.InfoFromGadgetYaml(yaml, nil)
c.Assert(err, IsNil)
}
+
+func newPartialGadgetYaml(c *C) *gadget.Info {
+ gi, err := gadget.InfoFromGadgetYaml([]byte(mockPartialGadgetYaml), coreMod)
+ c.Assert(err, IsNil)
+ return gi
+}
+
+func (s *gadgetCompatibilityTestSuite) TestPartialGadgetIsCompatible(c *C) {
+ gi1 := newPartialGadgetYaml(c)
+ gi2 := newPartialGadgetYaml(c)
+
+ // self-compatible
+ err := gadget.IsCompatible(gi1, gi2)
+ c.Check(err, IsNil)
+
+ // from partial schema to defined is ok
+ gi1 = newPartialGadgetYaml(c)
+ gi2 = newPartialGadgetYaml(c)
+ gi2.Volumes["pc"].Partial = []gadget.PartialProperty{gadget.PartialStructure, gadget.PartialFilesystem, gadget.PartialSize}
+ gi2.Volumes["pc"].Schema = "gpt"
+ err = gadget.IsCompatible(gi1, gi2)
+ c.Check(err, IsNil)
+
+ // from defined to partial schema is not
+ gi1 = newPartialGadgetYaml(c)
+ gi1.Volumes["pc"].Partial = []gadget.PartialProperty{gadget.PartialStructure, gadget.PartialFilesystem, gadget.PartialSize}
+ gi1.Volumes["pc"].Schema = "gpt"
+ gi2 = newPartialGadgetYaml(c)
+ err = gadget.IsCompatible(gi1, gi2)
+ c.Check(err.Error(), Equals, "incompatible layout change: new schema is partial, while old was not")
+
+ // set filesystems in new
+ gi1 = newPartialGadgetYaml(c)
+ gi2 = newPartialGadgetYaml(c)
+ gi2.Volumes["pc"].Partial = []gadget.PartialProperty{gadget.PartialStructure, gadget.PartialSchema, gadget.PartialSize}
+ for istr := range gi2.Volumes["pc"].Structure {
+ gi2.Volumes["pc"].Structure[istr].Filesystem = "ext4"
+ }
+ err = gadget.IsCompatible(gi1, gi2)
+ c.Check(err, IsNil)
+
+ // set missing sizes in new
+ gi1 = newPartialGadgetYaml(c)
+ gi2 = newPartialGadgetYaml(c)
+ gi2.Volumes["pc"].Partial = []gadget.PartialProperty{gadget.PartialStructure, gadget.PartialFilesystem, gadget.PartialSchema}
+ gi2.Volumes["pc"].Structure[0].Size = quantity.SizeMiB
+ err = gadget.IsCompatible(gi1, gi2)
+ c.Check(err, IsNil)
+}
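
As a minimal sketch, not an actual test in the suite, the parsed partial volume from the yaml above can be inspected with the HasPartial helper and the PartialProperty constants visible in this change (gocheck checker c assumed from the surrounding suite):

	gi := newPartialGadgetYaml(c)
	vol := gi.Volumes["pc"]
	// partial: [schema, structure, filesystem, size] in the yaml above, so
	// all four properties may be completed later by the installer
	c.Check(vol.HasPartial(gadget.PartialSchema), Equals, true)
	c.Check(vol.HasPartial(gadget.PartialStructure), Equals, true)
	c.Check(vol.HasPartial(gadget.PartialFilesystem), Equals, true)
	c.Check(vol.HasPartial(gadget.PartialSize), Equals, true)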
diff --git a/gadget/gadgettest/examples.go b/gadget/gadgettest/examples.go
index 904672367e..0e48ea618d 100644
--- a/gadget/gadgettest/examples.go
+++ b/gadget/gadgettest/examples.go
@@ -855,6 +855,7 @@ volumes:
content:
- image: pc-boot.img
- name: BIOS Boot
+ role: system-seed-null
type: DA,21686148-6449-6E6F-744E-656564454649
size: 1M
offset: 1M
@@ -901,6 +902,75 @@ volumes:
size: 4G
`
+const SingleVolumeClassicWithModesPartialGadgetYaml = `
+volumes:
+ pc:
+ partial: [schema, structure, filesystem, size]
+ bootloader: grub
+ structure:
+ - name: ubuntu-seed
+ role: system-seed-null
+ filesystem: vfat
+ type: EF,C12A7328-F81F-11D2-BA4B-00A0C93EC93B
+ offset: 2M
+ size: 1200M
+ - name: ubuntu-boot
+ role: system-boot
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ - name: ubuntu-save
+ role: system-save
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ - name: ubuntu-data
+ role: system-data
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ size: 1G
+`
+
+const SingleVolumeClassicWithModesFilledPartialGadgetYaml = `
+volumes:
+ pc:
+ bootloader: grub
+ partial: [structure]
+ schema: gpt
+ structure:
+ - name: ubuntu-seed
+ role: system-seed-null
+ filesystem: vfat
+ type: EF,C12A7328-F81F-11D2-BA4B-00A0C93EC93B
+ offset: 2M
+ size: 99M
+ update:
+ edition: 2
+ content:
+ - source: grubx64.efi
+ target: EFI/boot/grubx64.efi
+ - source: shim.efi.signed
+ target: EFI/boot/bootx64.efi
+ - name: ubuntu-boot
+ role: system-boot
+ filesystem: ext4
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ offset: 1202M
+ size: 750M
+ update:
+ edition: 1
+ content:
+ - source: grubx64.efi
+ target: EFI/boot/grubx64.efi
+ - source: shim.efi.signed
+ target: EFI/boot/bootx64.efi
+ - name: ubuntu-save
+ filesystem: ext4
+ role: system-save
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ size: 16M
+ - name: ubuntu-data
+ filesystem: ext4
+ role: system-data
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ size: 4G
+`
+
const MultiVolumeUC20GadgetYamlNoBIOS = SingleVolumeUC20GadgetYamlSeedNoBIOS + `
foo:
schema: gpt
diff --git a/gadget/gadgettest/gadgettest.go b/gadget/gadgettest/gadgettest.go
index b2d17fc591..40eff4a6f5 100644
--- a/gadget/gadgettest/gadgettest.go
+++ b/gadget/gadgettest/gadgettest.go
@@ -229,13 +229,3 @@ func MockGadgetPartitionedDisk(gadgetYaml, gadgetRoot string) (ginfo *gadget.Inf
return ginfo, laidVols, model, restore, nil
}
-
-// SetEnclosingVolumeInStructs is a helper that sets the pointer to
-// the Volume in all VolumeStructure objects it contains.
-func SetEnclosingVolumeInStructs(vv map[string]*gadget.Volume) {
- for _, v := range vv {
- for sidx := range v.Structure {
- v.Structure[sidx].EnclosingVolume = v
- }
- }
-}
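
With the SetEnclosingVolumeInStructs helper removed, tests that build volumes by hand set the back-pointer inline, as the TestValidStartOffset loop above now does. A minimal sketch of that pattern (the structure values are illustrative):

	vol := &gadget.Volume{
		Schema: "gpt",
		Structure: []gadget.VolumeStructure{
			{Name: "ubuntu-data", MinSize: 10, Size: 20},
		},
	}
	// every structure points back at the volume that contains it
	for sidx := range vol.Structure {
		vol.Structure[sidx].EnclosingVolume = vol
	}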
diff --git a/gadget/install/content.go b/gadget/install/content.go
index 55b8d4c213..ea9c2001f9 100644
--- a/gadget/install/content.go
+++ b/gadget/install/content.go
@@ -24,6 +24,7 @@ import (
"os"
"path/filepath"
"strings"
+ "syscall"
"github.com/snapcore/snapd/dirs"
"github.com/snapcore/snapd/gadget"
@@ -67,6 +68,18 @@ func mountFilesystem(fsDevice, fs, mountpoint string) error {
return nil
}
+func unmountWithFallbackToLazy(mntPt, operationMsg string) error {
+ if err := sysUnmount(mntPt, 0); err != nil {
+ logger.Noticef("cannot unmount %s after %s: %v (trying lazy unmount next)", mntPt, operationMsg, err)
+ // lazy umount on error, see LP:2025402
+ if err = sysUnmount(mntPt, syscall.MNT_DETACH); err != nil {
+ logger.Noticef("cannot lazy unmount %q: %v", mntPt, err)
+ return err
+ }
+ }
+ return nil
+}
+
// writeContent populates the given on-disk filesystem structure with a
// corresponding filesystem device, according to the contents defined in the
// gadget.
@@ -82,9 +95,9 @@ func writeFilesystemContent(laidOut *gadget.LaidOutStructure, fsDevice string, o
return fmt.Errorf("cannot mount %q at %q: %v", fsDevice, mountpoint, err)
}
defer func() {
- errUnmount := sysUnmount(mountpoint, 0)
- if err == nil {
- err = errUnmount
+ errUnmount := unmountWithFallbackToLazy(mountpoint, "writing filesystem content")
+ if err == nil && errUnmount != nil {
+ err = fmt.Errorf("cannot unmount %v after writing filesystem content: %v", fsDevice, errUnmount)
}
}()
fs, err := gadget.NewMountedFilesystemWriter(laidOut, observer)
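
The fallback added in unmountWithFallbackToLazy can be sketched standalone with just the standard library; the mount point below is hypothetical, so the program merely reports an error unless that path is actually mounted:

	package main

	import (
		"log"
		"syscall"
	)

	// unmountOrDetach tries a regular unmount first and, if that fails,
	// falls back to a lazy (detached) unmount, mirroring the behaviour
	// added above.
	func unmountOrDetach(mntPt string) error {
		if err := syscall.Unmount(mntPt, 0); err != nil {
			log.Printf("cannot unmount %s: %v (trying lazy unmount next)", mntPt, err)
			return syscall.Unmount(mntPt, syscall.MNT_DETACH)
		}
		return nil
	}

	func main() {
		// hypothetical mount point, for illustration only
		if err := unmountOrDetach("/run/example/mnt"); err != nil {
			log.Printf("cannot lazy unmount %q: %v", "/run/example/mnt", err)
		}
	}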
diff --git a/gadget/install/content_test.go b/gadget/install/content_test.go
index b84ad925c4..04132dc13f 100644
--- a/gadget/install/content_test.go
+++ b/gadget/install/content_test.go
@@ -24,6 +24,7 @@ import (
"fmt"
"io/ioutil"
"path/filepath"
+ "syscall"
. "gopkg.in/check.v1"
@@ -33,6 +34,7 @@ import (
"github.com/snapcore/snapd/gadget/gadgettest"
"github.com/snapcore/snapd/gadget/install"
"github.com/snapcore/snapd/gadget/quantity"
+ "github.com/snapcore/snapd/logger"
"github.com/snapcore/snapd/testutil"
)
@@ -181,7 +183,7 @@ func (s *contentTestSuite) TestWriteFilesystemContent(c *C) {
}, {
mountErr: nil,
unmountErr: errors.New("unmount error"),
- err: "unmount error",
+ err: "cannot unmount /dev/node2 after writing filesystem content: unmount error",
}, {
observeErr: errors.New("observe error"),
err: "cannot create filesystem image: cannot write filesystem content of source:grubx64.efi: cannot observe file write: observe error",
@@ -233,6 +235,85 @@ func (s *contentTestSuite) TestWriteFilesystemContent(c *C) {
}
}
+func (s *contentTestSuite) TestWriteFilesystemContentUnmountErrHandling(c *C) {
+ dirs.SetRootDir(c.MkDir())
+ defer dirs.SetRootDir(dirs.GlobalRootDir)
+
+ log, restore := logger.MockLogger()
+ defer restore()
+
+ type unmountArgs struct {
+ target string
+ flags int
+ }
+
+ restore = install.MockSysMount(func(source, target, fstype string, flags uintptr, data string) error {
+ return nil
+ })
+ defer restore()
+
+ // copy existing mock
+ m := mockOnDiskStructureSystemSeed(s.gadgetRoot)
+ obs := &mockWriteObserver{
+ c: c,
+ observeErr: nil,
+ expectedRole: m.Role(),
+ }
+
+ for _, tc := range []struct {
+ unmountErr error
+ lazyUnmountErr error
+
+ expectedErr string
+ }{
+ {
+ nil,
+ nil,
+ "",
+ }, {
+ errors.New("umount error"),
+ nil,
+ "", // no error as lazy unmount succeeded
+ }, {
+ errors.New("umount error"),
+ errors.New("lazy umount err"),
+ `cannot unmount /dev/node2 after writing filesystem content: lazy umount err`},
+ } {
+ log.Reset()
+
+ var unmountCalls []unmountArgs
+ restore = install.MockSysUnmount(func(target string, flags int) error {
+ unmountCalls = append(unmountCalls, unmountArgs{target, flags})
+ switch flags {
+ case 0:
+ return tc.unmountErr
+ case syscall.MNT_DETACH:
+ return tc.lazyUnmountErr
+ default:
+ return fmt.Errorf("unexpected mount flag %v", flags)
+ }
+ })
+ defer restore()
+
+ err := install.WriteFilesystemContent(m, "/dev/node2", obs)
+ if tc.expectedErr == "" {
+ c.Assert(err, IsNil)
+ } else {
+ c.Assert(err, ErrorMatches, tc.expectedErr)
+ }
+ if tc.unmountErr == nil {
+ c.Check(unmountCalls, HasLen, 1)
+ c.Check(unmountCalls[0].flags, Equals, 0)
+ c.Check(log.String(), Equals, "")
+ } else {
+ c.Check(unmountCalls, HasLen, 2)
+ c.Check(unmountCalls[0].flags, Equals, 0)
+ c.Check(unmountCalls[1].flags, Equals, syscall.MNT_DETACH)
+ c.Check(log.String(), Matches, `(?sm).* cannot unmount /.*/run/snapd/gadget-install/dev-node2 after writing filesystem content: umount error \(trying lazy unmount next\)`)
+ }
+ }
+}
+
func (s *contentTestSuite) TestMakeFilesystem(c *C) {
mockUdevadm := testutil.MockCommand(c, "udevadm", "")
defer mockUdevadm.Restore()
diff --git a/gadget/install/install.go b/gadget/install/install.go
index 4bd942353b..3541baf536 100644
--- a/gadget/install/install.go
+++ b/gadget/install/install.go
@@ -567,10 +567,7 @@ func MountVolumes(onVolumes map[string]*gadget.Volume, encSetupData *EncryptionS
numSeedPart := 0
unmount = func() (err error) {
for _, mntPt := range mountPoints {
- errUnmount := sysUnmount(mntPt, 0)
- if errUnmount != nil {
- logger.Noticef("cannot unmount %q: %v", mntPt, errUnmount)
- }
+ errUnmount := unmountWithFallbackToLazy(mntPt, "mounting volumes")
// Make sure we do not set err to nil if it had already an error
if errUnmount != nil {
err = errUnmount
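
For context, the unmount callback returned by MountVolumes is what picks up the lazy fallback above. A condensed sketch of the call pattern, matching the signature exercised by the tests below (encryption setup data omitted, error handling shortened):

	seedMntDir, unmount, err := install.MountVolumes(ginfo.Volumes, nil)
	if err != nil {
		return err
	}
	defer func() {
		if errUnmount := unmount(); errUnmount != nil {
			logger.Noticef("cannot unmount volumes: %v", errUnmount)
		}
	}()
	// write content under seedMntDir ...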
diff --git a/gadget/install/install_test.go b/gadget/install/install_test.go
index 02dcdca8f0..e400c96365 100644
--- a/gadget/install/install_test.go
+++ b/gadget/install/install_test.go
@@ -26,8 +26,10 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "runtime"
"strconv"
"strings"
+ "syscall"
"time"
. "gopkg.in/check.v1"
@@ -38,6 +40,7 @@ import (
"github.com/snapcore/snapd/gadget/gadgettest"
"github.com/snapcore/snapd/gadget/install"
"github.com/snapcore/snapd/gadget/quantity"
+ "github.com/snapcore/snapd/logger"
"github.com/snapcore/snapd/osutil"
"github.com/snapcore/snapd/osutil/disks"
"github.com/snapcore/snapd/secboot"
@@ -1075,6 +1078,24 @@ type encryptPartitionsOpts struct {
encryptType secboot.EncryptionType
}
+func expectedCipher() string {
+ switch runtime.GOARCH {
+ case "arm":
+ return "aes-cbc-essiv:sha256"
+ default:
+ return "aes-xts-plain64"
+ }
+}
+
+func expectedKeysize() string {
+ switch runtime.GOARCH {
+ case "arm":
+ return "256"
+ default:
+ return "512"
+ }
+}
+
func (s *installSuite) testEncryptPartitions(c *C, opts encryptPartitionsOpts) {
vdaSysPath := "/sys/devices/pci0000:00/0000:00:03.0/virtio1/block/vda"
restore := install.MockSysfsPathForBlockDevice(func(device string) (string, error) {
@@ -1113,10 +1134,10 @@ func (s *installSuite) testEncryptPartitions(c *C, opts encryptPartitionsOpts) {
c.Assert(err, IsNil)
c.Assert(mockCryptsetup.Calls(), DeepEquals, [][]string{
- {"cryptsetup", "-q", "luksFormat", "--type", "luks2", "--key-file", "-", "--cipher", "aes-xts-plain64", "--key-size", "512", "--label", "ubuntu-save-enc", "--pbkdf", "argon2i", "--pbkdf-force-iterations", "4", "--pbkdf-memory", "32", "--luks2-metadata-size", "2048k", "--luks2-keyslots-size", "2560k", "/dev/vda4"},
+ {"cryptsetup", "-q", "luksFormat", "--type", "luks2", "--key-file", "-", "--cipher", expectedCipher(), "--key-size", expectedKeysize(), "--label", "ubuntu-save-enc", "--pbkdf", "argon2i", "--pbkdf-force-iterations", "4", "--pbkdf-memory", "32", "--luks2-metadata-size", "2048k", "--luks2-keyslots-size", "2560k", "/dev/vda4"},
{"cryptsetup", "config", "--priority", "prefer", "--key-slot", "0", "/dev/vda4"},
{"cryptsetup", "open", "--key-file", "-", "/dev/vda4", "ubuntu-save"},
- {"cryptsetup", "-q", "luksFormat", "--type", "luks2", "--key-file", "-", "--cipher", "aes-xts-plain64", "--key-size", "512", "--label", "ubuntu-data-enc", "--pbkdf", "argon2i", "--pbkdf-force-iterations", "4", "--pbkdf-memory", "32", "--luks2-metadata-size", "2048k", "--luks2-keyslots-size", "2560k", "/dev/vda5"},
+ {"cryptsetup", "-q", "luksFormat", "--type", "luks2", "--key-file", "-", "--cipher", expectedCipher(), "--key-size", expectedKeysize(), "--label", "ubuntu-data-enc", "--pbkdf", "argon2i", "--pbkdf-force-iterations", "4", "--pbkdf-memory", "32", "--luks2-metadata-size", "2048k", "--luks2-keyslots-size", "2560k", "/dev/vda5"},
{"cryptsetup", "config", "--priority", "prefer", "--key-slot", "0", "/dev/vda5"},
{"cryptsetup", "open", "--key-file", "-", "/dev/vda5", "ubuntu-data"},
})
@@ -1146,3 +1167,269 @@ func (s *installSuite) TestInstallEncryptPartitionsNoDeviceSet(c *C) {
c.Check(err, ErrorMatches, "device field for volume struct .* cannot be empty")
c.Check(encryptSetup, IsNil)
}
+
+type mountVolumesOpts struct {
+ encryption bool
+}
+
+func (s *installSuite) testMountVolumes(c *C, opts mountVolumesOpts) {
+ seedMntPt := filepath.Join(s.dir, "run/mnt/ubuntu-seed")
+ bootMntPt := filepath.Join(s.dir, "run/mnt/ubuntu-boot")
+ saveMntPt := filepath.Join(s.dir, "run/mnt/ubuntu-save")
+ dataMntPt := filepath.Join(s.dir, "run/mnt/ubuntu-data")
+ mountCall := 0
+ restore := install.MockSysMount(func(source, target, fstype string, flags uintptr, data string) error {
+ mountCall++
+ switch mountCall {
+ case 1:
+ c.Assert(source, Equals, "/dev/vda2")
+ c.Assert(target, Equals, seedMntPt)
+ c.Assert(fstype, Equals, "vfat")
+ c.Assert(flags, Equals, uintptr(0))
+ c.Assert(data, Equals, "")
+ case 2:
+ c.Assert(source, Equals, "/dev/vda3")
+ c.Assert(target, Equals, bootMntPt)
+ c.Assert(fstype, Equals, "ext4")
+ c.Assert(flags, Equals, uintptr(0))
+ c.Assert(data, Equals, "")
+ case 3:
+ if opts.encryption {
+ c.Assert(source, Equals, "/dev/mapper/ubuntu-save")
+ } else {
+ c.Assert(source, Equals, "/dev/vda4")
+ }
+ c.Assert(target, Equals, saveMntPt)
+ c.Assert(fstype, Equals, "ext4")
+ c.Assert(flags, Equals, uintptr(0))
+ c.Assert(data, Equals, "")
+ case 4:
+ if opts.encryption {
+ c.Assert(source, Equals, "/dev/mapper/ubuntu-data")
+ } else {
+ c.Assert(source, Equals, "/dev/vda5")
+ }
+ c.Assert(target, Equals, dataMntPt)
+ c.Assert(fstype, Equals, "ext4")
+ c.Assert(flags, Equals, uintptr(0))
+ c.Assert(data, Equals, "")
+ default:
+ c.Errorf("unexpected mount call (%d)", mountCall)
+ return fmt.Errorf("test broken")
+ }
+ return nil
+ })
+ defer restore()
+
+ umountCall := 0
+ restore = install.MockSysUnmount(func(target string, flags int) error {
+ umountCall++
+ switch umountCall {
+ case 1:
+ c.Assert(target, Equals, seedMntPt)
+ case 2:
+ c.Assert(target, Equals, bootMntPt)
+ case 3:
+ c.Assert(target, Equals, saveMntPt)
+ case 4:
+ c.Assert(target, Equals, dataMntPt)
+ default:
+ c.Errorf("unexpected umount call (%d)", umountCall)
+ return fmt.Errorf("test broken")
+ }
+ c.Assert(flags, Equals, 0)
+ return nil
+ })
+ defer restore()
+
+ gadgetRoot := filepath.Join(c.MkDir(), "gadget")
+ ginfo, _, _, restore, err := gadgettest.MockGadgetPartitionedDisk(gadgettest.SingleVolumeUC20GadgetYaml, gadgetRoot)
+ c.Assert(err, IsNil)
+ defer restore()
+
+ // Fill in additional information about the target device as the installer does
+ partIdx := 1
+ for i, part := range ginfo.Volumes["pc"].Structure {
+ if part.Role == "mbr" {
+ continue
+ }
+ ginfo.Volumes["pc"].Structure[i].Device = "/dev/vda" + strconv.Itoa(partIdx)
+ partIdx++
+ }
+ // Fill encrypted partitions if encrypting
+ var esd *install.EncryptionSetupData
+ if opts.encryption {
+ labelToEncData := map[string]*install.MockEncryptedDeviceAndRole{
+ "ubuntu-save": {
+ Role: "system-save",
+ EncryptedDevice: "/dev/mapper/ubuntu-save",
+ },
+ "ubuntu-data": {
+ Role: "system-data",
+ EncryptedDevice: "/dev/mapper/ubuntu-data",
+ },
+ }
+ esd = install.MockEncryptionSetupData(labelToEncData)
+ }
+
+ // 10 million mocks later ...
+ // finally actually run MountVolumes
+ seedMntDir, unmount, err := install.MountVolumes(ginfo.Volumes, esd)
+ c.Assert(err, IsNil)
+ c.Assert(seedMntDir, Equals, seedMntPt)
+
+ err = unmount()
+ c.Assert(err, IsNil)
+
+ c.Assert(mountCall, Equals, 4)
+ c.Assert(umountCall, Equals, 4)
+}
+
+func (s *installSuite) TestMountVolumesSimpleHappy(c *C) {
+ s.testMountVolumes(c, mountVolumesOpts{
+ encryption: false,
+ })
+}
+
+func (s *installSuite) TestMountVolumesSimpleHappyEncrypted(c *C) {
+ s.testMountVolumes(c, mountVolumesOpts{
+ encryption: true,
+ })
+}
+
+func (s *installSuite) TestMountVolumesZeroSeeds(c *C) {
+ onVolumes := map[string]*gadget.Volume{}
+ _, _, err := install.MountVolumes(onVolumes, nil)
+ c.Assert(err, ErrorMatches, "there are 0 system-seed{,-null} partitions, expected one")
+}
+
+func (s *installSuite) TestMountVolumesManySeeds(c *C) {
+ onVolumes := map[string]*gadget.Volume{
+ "pc": {
+ Structure: []gadget.VolumeStructure{
+ {Name: "system-seed", Filesystem: "vfat", Role: gadget.SystemSeed},
+ {Name: "system-seed-null", Filesystem: "vfat", Role: gadget.SystemSeedNull},
+ },
+ },
+ }
+
+ mountCall := 0
+ restore := install.MockSysMount(func(source, target, fstype string, flags uintptr, data string) error {
+ mountCall++
+ c.Assert(flags, Equals, uintptr(0))
+ return nil
+ })
+ defer restore()
+
+ umountCall := 0
+ restore = install.MockSysUnmount(func(target string, flags int) error {
+ umountCall++
+ c.Assert(flags, Equals, 0)
+ return nil
+ })
+ defer restore()
+
+ _, _, err := install.MountVolumes(onVolumes, nil)
+ c.Assert(err, ErrorMatches, "there are 2 system-seed{,-null} partitions, expected one")
+
+ c.Assert(mountCall, Equals, 2)
+ // check unmount is called implicitly on error for cleanup
+ c.Assert(umountCall, Equals, 2)
+}
+
+func (s *installSuite) TestMountVolumesLazyUnmount(c *C) {
+ seedMntPt := filepath.Join(s.dir, "run/mnt/ubuntu-seed")
+ onVolumes := map[string]*gadget.Volume{
+ "pc": {
+ Structure: []gadget.VolumeStructure{
+ {Name: "system-seed", Filesystem: "vfat", Role: gadget.SystemSeed},
+ },
+ },
+ }
+
+ mountCall := 0
+ restore := install.MockSysMount(func(source, target, fstype string, flags uintptr, data string) error {
+ mountCall++
+ c.Assert(flags, Equals, uintptr(0))
+ return nil
+ })
+ defer restore()
+
+ umountCall := 0
+ restore = install.MockSysUnmount(func(target string, flags int) error {
+ umountCall++
+ if umountCall == 1 {
+ c.Assert(flags, Equals, 0)
+ return fmt.Errorf("forcing lazy unmount")
+ } else {
+ // check fallback to lazy unmount, see LP:2025402
+ c.Assert(flags, Equals, syscall.MNT_DETACH)
+ return nil
+ }
+ })
+ defer restore()
+
+ log, restore := logger.MockLogger()
+ defer restore()
+
+ seedMntDir, unmount, err := install.MountVolumes(onVolumes, nil)
+ c.Assert(err, IsNil)
+ c.Assert(seedMntDir, Equals, seedMntPt)
+
+ err = unmount()
+ c.Assert(err, IsNil)
+
+ c.Assert(mountCall, Equals, 1)
+ c.Assert(umountCall, Equals, 2)
+
+ c.Check(log.String(), testutil.Contains, fmt.Sprintf("cannot unmount %s after mounting volumes: forcing lazy unmount (trying lazy unmount next)", seedMntPt))
+}
+
+func (s *installSuite) TestMountVolumesLazyUnmountError(c *C) {
+ seedMntPt := filepath.Join(s.dir, "run/mnt/ubuntu-seed")
+ onVolumes := map[string]*gadget.Volume{
+ "pc": {
+ Structure: []gadget.VolumeStructure{
+ {Name: "system-seed", Filesystem: "vfat", Role: gadget.SystemSeed},
+ },
+ },
+ }
+
+ mountCall := 0
+ restore := install.MockSysMount(func(source, target, fstype string, flags uintptr, data string) error {
+ mountCall++
+ c.Assert(flags, Equals, uintptr(0))
+ return nil
+ })
+ defer restore()
+
+ umountCall := 0
+ restore = install.MockSysUnmount(func(target string, flags int) error {
+ umountCall++
+ if umountCall == 1 {
+ c.Assert(flags, Equals, 0)
+ return fmt.Errorf("forcing lazy unmount")
+ } else {
+ // check fallback to lazy unmount, see LP:2025402
+ c.Assert(flags, Equals, syscall.MNT_DETACH)
+ return fmt.Errorf("lazy unmount failed")
+ }
+ })
+ defer restore()
+
+ log, restore := logger.MockLogger()
+ defer restore()
+
+ seedMntDir, unmount, err := install.MountVolumes(onVolumes, nil)
+ c.Assert(err, IsNil)
+ c.Assert(seedMntDir, Equals, seedMntPt)
+
+ err = unmount()
+ c.Assert(err, ErrorMatches, "lazy unmount failed")
+
+ c.Assert(mountCall, Equals, 1)
+ c.Assert(umountCall, Equals, 2)
+
+ c.Check(log.String(), testutil.Contains, fmt.Sprintf("cannot unmount %s after mounting volumes: forcing lazy unmount (trying lazy unmount next)", seedMntPt))
+ c.Check(log.String(), testutil.Contains, fmt.Sprintf("cannot lazy unmount %q: lazy unmount failed", seedMntPt))
+}
diff --git a/gadget/install/partition.go b/gadget/install/partition.go
index 5598e2efb7..4bb8e7037c 100644
--- a/gadget/install/partition.go
+++ b/gadget/install/partition.go
@@ -143,6 +143,15 @@ func buildPartitionList(dl *gadget.OnDiskVolume, lov *gadget.LaidOutVolume, opts
}
sectorSize := uint64(dl.SectorSize)
+ // The partition / disk index - we find the maximum partition number
+ // currently on the disk and start from there for the partitions we
+ // create. This is necessary as some partitions might not be defined
+ // by the gadget when it has PartialStructure set. Note that this
+ // condition is checked by EnsureVolumeCompatibility, which is called
+ // before this function; muinstaller also checks for PartialStructure
+ // before this runs.
+ pIndex := 0
+
// Keep track what partitions we already have on disk - the keys to this map
// is the starting sector of the structure we have seen.
// TODO: use quantity.SectorOffset or similar when that is available
@@ -151,6 +160,9 @@ func buildPartitionList(dl *gadget.OnDiskVolume, lov *gadget.LaidOutVolume, opts
for _, s := range dl.Structure {
start := uint64(s.StartOffset) / sectorSize
seen[start] = true
+ if s.DiskIndex > pIndex {
+ pIndex = s.DiskIndex
+ }
}
// Check if the last partition has a system-data role
@@ -162,10 +174,6 @@ func buildPartitionList(dl *gadget.OnDiskVolume, lov *gadget.LaidOutVolume, opts
}
}
- // The partition / disk index - note that it will start at 1, we increment
- // it before we use it in the loop below
- pIndex := 0
-
// Write new partition data in named-fields format
buf := &bytes.Buffer{}
for _, laidOut := range lov.LaidOutStructure {
@@ -173,14 +181,14 @@ func buildPartitionList(dl *gadget.OnDiskVolume, lov *gadget.LaidOutVolume, opts
continue
}
- pIndex++
-
- // Skip partitions that are already in the volume
+ // Skip gadget-defined partitions that are already present on the disk volume
startInSectors := uint64(laidOut.StartOffset) / sectorSize
if seen[startInSectors] {
continue
}
+ pIndex++
+
// Only allow creating certain partitions, namely the ubuntu-* roles
if !opts.CreateAllMissingPartitions && !gadget.IsCreatableAtInstall(laidOut.VolumeStructure) {
return nil, nil, fmt.Errorf("cannot create partition %s", laidOut)
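
The index seeding above can be illustrated standalone: the next partition number is one past the highest DiskIndex already present on the disk, so partitions not described by a partial-structure gadget keep their numbers. A self-contained sketch with hypothetical values:

	package main

	import "fmt"

	// onDiskPart models only the field the index seeding needs.
	type onDiskPart struct {
		Name      string
		DiskIndex int
	}

	// nextIndexSeed returns the highest existing index; callers increment
	// it before assigning it to each newly created partition.
	func nextIndexSeed(existing []onDiskPart) int {
		pIndex := 0
		for _, s := range existing {
			if s.DiskIndex > pIndex {
				pIndex = s.DiskIndex
			}
		}
		return pIndex
	}

	func main() {
		// hypothetical disk that already has two partitions the gadget
		// does not describe
		existing := []onDiskPart{{"BIOS Boot", 1}, {"ubuntu-seed", 2}}
		fmt.Println("first created partition gets index", nextIndexSeed(existing)+1)
	}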
diff --git a/gadget/install/partition_test.go b/gadget/install/partition_test.go
index bcc7d74f10..dfe140529e 100644
--- a/gadget/install/partition_test.go
+++ b/gadget/install/partition_test.go
@@ -191,61 +191,67 @@ var mockLaidoutStructureWritable = gadget.LaidOutStructure{
},
}
-var mockLaidoutStructureSave = gadget.LaidOutStructure{
- OnDiskStructure: gadget.OnDiskStructure{
- Node: "/dev/node3",
- Name: "Save",
- Size: 128 * quantity.SizeMiB,
- Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4",
- PartitionFSLabel: "ubuntu-save",
- PartitionFSType: "ext4",
- StartOffset: 1260388352,
- // Note the DiskIndex appears to be the same as the YamlIndex, but this is
- // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a
- // yaml structure (the MBR) that does not appear on disk
- DiskIndex: 3,
- },
- VolumeStructure: &gadget.VolumeStructure{
- VolumeName: "pc",
- Name: "Save",
- Label: "ubuntu-save",
- MinSize: 128 * quantity.SizeMiB,
- Size: 128 * quantity.SizeMiB,
- Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4",
- Role: "system-save",
- Filesystem: "ext4",
- Offset: asOffsetPtr(1260388352),
- YamlIndex: 3,
- },
+func createLaidoutStructureSave(enclosing *gadget.Volume) *gadget.LaidOutStructure {
+ return &gadget.LaidOutStructure{
+ OnDiskStructure: gadget.OnDiskStructure{
+ Node: "/dev/node3",
+ Name: "Save",
+ Size: 128 * quantity.SizeMiB,
+ Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4",
+ PartitionFSLabel: "ubuntu-save",
+ PartitionFSType: "ext4",
+ StartOffset: 1260388352,
+ // Note the DiskIndex appears to be the same as the YamlIndex, but this is
+ // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a
+ // yaml structure (the MBR) that does not appear on disk
+ DiskIndex: 3,
+ },
+ VolumeStructure: &gadget.VolumeStructure{
+ VolumeName: "pc",
+ Name: "Save",
+ Label: "ubuntu-save",
+ MinSize: 128 * quantity.SizeMiB,
+ Size: 128 * quantity.SizeMiB,
+ Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4",
+ Role: "system-save",
+ Filesystem: "ext4",
+ Offset: asOffsetPtr(1260388352),
+ YamlIndex: 3,
+ EnclosingVolume: enclosing,
+ },
+ }
}
-var mockLaidoutStructureWritableAfterSave = gadget.LaidOutStructure{
- OnDiskStructure: gadget.OnDiskStructure{
- Node: "/dev/node4",
- Name: "Writable",
- // expanded to fill the disk
- Size: 2*quantity.SizeGiB + 717*quantity.SizeMiB + 1031680,
- Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4",
- PartitionFSLabel: "ubuntu-data",
- PartitionFSType: "ext4",
- StartOffset: 1394606080,
- // Note the DiskIndex appears to be the same as the YamlIndex, but this is
- // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a
- // yaml structure (the MBR) that does not appear on disk
- DiskIndex: 4,
- },
- VolumeStructure: &gadget.VolumeStructure{
- VolumeName: "pc",
- Name: "Writable",
- MinSize: 1200 * quantity.SizeMiB,
- Size: 1200 * quantity.SizeMiB,
- Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4",
- Role: "system-data",
- Label: "ubuntu-data",
- Filesystem: "ext4",
- Offset: asOffsetPtr(1394606080),
- YamlIndex: 4,
- },
+func createLaidoutStructureWritableAfterSave(enclosing *gadget.Volume) *gadget.LaidOutStructure {
+ return &gadget.LaidOutStructure{
+ OnDiskStructure: gadget.OnDiskStructure{
+ Node: "/dev/node4",
+ Name: "Writable",
+ // expanded to fill the disk
+ Size: 2*quantity.SizeGiB + 717*quantity.SizeMiB + 1031680,
+ Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4",
+ PartitionFSLabel: "ubuntu-data",
+ PartitionFSType: "ext4",
+ StartOffset: 1394606080,
+ // Note the DiskIndex appears to be the same as the YamlIndex, but this is
+ // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a
+ // yaml structure (the MBR) that does not appear on disk
+ DiskIndex: 4,
+ },
+ VolumeStructure: &gadget.VolumeStructure{
+ VolumeName: "pc",
+ Name: "Writable",
+ MinSize: 1200 * quantity.SizeMiB,
+ Size: 1200 * quantity.SizeMiB,
+ Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4",
+ Role: "system-data",
+ Label: "ubuntu-data",
+ Filesystem: "ext4",
+ Offset: asOffsetPtr(1394606080),
+ YamlIndex: 4,
+ EnclosingVolume: enclosing,
+ },
+ }
}
type uc20Model struct{}
@@ -280,11 +286,47 @@ func (s *partitionTestSuite) TestBuildPartitionList(c *C) {
/dev/node4 : start= 2723840, size= 5664735, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, name="Writable"
`)
c.Check(create, NotNil)
- mockLaidoutStructureSave.VolumeStructure.EnclosingVolume = pv.Volume
- mockLaidoutStructureWritableAfterSave.VolumeStructure.EnclosingVolume = pv.Volume
c.Assert(create, DeepEquals, []gadget.LaidOutStructure{
- mockLaidoutStructureSave,
- mockLaidoutStructureWritableAfterSave,
+ *createLaidoutStructureSave(pv.Volume),
+ *createLaidoutStructureWritableAfterSave(pv.Volume),
+ })
+}
+
+func (s *partitionTestSuite) TestBuildPartitionListPartsNotInGadget(c *C) {
+ m := map[string]*disks.MockDiskMapping{
+ "/dev/node": makeMockDiskMappingIncludingPartitions(scriptPartitionsBiosSeed),
+ }
+
+ restore := disks.MockDeviceNameToDiskMapping(m)
+ defer restore()
+
+ // This gadget does not specify the bios partition, but it is on the disk
+ err := gadgettest.MakeMockGadget(s.gadgetRoot, gptGadgetContentWithGap)
+ c.Assert(err, IsNil)
+ pv, err := gadgettest.MustLayOutSingleVolumeFromGadget(s.gadgetRoot, "", uc20Mod)
+ c.Assert(err, IsNil)
+
+ dl, err := gadget.OnDiskVolumeFromDevice("/dev/node")
+ c.Assert(err, IsNil)
+
+ // the expected expanded writable partition starts at
+ // offset = (2M + 1200M + 128M) and its expanded size in sectors is
+ // (8388575*512 - start offset)/512
+ sfdiskInput, create, err := install.BuildPartitionList(dl, pv,
+ &install.CreateOptions{})
+ c.Assert(err, IsNil)
+ c.Assert(sfdiskInput.String(), Equals,
+ `/dev/node3 : start= 2461696, size= 262144, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, name="Save"
+/dev/node4 : start= 2723840, size= 5664735, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, name="Writable"
+`)
+ c.Check(create, NotNil)
+ saveStruct := createLaidoutStructureSave(pv.Volume)
+ saveStruct.VolumeStructure.YamlIndex = 1
+ dataStruct := createLaidoutStructureWritableAfterSave(pv.Volume)
+ dataStruct.VolumeStructure.YamlIndex = 2
+ c.Assert(create, DeepEquals, []gadget.LaidOutStructure{
+ *saveStruct,
+ *dataStruct,
})
}
@@ -881,6 +923,33 @@ const gptGadgetContentWithSave = `volumes:
size: 1200M
`
+const gptGadgetContentWithGap = `volumes:
+ pc:
+ bootloader: grub
+ partial: [ filesystem ]
+ structure:
+ - name: Recovery
+ offset: 2M
+ role: system-seed
+ filesystem: vfat
+ # UEFI will boot the ESP partition by default first
+ type: EF,C12A7328-F81F-11D2-BA4B-00A0C93EC93B
+ size: 1200M
+ content:
+ - source: grubx64.efi
+ target: EFI/boot/grubx64.efi
+ - name: Save
+ role: system-save
+ filesystem: ext4
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ size: 128M
+ - name: Writable
+ role: system-data
+ filesystem: ext4
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ size: 1200M
+`
+
const gptGadgetContentWithMinSize = `volumes:
pc:
bootloader: grub
diff --git a/gadget/layout.go b/gadget/layout.go
index 3700a89989..f623a8cd40 100644
--- a/gadget/layout.go
+++ b/gadget/layout.go
@@ -75,6 +75,12 @@ type PartiallyLaidOutVolume struct {
// LaidOutStructure describes a VolumeStructure coming from the gadget plus the
// OnDiskStructure that describes how it would be applied to a given disk and
// additional content used when writing/updating data in the structure.
+//
+// Note that care is needed when using the OnDiskStructure fields, as a
+// LaidOutStructure is sometimes created before we have information about the
+// partition it is finally matched to. This is especially important for the
+// StartOffset and Size fields. TODO: eventually create LaidOutStructure only
+// after this information is available.
type LaidOutStructure struct {
OnDiskStructure
// VolumeStructure is the volume structure defined in gadget.yaml
@@ -120,7 +126,7 @@ func (l LaidOutStructure) Role() string {
return l.VolumeStructure.Role
}
-// HasFilesystem returns true if the structure is using a filesystem.
+// HasFilesystem returns true if the gadget expects a filesystem.
func (l *LaidOutStructure) HasFilesystem() bool {
return l.VolumeStructure.HasFilesystem()
}
@@ -287,7 +293,7 @@ func LayoutVolume(volume *Volume, opts *LayoutOptions) (*LaidOutVolume, error) {
// creation is needed and is safe because each volume structure
// has a size so even without the structure content the layout
// can be calculated.
- if !opts.IgnoreContent {
+ if !opts.IgnoreContent && !structures[idx].HasFilesystem() {
content, err := layOutStructureContent(opts.GadgetRootDir, &structures[idx])
if err != nil {
return nil, err
@@ -497,9 +503,8 @@ func isLayoutCompatible(current, new *Volume) error {
if current.ID != new.ID {
return fmt.Errorf("incompatible ID change from %v to %v", current.ID, new.ID)
}
- if current.Schema != new.Schema {
- return fmt.Errorf("incompatible schema change from %v to %v",
- current.Schema, new.Schema)
+ if err := checkCompatibleSchema(current, new); err != nil {
+ return err
}
if current.Bootloader != new.Bootloader {
return fmt.Errorf("incompatible bootloader change from %v to %v",
@@ -515,7 +520,7 @@ func isLayoutCompatible(current, new *Volume) error {
// at the structure level we expect the volume to be identical
for i := range current.Structure {
- if err := canUpdateStructure(current.Structure, i, new.Structure, i, new.Schema); err != nil {
+ if err := canUpdateStructure(current, i, new, i); err != nil {
return fmt.Errorf("incompatible structure #%d (%q) change: %v", new.Structure[i].YamlIndex, new.Structure[i].Name, err)
}
}
diff --git a/gadget/partial.go b/gadget/partial.go
new file mode 100644
index 0000000000..5a5ceef138
--- /dev/null
+++ b/gadget/partial.go
@@ -0,0 +1,135 @@
+// -*- Mode: Go; indent-tabs-mode: t -*-
+
+/*
+ * Copyright (C) 2023 Canonical Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 3 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package gadget
+
+import "fmt"
+
+// ApplyInstallerVolumesToGadget takes the volume information returned
+// by the installer and applies it to the laid out volumes, filling in
+// the partially defined properties. It then checks that the resulting
+// gadget is fully specified.
+func ApplyInstallerVolumesToGadget(installerVols map[string]*Volume, lovs map[string]*LaidOutVolume) error {
+ for volName, lov := range lovs {
+ if len(lov.Partial) == 0 {
+ continue
+ }
+
+ insVol := installerVols[volName]
+ if insVol == nil {
+ return fmt.Errorf("installer did not provide information for volume %q", volName)
+ }
+
+ // TODO: handle partial structure; it is not yet clear what will be possible when it is set
+
+ if lov.HasPartial(PartialSchema) {
+ if insVol.Schema == "" {
+ return fmt.Errorf("installer did not provide schema for volume %q", volName)
+ }
+ lov.Schema = insVol.Schema
+ }
+
+ if lov.HasPartial(PartialFilesystem) {
+ if err := applyPartialFilesystem(insVol, lov, volName); err != nil {
+ return err
+ }
+ }
+
+ if lov.HasPartial(PartialSize) {
+ if err := applyPartialSize(insVol, lov, volName); err != nil {
+ return err
+ }
+ }
+
+ // The only thing that can still be partial is the structure
+ if lov.HasPartial(PartialStructure) {
+ lov.Partial = []PartialProperty{PartialStructure}
+ } else {
+ lov.Partial = []PartialProperty{}
+ }
+
+ // Now validate finalized volume
+ if err := validateVolume(lov.Volume); err != nil {
+ return fmt.Errorf("finalized volume %q is wrong: %v", lov.Name, err)
+ }
+ }
+
+ return nil
+}
+
+func applyPartialFilesystem(insVol *Volume, lov *LaidOutVolume, volName string) error {
+ for sidx := range lov.Structure {
+ // Two structures to modify due to copies inside LaidOutVolume
+ vs := &lov.Structure[sidx]
+ vsLos := lov.LaidOutStructure[sidx].VolumeStructure
+ if vs.Filesystem != "" || !vs.HasFilesystem() {
+ continue
+ }
+
+ insStr, err := structureByName(insVol.Structure, vs.Name)
+ if err != nil {
+ return err
+ }
+ if insStr.Filesystem == "" {
+ return fmt.Errorf("installer did not provide filesystem for structure %q in volume %q", vs.Name, volName)
+ }
+
+ vs.Filesystem = insStr.Filesystem
+ vsLos.Filesystem = insStr.Filesystem
+ }
+ return nil
+}
+
+func applyPartialSize(insVol *Volume, lov *LaidOutVolume, volName string) error {
+ for sidx := range lov.Structure {
+ // Two structures to modify due to copies inside LaidOutVolume
+ vs := &lov.Structure[sidx]
+ vsLos := lov.LaidOutStructure[sidx].VolumeStructure
+ if !vs.hasPartialSize() {
+ continue
+ }
+
+ insStr, err := structureByName(insVol.Structure, vs.Name)
+ if err != nil {
+ return err
+ }
+ if insStr.Size == 0 {
+ return fmt.Errorf("installer did not provide size for structure %q in volume %q", vs.Name, volName)
+ }
+ if insStr.Offset == nil {
+ return fmt.Errorf("installer did not provide offset for structure %q in volume %q", vs.Name, volName)
+ }
+
+ vs.Size = insStr.Size
+ vsLos.Size = insStr.Size
+ vs.Offset = insStr.Offset
+ vsLos.Offset = insStr.Offset
+ lov.LaidOutStructure[sidx].StartOffset = *insStr.Offset
+ }
+ return nil
+}
+
+func structureByName(vss []VolumeStructure, name string) (*VolumeStructure, error) {
+ for sidx := range vss {
+ if vss[sidx].Name == name {
+ return &vss[sidx], nil
+ }
+ }
+ return nil, fmt.Errorf("cannot find structure %q", name)
+}
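
A condensed usage sketch of ApplyInstallerVolumesToGadget, in the spirit of the tests in partial_test.go below: the laid out volumes come from elsewhere (the tests use LaidOutVolumesFromGadget), asOffsetPtr is the test helper seen above, and the installer-provided values here are purely illustrative:

	// installerVols carries what the installer decided for the partially
	// defined properties; names must match the gadget structure names
	installerVols := map[string]*gadget.Volume{
		"pc": {
			Name:   "pc",
			Schema: "gpt",
			Structure: []gadget.VolumeStructure{
				{
					Name:       "ubuntu-save",
					Filesystem: "ext4",
					Offset:     asOffsetPtr(1202 * quantity.OffsetMiB),
					Size:       16 * quantity.SizeMiB,
				},
			},
		},
	}
	if err := gadget.ApplyInstallerVolumesToGadget(installerVols, lovs); err != nil {
		return err
	}
	// lovs["pc"] is now fully specified, or an error was returned above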
diff --git a/gadget/partial_test.go b/gadget/partial_test.go
new file mode 100644
index 0000000000..142fe7981e
--- /dev/null
+++ b/gadget/partial_test.go
@@ -0,0 +1,283 @@
+// -*- Mode: Go; indent-tabs-mode: t -*-
+
+/*
+ * Copyright (C) 2023 Canonical Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 3 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package gadget_test
+
+import (
+ "io/ioutil"
+
+ "github.com/snapcore/snapd/gadget"
+ "github.com/snapcore/snapd/gadget/quantity"
+ "github.com/snapcore/snapd/secboot"
+ . "gopkg.in/check.v1"
+)
+
+func (s *gadgetYamlTestSuite) newCleanLovs(c *C) map[string]*gadget.LaidOutVolume {
+ _, lovs, err := gadget.LaidOutVolumesFromGadget(
+ s.dir, "", uc20Mod, secboot.EncryptionTypeNone)
+ c.Assert(err, IsNil)
+ return lovs
+}
+
+func (s *gadgetYamlTestSuite) TestApplyInstallerVolumesToGadgetPartialSchema(c *C) {
+ var yaml = []byte(`
+volumes:
+ vol0:
+ partial: [schema]
+ bootloader: u-boot
+ structure:
+ - name: ubuntu-seed
+ filesystem: vfat
+ size: 500M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-seed
+ - name: ubuntu-boot
+ filesystem: ext4
+ size: 500M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-boot
+ - name: ubuntu-save
+ size: 1M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-save
+ - name: ubuntu-data
+ size: 1000M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-data
+`)
+ err := ioutil.WriteFile(s.gadgetYamlPath, yaml, 0644)
+ c.Assert(err, IsNil)
+
+ installerVols := map[string]*gadget.Volume{
+ "vol0": {
+ Name: "vol0",
+ Schema: "gpt",
+ },
+ }
+
+ // New schema is set
+ lovs := s.newCleanLovs(c)
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, lovs)
+ c.Assert(err, IsNil)
+ c.Assert(lovs["vol0"].Schema, Equals, "gpt")
+
+ // Invalid schema is detected
+ installerVols["vol0"].Schema = "nextbigthing"
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, s.newCleanLovs(c))
+ c.Assert(err.Error(), Equals,
+ `finalized volume "vol0" is wrong: invalid schema "nextbigthing"`)
+
+ // No schema set case
+ installerVols["vol0"].Schema = ""
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, s.newCleanLovs(c))
+ c.Assert(err.Error(), Equals, `installer did not provide schema for volume "vol0"`)
+}
+
+func (s *gadgetYamlTestSuite) TestApplyInstallerVolumesToGadgetPartialFilesystem(c *C) {
+ var yaml = []byte(`
+volumes:
+ vol0:
+ partial: [filesystem]
+ bootloader: u-boot
+ structure:
+ - name: ubuntu-seed
+ size: 500M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-seed
+ - name: ubuntu-boot
+ filesystem: ext4
+ size: 500M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-boot
+ - name: ubuntu-save
+ size: 1M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-save
+ - name: ubuntu-data
+ size: 1000M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-data
+`)
+ err := ioutil.WriteFile(s.gadgetYamlPath, yaml, 0644)
+ c.Assert(err, IsNil)
+
+ installerVols := map[string]*gadget.Volume{
+ "vol0": {
+ Name: "vol0",
+ Schema: "gpt",
+ Structure: []gadget.VolumeStructure{
+ {
+ Name: "ubuntu-seed",
+ Filesystem: "vfat",
+ },
+ {
+ Name: "ubuntu-boot",
+ },
+ {
+ Name: "ubuntu-save",
+ Filesystem: "ext4",
+ },
+ {
+ Name: "ubuntu-data",
+ Filesystem: "ext4",
+ },
+ },
+ },
+ }
+
+ lovs := s.newCleanLovs(c)
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, lovs)
+ c.Assert(err, IsNil)
+ c.Assert(lovs["vol0"].Structure[0].Filesystem, Equals, "vfat")
+ c.Assert(lovs["vol0"].Structure[2].Filesystem, Equals, "ext4")
+ c.Assert(lovs["vol0"].Structure[3].Filesystem, Equals, "ext4")
+
+ installerVols["vol0"].Structure[0].Filesystem = ""
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, s.newCleanLovs(c))
+ c.Assert(err.Error(), Equals, `installer did not provide filesystem for structure "ubuntu-seed" in volume "vol0"`)
+
+ installerVols["vol0"].Structure[0].Filesystem = "ext44"
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, s.newCleanLovs(c))
+ c.Assert(err.Error(), Equals, `finalized volume "vol0" is wrong: invalid structure #0 ("ubuntu-seed"): invalid filesystem "ext44"`)
+}
+
+func (s *gadgetYamlTestSuite) TestApplyInstallerVolumesToGadgetPartialSize(c *C) {
+ var yaml = []byte(`
+volumes:
+ vol0:
+ partial: [size]
+ bootloader: u-boot
+ schema: gpt
+ structure:
+ - name: ubuntu-seed
+ filesystem: ext4
+ size: 500M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-seed
+ - name: ubuntu-boot
+ filesystem: ext4
+ size: 500M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-boot
+ - name: ubuntu-save
+ min-size: 1M
+ filesystem: ext4
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-save
+ - name: ubuntu-data
+ filesystem: ext4
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-data
+`)
+ err := ioutil.WriteFile(s.gadgetYamlPath, yaml, 0644)
+ c.Assert(err, IsNil)
+
+ installerVols := map[string]*gadget.Volume{
+ "vol0": {
+ Name: "vol0",
+ Schema: "gpt",
+ Structure: []gadget.VolumeStructure{
+ {
+ Name: "ubuntu-seed",
+ },
+ {
+ Name: "ubuntu-boot",
+ },
+ {
+ Name: "ubuntu-save",
+ Offset: asOffsetPtr(1001 * quantity.OffsetMiB),
+ Size: 2 * quantity.SizeMiB,
+ },
+ {
+ Name: "ubuntu-data",
+ Offset: asOffsetPtr(1003 * quantity.OffsetMiB),
+ Size: 2000 * quantity.SizeMiB,
+ },
+ },
+ },
+ }
+
+ lovs := s.newCleanLovs(c)
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, lovs)
+ c.Assert(err, IsNil)
+ c.Assert(*lovs["vol0"].Structure[2].Offset, Equals, 1001*quantity.OffsetMiB)
+ c.Assert(*lovs["vol0"].Structure[3].Offset, Equals, 1003*quantity.OffsetMiB)
+ c.Assert(lovs["vol0"].Structure[2].Size, Equals, 2*quantity.SizeMiB)
+ c.Assert(lovs["vol0"].Structure[3].Size, Equals, 2000*quantity.SizeMiB)
+
+ installerVols["vol0"].Structure[2].Offset = nil
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, s.newCleanLovs(c))
+ c.Assert(err.Error(), Equals, `installer did not provide offset for structure "ubuntu-save" in volume "vol0"`)
+
+ installerVols["vol0"].Structure[2].Offset = asOffsetPtr(1001 * quantity.OffsetMiB)
+ installerVols["vol0"].Structure[2].Size = 0
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, s.newCleanLovs(c))
+ c.Assert(err.Error(), Equals, `installer did not provide size for structure "ubuntu-save" in volume "vol0"`)
+
+ installerVols["vol0"].Structure[2].Size = 500 * quantity.SizeKiB
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, s.newCleanLovs(c))
+ c.Assert(err.Error(), Equals, `finalized volume "vol0" is wrong: invalid structure #2 ("ubuntu-save"): min-size (1048576) is bigger than size (512000)`)
+}
+
+func (s *gadgetYamlTestSuite) TestApplyInstallerVolumesToGadgetBadInstallerVol(c *C) {
+ var yaml = []byte(`
+volumes:
+ vol0:
+ partial: [filesystem]
+ bootloader: u-boot
+ structure:
+ - name: ubuntu-seed
+ size: 500M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-seed
+ - name: ubuntu-boot
+ filesystem: ext4
+ size: 500M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-boot
+ - name: ubuntu-save
+ size: 1M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-save
+ - name: ubuntu-data
+ size: 1000M
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ role: system-data
+`)
+ err := ioutil.WriteFile(s.gadgetYamlPath, yaml, 0644)
+ c.Assert(err, IsNil)
+
+ installerVols := map[string]*gadget.Volume{
+ "foo": {
+ Name: "foo",
+ Schema: "gpt",
+ },
+ }
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, s.newCleanLovs(c))
+ c.Assert(err.Error(), Equals, `installer did not provide information for volume "vol0"`)
+
+ installerVols = map[string]*gadget.Volume{
+ "vol0": {
+ Name: "vol0",
+ Schema: "gpt",
+ },
+ }
+ err = gadget.ApplyInstallerVolumesToGadget(installerVols, s.newCleanLovs(c))
+ c.Assert(err.Error(), Equals, `cannot find structure "ubuntu-seed"`)
+}
diff --git a/gadget/update.go b/gadget/update.go
index 8c34effc93..5d87f36169 100644
--- a/gadget/update.go
+++ b/gadget/update.go
@@ -123,6 +123,17 @@ type ContentUpdateObserver interface {
Canceled() error
}
+func setOnDiskStructuresInLaidOutVolume(gadgetToDiskStruct map[int]*OnDiskStructure, lv *LaidOutVolume) {
+ for i := range lv.LaidOutStructure {
+ gs := lv.LaidOutStructure[i].VolumeStructure
+ if ds, ok := gadgetToDiskStruct[gs.YamlIndex]; ok {
+ logger.Debugf("partition %s (offset %d) matched to gadget structure %s",
+ ds.Node, ds.StartOffset, lv.LaidOutStructure[i].VolumeStructure.Name)
+ lv.LaidOutStructure[i].OnDiskStructure = *ds
+ }
+ }
+}
+
// searchVolumeWithTraitsAndMatchParts searches for a disk matching the given
// traits and assigns disk partitions data to the matching laid out partition.
func searchVolumeWithTraitsAndMatchParts(laidOutVol *LaidOutVolume, traits DiskVolumeDeviceTraits, validateOpts *DiskVolumeValidationOptions) (disks.Disk, error) {
@@ -166,14 +177,7 @@ func searchVolumeWithTraitsAndMatchParts(laidOutVol *LaidOutVolume, traits DiskV
}
// Set OnDiskStructure for laidOutVol, now that we know the exact match
- for i := range laidOutVol.LaidOutStructure {
- gs := laidOutVol.LaidOutStructure[i].VolumeStructure
- if ds, ok := gadgetStructToDiskStruct[gs.YamlIndex]; ok {
- logger.Debugf("partition %s (offset %d) matched to gadget structure %s",
- ds.Node, ds.StartOffset, laidOutVol.LaidOutStructure[i].VolumeStructure.Name)
- laidOutVol.LaidOutStructure[i].OnDiskStructure = *ds
- }
- }
+ setOnDiskStructuresInLaidOutVolume(gadgetStructToDiskStruct, laidOutVol)
// success, we found it
return true
@@ -336,13 +340,13 @@ func EnsureVolumeCompatibility(gadgetVolume *Volume, diskVolume *OnDiskVolume, o
if opts == nil {
opts = &VolumeCompatibilityOptions{}
}
- logger.Debugf("checking volume compatibility between gadget volume %s and disk %s",
- gadgetVolume.Name, diskVolume.Device)
+ logger.Debugf("checking volume compatibility between gadget volume %s (partial: %v) and disk %s",
+ gadgetVolume.Name, gadgetVolume.Partial, diskVolume.Device)
eq := func(ds *OnDiskStructure, vss []VolumeStructure, vssIdx int) (bool, string) {
- gv := &vss[vssIdx]
+ gs := &vss[vssIdx]
// name mismatch
- if gv.Name != ds.Name {
+ if gs.Name != ds.Name {
// partitions have no names in MBR so bypass the name check
if gadgetVolume.Schema != "mbr" {
// don't return a reason if the names don't match
@@ -355,18 +359,19 @@ func EnsureVolumeCompatibility(gadgetVolume *Volume, diskVolume *OnDiskVolume, o
return false, fmt.Sprintf("disk partition %q %v", ds.Name, err)
}
+ maxSz := effectivePartSize(gs)
switch {
// on disk size too small
- case ds.Size < gv.MinSize:
+ case ds.Size < gs.MinSize:
return false, fmt.Sprintf("on disk size %d (%s) is smaller than gadget min size %d (%s)",
- ds.Size, ds.Size.IECString(), gv.MinSize, gv.MinSize.IECString())
+ ds.Size, ds.Size.IECString(), gs.MinSize, gs.MinSize.IECString())
// on disk size too large
- case ds.Size > gv.Size:
+ case ds.Size > maxSz:
// larger on disk size is allowed specifically only for system-data
- if gv.Role != SystemData {
+ if gs.Role != SystemData {
return false, fmt.Sprintf("on disk size %d (%s) is larger than gadget size %d (%s) (and the role should not be expanded)",
- ds.Size, ds.Size.IECString(), gv.Size, gv.Size.IECString())
+ ds.Size, ds.Size.IECString(), maxSz, maxSz.IECString())
}
}
@@ -377,10 +382,10 @@ func EnsureVolumeCompatibility(gadgetVolume *Volume, diskVolume *OnDiskVolume, o
// first handle the strict case where this partition was created at
// install in case it is an encrypted one
- if opts.AssumeCreatablePartitionsCreated && IsCreatableAtInstall(gv) {
+ if opts.AssumeCreatablePartitionsCreated && IsCreatableAtInstall(gs) {
// only partitions that are creatable at install can be encrypted,
// check if this partition was encrypted
- if encTypeParams, ok := opts.ExpectedStructureEncryption[gv.Name]; ok {
+ if encTypeParams, ok := opts.ExpectedStructureEncryption[gs.Name]; ok {
if encTypeParams.Method == "" {
return false, "encrypted structure parameter missing required parameter \"method\""
}
@@ -396,13 +401,13 @@ func EnsureVolumeCompatibility(gadgetVolume *Volume, diskVolume *OnDiskVolume, o
case EncryptionLUKS:
// then this partition is expected to have been encrypted, the
// filesystem label on disk will need "-enc" appended
- if ds.PartitionFSLabel != gv.Name+"-enc" {
- return false, fmt.Sprintf("partition %[1]s is expected to be encrypted but is not named %[1]s-enc", gv.Name)
+ if ds.PartitionFSLabel != gs.Name+"-enc" {
+ return false, fmt.Sprintf("partition %[1]s is expected to be encrypted but is not named %[1]s-enc", gs.Name)
}
// the filesystem should also be "crypto_LUKS"
if ds.PartitionFSType != "crypto_LUKS" {
- return false, fmt.Sprintf("partition %[1]s is expected to be encrypted but does not have an encrypted filesystem", gv.Name)
+ return false, fmt.Sprintf("partition %[1]s is expected to be encrypted but does not have an encrypted filesystem", gs.Name)
}
// at this point the partition matches
@@ -416,7 +421,7 @@ func EnsureVolumeCompatibility(gadgetVolume *Volume, diskVolume *OnDiskVolume, o
// below logic still applies
}
- if opts.AssumeCreatablePartitionsCreated || !IsCreatableAtInstall(gv) {
+ if opts.AssumeCreatablePartitionsCreated || !IsCreatableAtInstall(gs) {
// we assume that this partition has already been created
// successfully - either because this function was forced to(as is
// the case when doing gadget asset updates), or because this
@@ -430,15 +435,16 @@ func EnsureVolumeCompatibility(gadgetVolume *Volume, diskVolume *OnDiskVolume, o
// filesystem when the image is deployed to a partition. In this
// case we don't care about the filesystem at all because snapd does
// not touch it, unless a gadget asset update says to update that
- // image file with a new binary image file.
- if gv.Filesystem != "" && gv.Filesystem != ds.PartitionFSType {
+ // image file with a new binary image file. This also covers the
+ // partial filesystem case.
+ if gs.Filesystem != "" && gs.Filesystem != ds.PartitionFSType {
// use more specific error message for structures that are
// not creatable at install when we are not being strict
- if !IsCreatableAtInstall(gv) && !opts.AssumeCreatablePartitionsCreated {
- return false, fmt.Sprintf("filesystems do not match (and the partition is not creatable at install): declared as %s, got %s", gv.Filesystem, ds.PartitionFSType)
+ if !IsCreatableAtInstall(gs) && !opts.AssumeCreatablePartitionsCreated {
+ return false, fmt.Sprintf("filesystems do not match (and the partition is not creatable at install): declared as %s, got %s", gs.Filesystem, ds.PartitionFSType)
}
// otherwise generic
- return false, fmt.Sprintf("filesystems do not match: declared as %s, got %s", gv.Filesystem, ds.PartitionFSType)
+ return false, fmt.Sprintf("filesystems do not match: declared as %s, got %s", gs.Filesystem, ds.PartitionFSType)
}
}
@@ -529,24 +535,30 @@ func EnsureVolumeCompatibility(gadgetVolume *Volume, diskVolume *OnDiskVolume, o
}
}
- // Check if top level properties match
- if !isCompatibleSchema(gadgetVolume.Schema, diskVolume.Schema) {
+ // Check if gadget schema is compatible with the disk, when defined
+ if (!gadgetVolume.HasPartial(PartialSchema) || gadgetVolume.Schema != "") &&
+ !isCompatibleSchema(gadgetVolume.Schema, diskVolume.Schema) {
return nil, fmt.Errorf("disk partitioning schema %q doesn't match gadget schema %q", diskVolume.Schema, gadgetVolume.Schema)
}
+
+ // Check disk ID if defined in gadget
if gadgetVolume.ID != "" && gadgetVolume.ID != diskVolume.ID {
return nil, fmt.Errorf("disk ID %q doesn't match gadget volume ID %q", diskVolume.ID, gadgetVolume.ID)
}
// Check if all existing device partitions are also in gadget
- for _, ds := range diskVolume.Structure {
- present, reasonAbsent := gadgetContains(gadgetVolume.Structure, &ds)
- if !present {
- if reasonAbsent != "" {
- // use the right format so that it can be
- // appended to the error message
- reasonAbsent = fmt.Sprintf(": %s", reasonAbsent)
+ // (unless the gadget declares partial structure).
+ if !gadgetVolume.HasPartial(PartialStructure) {
+ for _, ds := range diskVolume.Structure {
+ present, reasonAbsent := gadgetContains(gadgetVolume.Structure, &ds)
+ if !present {
+ if reasonAbsent != "" {
+ // use the right format so that it can be
+ // appended to the error message
+ reasonAbsent = fmt.Sprintf(": %s", reasonAbsent)
+ }
+ return nil, fmt.Errorf("cannot find disk partition %s (starting at %d) in gadget%s", ds.Node, ds.StartOffset, reasonAbsent)
}
- return nil, fmt.Errorf("cannot find disk partition %s (starting at %d) in gadget%s", ds.Node, ds.StartOffset, reasonAbsent)
}
}
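
A minimal standalone sketch of the two "partial" gates introduced above, using simplified types as an assumption (not the real gadget.Volume): the schema check is skipped when the gadget declares a partial schema and leaves it unset, and the "every disk partition must be declared in the gadget" check is skipped entirely for a partial structure list.

// sketch only; types and helper mirror the behaviour relied on in the hunks above
package main

import "fmt"

type PartialProperty string

const (
	PartialSchema    PartialProperty = "schema"
	PartialStructure PartialProperty = "structure"
)

type Volume struct {
	Schema  string
	Partial []PartialProperty
}

func (v *Volume) HasPartial(p PartialProperty) bool {
	for _, prop := range v.Partial {
		if prop == p {
			return true
		}
	}
	return false
}

func mustCheckSchema(v *Volume) bool {
	// mirrors: (!HasPartial(PartialSchema) || Schema != "")
	return !v.HasPartial(PartialSchema) || v.Schema != ""
}

func mustMatchAllDiskPartitions(v *Volume) bool {
	return !v.HasPartial(PartialStructure)
}

func main() {
	strict := &Volume{Schema: "gpt"}
	partial := &Volume{Partial: []PartialProperty{PartialSchema, PartialStructure}}

	fmt.Println(mustCheckSchema(strict), mustMatchAllDiskPartitions(strict))   // true true
	fmt.Println(mustCheckSchema(partial), mustMatchAllDiskPartitions(partial)) // false false
}
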
@@ -650,9 +662,12 @@ func DiskTraitsFromDeviceAndValidate(expLayout *LaidOutVolume, dev string, opts
AllowImplicitSystemData: opts.AllowImplicitSystemData,
ExpectedStructureEncryption: opts.ExpectedStructureEncryption,
}
- if _, err := EnsureVolumeCompatibility(vol, diskLayout, volCompatOpts); err != nil {
+ gadgetToDiskStruct, err := EnsureVolumeCompatibility(vol, diskLayout, volCompatOpts)
+ if err != nil {
return res, fmt.Errorf("volume %s is not compatible with disk %s: %v", vol.Name, dev, err)
}
+ // Set OnDiskStructure for laidOutVol, now that we know the exact match
+ setOnDiskStructuresInLaidOutVolume(gadgetToDiskStruct, expLayout)
// also get a Disk{} interface for this device
disk, err := disks.DiskFromDeviceName(dev)
@@ -756,12 +771,16 @@ func DiskTraitsFromDeviceAndValidate(expLayout *LaidOutVolume, dev string, opts
for _, part := range diskPartitionsByOffset {
leftovers = append(leftovers, part.KernelDeviceNode)
}
- // this is an internal error because to get here we would have had to
- // pass validation in EnsureVolumeCompatibility but then still have
- // extra partitions - the only non-buggy situation where that function
- // passes validation but leaves partitions on disk not in the YAML is
- // the implicit system-data role handled above
- return res, fmt.Errorf("internal error: unexpected additional partitions on disk %s not present in the gadget layout: %v", disk.KernelDeviceNode(), leftovers)
+ if vol.HasPartial(PartialStructure) {
+ logger.Debugf("additional partitions on disk %s ignored as the gadget has partial structures: %v", disk.KernelDeviceNode(), leftovers)
+ } else {
+ // this is an internal error because to get here we would have had to
+ // pass validation in EnsureVolumeCompatibility but then still have
+ // extra partitions - the only non-buggy situation where that function
+ // passes validation but leaves partitions on disk not in the YAML is
+ // the implicit system-data role handled above
+ return res, fmt.Errorf("internal error: unexpected additional partitions on disk %s not present in the gadget layout: %v", disk.KernelDeviceNode(), leftovers)
+ }
}
return DiskVolumeDeviceTraits{
@@ -1261,10 +1280,6 @@ func Update(model Model, old, new GadgetData, rollbackDirPath string, updatePoli
for volName, oldVol := range old.Info.Volumes {
newVol := new.Info.Volumes[volName]
- if oldVol.Schema == "" || newVol.Schema == "" {
- return fmt.Errorf("internal error: unset volume schemas: old: %q new: %q", oldVol.Schema, newVol.Schema)
- }
-
// layout old partially, without going deep into the layout of structure
// content
pOld, err := LayoutVolumePartially(oldVol)
@@ -1310,7 +1325,7 @@ func Update(model Model, old, new GadgetData, rollbackDirPath string, updatePoli
if err != nil {
return err
}
- if err := canUpdateStructure(oldVol.Structure, fromIdx, newVol.Structure, toIdx, pNew.Schema); err != nil {
+ if err := canUpdateStructure(oldVol, fromIdx, newVol, toIdx); err != nil {
return fmt.Errorf("cannot update volume structure %v for volume %s: %v", update.to, volName, err)
}
}
@@ -1335,7 +1350,7 @@ func Update(model Model, old, new GadgetData, rollbackDirPath string, updatePoli
// build the map of volume structure locations where the first key is the
// volume name, and the second key is the structure's index in the list of
// structures on that volume, and the final value is the StructureLocation
- // hat can actually be used to perform the lookup/update in applyUpdates
+ // that can actually be used to perform the lookup/update in applyUpdates
structureLocations, err := volumeStructureToLocationMap(old, model, laidOutVols)
if err != nil {
if err == errSkipUpdateProceedRefresh {
@@ -1419,6 +1434,14 @@ func isLegacyMBRTransition(from *VolumeStructure, to *VolumeStructure) bool {
return from.Type == schemaMBR && to.Role == schemaMBR
}
+func effectivePartSize(part *VolumeStructure) quantity.Size {
+ // Partitions with partial size are set as unbounded (their Size field is 0)
+ if part.hasPartialSize() {
+ return UnboundedStructureSize
+ }
+ return part.Size
+}
+
func arePossibleSizesCompatible(from *VolumeStructure, to *VolumeStructure) bool {
// Check if [from.MinSize,from.Size], the interval of sizes allowed in
// "from", intersects with [to.MinSize,to.Size] (the interval of sizes
@@ -1427,7 +1450,7 @@ func arePossibleSizesCompatible(from *VolumeStructure, to *VolumeStructure) bool
// visualized by sliding a segment over the abscissa while the other is
// fixed, for a moving segment either smaller or bigger than the fixed
// one).
- return from.Size >= to.MinSize && from.MinSize <= to.Size
+ return effectivePartSize(from) >= to.MinSize && from.MinSize <= effectivePartSize(to)
}
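
A worked sketch of the size-range intersection test, with simplified standalone types rather than the real gadget structures (an assumption for illustration): a partial-size structure contributes an unbounded upper end, so a range like [10, unbounded) is still rejected against [1, 9], matching the new TestCanUpdateSize cases.

package main

import (
	"fmt"
	"math"
)

const unbounded = uint64(math.MaxUint64)

type structSize struct {
	min, max    uint64
	partialSize bool
}

// effectiveMax treats a partial-size structure as having no upper bound.
func effectiveMax(s structSize) uint64 {
	if s.partialSize {
		return unbounded
	}
	return s.max
}

// sizesCompatible checks that the two allowed size intervals intersect.
func sizesCompatible(from, to structSize) bool {
	return effectiveMax(from) >= to.min && from.min <= effectiveMax(to)
}

func main() {
	fixed := structSize{min: 10, max: 20}
	partial := structSize{min: 10, partialSize: true} // [10, unbounded)
	smaller := structSize{min: 1, max: 9}

	fmt.Println(sizesCompatible(fixed, partial))   // true: [10,20] overlaps [10,unbounded)
	fmt.Println(sizesCompatible(partial, smaller)) // false: [10,unbounded) does not overlap [1,9]
}
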
func arePossibleOffsetsCompatible(vss1 []VolumeStructure, idx1 int, vss2 []VolumeStructure, idx2 int) bool {
@@ -1437,21 +1460,28 @@ func arePossibleOffsetsCompatible(vss1 []VolumeStructure, idx1 int, vss2 []Volum
minStructureOffset(vss1, idx1) <= maxStructureOffset(vss2, idx2)
}
-func canUpdateStructure(fromVss []VolumeStructure, fromIdx int, toVss []VolumeStructure, toIdx int, schema string) error {
- from := &fromVss[fromIdx]
- to := &toVss[toIdx]
- if schema == schemaGPT && from.Name != to.Name {
+// canUpdateStructure checks gadget compatibility on updates, looking only at
+// features that are not reflected on the installed disk (the new gadget is
+// checked against the actual disk content elsewhere).
+//
+// Partial properties are not checked here as they are checked against the
+// real disk later, in EnsureVolumeCompatibility. TODO: some checks should
+// perhaps happen only there, even for non-partial gadgets.
+func canUpdateStructure(fromV *Volume, fromIdx int, toV *Volume, toIdx int) error {
+ from := &fromV.Structure[fromIdx]
+ to := &toV.Structure[toIdx]
+ if !toV.HasPartial(PartialSchema) && toV.Schema == schemaGPT && from.Name != to.Name {
// partition names are only effective when GPT is used
return fmt.Errorf("cannot change structure name from %q to %q",
from.Name, to.Name)
}
if !arePossibleSizesCompatible(from, to) {
return fmt.Errorf("new valid structure size range [%v, %v] is not compatible with current ([%v, %v])",
- to.MinSize, to.Size, from.MinSize, from.Size)
+ to.MinSize, effectivePartSize(to), from.MinSize, effectivePartSize(from))
}
- if !arePossibleOffsetsCompatible(fromVss, fromIdx, toVss, toIdx) {
+ if !arePossibleOffsetsCompatible(fromV.Structure, fromIdx, toV.Structure, toIdx) {
return fmt.Errorf("new valid structure offset range [%v, %v] is not compatible with current ([%v, %v])",
- minStructureOffset(toVss, toIdx), maxStructureOffset(toVss, toIdx), minStructureOffset(fromVss, fromIdx), maxStructureOffset(fromVss, fromIdx))
+ minStructureOffset(toV.Structure, toIdx), maxStructureOffset(toV.Structure, toIdx), minStructureOffset(fromV.Structure, fromIdx), maxStructureOffset(fromV.Structure, fromIdx))
}
// TODO: should this limitation be lifted?
if !isSameRelativeOffset(from.OffsetWrite, to.OffsetWrite) {
@@ -1474,7 +1504,10 @@ func canUpdateStructure(fromVss []VolumeStructure, fromIdx int, toVss []VolumeSt
if !from.HasFilesystem() {
return fmt.Errorf("cannot change a bare structure to filesystem one")
}
- if from.Filesystem != to.Filesystem {
+ // With a partial filesystem the value is an empty string. We allow
+ // moving from an undefined filesystem to a defined one, but not from
+ // defined to undefined, nor changing an already defined filesystem.
+ if from.Filesystem != "" && from.Filesystem != to.Filesystem {
return fmt.Errorf("cannot change filesystem from %q to %q",
from.Filesystem, to.Filesystem)
}
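
A short sketch of the filesystem update rule described in the comment above, under the assumption that an empty string stands for an undefined (partial) filesystem: undefined to defined is allowed, everything else must match exactly.

package main

import "fmt"

// checkFilesystemChange applies the rule from the hunk above in isolation.
func checkFilesystemChange(from, to string) error {
	if from != "" && from != to {
		return fmt.Errorf("cannot change filesystem from %q to %q", from, to)
	}
	return nil
}

func main() {
	fmt.Println(checkFilesystemChange("", "ext4"))     // <nil>: undefined -> defined is fine
	fmt.Println(checkFilesystemChange("ext4", ""))     // error: defined -> undefined
	fmt.Println(checkFilesystemChange("ext4", "vfat")) // error: defined -> different filesystem
}
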
@@ -1495,8 +1528,8 @@ func canUpdateVolume(from *PartiallyLaidOutVolume, to *LaidOutVolume) error {
if from.ID != to.ID {
return fmt.Errorf("cannot change volume ID from %q to %q", from.ID, to.ID)
}
- if from.Schema != to.Schema {
- return fmt.Errorf("cannot change volume schema from %q to %q", from.Schema, to.Schema)
+ if err := checkCompatibleSchema(from.Volume, to.Volume); err != nil {
+ return err
}
if len(from.LaidOutStructure) != len(to.LaidOutStructure) {
return fmt.Errorf("cannot change the number of structures within volume from %v to %v", len(from.LaidOutStructure), len(to.LaidOutStructure))
diff --git a/gadget/update_test.go b/gadget/update_test.go
index e28b4f6115..4fa2a6a07d 100644
--- a/gadget/update_test.go
+++ b/gadget/update_test.go
@@ -134,9 +134,11 @@ func (u *updateTestSuite) testCanUpdate(c *C, testCases []canUpdateTestCase) {
if schema == "" {
schema = "gpt"
}
- fromVss := []gadget.VolumeStructure{tc.from}
- toVss := []gadget.VolumeStructure{tc.to}
- err := gadget.CanUpdateStructure(fromVss, 0, toVss, 0, schema)
+ fromVss := &gadget.Volume{Schema: schema,
+ Structure: []gadget.VolumeStructure{tc.from}}
+ toVss := &gadget.Volume{Schema: schema,
+ Structure: []gadget.VolumeStructure{tc.to}}
+ err := gadget.CanUpdateStructure(fromVss, 0, toVss, 0)
if tc.err == "" {
c.Check(err, IsNil)
} else {
@@ -146,47 +148,65 @@ func (u *updateTestSuite) testCanUpdate(c *C, testCases []canUpdateTestCase) {
}
func (u *updateTestSuite) TestCanUpdateSize(c *C) {
-
+ mokVol := &gadget.Volume{}
+ partSizeVol := &gadget.Volume{Partial: []gadget.PartialProperty{gadget.PartialSize}}
cases := []canUpdateTestCase{
{
// size change
- from: gadget.VolumeStructure{MinSize: quantity.SizeMiB, Size: quantity.SizeMiB, EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{MinSize: quantity.SizeMiB + quantity.SizeKiB, Size: quantity.SizeMiB + quantity.SizeKiB, EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{MinSize: quantity.SizeMiB, Size: quantity.SizeMiB, EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{MinSize: quantity.SizeMiB + quantity.SizeKiB, Size: quantity.SizeMiB + quantity.SizeKiB, EnclosingVolume: mokVol},
err: `new valid structure size range \[1049600, 1049600\] is not compatible with current \(\[1048576, 1048576\]\)`,
}, {
// no size change
- from: gadget.VolumeStructure{MinSize: quantity.SizeMiB, Size: quantity.SizeMiB, EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{MinSize: quantity.SizeMiB, Size: quantity.SizeMiB, EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{MinSize: quantity.SizeMiB, Size: quantity.SizeMiB, EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{MinSize: quantity.SizeMiB, Size: quantity.SizeMiB, EnclosingVolume: mokVol},
err: "",
}, {
// range ok
- from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{MinSize: 0, Size: 10, EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{MinSize: 0, Size: 10, EnclosingVolume: mokVol},
err: "",
}, {
// range ok
- from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{MinSize: 0, Size: 15, EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{MinSize: 0, Size: 15, EnclosingVolume: mokVol},
err: "",
}, {
// range ok
- from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{MinSize: 15, Size: 18, EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{MinSize: 15, Size: 18, EnclosingVolume: mokVol},
err: "",
}, {
// range ok
- from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{MinSize: 15, Size: 25, EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{MinSize: 15, Size: 25, EnclosingVolume: mokVol},
err: "",
}, {
// range out
- from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{MinSize: 1, Size: 9, EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{MinSize: 1, Size: 9, EnclosingVolume: mokVol},
err: `new valid structure size range \[1, 9\] is not compatible with current \(\[10, 20\]\)`,
}, {
+ // from is partial, ok
+ from: gadget.VolumeStructure{MinSize: 0, Size: 0, EnclosingVolume: partSizeVol},
+ to: gadget.VolumeStructure{MinSize: 1, Size: 9, EnclosingVolume: mokVol},
+ }, {
+ // to is partial, ok
+ from: gadget.VolumeStructure{MinSize: 1, Size: 9, EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{MinSize: 0, Size: 0, EnclosingVolume: partSizeVol},
+ }, {
+ // both are partial, ok
+ from: gadget.VolumeStructure{MinSize: 0, Size: 0, EnclosingVolume: partSizeVol},
+ to: gadget.VolumeStructure{MinSize: 0, Size: 0, EnclosingVolume: partSizeVol},
+ }, {
+ // from is partial, but has MinSize so we are out of range
+ from: gadget.VolumeStructure{MinSize: 10, Size: 0, EnclosingVolume: partSizeVol},
+ to: gadget.VolumeStructure{MinSize: 1, Size: 9, EnclosingVolume: mokVol},
+ err: `new valid structure size range \[1, 9\] is not compatible with current \(\[10, 18446744073709551615\]\)`,
+ }, {
// range out
- from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{MinSize: 21, Size: 25, EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{MinSize: 10, Size: 20, EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{MinSize: 21, Size: 25, EnclosingVolume: mokVol},
err: `new valid structure size range \[21, 25\] is not compatible with current \(\[10, 20\]\)`,
},
}
@@ -195,104 +215,114 @@ func (u *updateTestSuite) TestCanUpdateSize(c *C) {
}
func (u *updateTestSuite) TestCanUpdateOffsetWrite(c *C) {
-
+ mokVol := &gadget.Volume{}
+ partSizeVol := &gadget.Volume{Partial: []gadget.PartialProperty{gadget.PartialSize}}
cases := []canUpdateTestCase{
{
// offset-write change
from: gadget.VolumeStructure{
- OffsetWrite: &gadget.RelativeOffset{Offset: 1024}, EnclosingVolume: &gadget.Volume{}},
+ OffsetWrite: &gadget.RelativeOffset{Offset: 1024}, EnclosingVolume: mokVol},
to: gadget.VolumeStructure{
- OffsetWrite: &gadget.RelativeOffset{Offset: 2048}, EnclosingVolume: &gadget.Volume{}},
+ OffsetWrite: &gadget.RelativeOffset{Offset: 2048}, EnclosingVolume: mokVol},
err: "cannot change structure offset-write from [0-9]+ to [0-9]+",
}, {
// offset-write, change in relative-to structure name
from: gadget.VolumeStructure{
- OffsetWrite: &gadget.RelativeOffset{RelativeTo: "foo", Offset: 1024}, EnclosingVolume: &gadget.Volume{}},
+ OffsetWrite: &gadget.RelativeOffset{RelativeTo: "foo", Offset: 1024}, EnclosingVolume: mokVol},
to: gadget.VolumeStructure{
- OffsetWrite: &gadget.RelativeOffset{RelativeTo: "bar", Offset: 1024}, EnclosingVolume: &gadget.Volume{}},
+ OffsetWrite: &gadget.RelativeOffset{RelativeTo: "bar", Offset: 1024}, EnclosingVolume: mokVol},
err: `cannot change structure offset-write from foo\+[0-9]+ to bar\+[0-9]+`,
}, {
// offset-write, unspecified in old
from: gadget.VolumeStructure{
- OffsetWrite: nil, EnclosingVolume: &gadget.Volume{},
+ OffsetWrite: nil, EnclosingVolume: mokVol,
},
to: gadget.VolumeStructure{
OffsetWrite: &gadget.RelativeOffset{RelativeTo: "bar", Offset: 1024},
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: mokVol,
},
err: `cannot change structure offset-write from unspecified to bar\+[0-9]+`,
}, {
// offset-write, unspecified in new
from: gadget.VolumeStructure{
OffsetWrite: &gadget.RelativeOffset{RelativeTo: "foo", Offset: 1024},
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: mokVol,
},
to: gadget.VolumeStructure{
OffsetWrite: nil,
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: mokVol,
},
err: `cannot change structure offset-write from foo\+[0-9]+ to unspecified`,
}, {
// all ok, both nils
from: gadget.VolumeStructure{
OffsetWrite: nil,
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: mokVol,
},
to: gadget.VolumeStructure{
OffsetWrite: nil,
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: mokVol,
},
- err: ``,
}, {
// all ok, both fully specified
from: gadget.VolumeStructure{
OffsetWrite: &gadget.RelativeOffset{RelativeTo: "foo", Offset: 1024},
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: mokVol,
},
to: gadget.VolumeStructure{
OffsetWrite: &gadget.RelativeOffset{RelativeTo: "foo", Offset: 1024},
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: mokVol,
},
- err: ``,
}, {
// all ok, both fully specified
from: gadget.VolumeStructure{
OffsetWrite: &gadget.RelativeOffset{Offset: 1024},
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: mokVol,
},
to: gadget.VolumeStructure{
OffsetWrite: &gadget.RelativeOffset{Offset: 1024},
- EnclosingVolume: &gadget.Volume{},
+ EnclosingVolume: mokVol,
},
- err: ``,
+ }, {
+ // from is partial
+ from: gadget.VolumeStructure{EnclosingVolume: partSizeVol},
+ to: gadget.VolumeStructure{EnclosingVolume: mokVol},
+ }, {
+ // to is partial
+ from: gadget.VolumeStructure{EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{EnclosingVolume: partSizeVol},
+ }, {
+ // both partial
+ from: gadget.VolumeStructure{EnclosingVolume: partSizeVol},
+ to: gadget.VolumeStructure{EnclosingVolume: partSizeVol},
},
}
u.testCanUpdate(c, cases)
}
func (u *updateTestSuite) TestCanUpdateOffset(c *C) {
-
+ mokVol := &gadget.Volume{}
cases := []canUpdateTestCase{
{
// explicitly declared start offset change
- from: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(1024)},
- to: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(2048)},
+ from: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(1024), EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(2048), EnclosingVolume: mokVol},
err: `new valid structure offset range \[2048, 2048\] is not compatible with current \(\[1024, 1024\]\)`,
}, {
// explicitly declared start offset in new structure
- from: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: nil},
- to: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(2048)},
+ from: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: nil, EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(2048), EnclosingVolume: mokVol},
err: `new valid structure offset range \[2048, 2048\] is not compatible with current \(\[0, 0\]\)`,
}, {
// explicitly declared start offset in old structure,
// missing from new
- from: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(1024)},
- to: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: nil},
+ from: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(1024), EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: nil, EnclosingVolume: mokVol},
err: `new valid structure offset range \[0, 0\] is not compatible with current \(\[1024, 1024\]\)`,
}, {
// start offset changed due to layout
- from: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(1 * quantity.OffsetMiB)},
- to: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(2 * quantity.OffsetMiB)},
+ from: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(1 * quantity.OffsetMiB), EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{Size: 1 * quantity.SizeMiB, Offset: asOffsetPtr(2 * quantity.OffsetMiB), EnclosingVolume: mokVol},
err: `new valid structure offset range \[2097152, 2097152\] is not compatible with current \(\[1048576, 1048576\]\)`,
},
}
@@ -381,8 +411,8 @@ func (u *updateTestSuite) TestCanUpdateID(c *C) {
cases := []canUpdateTestCase{
{
- from: gadget.VolumeStructure{ID: "00000000-0000-0000-0000-dd00deadbeef", Offset: asOffsetPtr(0)},
- to: gadget.VolumeStructure{ID: "00000000-0000-0000-0000-dd00deadcafe", Offset: asOffsetPtr(0)},
+ from: gadget.VolumeStructure{ID: "00000000-0000-0000-0000-dd00deadbeef", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
+ to: gadget.VolumeStructure{ID: "00000000-0000-0000-0000-dd00deadcafe", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
err: `cannot change structure ID from "00000000-0000-0000-0000-dd00deadbeef" to "00000000-0000-0000-0000-dd00deadcafe"`,
},
}
@@ -390,28 +420,42 @@ func (u *updateTestSuite) TestCanUpdateID(c *C) {
}
func (u *updateTestSuite) TestCanUpdateBareOrFilesystem(c *C) {
-
+ mokVol := &gadget.Volume{}
+ partFsVol := &gadget.Volume{Partial: []gadget.PartialProperty{gadget.PartialFilesystem}}
cases := []canUpdateTestCase{
{
- from: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{Type: "0C", Filesystem: "", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{Type: "0C", Filesystem: "", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
err: `cannot change a filesystem structure to a bare one`,
}, {
- from: gadget.VolumeStructure{Type: "0C", Filesystem: "", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{Type: "0C", Filesystem: "", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
err: `cannot change a bare structure to filesystem one`,
}, {
- from: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{Type: "0C", Filesystem: "vfat", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{Type: "0C", Filesystem: "vfat", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
err: `cannot change filesystem from "ext4" to "vfat"`,
}, {
- from: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Label: "writable", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Label: "writable", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
err: `cannot change filesystem label from "writable" to ""`,
}, {
+ // From/to both undefined filesystem is ok
+ from: gadget.VolumeStructure{Type: "0C", Filesystem: "", Offset: asOffsetPtr(0), EnclosingVolume: partFsVol},
+ to: gadget.VolumeStructure{Type: "0C", Filesystem: "", Offset: asOffsetPtr(0), EnclosingVolume: partFsVol},
+ }, {
+ // From undefined to defined filesystem is ok
+ from: gadget.VolumeStructure{Type: "0C", Filesystem: "", Offset: asOffsetPtr(0), EnclosingVolume: partFsVol},
+ to: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
+ }, {
+ // From defined to undefined filesystem is wrong
+ from: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{Type: "0C", Filesystem: "", Offset: asOffsetPtr(0), EnclosingVolume: partFsVol},
+ err: `cannot change filesystem from "ext4" to ""`,
+ }, {
// all ok
- from: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Label: "do-not-touch", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
- to: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Label: "do-not-touch", Offset: asOffsetPtr(0), EnclosingVolume: &gadget.Volume{}},
+ from: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Label: "do-not-touch", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
+ to: gadget.VolumeStructure{Type: "0C", Filesystem: "ext4", Label: "do-not-touch", Offset: asOffsetPtr(0), EnclosingVolume: mokVol},
err: ``,
},
}
@@ -437,38 +481,49 @@ func (u *updateTestSuite) TestCanUpdateName(c *C) {
}
func (u *updateTestSuite) TestCanUpdateOffsetRange(c *C) {
- fromVss := []gadget.VolumeStructure{
- {Offset: asOffsetPtr(0), MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
- // Valid offset range for second structure is [10, 20]
- {MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
+ fromV := &gadget.Volume{
+ Structure: []gadget.VolumeStructure{
+ {Offset: asOffsetPtr(0), MinSize: 10, Size: 20, EnclosingVolume: &gadget.Volume{}},
+ // Valid offset range for second structure is [10, 20]
+ {MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
+ },
}
- toVss := []gadget.VolumeStructure{
- {Offset: asOffsetPtr(0), MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
- {MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
+ toV := &gadget.Volume{
+ Structure: []gadget.VolumeStructure{
+ {Offset: asOffsetPtr(0), MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
+ {MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
+ },
}
- err := gadget.CanUpdateStructure(fromVss, 1, toVss, 1, "")
+ err := gadget.CanUpdateStructure(fromV, 1, toV, 1)
c.Check(err, IsNil)
- toVss = []gadget.VolumeStructure{
- {Offset: asOffsetPtr(0), MinSize: 15, Size: 21, EnclosingVolume: &gadget.Volume{}},
- {MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
+ toV = &gadget.Volume{
+ Structure: []gadget.VolumeStructure{
+ {Offset: asOffsetPtr(0), MinSize: 15, Size: 21, EnclosingVolume: &gadget.Volume{}},
+ {MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
+ },
}
- err = gadget.CanUpdateStructure(fromVss, 1, toVss, 1, "")
+ err = gadget.CanUpdateStructure(fromV, 1, toV, 1)
c.Check(err, IsNil)
- toVss = []gadget.VolumeStructure{
- {Offset: asOffsetPtr(0), MinSize: 21, Size: 30, EnclosingVolume: &gadget.Volume{}},
- {MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
+ toV = &gadget.Volume{
+ Structure: []gadget.VolumeStructure{
+ {Offset: asOffsetPtr(0), MinSize: 21, Size: 30, EnclosingVolume: &gadget.Volume{}},
+ {MinSize: 10, Size: 10, EnclosingVolume: &gadget.Volume{}},
+ },
}
- err = gadget.CanUpdateStructure(fromVss, 1, toVss, 1, "")
+ err = gadget.CanUpdateStructure(fromV, 1, toV, 1)
c.Check(err.Error(), Equals,
`new valid structure offset range [21, 30] is not compatible with current ([10, 20])`)
}
func (u *updateTestSuite) TestCanUpdateVolume(c *C) {
+ mbrVol := &gadget.Volume{Schema: "mbr"}
+ mbrLaidOut := &gadget.LaidOutStructure{
+ VolumeStructure: &gadget.VolumeStructure{EnclosingVolume: mbrVol}}
for idx, tc := range []struct {
from gadget.PartiallyLaidOutVolume
@@ -480,9 +535,9 @@ func (u *updateTestSuite) TestCanUpdateVolume(c *C) {
Volume: &gadget.Volume{Schema: "gpt"},
},
to: gadget.LaidOutVolume{
- Volume: &gadget.Volume{Schema: "mbr"},
+ Volume: mbrVol,
},
- err: `cannot change volume schema from "gpt" to "mbr"`,
+ err: `incompatible schema change from gpt to mbr`,
}, {
from: gadget.PartiallyLaidOutVolume{
Volume: &gadget.Volume{ID: "00000000-0000-0000-0000-0000deadbeef"},
@@ -508,15 +563,15 @@ func (u *updateTestSuite) TestCanUpdateVolume(c *C) {
}, {
// valid
from: gadget.PartiallyLaidOutVolume{
- Volume: &gadget.Volume{Schema: "mbr"},
+ Volume: mbrVol,
LaidOutStructure: []gadget.LaidOutStructure{
- {}, {},
+ *mbrLaidOut, *mbrLaidOut,
},
},
to: gadget.LaidOutVolume{
- Volume: &gadget.Volume{Schema: "mbr"},
+ Volume: mbrVol,
LaidOutStructure: []gadget.LaidOutStructure{
- {}, {},
+ *mbrLaidOut, *mbrLaidOut,
},
},
err: ``,
@@ -649,8 +704,8 @@ func (u *updateTestSuite) updateDataSet(c *C) (oldData gadget.GadgetData, newDat
makeSizedFile(c, filepath.Join(newRootDir, "/second-content/foo"), quantity.SizeKiB, nil)
makeSizedFile(c, filepath.Join(newRootDir, "/third-content/bar"), quantity.SizeKiB, nil)
newData = gadget.GadgetData{Info: newInfo, RootDir: newRootDir}
- gadgettest.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
- gadgettest.SetEnclosingVolumeInStructs(newData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(newData.Info.Volumes)
rollbackDir = c.MkDir()
return oldData, newData, rollbackDir
@@ -2760,11 +2815,11 @@ func (u *updateTestSuite) TestUpdateApplyErrorLayout(c *C) {
newRootDir := c.MkDir()
newData := gadget.GadgetData{Info: newInfo, RootDir: newRootDir}
- gadgettest.SetEnclosingVolumeInStructs(newData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(newData.Info.Volumes)
oldRootDir := c.MkDir()
oldData := gadget.GadgetData{Info: oldInfo, RootDir: oldRootDir}
- gadgettest.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
rollbackDir := c.MkDir()
@@ -2818,11 +2873,11 @@ func (u *updateTestSuite) TestUpdateApplyErrorIllegalVolumeUpdate(c *C) {
newRootDir := c.MkDir()
newData := gadget.GadgetData{Info: newInfo, RootDir: newRootDir}
- gadgettest.SetEnclosingVolumeInStructs(newData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(newData.Info.Volumes)
oldRootDir := c.MkDir()
oldData := gadget.GadgetData{Info: oldInfo, RootDir: oldRootDir}
- gadgettest.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
rollbackDir := c.MkDir()
@@ -2874,11 +2929,11 @@ func (u *updateTestSuite) TestUpdateApplyErrorIllegalStructureUpdate(c *C) {
newRootDir := c.MkDir()
newData := gadget.GadgetData{Info: newInfo, RootDir: newRootDir}
- gadgettest.SetEnclosingVolumeInStructs(newData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(newData.Info.Volumes)
oldRootDir := c.MkDir()
oldData := gadget.GadgetData{Info: oldInfo, RootDir: oldRootDir}
- gadgettest.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
rollbackDir := c.MkDir()
@@ -2954,14 +3009,14 @@ func (u *updateTestSuite) TestUpdateApplyUpdatesAreOptInWithDefaultPolicy(c *C)
oldRootDir := c.MkDir()
oldData := gadget.GadgetData{Info: oldInfo, RootDir: oldRootDir}
makeSizedFile(c, filepath.Join(oldRootDir, "first.img"), quantity.SizeMiB, nil)
- gadgettest.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
newRootDir := c.MkDir()
// same volume description
newData := gadget.GadgetData{Info: oldInfo, RootDir: newRootDir}
// different content, but updates are opt in
makeSizedFile(c, filepath.Join(newRootDir, "first.img"), 900*quantity.SizeKiB, nil)
- gadgettest.SetEnclosingVolumeInStructs(newData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(newData.Info.Volumes)
rollbackDir := c.MkDir()
@@ -3034,7 +3089,7 @@ func (u *updateTestSuite) policyDataSet(c *C) (oldData gadget.GadgetData, newDat
oldVol.Structure = append(oldVol.Structure, oldStructs...)
oldVol.Structure = append(oldVol.Structure, noPartitionStruct)
oldData.Info.Volumes["foo"] = oldVol
- gadgettest.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(oldData.Info.Volumes)
newVol := newData.Info.Volumes["foo"]
newStructs := newVol.Structure
@@ -3042,7 +3097,7 @@ func (u *updateTestSuite) policyDataSet(c *C) (oldData gadget.GadgetData, newDat
newVol.Structure = append(newVol.Structure, newStructs...)
newVol.Structure = append(newVol.Structure, noPartitionStruct)
newData.Info.Volumes["foo"] = newVol
- gadgettest.SetEnclosingVolumeInStructs(newData.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(newData.Info.Volumes)
c.Assert(oldData.Info.Volumes["foo"].Structure, HasLen, 5)
c.Assert(newData.Info.Volumes["foo"].Structure, HasLen, 5)
@@ -3685,14 +3740,16 @@ func (u *updateTestSuite) TestUpdateApplyUpdatesWithKernelPolicy(c *C) {
{UnresolvedSource: "$kernel:ref/kernel-content", Target: "/"},
},
}
+ vol := &gadget.Volume{
+ Name: "foo",
+ Bootloader: "grub",
+ Schema: "gpt",
+ Structure: []gadget.VolumeStructure{fsStruct},
+ }
+ vol.Structure[0].EnclosingVolume = vol
oldInfo := &gadget.Info{
Volumes: map[string]*gadget.Volume{
- "foo": {
- Name: "foo",
- Bootloader: "grub",
- Schema: "gpt",
- Structure: []gadget.VolumeStructure{fsStruct},
- },
+ "foo": vol,
},
}
@@ -3930,6 +3987,102 @@ volumes:
})
}
+func (u *updateTestSuite) TestDiskTraitsFromDeviceAndValidateMinSize(c *C) {
+ restore := disks.MockDeviceNameToDiskMapping(map[string]*disks.MockDiskMapping{
+ "/dev/foo": {
+ DevNode: "/dev/foo",
+ DevPath: "/sys/block/foo",
+ DevNum: "525:1",
+ // assume 34 sectors at end for GPT headers backup
+ DiskUsableSectorEnd: 6000*1024*1024/512 - 34,
+ DiskSizeInBytes: 6000 * 1024 * 1024,
+ SectorSizeBytes: 512,
+ DiskSchema: "gpt",
+ ID: "651AC800-B9FB-4B9D-B6D3-A72EB54D9006",
+ Structure: []disks.Partition{
+ {
+ PartitionLabel: "nofspart",
+ PartitionUUID: "C5A930DF-E86A-4BAE-A4C5-C861353796E6",
+ FilesystemType: "",
+ Major: 525,
+ Minor: 2,
+ KernelDeviceNode: "/dev/foo1",
+ KernelDevicePath: "/sys/block/foo/foo1",
+ DiskIndex: 1,
+ StartInBytes: 1024 * 1024,
+ SizeInBytes: 4096,
+ },
+ {
+ PartitionLabel: "some-filesystem",
+ PartitionUUID: "DA2ADBC8-90DF-4B1D-A93F-A92516C12E01",
+ FilesystemLabel: "some-filesystem",
+ FilesystemUUID: "3E3D392C-5D50-4C84-8A6E-09B7A3FEA2C7",
+ FilesystemType: "ext4",
+ Major: 525,
+ Minor: 3,
+ KernelDeviceNode: "/dev/foo2",
+ KernelDevicePath: "/sys/block/foo/foo2",
+ DiskIndex: 2,
+ StartInBytes: 1024*1024 + 4096,
+ SizeInBytes: 1024 * 1024 * 1024,
+ },
+ },
+ },
+ })
+ defer restore()
+
+ const yaml = `
+volumes:
+ foo:
+ bootloader: u-boot
+ schema: gpt
+ structure:
+ - name: nofspart
+ type: EBBEADAF-22C9-E33B-8F5D-0E81686A68CB
+ min-size: 4096
+ size: 8192
+ - name: some-filesystem
+ filesystem: ext4
+ type: 83,0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ size: 1G
+`
+ lvol, err := gadgettest.LayoutFromYaml(c.MkDir(), yaml, nil)
+ fmt.Println("structs", len(lvol.LaidOutStructure))
+ c.Assert(err, IsNil)
+
+ traits, err := gadget.DiskTraitsFromDeviceAndValidate(lvol, "/dev/foo", nil)
+ c.Assert(err, IsNil)
+ c.Assert(traits, DeepEquals, gadget.DiskVolumeDeviceTraits{
+ OriginalDevicePath: "/sys/block/foo",
+ OriginalKernelPath: "/dev/foo",
+ DiskID: "651AC800-B9FB-4B9D-B6D3-A72EB54D9006",
+ SectorSize: 512,
+ Size: 6000 * 1024 * 1024,
+ Schema: "gpt",
+ Structure: []gadget.DiskStructureDeviceTraits{
+ {
+ PartitionLabel: "nofspart",
+ PartitionUUID: "C5A930DF-E86A-4BAE-A4C5-C861353796E6",
+ OriginalDevicePath: "/sys/block/foo/foo1",
+ OriginalKernelPath: "/dev/foo1",
+ Offset: 0x100000,
+ Size: 0x1000,
+ },
+ {
+ PartitionLabel: "some-filesystem",
+ PartitionUUID: "DA2ADBC8-90DF-4B1D-A93F-A92516C12E01",
+ OriginalDevicePath: "/sys/block/foo/foo2",
+ OriginalKernelPath: "/dev/foo2",
+ FilesystemLabel: "some-filesystem",
+ FilesystemUUID: "3E3D392C-5D50-4C84-8A6E-09B7A3FEA2C7",
+ FilesystemType: "ext4",
+ Offset: 0x101000,
+ Size: 0x40000000,
+ },
+ },
+ })
+}
+
func (u *updateTestSuite) TestDiskTraitsFromDeviceAndValidateGPTMultiVolume(c *C) {
restore := disks.MockDeviceNameToDiskMapping(map[string]*disks.MockDiskMapping{
"/dev/vda": gadgettest.VMSystemVolumeDiskMapping,
diff --git a/go.mod b/go.mod
index 6f3ca67795..ca1d8b5d51 100644
--- a/go.mod
+++ b/go.mod
@@ -21,7 +21,7 @@ require (
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502024300-f57e1d55ea18
github.com/snapcore/bolt v1.3.2-0.20210908134111-63c8bfcf7af8
github.com/snapcore/go-gettext v0.0.0-20191107141714-82bbea49e785
- github.com/snapcore/secboot v0.0.0-20230428184943-be3902241d8a
+ github.com/snapcore/secboot v0.0.0-20230623151406-4d331d24f830
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90
golang.org/x/net v0.9.0 // indirect
golang.org/x/sys v0.7.0
@@ -33,7 +33,7 @@ require (
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637
gopkg.in/tylerb/graceful.v1 v1.2.15
gopkg.in/yaml.v2 v2.4.0
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
+ gopkg.in/yaml.v3 v3.0.1
)
require (
diff --git a/go.sum b/go.sum
index 8460e32bd1..ee0e51e906 100644
--- a/go.sum
+++ b/go.sum
@@ -47,8 +47,8 @@ github.com/snapcore/go-gettext v0.0.0-20191107141714-82bbea49e785 h1:PaunR+BhraK
github.com/snapcore/go-gettext v0.0.0-20191107141714-82bbea49e785/go.mod h1:D3SsWAXK7wCCBZu+Vk5hc1EuKj/L3XN1puEMXTU4LrQ=
github.com/snapcore/maze.io-x-crypto v0.0.0-20190131090603-9b94c9afe066 h1:InG0EmriMOiI4YgtQNOo+6fNxzLCYioo3Q3BCVLdMCE=
github.com/snapcore/maze.io-x-crypto v0.0.0-20190131090603-9b94c9afe066/go.mod h1:VuAdaITF1MrGzxPU+8GxagM1HW2vg7QhEFEeGHbmEMU=
-github.com/snapcore/secboot v0.0.0-20230428184943-be3902241d8a h1:0mHd/TdxsyR6XWqznXRuCHxHltX736XspJlPFSUzHxU=
-github.com/snapcore/secboot v0.0.0-20230428184943-be3902241d8a/go.mod h1:72paVOkm4sJugXt+v9ItmnjXgO921D8xqsbH2OekouY=
+github.com/snapcore/secboot v0.0.0-20230623151406-4d331d24f830 h1:SCJ9Uiekv6uMqzMGP50Y0KBxkLP7IzPW35aI3Po6iyM=
+github.com/snapcore/secboot v0.0.0-20230623151406-4d331d24f830/go.mod h1:72paVOkm4sJugXt+v9ItmnjXgO921D8xqsbH2OekouY=
github.com/snapcore/snapd v0.0.0-20201005140838-501d14ac146e/go.mod h1:3xrn7QDDKymcE5VO2rgWEQ5ZAUGb9htfwlXnoel6Io8=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
@@ -91,5 +91,5 @@ gopkg.in/tylerb/graceful.v1 v1.2.15/go.mod h1:yBhekWvR20ACXVObSSdD3u6S9DeSylanL2
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/image/image_linux.go b/image/image_linux.go
index e7c53f8f77..0d18440288 100644
--- a/image/image_linux.go
+++ b/image/image_linux.go
@@ -163,8 +163,13 @@ func Prepare(opts *Options) error {
if model.Classic() {
return fmt.Errorf("cannot preseed the image for a classic model")
}
- if model.Base() != "core20" {
- return fmt.Errorf("cannot preseed the image for a model other than core20")
+
+ coreVersion, err := naming.CoreVersion(model.Base())
+ if err != nil {
+ return fmt.Errorf("cannot preseed the image for %s: %v", model.Base(), err)
+ }
+ if coreVersion < 20 {
+ return fmt.Errorf("cannot preseed the image for older base than core20")
}
coreOpts := &preseed.CoreOptions{
PrepareImageDir: opts.PrepareDir,
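
A hedged sketch of deriving a numeric core version from a base snap name, so that "core20"/"core22" pass the >= 20 preseeding check while older bases do not. This is not the real snap/naming.CoreVersion implementation, only an assumed illustration of the comparison used above.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// coreVersion is a hypothetical helper: parse the numeric suffix of a
// "coreNN" base name; plain "core" is assumed here to map to the 16 series.
func coreVersion(base string) (int, error) {
	if !strings.HasPrefix(base, "core") {
		return 0, fmt.Errorf("%q is not a core base", base)
	}
	suffix := strings.TrimPrefix(base, "core")
	if suffix == "" {
		return 16, nil
	}
	return strconv.Atoi(suffix)
}

func main() {
	for _, base := range []string{"core", "core18", "core20", "core22"} {
		v, err := coreVersion(base)
		fmt.Println(base, v, err, "preseedable:", err == nil && v >= 20)
	}
}
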
diff --git a/image/preseed/preseed_linux.go b/image/preseed/preseed_linux.go
index 84af49505a..b3072060a4 100644
--- a/image/preseed/preseed_linux.go
+++ b/image/preseed/preseed_linux.go
@@ -35,6 +35,7 @@ import (
"github.com/snapcore/snapd/dirs"
"github.com/snapcore/snapd/osutil"
"github.com/snapcore/snapd/osutil/squashfs"
+ "github.com/snapcore/snapd/snap/naming"
"github.com/snapcore/snapd/snapdtool"
"github.com/snapcore/snapd/strutil"
"github.com/snapcore/snapd/timings"
@@ -126,11 +127,14 @@ var systemSnapFromSeed = func(seedDir, sysLabel string) (systemSnap string, base
if model.Classic() {
fmt.Fprintf(Stdout, "ubuntu classic preseeding\n")
} else {
- if model.Base() == "core20" {
+ coreVersion, err := naming.CoreVersion(model.Base())
+ if err != nil {
+ return "", "", fmt.Errorf("preseeding of ubuntu core with base %s is not supported: %v", model.Base(), err)
+ }
+ if coreVersion >= 20 {
fmt.Fprintf(Stdout, "UC20+ preseeding\n")
} else {
- // TODO: support uc20+
- return "", "", fmt.Errorf("preseeding of ubuntu core with base %s is not supported", model.Base())
+ return "", "", fmt.Errorf("preseeding of ubuntu core with base %s is not supported: core20 or later is expected", model.Base())
}
}
diff --git a/interfaces/apparmor/backend.go b/interfaces/apparmor/backend.go
index dd964d62fe..b8ed5bbc48 100644
--- a/interfaces/apparmor/backend.go
+++ b/interfaces/apparmor/backend.go
@@ -176,6 +176,10 @@ func snapConfineFromSnapProfile(info *snap.Info) (dir, glob string, content map[
patchedProfileText := bytes.Replace(
vanillaProfileText, []byte("/usr/lib/snapd/snap-confine"), []byte(snapConfineInCore), -1)
+ // Replace the path to the vanilla snap-confine apparmor snippets
+ patchedProfileText = bytes.Replace(
+ patchedProfileText, []byte("/var/lib/snapd/apparmor/snap-confine"), []byte(apparmor_sandbox.SnapConfineAppArmorDir), -1)
+
// Also replace the test providing access to verbatim
// /usr/lib/snapd/snap-confine, which is necessary because to execute snaps
// from strict snaps, we need to be able read and map
@@ -232,7 +236,7 @@ func snapConfineProfileName(snapName string, rev snap.Revision) string {
//
// Additionally it will cleanup stale apparmor profiles it created.
func (b *Backend) setupSnapConfineReexec(info *snap.Info) error {
- if err := os.MkdirAll(dirs.SnapConfineAppArmorDir, 0755); err != nil {
+ if err := os.MkdirAll(apparmor_sandbox.SnapConfineAppArmorDir, 0755); err != nil {
return fmt.Errorf("cannot create snap-confine policy directory: %s", err)
}
dir, glob, content, err := snapConfineFromSnapProfile(info)
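
A minimal sketch of the profile-text rewriting applied above, using hypothetical paths; it only illustrates the two bytes.Replace passes (binary path and snap-confine snippet include path), not the full snapConfineFromSnapProfile logic.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	vanilla := []byte(`/usr/lib/snapd/snap-confine (attach_disconnected) {
  #include "/var/lib/snapd/apparmor/snap-confine"
}`)

	// hypothetical values standing in for snapConfineInCore and
	// apparmor_sandbox.SnapConfineAppArmorDir
	snapConfineInCore := "/snap/core/111/usr/lib/snapd/snap-confine"
	snippetDir := "/custom/root/var/lib/snapd/apparmor/snap-confine"

	// first rewrite the snap-confine binary path, then the snippet include dir
	patched := bytes.Replace(vanilla, []byte("/usr/lib/snapd/snap-confine"), []byte(snapConfineInCore), -1)
	patched = bytes.Replace(patched, []byte("/var/lib/snapd/apparmor/snap-confine"), []byte(snippetDir), -1)

	fmt.Printf("%s\n", patched)
}
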
diff --git a/interfaces/apparmor/backend_test.go b/interfaces/apparmor/backend_test.go
index a64534ca65..5548145731 100644
--- a/interfaces/apparmor/backend_test.go
+++ b/interfaces/apparmor/backend_test.go
@@ -1252,6 +1252,8 @@ func (s *backendSuite) writeVanillaSnapConfineProfile(c *C, coreOrSnapdInfo *sna
vanillaProfilePath := filepath.Join(coreOrSnapdInfo.MountDir(), "/etc/apparmor.d/usr.lib.snapd.snap-confine.real")
vanillaProfileText := []byte(`#include <tunables/global>
/usr/lib/snapd/snap-confine (attach_disconnected) {
+ #include "/var/lib/snapd/apparmor/snap-confine"
+
# We run privileged, so be fanatical about what we include and don't use
# any abstractions
/etc/ld.so.cache r,
@@ -1271,11 +1273,13 @@ func (s *backendSuite) TestSnapConfineProfile(c *C) {
expectedProfileGlob := "snap-confine.core.*"
expectedProfileText := fmt.Sprintf(`#include <tunables/global>
%s/usr/lib/snapd/snap-confine (attach_disconnected) {
+ #include "%s/var/lib/snapd/apparmor/snap-confine"
+
# We run privileged, so be fanatical about what we include and don't use
# any abstractions
/etc/ld.so.cache r,
}
-`, coreInfo.MountDir())
+`, coreInfo.MountDir(), dirs.GlobalRootDir)
c.Assert(expectedProfileName, testutil.Contains, coreInfo.Revision.String())
@@ -1306,11 +1310,13 @@ func (s *backendSuite) TestSnapConfineProfileFromSnapdSnap(c *C) {
expectedProfileGlob := "snap-confine.snapd.222"
expectedProfileText := fmt.Sprintf(`#include <tunables/global>
%s/usr/lib/snapd/snap-confine (attach_disconnected) {
+ #include "%s/var/lib/snapd/apparmor/snap-confine"
+
# We run privileged, so be fanatical about what we include and don't use
# any abstractions
/etc/ld.so.cache r,
}
-`, snapdInfo.MountDir())
+`, snapdInfo.MountDir(), dirs.GlobalRootDir)
c.Assert(expectedProfileName, testutil.Contains, snapdInfo.Revision.String())
@@ -1327,6 +1333,21 @@ func (s *backendSuite) TestSnapConfineProfileFromSnapdSnap(c *C) {
})
}
+func (s *backendSuite) TestSnapConfineProfileUsesSandboxSnapConfineDir(c *C) {
+ snapdInfo := snaptest.MockInfo(c, snapdYaml, &snap.SideInfo{Revision: snap.R(222)})
+ s.writeVanillaSnapConfineProfile(c, snapdInfo)
+ expectedProfileName := "snap-confine.snapd.222"
+
+ // Compute the profile and see that it replaces
+ // "/var/lib/snapd/apparmor/snap-confine" with the
+ // apparmor_sandbox.SnapConfineAppArmorDir dir
+ apparmor_sandbox.SnapConfineAppArmorDir = "/apparmor/sandbox/dir"
+ _, _, content, err := apparmor.SnapConfineFromSnapProfile(snapdInfo)
+ c.Assert(err, IsNil)
+ contentStr := string(content[expectedProfileName].(*osutil.MemoryFileState).Content)
+ c.Check(contentStr, testutil.Contains, ` #include "/apparmor/sandbox/dir"`)
+}
+
func (s *backendSuite) TestSnapConfineFromSnapProfileCreatesAllDirs(c *C) {
c.Assert(osutil.IsDirectory(dirs.SnapAppArmorDir), Equals, false)
coreInfo := snaptest.MockInfo(c, coreYaml, &snap.SideInfo{Revision: snap.R(111)})
@@ -1384,18 +1405,20 @@ func (s *backendSuite) TestSetupHostSnapConfineApparmorForReexecWritesNew(c *C)
// No other changes other than that to the input
c.Check(newAA[0], testutil.FileEquals, fmt.Sprintf(`#include <tunables/global>
%s/core/111/usr/lib/snapd/snap-confine (attach_disconnected) {
+ #include "%s/var/lib/snapd/apparmor/snap-confine"
+
# We run privileged, so be fanatical about what we include and don't use
# any abstractions
/etc/ld.so.cache r,
}
-`, dirs.SnapMountDir))
+`, dirs.SnapMountDir, dirs.GlobalRootDir))
c.Check(s.loadProfilesCalls, DeepEquals, []loadProfilesParams{
{[]string{newAA[0]}, fmt.Sprintf("%s/var/cache/apparmor", s.RootDir), 0},
})
// snap-confine directory was created
- _, err = os.Stat(dirs.SnapConfineAppArmorDir)
+ _, err = os.Stat(apparmor_sandbox.SnapConfineAppArmorDir)
c.Check(err, IsNil)
}
@@ -1557,7 +1580,7 @@ func (s *backendSuite) testSetupSnapConfineGeneratedPolicyWithNFS(c *C, profileF
c.Assert(err, IsNil)
// Because NFS is being used, we have the extra policy file.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 1)
c.Assert(files[0].Name(), Equals, "nfs-support")
@@ -1565,7 +1588,7 @@ func (s *backendSuite) testSetupSnapConfineGeneratedPolicyWithNFS(c *C, profileF
c.Assert(files[0].IsDir(), Equals, false)
// The policy allows network access.
- fn := filepath.Join(dirs.SnapConfineAppArmorDir, files[0].Name())
+ fn := filepath.Join(apparmor_sandbox.SnapConfineAppArmorDir, files[0].Name())
c.Assert(fn, testutil.FileContains, "network inet,")
c.Assert(fn, testutil.FileContains, "network inet6,")
@@ -1605,7 +1628,7 @@ func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithNFSAndReExec(c *C)
c.Assert(err, IsNil)
// Because NFS is being used, we have the extra policy file.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 1)
c.Assert(files[0].Name(), Equals, "nfs-support")
@@ -1613,7 +1636,7 @@ func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithNFSAndReExec(c *C)
c.Assert(files[0].IsDir(), Equals, false)
// The policy allows network access.
- fn := filepath.Join(dirs.SnapConfineAppArmorDir, files[0].Name())
+ fn := filepath.Join(apparmor_sandbox.SnapConfineAppArmorDir, files[0].Name())
c.Assert(fn, testutil.FileContains, "network inet,")
c.Assert(fn, testutil.FileContains, "network inet6,")
@@ -1647,7 +1670,7 @@ func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyError1(c *C) {
c.Assert(err, ErrorMatches, "cannot read .*corrupt-proc-self-exe: .*")
// We didn't create the policy file.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 0)
@@ -1686,7 +1709,7 @@ func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyError2(c *C) {
// While created the policy file initially we also removed it so that
// no side-effects remain.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 0)
@@ -1755,7 +1778,7 @@ func (s *backendSuite) testSetupSnapConfineGeneratedPolicyWithOverlay(c *C, prof
c.Assert(err, IsNil)
// Because overlay is being used, we have the extra policy file.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 1)
c.Assert(files[0].Name(), Equals, "overlay-root")
@@ -1763,7 +1786,7 @@ func (s *backendSuite) testSetupSnapConfineGeneratedPolicyWithOverlay(c *C, prof
c.Assert(files[0].IsDir(), Equals, false)
// The policy allows upperdir access.
- data, err := ioutil.ReadFile(filepath.Join(dirs.SnapConfineAppArmorDir, files[0].Name()))
+ data, err := ioutil.ReadFile(filepath.Join(apparmor_sandbox.SnapConfineAppArmorDir, files[0].Name()))
c.Assert(err, IsNil)
c.Assert(string(data), testutil.Contains, "\"/upper/{,**/}\" r,")
@@ -1802,7 +1825,7 @@ func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithOverlayAndReExec(c
c.Assert(err, IsNil)
// Because overlay is being used, we have the extra policy file.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 1)
c.Assert(files[0].Name(), Equals, "overlay-root")
@@ -1810,7 +1833,7 @@ func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithOverlayAndReExec(c
c.Assert(files[0].IsDir(), Equals, false)
// The policy allows upperdir access
- data, err := ioutil.ReadFile(filepath.Join(dirs.SnapConfineAppArmorDir, files[0].Name()))
+ data, err := ioutil.ReadFile(filepath.Join(apparmor_sandbox.SnapConfineAppArmorDir, files[0].Name()))
c.Assert(err, IsNil)
c.Assert(string(data), testutil.Contains, "\"/upper/{,**/}\" r,")
@@ -1856,14 +1879,14 @@ func (s *backendSuite) testSetupSnapConfineGeneratedPolicyWithBPFCapability(c *C
// Capability bpf is supported by the parser, so an extra policy file
// for snap-confine is present
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 1)
c.Assert(files[0].Name(), Equals, "cap-bpf")
c.Assert(files[0].Mode(), Equals, os.FileMode(0644))
c.Assert(files[0].IsDir(), Equals, false)
- c.Assert(filepath.Join(dirs.SnapConfineAppArmorDir, files[0].Name()),
+ c.Assert(filepath.Join(apparmor_sandbox.SnapConfineAppArmorDir, files[0].Name()),
testutil.FileContains, "capability bpf,")
if reexec {
@@ -1924,7 +1947,7 @@ func (s *backendSuite) TestSetupSnapConfineGeneratedPolicyWithBPFProbeError(c *C
// Probing apparmor_parser capabilities failed, so nothing gets written
// to the snap-confine policy directory
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor_sandbox.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 0)
diff --git a/interfaces/apparmor/spec.go b/interfaces/apparmor/spec.go
index e9556e98d3..f2c40d037c 100644
--- a/interfaces/apparmor/spec.go
+++ b/interfaces/apparmor/spec.go
@@ -172,8 +172,8 @@ func (spec *Specification) AddDeduplicatedSnippet(snippet string) {
// This function should be used whenever the apparmor template features more
// than one use of "**" syntax (which represent arbitrary many directories or
// files) and a variable component, like a device name or similar. Repeated
-// instances of this pattern require exponential memory when compiled with
-// apparmor_parser -O no-expr-simplify.
+// instances of this pattern slow down the apparmor parser in the default
+// "expr-simplify" mode (see PR#12943 for measurements).
func (spec *Specification) AddParametricSnippet(templateFragment []string, value string) {
if len(spec.securityTags) == 0 {
return
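
A small illustration of the idea behind parametric snippets, with assumed (hypothetical) rule text rather than the real assembly code: many per-device rules that each repeat a "**" glob are condensed into one rule whose variable part is an alternation, which is cheaper for apparmor_parser to compile.

package main

import (
	"fmt"
	"strings"
)

// condense joins the variable parts of otherwise identical rules into a
// single alternation, mirroring the "condensed parametric rules" comment.
func condense(prefix, suffix string, values []string) string {
	if len(values) == 1 {
		return prefix + values[0] + suffix
	}
	return prefix + "{" + strings.Join(values, ",") + "}" + suffix
}

func main() {
	// hypothetical i2c-style fragment
	prefix := "/sys/devices/platform/{*,**.i2c}/i2c-"
	suffix := "/** rw,"
	fmt.Println(condense(prefix, suffix, []string{"1"}))
	fmt.Println(condense(prefix, suffix, []string{"1", "2", "3"}))
}
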
diff --git a/interfaces/apparmor/template.go b/interfaces/apparmor/template.go
index c288b492b0..11e218acf3 100644
--- a/interfaces/apparmor/template.go
+++ b/interfaces/apparmor/template.go
@@ -1070,6 +1070,10 @@ profile snap-update-ns.###SNAP_INSTANCE_NAME### (attach_disconnected) {
# snapd logger.go checks /proc/cmdline
@{PROC}/cmdline r,
+ # snap checks if vendored apparmor parser should be used at startup
+ /usr/lib/snapd/info r,
+ /lib/apparmor/functions r,
+
###SNIPPETS###
}
`
diff --git a/interfaces/builtin/i2c.go b/interfaces/builtin/i2c.go
index e32f21a625..8bf1accab9 100644
--- a/interfaces/builtin/i2c.go
+++ b/interfaces/builtin/i2c.go
@@ -129,7 +129,7 @@ func (iface *i2cInterface) AppArmorConnectedPlug(spec *apparmor.Specification, p
cleanedPath := filepath.Clean(path)
spec.AddSnippet(fmt.Sprintf(i2cConnectedPlugAppArmorPath, cleanedPath))
- // Use parametric snippets to avoid no-expr-simplify side-effects.
+ // Use parametric snippets to avoid parser slowdown.
spec.AddParametricSnippet([]string{
"/sys/devices/platform/{*,**.i2c}/i2c-" /* ###PARAM### */, "/** rw, # Add any condensed parametric rules",
}, strings.TrimPrefix(path, "/dev/i2c-"))
diff --git a/interfaces/builtin/iio.go b/interfaces/builtin/iio.go
index b96b7c609b..ace7dcc61f 100644
--- a/interfaces/builtin/iio.go
+++ b/interfaces/builtin/iio.go
@@ -120,7 +120,7 @@ func (iface *iioInterface) AppArmorConnectedPlug(spec *apparmor.Specification, p
// shorter expansion expression.
deviceNum := strings.TrimPrefix(deviceName, "iio:device")
- // Use parametric snippets to avoid no-expr-simplify side-effects.
+ // Use parametric snippets to avoid parser slowdown.
spec.AddParametricSnippet([]string{
"/sys/devices/**/iio:device" /* ###PARAM### */, "/** rwk, # Add any condensed parametric rules",
}, deviceNum)
diff --git a/interfaces/builtin/network_control.go b/interfaces/builtin/network_control.go
index db4e492ca1..93fab45ca6 100644
--- a/interfaces/builtin/network_control.go
+++ b/interfaces/builtin/network_control.go
@@ -298,7 +298,7 @@ umount /,
capability sys_ptrace,
# 'ip netns exec foo /bin/sh'
-#mount options=(rw, rslave) /, # commented out because of LP: #2023025
+mount options=(rw, rslave) /,
mount options=(rw, rslave), # LP: #1648245
mount fstype=sysfs,
umount /sys/,
diff --git a/interfaces/builtin/opengl.go b/interfaces/builtin/opengl.go
index 1d39204c8e..0375e122c6 100644
--- a/interfaces/builtin/opengl.go
+++ b/interfaces/builtin/opengl.go
@@ -127,6 +127,8 @@ unix (bind,listen) type=seqpacket addr="@cuda-uvmfd-[0-9a-f]*",
# ARM Mali driver
/dev/mali[0-9]* rw,
/dev/dma_buf_te rw,
+/dev/dma_heap/linux,cma rw,
+/dev/dma_heap/system rw,
# NXP i.MX driver
# https://github.com/Freescale/kernel-module-imx-gpu-viv
@@ -174,6 +176,8 @@ unix (send, receive) type=dgram peer=(addr="@var/run/nvidia-xdriver-*"),
// will be added by snap-confine.
var openglConnectedPlugUDev = []string{
`SUBSYSTEM=="drm", KERNEL=="card[0-9]*"`,
+ `SUBSYSTEM=="dma_heap", KERNEL=="linux,cma"`,
+ `SUBSYSTEM=="dma_heap", KERNEL=="system"`,
`KERNEL=="vchiq"`,
`KERNEL=="vcsm-cma"`,
`KERNEL=="renderD[0-9]*"`,
diff --git a/interfaces/builtin/opengl_test.go b/interfaces/builtin/opengl_test.go
index 0787480233..3c0b7abf0c 100644
--- a/interfaces/builtin/opengl_test.go
+++ b/interfaces/builtin/opengl_test.go
@@ -83,6 +83,8 @@ func (s *OpenglInterfaceSuite) TestAppArmorSpec(c *C) {
c.Assert(spec.SnippetForTag("snap.consumer.app"), testutil.Contains, `/dev/nvidia* rw,`)
c.Assert(spec.SnippetForTag("snap.consumer.app"), testutil.Contains, `/dev/dri/renderD[0-9]* rw,`)
c.Assert(spec.SnippetForTag("snap.consumer.app"), testutil.Contains, `/dev/mali[0-9]* rw,`)
+ c.Assert(spec.SnippetForTag("snap.consumer.app"), testutil.Contains, `/dev/dma_heap/linux,cma rw,`)
+ c.Assert(spec.SnippetForTag("snap.consumer.app"), testutil.Contains, `/dev/dma_heap/system rw,`)
c.Assert(spec.SnippetForTag("snap.consumer.app"), testutil.Contains, `/dev/galcore rw,`)
c.Assert(spec.SnippetForTag("snap.consumer.app"), testutil.Contains, `/usr/share/libdrm/amdgpu.ids r,`)
}
@@ -90,10 +92,14 @@ func (s *OpenglInterfaceSuite) TestAppArmorSpec(c *C) {
func (s *OpenglInterfaceSuite) TestUDevSpec(c *C) {
spec := &udev.Specification{}
c.Assert(spec.AddConnectedPlug(s.iface, s.plug, s.slot), IsNil)
- c.Assert(spec.Snippets(), HasLen, 13)
+ c.Assert(spec.Snippets(), HasLen, 15)
c.Assert(spec.Snippets(), testutil.Contains, `# opengl
SUBSYSTEM=="drm", KERNEL=="card[0-9]*", TAG+="snap_consumer_app"`)
c.Assert(spec.Snippets(), testutil.Contains, `# opengl
+SUBSYSTEM=="dma_heap", KERNEL=="linux,cma", TAG+="snap_consumer_app"`)
+ c.Assert(spec.Snippets(), testutil.Contains, `# opengl
+SUBSYSTEM=="dma_heap", KERNEL=="system", TAG+="snap_consumer_app"`)
+ c.Assert(spec.Snippets(), testutil.Contains, `# opengl
KERNEL=="renderD[0-9]*", TAG+="snap_consumer_app"`)
c.Assert(spec.Snippets(), testutil.Contains, `# opengl
KERNEL=="nvhost-*", TAG+="snap_consumer_app"`)
diff --git a/interfaces/builtin/shared_memory.go b/interfaces/builtin/shared_memory.go
index 3ac700b0df..0740424d1c 100644
--- a/interfaces/builtin/shared_memory.go
+++ b/interfaces/builtin/shared_memory.go
@@ -100,7 +100,7 @@ const sharedMemoryBaseDeclarationSlots = `
const sharedMemoryPrivateConnectedPlugAppArmor = `
# Description: Allow access to everything in private /dev/shm
-"/dev/shm/*" mrwlkix,
+"/dev/shm/**" mrwlkix,
`
func validateSharedMemoryPath(path string) error {
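
The widened glob matters because, under the usual AppArmor globbing rules, '*' matches any characters except '/' while '**' also crosses directory boundaries, so the old rule only covered files directly under /dev/shm and the new one also covers nested paths. A small Go illustration of that difference, using regular expressions as stand-ins for the two globs (illustration only; AppArmor does not use these regexps):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// AppArmor globbing: '*' stops at '/', '**' crosses directory boundaries.
	star := regexp.MustCompile(`^/dev/shm/[^/]*$`)    // old rule: "/dev/shm/*"
	doubleStar := regexp.MustCompile(`^/dev/shm/.*$`) // new rule: "/dev/shm/**"

	for _, p := range []string{"/dev/shm/lock", "/dev/shm/app/db/lock"} {
		fmt.Printf("%s star=%v doubleStar=%v\n", p, star.MatchString(p), doubleStar.MatchString(p))
	}
}
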
diff --git a/interfaces/builtin/shared_memory_test.go b/interfaces/builtin/shared_memory_test.go
index 206e459389..e3dc6d0141 100644
--- a/interfaces/builtin/shared_memory_test.go
+++ b/interfaces/builtin/shared_memory_test.go
@@ -424,7 +424,7 @@ func (s *SharedMemoryInterfaceSuite) TestAppArmorSpec(c *C) {
c.Assert(spec.AddConnectedSlot(s.iface, s.privatePlug, s.privateSlot), IsNil)
privateSlotSnippet := spec.SnippetForTag("snap.core.app")
- c.Check(privatePlugSnippet, testutil.Contains, `"/dev/shm/*" mrwlkix`)
+ c.Check(privatePlugSnippet, testutil.Contains, `"/dev/shm/**" mrwlkix`)
c.Check(privateSlotSnippet, Equals, "")
c.Check(strings.Join(privateUpdateNS, ""), Equals, ` # Private /dev/shm
/dev/ r,
diff --git a/interfaces/builtin/shutdown.go b/interfaces/builtin/shutdown.go
index e3f5a9af4c..306be054cc 100644
--- a/interfaces/builtin/shutdown.go
+++ b/interfaces/builtin/shutdown.go
@@ -45,7 +45,7 @@ dbus (send)
bus=system
path=/org/freedesktop/login1
interface=org.freedesktop.login1.Manager
- member={PowerOff,Reboot,Suspend,Hibernate,HybridSleep,CanPowerOff,CanReboot,CanSuspend,CanHibernate,CanHybridSleep,ScheduleShutdown,CancelScheduledShutdown,SetWallMessage}
+ member={Inhibit,PowerOff,Reboot,Suspend,Hibernate,HybridSleep,CanPowerOff,CanReboot,CanSuspend,CanHibernate,CanHybridSleep,ScheduleShutdown,CancelScheduledShutdown,SetWallMessage}
peer=(label=unconfined),
# Allow clients to introspect
diff --git a/interfaces/builtin/spi.go b/interfaces/builtin/spi.go
index ff91f24dd4..bda58782b7 100644
--- a/interfaces/builtin/spi.go
+++ b/interfaces/builtin/spi.go
@@ -84,7 +84,7 @@ func (iface *spiInterface) AppArmorConnectedPlug(spec *apparmor.Specification, p
return nil
}
spec.AddSnippet(fmt.Sprintf("%s rw,", path))
- // Use parametric snippets to avoid no-expr-simplify side-effects.
+ // Use parametric snippets to avoid parser slowdown.
spec.AddParametricSnippet([]string{
"/sys/devices/platform/**/**.spi/**/spidev" /* ###PARAM### */, "/** rw, # Add any condensed parametric rules",
}, strings.TrimPrefix(path, "/dev/spidev"))
diff --git a/interfaces/builtin/system_observe.go b/interfaces/builtin/system_observe.go
index 2c3a2dea8c..2b0d307e32 100644
--- a/interfaces/builtin/system_observe.go
+++ b/interfaces/builtin/system_observe.go
@@ -98,6 +98,7 @@ ptrace (read),
# but not smaps which contains a detailed mappings breakdown like
# /proc/self/maps, which we do not allow access to for other processes
@{PROC}/*/{,task/*/}smaps_rollup r,
+@{PROC}/*/{,task/*/}schedstat r,
@{PROC}/*/{,task/*/}stat r,
@{PROC}/*/{,task/*/}statm r,
@{PROC}/*/{,task/*/}status r,
diff --git a/interfaces/ifacetest/backendtest.go b/interfaces/ifacetest/backendtest.go
index 40f9a2d946..ef450afb0d 100644
--- a/interfaces/ifacetest/backendtest.go
+++ b/interfaces/ifacetest/backendtest.go
@@ -211,6 +211,13 @@ func (s *BackendSuite) InstallSnap(c *C, opts interfaces.ConfinementOptions, ins
// UpdateSnap "updates" an existing snap from YAML.
func (s *BackendSuite) UpdateSnap(c *C, oldSnapInfo *snap.Info, opts interfaces.ConfinementOptions, snapYaml string, revision int) *snap.Info {
+ newSnapInfo, err := s.UpdateSnapMaybeErr(c, oldSnapInfo, opts, snapYaml, revision)
+ c.Assert(err, IsNil)
+ return newSnapInfo
+}
+
+// UpdateSnapMaybeErr "updates" an existing snap from YAML, this might error.
+func (s *BackendSuite) UpdateSnapMaybeErr(c *C, oldSnapInfo *snap.Info, opts interfaces.ConfinementOptions, snapYaml string, revision int) (*snap.Info, error) {
newSnapInfo := snaptest.MockInfo(c, snapYaml, &snap.SideInfo{
Revision: snap.R(revision),
})
@@ -218,8 +225,7 @@ func (s *BackendSuite) UpdateSnap(c *C, oldSnapInfo *snap.Info, opts interfaces.
s.removePlugsSlots(c, oldSnapInfo)
s.addPlugsSlots(c, newSnapInfo)
err := s.Backend.Setup(newSnapInfo, opts, s.Repo, s.meas)
- c.Assert(err, IsNil)
- return newSnapInfo
+ return newSnapInfo, err
}
// RemoveSnap "removes" an "installed" snap.
diff --git a/interfaces/mount/backend_test.go b/interfaces/mount/backend_test.go
index 10581bde95..91779033fc 100644
--- a/interfaces/mount/backend_test.go
+++ b/interfaces/mount/backend_test.go
@@ -24,7 +24,6 @@ import (
"io/ioutil"
"os"
"path/filepath"
- "sort"
"strings"
"testing"
@@ -157,14 +156,12 @@ func (s *backendSuite) TestSetupSetsupSimple(c *C) {
// ensure both security effects from iface/iface2 are combined
// (because mount profiles are global in the whole snap)
expected := strings.Split(fmt.Sprintf("%s\n%s\n", fsEntry1, fsEntry2), "\n")
- sort.Strings(expected)
// and that we have the modern fstab file (global for snap)
fn := filepath.Join(dirs.SnapMountPolicyDir, "snap.snap-name.fstab")
content, err := ioutil.ReadFile(fn)
c.Assert(err, IsNil, Commentf("Expected mount profile for the whole snap"))
got := strings.Split(string(content), "\n")
- sort.Strings(got)
- c.Check(got, DeepEquals, expected)
+ c.Check(got, testutil.DeepUnsortedMatches, expected)
// Check that the user-fstab file was written with the user mount
fn = filepath.Join(dirs.SnapMountPolicyDir, "snap.snap-name.user-fstab")
@@ -246,3 +243,112 @@ func (s *backendSuite) TestSandboxFeatures(c *C) {
"stale-base-invalidation",
})
}
+
+func (s *backendSuite) TestSetupUpdates(c *C) {
+ fsEntry1 := osutil.MountEntry{Name: "/src-1", Dir: "/dst-1", Type: "none", Options: []string{"bind", "ro"}, DumpFrequency: 0, CheckPassNumber: 0}
+ fsEntry2 := osutil.MountEntry{Name: "/src-2", Dir: "/dst-2", Type: "none", Options: []string{"bind", "ro"}, DumpFrequency: 0, CheckPassNumber: 0}
+ fsEntry3 := osutil.MountEntry{Name: "/src-3", Dir: "/dst-3", Type: "none", Options: []string{"bind", "ro"}, DumpFrequency: 0, CheckPassNumber: 0}
+
+ update := false
+ // Give the plug a permanent effect
+ s.Iface.MountPermanentPlugCallback = func(spec *mount.Specification, plug *snap.PlugInfo) error {
+ return spec.AddMountEntry(fsEntry1)
+ }
+ // Give the slot a permanent effect
+ s.iface2.MountPermanentSlotCallback = func(spec *mount.Specification, slot *snap.SlotInfo) error {
+ if update {
+ if err := spec.AddMountEntry(fsEntry3); err != nil {
+ return err
+ }
+ }
+ return spec.AddMountEntry(fsEntry2)
+ }
+
+ cmd := testutil.MockCommand(c, "snap-update-ns", "")
+ defer cmd.Restore()
+ dirs.DistroLibExecDir = cmd.BinDir()
+
+ // confinement options are irrelevant to this security backend
+ snapInfo := s.InstallSnap(c, interfaces.ConfinementOptions{}, "", mockSnapYaml, 0)
+
+ // ensure both security effects from iface/iface2 are combined
+ // (because mount profiles are global in the whole snap)
+ expected := strings.Split(fmt.Sprintf("%s\n%s\n", fsEntry1, fsEntry2), "\n")
+ // and that we have the modern fstab file (global for snap)
+ fn := filepath.Join(dirs.SnapMountPolicyDir, "snap.snap-name.fstab")
+ content, err := ioutil.ReadFile(fn)
+ c.Assert(err, IsNil, Commentf("Expected mount profile for the whole snap"))
+ got := strings.Split(string(content), "\n")
+ c.Check(got, testutil.DeepUnsortedMatches, expected)
+
+ update = true
+ // ensure .mnt file
+ mntFile := filepath.Join(dirs.SnapRunNsDir, "snap-name.mnt")
+ err = ioutil.WriteFile(mntFile, []byte(""), 0644)
+ c.Assert(err, IsNil)
+
+ // confinement options are irrelevant to this security backend
+ s.UpdateSnap(c, snapInfo, interfaces.ConfinementOptions{}, mockSnapYaml, 1)
+
+ // snap-update-ns was invoked
+ c.Check(cmd.Calls(), DeepEquals, [][]string{{"snap-update-ns", "snap-name"}})
+
+ // ensure both security effects from iface/iface2 are combined
+ // (because mount profiles are global in the whole snap)
+ expected = strings.Split(fmt.Sprintf("%s\n%s\n%s\n", fsEntry1, fsEntry2, fsEntry3), "\n")
+ // and that we have the modern fstab file (global for snap)
+ content, err = ioutil.ReadFile(fn)
+ c.Assert(err, IsNil, Commentf("Expected mount profile for the whole snap"))
+ got = strings.Split(string(content), "\n")
+ c.Check(got, testutil.DeepUnsortedMatches, expected)
+}
+
+func (s *backendSuite) TestSetupUpdatesError(c *C) {
+ fsEntry1 := osutil.MountEntry{Name: "/src-1", Dir: "/dst-1", Type: "none", Options: []string{"bind", "ro"}, DumpFrequency: 0, CheckPassNumber: 0}
+ fsEntry2 := osutil.MountEntry{Name: "/src-2", Dir: "/dst-2", Type: "none", Options: []string{"bind", "ro"}, DumpFrequency: 0, CheckPassNumber: 0}
+ fsEntry3 := osutil.MountEntry{Name: "/src-3", Dir: "/dst-3", Type: "none", Options: []string{"bind", "ro"}, DumpFrequency: 0, CheckPassNumber: 0}
+
+ update := false
+ // Give the plug a permanent effect
+ s.Iface.MountPermanentPlugCallback = func(spec *mount.Specification, plug *snap.PlugInfo) error {
+ return spec.AddMountEntry(fsEntry1)
+ }
+ // Give the slot a permanent effect
+ s.iface2.MountPermanentSlotCallback = func(spec *mount.Specification, slot *snap.SlotInfo) error {
+ if update {
+ if err := spec.AddMountEntry(fsEntry3); err != nil {
+ return err
+ }
+ }
+ return spec.AddMountEntry(fsEntry2)
+ }
+
+ cmd := testutil.MockCommand(c, "snap-update-ns", "exit 1")
+ defer cmd.Restore()
+ dirs.DistroLibExecDir = cmd.BinDir()
+
+ // confinement options are irrelevant to this security backend
+ snapInfo := s.InstallSnap(c, interfaces.ConfinementOptions{}, "", mockSnapYaml, 0)
+
+ update = true
+ // ensure .mnt file
+ mntFile := filepath.Join(dirs.SnapRunNsDir, "snap-name.mnt")
+ err := ioutil.WriteFile(mntFile, []byte(""), 0644)
+ c.Assert(err, IsNil)
+
+ // confinement options are irrelevant to this security backend
+ _, err = s.UpdateSnapMaybeErr(c, snapInfo, interfaces.ConfinementOptions{}, mockSnapYaml, 1)
+ c.Check(err, ErrorMatches, `cannot update mount namespace of snap "snap-name":.*`)
+
+ // snap-update-ns was invoked
+ c.Check(cmd.Calls(), DeepEquals, [][]string{{"snap-update-ns", "snap-name"}})
+
+ // no undo at this level
+ expected := strings.Split(fmt.Sprintf("%s\n%s\n%s\n", fsEntry1, fsEntry2, fsEntry3), "\n")
+ // and that we have the modern fstab file (global for snap)
+ fn := filepath.Join(dirs.SnapMountPolicyDir, "snap.snap-name.fstab")
+ content, err := ioutil.ReadFile(fn)
+ c.Assert(err, IsNil, Commentf("Expected mount profile for the whole snap"))
+ got := strings.Split(string(content), "\n")
+ c.Check(got, testutil.DeepUnsortedMatches, expected)
+}
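
Both new tests rely on the same mocking pattern: testutil.MockCommand drops a scripted stand-in for snap-update-ns into a temporary bin directory (pointed at via dirs.DistroLibExecDir here), records every invocation, and the script body decides whether the call succeeds. A condensed sketch of that pattern, with the surrounding test reduced to the essentials:

// success case: empty script body, the mocked snap-update-ns exits 0
cmd := testutil.MockCommand(c, "snap-update-ns", "")
defer cmd.Restore()
dirs.DistroLibExecDir = cmd.BinDir()

// ... trigger the backend update under test ...

// every invocation is recorded as argv
c.Check(cmd.Calls(), DeepEquals, [][]string{{"snap-update-ns", "snap-name"}})

// failure case: the script body is shell, so "exit 1" makes the backend
// surface an error to the caller instead of succeeding
failing := testutil.MockCommand(c, "snap-update-ns", "exit 1")
defer failing.Restore()
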
diff --git a/overlord/aspectstate/aspectstate.go b/overlord/aspectstate/aspectstate.go
index a324df6d15..ee450184ed 100644
--- a/overlord/aspectstate/aspectstate.go
+++ b/overlord/aspectstate/aspectstate.go
@@ -26,20 +26,13 @@ import (
"github.com/snapcore/snapd/overlord/state"
)
-// Set finds the aspect identified by the account, bundleName and aspect and sets
-// the specified field to the supplied value.
-func Set(st *state.State, account, bundleName, aspect, field string, value interface{}) error {
- databag, err := getDatabag(st, account, bundleName)
- if err != nil {
- if !errors.Is(err, state.ErrNoState) {
- return err
- }
-
- databag = aspects.NewJSONDataBag()
- }
-
+// SetAspect finds the aspect identified by the account, bundleName and aspect
+// and sets the specified field to the supplied value in the provided databag.
+func SetAspect(databag aspects.DataBag, account, bundleName, aspect, field string, value interface{}) error {
accPatterns := aspecttest.MockWifiSetupAspect()
- aspectBundle, err := aspects.NewAspectBundle(bundleName, accPatterns, aspects.NewJSONSchema())
+ schema := aspects.NewJSONSchema()
+
+ aspectBundle, err := aspects.NewAspectBundle(bundleName, accPatterns, schema)
if err != nil {
return err
}
@@ -53,27 +46,17 @@ func Set(st *state.State, account, bundleName, aspect, field string, value inter
return err
}
- if err := updateDatabags(st, account, bundleName, databag); err != nil {
- return err
- }
-
return nil
}
-// Get finds the aspect identified by the account, bundleName and aspect and
-// returns the specified field's value through the "value" output parameter.
-func Get(st *state.State, account, bundleName, aspect, field string, value interface{}) error {
- databag, err := getDatabag(st, account, bundleName)
- if err != nil {
- if errors.Is(err, state.ErrNoState) {
- return &aspects.AspectNotFoundError{Account: account, BundleName: bundleName, Aspect: aspect}
- }
-
- return err
- }
-
+// GetAspect finds the aspect identified by the account, bundleName and aspect
+// and returns the specified field's value from the provided databag
+// through the value output parameter.
+func GetAspect(databag aspects.DataBag, account, bundleName, aspect, field string, value interface{}) error {
accPatterns := aspecttest.MockWifiSetupAspect()
- aspectBundle, err := aspects.NewAspectBundle(bundleName, accPatterns, aspects.NewJSONSchema())
+ schema := aspects.NewJSONSchema()
+
+ aspectBundle, err := aspects.NewAspectBundle(bundleName, accPatterns, schema)
if err != nil {
return err
}
@@ -90,21 +73,35 @@ func Get(st *state.State, account, bundleName, aspect, field string, value inter
return nil
}
-func updateDatabags(st *state.State, account, bundleName string, databag aspects.JSONDataBag) error {
- var databags map[string]map[string]aspects.JSONDataBag
- if err := st.Get("aspect-databags", &databags); err != nil {
- if !errors.Is(err, state.ErrNoState) {
- return err
- }
+// NewTransaction returns a transaction configured to read and write databags
+// from state as needed.
+func NewTransaction(st *state.State, account, bundleName string) (*aspects.Transaction, error) {
+ schema := aspects.NewJSONSchema()
+ getter := bagGetter(st, account, bundleName)
+ setter := func(bag aspects.JSONDataBag) error {
+ return updateDatabags(st, account, bundleName, bag)
+ }
- databags = map[string]map[string]aspects.JSONDataBag{
- account: {bundleName: aspects.NewJSONDataBag()},
- }
+ tx, err := aspects.NewTransaction(getter, setter, schema)
+ if err != nil {
+ return nil, err
}
- databags[account][bundleName] = databag
- st.Set("aspect-databags", databags)
- return nil
+ return tx, nil
+}
+
+func bagGetter(st *state.State, account, bundleName string) aspects.DatabagRead {
+ return func() (aspects.JSONDataBag, error) {
+ databag, err := getDatabag(st, account, bundleName)
+ if err != nil {
+ if !errors.Is(err, state.ErrNoState) {
+ return nil, err
+ }
+
+ databag = aspects.NewJSONDataBag()
+ }
+ return databag, nil
+ }
}
func getDatabag(st *state.State, account, bundleName string) (aspects.JSONDataBag, error) {
@@ -112,5 +109,25 @@ func getDatabag(st *state.State, account, bundleName string) (aspects.JSONDataBa
if err := st.Get("aspect-databags", &databags); err != nil {
return nil, err
}
+
+ if databags[account] == nil || databags[account][bundleName] == nil {
+ return nil, state.ErrNoState
+ }
return databags[account][bundleName], nil
}
+
+func updateDatabags(st *state.State, account, bundleName string, databag aspects.JSONDataBag) error {
+ var databags map[string]map[string]aspects.JSONDataBag
+ err := st.Get("aspect-databags", &databags)
+ if err != nil && !errors.Is(err, state.ErrNoState) {
+ return err
+ } else if errors.Is(err, &state.NoStateError{}) || databags[account] == nil || databags[account][bundleName] == nil {
+ databags = map[string]map[string]aspects.JSONDataBag{
+ account: {bundleName: aspects.NewJSONDataBag()},
+ }
+ }
+
+ databags[account][bundleName] = databag
+ st.Set("aspect-databags", databags)
+ return nil
+}
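
With this refactor the state plumbing lives in NewTransaction: reads go through bagGetter, writes through updateDatabags, and callers stage changes on the transaction until Commit persists the merged databag under "aspect-databags". A usage sketch distilled from the tests that follow (the path "wifi.ssid" and its value are illustrative; locking and error handling as in those tests):

// inside a function that returns an error and holds *state.State in st
st.Lock()
defer st.Unlock()

tx, err := aspectstate.NewTransaction(st, "system", "network")
if err != nil {
	return err
}
// writes are staged in the transaction's copy of the databag...
if err := tx.Set("wifi.ssid", "my-ssid"); err != nil {
	return err
}
// ...and only land in the "aspect-databags" state entry on Commit
return tx.Commit()
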
diff --git a/overlord/aspectstate/aspectstate_test.go b/overlord/aspectstate/aspectstate_test.go
index cf8b349956..fe28a637a3 100644
--- a/overlord/aspectstate/aspectstate_test.go
+++ b/overlord/aspectstate/aspectstate_test.go
@@ -46,57 +46,32 @@ func (s *aspectTestSuite) TestGetAspect(c *C) {
err := databag.Set("wifi.ssid", "foo")
c.Assert(err, IsNil)
- s.state.Lock()
- defer s.state.Unlock()
- s.state.Set("aspect-databags", map[string]map[string]aspects.JSONDataBag{
- "system": {"network": databag},
- })
-
var res interface{}
- err = aspectstate.Get(s.state, "system", "network", "wifi-setup", "ssid", &res)
+ err = aspectstate.GetAspect(databag, "system", "network", "wifi-setup", "ssid", &res)
c.Assert(err, IsNil)
c.Assert(res, Equals, "foo")
}
func (s *aspectTestSuite) TestGetNotFound(c *C) {
- s.state.Lock()
- defer s.state.Unlock()
+ databag := aspects.NewJSONDataBag()
var res interface{}
- err := aspectstate.Get(s.state, "system", "network", "wifi-setup", "ssid", &res)
- c.Assert(err, FitsTypeOf, &aspects.AspectNotFoundError{})
- c.Assert(err, ErrorMatches, `aspect system/network/wifi-setup not found`)
- c.Check(res, IsNil)
-
- s.state.Set("aspect-databags", map[string]map[string]aspects.JSONDataBag{
- "system": {"network": aspects.NewJSONDataBag()},
- })
-
- err = aspectstate.Get(s.state, "system", "network", "other-aspect", "ssid", &res)
+ err := aspectstate.GetAspect(databag, "system", "network", "other-aspect", "ssid", &res)
c.Assert(err, FitsTypeOf, &aspects.AspectNotFoundError{})
c.Assert(err, ErrorMatches, `aspect system/network/other-aspect not found`)
c.Check(res, IsNil)
- err = aspectstate.Get(s.state, "system", "network", "wifi-setup", "ssid", &res)
+ err = aspectstate.GetAspect(databag, "system", "network", "wifi-setup", "ssid", &res)
c.Assert(err, FitsTypeOf, &aspects.FieldNotFoundError{})
c.Assert(err, ErrorMatches, `cannot get field "ssid": no value was found under "wifi"`)
c.Check(res, IsNil)
}
func (s *aspectTestSuite) TestSetAspect(c *C) {
- s.state.Lock()
- defer s.state.Unlock()
-
- err := aspectstate.Set(s.state, "system", "network", "wifi-setup", "ssid", "foo")
- c.Assert(err, IsNil)
-
- var databags map[string]map[string]aspects.JSONDataBag
- err = s.state.Get("aspect-databags", &databags)
+ databag := aspects.NewJSONDataBag()
+ err := aspectstate.SetAspect(databag, "system", "network", "wifi-setup", "ssid", "foo")
c.Assert(err, IsNil)
- databag := databags["system"]["network"]
- c.Assert(databag, NotNil)
-
var val string
err = databag.Get("wifi.ssid", &val)
c.Assert(err, IsNil)
@@ -104,43 +79,112 @@ func (s *aspectTestSuite) TestSetAspect(c *C) {
}
func (s *aspectTestSuite) TestSetNotFound(c *C) {
- s.state.Lock()
- defer s.state.Unlock()
-
- err := aspectstate.Set(s.state, "system", "other-bundle", "other-aspect", "foo", "bar")
+ databag := aspects.NewJSONDataBag()
+ err := aspectstate.SetAspect(databag, "system", "other-bundle", "other-aspect", "foo", "bar")
c.Assert(err, FitsTypeOf, &aspects.AspectNotFoundError{})
- err = aspectstate.Set(s.state, "system", "network", "other-aspect", "foo", "bar")
+ err = aspectstate.SetAspect(databag, "system", "network", "other-aspect", "foo", "bar")
c.Assert(err, FitsTypeOf, &aspects.AspectNotFoundError{})
}
func (s *aspectTestSuite) TestSetAccessError(c *C) {
- s.state.Lock()
- defer s.state.Unlock()
-
- err := aspectstate.Set(s.state, "system", "network", "wifi-setup", "status", "foo")
+ databag := aspects.NewJSONDataBag()
+ err := aspectstate.SetAspect(databag, "system", "network", "wifi-setup", "status", "foo")
c.Assert(err, ErrorMatches, `cannot write field "status": only supports read access`)
}
func (s *aspectTestSuite) TestUnsetAspect(c *C) {
+ databag := aspects.NewJSONDataBag()
+ err := aspectstate.SetAspect(databag, "system", "network", "wifi-setup", "ssid", "foo")
+ c.Assert(err, IsNil)
+
+ err = aspectstate.SetAspect(databag, "system", "network", "wifi-setup", "ssid", nil)
+ c.Assert(err, IsNil)
+
+ var val string
+ err = databag.Get("wifi.ssid", &val)
+ c.Assert(err, FitsTypeOf, &aspects.FieldNotFoundError{})
+ c.Assert(val, Equals, "")
+}
+
+func (s *aspectTestSuite) TestNewTransactionExistingState(c *C) {
s.state.Lock()
defer s.state.Unlock()
- err := aspectstate.Set(s.state, "system", "network", "wifi-setup", "ssid", "foo")
+ bag := aspects.NewJSONDataBag()
+ err := bag.Set("foo", "bar")
+ c.Assert(err, IsNil)
+ databags := map[string]map[string]aspects.JSONDataBag{
+ "system": {"network": bag},
+ }
+ s.state.Set("aspect-databags", databags)
+
+ tx, err := aspectstate.NewTransaction(s.state, "system", "network")
+ c.Assert(err, IsNil)
+
+ var value interface{}
+ err = tx.Get("foo", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "bar")
+
+ err = tx.Set("foo", "baz")
c.Assert(err, IsNil)
- err = aspectstate.Set(s.state, "system", "network", "wifi-setup", "ssid", nil)
+ err = tx.Commit()
c.Assert(err, IsNil)
- var databags map[string]map[string]aspects.JSONDataBag
err = s.state.Get("aspect-databags", &databags)
c.Assert(err, IsNil)
+ err = databags["system"]["network"].Get("foo", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "baz")
+}
- databag := databags["system"]["network"]
- c.Assert(databag, NotNil)
+func (s *aspectTestSuite) TestNewTransactionNoState(c *C) {
+ type testcase struct {
+ state map[string]map[string]aspects.JSONDataBag
+ }
+
+ testcases := []testcase{
+ {
+ state: map[string]map[string]aspects.JSONDataBag{
+ "system": {"network": nil},
+ },
+ },
+ {
+ state: map[string]map[string]aspects.JSONDataBag{
+ "system": nil,
+ },
+ },
+ {
+ state: map[string]map[string]aspects.JSONDataBag{},
+ },
+ {
+ state: nil,
+ },
+ }
- var val string
- err = databag.Get("wifi.ssid", &val)
- c.Assert(err, FitsTypeOf, &aspects.FieldNotFoundError{})
- c.Assert(val, Equals, "")
+ s.state.Lock()
+ defer s.state.Unlock()
+ for _, tc := range testcases {
+ s.state.Set("aspect-databags", tc.state)
+
+ tx, err := aspectstate.NewTransaction(s.state, "system", "network")
+ c.Assert(err, IsNil)
+
+ err = tx.Set("foo", "bar")
+ c.Assert(err, IsNil)
+
+ err = tx.Commit()
+ c.Assert(err, IsNil)
+
+ var databags map[string]map[string]aspects.JSONDataBag
+ err = s.state.Get("aspect-databags", &databags)
+ c.Assert(err, IsNil)
+
+ var value interface{}
+ err = databags["system"]["network"].Get("foo", &value)
+ c.Assert(err, IsNil)
+ c.Assert(value, Equals, "bar")
+ }
}
diff --git a/overlord/assertstate/assertstate_test.go b/overlord/assertstate/assertstate_test.go
index bb4db1cdc0..eba96a2f6f 100644
--- a/overlord/assertstate/assertstate_test.go
+++ b/overlord/assertstate/assertstate_test.go
@@ -1212,7 +1212,7 @@ func (s *assertMgrSuite) TestRefreshAssertionsRefreshSnapDeclarationsAndValidati
c.Check(a.Revision(), Equals, 3)
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
// changed validation set assertion again
vsetAs3 := s.validationSetAssert(c, "bar", "4", "5", "required", "1")
@@ -1251,7 +1251,7 @@ func (s *assertMgrSuite) TestRefreshSnapDeclarationsNop(c *C) {
err := assertstate.RefreshSnapDeclarations(s.state, 0, &assertstate.RefreshAssertionsOptions{IsAutoRefresh: true})
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, true)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, true)
}
func (s *assertMgrSuite) TestRefreshSnapDeclarationsNoStore(c *C) {
@@ -2504,7 +2504,7 @@ func (s *assertMgrSuite) TestValidationSetAssertionsAutoRefresh(c *C) {
assertstate.UpdateValidationSet(s.state, &tr)
c.Assert(assertstate.AutoRefreshAssertions(s.state, 0), IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, true)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, true)
a, err := assertstate.DB(s.state).Find(asserts.ValidationSetType, map[string]string{
"series": "16",
@@ -2607,7 +2607,7 @@ func (s *assertMgrSuite) TestRefreshValidationSetAssertions(c *C) {
c.Check(s.fakeStore.(*fakeStore).requestedTypes, DeepEquals, [][]string{
{"account", "account-key", "validation-set"},
})
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, true)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, true)
// sequence changed in the store to 4
vsetAs3 := s.validationSetAssert(c, "bar", "4", "3", "required", "1")
@@ -3307,7 +3307,7 @@ func (s *assertMgrSuite) TestValidationSetAssertionForEnforcePinnedHappy(c *C) {
"sequence": "2",
})
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
}
func (s *assertMgrSuite) TestValidationSetAssertionForEnforceNotPinnedUnhappyMissingSnap(c *C) {
@@ -3601,7 +3601,7 @@ func (s *assertMgrSuite) TestEnforceValidationSetAssertion(c *C) {
"sequence": "2",
})
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
var tr assertstate.ValidationSetTracking
c.Assert(assertstate.GetValidationSet(s.state, s.dev1Acct.AccountID(), "bar", &tr), IsNil)
@@ -3662,7 +3662,7 @@ func (s *assertMgrSuite) TestEnforceValidationSetAssertionUpdate(c *C) {
"sequence": "2",
})
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
var tr assertstate.ValidationSetTracking
c.Assert(assertstate.GetValidationSet(s.state, s.dev1Acct.AccountID(), "bar", &tr), IsNil)
@@ -3739,7 +3739,7 @@ func (s *assertMgrSuite) TestEnforceValidationSetAssertionPinToOlderSequence(c *
"sequence": "2",
})
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
var tr assertstate.ValidationSetTracking
c.Assert(assertstate.GetValidationSet(s.state, s.dev1Acct.AccountID(), "bar", &tr), IsNil)
@@ -3814,7 +3814,7 @@ func (s *assertMgrSuite) TestEnforceValidationSetAssertionAfterMonitor(c *C) {
"sequence": "2",
})
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
var tr assertstate.ValidationSetTracking
c.Assert(assertstate.GetValidationSet(s.state, s.dev1Acct.AccountID(), "bar", &tr), IsNil)
@@ -3869,7 +3869,7 @@ func (s *assertMgrSuite) TestEnforceValidationSetAssertionIgnoreValidation(c *C)
"sequence": "2",
})
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
var tr assertstate.ValidationSetTracking
c.Assert(assertstate.GetValidationSet(s.state, s.dev1Acct.AccountID(), "bar", &tr), IsNil)
@@ -3972,7 +3972,7 @@ func (s *assertMgrSuite) TestTryEnforceValidationSetsAssertionsValidationError(c
"sequence": "1",
})
c.Assert(errors.Is(err, &asserts.NotFoundError{}), Equals, true)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
}
func (s *assertMgrSuite) TestTryEnforceValidationSetsAssertionsOK(c *C) {
@@ -4044,7 +4044,7 @@ func (s *assertMgrSuite) TestTryEnforceValidationSetsAssertionsOK(c *C) {
"sequence": "1",
})
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
// tracking was updated
var tr assertstate.ValidationSetTracking
@@ -4145,7 +4145,7 @@ func (s *assertMgrSuite) TestTryEnforceValidationSetsAssertionsAlreadyTrackedUpd
"sequence": "2",
})
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
// tracking was updated
var tr assertstate.ValidationSetTracking
@@ -4228,7 +4228,7 @@ func (s *assertMgrSuite) TestTryEnforceValidationSetsAssertionsConflictError(c *
"sequence": "2",
})
c.Assert(errors.Is(err, &asserts.NotFoundError{}), Equals, true)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
}
func (s *assertMgrSuite) TestMonitorValidationSet(c *C) {
@@ -4267,7 +4267,7 @@ func (s *assertMgrSuite) TestMonitorValidationSet(c *C) {
"sequence": "2",
})
c.Assert(err, IsNil)
- c.Check(s.fakeStore.(*fakeStore).opts.IsAutoRefresh, Equals, false)
+ c.Check(s.fakeStore.(*fakeStore).opts.Scheduled, Equals, false)
var tr assertstate.ValidationSetTracking
c.Assert(assertstate.GetValidationSet(s.state, s.dev1Acct.AccountID(), "bar", &tr), IsNil)
diff --git a/overlord/assertstate/bulk.go b/overlord/assertstate/bulk.go
index 553e4a5c0c..6fce93b797 100644
--- a/overlord/assertstate/bulk.go
+++ b/overlord/assertstate/bulk.go
@@ -256,7 +256,7 @@ func resolvePool(s *state.State, pool *asserts.Pool, checkBeforeCommit func(*ass
unsupported := handleUnsupported(db)
for {
- storeOpts := &store.RefreshOptions{IsAutoRefresh: opts.IsAutoRefresh}
+ storeOpts := &store.RefreshOptions{Scheduled: opts.IsAutoRefresh}
s.Unlock()
_, aresults, err := sto.SnapAction(context.TODO(), nil, nil, pool, user, storeOpts)
s.Lock()
diff --git a/overlord/devicestate/devicestate_install_api_test.go b/overlord/devicestate/devicestate_install_api_test.go
index de75f92838..84db98360b 100644
--- a/overlord/devicestate/devicestate_install_api_test.go
+++ b/overlord/devicestate/devicestate_install_api_test.go
@@ -158,24 +158,29 @@ func (s *deviceMgrInstallAPISuite) setupSystemSeed(c *C, sysLabel, gadgetYaml st
}
type finishStepOpts struct {
- encrypted bool
- isClassic bool
+ encrypted bool
+ isClassic bool
+ hasPartial bool
}
-func (s *deviceMgrInstallAPISuite) mockSystemSeedWithLabel(c *C, label string, isClassic bool) (gadgetSnapPath, kernelSnapPath string, ginfo *gadget.Info, mountCmd *testutil.MockCmd) {
+func (s *deviceMgrInstallAPISuite) mockSystemSeedWithLabel(c *C, label string, isClassic, hasPartial bool) (gadgetSnapPath, kernelSnapPath string, ginfo *gadget.Info, mountCmd *testutil.MockCmd) {
// Mock partitioned disk
gadgetYaml := gadgettest.SingleVolumeClassicWithModesGadgetYaml
+ seedGadget := gadgetYaml
+ if hasPartial {
+ // This is the gadget provided by the installer, which must have
+ // filled in the partial information.
+ gadgetYaml = gadgettest.SingleVolumeClassicWithModesFilledPartialGadgetYaml
+ // This is the partial gadget, with parts not filled
+ seedGadget = gadgettest.SingleVolumeClassicWithModesPartialGadgetYaml
+ }
gadgetRoot := filepath.Join(c.MkDir(), "gadget")
ginfo, _, _, restore, err := gadgettest.MockGadgetPartitionedDisk(gadgetYaml, gadgetRoot)
c.Assert(err, IsNil)
s.AddCleanup(restore)
// now create a label with snaps/assertions
- // TODO This should be "gadgetYaml" instead of SingleVolumeUC20GadgetYaml,
- // but we have to do it this way as otherwise snap pack will complain
- // while validating, as it does not have information about the model at
- // that time. When that is fixed this must change to gadgetYaml.
- model := s.setupSystemSeed(c, label, gadgettest.SingleVolumeUC20GadgetYaml, isClassic)
+ model := s.setupSystemSeed(c, label, seedGadget, isClassic)
c.Check(model, NotNil)
// Create fake seed that will return information from the label we created
@@ -227,7 +232,7 @@ func (s *deviceMgrInstallAPISuite) testInstallFinishStep(c *C, opts finishStepOp
// Mock label
label := "classic"
- gadgetSnapPath, kernelSnapPath, ginfo, mountCmd := s.mockSystemSeedWithLabel(c, label, opts.isClassic)
+ gadgetSnapPath, kernelSnapPath, ginfo, mountCmd := s.mockSystemSeedWithLabel(c, label, opts.isClassic, opts.hasPartial)
// Unpack gadget snap from seed where it would have been mounted
gadgetDir := filepath.Join(dirs.SnapRunDir, "snap-content/gadget")
@@ -276,6 +281,17 @@ func (s *deviceMgrInstallAPISuite) testInstallFinishStep(c *C, opts finishStepOp
saveStorageTraitsCalls := 0
restore = devicestate.MockInstallSaveStorageTraits(func(model gadget.Model, allLaidOutVols map[string]*gadget.LaidOutVolume, encryptSetupData *install.EncryptionSetupData) error {
saveStorageTraitsCalls++
+ // This is a good point to check that the partial gadget information has been filled in
+ if opts.hasPartial {
+ for _, v := range allLaidOutVols {
+ c.Check(v.Partial, DeepEquals, []gadget.PartialProperty{gadget.PartialStructure})
+ c.Check(v.Schema != "", Equals, true)
+ for _, vs := range v.Structure {
+ c.Check(vs.Filesystem != "", Equals, true)
+ c.Check(vs.Size != 0, Equals, true)
+ }
+ }
+ }
return nil
})
s.AddCleanup(restore)
@@ -391,6 +407,10 @@ func (s *deviceMgrInstallAPISuite) TestInstallFinishEncryptionHappy(c *C) {
s.testInstallFinishStep(c, finishStepOpts{encrypted: true, isClassic: true})
}
+func (s *deviceMgrInstallAPISuite) TestInstallFinishEncryptionPartialHappy(c *C) {
+ s.testInstallFinishStep(c, finishStepOpts{encrypted: true, isClassic: true, hasPartial: true})
+}
+
func (s *deviceMgrInstallAPISuite) TestInstallFinishNoLabel(c *C) {
// Mock partitioned disk, but there will be no label in the system
gadgetYaml := gadgettest.SingleVolumeClassicWithModesGadgetYaml
@@ -429,7 +449,7 @@ func (s *deviceMgrInstallAPISuite) testInstallSetupStorageEncryption(c *C, hasTP
// Mock label
label := "classic"
isClassic := true
- gadgetSnapPath, kernelSnapPath, ginfo, mountCmd := s.mockSystemSeedWithLabel(c, label, isClassic)
+ gadgetSnapPath, kernelSnapPath, ginfo, mountCmd := s.mockSystemSeedWithLabel(c, label, isClassic, false)
// Simulate system with TPM
if hasTPM {
diff --git a/overlord/devicestate/devicestate_remodel_test.go b/overlord/devicestate/devicestate_remodel_test.go
index c3c0c8dbec..cecae069e4 100644
--- a/overlord/devicestate/devicestate_remodel_test.go
+++ b/overlord/devicestate/devicestate_remodel_test.go
@@ -37,7 +37,6 @@ import (
"github.com/snapcore/snapd/asserts/assertstest"
"github.com/snapcore/snapd/boot"
"github.com/snapcore/snapd/gadget"
- "github.com/snapcore/snapd/gadget/gadgettest"
"github.com/snapcore/snapd/gadget/quantity"
"github.com/snapcore/snapd/logger"
"github.com/snapcore/snapd/overlord/assertstate"
@@ -1545,7 +1544,7 @@ volumes:
},
RootDir: currentGadgetInfo.MountDir(),
}
- gadgettest.SetEnclosingVolumeInStructs(gd.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(gd.Info.Volumes)
c.Check(current, DeepEquals, gd)
gd = gadget.GadgetData{
Info: &gadget.Info{
@@ -1583,7 +1582,7 @@ volumes:
},
RootDir: newGadgetInfo.MountDir(),
}
- gadgettest.SetEnclosingVolumeInStructs(gd.Info.Volumes)
+ gadget.SetEnclosingVolumeInStructs(gd.Info.Volumes)
c.Check(update, DeepEquals, gd)
return nil
})
diff --git a/overlord/devicestate/handlers_install.go b/overlord/devicestate/handlers_install.go
index 50c5844e94..1c5458de21 100644
--- a/overlord/devicestate/handlers_install.go
+++ b/overlord/devicestate/handlers_install.go
@@ -933,7 +933,7 @@ func (m *DeviceManager) doInstallFinish(t *state.Task, _ *tomb.Tomb) error {
}
defer unmount()
- // TODO validation of onVolumes versus gadget.yaml
+ // TODO validation of onVolumes versus gadget.yaml, also considering partial gadgets
// Check if encryption is mandatory
if sys.Model.StorageSafety() == asserts.StorageSafetyEncrypted && encryptSetupData == nil {
@@ -961,6 +961,12 @@ func (m *DeviceManager) doInstallFinish(t *state.Task, _ *tomb.Tomb) error {
return fmt.Errorf("on finish install: cannot layout volumes: %v", err)
}
+ // Import new information from the installer to the laid out data,
+ // so the gadget is not partially defined anymore if it was.
+ if err := gadget.ApplyInstallerVolumesToGadget(onVolumes, allLaidOutVols); err != nil {
+ return err
+ }
+
logger.Debugf("writing content to partitions")
timings.Run(perfTimings, "install-content", "Writing content to partitions", func(tm timings.Measurer) {
st.Unlock()
diff --git a/overlord/hookstate/ctlcmd/refresh_test.go b/overlord/hookstate/ctlcmd/refresh_test.go
index e8fae9d357..d7e253cb96 100644
--- a/overlord/hookstate/ctlcmd/refresh_test.go
+++ b/overlord/hookstate/ctlcmd/refresh_test.go
@@ -55,8 +55,9 @@ func mockRefreshCandidate(snapName, channel, version string, revision snap.Revis
Revision: revision,
RealName: snapName,
},
+ Version: version,
}
- return snapstate.MockRefreshCandidate(sup, version)
+ return snapstate.MockRefreshCandidate(sup)
}
func (s *refreshSuite) SetUpTest(c *C) {
diff --git a/overlord/hookstate/hooks_test.go b/overlord/hookstate/hooks_test.go
index b655df5386..01744210e4 100644
--- a/overlord/hookstate/hooks_test.go
+++ b/overlord/hookstate/hooks_test.go
@@ -114,8 +114,9 @@ func mockRefreshCandidate(snapName, instanceKey, channel, version string, revisi
Revision: revision,
RealName: snapName,
},
+ Version: version,
}
- return snapstate.MockRefreshCandidate(sup, version)
+ return snapstate.MockRefreshCandidate(sup)
}
func (s *gateAutoRefreshHookSuite) settle(c *C) {
diff --git a/overlord/snapstate/autorefresh.go b/overlord/snapstate/autorefresh.go
index 110aedc158..63d754d34d 100644
--- a/overlord/snapstate/autorefresh.go
+++ b/overlord/snapstate/autorefresh.go
@@ -1,7 +1,7 @@
// -*- Mode: Go; indent-tabs-mode: t -*-
/*
- * Copyright (C) 2017-2022 Canonical Ltd
+ * Copyright (C) 2017-2023 Canonical Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
@@ -80,7 +80,6 @@ var refreshRetryDelay = 20 * time.Minute
// of auto-refresh.
type refreshCandidate struct {
SnapSetup
- Version string `json:"version,omitempty"`
}
func (rc *refreshCandidate) Type() snap.Type {
@@ -785,9 +784,8 @@ func inhibitRefresh(st *state.State, snapst *SnapState, snapsup *SnapSetup, info
}
// for testing outside of snapstate
-func MockRefreshCandidate(snapSetup *SnapSetup, version string) interface{} {
+func MockRefreshCandidate(snapSetup *SnapSetup) interface{} {
return &refreshCandidate{
SnapSetup: *snapSetup,
- Version: version,
}
}
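
Dropping the extra field works because Version now lives on SnapSetup, which refreshCandidate embeds, so rc.Version still resolves via Go's field promotion. A trivial, self-contained illustration of that embedding (struct shapes reduced to the relevant field):

package main

import "fmt"

// reduced shapes mirroring the diff: SnapSetup carries Version,
// refreshCandidate only embeds SnapSetup
type SnapSetup struct {
	Version string `json:"version,omitempty"`
}

type refreshCandidate struct {
	SnapSetup
}

func main() {
	rc := refreshCandidate{SnapSetup{Version: "2.60"}}
	fmt.Println(rc.Version) // promoted from the embedded SnapSetup
}
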
diff --git a/overlord/snapstate/autorefresh_test.go b/overlord/snapstate/autorefresh_test.go
index a350c2e7cf..bac7dfc6eb 100644
--- a/overlord/snapstate/autorefresh_test.go
+++ b/overlord/snapstate/autorefresh_test.go
@@ -73,7 +73,7 @@ func (r *autoRefreshStore) SnapAction(ctx context.Context, currentSnaps []*store
if assertQuery != nil {
panic("no assertion query support")
}
- if !opts.IsAutoRefresh {
+ if !opts.Scheduled {
panic("AutoRefresh snap action did not set IsAutoRefresh flag")
}
diff --git a/overlord/snapstate/backend_test.go b/overlord/snapstate/backend_test.go
index d6c1ff1d7b..2a9c64ab2a 100644
--- a/overlord/snapstate/backend_test.go
+++ b/overlord/snapstate/backend_test.go
@@ -259,7 +259,7 @@ func (f *fakeStore) snap(spec snapSpec) (*snap.Info, error) {
SnapID: snapID,
Revision: spec.Revision,
},
- Version: spec.Name,
+ Version: spec.Name + "Ver",
DownloadInfo: snap.DownloadInfo{
DownloadURL: "https://some-server.com/some/path.snap",
Size: 5,
@@ -454,7 +454,7 @@ func (f *fakeStore) lookupRefresh(cand refreshCand) (*snap.Info, error) {
SnapID: cand.snapID,
Revision: revno,
},
- Version: name,
+ Version: name + "Ver",
DownloadInfo: snap.DownloadInfo{
DownloadURL: "https://some-server.com/some/path.snap",
},
@@ -898,6 +898,7 @@ func (f *fakeSnappyBackend) ReadInfo(name string, si *snap.SideInfo) (*snap.Info
// naive emulation for now, always works
info := &snap.Info{
SuggestedName: snapName,
+ Version: snapName + "Ver",
SideInfo: *si,
Architectures: []string{"all"},
SnapType: snap.TypeApp,
diff --git a/overlord/snapstate/conflict.go b/overlord/snapstate/conflict.go
index a6ca8f9051..5e89694398 100644
--- a/overlord/snapstate/conflict.go
+++ b/overlord/snapstate/conflict.go
@@ -25,6 +25,7 @@ import (
"reflect"
"github.com/snapcore/snapd/overlord/state"
+ "github.com/snapcore/snapd/strutil"
)
// FinalTasks are task kinds for final tasks in a change which means no further
@@ -98,6 +99,51 @@ func affectedSnaps(t *state.Task) ([]string, error) {
return nil, nil
}
+func snapSetupFromChange(chg *state.Change) (*SnapSetup, error) {
+ for _, t := range chg.Tasks() {
+ // Check a known task kind in the change that we know keeps snap info.
+ if t.Kind() != "prerequisites" {
+ continue
+ }
+ return TaskSnapSetup(t)
+ }
+ return nil, nil
+}
+
+// changeIsSnapdDowngrade returns true if the change provided is a snapd
+// setup change with a version lower than what is currently installed. If a change
+// is not SnapSetup related this returns false.
+func changeIsSnapdDowngrade(st *state.State, chg *state.Change) (bool, error) {
+ snapsup, err := snapSetupFromChange(chg)
+ if err != nil {
+ return false, err
+ }
+ if snapsup == nil || snapsup.SnapName() != "snapd" {
+ return false, nil
+ }
+
+ var snapst SnapState
+ if err := Get(st, snapsup.InstanceName(), &snapst); err != nil {
+ return false, err
+ }
+
+ currentInfo, err := snapst.CurrentInfo()
+ if err != nil {
+ return false, fmt.Errorf("cannot retrieve snap info for current snapd: %v", err)
+ }
+
+ // On older snapds 'Version' might be empty, and in this case we assume
+ // that snapd is downgrading as we cannot determine otherwise.
+ if snapsup.Version == "" {
+ return true, nil
+ }
+ res, err := strutil.VersionCompare(currentInfo.Version, snapsup.Version)
+ if err != nil {
+ return false, fmt.Errorf("cannot compare versions of snapd [cur: %s, new: %s]: %v", currentInfo.Version, snapsup.Version, err)
+ }
+ return res == 1, nil
+}
+
func checkChangeConflictExclusiveKinds(st *state.State, newExclusiveChangeKind, ignoreChangeID string) error {
for _, chg := range st.Changes() {
if chg.Status().Ready() {
@@ -134,6 +180,21 @@ func checkChangeConflictExclusiveKinds(st *state.State, newExclusiveChangeKind,
ChangeKind: "create-recovery-system",
ChangeID: chg.ID(),
}
+ case "revert-snap", "refresh-snap":
+ // Snapd downgrades are exclusive changes
+ if ignoreChangeID != "" && chg.ID() == ignoreChangeID {
+ continue
+ }
+ if downgrading, err := changeIsSnapdDowngrade(st, chg); err != nil {
+ return err
+ } else if !downgrading {
+ continue
+ }
+ return &ChangeConflictError{
+ Message: "snapd downgrade in progress, no other changes allowed until this is done",
+ ChangeKind: chg.Kind(),
+ ChangeID: chg.ID(),
+ }
default:
if newExclusiveChangeKind != "" {
// we want to run a new exclusive change, but other
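
changeIsSnapdDowngrade hinges on strutil.VersionCompare, which reports -1, 0 or 1 for less-than, equal and greater-than, so res == 1 means the installed snapd is newer than the incoming one and the change is treated as a downgrade. A small sketch of that comparison in isolation (the version strings are made up):

package main

import (
	"fmt"

	"github.com/snapcore/snapd/strutil"
)

func main() {
	// 1: the first version is greater, i.e. going from 2.60 to 2.59.5
	// would be a downgrade and must run as an exclusive change
	res, err := strutil.VersionCompare("2.60", "2.59.5")
	if err != nil {
		panic(err)
	}
	fmt.Println(res == 1) // true
}
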
diff --git a/overlord/snapstate/handlers.go b/overlord/snapstate/handlers.go
index 42b0f9b6f5..77474a5d54 100644
--- a/overlord/snapstate/handlers.go
+++ b/overlord/snapstate/handlers.go
@@ -753,8 +753,8 @@ func (m *SnapManager) doDownloadSnap(t *state.Task, tomb *tomb.Tomb) error {
targetFn := snapsup.MountFile()
dlOpts := &store.DownloadOptions{
- IsAutoRefresh: snapsup.IsAutoRefresh,
- RateLimit: rate,
+ Scheduled: snapsup.IsAutoRefresh,
+ RateLimit: rate,
}
if snapsup.DownloadInfo == nil {
var storeInfo store.SnapActionResult
@@ -836,8 +836,8 @@ func (m *SnapManager) doPreDownloadSnap(t *state.Task, tomb *tomb.Tomb) error {
targetFn := snapsup.MountFile()
dlOpts := &store.DownloadOptions{
// pre-downloads are only triggered in auto-refreshes
- IsAutoRefresh: true,
- RateLimit: autoRefreshRateLimited(st),
+ Scheduled: true,
+ RateLimit: autoRefreshRateLimited(st),
}
perfTimings := state.TimingsForTask(t)
diff --git a/overlord/snapstate/handlers_download_test.go b/overlord/snapstate/handlers_download_test.go
index 5505ac4e80..52d60c21a6 100644
--- a/overlord/snapstate/handlers_download_test.go
+++ b/overlord/snapstate/handlers_download_test.go
@@ -296,8 +296,8 @@ func (s *downloadSnapSuite) TestDoDownloadRateLimitedIntegration(c *C) {
name: "foo",
target: filepath.Join(dirs.SnapBlobDir, "foo_11.snap"),
opts: &store.DownloadOptions{
- RateLimit: 1234,
- IsAutoRefresh: true,
+ RateLimit: 1234,
+ Scheduled: true,
},
},
})
diff --git a/overlord/snapstate/refreshhints.go b/overlord/snapstate/refreshhints.go
index a609dfd30f..7cc5e23e71 100644
--- a/overlord/snapstate/refreshhints.go
+++ b/overlord/snapstate/refreshhints.go
@@ -1,7 +1,7 @@
// -*- Mode: Go; indent-tabs-mode: t -*-
/*
- * Copyright (C) 2017 Canonical Ltd
+ * Copyright (C) 2017-2023 Canonical Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
@@ -181,6 +181,7 @@ func refreshHintsFromCandidates(st *state.State, updates []*snap.Info, ignoreVal
DownloadInfo: &update.DownloadInfo,
SideInfo: &update.SideInfo,
Type: update.Type(),
+ Version: update.Version,
PlugsOnly: len(update.Slots) == 0,
InstanceKey: update.InstanceKey,
auxStoreInfo: auxStoreInfo{
@@ -190,7 +191,6 @@ func refreshHintsFromCandidates(st *state.State, updates []*snap.Info, ignoreVal
Website: update.Website(),
},
},
- Version: update.Version,
}
hints[update.InstanceName()] = snapsup
}
diff --git a/overlord/snapstate/refreshhints_test.go b/overlord/snapstate/refreshhints_test.go
index d5a9a3097b..52431db067 100644
--- a/overlord/snapstate/refreshhints_test.go
+++ b/overlord/snapstate/refreshhints_test.go
@@ -266,6 +266,7 @@ func (s *refreshHintsTestSuite) TestRefreshHintsStoresRefreshCandidates(c *C) {
c.Assert(candidates, HasLen, 2)
cand1 := candidates["some-snap"]
c.Assert(cand1, NotNil)
+
c.Check(cand1.InstanceName(), Equals, "some-snap")
c.Check(cand1.SnapBase(), Equals, "some-base")
c.Check(cand1.Type(), Equals, snap.TypeApp)
@@ -287,8 +288,9 @@ func (s *refreshHintsTestSuite) TestRefreshHintsStoresRefreshCandidates(c *C) {
sup, snapst, err := cand1.SnapSetupForUpdate(s.state, nil, 0, nil)
c.Assert(err, IsNil)
c.Check(sup, DeepEquals, &snapstate.SnapSetup{
- Base: "some-base",
- Type: "app",
+ Base: "some-base",
+ Type: "app",
+ Version: "2",
SideInfo: &snap.SideInfo{
RealName: "some-snap",
Revision: snap.R(1),
@@ -312,7 +314,8 @@ func (s *refreshHintsTestSuite) TestRefreshHintsStoresRefreshCandidates(c *C) {
sup, snapst, err = cand2.SnapSetupForUpdate(s.state, nil, 0, nil)
c.Assert(err, IsNil)
c.Check(sup, DeepEquals, &snapstate.SnapSetup{
- Type: "app",
+ Type: "app",
+ Version: "v1",
SideInfo: &snap.SideInfo{
RealName: "other-snap",
Revision: snap.R(2),
diff --git a/overlord/snapstate/snapmgr.go b/overlord/snapstate/snapmgr.go
index f0f734c509..e0abc31350 100644
--- a/overlord/snapstate/snapmgr.go
+++ b/overlord/snapstate/snapmgr.go
@@ -1,7 +1,7 @@
// -*- Mode: Go; indent-tabs-mode: t -*-
/*
- * Copyright (C) 2016-2022 Canonical Ltd
+ * Copyright (C) 2016-2023 Canonical Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
@@ -83,6 +83,9 @@ type SnapSetup struct {
// slots (#slots == 0).
PlugsOnly bool `json:"plugs-only,omitempty"`
+ // Version being installed/refreshed to.
+ Version string `json:"version,omitempty"`
+
CohortKey string `json:"cohort-key,omitempty"`
// FIXME: implement rename of this as suggested in
diff --git a/overlord/snapstate/snapstate.go b/overlord/snapstate/snapstate.go
index d29840b119..4bdd1d46b1 100644
--- a/overlord/snapstate/snapstate.go
+++ b/overlord/snapstate/snapstate.go
@@ -1,7 +1,7 @@
// -*- Mode: Go; indent-tabs-mode: t -*-
/*
- * Copyright (C) 2016-2022 Canonical Ltd
+ * Copyright (C) 2016-2023 Canonical Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
@@ -161,6 +161,7 @@ func (ins installSnapInfo) SnapSetupForUpdate(st *state.State, params updatePara
DownloadInfo: &update.DownloadInfo,
SideInfo: &update.SideInfo,
Type: update.Type(),
+ Version: update.Version,
PlugsOnly: len(update.Slots) == 0,
InstanceKey: update.InstanceKey,
auxStoreInfo: auxStoreInfo{
@@ -208,6 +209,7 @@ func (i pathInfo) SnapSetupForUpdate(st *state.State, params updateParamsFunc, _
SnapPath: i.path,
Flags: flags.ForSnapSetup(),
Type: i.Type(),
+ Version: i.Version,
PlugsOnly: len(i.Slots) == 0,
InstanceKey: i.InstanceKey,
}
@@ -397,6 +399,20 @@ func doInstall(st *state.State, snapst *SnapState, snapsup *SnapSetup, flags int
}
snapsup.PlugsOnly = snapsup.PlugsOnly && (len(info.Slots) == 0)
+ // When downgrading snapd we want to make sure that it's an exclusive change.
+ if snapsup.SnapName() == "snapd" {
+ res, err := strutil.VersionCompare(info.Version, snapsup.Version)
+ if err != nil {
+ return nil, fmt.Errorf("cannot compare versions of snapd [cur: %s, new: %s]: %v", info.Version, snapsup.Version, err)
+ }
+ // If snapsup.Version is smaller than the installed version, 1 is returned.
+ if res == 1 {
+ if err := CheckChangeConflictRunExclusively(st, "snapd downgrade"); err != nil {
+ return nil, err
+ }
+ }
+ }
+
if experimentalRefreshAppAwareness && !excludeFromRefreshAppAwareness(snapsup.Type) && !snapsup.Flags.IgnoreRunning {
// Note that because we are modifying the snap state inside
// softCheckNothingRunningForRefresh, this block must be located
@@ -1174,6 +1190,7 @@ func InstallPath(st *state.State, si *snap.SideInfo, path, instanceName, channel
Channel: channel,
Flags: flags.ForSnapSetup(),
Type: info.Type(),
+ Version: info.Version,
PlugsOnly: len(info.Slots) == 0,
InstanceKey: info.InstanceKey,
}
@@ -1272,6 +1289,7 @@ func InstallWithDeviceContext(ctx context.Context, st *state.State, name string,
DownloadInfo: &info.DownloadInfo,
SideInfo: &info.SideInfo,
Type: info.Type(),
+ Version: info.Version,
PlugsOnly: len(info.Slots) == 0,
InstanceKey: info.InstanceKey,
auxStoreInfo: auxStoreInfo{
@@ -1458,6 +1476,7 @@ func InstallMany(st *state.State, names []string, revOpts []*RevisionOptions, us
DownloadInfo: &info.DownloadInfo,
SideInfo: &info.SideInfo,
Type: info.Type(),
+ Version: info.Version,
PlugsOnly: len(info.Slots) == 0,
InstanceKey: info.InstanceKey,
ExpectedProvenance: info.SnapProvenance,
@@ -1688,12 +1707,23 @@ func updateManyFiltered(ctx context.Context, st *state.State, names []string, re
names = strutil.Deduplicate(names)
- refreshOpts := &store.RefreshOptions{IsAutoRefresh: flags.IsAutoRefresh}
+ refreshOpts := &store.RefreshOptions{Scheduled: flags.IsAutoRefresh}
updates, stateByInstanceName, ignoreValidation, err := refreshCandidates(ctx, st, names, revOpts, user, refreshOpts)
if err != nil {
return nil, nil, err
}
+ // save the candidates so the auto-refresh can be continued if it's inhibited
+ // by a running snap.
+ if flags.IsAutoRefresh {
+ hints, err := refreshHintsFromCandidates(st, updates, ignoreValidation, deviceCtx)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ st.Set("refresh-candidates", hints)
+ }
+
if filter != nil {
actual := updates[:0]
for _, update := range updates {
@@ -2403,7 +2433,8 @@ func UpdateWithDeviceContext(st *state.State, name string, opts *RevisionOptions
Flags: snapst.Flags.ForSnapSetup(),
InstanceKey: snapst.InstanceKey,
Type: snap.Type(snapst.SnapType),
- CohortKey: opts.CohortKey,
+ // no version info needed
+ CohortKey: opts.CohortKey,
}
if switchChannel || switchCohortKey {
@@ -2551,7 +2582,7 @@ func autoRefreshPhase1(ctx context.Context, st *state.State, forGatingSnap strin
return nil, nil, err
}
- refreshOpts := &store.RefreshOptions{IsAutoRefresh: true}
+ refreshOpts := &store.RefreshOptions{Scheduled: true}
// XXX: should we skip refreshCandidates if forGatingSnap isn't empty (meaning we're handling proceed from a snap)?
candidates, snapstateByInstance, ignoreValidationByInstanceName, err := refreshCandidates(ctx, st, nil, nil, user, refreshOpts)
if err != nil {
@@ -2886,6 +2917,7 @@ func LinkNewBaseOrKernel(st *state.State, name string, fromChange string) (*stat
SideInfo: snapst.CurrentSideInfo(),
Flags: snapst.Flags.ForSnapSetup(),
Type: info.Type(),
+ Version: info.Version,
PlugsOnly: len(info.Slots) == 0,
InstanceKey: snapst.InstanceKey,
}
@@ -2999,6 +3031,7 @@ func SwitchToNewGadget(st *state.State, name string, fromChange string) (*state.
SideInfo: snapst.CurrentSideInfo(),
Flags: snapst.Flags.ForSnapSetup(),
Type: info.Type(),
+ Version: info.Version,
PlugsOnly: len(info.Slots) == 0,
InstanceKey: snapst.InstanceKey,
}
@@ -3080,6 +3113,7 @@ func Enable(st *state.State, name string) (*state.TaskSet, error) {
SideInfo: snapst.CurrentSideInfo(),
Flags: snapst.Flags.ForSnapSetup(),
Type: info.Type(),
+ Version: info.Version,
PlugsOnly: len(info.Slots) == 0,
InstanceKey: snapst.InstanceKey,
}
@@ -3139,6 +3173,7 @@ func Disable(st *state.State, name string) (*state.TaskSet, error) {
Revision: snapst.Current,
},
Type: info.Type(),
+ Version: info.Version,
PlugsOnly: len(info.Slots) == 0,
InstanceKey: snapst.InstanceKey,
}
@@ -3312,7 +3347,8 @@ func removeTasks(st *state.State, name string, revision snap.Revision, flags *Re
RealName: snap.InstanceSnap(name),
Revision: revision,
},
- Type: info.Type(),
+ Type: info.Type(),
+ // no version info needed
PlugsOnly: len(info.Slots) == 0,
InstanceKey: snapst.InstanceKey,
}
@@ -3431,6 +3467,7 @@ func removeInactiveRevision(st *state.State, name, snapID string, revision snap.
},
InstanceKey: instanceKey,
Type: typ,
+ // no version info needed
}
clearData := st.NewTask("clear-snap", fmt.Sprintf(i18n.G("Remove data for snap %q (%s)"), name, revision))
@@ -3569,6 +3606,7 @@ func RevertToRevision(st *state.State, name string, rev snap.Revision, flags Fla
SideInfo: snapst.Sequence[i],
Flags: flags.ForSnapSetup(),
Type: info.Type(),
+ Version: info.Version,
PlugsOnly: len(info.Slots) == 0,
InstanceKey: snapst.InstanceKey,
}
@@ -3613,6 +3651,7 @@ func TransitionCore(st *state.State, oldName, newName string) ([]*state.TaskSet,
DownloadInfo: &newInfo.DownloadInfo,
SideInfo: &newInfo.SideInfo,
Type: newInfo.Type(),
+ Version: newInfo.Version,
}, 0, "", nil)
if err != nil {
return nil, err
diff --git a/overlord/snapstate/snapstate_install_test.go b/overlord/snapstate/snapstate_install_test.go
index 094e526c57..abe8a2b4f1 100644
--- a/overlord/snapstate/snapstate_install_test.go
+++ b/overlord/snapstate/snapstate_install_test.go
@@ -1183,6 +1183,7 @@ func (s *snapmgrTestSuite) TestInstallRunThrough(c *C) {
},
SideInfo: snapsup.SideInfo,
Type: snap.TypeApp,
+ Version: "some-snapVer",
PlugsOnly: true,
})
c.Assert(snapsup.SideInfo, DeepEquals, &snap.SideInfo{
@@ -1356,6 +1357,7 @@ func (s *snapmgrTestSuite) TestParallelInstanceInstallRunThrough(c *C) {
},
SideInfo: snapsup.SideInfo,
Type: snap.TypeApp,
+ Version: "some-snapVer",
PlugsOnly: true,
InstanceKey: "instance",
})
@@ -1704,6 +1706,7 @@ func (s *snapmgrTestSuite) TestInstallWithCohortRunThrough(c *C) {
},
SideInfo: snapsup.SideInfo,
Type: snap.TypeApp,
+ Version: "some-snapVer",
PlugsOnly: true,
CohortKey: "scurries",
})
@@ -1870,6 +1873,7 @@ func (s *snapmgrTestSuite) TestInstallWithRevisionRunThrough(c *C) {
},
SideInfo: snapsup.SideInfo,
Type: snap.TypeApp,
+ Version: "some-snapVer",
PlugsOnly: true,
})
c.Assert(snapsup.SideInfo, DeepEquals, &snap.SideInfo{
@@ -2031,6 +2035,7 @@ version: 1.0`)
SnapPath: mockSnap,
SideInfo: snapsup.SideInfo,
Type: snap.TypeApp,
+ Version: "1.0",
PlugsOnly: true,
})
c.Assert(snapsup.SideInfo, DeepEquals, &snap.SideInfo{
@@ -2157,6 +2162,7 @@ epoch: 1*
SnapPath: mockSnap,
SideInfo: snapsup.SideInfo,
Type: snap.TypeApp,
+ Version: "1.0",
PlugsOnly: true,
})
c.Assert(snapsup.SideInfo, DeepEquals, &snap.SideInfo{
@@ -2341,6 +2347,7 @@ version: 1.0`)
Required: true,
},
Type: snap.TypeApp,
+ Version: "1.0",
PlugsOnly: true,
})
c.Assert(snapsup.SideInfo, DeepEquals, si)
@@ -3494,6 +3501,11 @@ func (s *snapmgrTestSuite) TestInstallMany(c *C) {
// check that tasksets are in separate lanes
for _, t := range ts.Tasks() {
c.Assert(t.Lanes(), DeepEquals, []int{i + 1})
+ if t.Kind() == "prerequisites" {
+ sup, err := snapstate.TaskSnapSetup(t)
+ c.Assert(err, IsNil)
+ c.Check(sup.Version, Equals, sup.SnapName()+"Ver")
+ }
}
}
}
@@ -4902,6 +4914,19 @@ epoch: 1
c.Assert(err, IsNil)
c.Assert(tss, HasLen, 2)
+ for i, ts := range tss {
+ // check that tasksets are in separate lanes
+ for _, t := range ts.Tasks() {
+ c.Assert(t.Lanes(), DeepEquals, []int{i + 1})
+ if t.Kind() == "prerequisites" {
+ sup, err := snapstate.TaskSnapSetup(t)
+ c.Assert(err, IsNil)
+ c.Check(sup.SnapName(), Equals, snapNames[i])
+ c.Check(sup.Version, Equals, "1.0")
+ }
+ }
+ }
+
chg := s.state.NewChange("install", "install local snaps")
for _, ts := range tss {
chg.AddAll(ts)
diff --git a/overlord/snapstate/snapstate_test.go b/overlord/snapstate/snapstate_test.go
index cac3a65aff..c570573192 100644
--- a/overlord/snapstate/snapstate_test.go
+++ b/overlord/snapstate/snapstate_test.go
@@ -653,6 +653,7 @@ func (s *snapmgrTestSuite) testRevertTasksFullFlags(flags fullFlags, c *C) {
flags.setup.Revert = true
c.Check(snapsup.Flags, Equals, flags.setup)
c.Check(snapsup.Type, Equals, snap.TypeApp)
+ c.Check(snapsup.Version, Equals, "some-snapVer")
chg := s.state.NewChange("revert", "revert snap")
chg.AddAll(ts)
@@ -2974,6 +2975,7 @@ func (s *snapmgrTestSuite) TestEnableRunThrough(c *C) {
SideInfo: &si,
Flags: flags,
Type: snap.TypeApp,
+ Version: "some-snapVer",
PlugsOnly: true,
})
}
@@ -3037,6 +3039,7 @@ func (s *snapmgrTestSuite) TestDisableRunThrough(c *C) {
Revision: snap.R(7),
},
Type: snap.TypeApp,
+ Version: "some-snapVer",
PlugsOnly: true,
})
}
@@ -8968,3 +8971,43 @@ WantedBy=multi-user.target
c.Assert(mountFile, testutil.FileEquals, expectedContent)
}
+
+func (s *snapmgrTestSuite) TestSaveRefreshCandidatesOnAutoRefresh(c *C) {
+ s.state.Lock()
+ defer s.state.Unlock()
+
+ snapstate.Set(s.state, "some-snap", &snapstate.SnapState{
+ Active: true,
+ Sequence: []*snap.SideInfo{
+ {RealName: "some-snap", SnapID: "some-snap-id", Revision: snap.R(1)},
+ },
+ Current: snap.R(1),
+ SnapType: "app",
+ })
+ snapstate.Set(s.state, "some-other-snap", &snapstate.SnapState{
+ Active: true,
+ Sequence: []*snap.SideInfo{
+ {RealName: "some-other-snap", SnapID: "some-other-snap-id", Revision: snap.R(1)},
+ },
+ Current: snap.R(1),
+ SnapType: "app",
+ })
+
+ // precondition check
+ var cands map[string]*snapstate.RefreshCandidate
+ err := s.state.Get("refresh-candidates", &cands)
+ c.Assert(err, testutil.ErrorIs, &state.NoStateError{})
+
+ names, tss, err := snapstate.AutoRefresh(context.Background(), s.state, nil)
+ c.Assert(err, IsNil)
+ c.Assert(tss, NotNil)
+ c.Check(names, DeepEquals, []string{"some-other-snap", "some-snap"})
+
+ // check that refresh-candidates in the state were updated
+ err = s.state.Get("refresh-candidates", &cands)
+ c.Assert(err, IsNil)
+
+ c.Assert(cands, HasLen, 2)
+ c.Check(cands["some-snap"], NotNil)
+ c.Check(cands["some-other-snap"], NotNil)
+}
diff --git a/overlord/snapstate/snapstate_update_test.go b/overlord/snapstate/snapstate_update_test.go
index b581dfbeb9..07e87e9f3b 100644
--- a/overlord/snapstate/snapstate_update_test.go
+++ b/overlord/snapstate/snapstate_update_test.go
@@ -803,6 +803,7 @@ func (s *snapmgrTestSuite) TestUpdateAmendRunThrough(c *C) {
},
SideInfo: snapsup.SideInfo,
Type: snap.TypeApp,
+ Version: "some-snapVer",
PlugsOnly: true,
Flags: snapstate.Flags{Amend: true},
})
@@ -1049,6 +1050,7 @@ func (s *snapmgrTestSuite) TestUpdateRunThrough(c *C) {
},
SideInfo: snapsup.SideInfo,
Type: snap.TypeApp,
+ Version: "services-snapVer",
PlugsOnly: true,
})
c.Assert(snapsup.SideInfo, DeepEquals, &snap.SideInfo{
@@ -1403,6 +1405,7 @@ func (s *snapmgrTestSuite) TestParallelInstanceUpdateRunThrough(c *C) {
},
SideInfo: snapsup.SideInfo,
Type: snap.TypeApp,
+ Version: "services-snapVer",
PlugsOnly: true,
InstanceKey: "instance",
})
@@ -1740,6 +1743,7 @@ func (s *snapmgrTestSuite) TestUpdateModelKernelSwitchTrackRunThrough(c *C) {
},
SideInfo: snapsup.SideInfo,
Type: snap.TypeKernel,
+ Version: "kernelVer",
PlugsOnly: true,
})
c.Assert(snapsup.SideInfo, DeepEquals, &snap.SideInfo{
@@ -9921,3 +9925,203 @@ func (s *snapmgrTestSuite) TestMonitoringIsPersistedAndRestored(c *C) {
c.Assert(s.state.Cached("monitored-snaps"), IsNil)
c.Check(s.state.Cached("auto-refresh-continue-attempt"), Equals, 1)
}
+
+func (s *snapmgrTestSuite) testUpdateDowngradeBlockedByOtherChanges(c *C, old, new string, revert bool) error {
+ si1 := snap.SideInfo{
+ RealName: "snapd",
+ SnapID: "snapd-id",
+ Channel: "latest",
+ Revision: snap.R(1),
+ }
+ si2 := snap.SideInfo{
+ RealName: "snapd",
+ SnapID: "snapd-id",
+ Channel: "latest",
+ Revision: snap.R(2),
+ }
+ si3 := snap.SideInfo{
+ RealName: "snapd",
+ SnapID: "snapd-id",
+ Channel: "latest",
+ Revision: snap.R(3),
+ }
+
+ restore := snapstate.MockSnapReadInfo(func(name string, si *snap.SideInfo) (*snap.Info, error) {
+ var version string
+ switch name {
+ case "snapd":
+ if (revert && si.Revision.N == 1) || (!revert && si.Revision.N == 2) {
+ version = old
+ } else if (revert && si.Revision.N == 2) || si.Revision.N == 3 {
+ version = new
+ } else {
+ return nil, fmt.Errorf("unexpected revision for test")
+ }
+ default:
+ version = "1.0"
+ }
+ return &snap.Info{
+ SuggestedName: name,
+ Version: version,
+ Architectures: []string{"all"},
+ SideInfo: *si,
+ }, nil
+ })
+ defer restore()
+
+ st := s.state
+ st.Lock()
+ defer st.Unlock()
+
+ chg := st.NewChange("unrelated", "...")
+ chg.AddTask(st.NewTask("task0", "..."))
+
+ snapstate.Set(s.state, "snapd", &snapstate.SnapState{
+ Active: true,
+ Sequence: []*snap.SideInfo{&si1, &si2, &si3},
+ TrackingChannel: "latest/stable",
+ Current: si2.Revision,
+ })
+
+ var err error
+ if revert {
+ _, err = snapstate.Revert(s.state, "snapd", snapstate.Flags{}, "")
+ } else {
+ _, err = snapstate.Update(s.state, "snapd", &snapstate.RevisionOptions{Revision: snap.R(3)}, s.user.ID, snapstate.Flags{})
+ }
+ return err
+}
+
+func (s *snapmgrTestSuite) TestUpdateDowngradeBlockedByOtherChanges(c *C) {
+ err := s.testUpdateDowngradeBlockedByOtherChanges(c, "2.57.1", "2.56", false)
+ c.Assert(err, ErrorMatches, `other changes in progress \(conflicting change "unrelated"\), change "snapd downgrade" not allowed until they are done`)
+}
+
+func (s *snapmgrTestSuite) TestUpdateDowngradeBlockedByOtherChangesAlsoWhenEmpty(c *C) {
+ err := s.testUpdateDowngradeBlockedByOtherChanges(c, "2.57.1", "", false)
+ c.Assert(err, ErrorMatches, `other changes in progress \(conflicting change "unrelated"\), change "snapd downgrade" not allowed until they are done`)
+}
+
+func (s *snapmgrTestSuite) TestUpdateDowngradeNotBlockedByOtherChanges(c *C) {
+ err := s.testUpdateDowngradeBlockedByOtherChanges(c, "2.57.1", "2.58", false)
+ c.Assert(err, IsNil)
+}
+
+func (s *snapmgrTestSuite) TestRevertBlockedByOtherChanges(c *C) {
+ // Swap values for revert case
+ err := s.testUpdateDowngradeBlockedByOtherChanges(c, "2.56", "2.57.1", true)
+ c.Assert(err, ErrorMatches, `other changes in progress \(conflicting change "unrelated"\), change "snapd downgrade" not allowed until they are done`)
+}
+
+func (s *snapmgrTestSuite) TestRevertNotBlockedByOtherChanges(c *C) {
+ // Swap values for the revert case: reverting to a newer version is
+ // not a downgrade, so it is not blocked by the unrelated change.
+ err := s.testUpdateDowngradeBlockedByOtherChanges(c, "2.58", "2.57.1", true)
+ c.Assert(err, IsNil)
+}
+
+func (s *snapmgrTestSuite) testUpdateNotAllowedWhileDowngrading(c *C, old, new string, revert bool) error {
+ si1 := snap.SideInfo{
+ RealName: "snapd",
+ SnapID: "snapd-id",
+ Channel: "latest",
+ Revision: snap.R(1),
+ }
+ si2 := snap.SideInfo{
+ RealName: "snapd",
+ SnapID: "snapd-id",
+ Channel: "latest",
+ Revision: snap.R(2),
+ }
+ si3 := snap.SideInfo{
+ RealName: "snapd",
+ SnapID: "snapd-id",
+ Channel: "latest",
+ Revision: snap.R(3),
+ }
+
+ si := snap.SideInfo{
+ RealName: "some-snap",
+ SnapID: "some-snap-id",
+ Revision: snap.R(7),
+ Channel: "channel-for-7",
+ }
+
+ restore := snapstate.MockSnapReadInfo(func(name string, si *snap.SideInfo) (*snap.Info, error) {
+ var version string
+ switch name {
+ case "snapd":
+ if (revert && si.Revision.N == 1) || (!revert && si.Revision.N == 2) {
+ version = old
+ } else if (revert && si.Revision.N == 2) || si.Revision.N == 3 {
+ version = new
+ } else {
+ return nil, fmt.Errorf("unexpected revision for test")
+ }
+ default:
+ version = "1.0"
+ }
+ return &snap.Info{
+ SuggestedName: name,
+ Version: version,
+ Architectures: []string{"all"},
+ SideInfo: *si,
+ }, nil
+ })
+ defer restore()
+
+ s.state.Lock()
+ defer s.state.Unlock()
+
+ snapstate.Set(s.state, "snapd", &snapstate.SnapState{
+ Active: true,
+ Sequence: []*snap.SideInfo{&si1, &si2, &si3},
+ TrackingChannel: "latest/stable",
+ Current: si2.Revision,
+ })
+ snapstate.Set(s.state, "some-snap", &snapstate.SnapState{
+ Active: true,
+ Sequence: []*snap.SideInfo{&si},
+ TrackingChannel: "other-channel/stable",
+ Current: si.Revision,
+ })
+
+ var err error
+ var ts *state.TaskSet
+ if revert {
+ ts, err = snapstate.Revert(s.state, "snapd", snapstate.Flags{}, "")
+ } else {
+ ts, err = snapstate.Update(s.state, "snapd", &snapstate.RevisionOptions{Revision: snap.R(3)}, s.user.ID, snapstate.Flags{})
+ }
+ c.Assert(err, IsNil)
+
+ chg := s.state.NewChange("refresh-snap", "refresh snapd")
+ chg.AddAll(ts)
+
+ _, err = snapstate.Update(s.state, "some-snap", &snapstate.RevisionOptions{Channel: "channel-for-7/stable"}, s.user.ID, snapstate.Flags{})
+ return err
+}
+
+func (s *snapmgrTestSuite) TestUpdateNotAllowedWhileDowngrading(c *C) {
+ err := s.testUpdateNotAllowedWhileDowngrading(c, "2.57.1", "2.56", false)
+ c.Assert(err, ErrorMatches, `snapd downgrade in progress, no other changes allowed until this is done`)
+}
+
+func (s *snapmgrTestSuite) TestUpdateNotAllowedWhileDowngradingAndWhenEmpty(c *C) {
+ err := s.testUpdateNotAllowedWhileDowngrading(c, "2.57.1", "", false)
+ c.Assert(err, ErrorMatches, `snapd downgrade in progress, no other changes allowed until this is done`)
+}
+
+func (s *snapmgrTestSuite) TestUpdateAllowedWhileUpgrading(c *C) {
+ err := s.testUpdateNotAllowedWhileDowngrading(c, "2.57.1", "2.58", false)
+ c.Assert(err, IsNil)
+}
+
+func (s *snapmgrTestSuite) TestUpdateNotAllowedWhileRevertDowngrading(c *C) {
+ err := s.testUpdateNotAllowedWhileDowngrading(c, "2.56", "2.57.1", true)
+ c.Assert(err, ErrorMatches, `snapd downgrade in progress, no other changes allowed until this is done`)
+}
+
+func (s *snapmgrTestSuite) TestUpdateAllowedWhileRevertUpgrading(c *C) {
+ err := s.testUpdateNotAllowedWhileDowngrading(c, "2.58", "2.57.1", true)
+ c.Assert(err, IsNil)
+}
diff --git a/overlord/state/change.go b/overlord/state/change.go
index 56eb7b440c..ec86431450 100644
--- a/overlord/state/change.go
+++ b/overlord/state/change.go
@@ -139,15 +139,16 @@ const (
// while the individual Task values would track the running of
// the hooks themselves.
type Change struct {
- state *State
- id string
- kind string
- summary string
- status Status
- clean bool
- data customData
- taskIDs []string
- ready chan struct{}
+ state *State
+ id string
+ kind string
+ summary string
+ status Status
+ clean bool
+ data customData
+ taskIDs []string
+ ready chan struct{}
+ lastObservedStatus Status
spawnTime time.Time
readyTime time.Time
@@ -422,6 +423,14 @@ func (c *Change) Status() Status {
panic(fmt.Sprintf("internal error: cannot process change status: %v", statusStats))
}
+func (c *Change) notifyStatusChange(new Status) {
+ if c.lastObservedStatus == new {
+ return
+ }
+ c.state.notifyChangeStatusChangedHandlers(c, c.lastObservedStatus, new)
+ c.lastObservedStatus = new
+}
+
// SetStatus sets the change status, overriding the default behavior (see Status method).
func (c *Change) SetStatus(s Status) {
c.state.writing()
@@ -429,6 +438,7 @@ func (c *Change) SetStatus(s Status) {
if s.Ready() {
c.markReady()
}
+ c.notifyStatusChange(c.Status())
}
func (c *Change) markReady() {
@@ -447,15 +457,10 @@ func (c *Change) Ready() <-chan struct{} {
return c.ready
}
-// taskStatusChanged is called by tasks when their status is changed,
-// to give the opportunity for the change to close its ready channel.
-func (c *Change) taskStatusChanged(t *Task, old, new Status) {
- if old.Ready() == new.Ready() {
- return
- }
+func (c *Change) detectChangeReady(excludeTask *Task) {
for _, tid := range c.taskIDs {
task := c.state.tasks[tid]
- if task != t && !task.status.Ready() {
+ if task != excludeTask && !task.status.Ready() {
return
}
}
@@ -468,6 +473,21 @@ func (c *Change) taskStatusChanged(t *Task, old, new Status) {
c.markReady()
}
+// taskStatusChanged is called by tasks when their status is changed,
+// to give the change the opportunity to close its ready channel and
+// to notify observers of change status transitions.
+func (c *Change) taskStatusChanged(t *Task, old, new Status) {
+ cs := c.Status()
+ // If the task changes from ready => unready or unready => ready,
+ // update the ready status for the change.
+ if old.Ready() == new.Ready() {
+ c.notifyStatusChange(cs)
+ return
+ }
+ c.detectChangeReady(t)
+ c.notifyStatusChange(cs)
+}
+
// IsClean returns whether all tasks in the change have been cleaned. See SetClean.
func (c *Change) IsClean() bool {
c.state.reading()
diff --git a/overlord/state/state.go b/overlord/state/state.go
index 19c691bece..62f7910c40 100644
--- a/overlord/state/state.go
+++ b/overlord/state/state.go
@@ -87,6 +87,9 @@ type State struct {
lastTaskId int
lastChangeId int
lastLaneId int
+ // lastHandlerId is not serialized; it is only used at runtime
+ // for registering status-change callbacks
+ lastHandlerId int
backend Backend
data customData
@@ -99,6 +102,10 @@ type State struct {
cache map[interface{}]interface{}
pendingChangeByAttr map[string]func(*Change) bool
+
+ // task/changes observing
+ taskHandlers map[int]func(t *Task, old, new Status)
+ changeHandlers map[int]func(chg *Change, old, new Status)
}
// New returns a new empty state.
@@ -112,6 +119,8 @@ func New(backend Backend) *State {
modified: true,
cache: make(map[interface{}]interface{}),
pendingChangeByAttr: make(map[string]func(*Change) bool),
+ taskHandlers: make(map[int]func(t *Task, old Status, new Status)),
+ changeHandlers: make(map[int]func(chg *Change, old Status, new Status)),
}
}
@@ -482,6 +491,62 @@ func (s *State) GetMaybeTimings(timings interface{}) error {
return nil
}
+// AddTaskStatusChangedHandler adds a callback function that will be invoked
+// whenever tasks change status.
+// NOTE: Callbacks registered this way may be invoked in the context
+// of the taskrunner, so they should be simple, return quickly, and
+// avoid I/O or blocking operations, as any delay stalls the entire
+// task system.
+func (s *State) AddTaskStatusChangedHandler(f func(t *Task, old, new Status)) (id int) {
+ // We are reading here as we want to ensure access to the state is serialized,
+ // and not writing as we are not changing the part of state that goes on the disk.
+ s.reading()
+ id = s.lastHandlerId
+ s.lastHandlerId++
+ s.taskHandlers[id] = f
+ return id
+}
+
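+// RemoveTaskStatusChangedHandler removes the task status handler
+// previously registered with the given id.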
+func (s *State) RemoveTaskStatusChangedHandler(id int) {
+ s.reading()
+ delete(s.taskHandlers, id)
+}
+
+func (s *State) notifyTaskStatusChangedHandlers(t *Task, old, new Status) {
+ s.reading()
+ for _, f := range s.taskHandlers {
+ f(t, old, new)
+ }
+}
+
+// AddChangeStatusChangedHandler adds a callback function that will be invoked
+// whenever a Change changes status.
+// NOTE: Callbacks registered this way may be invoked in the context
+// of the taskrunner, so they should be simple, return quickly, and
+// avoid I/O or blocking operations, as any delay stalls the entire
+// task system.
+func (s *State) AddChangeStatusChangedHandler(f func(chg *Change, old, new Status)) (id int) {
+ // We are reading here as we want to ensure access to the state is serialized,
+ // and not writing as we are not changing the part of state that goes on the disk.
+ s.reading()
+ id = s.lastHandlerId
+ s.lastHandlerId++
+ s.changeHandlers[id] = f
+ return id
+}
+
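+// RemoveChangeStatusChangedHandler removes the change status handler
+// previously registered with the given id.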
+func (s *State) RemoveChangeStatusChangedHandler(id int) {
+ s.reading()
+ delete(s.changeHandlers, id)
+}
+
+func (s *State) notifyChangeStatusChangedHandlers(chg *Change, old, new Status) {
+ s.reading()
+ for _, f := range s.changeHandlers {
+ f(chg, old, new)
+ }
+}
+
// SaveTimings implements timings.GetSaver
func (s *State) SaveTimings(timings interface{}) {
s.Set("timings", timings)
diff --git a/overlord/state/state_test.go b/overlord/state/state_test.go
index 0e2af25c89..39ead37eb2 100644
--- a/overlord/state/state_test.go
+++ b/overlord/state/state_test.go
@@ -1081,3 +1081,161 @@ func (ss *stateSuite) TestNoStateErrorString(c *C) {
err.Key = "foo"
c.Assert(err.Error(), Equals, `no state entry for key "foo"`)
}
+
+type taskAndStatus struct {
+ t *state.Task
+ old, new state.Status
+}
+
+func (ss *stateSuite) TestTaskChangedHandler(c *C) {
+ st := state.New(nil)
+ st.Lock()
+ defer st.Unlock()
+
+ var taskObservedChanges []taskAndStatus
+ oId := st.AddTaskStatusChangedHandler(func(t *state.Task, old, new state.Status) {
+ taskObservedChanges = append(taskObservedChanges, taskAndStatus{
+ t: t,
+ old: old,
+ new: new,
+ })
+ })
+
+ t1 := st.NewTask("foo", "...")
+
+ t1.SetStatus(state.DoingStatus)
+
+ // Set the task status to the identical status; we don't want
+ // task events when the task doesn't actually change status.
+ t1.SetStatus(state.DoingStatus)
+
+ // Set task to done.
+ t1.SetStatus(state.DoneStatus)
+
+ // Unregister us, and make sure we do not receive more events.
+ st.RemoveTaskStatusChangedHandler(oId)
+
+ // must not appear in list.
+ t1.SetStatus(state.DoingStatus)
+
+ c.Check(taskObservedChanges, DeepEquals, []taskAndStatus{
+ {
+ t: t1,
+ old: state.DefaultStatus,
+ new: state.DoingStatus,
+ },
+ {
+ t: t1,
+ old: state.DoingStatus,
+ new: state.DoneStatus,
+ },
+ })
+}
+
+type changeAndStatus struct {
+ chg *state.Change
+ old, new state.Status
+}
+
+func (ss *stateSuite) TestChangeChangedHandler(c *C) {
+ st := state.New(nil)
+ st.Lock()
+ defer st.Unlock()
+
+ var observedChanges []changeAndStatus
+ oId := st.AddChangeStatusChangedHandler(func(chg *state.Change, old, new state.Status) {
+ observedChanges = append(observedChanges, changeAndStatus{
+ chg: chg,
+ old: old,
+ new: new,
+ })
+ })
+
+ chg := st.NewChange("test-chg", "...")
+ t1 := st.NewTask("foo", "...")
+ chg.AddTask(t1)
+
+ t1.SetStatus(state.DoingStatus)
+
+ // Set the task status to the identical status; we don't want
+ // change events when the change doesn't actually change status.
+ t1.SetStatus(state.DoingStatus)
+
+ // Set task to waiting
+ t1.SetToWait(state.DoneStatus)
+
+ // Unregister us, and make sure we do not receive more events.
+ st.RemoveChangeStatusChangedHandler(oId)
+
+ // must not appear in list.
+ t1.SetStatus(state.DoneStatus)
+
+ c.Check(observedChanges, DeepEquals, []changeAndStatus{
+ {
+ chg: chg,
+ old: state.DefaultStatus,
+ new: state.DoingStatus,
+ },
+ {
+ chg: chg,
+ old: state.DoingStatus,
+ new: state.WaitStatus,
+ },
+ })
+}
+
+func (ss *stateSuite) TestChangeSetStatusChangedHandler(c *C) {
+ st := state.New(nil)
+ st.Lock()
+ defer st.Unlock()
+
+ var observedChanges []changeAndStatus
+ oId := st.AddChangeStatusChangedHandler(func(chg *state.Change, old, new state.Status) {
+ observedChanges = append(observedChanges, changeAndStatus{
+ chg: chg,
+ old: old,
+ new: new,
+ })
+ })
+
+ chg := st.NewChange("test-chg", "...")
+ t1 := st.NewTask("foo", "...")
+ chg.AddTask(t1)
+
+ t1.SetStatus(state.DoingStatus)
+
+ // We have a single task in Doing; now we manipulate the status
+ // of the change to ensure we receive the correct events.
+ chg.SetStatus(state.WaitStatus)
+
+ // Change to a new status
+ chg.SetStatus(state.ErrorStatus)
+
+ // Now return the status back to Default, which should result
+ // in the change reporting Doing
+ chg.SetStatus(state.DefaultStatus)
+ st.RemoveChangeStatusChangedHandler(oId)
+
+ c.Check(observedChanges, DeepEquals, []changeAndStatus{
+ {
+ chg: chg,
+ old: state.DefaultStatus,
+ new: state.DoingStatus,
+ },
+ {
+ chg: chg,
+ old: state.DoingStatus,
+ new: state.WaitStatus,
+ },
+ {
+ chg: chg,
+ old: state.WaitStatus,
+ new: state.ErrorStatus,
+ },
+ {
+ chg: chg,
+ old: state.ErrorStatus,
+ new: state.DoingStatus,
+ },
+ })
+}
diff --git a/overlord/state/task.go b/overlord/state/task.go
index 74b6c926a1..72f56654ba 100644
--- a/overlord/state/task.go
+++ b/overlord/state/task.go
@@ -203,38 +203,37 @@ func (t *Task) Summary() string {
//
// Possible state transitions:
//
-// /----aborting lane--Do
-// | |
-// V V
-// Hold Doing-->Wait
-// ^ / | \
-// | abort / V V
-// no undo / Done Error
-// | V |
-// \----------Abort aborting lane
-// / | |
-// | finished or |
-// running not running |
-// V \------->|
-// kill goroutine |
-// | V
-// / \ ----->Undo
-// / no error / |
-// | from goroutine |
-// error |
-// from goroutine |
-// | V
-// | Undoing-->Wait
-// V | \
-// Error V V
-// Undone Error
+// /----aborting lane--Do
+// | |
+// V V
+// Hold Doing-->Wait
+// ^ / | \
+// | abort / V V
+// no undo / Done Error
+// | V |
+// \----------Abort aborting lane
+// / | |
+// | finished or |
+// running not running |
+// V \------->|
+// kill goroutine |
+// | V
+// / \ ----->Undo
+// / no error / |
+// | from goroutine |
+// error |
+// from goroutine |
+// | V
+// | Undoing-->Wait
+// V | \
+// Error V V
+// Undone Error
//
// Do -> Doing -> Done is the direct success scenario.
//
// Wait can transition to its waited status,
// usually Done|Undone or back to Doing.
// See Wait struct, SetToWait and WaitedStatus.
-//
func (t *Task) Status() Status {
t.state.reading()
if t.status == DefaultStatus {
@@ -244,6 +243,9 @@ func (t *Task) Status() Status {
}
func (t *Task) changeStatus(old, new Status) {
+ if old == new {
+ return
+ }
t.status = new
if !old.Ready() && new.Ready() {
t.readyTime = timeNow()
@@ -252,6 +254,7 @@ func (t *Task) changeStatus(old, new Status) {
if chg != nil {
chg.taskStatusChanged(t, old, new)
}
+ t.state.notifyTaskStatusChangedHandlers(t, old, new)
}
// SetStatus sets the task status, overriding the default behavior (see Status method).
diff --git a/packaging/arch/PKGBUILD b/packaging/arch/PKGBUILD
index 38528f0c79..0b65a5d6b4 100644
--- a/packaging/arch/PKGBUILD
+++ b/packaging/arch/PKGBUILD
@@ -11,7 +11,7 @@ pkgdesc="Service and tools for management of snap packages."
depends=('squashfs-tools' 'libseccomp' 'libsystemd' 'apparmor')
optdepends=('bash-completion: bash completion support'
'xdg-desktop-portal: desktop integration')
-pkgver=2.60
+pkgver=2.60.1
pkgrel=1
arch=('x86_64' 'i686' 'armv7h' 'aarch64')
url="https://github.com/snapcore/snapd"
diff --git a/packaging/debian-sid/changelog b/packaging/debian-sid/changelog
index ec90dbdd92..8173534bf2 100644
--- a/packaging/debian-sid/changelog
+++ b/packaging/debian-sid/changelog
@@ -1,3 +1,18 @@
+snapd (2.60.1-1) unstable; urgency=medium
+
+ * New upstream release, LP: #2024007
+ - install: fallback to lazy unmount() in writeFilesystemContent
+ - data: include "modprobe.d" and "modules-load.d" in preseeded blob
+ - gadget: fix install test on armhf
+ - interfaces: fix typo in network_manager_observe
+ - sandbox/apparmor: don't let vendored apparmor conflict with system
+ - gadget/update: set parts in laid out data from the ones matched
+ - many: move SnapConfineAppArmorDir from dirs to sandbox/apparmor
+ - many: stop using `-O no-expr-simplify` in apparmor_parser
+ - go.mod: update secboot to latest uc22 branch
+
+ -- Michael Vogt <michael.vogt@ubuntu.com> Tue, 04 Jul 2023 21:21:48 +0200
+
snapd (2.60-1) unstable; urgency=medium
* New upstream release, LP: #2024007
diff --git a/packaging/fedora/snapd.spec b/packaging/fedora/snapd.spec
index 84869dae8e..fe012fee26 100644
--- a/packaging/fedora/snapd.spec
+++ b/packaging/fedora/snapd.spec
@@ -102,7 +102,7 @@
%endif
Name: snapd
-Version: 2.60
+Version: 2.60.1
Release: 0%{?dist}
Summary: A transactional software package manager
License: GPLv3
@@ -996,6 +996,18 @@ fi
%changelog
+* Tue Jul 04 2023 Michael Vogt <michael.vogt@ubuntu.com>
+- New upstream release 2.60.1
+ - install: fallback to lazy unmount() in writeFilesystemContent
+ - data: include "modprobe.d" and "modules-load.d" in preseeded blob
+ - gadget: fix install test on armhf
+ - interfaces: fix typo in network_manager_observe
+ - sandbox/apparmor: don't let vendored apparmor conflict with system
+ - gadget/update: set parts in laid out data from the ones matched
+ - many: move SnapConfineAppArmorDir from dirs to sandbox/apparmor
+ - many: stop using `-O no-expr-simplify` in apparmor_parser
+ - go.mod: update secboot to latest uc22 branch
+
* Thu Jun 15 2023 Michael Vogt <michael.vogt@ubuntu.com>
- New upstream release 2.60
- Support for dynamic snapshot data exclusions
diff --git a/packaging/opensuse-15.4 b/packaging/opensuse-15.4
index 041e6ece8f..2caee2bc4f 120000
--- a/packaging/opensuse-15.4
+++ b/packaging/opensuse-15.4
@@ -1 +1 @@
-opensuse/ \ No newline at end of file
+opensuse \ No newline at end of file
diff --git a/packaging/opensuse-15.3 b/packaging/opensuse-15.5
index 2caee2bc4f..2caee2bc4f 120000
--- a/packaging/opensuse-15.3
+++ b/packaging/opensuse-15.5
diff --git a/packaging/opensuse/snapd.changes b/packaging/opensuse/snapd.changes
index ee0adade34..333509922d 100644
--- a/packaging/opensuse/snapd.changes
+++ b/packaging/opensuse/snapd.changes
@@ -1,4 +1,9 @@
-------------------------------------------------------------------
+Tue Jul 04 19:21:48 UTC 2023 - michael.vogt@ubuntu.com
+
+- Update to upstream release 2.60.1
+
+-------------------------------------------------------------------
Thu Jun 15 15:14:31 UTC 2023 - michael.vogt@ubuntu.com
- Update to upstream release 2.60
diff --git a/packaging/opensuse/snapd.spec b/packaging/opensuse/snapd.spec
index de4baac39c..40ca2a7ded 100644
--- a/packaging/opensuse/snapd.spec
+++ b/packaging/opensuse/snapd.spec
@@ -82,7 +82,7 @@
Name: snapd
-Version: 2.60
+Version: 2.60.1
Release: 0
Summary: Tools enabling systems to work with .snap files
License: GPL-3.0
diff --git a/packaging/ubuntu-14.04/changelog b/packaging/ubuntu-14.04/changelog
index f04d5ce3db..e666083b3a 100644
--- a/packaging/ubuntu-14.04/changelog
+++ b/packaging/ubuntu-14.04/changelog
@@ -1,3 +1,18 @@
+snapd (2.60.1~14.04) trusty; urgency=medium
+
+ * New upstream release, LP: #2024007
+ - install: fallback to lazy unmount() in writeFilesystemContent
+ - data: include "modprobe.d" and "modules-load.d" in preseeded blob
+ - gadget: fix install test on armhf
+ - interfaces: fix typo in network_manager_observe
+ - sandbox/apparmor: don't let vendored apparmor conflict with system
+ - gadget/update: set parts in laid out data from the ones matched
+ - many: move SnapConfineAppArmorDir from dirs to sandbox/apparmor
+ - many: stop using `-O no-expr-simplify` in apparmor_parser
+ - go.mod: update secboot to latest uc22 branch
+
+ -- Michael Vogt <michael.vogt@ubuntu.com> Tue, 04 Jul 2023 21:21:48 +0200
+
snapd (2.60~14.04) trusty; urgency=medium
* New upstream release, LP: #2024007
diff --git a/packaging/ubuntu-16.04/changelog b/packaging/ubuntu-16.04/changelog
index 4b97cadf43..3bdae87486 100644
--- a/packaging/ubuntu-16.04/changelog
+++ b/packaging/ubuntu-16.04/changelog
@@ -1,3 +1,18 @@
+snapd (2.60.1) xenial; urgency=medium
+
+ * New upstream release, LP: #2024007
+ - install: fallback to lazy unmount() in writeFilesystemContent
+ - data: include "modprobe.d" and "modules-load.d" in preseeded blob
+ - gadget: fix install test on armhf
+ - interfaces: fix typo in network_manager_observe
+ - sandbox/apparmor: don't let vendored apparmor conflict with system
+ - gadget/update: set parts in laid out data from the ones matched
+ - many: move SnapConfineAppArmorDir from dirs to sandbox/apparmor
+ - many: stop using `-O no-expr-simplify` in apparmor_parser
+ - go.mod: update secboot to latest uc22 branch
+
+ -- Michael Vogt <michael.vogt@ubuntu.com> Tue, 04 Jul 2023 21:21:48 +0200
+
snapd (2.60) xenial; urgency=medium
* New upstream release, LP: #2024007
diff --git a/sandbox/apparmor/apparmor.go b/sandbox/apparmor/apparmor.go
index 3726b7ba76..24d82f04ca 100644
--- a/sandbox/apparmor/apparmor.go
+++ b/sandbox/apparmor/apparmor.go
@@ -20,6 +20,7 @@
package apparmor
import (
+ "bufio"
"bytes"
"fmt"
"io/ioutil"
@@ -31,6 +32,7 @@ import (
"sync"
"github.com/snapcore/snapd/dirs"
+ "github.com/snapcore/snapd/logger"
"github.com/snapcore/snapd/osutil"
"github.com/snapcore/snapd/snapdtool"
"github.com/snapcore/snapd/strutil"
@@ -85,6 +87,14 @@ func setupConfCacheDirs(newrootdir string) {
// TODO: it seems Solus has a different setup too, investigate this
SystemCacheDir = CacheDir
}
+
+ snapConfineDir := "snap-confine"
+ if _, internal, err := AppArmorParser(); err == nil {
+ if internal {
+ snapConfineDir = "snap-confine.internal"
+ }
+ }
+ SnapConfineAppArmorDir = filepath.Join(dirs.SnapdStateDir(newrootdir), "apparmor", snapConfineDir)
}
func init() {
@@ -93,9 +103,10 @@ func init() {
}
var (
- ConfDir string
- CacheDir string
- SystemCacheDir string
+ ConfDir string
+ CacheDir string
+ SystemCacheDir string
+ SnapConfineAppArmorDir string
)
func (level LevelType) String() string {
@@ -396,6 +407,32 @@ func probeParserFeatures() ([]string, error) {
return features, nil
}
+ // on older Ubuntu systems the system-installed apparmor may try to
+ // load snapd-generated apparmor policy (LP: #2024637)
+ // load snapd generated apparmor policy (LP: #2024637)
+ f, err := os.Open(filepath.Join(dirs.GlobalRootDir, "/lib/apparmor/functions"))
+ if err != nil {
+ if !os.IsNotExist(err) {
+ logger.Debugf("cannot open apparmor functions file: %v", err)
+ }
+ return false
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.Contains(line, dirs.SnapAppArmorDir) {
+ return true
+ }
+ }
+ if scanner.Err() != nil {
+ logger.Debugf("cannot scan apparmor functions file: %v", scanner.Err())
+ }
+
+ return false
+}
+
func snapdAppArmorSupportsReexecImpl() bool {
hostInfoDir := filepath.Join(dirs.GlobalRootDir, dirs.CoreLibExecDir)
_, flags, err := snapdtool.SnapdVersionFromInfoFile(hostInfoDir)
@@ -413,7 +450,7 @@ func AppArmorParser() (cmd *exec.Cmd, internal bool, err error) {
// - but only use the internal one when we know that the system
// installed snapd-apparmor support re-exec
if path, err := snapdtool.InternalToolPath("apparmor_parser"); err == nil {
- if osutil.IsExecutable(path) && snapdAppArmorSupportsReexec() {
+ if osutil.IsExecutable(path) && snapdAppArmorSupportsReexec() && !systemAppArmorLoadsSnapPolicy() {
prefix := strings.TrimSuffix(path, "apparmor_parser")
// when using the internal apparmor_parser also use
// its own configuration and includes etc plus
diff --git a/sandbox/apparmor/apparmor_test.go b/sandbox/apparmor/apparmor_test.go
index 9a537ab68e..863e966c10 100644
--- a/sandbox/apparmor/apparmor_test.go
+++ b/sandbox/apparmor/apparmor_test.go
@@ -33,6 +33,7 @@ import (
. "gopkg.in/check.v1"
"github.com/snapcore/snapd/dirs"
+ "github.com/snapcore/snapd/logger"
"github.com/snapcore/snapd/osutil"
"github.com/snapcore/snapd/sandbox/apparmor"
"github.com/snapcore/snapd/snapdtool"
@@ -514,3 +515,87 @@ func (s *apparmorSuite) TestSnapdAppArmorSupportsReexecImpl(c *C) {
c.Assert(ioutil.WriteFile(infoFile, []byte("VERSION=foo\nSNAPD_APPARMOR_REEXEC=1"), 0644), IsNil)
c.Check(apparmor.SnapdAppArmorSupportsRexecImpl(), Equals, true)
}
+
+func (s *apparmorSuite) TestSetupConfCacheDirs(c *C) {
+ apparmor.SetupConfCacheDirs("/newdir")
+ c.Check(apparmor.SnapConfineAppArmorDir, Equals, "/newdir/var/lib/snapd/apparmor/snap-confine")
+}
+
+func (s *apparmorSuite) TestSetupConfCacheDirsWithInternalApparmor(c *C) {
+ fakeroot := c.MkDir()
+ dirs.SetRootDir(fakeroot)
+
+ d := filepath.Join(dirs.SnapMountDir, "/snapd/42", "/usr/lib/snapd")
+ c.Assert(os.MkdirAll(d, 0755), IsNil)
+ p := filepath.Join(d, "apparmor_parser")
+ c.Assert(ioutil.WriteFile(p, nil, 0755), IsNil)
+ restore := snapdtool.MockOsReadlink(func(path string) (string, error) {
+ c.Assert(path, Equals, "/proc/self/exe")
+ return filepath.Join(d, "snapd"), nil
+ })
+ defer restore()
+ restore = apparmor.MockSnapdAppArmorSupportsReexec(func() bool { return true })
+ defer restore()
+
+ apparmor.SetupConfCacheDirs("/newdir")
+ c.Check(apparmor.SnapConfineAppArmorDir, Equals, "/newdir/var/lib/snapd/apparmor/snap-confine.internal")
+}
+
+func (s *apparmorSuite) TestSystemAppArmorLoadsSnapPolicyErr(c *C) {
+ fakeroot := c.MkDir()
+ dirs.SetRootDir(fakeroot)
+ fakeApparmorFunctionsPath := filepath.Join(fakeroot, "/lib/apparmor/functions")
+ err := os.MkdirAll(filepath.Dir(fakeApparmorFunctionsPath), 0750)
+ c.Assert(err, IsNil)
+
+ os.Setenv("SNAPD_DEBUG", "1")
+ defer os.Unsetenv("SNAPD_DEBUG")
+
+ log, restore := logger.MockLogger()
+ defer restore()
+
+ // no log output on missing file
+ c.Check(apparmor.SystemAppArmorLoadsSnapPolicy(), Equals, false)
+ c.Check(log.String(), Equals, "")
+
+ // permissions are ignored as root
+ if os.Getuid() == 0 {
+ return
+ }
+ // log generated for errors
+ err = ioutil.WriteFile(fakeApparmorFunctionsPath, nil, 0100)
+ c.Assert(err, IsNil)
+ c.Check(apparmor.SystemAppArmorLoadsSnapPolicy(), Equals, false)
+ c.Check(log.String(), Matches, `(?ms).* DEBUG: cannot open apparmor functions file: open .*/lib/apparmor/functions: permission denied`)
+}
+
+func (s *apparmorSuite) TestSystemAppArmorLoadsSnapPolicy(c *C) {
+ fakeroot := c.MkDir()
+ dirs.SetRootDir(fakeroot)
+
+ // systemAppArmorLoadsSnapPolicy() will look at this path so it
+ // needs to be the real path, not a faked one
+ dirs.SnapAppArmorDir = dirs.SnapAppArmorDir[len(fakeroot):]
+
+ fakeApparmorFunctionsPath := filepath.Join(fakeroot, "/lib/apparmor/functions")
+ err := os.MkdirAll(filepath.Dir(fakeApparmorFunctionsPath), 0755)
+ c.Assert(err, IsNil)
+
+ for _, tc := range []struct {
+ apparmorFunctionsContent string
+ expectedResult bool
+ }{
+ {"", false},
+ {"unrelated content", false},
+ // 16.04
+ {`PROFILES_SNAPPY="/var/lib/snapd/apparmor/profiles"`, true},
+ // 18.04
+ {`PROFILES_VAR="/var/lib/snapd/apparmor/profiles"`, true},
+ } {
+ err := ioutil.WriteFile(fakeApparmorFunctionsPath, []byte(tc.apparmorFunctionsContent), 0644)
+ c.Assert(err, IsNil)
+
+ loadsPolicy := apparmor.SystemAppArmorLoadsSnapPolicy()
+ c.Check(loadsPolicy, Equals, tc.expectedResult, Commentf("%v", tc))
+ }
+}
diff --git a/sandbox/apparmor/export_test.go b/sandbox/apparmor/export_test.go
index 4c7318e38a..0027859da5 100644
--- a/sandbox/apparmor/export_test.go
+++ b/sandbox/apparmor/export_test.go
@@ -28,7 +28,8 @@ import (
)
var (
- NumberOfJobsParam = numberOfJobsParam
+ NumberOfJobsParam = numberOfJobsParam
+ SetupConfCacheDirs = setupConfCacheDirs
)
func MockRuntimeNumCPU(new func() int) (restore func()) {
@@ -101,6 +102,7 @@ var (
PreferredParserFeatures = preferredParserFeatures
SnapdAppArmorSupportsRexecImpl = snapdAppArmorSupportsReexecImpl
+ SystemAppArmorLoadsSnapPolicy = systemAppArmorLoadsSnapPolicy
)
func FreshAppArmorAssessment() {
diff --git a/sandbox/apparmor/profile.go b/sandbox/apparmor/profile.go
index e98706dea6..94721c21e3 100644
--- a/sandbox/apparmor/profile.go
+++ b/sandbox/apparmor/profile.go
@@ -118,8 +118,7 @@ var LoadProfiles = func(fnames []string, cacheDir string, flags AaParserFlags) e
return nil
}
- // Use no-expr-simplify since expr-simplify is actually slower on armhf (LP: #1383858)
- args := []string{"--replace", "--write-cache", "-O", "no-expr-simplify", fmt.Sprintf("--cache-loc=%s", cacheDir)}
+ args := []string{"--replace", "--write-cache", fmt.Sprintf("--cache-loc=%s", cacheDir)}
if flags&ConserveCPU != 0 {
args = append(args, numberOfJobsParam())
}
@@ -295,7 +294,7 @@ var loadHomedirs = func() ([]string, error) {
// Returns whether any modifications was made to the snap-confine snippets.
func SetupSnapConfineSnippets() (wasChanged bool, err error) {
// Create the local policy directory if it is not there.
- if err := os.MkdirAll(dirs.SnapConfineAppArmorDir, 0755); err != nil {
+ if err := os.MkdirAll(SnapConfineAppArmorDir, 0755); err != nil {
return false, fmt.Errorf("cannot create snap-confine policy directory: %s", err)
}
@@ -349,7 +348,7 @@ func SetupSnapConfineSnippets() (wasChanged bool, err error) {
}
// Ensure that generated policy is what we computed above.
- created, removed, err := osutil.EnsureDirState(dirs.SnapConfineAppArmorDir, "*", policy)
+ created, removed, err := osutil.EnsureDirState(SnapConfineAppArmorDir, "*", policy)
if err != nil {
return false, fmt.Errorf("cannot synchronize snap-confine policy: %s", err)
}
@@ -359,6 +358,6 @@ func SetupSnapConfineSnippets() (wasChanged bool, err error) {
// RemoveSnapConfineSnippets clears out any previously written apparmor snippets
// for snap-confine.
func RemoveSnapConfineSnippets() error {
- _, _, err := osutil.EnsureDirState(dirs.SnapConfineAppArmorDir, "*", nil)
+ _, _, err := osutil.EnsureDirState(SnapConfineAppArmorDir, "*", nil)
return err
}
diff --git a/sandbox/apparmor/profile_test.go b/sandbox/apparmor/profile_test.go
index 2a207f7eb0..cbaece1066 100644
--- a/sandbox/apparmor/profile_test.go
+++ b/sandbox/apparmor/profile_test.go
@@ -63,7 +63,7 @@ func (s *appArmorSuite) TestLoadProfilesRunsAppArmorParserReplace(c *C) {
err := apparmor.LoadProfiles([]string{"/path/to/snap.samba.smbd"}, apparmor.CacheDir, 0)
c.Assert(err, IsNil)
c.Assert(cmd.Calls(), DeepEquals, [][]string{
- {"apparmor_parser", "--replace", "--write-cache", "-O", "no-expr-simplify", "--cache-loc=/var/cache/apparmor", "--quiet", "/path/to/snap.samba.smbd"},
+ {"apparmor_parser", "--replace", "--write-cache", "--cache-loc=/var/cache/apparmor", "--quiet", "/path/to/snap.samba.smbd"},
})
}
@@ -75,7 +75,7 @@ func (s *appArmorSuite) TestLoadProfilesMany(c *C) {
err := apparmor.LoadProfiles([]string{"/path/to/snap.samba.smbd", "/path/to/another.profile"}, apparmor.CacheDir, 0)
c.Assert(err, IsNil)
c.Assert(cmd.Calls(), DeepEquals, [][]string{
- {"apparmor_parser", "--replace", "--write-cache", "-O", "no-expr-simplify", "--cache-loc=/var/cache/apparmor", "--quiet", "/path/to/snap.samba.smbd", "/path/to/another.profile"},
+ {"apparmor_parser", "--replace", "--write-cache", "--cache-loc=/var/cache/apparmor", "--quiet", "/path/to/snap.samba.smbd", "/path/to/another.profile"},
})
}
@@ -99,7 +99,7 @@ func (s *appArmorSuite) TestLoadProfilesReportsErrors(c *C) {
apparmor_parser output:
`)
c.Assert(cmd.Calls(), DeepEquals, [][]string{
- {"apparmor_parser", "--replace", "--write-cache", "-O", "no-expr-simplify", "--cache-loc=/var/cache/apparmor", "--quiet", "/path/to/snap.samba.smbd"},
+ {"apparmor_parser", "--replace", "--write-cache", "--cache-loc=/var/cache/apparmor", "--quiet", "/path/to/snap.samba.smbd"},
})
}
@@ -114,7 +114,7 @@ apparmor_parser output:
parser error
`)
c.Assert(cmd.Calls(), DeepEquals, [][]string{
- {"apparmor_parser", "--replace", "--write-cache", "-O", "no-expr-simplify", "--cache-loc=/var/cache/apparmor", "--quiet", "/path/to/snap.samba.smbd"},
+ {"apparmor_parser", "--replace", "--write-cache", "--cache-loc=/var/cache/apparmor", "--quiet", "/path/to/snap.samba.smbd"},
})
}
@@ -128,7 +128,7 @@ func (s *appArmorSuite) TestLoadProfilesRunsAppArmorParserReplaceWithSnapdDebug(
err := apparmor.LoadProfiles([]string{"/path/to/snap.samba.smbd"}, apparmor.CacheDir, 0)
c.Assert(err, IsNil)
c.Assert(cmd.Calls(), DeepEquals, [][]string{
- {"apparmor_parser", "--replace", "--write-cache", "-O", "no-expr-simplify", "--cache-loc=/var/cache/apparmor", "/path/to/snap.samba.smbd"},
+ {"apparmor_parser", "--replace", "--write-cache", "--cache-loc=/var/cache/apparmor", "/path/to/snap.samba.smbd"},
})
}
@@ -395,7 +395,7 @@ func (s *appArmorSuite) TestSetupSnapConfineSnippetsNoSnippets(c *C) {
// Because overlay/nfs is not used there are no local policy files but the
// directory was created.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 0)
}
@@ -427,16 +427,16 @@ func (s *appArmorSuite) TestSetupSnapConfineSnippetsHomedirs(c *C) {
// Homedirs was specified, so we expect an entry for each homedir in a
// snippet 'homedirs'
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 1)
c.Assert(files[0].Name(), Equals, "homedirs")
c.Assert(files[0].Mode(), Equals, os.FileMode(0644))
c.Assert(files[0].IsDir(), Equals, false)
- c.Assert(filepath.Join(dirs.SnapConfineAppArmorDir, files[0].Name()),
+ c.Assert(filepath.Join(apparmor.SnapConfineAppArmorDir, files[0].Name()),
testutil.FileContains, `"/mnt/foo/" -> "/tmp/snap.rootfs_*/mnt/foo/",`)
- c.Assert(filepath.Join(dirs.SnapConfineAppArmorDir, files[0].Name()),
+ c.Assert(filepath.Join(apparmor.SnapConfineAppArmorDir, files[0].Name()),
testutil.FileContains, `"/mnt/bar/" -> "/tmp/snap.rootfs_*/mnt/bar/",`)
}
@@ -459,7 +459,7 @@ func (s *appArmorSuite) TestSetupSnapConfineGeneratedPolicyWithHomedirsLoadError
// Probing apparmor_parser capabilities failed, so nothing gets written
// to the snap-confine policy directory
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 0)
@@ -488,14 +488,14 @@ func (s *appArmorSuite) TestSetupSnapConfineSnippetsBPF(c *C) {
// Capability bpf is supported by the parser, so an extra policy file
// for snap-confine is present
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 1)
c.Assert(files[0].Name(), Equals, "cap-bpf")
c.Assert(files[0].Mode(), Equals, os.FileMode(0644))
c.Assert(files[0].IsDir(), Equals, false)
- c.Assert(filepath.Join(dirs.SnapConfineAppArmorDir, files[0].Name()),
+ c.Assert(filepath.Join(apparmor.SnapConfineAppArmorDir, files[0].Name()),
testutil.FileContains, "capability bpf,")
}
@@ -522,7 +522,7 @@ func (s *appArmorSuite) TestSetupSnapConfineGeneratedPolicyWithBPFProbeError(c *
// Probing apparmor_parser capabilities failed, so nothing gets written
// to the snap-confine policy directory
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 0)
@@ -547,7 +547,7 @@ func (s *appArmorSuite) TestSetupSnapConfineSnippetsOverlay(c *C) {
c.Check(wasChanged, Equals, true)
// Because overlay is being used, we have the extra policy file.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 1)
c.Assert(files[0].Name(), Equals, "overlay-root")
@@ -555,7 +555,7 @@ func (s *appArmorSuite) TestSetupSnapConfineSnippetsOverlay(c *C) {
c.Assert(files[0].IsDir(), Equals, false)
// The policy allows upperdir access.
- data, err := ioutil.ReadFile(filepath.Join(dirs.SnapConfineAppArmorDir, files[0].Name()))
+ data, err := ioutil.ReadFile(filepath.Join(apparmor.SnapConfineAppArmorDir, files[0].Name()))
c.Assert(err, IsNil)
c.Assert(string(data), testutil.Contains, "\"/upper/{,**/}\" r,")
}
@@ -577,7 +577,7 @@ func (s *appArmorSuite) TestSetupSnapConfineSnippetsNFS(c *C) {
c.Check(wasChanged, Equals, true)
// Because NFS is being used, we have the extra policy file.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 1)
c.Assert(files[0].Name(), Equals, "nfs-support")
@@ -585,7 +585,7 @@ func (s *appArmorSuite) TestSetupSnapConfineSnippetsNFS(c *C) {
c.Assert(files[0].IsDir(), Equals, false)
// The policy allows network access.
- fn := filepath.Join(dirs.SnapConfineAppArmorDir, files[0].Name())
+ fn := filepath.Join(apparmor.SnapConfineAppArmorDir, files[0].Name())
c.Assert(fn, testutil.FileContains, "network inet,")
c.Assert(fn, testutil.FileContains, "network inet6,")
}
@@ -619,7 +619,7 @@ func (s *appArmorSuite) TestSetupSnapConfineGeneratedPolicyError1(c *C) {
// While other stuff failed we created the policy directory and didn't
// write any files to it.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Check(err, IsNil)
c.Check(files, HasLen, 0)
@@ -633,16 +633,16 @@ func (s *appArmorSuite) TestSetupSnapConfineGeneratedPolicyError2(c *C) {
defer dirs.SetRootDir("")
// Create a file where we would expect to find the local policy.
- err := os.RemoveAll(filepath.Dir(dirs.SnapConfineAppArmorDir))
+ err := os.RemoveAll(filepath.Dir(apparmor.SnapConfineAppArmorDir))
c.Assert(err, IsNil)
- err = os.MkdirAll(filepath.Dir(dirs.SnapConfineAppArmorDir), 0755)
+ err = os.MkdirAll(filepath.Dir(apparmor.SnapConfineAppArmorDir), 0755)
c.Assert(err, IsNil)
- err = ioutil.WriteFile(dirs.SnapConfineAppArmorDir, []byte(""), 0644)
+ err = ioutil.WriteFile(apparmor.SnapConfineAppArmorDir, []byte(""), 0644)
c.Assert(err, IsNil)
wasChanged, err := apparmor.SetupSnapConfineSnippets()
c.Check(err, ErrorMatches, fmt.Sprintf(`cannot create snap-confine policy directory: mkdir %s: not a directory`,
- dirs.SnapConfineAppArmorDir))
+ apparmor.SnapConfineAppArmorDir))
c.Check(wasChanged, Equals, false)
}
@@ -666,23 +666,23 @@ func (s *appArmorSuite) TestSetupSnapConfineGeneratedPolicyError3(c *C) {
// Create the snap-confine directory and put a file. Because the file name
// matches the glob generated-* snapd will attempt to remove it but because
// the directory is not writable, that operation will fail.
- err := os.MkdirAll(dirs.SnapConfineAppArmorDir, 0755)
+ err := os.MkdirAll(apparmor.SnapConfineAppArmorDir, 0755)
c.Assert(err, IsNil)
- f := filepath.Join(dirs.SnapConfineAppArmorDir, "generated-test")
+ f := filepath.Join(apparmor.SnapConfineAppArmorDir, "generated-test")
err = ioutil.WriteFile(f, []byte("spurious content"), 0644)
c.Assert(err, IsNil)
- err = os.Chmod(dirs.SnapConfineAppArmorDir, 0555)
+ err = os.Chmod(apparmor.SnapConfineAppArmorDir, 0555)
c.Assert(err, IsNil)
// Make the directory writable for cleanup.
- defer os.Chmod(dirs.SnapConfineAppArmorDir, 0755)
+ defer os.Chmod(apparmor.SnapConfineAppArmorDir, 0755)
wasChanged, err := apparmor.SetupSnapConfineSnippets()
c.Check(err.Error(), testutil.Contains, "cannot synchronize snap-confine policy")
c.Check(wasChanged, Equals, false)
// The policy directory was unchanged.
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 1)
}
@@ -692,16 +692,16 @@ func (s *appArmorSuite) TestRemoveSnapConfineSnippets(c *C) {
defer dirs.SetRootDir("")
// Create the snap-confine directory and put a few files.
- err := os.MkdirAll(dirs.SnapConfineAppArmorDir, 0755)
+ err := os.MkdirAll(apparmor.SnapConfineAppArmorDir, 0755)
c.Assert(err, IsNil)
- c.Assert(ioutil.WriteFile(filepath.Join(dirs.SnapConfineAppArmorDir, "cap-test"), []byte("foo"), 0644), IsNil)
- c.Assert(ioutil.WriteFile(filepath.Join(dirs.SnapConfineAppArmorDir, "my-file"), []byte("foo"), 0644), IsNil)
+ c.Assert(ioutil.WriteFile(filepath.Join(apparmor.SnapConfineAppArmorDir, "cap-test"), []byte("foo"), 0644), IsNil)
+ c.Assert(ioutil.WriteFile(filepath.Join(apparmor.SnapConfineAppArmorDir, "my-file"), []byte("foo"), 0644), IsNil)
err = apparmor.RemoveSnapConfineSnippets()
c.Check(err, IsNil)
// The files were removed
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 0)
}
@@ -711,14 +711,14 @@ func (s *appArmorSuite) TestRemoveSnapConfineSnippetsNoSnippets(c *C) {
defer dirs.SetRootDir("")
// Create the snap-confine directory and let it do nothing.
- err := os.MkdirAll(dirs.SnapConfineAppArmorDir, 0755)
+ err := os.MkdirAll(apparmor.SnapConfineAppArmorDir, 0755)
c.Assert(err, IsNil)
err = apparmor.RemoveSnapConfineSnippets()
c.Check(err, IsNil)
// Nothing happens
- files, err := ioutil.ReadDir(dirs.SnapConfineAppArmorDir)
+ files, err := ioutil.ReadDir(apparmor.SnapConfineAppArmorDir)
c.Assert(err, IsNil)
c.Assert(files, HasLen, 0)
}
diff --git a/secboot/luks2/cryptsetup.go b/secboot/luks2/cryptsetup.go
index c9bc8147ad..0881cb2ca4 100644
--- a/secboot/luks2/cryptsetup.go
+++ b/secboot/luks2/cryptsetup.go
@@ -21,11 +21,13 @@ package luks2
import (
"bytes"
+ "errors"
"fmt"
"io"
"os"
"os/exec"
"strconv"
+ "syscall"
"time"
"github.com/snapcore/snapd/osutil"
@@ -42,30 +44,12 @@ const (
// cryptsetupCmd is a helper for running the cryptsetup command. If stdin is supplied, data read
// from it is supplied to cryptsetup via its stdin. If callback is supplied, it will be invoked
// after cryptsetup has started.
-func cryptsetupCmd(stdin io.Reader, callback func(cmd *exec.Cmd) error, args ...string) error {
+func cryptsetupCmd(stdin io.Reader, args ...string) error {
cmd := exec.Command("cryptsetup", args...)
cmd.Stdin = stdin
- var b bytes.Buffer
- cmd.Stdout = &b
- cmd.Stderr = &b
-
- if err := cmd.Start(); err != nil {
- return xerrors.Errorf("cannot start cryptsetup: %w", err)
- }
-
- var cbErr error
- if callback != nil {
- cbErr = callback(cmd)
- }
-
- err := cmd.Wait()
-
- switch {
- case cbErr != nil:
- return cbErr
- case err != nil:
- return fmt.Errorf("cryptsetup failed with: %v", osutil.OutputErr(b.Bytes(), err))
+ if output, err := cmd.CombinedOutput(); err != nil {
+ return fmt.Errorf("cryptsetup failed with: %v", osutil.OutputErr(output, err))
}
return nil
@@ -131,6 +115,22 @@ type AddKeyOptions struct {
Slot int
}
+var writeExistingKeyToFifo = func(fifoPath string, existingKey []byte) error {
+ f, err := os.OpenFile(fifoPath, os.O_WRONLY, 0)
+ if err != nil {
+ return xerrors.Errorf("cannot open FIFO for passing existing key to cryptsetup: %w", err)
+ }
+ defer f.Close()
+
+ if _, err := f.Write(existingKey); err != nil {
+ return xerrors.Errorf("cannot pass existing key to cryptsetup: %w", err)
+ }
+ if err := f.Close(); err != nil {
+ return xerrors.Errorf("cannot close write end of FIFO: %w", err)
+ }
+ return nil
+}
+
// AddKey adds the supplied key into a new keyslot for the specified LUKS2 container. In order to do this,
// an existing key must be provided. The KDF for the new keyslot will be configured to use argon2i with
// the supplied benchmark time. The key will be added to the supplied slot.
@@ -172,48 +172,58 @@ func AddKey(devicePath string, existingKey, key []byte, options *AddKeyOptions)
// in order to be able to do this.
"-")
- writeExistingKeyToFifo := func(cmd *exec.Cmd) error {
- f, err := os.OpenFile(fifoPath, os.O_WRONLY, 0)
- if err != nil {
- // If we fail to open the write end, the read end will be blocked in open(), so
- // kill the process.
- cmd.Process.Kill()
- return xerrors.Errorf("cannot open FIFO for passing existing key to cryptsetup: %w", err)
- }
-
- if _, err := f.Write(existingKey); err != nil {
- // The read end is open and blocked inside read(). Closing our write end will result in the
- // read end returning 0 bytes (EOF) and continuing cleanly.
- if err := f.Close(); err != nil {
- // If we can't close the write end, the read end will remain blocked inside read(),
- // so kill the process.
+ cmd := exec.Command("cryptsetup", args...)
+ cmd.Stdin = bytes.NewReader(key)
+
+ // Writing to the fifo must happen in a goroutine as it may block
+ // if the other side is not connected, so special care must be
+ // taken with the cleanup.
+ fifoErrCh := make(chan error)
+ go func() {
+ fifoErr := writeExistingKeyToFifo(fifoPath, existingKey)
+ if fifoErr != nil {
+ // kill to ensure cmd is not lingering
+ if cmd.Process != nil {
cmd.Process.Kill()
}
- return xerrors.Errorf("cannot pass existing key to cryptsetup: %w", err)
- }
-
- if err := f.Close(); err != nil {
- // If we can't close the write end, the read end will remain blocked inside read(),
- // so kill the process.
- cmd.Process.Kill()
- return xerrors.Errorf("cannot close write end of FIFO: %w", err)
+ // also ensure fifo is closed
+ cleanupFifo()
}
+ fifoErrCh <- fifoErr
+ }()
+
+ output, cmdErr := cmd.CombinedOutput()
+ if cmdErr != nil {
+ // cleanupFifo opens/closes the fifo to ensure that
+ // writeExistingKeyToFifo() does not leak while waiting
+ // for the other side of the fifo to connect (which may
+ // never happen)
+ cleanupFifo()
+ }
+ fifoErr := <-fifoErrCh
- return nil
+ switch {
+ case cmdErr != nil && (fifoErr == nil || errors.Is(fifoErr, syscall.EPIPE)):
+ // cmdErr and EPIPE means the problem is with cmd, no
+ // need to display the EPIPE error
+ return fmt.Errorf("cryptsetup failed with: %v", osutil.OutputErr(output, cmdErr))
+ case cmdErr != nil || fifoErr != nil:
+ // For all other cases show a generic error message
+ return fmt.Errorf("cryptsetup failed with: %v (fifo failed with: %v)", osutil.OutputErr(output, cmdErr), fifoErr)
}
- return cryptsetupCmd(bytes.NewReader(key), writeExistingKeyToFifo, args...)
+ return nil
}
// KillSlot erases the keyslot with the supplied slot number from the specified LUKS2 container.
// Note that a valid key for a remaining keyslot must be supplied, in order to prevent the last
// keyslot from being erased.
func KillSlot(devicePath string, slot int, key []byte) error {
- return cryptsetupCmd(bytes.NewReader(key), nil, "luksKillSlot", "--type", "luks2", "--key-file", "-", devicePath, strconv.Itoa(slot))
+ return cryptsetupCmd(bytes.NewReader(key), "luksKillSlot", "--type", "luks2", "--key-file", "-", devicePath, strconv.Itoa(slot))
}
// SetSlotPriority sets the priority of the keyslot with the supplied slot number on
// the specified LUKS2 container.
func SetSlotPriority(devicePath string, slot int, priority SlotPriority) error {
- return cryptsetupCmd(nil, nil, "config", "--priority", priority.String(), "--key-slot", strconv.Itoa(slot), devicePath)
+ return cryptsetupCmd(nil, "config", "--priority", priority.String(), "--key-slot", strconv.Itoa(slot), devicePath)
}
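
The restructured AddKey coordinates cryptsetup reading the new key on stdin, a goroutine feeding the existing key through a FIFO, and cleanup that must not leave either side blocked. The standalone sketch below (illustrative only; runWithFifo and the cat-based stand-in are invented for the example and are not snapd code) isolates that pattern, including the non-blocking O_RDWR trick also used by mkFifo's cleanup in fifo.go:

package main

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"syscall"
)

// runWithFifo runs cmd while a goroutine writes secret data into fifoPath.
// On command failure the FIFO is opened O_RDWR (which does not block on
// Linux) and then removed, so the writer goroutine can never get stuck in
// open(); the command error and the FIFO error are then merged, treating
// EPIPE from the writer as a symptom of the command failure.
func runWithFifo(cmd *exec.Cmd, fifoPath string, secret []byte) error {
	fifoErrCh := make(chan error, 1)
	go func() {
		f, err := os.OpenFile(fifoPath, os.O_WRONLY, 0)
		if err == nil {
			_, err = f.Write(secret)
			f.Close()
		}
		fifoErrCh <- err
	}()

	output, cmdErr := cmd.CombinedOutput()
	if cmdErr != nil {
		// unblock a writer stuck opening the FIFO, then remove it so a
		// writer that has not reached open() yet fails instead of blocking
		if f, err := os.OpenFile(fifoPath, os.O_RDWR, 0); err == nil {
			f.Close()
		}
		os.Remove(fifoPath)
	}
	fifoErr := <-fifoErrCh

	switch {
	case cmdErr != nil && (fifoErr == nil || errors.Is(fifoErr, syscall.EPIPE)):
		return fmt.Errorf("command failed: %v: %s", cmdErr, output)
	case cmdErr != nil || fifoErr != nil:
		return fmt.Errorf("command failed: %v (fifo: %v)", cmdErr, fifoErr)
	}
	return nil
}

func main() {
	dir, err := os.MkdirTemp("", "fifo-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	fifo := filepath.Join(dir, "fifo")
	if err := syscall.Mkfifo(fifo, 0600); err != nil {
		panic(err)
	}
	// "cat <fifo>" plays the role of cryptsetup reading the existing key
	fmt.Println(runWithFifo(exec.Command("cat", fifo), fifo, []byte("secret")))
}
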
diff --git a/secboot/luks2/cryptsetup_test.go b/secboot/luks2/cryptsetup_test.go
new file mode 100644
index 0000000000..2d32df273c
--- /dev/null
+++ b/secboot/luks2/cryptsetup_test.go
@@ -0,0 +1,118 @@
+// -*- Mode: Go; indent-tabs-mode: t -*-
+
+/*
+ * Copyright (C) 2023 Canonical Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 3 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package luks2_test
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ . "gopkg.in/check.v1"
+
+ "github.com/snapcore/snapd/dirs"
+ "github.com/snapcore/snapd/secboot/luks2"
+ "github.com/snapcore/snapd/testutil"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type luks2Suite struct {
+ testutil.BaseTest
+
+ tmpdir string
+ mockCryptsetup *testutil.MockCmd
+}
+
+var _ = Suite(&luks2Suite{})
+
+func (s *luks2Suite) SetUpTest(c *C) {
+ s.BaseTest.SetUpTest(c)
+
+ dirs.SetRootDir(c.MkDir())
+ s.tmpdir = dirs.GlobalRootDir
+
+ s.mockCryptsetup = testutil.MockCommand(c, "cryptsetup", fmt.Sprintf("cat - > %[1]s/stdout 2>%[1]s/stderr", s.tmpdir))
+ s.AddCleanup(s.mockCryptsetup.Restore)
+}
+
+func (s *luks2Suite) TestKillSlot(c *C) {
+ err := luks2.KillSlot("/my/device", 123, []byte("some-key"))
+ c.Check(err, IsNil)
+ c.Check(s.mockCryptsetup.Calls(), DeepEquals, [][]string{
+ {"cryptsetup", "luksKillSlot", "--type", "luks2", "--key-file", "-", "/my/device", "123"},
+ })
+ c.Check(filepath.Join(s.tmpdir, "stdout"), testutil.FileEquals, "some-key")
+ c.Check(filepath.Join(s.tmpdir, "stderr"), testutil.FileEquals, "")
+}
+
+func (s *luks2Suite) TestAddKeyHappy(c *C) {
+ err := os.MkdirAll(filepath.Join(s.tmpdir, "run"), 0755)
+ c.Assert(err, IsNil)
+
+ mockCryptsetup := testutil.MockCommand(c, "cryptsetup", fmt.Sprintf(`
+FIFO="$5"
+cat "$FIFO" > %[1]s/fifo
+cat - > %[1]s/stdout 2>%[1]s/stderr
+`, s.tmpdir))
+ defer mockCryptsetup.Restore()
+
+ err = luks2.AddKey("/my/device", []byte("old-key"), []byte("new-key"), nil)
+ c.Check(err, IsNil)
+ c.Check(mockCryptsetup.Calls(), HasLen, 1)
+ fifoPath := mockCryptsetup.Calls()[0][5]
+ c.Check(mockCryptsetup.Calls(), DeepEquals, [][]string{
+ {"cryptsetup", "luksAddKey", "--type", "luks2", "--key-file", fifoPath, "--pbkdf", "argon2i", "/my/device", "-"},
+ })
+ c.Check(filepath.Join(s.tmpdir, "stdout"), testutil.FileEquals, "new-key")
+ c.Check(filepath.Join(s.tmpdir, "stderr"), testutil.FileEquals, "")
+ c.Check(filepath.Join(s.tmpdir, "fifo"), testutil.FileEquals, "old-key")
+}
+
+func (s *luks2Suite) TestAddKeyBadCryptsetup(c *C) {
+ err := os.MkdirAll(filepath.Join(s.tmpdir, "run"), 0755)
+ c.Assert(err, IsNil)
+
+ mockCryptsetup := testutil.MockCommand(c, "cryptsetup", "echo some-error; exit 1")
+ defer mockCryptsetup.Restore()
+
+ err = luks2.AddKey("/my/device", []byte("old-key"), []byte("new-key"), nil)
+ c.Check(err, ErrorMatches, "cryptsetup failed with: some-error")
+}
+
+func (s *luks2Suite) TestAddKeyBadWriteExistingKeyToFifo(c *C) {
+ err := os.MkdirAll(filepath.Join(s.tmpdir, "run"), 0755)
+ c.Assert(err, IsNil)
+
+ mockCryptsetup := testutil.MockCommand(c, "cryptsetup", fmt.Sprintf(`
+FIFO="$5"
+cat "$FIFO" > %[1]s/fifo
+cat - > %[1]s/stdout 2>%[1]s/stderr
+`, s.tmpdir))
+ defer mockCryptsetup.Restore()
+
+ restore := luks2.MockWriteExistingKeyToFifo(func(string, []byte) error {
+ return fmt.Errorf("writeExistingKeyToFifo error")
+ })
+ defer restore()
+
+ err = luks2.AddKey("/my/device", []byte("old-key"), []byte("new-key"), nil)
+ c.Check(err, ErrorMatches, `cryptsetup failed with: .* \(fifo failed with: writeExistingKeyToFifo error\)`)
+}
diff --git a/secboot/luks2/export_test.go b/secboot/luks2/export_test.go
new file mode 100644
index 0000000000..de391c8bcc
--- /dev/null
+++ b/secboot/luks2/export_test.go
@@ -0,0 +1,31 @@
+// -*- Mode: Go; indent-tabs-mode: t -*-
+
+/*
+ * Copyright (C) 2023 Canonical Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 3 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package luks2
+
+import (
+ "github.com/snapcore/snapd/testutil"
+)
+
+func MockWriteExistingKeyToFifo(f func(string, []byte) error) (restore func()) {
+ restore = testutil.Backup(&writeExistingKeyToFifo)
+ writeExistingKeyToFifo = f
+ return restore
+
+}
diff --git a/secboot/luks2/fifo.go b/secboot/luks2/fifo.go
index ecc4835350..7157b32453 100644
--- a/secboot/luks2/fifo.go
+++ b/secboot/luks2/fifo.go
@@ -39,8 +39,17 @@ func mkFifo() (string, func(), error) {
if err != nil {
return "", nil, xerrors.Errorf("cannot create temporary directory: %w", err)
}
+ fifo := filepath.Join(dir, "fifo")
cleanup := func() {
+ // Clean up any pending readers/writers of the FIFO by
+ // opening/closing it (O_RDWR will not block on
+ // Linux). Otherwise we may leave file
+ // descriptors/goroutines behind that are stuck opening
+ // one side of the FIFO.
+ if f, err := os.OpenFile(fifo, os.O_RDWR, 0); err == nil {
+ f.Close()
+ }
if err := os.RemoveAll(dir); err != nil {
fmt.Fprintf(stderr, "luks2.mkFifo: cannot remove fifo: %v\n", err)
}
@@ -54,7 +63,6 @@ func mkFifo() (string, func(), error) {
cleanup()
}()
- fifo := filepath.Join(dir, "fifo")
if err := unix.Mkfifo(fifo, 0600); err != nil {
return "", nil, xerrors.Errorf("cannot create FIFO: %w", err)
}
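
The cleanup above relies on a Linux-specific property: opening a FIFO with O_RDWR never blocks and counts as both a reader and a writer, so any goroutine stuck in a blocking open of one end is released before the directory is removed. A minimal standalone sketch of that unblocking pattern, using only the standard library (the temporary paths and flow are illustrative, not snapd code):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "syscall"
    )

    func main() {
        dir, err := os.MkdirTemp("", "fifo-demo")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)

        fifo := filepath.Join(dir, "fifo")
        if err := syscall.Mkfifo(fifo, 0600); err != nil {
            panic(err)
        }

        // Simulate a reader stuck in a blocking open: with no writer
        // present, an O_RDONLY open of a FIFO does not return.
        done := make(chan struct{})
        go func() {
            if f, err := os.OpenFile(fifo, os.O_RDONLY, 0); err == nil {
                f.Close()
            }
            close(done)
        }()

        // O_RDWR never blocks on Linux and provides the missing writer
        // end, so the pending open above completes.
        f, err := os.OpenFile(fifo, os.O_RDWR, 0)
        if err != nil {
            panic(err)
        }
        <-done
        f.Close()
        fmt.Println("reader unblocked; safe to remove", dir)
    }
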
diff --git a/snap/naming/core_version.go b/snap/naming/core_version.go
new file mode 100644
index 0000000000..2c7e53d21d
--- /dev/null
+++ b/snap/naming/core_version.go
@@ -0,0 +1,56 @@
+// -*- Mode: Go; indent-tabs-mode: t -*-
+
+/*
+ * Copyright (C) 2023 Canonical Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 3 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package naming
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+)
+
+var (
+ coreNameFormat = regexp.MustCompile("^core(?P<version>[0-9]*)$")
+)
+
+// CoreVersion extracts the version component of a core snap name.
+// Most core snap names are of the form coreXX where XX is a number;
+// CoreVersion returns that number. In the case of "core", it returns
+// 16.
+func CoreVersion(base string) (int, error) {
+ foundCore := coreNameFormat.FindStringSubmatch(base)
+
+ if foundCore == nil {
+ return 0, fmt.Errorf("not a core base")
+ }
+
+ coreVersionStr := foundCore[coreNameFormat.SubexpIndex("version")]
+
+ if coreVersionStr == "" {
+ return 16, nil
+ }
+
+ v, err := strconv.Atoi(coreVersionStr)
+ if err != nil {
+ // Unreachable given the regexp match above
+ return 0, fmt.Errorf("unexpected error converting %s to version: %v", base, err)
+ }
+
+ return v, nil
+}
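
The extraction above relies on Go's named capture groups: Regexp.SubexpIndex maps a group name to its slot in the submatch slice, and an empty "version" group falls back to 16. A small self-contained sketch of the same mechanism, using only the standard library:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        re := regexp.MustCompile(`^core(?P<version>[0-9]*)$`)
        for _, name := range []string{"core", "core22", "bare"} {
            m := re.FindStringSubmatch(name)
            if m == nil {
                fmt.Printf("%s: not a core base\n", name)
                continue
            }
            // SubexpIndex returns the index of the named group (-1 if absent).
            v := m[re.SubexpIndex("version")]
            if v == "" {
                v = "16" // a plain "core" base means the 16 series
            }
            fmt.Printf("%s: version %s\n", name, v)
        }
    }
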
diff --git a/snap/naming/core_version_test.go b/snap/naming/core_version_test.go
new file mode 100644
index 0000000000..7ee8118522
--- /dev/null
+++ b/snap/naming/core_version_test.go
@@ -0,0 +1,53 @@
+// -*- Mode: Go; indent-tabs-mode: t -*-
+
+/*
+ * Copyright (C) 2023 Canonical Ltd
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 3 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package naming_test
+
+import (
+ . "gopkg.in/check.v1"
+
+ "github.com/snapcore/snapd/snap/naming"
+)
+
+type CoreVersionTestSuite struct{}
+
+var _ = Suite(&CoreVersionTestSuite{})
+
+func (s *CoreVersionTestSuite) TestCoreVersion(c *C) {
+ for _, tst := range []struct {
+ name string
+ version int
+ }{
+ {"core", 16},
+ {"core20", 20},
+ {"core22", 22},
+ {"core24", 24},
+ } {
+ v, err := naming.CoreVersion(tst.name)
+ c.Check(err, IsNil)
+ c.Check(v, Equals, tst.version)
+ }
+}
+
+func (s *CoreVersionTestSuite) TestCoreOther(c *C) {
+ for _, tst := range []string{"bare", "coreXX"} {
+ _, err := naming.CoreVersion(tst)
+ c.Check(err, ErrorMatches, "not a core base")
+ }
+}
diff --git a/spread.yaml b/spread.yaml
index 88baec9d21..10fadaca57 100644
--- a/spread.yaml
+++ b/spread.yaml
@@ -15,6 +15,7 @@ environment:
TESTSLIB: $PROJECT_PATH/tests/lib
TESTSTOOLS: $PROJECT_PATH/tests/lib/tools
TESTSTMP: /var/tmp/snapd-tools
+ RUNTIME_STATE_PATH: $TESTSTMP/runtime-state
# turn debug off so that we don't get errant debug messages while running
# tests, and in some cases like on UC20 we have the kernel command line
# parameter, snapd.debug=1 turned on to enable early boot debugging before
@@ -73,6 +74,8 @@ environment:
# Use the installed snapd and reset the systems without removing snapd
REUSE_SNAPD: '$(HOST: echo "${SPREAD_REUSE_SNAPD:-0}")'
EXPERIMENTAL_FEATURES: '$(HOST: echo "${SPREAD_EXPERIMENTAL_FEATURES:-}")'
+ # set to 1 when the snapd memory limit has to be removed
+ SNAPD_NO_MEMORY_LIMIT: '$(HOST: echo "${SPREAD_SNAPD_NO_MEMORY_LIMIT:-}")'
SNAPD_PUBLISHED_VERSION: '$(HOST: echo "$SPREAD_SNAPD_PUBLISHED_VERSION")'
# Build and use snapd from current branch
@@ -143,11 +146,14 @@ backends:
- ubuntu-23.04-64:
storage: 12G
workers: 8
+ - ubuntu-23.10-64:
+ storage: 12G
+ workers: 8
- - debian-10-64:
- workers: 6
- debian-11-64:
workers: 6
+ - debian-12-64:
+ workers: 6
- debian-sid-64:
storage: 12G
workers: 6
@@ -177,12 +183,12 @@ backends:
storage: preserve-size
image: centos-9-64
- # unstable systems below
- opensuse-15.4-64:
workers: 6
+ - opensuse-15.5-64:
+ workers: 6
- opensuse-tumbleweed-64:
workers: 6
- manual: true
google-arm:
type: google
@@ -350,10 +356,16 @@ backends:
- ubuntu-22.04-64:
username: ubuntu
password: ubuntu
- - debian-10-64:
+ - ubuntu-23.04-64:
+ username: ubuntu
+ password: ubuntu
+ - ubuntu-23.10-64:
+ username: ubuntu
+ password: ubuntu
+ - debian-11-64:
username: debian
password: debian
- - debian-11-64:
+ - debian-12-64:
username: debian
password: debian
- debian-sid-64:
@@ -404,22 +416,6 @@ backends:
- ubuntu-16.04-s390x:
username: ubuntu
password: ubuntu
- # Artful
- - ubuntu-17.10-amd64:
- username: ubuntu
- password: ubuntu
- - ubuntu-17.10-i386:
- username: ubuntu
- password: ubuntu
- - ubuntu-17.10-ppc64el:
- username: ubuntu
- password: ubuntu
- - ubuntu-17.10-armhf:
- username: ubuntu
- password: ubuntu
- - ubuntu-17.10-s390x:
- username: ubuntu
- password: ubuntu
# Bionic
- ubuntu-18.04-amd64:
username: ubuntu
@@ -458,6 +454,25 @@ backends:
- ubuntu-20.04-arm64:
username: ubuntu
password: ubuntu
+ # Jammy
+ - ubuntu-22.04-amd64:
+ username: ubuntu
+ password: ubuntu
+ - ubuntu-22.04-i386:
+ username: ubuntu
+ password: ubuntu
+ - ubuntu-22.04-ppc64el:
+ username: ubuntu
+ password: ubuntu
+ - ubuntu-22.04-armhf:
+ username: ubuntu
+ password: ubuntu
+ - ubuntu-22.04-s390x:
+ username: ubuntu
+ password: ubuntu
+ - ubuntu-22.04-arm64:
+ username: ubuntu
+ password: ubuntu
external:
type: adhoc
diff --git a/store/download_test.go b/store/download_test.go
index b8bf7d3002..ea5a7d9936 100644
--- a/store/download_test.go
+++ b/store/download_test.go
@@ -103,7 +103,7 @@ func (s *downloadSuite) TestActualDownloadAutoRefresh(c *C) {
var buf SillyBuffer
// keep tests happy
sha3 := ""
- err := store.Download(context.TODO(), "foo", sha3, mockServer.URL, nil, theStore, &buf, 0, nil, &store.DownloadOptions{IsAutoRefresh: true})
+ err := store.Download(context.TODO(), "foo", sha3, mockServer.URL, nil, theStore, &buf, 0, nil, &store.DownloadOptions{Scheduled: true})
c.Assert(err, IsNil)
c.Check(buf.String(), Equals, "response-data")
c.Check(n, Equals, 1)
diff --git a/store/store_action.go b/store/store_action.go
index efab586142..2e7582b574 100644
--- a/store/store_action.go
+++ b/store/store_action.go
@@ -40,7 +40,7 @@ type RefreshOptions struct {
// RefreshManaged indicates to the store that the refresh is
// managed via snapd-control.
RefreshManaged bool
- IsAutoRefresh bool
+ Scheduled bool
PrivacyKey string
}
@@ -536,7 +536,7 @@ func (s *Store) snapAction(ctx context.Context, currentSnaps []*CurrentSnap, act
APILevel: apiV2Endps,
}
- if opts.IsAutoRefresh {
+ if opts.Scheduled {
logger.Debugf("Auto-refresh; adding header Snap-Refresh-Reason: scheduled")
reqOptions.addHeader("Snap-Refresh-Reason", "scheduled")
}
diff --git a/store/store_action_test.go b/store/store_action_test.go
index 3e14b105ad..99ea539042 100644
--- a/store/store_action_test.go
+++ b/store/store_action_test.go
@@ -817,7 +817,7 @@ func (s *storeActionSuite) TestSnapActionAutoRefresh(c *C) {
SnapID: helloWorldSnapID,
InstanceName: "hello-world",
},
- }, nil, nil, &store.RefreshOptions{IsAutoRefresh: true})
+ }, nil, nil, &store.RefreshOptions{Scheduled: true})
c.Assert(err, IsNil)
c.Assert(results, HasLen, 1)
}
diff --git a/store/store_download.go b/store/store_download.go
index c86a88c96e..fcf2027d52 100644
--- a/store/store_download.go
+++ b/store/store_download.go
@@ -183,7 +183,7 @@ func (e HashError) Error() string {
type DownloadOptions struct {
RateLimit int64
- IsAutoRefresh bool
+ Scheduled bool
LeavePartialOnError bool
}
@@ -305,7 +305,7 @@ func downloadReqOpts(storeURL *url.URL, cdnHeader string, opts *DownloadOptions)
if cdnHeader != "" {
reqOptions.ExtraHeaders["Snap-CDN"] = cdnHeader
}
- if opts != nil && opts.IsAutoRefresh {
+ if opts != nil && opts.Scheduled {
reqOptions.ExtraHeaders["Snap-Refresh-Reason"] = "scheduled"
}
diff --git a/store/store_download_test.go b/store/store_download_test.go
index ca4e1ba1fc..0b90892ccd 100644
--- a/store/store_download_test.go
+++ b/store/store_download_test.go
@@ -587,7 +587,7 @@ func (s *storeDownloadSuite) TestDownloadDelta(c *C) {
for _, testCase := range downloadDeltaTests {
sto.SetDeltaFormat(testCase.format)
restore := store.MockDownload(func(ctx context.Context, name, sha3, url string, user *auth.UserState, _ *store.Store, w io.ReadWriteSeeker, resume int64, pbar progress.Meter, dlOpts *store.DownloadOptions) error {
- c.Check(dlOpts, DeepEquals, &store.DownloadOptions{IsAutoRefresh: true})
+ c.Check(dlOpts, DeepEquals, &store.DownloadOptions{Scheduled: true})
expectedUser := s.user
if !testCase.withUser {
expectedUser = nil
@@ -608,7 +608,7 @@ func (s *storeDownloadSuite) TestDownloadDelta(c *C) {
authedUser = nil
}
- err = sto.DownloadDelta("snapname", &testCase.info, w, nil, authedUser, &store.DownloadOptions{IsAutoRefresh: true})
+ err = sto.DownloadDelta("snapname", &testCase.info, w, nil, authedUser, &store.DownloadOptions{Scheduled: true})
if testCase.expectError {
c.Assert(err, NotNil)
diff --git a/tests/bin/tests.env b/tests/bin/tests.env
new file mode 120000
index 0000000000..fd2854638f
--- /dev/null
+++ b/tests/bin/tests.env
@@ -0,0 +1 @@
+../lib/tools/tests.env
\ No newline at end of file
diff --git a/tests/core/basic18/task.yaml b/tests/core/basic18/task.yaml
index 2ba03a3e48..b898689e25 100644
--- a/tests/core/basic18/task.yaml
+++ b/tests/core/basic18/task.yaml
@@ -25,3 +25,12 @@ execute: |
echo "Ensure passwd/group is available for snaps"
test-snapd-sh-core18.sh -c 'cat /var/lib/extrausers/passwd' | MATCH test
+
+ # ensure apparmor works, see LP: 2024637
+ systemctl status apparmor.service
+
+ # reboot to double check that apparmor still works after the reboot
+ # (LP: 2024637)
+ if [ "$SPREAD_REBOOT" = 0 ]; then
+ REBOOT
+ fi
diff --git a/tests/core/basic20plus/task.yaml b/tests/core/basic20plus/task.yaml
index 40a297ca02..63647c39be 100644
--- a/tests/core/basic20plus/task.yaml
+++ b/tests/core/basic20plus/task.yaml
@@ -111,3 +111,12 @@ execute: |
echo "${loop}" | MATCH "/dev/loop[0-9]+"
losetup -O ro -n --raw "${loop}" | MATCH "1"
done
+
+ # ensure apparmor works, see LP: 2024637
+ systemctl status apparmor.service
+
+ # reboot to double check that apparmor still works after the reboot
+ # (LP: 2024637)
+ if [ "$SPREAD_REBOOT" = 0 ]; then
+ REBOOT
+ fi
diff --git a/tests/lib/external/snapd-testing-tools/spread.yaml b/tests/lib/external/snapd-testing-tools/spread.yaml
index ccbe3e9df6..d8e7315e2f 100644
--- a/tests/lib/external/snapd-testing-tools/spread.yaml
+++ b/tests/lib/external/snapd-testing-tools/spread.yaml
@@ -18,11 +18,12 @@ backends:
- ubuntu-22.04-64:
- ubuntu-22.10-64:
- ubuntu-23.04-64:
- - debian-10-64:
+ - ubuntu-23.10-64:
- debian-11-64:
+ - debian-12-64:
- debian-sid-64:
- - fedora-36-64:
- fedora-37-64:
+ - fedora-38-64:
- arch-linux-64:
- amazon-linux-2-64:
storage: preserve-size
@@ -32,8 +33,8 @@ backends:
storage: preserve-size
- centos-9-64:
storage: preserve-size
- - opensuse-15.3-64:
- opensuse-15.4-64:
+ - opensuse-15.5-64:
- opensuse-tumbleweed-64:
google-nested:
diff --git a/tests/lib/external/snapd-testing-tools/tests/os.query/task.yaml b/tests/lib/external/snapd-testing-tools/tests/os.query/task.yaml
index 465b0f136a..510c28888f 100644
--- a/tests/lib/external/snapd-testing-tools/tests/os.query/task.yaml
+++ b/tests/lib/external/snapd-testing-tools/tests/os.query/task.yaml
@@ -82,9 +82,15 @@ execute: |
! os.query is-ubuntu 21.04
! os.query is-core
;;
- debian-10-64)
+ ubuntu-23.10-64)
+ os.query is-classic
+ os.query is-ubuntu 23.10
+ ! os.query is-ubuntu 21.04
+ ! os.query is-core
+ ;;
+ debian-11-64)
os.query is-debian
- os.query is-debian 10
+ os.query is-debian 11
os.query is-classic
! os.query is-core
@@ -93,9 +99,9 @@ execute: |
os.query is-ubuntu-ge 20.04 | MATCH "os.query: comparing non ubuntu system"
os.query is-ubuntu-lt 2>&1 | MATCH "os.query: version id is expected"
;;
- debian-11-64)
+ debian-12-64)
os.query is-debian
- os.query is-debian 11
+ os.query is-debian 12
os.query is-classic
! os.query is-core
;;
@@ -105,16 +111,16 @@ execute: |
os.query is-classic
! os.query is-core
;;
- fedora-36-64)
+ fedora-37-64)
os.query is-fedora
- os.query is-fedora 36
+ os.query is-fedora 37
! os.query is-fedora rawhide
os.query is-classic
! os.query is-core
;;
- fedora-37-64)
+ fedora-38-64)
os.query is-fedora
- os.query is-fedora 37
+ os.query is-fedora 38
! os.query is-fedora rawhide
os.query is-classic
! os.query is-core
@@ -145,16 +151,16 @@ execute: |
os.query is-centos
! os.query is-core
;;
- opensuse-15.3-64)
+ opensuse-15.4-64)
os.query is-opensuse
- os.query is-opensuse 15.3
+ os.query is-opensuse 15.4
+ ! os.query is-opensuse tumbleweed
os.query is-classic
! os.query is-core
;;
- opensuse-15.4-64)
+ opensuse-15.5-64)
os.query is-opensuse
- os.query is-opensuse 15.4
- ! os.query is-opensuse tumbleweed
+ os.query is-opensuse 15.5
os.query is-classic
! os.query is-core
;;
diff --git a/tests/lib/external/snapd-testing-tools/tests/remote.wait-for/task.yaml b/tests/lib/external/snapd-testing-tools/tests/remote.wait-for/task.yaml
index 46c82a8a6c..5ac95c51a5 100644
--- a/tests/lib/external/snapd-testing-tools/tests/remote.wait-for/task.yaml
+++ b/tests/lib/external/snapd-testing-tools/tests/remote.wait-for/task.yaml
@@ -51,7 +51,7 @@ execute: |
# Check waiting when reboot
remote.exec "sudo reboot" || true
remote.wait-for no-ssh --wait 1 -n 20
- remote.wait-for ssh --wait 1 -n 60
+ remote.wait-for ssh --wait 1 -n 120
# Check waiting for reboot
initial_boot_id="$(remote.exec "cat /proc/sys/kernel/random/boot_id")"
diff --git a/tests/lib/external/snapd-testing-tools/tests/tests.pkgs/task.yaml b/tests/lib/external/snapd-testing-tools/tests/tests.pkgs/task.yaml
index b4573913c2..09f97ad0dc 100644
--- a/tests/lib/external/snapd-testing-tools/tests/tests.pkgs/task.yaml
+++ b/tests/lib/external/snapd-testing-tools/tests/tests.pkgs/task.yaml
@@ -10,9 +10,9 @@ execute: |
# pkgs tool presents the usage screen when invoked without arguments
# or with the -h or --help options.
- tests.pkgs | MATCH 'usage: tests.pkgs {install,remove} \[PACKAGE...\]'
- tests.pkgs -h | MATCH 'usage: tests.pkgs {install,remove} \[PACKAGE...\]'
- tests.pkgs --help | MATCH 'usage: tests.pkgs {install,remove} \[PACKAGE...\]'
+ tests.pkgs | MATCH 'usage: tests.pkgs install \[--no-install-recommends\] \[PACKAGE...\]'
+ tests.pkgs -h | MATCH 'usage: tests.pkgs install \[--no-install-recommends\] \[PACKAGE...\]'
+ tests.pkgs --help | MATCH 'usage: tests.pkgs install \[--no-install-recommends\] \[PACKAGE...\]'
# Update the packages db
@@ -29,6 +29,11 @@ execute: |
tests.pkgs query test-snapd-pkg-1
not tests.pkgs is-installed test-snapd-pkg-1
+ # Install a package with --no-install-recommends option
+ tests.pkgs install --no-install-recommends test-snapd-pkg-1
+ tests.pkgs query test-snapd-pkg-1
+ tests.pkgs remove test-snapd-pkg-1
+
# Install 2 test pkgs and check they are is installed
tests.pkgs install test-snapd-pkg-1 test-snapd-pkg-2
tests.pkgs is-installed test-snapd-pkg-1
diff --git a/tests/lib/external/snapd-testing-tools/tools/os.query b/tests/lib/external/snapd-testing-tools/tools/os.query
index 5ae9c0398b..6f04c1db1d 100755
--- a/tests/lib/external/snapd-testing-tools/tools/os.query
+++ b/tests/lib/external/snapd-testing-tools/tools/os.query
@@ -3,6 +3,7 @@
show_help() {
echo "usage: os.query is-core, is-classic"
echo " os.query is-core16, is-core18, is-core20, is-core22, is-core24"
+ echo " os.query is-core-gt, is-core-ge, is-core-lt, is-core-le"
echo " os.query is-trusty, is-xenial, is-bionic, is-focal, is-jammy"
echo " os.query is-ubuntu [ID], is-debian [ID], is-fedora [ID], is-amazon-linux, is-arch-linux, is-centos [ID], is-opensuse [ID]"
echo " os.query is-ubuntu-gt [ID], is-ubuntu-ge [ID], is-ubuntu-lt [ID], is-ubuntu-le [ID]"
@@ -71,6 +72,22 @@ is_core24() {
fi
}
+is_core_gt() {
+ compare_ubuntu "${1:-}" "-gt"
+}
+
+is_core_ge() {
+ compare_ubuntu "${1:-}" "-ge"
+}
+
+is_core_lt() {
+ compare_ubuntu "${1:-}" "-lt"
+}
+
+is_core_le() {
+ compare_ubuntu "${1:-}" "-le"
+}
+
is_classic() {
! is_core
}
diff --git a/tests/lib/external/snapd-testing-tools/tools/tests.pkgs b/tests/lib/external/snapd-testing-tools/tools/tests.pkgs
index 97677e09fb..a0beef1c6b 100755
--- a/tests/lib/external/snapd-testing-tools/tools/tests.pkgs
+++ b/tests/lib/external/snapd-testing-tools/tools/tests.pkgs
@@ -1,8 +1,10 @@
#!/bin/bash -e
show_help() {
- echo "usage: tests.pkgs {install,remove} [PACKAGE...]"
- echo " tests.pkgs {is-installed,query} [PACKAGE]"
+ echo "usage: tests.pkgs install [--no-install-recommends] [PACKAGE...]"
+ echo " tests.pkgs remove [PACKAGE...]"
+ echo " tests.pkgs is-installed [PACKAGE]"
+ echo " tests.pkgs query [PACKAGE]"
echo
echo "Package names are standardized based on Debian package names"
echo "internally, package names are re-mapped to fit the convention"
@@ -19,6 +21,11 @@ cmd_install() {
unsupported
}
+cmd_install_local() {
+ # This is re-defined by the backend file.
+ unsupported
+}
+
cmd_is_installed() {
# This is re-defined by the backend file.
unsupported
@@ -130,7 +137,8 @@ main() {
case "$action" in
install)
- cmd_install "$(remap_many "$@")"
+ # shellcheck disable=SC2046
+ cmd_install $(remap_many "$@")
;;
is-installed)
cmd_is_installed "$(remap_one "$@")"
diff --git a/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.apt.sh b/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.apt.sh
index c26b17d5a8..a2ec54a20b 100644
--- a/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.apt.sh
+++ b/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.apt.sh
@@ -30,8 +30,21 @@ remap_one() {
cmd_install() {
apt-get update
- # shellcheck disable=SC2068
- apt-get install --yes $@
+
+ local APT_FLAGS="--yes"
+ while [ -n "$1" ]; do
+ case "$1" in
+ --no-install-recommends)
+ APT_FLAGS="$APT_FLAGS --no-install-recommends"
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+ # shellcheck disable=SC2068,SC2086
+ apt-get install $APT_FLAGS $@
}
cmd_is_installed() {
diff --git a/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.dnf-yum.sh b/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.dnf-yum.sh
index 43cd8b9685..16a9e02db7 100644
--- a/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.dnf-yum.sh
+++ b/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.dnf-yum.sh
@@ -27,12 +27,26 @@ remap_one() {
}
cmd_install() {
- # shellcheck disable=SC2068
- if [ "$(command -v dnf)" != "" ]; then
- dnf install -y $@
- else
- yum install -y $@
+ local CMD="dnf"
+ if [ -z "$(command -v dnf)" ]; then
+ CMD="yum"
fi
+ local DNF_YUM_FLAGS="-y"
+
+ while [ -n "$1" ]; do
+ case "$1" in
+ --no-install-recommends)
+ DNF_YUM_FLAGS="$DNF_YUM_FLAGS --setopt=install_weak_deps=False"
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+
+ # shellcheck disable=SC2068,SC2086
+ $CMD install $DNF_YUM_FLAGS $@
}
cmd_is_installed() {
diff --git a/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.pacman.sh b/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.pacman.sh
index 0885a674fd..3a98b0ad1c 100644
--- a/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.pacman.sh
+++ b/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.pacman.sh
@@ -25,7 +25,7 @@ remap_one() {
echo "python-gobject"
;;
test-snapd-pkg-1)
- echo "curseofwar"
+ echo "freeglut"
;;
test-snapd-pkg-2)
echo "robotfindskitten"
@@ -37,8 +37,20 @@ remap_one() {
}
cmd_install() {
- # shellcheck disable=SC2068
- pacman -S --noconfirm $@
+ local PACMAN_FLAGS="--noconfirm"
+ while [ -n "$1" ]; do
+ case "$1" in
+ --no-install-recommends)
+ # Pacman only ever installs the required dependencies
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+ # shellcheck disable=SC2068,SC2086
+ pacman -S $PACMAN_FLAGS $@
}
cmd_is_installed() {
diff --git a/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.zypper.sh b/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.zypper.sh
index 86a1e5c37b..044ec25502 100644
--- a/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.zypper.sh
+++ b/tests/lib/external/snapd-testing-tools/tools/tests.pkgs.zypper.sh
@@ -31,8 +31,21 @@ remap_one() {
}
cmd_install() {
- # shellcheck disable=SC2068
- zypper install -y $@
+ local ZYPPER_FLAGS="-y"
+ while [ -n "$1" ]; do
+ case "$1" in
+ --no-install-recommends)
+ ZYPPER_FLAGS="$ZYPPER_FLAGS --no-recommends"
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+
+ # shellcheck disable=SC2068,SC2086
+ zypper install $ZYPPER_FLAGS $@
}
cmd_is_installed() {
diff --git a/tests/lib/external/snapd-testing-tools/utils/spread-shellcheck b/tests/lib/external/snapd-testing-tools/utils/spread-shellcheck
index 351316e0ca..4769a7dc5d 100755
--- a/tests/lib/external/snapd-testing-tools/utils/spread-shellcheck
+++ b/tests/lib/external/snapd-testing-tools/utils/spread-shellcheck
@@ -124,11 +124,16 @@ def checksection(data, env: Dict[str, str]):
export_disabled_warnings = set()
def replacement(match):
if match.group(0) == '"':
+ # SC2089 and SC2090 are about quoting vs using arrays.
+ # Spread environment variables cannot hold arrays,
+ # so we have to use quotes here.
disabled_warnings.add('SC2089')
export_disabled_warnings.add('SC2090')
return r'\"'
else:
assert(match.group('command') is not None)
+ # "Useless" echo. This is what we get.
+ # We cannot just evaluate to please shellcheck.
disabled_warnings.add('SC2116')
return '$({})'.format(match.group('command'))
value = re.sub(r'[$][(]HOST:(?P<command>.*)[)]|"', replacement, value)
diff --git a/tests/lib/external/snapd-testing-tools/utils/spreadJ b/tests/lib/external/snapd-testing-tools/utils/spreadJ
new file mode 100755
index 0000000000..26f6b4ae66
--- /dev/null
+++ b/tests/lib/external/snapd-testing-tools/utils/spreadJ
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+show_help() {
+ echo "usage: spreadJ rerun [--suite SUITE] <RESULTS-PATH>"
+ echo " spreadJ show [--suite SUITE] <TARGET> <RESULTS-PATH>"
+ echo " spreadJ stats [--suite SUITE] <RESULTS-PATH>"
+ echo " spreadJ list [--suite SUITE] <TARGET> <RESULTS-PATH>"
+ echo ""
+ echo "Available options:"
+ echo " -h --help show this help message."
+ echo ""
+ echo "TARGET:"
+ echo " all,failed,passed,aborted"
+ echo ""
+ echo "Tool used to help with functions that are not already implemented in spread"
+}
+
+_filter_suite() {
+ local suite="$1"
+ if [ -z "$suite" ]; then
+ echo ".testsuites[]"
+ else
+ echo ".testsuites[] | select(.name == \"$suite\")"
+ fi
+}
+
+rerun() {
+ local suite=""
+ if [ "$1" == "--suite" ]; then
+ suite="$2"
+ shift 2
+ fi
+ local res_path="$1"
+ if [ ! -e "$res_path" ]; then
+ echo "spreadJ: results path not found: $res_path"
+ exit 1
+ fi
+
+ local query
+ query="$(_filter_suite $suite).tests[] | select((.result == \"failed\") or (.result == \"aborted\")).name"
+ jq -r "$query" "$res_path"
+}
+
+stats() {
+ local suite=""
+ if [ "$1" == "--suite" ]; then
+ suite="$2"
+ shift 2
+ fi
+ local res_path="$1"
+
+ if [ ! -e "$res_path" ]; then
+ echo "spreadJ: results path not found: $res_path"
+ exit 1
+ fi
+
+ local query
+ if [ -z "$suite" ]; then
+ query="del(.testsuites)"
+ else
+ query="$(_filter_suite $suite) | del(.tests) | del(.name)"
+ fi
+ jq -r "$query" "$res_path"
+}
+
+list() {
+ local suite=""
+ if [ "$1" == "--suite" ]; then
+ suite="$2"
+ shift 2
+ fi
+ local target="$1"
+ local res_path="$2"
+
+ if [ ! -e "$res_path" ]; then
+ echo "spreadJ: results path not found: $res_path"
+ exit 1
+ fi
+
+ if [ -z "$target" ]; then
+ echo "spreadJ: result target cannot be empty"
+ exit 1
+ fi
+
+ local query
+ if [ "$target" == "all" ]; then
+ query="$(_filter_suite $suite).tests[]).name"
+ else
+ query="$(_filter_suite $suite).tests[] | select((.result == \"$target\")).name"
+ fi
+ jq -r "$query" "$res_path"
+}
+
+main() {
+ if [ $# -eq 0 ]; then
+ show_help
+ exit 0
+ fi
+
+ local subcommand="$1"
+ local action=
+ if [ $# -eq 0 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
+ show_help
+ exit 0
+ else
+ action=$(echo "$subcommand" | tr '-' '_')
+ shift
+ fi
+
+ if [ -z "$(declare -f "$action")" ]; then
+ echo "spreadJ: no such command: $subcommand" >&2
+ show_help
+ exit 1
+ fi
+
+ "$action" "$@"
+}
+
+main "$@"
diff --git a/tests/lib/image.sh b/tests/lib/image.sh
index 034f820b20..4ea9ccb37c 100644
--- a/tests/lib/image.sh
+++ b/tests/lib/image.sh
@@ -72,6 +72,9 @@ get_google_image_url_for_vm() {
ubuntu-23.04-64*)
echo "https://storage.googleapis.com/snapd-spread-tests/images/cloudimg/lunar-server-cloudimg-amd64.img"
;;
+ ubuntu-23.10-64*)
+ echo "https://storage.googleapis.com/snapd-spread-tests/images/cloudimg/mantic-server-cloudimg-amd64.img"
+ ;;
*)
echo "unsupported system"
exit 1
@@ -106,6 +109,9 @@ get_ubuntu_image_url_for_vm() {
ubuntu-23.04-64*)
echo "https://cloud-images.ubuntu.com/lunar/current/lunar-server-cloudimg-amd64.img"
;;
+ ubuntu-23.10-64*)
+ echo "https://cloud-images.ubuntu.com/mantic/current/mantic-server-cloudimg-amd64.img"
+ ;;
*)
echo "unsupported system"
exit 1
diff --git a/tests/lib/muinstaller/go.mod b/tests/lib/muinstaller/go.mod
index 87e9157161..847c9f0ba1 100644
--- a/tests/lib/muinstaller/go.mod
+++ b/tests/lib/muinstaller/go.mod
@@ -2,7 +2,7 @@ module github.com/snapcore/snapd/tests/lib/muinstaller
go 1.18
-require github.com/snapcore/snapd v0.0.0-20230214100613-34da9d970bab
+require github.com/snapcore/snapd v0.0.0-20230705065623-befebe0d505e
require (
github.com/canonical/go-efilib v0.3.1-0.20220815143333-7e5151412e93 // indirect
@@ -13,11 +13,11 @@ require (
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/mvo5/goconfigparser v0.0.0-20200803085309-72e476556adb // indirect
github.com/snapcore/go-gettext v0.0.0-20191107141714-82bbea49e785 // indirect
- github.com/snapcore/secboot v0.0.0-20230119174011-57239c9f324a // indirect
+ github.com/snapcore/secboot v0.0.0-20230623151406-4d331d24f830 // indirect
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
- golang.org/x/net v0.7.0 // indirect
- golang.org/x/sys v0.5.0 // indirect
+ golang.org/x/net v0.9.0 // indirect
+ golang.org/x/sys v0.7.0 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
gopkg.in/retry.v1 v1.0.3 // indirect
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect
diff --git a/tests/lib/muinstaller/go.sum b/tests/lib/muinstaller/go.sum
index 261323e57e..7c1d960e9d 100644
--- a/tests/lib/muinstaller/go.sum
+++ b/tests/lib/muinstaller/go.sum
@@ -8,40 +8,31 @@ github.com/canonical/go-tpm2 v0.0.0-20210827151749-f80ff5afff61 h1:DsyeCtFXqOduk
github.com/canonical/go-tpm2 v0.0.0-20210827151749-f80ff5afff61/go.mod h1:vG41hdbBjV4+/fkubTT1ENBBqSkLwLr7mCeW9Y6kpZY=
github.com/canonical/tcglog-parser v0.0.0-20210824131805-69fa1e9f0ad2 h1:CbwVq64ruNLx/S3XA0LO6QMsw6Vc2inK+RcS6D2c4Ns=
github.com/canonical/tcglog-parser v0.0.0-20210824131805-69fa1e9f0ad2/go.mod h1:QoW2apR2tBl6T/4czdND/EHjL1Ia9cCmQnIj9Xe0Kt8=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/frankban/quicktest v1.2.2 h1:xfmOhhoH5fGPgbEAlhLpJH9p0z/0Qizio9osmvn9IUY=
github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42 h1:q3pnF5JFBNRz8sRD+IRj7Y6DMyYGTNqnZ9axTbSfoNI=
github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/gorilla/mux v1.7.4-0.20190701202633-d83b6ffe499a/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gvalkov/golang-evdev v0.0.0-20191114124502-287e62b94bcb/go.mod h1:SAzVFKCRezozJTGavF3GX8MBUruETCqzivVLYiywouA=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
-github.com/jessevdk/go-flags v1.5.1-0.20210607101731-3927b71304df/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
-github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.2-0.20200810074440-814ac30b4b18 h1:fth7xdJYakAjo/XH38edyXuBEqYGJ8Me0RPolN1ZiQE=
-github.com/kr/pretty v0.2.2-0.20200810074440-814ac30b4b18/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mvo5/goconfigparser v0.0.0-20200803085309-72e476556adb h1:1I/JqsB+FffFssjcOeEP0popLhJ46+OwtXztJ/1DhM0=
github.com/mvo5/goconfigparser v0.0.0-20200803085309-72e476556adb/go.mod h1:xmt4k1xLDl8Tdan+0S/jmMK2uSUBSzTc18+5GN5Vea8=
-github.com/mvo5/libseccomp-golang v0.9.1-0.20180308152521-f4de83b52afb/go.mod h1:RduRpSkQHOCvZTbGgT/NJUGjFBFkYlVedimxssQ64ag=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a h1:3QH7VyOaaiUHNrA9Se4YQIRkDTCw1EJls9xTUCaCeRM=
github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20220502024300-f57e1d55ea18/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/snapcore/bolt v1.3.2-0.20210908134111-63c8bfcf7af8/go.mod h1:Z6z3sf12AMDjT/4tbT/PmzzdACAxkWGhkuKWiVpTWLM=
github.com/snapcore/go-gettext v0.0.0-20191107141714-82bbea49e785 h1:PaunR+BhraKSLxt2awQ42zofkP+NKh/VjQ0PjIMk/y4=
github.com/snapcore/go-gettext v0.0.0-20191107141714-82bbea49e785/go.mod h1:D3SsWAXK7wCCBZu+Vk5hc1EuKj/L3XN1puEMXTU4LrQ=
-github.com/snapcore/secboot v0.0.0-20230119174011-57239c9f324a h1:MwEn6ADhO9DYtqRnat71TOYxcNxBVUeqfDCBtrYcu7Y=
-github.com/snapcore/secboot v0.0.0-20230119174011-57239c9f324a/go.mod h1:72paVOkm4sJugXt+v9ItmnjXgO921D8xqsbH2OekouY=
+github.com/snapcore/secboot v0.0.0-20230623151406-4d331d24f830 h1:SCJ9Uiekv6uMqzMGP50Y0KBxkLP7IzPW35aI3Po6iyM=
+github.com/snapcore/secboot v0.0.0-20230623151406-4d331d24f830/go.mod h1:72paVOkm4sJugXt+v9ItmnjXgO921D8xqsbH2OekouY=
github.com/snapcore/snapd v0.0.0-20201005140838-501d14ac146e/go.mod h1:3xrn7QDDKymcE5VO2rgWEQ5ZAUGb9htfwlXnoel6Io8=
-github.com/snapcore/snapd v0.0.0-20230214100613-34da9d970bab h1:nOEQCVw/BA4E3wiq88mPt4oYQdTIr3odiEslLL1el18=
-github.com/snapcore/snapd v0.0.0-20230214100613-34da9d970bab/go.mod h1:6f8/YFmYLQ3H4ySp21aAZ/LMlps3De9xPwCI3KfDr9k=
+github.com/snapcore/snapd v0.0.0-20230705065623-befebe0d505e h1:NeMrXplzKRl0TYi7hWn6Pnqih/Z0B5SppyuqZ9sltDg=
+github.com/snapcore/snapd v0.0.0-20230705065623-befebe0d505e/go.mod h1:V5asOeS+gTIFFl7Kgd0Hdv+G88GuDHiSdNkIZHPsKfM=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -50,28 +41,16 @@ golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c/go.mod h1:iQL9McJNjoIa5mjH6nYTCTZXUN6RP+XW3eib7Ya3XcI=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
@@ -79,16 +58,12 @@ golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNq
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/macaroon.v1 v1.0.0-20150121114231-ab3940c6c165/go.mod h1:PABpHZvxAbIuSYTPWJdQsNu0mtx+HX/1NIm3IT95IX0=
-gopkg.in/mgo.v2 v2.0.0-20180704144907-a7e2c1d573e1/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/retry.v1 v1.0.3 h1:a9CArYczAVv6Qs6VGoLMio99GEs7kY9UzSF9+LD+iGs=
gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g=
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs=
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk=
-gopkg.in/tylerb/graceful.v1 v1.2.15/go.mod h1:yBhekWvR20ACXVObSSdD3u6S9DeSylanL2PAbAC/uJ8=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
maze.io/x/crypto v0.0.0-20190131090603-9b94c9afe066 h1:UrD21H1Ue5Nl8f2x/NQJBRdc49YGmla3mRStinH8CCE=
maze.io/x/crypto v0.0.0-20190131090603-9b94c9afe066/go.mod h1:DEvumi+swYmlKxSlnsvPwS15tRjoypCCeJFXswU5FfQ=
diff --git a/tests/lib/muinstaller/main.go b/tests/lib/muinstaller/main.go
index a8786aa114..170c42d8ac 100644
--- a/tests/lib/muinstaller/main.go
+++ b/tests/lib/muinstaller/main.go
@@ -30,6 +30,7 @@ import (
"os/exec"
"path/filepath"
"sort"
+ "strconv"
"strings"
"time"
@@ -37,6 +38,7 @@ import (
"github.com/snapcore/snapd/dirs"
"github.com/snapcore/snapd/gadget"
"github.com/snapcore/snapd/gadget/install"
+ "github.com/snapcore/snapd/gadget/quantity"
"github.com/snapcore/snapd/logger"
"github.com/snapcore/snapd/osutil"
"github.com/snapcore/snapd/osutil/disks"
@@ -154,11 +156,6 @@ func maybeCreatePartitionTable(bootDevice, schema string) error {
}
func createPartitions(bootDevice string, volumes map[string]*gadget.Volume, encType secboot.EncryptionType) ([]gadget.OnDiskStructure, error) {
- // TODO: support multiple volumes, see gadget/install/install.go
- if len(volumes) != 1 {
- return nil, fmt.Errorf("got unexpected number of volumes %v", len(volumes))
- }
-
vol := firstVol(volumes)
// snapd does not create partition tables so we have to do it here
// or gadget.OnDiskVolumeFromDevice() will fail
@@ -170,7 +167,7 @@ func createPartitions(bootDevice string, volumes map[string]*gadget.Volume, encT
if err != nil {
return nil, fmt.Errorf("cannot read %v partitions: %v", bootDevice, err)
}
- if len(diskLayout.Structure) > 0 {
+ if len(diskLayout.Structure) > 0 && !vol.HasPartial(gadget.PartialStructure) {
return nil, fmt.Errorf("cannot yet install on a disk that has partitions")
}
@@ -187,7 +184,7 @@ func createPartitions(bootDevice string, volumes map[string]*gadget.Volume, encT
opts := &install.CreateOptions{CreateAllMissingPartitions: true}
created, err := install.CreateMissingPartitions(diskLayout, lvol, opts)
if err != nil {
- return nil, fmt.Errorf("cannot create parititons: %v", err)
+ return nil, fmt.Errorf("cannot create partitions: %v", err)
}
logger.Noticef("created %v partitions", created)
@@ -421,6 +418,87 @@ func detectStorageEncryption(seedLabel string) (bool, error) {
return details.StorageEncryption.Support == client.StorageEncryptionSupportAvailable, nil
}
+// fillPartiallyDefinedVolume fills in partial gadget information by
+// looking at the provided disk: schema, filesystems, and sizes are
+// filled. If partial structure is set, removing it would require
+// adding to the volume the existing partitions that are present on
+// the disk but not in the gadget; however, as snapd accepts these
+// partitions as long as partial structure is defined, we just do
+// nothing.
+func fillPartiallyDefinedVolume(vol *gadget.Volume, bootDevice string) error {
+ if len(vol.Partial) == 0 {
+ return nil
+ }
+
+ logger.Noticef("partial gadget for: %q", vol.Partial)
+
+ if vol.HasPartial(gadget.PartialSchema) && vol.Schema == "" {
+ vol.Schema = "gpt"
+ logger.Debugf("volume %q schema set to %q", vol.Name, vol.Schema)
+ }
+
+ if vol.HasPartial(gadget.PartialFilesystem) {
+ for sidx := range vol.Structure {
+ s := &vol.Structure[sidx]
+ if s.HasFilesystem() && s.Filesystem == "" {
+ switch s.Role {
+ case gadget.SystemSeed, gadget.SystemSeedNull:
+ s.Filesystem = "vfat"
+ default:
+ s.Filesystem = "ext4"
+ }
+ logger.Debugf("%q filesystem set to %s", s.Name, s.Filesystem)
+ }
+ }
+ }
+
+ // Fill sizes: for the moment, to avoid unnecessarily complicating the
+ // code, we set size=min-size for every partition except the last one.
+ output, err := exec.Command("lsblk", "--bytes", "--noheadings", "--output", "SIZE", bootDevice).CombinedOutput()
+ exitCode, err := osutil.ExitCode(err)
+ if err != nil {
+ return err
+ }
+ if exitCode != 0 {
+ return fmt.Errorf("cannot find size of %q: %q", bootDevice, string(output))
+ }
+ lines := strings.Split(string(output), "\n")
+ if len(lines) == 0 {
+ return fmt.Errorf("error splitting %q", string(output))
+ }
+ diskSize, err := strconv.Atoi(lines[0])
+ if err != nil {
+ return fmt.Errorf("while converting %s to a size: %v", string(output), err)
+ }
+ partStart := quantity.Offset(0)
+ if vol.HasPartial(gadget.PartialSize) {
+ lastIdx := len(vol.Structure) - 1
+ for sidx := range vol.Structure {
+ s := &vol.Structure[sidx]
+ if s.Offset != nil {
+ partStart = *s.Offset
+ }
+ if s.Size == 0 {
+ if sidx == lastIdx {
+ // Last partition, give it all remaining space
+ // (except space for secondary GPT header).
+ s.Size = quantity.Size(diskSize) - quantity.Size(partStart) - 6*4096
+ } else {
+ s.Size = s.MinSize
+ }
+ logger.Debugf("size of %q set to %d", s.Name, s.Size)
+ }
+ if s.Offset == nil {
+ offset := partStart
+ s.Offset = &offset
+ logger.Debugf("offset of %q set to %d", s.Name, *s.Offset)
+ }
+ partStart += quantity.Offset(s.Size)
+ }
+ }
+
+ return nil
+}
+
func run(seedLabel, rootfsCreator, bootDevice string) error {
logger.Noticef("installing on %q", bootDevice)
@@ -433,6 +511,16 @@ func run(seedLabel, rootfsCreator, bootDevice string) error {
if err != nil {
return err
}
+ // TODO: support multiple volumes, see gadget/install/install.go
+ if len(details.Volumes) != 1 {
+ return fmt.Errorf("gadget defines %v volumes, while we support only one at the moment", len(details.Volumes))
+ }
+
+ // If partial gadget, fill missing information based on the installation target
+ if err := fillPartiallyDefinedVolume(firstVol(details.Volumes), bootDevice); err != nil {
+ return err
+ }
+
// TODO: grow the data-partition based on disk size
encType := secboot.EncryptionTypeNone
if shouldEncrypt {
@@ -473,7 +561,7 @@ func run(seedLabel, rootfsCreator, bootDevice string) error {
func main() {
if len(os.Args) != 4 {
// XXX: allow installing real UC without a classic-rootfs later
- fmt.Fprintf(os.Stderr, "need seed-label, target-device and classic-rootfs as argument\n")
+ fmt.Fprintf(os.Stderr, "Usage: %s <seed-label> <rootfs-creator> <target-device>\n", os.Args[0])
os.Exit(1)
}
logger.SimpleSetup()
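
The size-filling loop above gives every unsized structure its min-size, except the last one, which receives all remaining disk space minus 6*4096 bytes left free for the secondary GPT header. A small self-contained sketch of that arithmetic with made-up numbers (an 8 GiB disk and two structures; the type and values here are hypothetical, not the gadget package's):

    package main

    import "fmt"

    func main() {
        const (
            diskSize    = 8 * 1024 * 1024 * 1024 // hypothetical 8 GiB target disk
            gptReserved = 6 * 4096               // kept free for the secondary GPT header
        )

        // Hypothetical stand-in for a partial-size volume: size 0 means
        // "to be filled in", mirroring the loop in fillPartiallyDefinedVolume.
        type structure struct {
            name    string
            offset  uint64 // 0 means "starts where the previous structure ended"
            minSize uint64
            size    uint64
        }
        vol := []structure{
            {name: "ubuntu-seed", offset: 1024 * 1024, minSize: 1200 * 1024 * 1024},
            {name: "ubuntu-data"}, // last structure: receives the remaining space
        }

        partStart := uint64(0)
        lastIdx := len(vol) - 1
        for i := range vol {
            s := &vol[i]
            if s.offset != 0 {
                partStart = s.offset
            } else {
                s.offset = partStart
            }
            if s.size == 0 {
                if i == lastIdx {
                    s.size = diskSize - partStart - gptReserved
                } else {
                    s.size = s.minSize
                }
            }
            partStart += s.size
            fmt.Printf("%s: offset=%d size=%d\n", s.name, s.offset, s.size)
        }
    }

With these numbers ubuntu-seed keeps its 1200 MiB min-size and ubuntu-data takes everything from about 1.17 GiB onward, leaving the final 24576 bytes of the disk untouched.
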
diff --git a/tests/lib/muinstaller/snapcraft.yaml b/tests/lib/muinstaller/snapcraft.yaml
index 851119745c..6ac3f99a02 100644
--- a/tests/lib/muinstaller/snapcraft.yaml
+++ b/tests/lib/muinstaller/snapcraft.yaml
@@ -8,10 +8,9 @@ confinement: classic
base: core22
apps:
- muinstaller:
+ auto:
command: bin/muinstaller classic $SNAP/bin/mk-classic-rootfs.sh auto
- daemon: simple
- cli:
+ muinstaller:
command: bin/muinstaller
# TODO: add spread test that builds the muinstaller from snapd to ensure
diff --git a/tests/lib/nested.sh b/tests/lib/nested.sh
index c1b0db1b9c..84779fc306 100644
--- a/tests/lib/nested.sh
+++ b/tests/lib/nested.sh
@@ -1117,7 +1117,7 @@ nested_start_core_vm_unit() {
else
cp -f "/usr/share/OVMF/OVMF_VARS.$OVMF_VARS.fd" "$NESTED_ASSETS_DIR/OVMF_VARS.$OVMF_VARS.fd"
PARAM_BIOS="-drive file=/usr/share/OVMF/OVMF_CODE.$OVMF_CODE.fd,if=pflash,format=raw,unit=0,readonly=on -drive file=$NESTED_ASSETS_DIR/OVMF_VARS.$OVMF_VARS.fd,if=pflash,format=raw"
- PARAM_MACHINE="-machine q35${ATTR_KVM} -global ICH9-LPC.disable_s3=1"
+ PARAM_MACHINE="-machine q35${ATTR_KVM} -global ICH9-LPC.disable_s3=1"
fi
fi
@@ -1387,7 +1387,7 @@ nested_start_classic_vm() {
exit 1
fi
- PARAM_IMAGE="-drive file=$NESTED_IMAGES_DIR/$IMAGE_NAME,if=virtio"
+ PARAM_IMAGE="-drive file=$NESTED_IMAGES_DIR/$IMAGE_NAME,if=none,id=disk1 -device virtio-blk-pci,drive=disk1,bootindex=1"
PARAM_SEED="-drive file=$NESTED_ASSETS_DIR/seed.img,if=virtio"
# Open port 7777 on the host so that failures in the nested VM (e.g. to
# create users) can be debugged interactively via
diff --git a/tests/lib/pkgdb.sh b/tests/lib/pkgdb.sh
index 4498ff5998..79f3517fdb 100755
--- a/tests/lib/pkgdb.sh
+++ b/tests/lib/pkgdb.sh
@@ -637,6 +637,7 @@ pkg_dependencies_ubuntu_classic(){
debian-*)
echo "
autopkgtest
+ bpftool
cryptsetup-bin
debootstrap
eatmydata
@@ -645,22 +646,16 @@ pkg_dependencies_ubuntu_classic(){
gcc-multilib
libc6-dev-i386
linux-libc-dev
+ lsof
net-tools
packagekit
sbuild
schroot
+ strace
+ systemd-timesyncd
"
;;
esac
- case "$SPREAD_SYSTEM" in
- debian-11-*|debian-sid-*)
- echo "
- bpftool
- strace
- systemd-timesyncd
- "
- ;;
- esac
}
pkg_linux_image_extra (){
diff --git a/tests/lib/prepare-restore.sh b/tests/lib/prepare-restore.sh
index de28063762..980720d589 100755
--- a/tests/lib/prepare-restore.sh
+++ b/tests/lib/prepare-restore.sh
@@ -521,20 +521,6 @@ prepare_project() {
case "$SPREAD_SYSTEM" in
debian-*|ubuntu-*)
best_golang=golang-1.18
- if [[ "$SPREAD_SYSTEM" == debian-10-* ]]; then
- # debian-10 needs backports for dh-golang
- # TODO: drop when we drop debian-10 support fully
- echo "deb http://deb.debian.org/debian buster-backports-sloppy main" >> /etc/apt/sources.list
- # debian-10 needs backports for golang-1.18, there is no
- # buser-backports anymore so we can only use a PPA
- echo "deb https://ppa.launchpadcontent.net/snappy-dev/image/ubuntu xenial main" >> /etc/apt/sources.list
- curl 'https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x78e1918602959b9c59103100f1831ddafc42e99d' | apt-key add -
- apt update
- # dh-golang must come from backports, gdebi/apt cannot
- # resolve this on their own
- apt install -y -t buster-backports-sloppy dh-golang
- sed -i -e "s/golang-go (>=2:1.18~).*,/${best_golang},/" ./debian/control
- fi
# in 16.04: "apt build-dep -y ./" would also work but not on 14.04
gdebi --quiet --apt-line ./debian/control >deps.txt
quiet xargs -r eatmydata apt-get install -y < deps.txt
diff --git a/tests/lib/prepare.sh b/tests/lib/prepare.sh
index fd70674521..42aa27fbc9 100755
--- a/tests/lib/prepare.sh
+++ b/tests/lib/prepare.sh
@@ -215,6 +215,14 @@ update_core_snap_for_classic_reexec() {
}
prepare_memory_limit_override() {
+ # First time it is needed to save the initial env var value
+ if not tests.env is-set initial SNAPD_NO_MEMORY_LIMIT; then
+ tests.env set initial SNAPD_NO_MEMORY_LIMIT "$SNAPD_NO_MEMORY_LIMIT"
+ # Then if the new value is the same than the initial, then no new configuration needed
+ elif [ "$(tests.env get initial SNAPD_NO_MEMORY_LIMIT)" = "$SNAPD_NO_MEMORY_LIMIT" ]; then
+ return
+ fi
+
# set up memory limits for snapd bu default unless explicit requested not to
# or the system is known to be problematic
local set_limit=1
@@ -232,7 +240,7 @@ prepare_memory_limit_override() {
set_limit=0
;;
*)
- if [ -n "${SNAPD_NO_MEMORY_LIMIT:-}" ]; then
+ if [ "$SNAPD_NO_MEMORY_LIMIT" = 1 ]; then
set_limit=0
fi
;;
@@ -254,8 +262,7 @@ prepare_memory_limit_override() {
# systemd is backwards compatible so the limit is still set.
cat <<EOF > /etc/systemd/system/snapd.service.d/memory-max.conf
[Service]
-# mvo: disabled because of many failures in restore, e.g. in PR#11014
-#MemoryLimit=100M
+MemoryLimit=150M
EOF
fi
# the service setting may have changed in the service so we need
@@ -264,38 +271,31 @@ EOF
systemctl restart snapd
}
-create_reexec_file(){
- local reexec_file=$1
- cat <<EOF > "$reexec_file"
-[Service]
-Environment=SNAP_REEXEC=$SNAP_REEXEC
-EOF
-}
-
prepare_reexec_override() {
local reexec_file=/etc/systemd/system/snapd.service.d/reexec.conf
- local updated=false
+
+ # The first time through, save the initial value of the env var
+ if not tests.env is-set initial SNAP_REEXEC; then
+ tests.env set initial SNAP_REEXEC "$SNAP_REEXEC"
+ # If the current value is the same as the initial one, no new configuration is needed
+ elif [ "$(tests.env get initial SNAP_REEXEC)" = "$SNAP_REEXEC" ]; then
+ return
+ fi
# Just update reexec configuration when the SNAP_REEXEC var has been updated
# Otherwise it is used the configuration set during project preparation
mkdir -p /etc/systemd/system/snapd.service.d
- if [ -z "${SNAP_REEXEC:-}" ] && [ -f "$reexec_file" ] ; then
+ if [ -z "${SNAP_REEXEC:-}" ]; then
rm -f "$reexec_file"
- updated=true
- elif [ -n "${SNAP_REEXEC:-}" ] && [ ! -f "$reexec_file" ]; then
- create_reexec_file "$reexec_file"
- updated=true
- elif [ -n "${SNAP_REEXEC:-}" ] && NOMATCH "Environment=SNAP_REEXEC=$SNAP_REEXEC" < "$reexec_file"; then
- create_reexec_file "$reexec_file"
- updated=true
+ else
+ cat <<EOF > "$reexec_file"
+[Service]
+Environment=SNAP_REEXEC=$SNAP_REEXEC
+EOF
fi
- if [ "$updated" = true ]; then
- # the re-exec setting may have changed in the service so we need
- # to ensure snapd is reloaded
- systemctl daemon-reload
- systemctl restart snapd
- fi
+ systemctl daemon-reload
+ systemctl restart snapd
}
prepare_each_classic() {
@@ -303,7 +303,9 @@ prepare_each_classic() {
echo "/etc/systemd/system/snapd.service.d/local.conf vanished!"
exit 1
fi
+
prepare_reexec_override
+ prepare_memory_limit_override
}
prepare_classic() {
@@ -360,6 +362,9 @@ prepare_classic() {
# Snapshot the state including core.
if ! is_snapd_state_saved; then
+ # Create the file with the initial environment before saving the state
+ tests.env start initial
+
# need to be seeded to proceed with snap install
# also make sure the captured state is seeded
snap wait system seed.loaded
@@ -1420,6 +1425,9 @@ prepare_ubuntu_core() {
# Snapshot the fresh state (including boot/bootenv)
if ! is_snapd_state_saved; then
+ # Create the file with the initial environment before saving the state
+ tests.env start initial
+
# important to remove disabled snaps before calling save_snapd_state
# or restore will break
remove_disabled_snaps
diff --git a/tests/lib/state.sh b/tests/lib/state.sh
index 73cdad0b87..6188cb4a60 100755
--- a/tests/lib/state.sh
+++ b/tests/lib/state.sh
@@ -2,10 +2,8 @@
SNAPD_STATE_PATH="$TESTSTMP/snapd-state"
SNAPD_STATE_FILE="$TESTSTMP/snapd-state/snapd-state.tar"
-RUNTIME_STATE_PATH="$TESTSTMP/runtime-state"
SNAPD_ACTIVE_UNITS="$RUNTIME_STATE_PATH/snapd-active-units"
-
delete_snapd_state() {
rm -rf "$SNAPD_STATE_PATH"
}
diff --git a/tests/lib/tools/suite/tests.env/task.yaml b/tests/lib/tools/suite/tests.env/task.yaml
new file mode 100644
index 0000000000..dcfc6be923
--- /dev/null
+++ b/tests/lib/tools/suite/tests.env/task.yaml
@@ -0,0 +1,59 @@
+summary: tests for tests.env
+
+restore: |
+ rm -f "$RUNTIME_STATE_PATH"/test1.env "$RUNTIME_STATE_PATH"/test2.env
+
+execute: |
+ # Both -h and --help are also recognized.
+ tests.env --help | MATCH "usage: tests.env start <ENV_NAME>"
+ tests.env -h | MATCH "usage: tests.env start <ENV_NAME>"
+
+ # check start env file
+ tests.env start test1
+ test -f "$RUNTIME_STATE_PATH"/test1.env
+
+ # check commands is-set and set
+ not tests.env is-set test1 var1
+ tests.env set test1 var1 val1
+ tests.env is-set test1 var1
+ tests.env set test1 var3
+ tests.env set test1 var4 ""
+
+ # check command get
+ test "$(tests.env get test1 var1)" = "val1"
+ test "$(tests.env get test1 var3)" = ""
+ test "$(tests.env get test1 var4)" = ""
+
+ # check set another value
+ not tests.env is-set test1 var2
+ tests.env set test1 var2 val2
+ tests.env is-set test1 var2
+ test "$(tests.env get test1 var2)" = "val2"
+ test "$(tests.env get test1 var1)" = "val1"
+
+ # check update the value
+ tests.env set test1 var1 val3
+ test "$(tests.env get test1 var1)" = "val3"
+
+ # create another env
+ tests.env start test2
+ tests.env set test2 var1 val1
+ test "$(tests.env get test1 var1)" = "val3"
+ test "$(tests.env get test2 var1)" = "val1"
+
+ # check errors
+ tests.env test 2>&1 | MATCH "tests.env: no such command: test"
+
+ tests.env start 2>&1 | MATCH "tests.env: name for the env file is required"
+
+ tests.env is-set 2>&1 | MATCH "tests.env: name for the env file is required"
+ tests.env is-set test1 2>&1 | MATCH "tests.env: variable to check in env file is required"
+ tests.env is-set test10 var1 2>&1 | MATCH "tests.env: env file $RUNTIME_STATE_PATH/test10.env does not exist"
+
+ tests.env get 2>&1 | MATCH "tests.env: name for the env file is required"
+ tests.env get test1 2>&1 | MATCH "tests.env: variable to check in env file is required"
+ tests.env get test10 var1 2>&1 | MATCH "tests.env: env file $RUNTIME_STATE_PATH/test10.env does not exist"
+
+ tests.env set 2>&1 | MATCH "tests.env: name for the env file is required"
+ tests.env set test1 2>&1 | MATCH "tests.env: variable to set in env file is required"
+ tests.env set test10 var1 val1 2>&1 | MATCH "tests.env: env file $RUNTIME_STATE_PATH/test10.env does not exist"
diff --git a/tests/lib/tools/tests.env b/tests/lib/tools/tests.env
new file mode 100755
index 0000000000..a302a254e3
--- /dev/null
+++ b/tests/lib/tools/tests.env
@@ -0,0 +1,130 @@
+#!/bin/bash
+
+show_help() {
+ echo "usage: tests.env start <ENV_NAME>"
+ echo " tests.env is-set <ENV_NAME> <VAR>"
+ echo " tests.env get <ENV_NAME> <VAR>"
+ echo " tests.env set <ENV_NAME> <VAR> <VAL>"
+ echo ""
+ echo "The tool is used to create an environment file"
+ echo " which can be shared across different tests and suites"
+}
+
+start() {
+ local NAME=$1
+ if [ -z "$NAME" ]; then
+ echo "tests.env: name for the env file is required"
+ exit 1
+ fi
+
+ if [ -f "$RUNTIME_STATE_PATH/$NAME.env" ]; then
+ echo "tests.env: env file $RUNTIME_STATE_PATH/$NAME.env already exists, deleting..."
+ rm -f "$RUNTIME_STATE_PATH/$NAME.env"
+ fi
+ mkdir -p "$RUNTIME_STATE_PATH"
+ touch "$RUNTIME_STATE_PATH/$NAME.env"
+}
+
+is_set() {
+ local NAME=$1
+ local VAR=$2
+
+ if [ -z "$NAME" ]; then
+ echo "tests.env: name for the env file is required"
+ exit 1
+ fi
+ if [ -z "$VAR" ]; then
+ echo "tests.env: variable to check in env file is required"
+ exit 1
+ fi
+
+ if [ ! -f "$RUNTIME_STATE_PATH/$NAME.env" ]; then
+ echo "tests.env: env file $RUNTIME_STATE_PATH/$NAME.env does not exist"
+ exit 1
+ fi
+
+ grep -Eq "^${VAR}=" "$RUNTIME_STATE_PATH/$NAME.env"
+}
+
+get() {
+ local NAME=$1
+ local VAR=$2
+
+ if [ -z "$NAME" ]; then
+ echo "tests.env: name for the env file is required"
+ exit 1
+ fi
+ if [ -z "$VAR" ]; then
+ echo "tests.env: variable to check in env file is required"
+ exit 1
+ fi
+
+ if [ ! -f "$RUNTIME_STATE_PATH/$NAME.env" ]; then
+ echo "tests.env: env file $RUNTIME_STATE_PATH/$NAME.env does not exist"
+ exit 1
+ fi
+
+ if is_set "$NAME" "$VAR"; then
+ grep -E "^${VAR}=" "$RUNTIME_STATE_PATH/$NAME.env" | cut -d "=" -f2-
+ fi
+}
+
+set() {
+ local NAME=$1
+ local VAR=$2
+ local VAL=$3
+
+ if [ -z "$NAME" ]; then
+ echo "tests.env: name for the env file is required"
+ exit 1
+ fi
+ if [ -z "$VAR" ]; then
+ echo "tests.env: variable to set in env file is required"
+ exit 1
+ fi
+
+ if [ ! -f "$RUNTIME_STATE_PATH/$NAME.env" ]; then
+ echo "tests.env: env file $RUNTIME_STATE_PATH/$NAME.env does not exist"
+ exit 1
+ fi
+
+ if is_set "$NAME" "$VAR"; then
+ sed -i -E "s/^${VAR}=.*/${VAR}=${VAL}/" "$RUNTIME_STATE_PATH/$NAME.env"
+ else
+ echo "${VAR}=${VAL}" >> "$RUNTIME_STATE_PATH/$NAME.env"
+ fi
+
+}
+
+main() {
+ if [ $# -eq 0 ]; then
+ show_help
+ exit 0
+ fi
+
+ local subcommand="$1"
+ local action=
+ while [ $# -gt 0 ]; do
+ case "$subcommand" in
+ -h|--help)
+ show_help
+ exit 0
+ ;;
+ *)
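+ # map the subcommand to a function name, e.g. "is-set" -> "is_set"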
+ action=$(echo "$subcommand" | tr '-' '_')
+ shift
+ break
+ ;;
+ esac
+ done
+
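+ # declare -f prints the function body only if the function is defined, so unknown subcommands are rejected here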
+ if [ -z "$(declare -f "$action")" ]; then
+ echo "tests.env: no such command: $subcommand"
+ show_help
+ exit 1
+ fi
+
+ "$action" "$@"
+}
+
+main "$@"
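For reference, a minimal usage sketch of the tests.env tool added above, as it might be consumed from another spread task; the env name "mysuite" and the variable "SETUP_DONE" are illustrative only:

    tests.env start mysuite
    tests.env set mysuite SETUP_DONE true
    if tests.env is-set mysuite SETUP_DONE; then
        test "$(tests.env get mysuite SETUP_DONE)" = "true"
    fi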
diff --git a/tests/main/cgroup-devices-v1/task.yaml b/tests/main/cgroup-devices-v1/task.yaml
index 0a12bec3ec..b744544e3b 100644
--- a/tests/main/cgroup-devices-v1/task.yaml
+++ b/tests/main/cgroup-devices-v1/task.yaml
@@ -1,6 +1,6 @@
summary: measuring basic properties of device cgroup
# Disable the test on all systems that boot with cgroup v2
-systems: [ -fedora-36-*, -fedora-37-*, -debian-11-*, -debian-sid-*, -arch-*, -opensuse-tumbleweed-*, -ubuntu-22.*, -ubuntu-23.*, -ubuntu-core-22-*, -centos-9-*]
+systems: [ -fedora-36-*, -fedora-37-*, -debian-*, -arch-*, -opensuse-tumbleweed-*, -ubuntu-22.*, -ubuntu-23.*, -ubuntu-core-22-*, -centos-9-*]
execute: ./task.sh
diff --git a/tests/main/cgroup-devices-v2/task.yaml b/tests/main/cgroup-devices-v2/task.yaml
index 08a2379879..a32325aee8 100644
--- a/tests/main/cgroup-devices-v2/task.yaml
+++ b/tests/main/cgroup-devices-v2/task.yaml
@@ -9,7 +9,6 @@ systems:
- -ubuntu-18.04-*
- -ubuntu-20.04-*
- -ubuntu-core-*
- - -debian-10-*
- -centos-7-*
- -centos-8-*
- -centos-9-*
diff --git a/tests/main/cgroup-freezer/task.yaml b/tests/main/cgroup-freezer/task.yaml
index 81c514c08a..ef06a6b4f3 100644
--- a/tests/main/cgroup-freezer/task.yaml
+++ b/tests/main/cgroup-freezer/task.yaml
@@ -5,7 +5,7 @@ details: |
placed into the appropriate hierarchy under the freezer cgroup.
# Disable the test on all systems that boot with cgroup v2
-systems: [ -fedora-36-*, -fedora-37-*, -debian-11-*, -debian-sid-*, -arch-*, -opensuse-tumbleweed-*, -ubuntu-22.*, -ubuntu-23.*, -ubuntu-core-22-*, -centos-9-*]
+systems: [ -fedora-36-*, -fedora-37-*, -debian-*, -arch-*, -opensuse-tumbleweed-*, -ubuntu-22.*, -ubuntu-23.*, -ubuntu-core-22-*, -centos-9-*]
prepare: |
"$TESTSTOOLS"/snaps-state install-local test-snapd-sh
diff --git a/tests/main/debug-sandbox/task.yaml b/tests/main/debug-sandbox/task.yaml
index ee3ea79901..34c10a218a 100644
--- a/tests/main/debug-sandbox/task.yaml
+++ b/tests/main/debug-sandbox/task.yaml
@@ -6,7 +6,7 @@ execute: |
snap debug sandbox-features | grep "confinement-options: " | NOMATCH "classic"
snap debug sandbox-features | MATCH "apparmor: .+"
;;
- ubuntu-*|opensuse-*|debian-10-*|debian-sid-*|arch-linux-*)
+ ubuntu-*|opensuse-*|debian-sid-*|arch-linux-*)
# Debian, openSUSE, Arch because partial apparmor is enabled
snap debug sandbox-features | MATCH "apparmor: .+"
;;
diff --git a/tests/main/degraded/task.yaml b/tests/main/degraded/task.yaml
index 4b34ff4918..8f8ec2ae6b 100644
--- a/tests/main/degraded/task.yaml
+++ b/tests/main/degraded/task.yaml
@@ -21,11 +21,6 @@ execute: |
systemctl mask systemd-vconsole-setup.service
systemctl reset-failed systemd-vconsole-setup.service
;;
- debian-10-*)
- # modules-load fails trying to modprobe lp and parallel port drivers
- # which are not part of the cloud kernel package
- systemctl reset-failed systemd-modules-load.service
- ;;
centos-8-*)
# tries to load ipmi_si module which fails with ENODEV
systemctl reset-failed systemd-modules-load.service
diff --git a/tests/main/download-timeout/task.yaml b/tests/main/download-timeout/task.yaml
index 7ef8dd9d2d..c5f464df02 100644
--- a/tests/main/download-timeout/task.yaml
+++ b/tests/main/download-timeout/task.yaml
@@ -37,7 +37,7 @@ restore: |
# We need to skip this step in 22.10 because it fails with error:
# Error: Qdisc not classful. We have an error talking to the kernel
# The test works well even after skipping this
- if not os.query is-ubuntu 22.10 && not os.query is-ubuntu 23.04 ; then
+ if os.query is-ubuntu-le 22.04; then
tc filter del dev ens4
fi
tc qdisc del dev ens4 ingress
diff --git a/tests/main/fake-netplan-apply/task.yaml b/tests/main/fake-netplan-apply/task.yaml
index dad073814a..8ed4cc0149 100644
--- a/tests/main/fake-netplan-apply/task.yaml
+++ b/tests/main/fake-netplan-apply/task.yaml
@@ -38,7 +38,7 @@ prepare: |
sed "$TESTSLIB/snaps/netplan-snap/meta/snap.yaml.in" -e "s/base: BASESNAP/base: core20/" > "$TESTSLIB/snaps/netplan-snap/meta/snap.yaml"
snap pack "$TESTSLIB/snaps/netplan-snap" --filename=netplan-snap-20.snap
snap install --dangerous netplan-snap-20.snap
- elif os.query is-ubuntu 22.04 || os.query is-ubuntu 22.10 || os.query is-ubuntu 23.04; then
+ elif os.query is-ubuntu-ge 22.04; then
# use base: core22
sed "$TESTSLIB/snaps/netplan-snap/meta/snap.yaml.in" -e "s/base: BASESNAP/base: core22/" > "$TESTSLIB/snaps/netplan-snap/meta/snap.yaml"
snap pack "$TESTSLIB/snaps/netplan-snap" --filename=netplan-snap-22.snap
diff --git a/tests/main/interfaces-avahi-observe/task.yaml b/tests/main/interfaces-avahi-observe/task.yaml
index e3f0f9b7f3..6b0d28f5fc 100644
--- a/tests/main/interfaces-avahi-observe/task.yaml
+++ b/tests/main/interfaces-avahi-observe/task.yaml
@@ -30,7 +30,10 @@ execute: |
snap connect generic-consumer:avahi-observe
echo "Then the snap is able to access avahi provided info"
- hostname=$(hostname)
+ # Support the two possible hostname formats on Google machines, e.g.:
+ # jun221608-881720
+ # jun221608-881720.c.snapd-spread.internal
+ hostname="$(hostname | cut -d . -f1)"
avahi_dbus_call | MATCH "$hostname"
if [ "$(snap debug confinement)" = partial ]; then
diff --git a/tests/main/interfaces-calendar-service/task.yaml b/tests/main/interfaces-calendar-service/task.yaml
index 20423e0a2e..62b3df6fe3 100644
--- a/tests/main/interfaces-calendar-service/task.yaml
+++ b/tests/main/interfaces-calendar-service/task.yaml
@@ -20,12 +20,11 @@ systems:
- -amazon-* # no need to run this on amazon
- -arch-linux-* # test-snapd-eds is incompatible with eds version shipped with the distro
- -centos-*
- - -debian-11-*
- - -debian-sid-*
+ - -debian-*
- -fedora-36-* # test-snapd-eds is incompatible with eds version shipped with the distro
- -fedora-37-* # test-snapd-eds is incompatible with eds version shipped with the distro
- - -opensuse-15.3-* # test-snapd-eds is incompatible with eds version shipped with the distro
- -opensuse-15.4-* # test-snapd-eds is incompatible with eds version shipped with the distro
+ - -opensuse-15.5-* # test-snapd-eds is incompatible with eds version shipped with the distro
- -opensuse-tumbleweed-* # test-snapd-eds is incompatible with eds version shipped with the distro
- -ubuntu-14.04-* # no tests.session support, eds is too old
- -ubuntu-2* # test-snapd-eds is incompatible with eds shipped with the distro
diff --git a/tests/main/interfaces-contacts-service/task.yaml b/tests/main/interfaces-contacts-service/task.yaml
index dd40655603..65df40edb9 100644
--- a/tests/main/interfaces-contacts-service/task.yaml
+++ b/tests/main/interfaces-contacts-service/task.yaml
@@ -15,12 +15,11 @@ systems:
- -amazon-* # no need to run this on amazon
- -arch-linux-* # test-snapd-eds is incompatible with eds version shipped with the distro
- -centos-*
- - -debian-11-*
- - -debian-sid-*
+ - -debian-*
- -fedora-36-* # test-snapd-eds is incompatible with eds version shipped with the distro
- -fedora-37-* # test-snapd-eds is incompatible with eds version shipped with the distro
- - -opensuse-15.3-* # test-snapd-eds is incompatible with eds version shipped with the distro
- -opensuse-15.4-* # test-snapd-eds is incompatible with eds version shipped with the distro
+ - -opensuse-15.5-* # test-snapd-eds is incompatible with eds version shipped with the distro
- -opensuse-tumbleweed-* # test-snapd-eds is incompatible with eds version shipped with the distro
- -ubuntu-14.04-* # no tests.session support, eds is too old
- -ubuntu-2* # test-snapd-eds is incompatible with eds shipped with the distro
diff --git a/tests/main/lxd-mount-units/task.yaml b/tests/main/lxd-mount-units/task.yaml
index 3496cd601a..05d1a3033f 100644
--- a/tests/main/lxd-mount-units/task.yaml
+++ b/tests/main/lxd-mount-units/task.yaml
@@ -22,8 +22,8 @@ execute: |
core_snap=core22
fi
- # There isn't an official image for lunar yet, let's use the community one
- if os.query is-ubuntu 23.04; then
+ # There isn't an official image for mantic yet, let's use the community one
+ if os.query is-ubuntu 23.10; then
CODENAME=$(. /etc/os-release && echo "$VERSION_CODENAME")
lxc launch --quiet "images:ubuntu/$CODENAME" ubuntu
core_snap=core22
@@ -47,9 +47,9 @@ execute: |
DEB=$(basename "$GOHOME"/snapd_*.deb)
lxd.lxc exec ubuntu -- apt update
- # As for ubuntu lunar it is not using the official image, snapd is not installed by default
+ # Since ubuntu mantic is not using the official image, snapd is not installed by default
# This should be removed once the official image is released
- if os.query is-ubuntu 23.04; then
+ if os.query is-ubuntu 23.10; then
lxd.lxc exec ubuntu -- apt install -y snapd
lxd.lxc exec ubuntu -- snap install "$core_snap"
fi
diff --git a/tests/main/lxd-postrm-purge/task.yaml b/tests/main/lxd-postrm-purge/task.yaml
index b64a073d0b..48711f2b0d 100644
--- a/tests/main/lxd-postrm-purge/task.yaml
+++ b/tests/main/lxd-postrm-purge/task.yaml
@@ -22,8 +22,8 @@ prepare: |
echo "Install lxd"
"$TESTSTOOLS"/lxd-state prepare-snap
- # There isn't an official image for lunar yet, let's use the community one
- if os.query is-ubuntu 23.04; then
+ # There isn't an official image for mantic yet, let's use the community one
+ if os.query is-ubuntu 23.10; then
CODENAME=$(. /etc/os-release && echo "$VERSION_CODENAME")
lxc launch --quiet "images:ubuntu/$CODENAME" my-ubuntu
else
diff --git a/tests/main/lxd-try/task.yaml b/tests/main/lxd-try/task.yaml
index 5235f01d86..7d60f0a79e 100644
--- a/tests/main/lxd-try/task.yaml
+++ b/tests/main/lxd-try/task.yaml
@@ -6,8 +6,8 @@ prepare: |
echo "Install lxd"
"$TESTSTOOLS"/lxd-state prepare-snap
- # There isn't an official image for lunar yet, let's use the community one
- if os.query is-ubuntu 23.04; then
+ # There isn't an official image for mantic yet, let's use the community one
+ if os.query is-ubuntu 23.10; then
CODENAME=$(. /etc/os-release && echo "$VERSION_CODENAME")
lxc launch --quiet "images:ubuntu/$CODENAME" ubuntu
else
diff --git a/tests/main/lxd/task.yaml b/tests/main/lxd/task.yaml
index b9428c6c70..68957622a0 100644
--- a/tests/main/lxd/task.yaml
+++ b/tests/main/lxd/task.yaml
@@ -78,8 +78,8 @@ execute: |
# prep two containers, the my-ubuntu normal container and the
# my-nesting-ubuntu nesting container
- # There isn't an official image for lunar yet, let's use the community one
- if os.query is-ubuntu 23.04; then
+ # There isn't an official image for mantic yet, let's use the community one
+ if os.query is-ubuntu 23.10; then
CODENAME=$(. /etc/os-release && echo "$VERSION_CODENAME")
lxc launch --quiet "images:ubuntu/$CODENAME" my-ubuntu
lxc launch --quiet "images:ubuntu/$CODENAME" my-nesting-ubuntu -c security.nesting=true
@@ -161,8 +161,8 @@ execute: |
lxd.lxc exec my-nesting-ubuntu -- lxd waitready
lxd.lxc exec my-nesting-ubuntu -- lxd init --auto
- # There isn't an official image for lunar yet, let's use the community one
- if os.query is-ubuntu 23.04; then
+ # There isn't an official image for mantic yet, let's use the community one
+ if os.query is-ubuntu 23.10; then
CODENAME=$(. /etc/os-release && echo "$VERSION_CODENAME")
lxd.lxc exec my-nesting-ubuntu -- lxc launch --quiet "images:ubuntu/$CODENAME" my-inner-ubuntu
else
diff --git a/tests/main/microk8s-smoke/task.yaml b/tests/main/microk8s-smoke/task.yaml
index 73ae17dcf5..21f0189390 100644
--- a/tests/main/microk8s-smoke/task.yaml
+++ b/tests/main/microk8s-smoke/task.yaml
@@ -10,7 +10,6 @@ systems:
- -centos-9-* # fails to start service daemon-containerd
- -fedora-36-* # fails to start service daemon-containerd
- -fedora-37-* # fails to start service daemon-containerd
- - -debian-10-* # doesn't have libseccomp >= 2.4
- -ubuntu-14.04-* # doesn't have libseccomp >= 2.4
- -ubuntu-*-32 # no microk8s snap for 32 bit systems
- -arch-linux-* # XXX: no curl to the pod for unknown reasons
diff --git a/tests/main/nfs-support/task.yaml b/tests/main/nfs-support/task.yaml
index 2bed421500..dc41a8b61a 100644
--- a/tests/main/nfs-support/task.yaml
+++ b/tests/main/nfs-support/task.yaml
@@ -206,7 +206,7 @@ execute: |
# - arch: mount.nfs: requested NFS version or transport protocol is not supported
# - debian-sid: mount.nfs: an incorrect mount option was specified
# - ubuntu-2*: mount.nfs: an incorrect mount option was specified
- if not os.query is-arch-linux && not os.query is-debian sid && not os.query is-debian 11 && [[ "$SPREAD_SYSTEM" != ubuntu-2* ]]; then
+ if not os.query is-arch-linux && not os.query is-debian && not os.query is-ubuntu-ge 20.04; then
# Mount NFS-exported /home over real /home using NFSv3 and UDP transport
mount -t nfs localhost:/home /home -o nfsvers=3,proto=udp
diff --git a/tests/main/security-seccomp/task.yaml b/tests/main/security-seccomp/task.yaml
index 6289937395..85df469900 100644
--- a/tests/main/security-seccomp/task.yaml
+++ b/tests/main/security-seccomp/task.yaml
@@ -44,7 +44,7 @@ prepare: |
if snap debug sandbox-features --required apparmor:parser:snapd-internal; then
APPARMOR_PARSER="/snap/snapd/current/usr/lib/snapd/apparmor_parser --config-file /snap/snapd/current/usr/lib/snapd/apparmor/parser.conf -b /snap/snapd/current/usr/lib/snapd/apparmor.d --policy-features /snap/snapd/current/usr/lib/snapd/apparmor.d/abi/3.0"
fi
- $APPARMOR_PARSER -r "$AAP"
+ $APPARMOR_PARSER -K -r "$AAP"
fi
restore: |
@@ -59,7 +59,7 @@ restore: |
if snap debug sandbox-features --required apparmor:parser:snapd-internal; then
APPARMOR_PARSER="/snap/snapd/current/usr/lib/snapd/apparmor_parser --config-file /snap/snapd/current/usr/lib/snapd/apparmor/parser.conf -b /snap/snapd/current/usr/lib/snapd/apparmor.d --policy-features /snap/snapd/current/usr/lib/snapd/apparmor.d/abi/3.0"
fi
- $APPARMOR_PARSER -r "$AAP"
+ $APPARMOR_PARSER -K -r "$AAP"
fi
execute: |
diff --git a/tests/main/snap-logs-journal/task.yaml b/tests/main/snap-logs-journal/task.yaml
index 0f17549042..e622a8b850 100644
--- a/tests/main/snap-logs-journal/task.yaml
+++ b/tests/main/snap-logs-journal/task.yaml
@@ -8,7 +8,6 @@ systems:
- -amazon-linux-2-*
- -centos-7-*
- -centos-8-*
- - -debian-10-64
- -ubuntu-14.04-*
- -ubuntu-16.04-*
- -ubuntu-18.04-*
diff --git a/tests/main/snap-quota-journal/task.yaml b/tests/main/snap-quota-journal/task.yaml
index 49c6702d90..61bb32d65a 100644
--- a/tests/main/snap-quota-journal/task.yaml
+++ b/tests/main/snap-quota-journal/task.yaml
@@ -10,7 +10,6 @@ systems:
- -amazon-linux-2-*
- -centos-7-*
- -centos-8-*
- - -debian-10-64
- -ubuntu-14.04-*
- -ubuntu-16.04-*
- -ubuntu-18.04-*
diff --git a/tests/main/snap-quota-services/task.yaml b/tests/main/snap-quota-services/task.yaml
index 84a6a81103..02559ba3c8 100644
--- a/tests/main/snap-quota-services/task.yaml
+++ b/tests/main/snap-quota-services/task.yaml
@@ -10,7 +10,6 @@ systems:
- -amazon-linux-2-*
- -centos-7-*
- -centos-8-*
- - -debian-10-64
- -ubuntu-14.04-*
- -ubuntu-16.04-*
- -ubuntu-18.04-*
diff --git a/tests/main/snapd-snap/task.yaml b/tests/main/snapd-snap/task.yaml
index 4f811299b7..56d90a6a1b 100644
--- a/tests/main/snapd-snap/task.yaml
+++ b/tests/main/snapd-snap/task.yaml
@@ -155,6 +155,9 @@ execute: |
if ! os.query is-trusty && ! os.query is-xenial; then
systemctl status snapd.apparmor.service
fi
+ if os.query is-ubuntu; then
+ systemctl status apparmor.service
+ fi
cat /var/lib/snapd/system-key
echo "Rebooted successfully"
@@ -235,9 +238,20 @@ execute: |
echo "Ensure we restarted into the snapd snap"
"$TESTSTOOLS"/journal-state match-log 'restarting into "/snap/snapd/'
+ # see LP:2024637
+ if grep -q /var/lib/snapd/apparmor/ /lib/apparmor/functions; then
+ echo "SKIP: cannot test builtin apparmor parser until /lib/apparmor/functions stops loading the snapd profiles"
+ exit 0
+ fi
+
echo "Ensure sandbox-features shows the internal apparmor_parser"
snap debug sandbox-features --required apparmor:parser:snapd-internal
+ echo "Ensure snap-confine apparmor profile points to snap-confine.internal"
+ for profile in /var/lib/snapd/apparmor/profiles/snap-confine.*; do
+ MATCH '#include "/var/lib/snapd/apparmor/snap-confine.internal"' < "$profile"
+ done
+
echo "Ensure we support posix mqueue and userns in the internal apparmor_parser"
snap debug sandbox-features --required apparmor:parser:mqueue
snap debug sandbox-features --required apparmor:parser:userns
diff --git a/tests/main/store-state/task.yaml b/tests/main/store-state/task.yaml
index 15cbbaf2af..a359ade44a 100644
--- a/tests/main/store-state/task.yaml
+++ b/tests/main/store-state/task.yaml
@@ -1,5 +1,8 @@
summary: smoke test for the store-state tool
+# cannot work with the staging store without a testing build with compiled-in staging keys
+backends: [-external]
+
# ubuntu-14.04: systemd-run not supported
systems: [-ubuntu-14.04-64]
diff --git a/tests/main/system-usernames-illegal/task.yaml b/tests/main/system-usernames-illegal/task.yaml
index ea7f9438eb..76a2670deb 100644
--- a/tests/main/system-usernames-illegal/task.yaml
+++ b/tests/main/system-usernames-illegal/task.yaml
@@ -3,7 +3,7 @@ summary: ensure unapproved user cannot be used with system-usernames
# List of expected snap install failures due to libseccomp/golang-seccomp being
# too old. Since the illegal name check happens after verifying system support,
# we can ignore these.
-systems: [-amazon-linux-2-*, -centos-7-*, -debian-10-*, -ubuntu-14.04-*]
+systems: [-amazon-linux-2-*, -centos-7-*, -ubuntu-14.04-*]
execute: |
snap_path=$("$TESTSTOOLS"/snaps-state pack-local test-snapd-illegal-system-username)
diff --git a/tests/main/system-usernames-install-twice/task.yaml b/tests/main/system-usernames-install-twice/task.yaml
index 66afbf3338..5c900a9528 100644
--- a/tests/main/system-usernames-install-twice/task.yaml
+++ b/tests/main/system-usernames-install-twice/task.yaml
@@ -4,7 +4,7 @@ summary: ensure snap can be installed twice (reusing the created groups)
# too old. Since the illegal name check happens after verifying system support,
# we can ignore these. Ignore ubuntu-core since groupdel doesn't support
# --extrausers
-systems: [-amazon-linux-2-*, -centos-7-*, -debian-10-*, -ubuntu-14.04-*, -ubuntu-core-*]
+systems: [-amazon-linux-2-*, -centos-7-*, -ubuntu-14.04-*, -ubuntu-core-*]
prepare: |
snap install --edge test-snapd-daemon-user
diff --git a/tests/main/system-usernames-missing-user/task.yaml b/tests/main/system-usernames-missing-user/task.yaml
index fdbd327dd8..df09737f1f 100644
--- a/tests/main/system-usernames-missing-user/task.yaml
+++ b/tests/main/system-usernames-missing-user/task.yaml
@@ -4,7 +4,7 @@ summary: ensure snap fails to install if one of user or group doesn't exist
# too old. Since the illegal name check happens after verifying system support,
# we can ignore these. Ignore ubuntu-core since groupdel doesn't support
# --extrausers
-systems: [-amazon-linux-2-*, -centos-7-*, -debian-10-*, -ubuntu-14.04-*, -ubuntu-core-*]
+systems: [-amazon-linux-2-*, -centos-7-*, -ubuntu-14.04-*, -ubuntu-core-*]
prepare: |
groupadd --system snap_daemon
diff --git a/tests/main/system-usernames-snap-scoped/task.yaml b/tests/main/system-usernames-snap-scoped/task.yaml
index fd0443b53d..e36f7872e1 100644
--- a/tests/main/system-usernames-snap-scoped/task.yaml
+++ b/tests/main/system-usernames-snap-scoped/task.yaml
@@ -1,10 +1,10 @@
summary: ensure only approved snaps can use snap-scoped system user
# - not running on 14.04 as we have no real systemd here
-# - also exclude debian 10 and centos 7 because of old libseccomp (the
+# - also exclude centos 7 because of old libseccomp (the
# system-usernames test is already checking which distributions have the
# needed support, so there's no need to replicate that code here)
-systems: [-ubuntu-14.04-*, -debian-10-*, -centos-7-*]
+systems: [-ubuntu-14.04-*, -centos-7-*]
environment:
STORE_DIR: $(pwd)/fake-store-blobdir
diff --git a/tests/main/system-usernames/task.yaml b/tests/main/system-usernames/task.yaml
index 3e9e914c54..5c23a3ec1e 100644
--- a/tests/main/system-usernames/task.yaml
+++ b/tests/main/system-usernames/task.yaml
@@ -8,7 +8,7 @@ environment:
# List of expected snap install failures due to libseccomp/golang-seccomp
# being too old. This should only reduce with time since new systems should
# have newer libseccomp and golang-seccomp
- EXFAIL: "centos-7-64 debian-10-64 ubuntu-14"
+ EXFAIL: "centos-7-64 ubuntu-14"
prepare: |
echo "Install helper snaps with default confinement"
diff --git a/tests/main/uc20-create-partitions-encrypt/task.yaml b/tests/main/uc20-create-partitions-encrypt/task.yaml
index 862814dc40..9e3aedc699 100644
--- a/tests/main/uc20-create-partitions-encrypt/task.yaml
+++ b/tests/main/uc20-create-partitions-encrypt/task.yaml
@@ -1,7 +1,8 @@
summary: Integration tests for the snap-bootstrap binary
# use the same system and tooling as uc20
-systems: [ubuntu-2*]
+# TODO: revert skip for ubuntu-23*
+systems: [ubuntu-20.*,ubuntu-22.*]
environment:
SNAPD_DEBUG: "1"
@@ -100,7 +101,7 @@ execute: |
fi
channel=20
- if os.query is-ubuntu 22.04 || os.query is-ubuntu 22.10 || os.query is-ubuntu 23.04; then
+ if os.query is-ubuntu-ge 22.04; then
channel=22
fi
diff --git a/tests/main/uc20-create-partitions/task.yaml b/tests/main/uc20-create-partitions/task.yaml
index 577007cac8..389b214def 100644
--- a/tests/main/uc20-create-partitions/task.yaml
+++ b/tests/main/uc20-create-partitions/task.yaml
@@ -92,7 +92,7 @@ execute: |
fi
channel=20
- if os.query is-ubuntu 22.04 || os.query is-ubuntu 22.10 || os.query is-ubuntu 23.04; then
+ if os.query is-ubuntu-ge 22.04; then
channel=22
fi
diff --git a/tests/nested/core/core20-factory-reset/task.yaml b/tests/nested/core/core20-factory-reset/task.yaml
index 7a894de0dd..b0858281ce 100644
--- a/tests/nested/core/core20-factory-reset/task.yaml
+++ b/tests/nested/core/core20-factory-reset/task.yaml
@@ -6,8 +6,13 @@ details: |
systems: [ubuntu-20.04-64, ubuntu-22.04-64]
environment:
- NESTED_ENABLE_SECURE_BOOT: true
- NESTED_ENABLE_TPM: true
+ NESTED_ENABLE_SECURE_BOOT/fde: true
+ NESTED_ENABLE_TPM/fde: true
+ FDE/fde: "enabled"
+
+ NESTED_ENABLE_SECURE_BOOT/nofde: false
+ NESTED_ENABLE_TPM/nofde: false
+ FDE/nofde: "disabled"
execute: |
echo "Wait for the system to be seeded first"
@@ -28,7 +33,9 @@ execute: |
remote.exec sudo touch /writable/marker
# grab the ubuntu-save key
- remote.exec cat /run/mnt/ubuntu-seed/device/fde/ubuntu-save.recovery.sealed-key > pre-reset-save-fallback-key
+ if [ "${FDE}" = enabled ]; then
+ remote.exec cat /run/mnt/ubuntu-seed/device/fde/ubuntu-save.recovery.sealed-key > pre-reset-save-fallback-key
+ fi
# add || true in case the SSH connection is broken while executing this
# since this command causes an immediate reboot
@@ -62,8 +69,13 @@ execute: |
test "$old_ubuntu_seed" = "$new_ubuntu_seed"
# check ubuntu-save
- old_ubuntu_save="$(grep ' LABEL="ubuntu-save-enc"' < initial-disk)"
- new_ubuntu_save="$(grep ' LABEL="ubuntu-save-enc"' < current-disk)"
+ if [ "${FDE}" = enabled ]; then
+ old_ubuntu_save="$(grep ' LABEL="ubuntu-save-enc"' < initial-disk)"
+ new_ubuntu_save="$(grep ' LABEL="ubuntu-save-enc"' < current-disk)"
+ else
+ old_ubuntu_save="$(grep ' LABEL="ubuntu-save"' < initial-disk)"
+ new_ubuntu_save="$(grep ' LABEL="ubuntu-save"' < current-disk)"
+ fi
# ubuntu save is identical
test "$old_ubuntu_save" = "$new_ubuntu_save"
@@ -76,8 +88,13 @@ execute: |
test "$old_ubuntu_boot" != "$new_ubuntu_boot"
# check ubuntu-data
- old_ubuntu_data="$(grep ' LABEL="ubuntu-data-enc"' < initial-disk)"
- new_ubuntu_data="$(grep ' LABEL="ubuntu-data-enc"' < current-disk)"
+ if [ "${FDE}" = enabled ]; then
+ old_ubuntu_data="$(grep ' LABEL="ubuntu-data-enc"' < initial-disk)"
+ new_ubuntu_data="$(grep ' LABEL="ubuntu-data-enc"' < current-disk)"
+ else
+ old_ubuntu_data="$(grep ' LABEL="ubuntu-data"' < initial-disk)"
+ new_ubuntu_data="$(grep ' LABEL="ubuntu-data"' < current-disk)"
+ fi
# again same device
test "$(echo "$old_ubuntu_data" | cut -f1 -d:)" = "$(echo "$new_ubuntu_data" | cut -f1 -d:)"
# again, the UUIDs are different
@@ -92,19 +109,24 @@ execute: |
# the temp factory-reset key is gone
# TODO this is a very weak check
- remote.exec test ! -e /run/mnt/ubuntu-seed/device/fde/ubuntu-save.recovery.sealed-key.factory-reset
+ if [ "${FDE}" = enabled ]; then
+ remote.exec test ! -e /run/mnt/ubuntu-seed/device/fde/ubuntu-save.recovery.sealed-key.factory-reset
+ fi
# no factory reset marker
remote.exec test ! -e /var/lib/snapd/device/factory-reset
# verify that the factory-reset log was collected
remote.exec "zcat /var/log/factory-reset-mode.log.gz" | MATCH 'performing factory reset on an installed system'
- remote.exec cat /run/mnt/ubuntu-seed/device/fde/ubuntu-save.recovery.sealed-key > post-reset-save-fallback-key
- # not a great check as the fallback key may have been resealed, but it
- # should be different nonetheless
- not cmp pre-reset-save-fallback-key post-reset-save-fallback-key
+ if [ "${FDE}" = enabled ]; then
+ remote.exec cat /run/mnt/ubuntu-seed/device/fde/ubuntu-save.recovery.sealed-key > post-reset-save-fallback-key
+ # not a great check as the fallback key may have been resealed, but it
+ # should be different nonetheless
+ not cmp pre-reset-save-fallback-key post-reset-save-fallback-key
+ fi
echo "Perform subsequent factory reset"
+ boot_id=$(tests.nested boot-id)
remote.exec "sudo snap reboot --factory-reset" || true
remote.wait-for reboot "${boot_id}"
remote.exec cat /proc/cmdline | MATCH 'snapd_recovery_mode=run'
@@ -120,7 +142,9 @@ execute: |
# the markers are still there
remote.exec test -e /run/mnt/ubuntu-save/marker
remote.exec test -e /run/mnt/ubuntu-seed/marker
- # get the key
- remote.exec cat /run/mnt/ubuntu-seed/device/fde/ubuntu-save.recovery.sealed-key > subsequent-reset-save-fallback-key
- # and the key is different again
- not cmp post-reset-save-fallback-key subsequent-reset-save-fallback-key
+ if [ "${FDE}" = enabled ]; then
+ # get the key
+ remote.exec cat /run/mnt/ubuntu-seed/device/fde/ubuntu-save.recovery.sealed-key > subsequent-reset-save-fallback-key
+ # and the key is different again
+ not cmp post-reset-save-fallback-key subsequent-reset-save-fallback-key
+ fi
diff --git a/tests/nested/manual/core20-new-snapd-does-not-break-old-initrd/task.yaml b/tests/nested/manual/core20-new-snapd-does-not-break-old-initrd/task.yaml
index ec2ca05e1f..275763cff0 100644
--- a/tests/nested/manual/core20-new-snapd-does-not-break-old-initrd/task.yaml
+++ b/tests/nested/manual/core20-new-snapd-does-not-break-old-initrd/task.yaml
@@ -34,6 +34,7 @@ environment:
NESTED_ENABLE_SECURE_BOOT: true
INITIAL_KERNEL_REV_URL: https://storage.googleapis.com/snapd-spread-tests/snaps/pc-kernel_838.snap
+ INITIAL_GADGET_REV_URL: https://storage.googleapis.com/snapd-spread-tests/snaps/pc_132.snap
prepare: |
# always build the snapd snap from this branch - on the new variant it gets
@@ -60,6 +61,8 @@ prepare: |
# use a specific version of the kernel snap and thus initramfs that we know
# doesn't support v2 secboot keys
wget --quiet "$INITIAL_KERNEL_REV_URL"
+ # use a gadget snap that works with this kernel
+ wget --quiet "$INITIAL_GADGET_REV_URL"
# unpack it and repack it so it doesn't match any store assertions and thus
# won't be automatically refreshed behind our backs when we boot the VM
@@ -68,6 +71,11 @@ prepare: |
snap pack pc-kernel-snap --filename=pc-kernel.snap
mv pc-kernel.snap "$(tests.nested get extra-snaps-path)"
+ unsquashfs -d pc-snap pc_132.snap
+ touch ./pc-snap/in-case-mksquashfs-becomes-deterministic-someday
+ snap pack pc-snap --filename=pc.snap
+ mv pc.snap "$(tests.nested get extra-snaps-path)"
+
# Get the nested system version
VERSION="$(tests.nested show version)"
diff --git a/tests/nested/manual/muinstaller-real/gadget-partial.yaml b/tests/nested/manual/muinstaller-real/gadget-partial.yaml
new file mode 100644
index 0000000000..0e55bb9b8d
--- /dev/null
+++ b/tests/nested/manual/muinstaller-real/gadget-partial.yaml
@@ -0,0 +1,50 @@
+volumes:
+ pc:
+ # TODO Leaving schema out of "partial" for the moment
+ partial: [structure, filesystem, size]
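+ # the aspects listed as "partial" are left incomplete in this gadget and are expected to be completed at install time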
+ schema: gpt
+ # bootloader configuration is shipped and managed by snapd
+ bootloader: grub
+ structure:
+ - name: mbr
+ type: mbr
+ size: 440
+ update:
+ edition: 1
+ content:
+ - image: mbr.img
+ - name: ubuntu-seed
+ role: system-seed-null
+ filesystem: vfat
+ # UEFI boots from the ESP partition first by default
+ type: C12A7328-F81F-11D2-BA4B-00A0C93EC93B
+ # We leave extra space for a partition not managed by snapd
+ offset: 2M
+ min-size: 1000M
+ update:
+ edition: 2
+ content:
+ - source: grubx64.efi
+ target: EFI/boot/grubx64.efi
+ - source: shim.efi.signed
+ target: EFI/boot/bootx64.efi
+ - name: ubuntu-boot
+ role: system-boot
+ type: 0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ # what's the appropriate size?
+ min-size: 750M
+ update:
+ edition: 1
+ content:
+ - source: grubx64.efi
+ target: EFI/boot/grubx64.efi
+ - source: shim.efi.signed
+ target: EFI/boot/bootx64.efi
+ - name: ubuntu-save
+ role: system-save
+ type: 0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ min-size: 32M
+ - name: ubuntu-data
+ role: system-data
+ type: 0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ min-size: 1G
diff --git a/tests/nested/manual/muinstaller-real/task.yaml b/tests/nested/manual/muinstaller-real/task.yaml
index c638b8c3b7..b34895ba88 100644
--- a/tests/nested/manual/muinstaller-real/task.yaml
+++ b/tests/nested/manual/muinstaller-real/task.yaml
@@ -3,23 +3,29 @@ summary: End-to-end test for install via muinstaller
systems: [ubuntu-22.04-64]
environment:
- # Test both encrypted and unencrypted install using the muinstaller
- NESTED_ENABLE_TPM/encrypted: true
- NESTED_ENABLE_SECURE_BOOT/encrypted: true
-
- # unencrypted case
- NESTED_ENABLE_TPM/plain: false
- NESTED_ENABLE_SECURE_BOOT/plain: false
-
- # ensure we use our latest code
- NESTED_BUILD_SNAPD_FROM_CURRENT: true
- NESTED_REPACK_KERNEL_SNAP: true
- NESTED_ENABLE_OVMF: true
- # store related setup
- STORE_ADDR: localhost:11028
- STORE_DIR: $(pwd)/fake-store-blobdir
- # image
- IMAGE_MOUNTPOINT: /mnt/cloudimg
+ # No partial gadget by default
+ PARTIAL_GADGET: false
+
+ # Test both encrypted and unencrypted install using the muinstaller
+ NESTED_ENABLE_TPM/encrypted,partial: true
+ NESTED_ENABLE_SECURE_BOOT/encrypted,partial: true
+
+ # unencrypted case
+ NESTED_ENABLE_TPM/plain: false
+ NESTED_ENABLE_SECURE_BOOT/plain: false
+
+ # Using partial case (TPM used, see above)
+ PARTIAL_GADGET/partial: true
+
+ # ensure we use our latest code
+ NESTED_BUILD_SNAPD_FROM_CURRENT: true
+ NESTED_REPACK_KERNEL_SNAP: true
+ NESTED_ENABLE_OVMF: true
+ # store related setup
+ STORE_ADDR: localhost:11028
+ STORE_DIR: $(pwd)/fake-store-blobdir
+ # image
+ IMAGE_MOUNTPOINT: /mnt/cloudimg
prepare: |
if [ "$TRUST_TEST_KEYS" = "false" ]; then
@@ -41,7 +47,7 @@ execute: |
#shellcheck source=tests/lib/nested.sh
. "$TESTSLIB"/nested.sh
- echo Expose the needed assertions through the fakestore
+ echo "Expose the needed assertions through the fakestore"
cp "$TESTSLIB"/assertions/developer1.account "$STORE_DIR/asserts"
cp "$TESTSLIB"/assertions/developer1.account-key "$STORE_DIR/asserts"
cp "$TESTSLIB"/assertions/testrootorg-store.account-key "$STORE_DIR/asserts"
@@ -49,17 +55,23 @@ execute: |
version="$(nested_get_version)"
- # build updated shim
- version=22
+ # Retrieve the gadget
snap download --basename=pc --channel="$version/edge" pc
- cp pc.snap pc.snap.orig
# the fakestore needs the assertion
snap ack pc.assert
+ # keep original blob just so we can find the assertion later
+ cp pc.snap pc.snap.orig
+
+ # New modified gadget
unsquashfs -d pc-gadget pc.snap
echo 'console=ttyS0 systemd.journald.forward_to_console=1' > pc-gadget/cmdline.extra
- # use the system-seed-null classic role
- sed -i 's/role: system-seed/role: system-seed-null/' pc-gadget/meta/gadget.yaml
-
+ if [ "$PARTIAL_GADGET" = true ]; then
+ # Change to a gadget.yaml where "partial" is being used
+ cp gadget-partial.yaml pc-gadget/meta/gadget.yaml
+ else
+ # use the system-seed-null classic role
+ sed -i 's/role: system-seed/role: system-seed-null/' pc-gadget/meta/gadget.yaml
+ fi
echo "Sign the shim binary"
KEY_NAME=$(tests.nested download snakeoil-key)
SNAKEOIL_KEY="$PWD/$KEY_NAME.key"
@@ -67,11 +79,13 @@ execute: |
tests.nested secboot-sign gadget pc-gadget "$SNAKEOIL_KEY" "$SNAKEOIL_CERT"
snap pack --filename=pc.snap pc-gadget/
- # get an updated kernel
+ # Retrieve kernel
snap download --basename=pc-kernel --channel="$version/edge" pc-kernel
- cp pc-kernel.snap pc-kernel.snap.orig
# the fakestore needs this assertion
snap ack pc-kernel.assert
+ # keep original blob just so we can find the assertion later
+ cp pc-kernel.snap pc-kernel.snap.orig
+ # Rebuild the kernel snap with an initramfs that contains the compiled snap-bootstrap
uc20_build_initramfs_kernel_snap "$PWD/pc-kernel.snap" "$NESTED_ASSETS_DIR"
mv "${NESTED_ASSETS_DIR}"/pc-kernel_*.snap pc-kernel.snap
@@ -87,7 +101,7 @@ execute: |
--snap ./pc.snap \
my.model \
./classic-seed
- # make the seed label more predictable for fake-installer auto-mode
+ # make the seed label more predictable for muinstaller auto-mode
LABEL=classic
mv ./classic-seed/system-seed/systems/* ./classic-seed/system-seed/systems/"$LABEL"
cp -a ./classic-seed/system-seed/ /var/lib/snapd/seed
@@ -104,6 +118,13 @@ execute: |
# create new disk for the installer to work on and attach to VM
truncate --size=4G fake-disk.img
+ if [ "$PARTIAL_GADGET" = true ]; then
+ # create a GPT partition table and add a partition that should be ignored by snapd
+ cat << 'EOF' | sfdisk fake-disk.img
+ label: gpt
+ start=2048, size=2048, type=21686148-6449-6E6F-744E-656564454649, name="BIOS Boot"
+ EOF
+ fi
# create a VM and mount a cloud image
tests.nested build-image classic
@@ -115,7 +136,9 @@ execute: |
# from classic->core and use nested_start_core_vm (like below)
#
# start it so that cloud-init creates ssh keys and user
- NESTED_PARAM_EXTRA="-drive file=$(pwd)/fake-disk.img,if=virtio,snapshot=off"
+ # Set a serial for the disk so it can be located as /dev/disk/by-id/virtio-target when invoking muinstaller
+ NESTED_PARAM_EXTRA="-drive file=$(pwd)/fake-disk.img,if=none,snapshot=off,format=raw,id=disk2 \
+ -device virtio-blk-pci,drive=disk2,serial=target"
tests.nested create-vm classic --extra-param "$NESTED_PARAM_EXTRA"
# make sure classic image is bootable with snakeoil keys
@@ -143,16 +166,11 @@ execute: |
# TODO: merge with classic /var/lib/snapd/seed eventually
# XXX: port scp -r to remote.push
#remote.push ./classic-seed/system-seed/ '~/'
- sshpass -p ubuntu scp -r -P 8022 -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ./classic-seed/system-seed/ user1@localhost:~/install-seed
+ sshpass -p ubuntu scp -r -P 8022 -o ConnectTimeout=10 \
+ -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ./classic-seed/system-seed/ user1@localhost:~/install-seed
remote.exec "sudo mv /home/user1/install-seed /var/lib/snapd/"
- # XXX: the code in DeviceManager.SystemAndGadgetInfo() will only work on
- # classic systems with modeenv right now (which is something we may need
- # to fix to work from the classic installer).
- # For now pretend we have a modeenv
- remote.exec 'echo "mode=run" | sudo tee -a /var/lib/snapd/modeenv'
- remote.exec 'sudo systemctl restart snapd'
-
# shutdown the classic vm to install with a core VM that supports
# secboot/tpm
tests.nested vm stop
@@ -161,7 +179,9 @@ execute: |
# HACK: convert "classic" qcow2 to raw "core" image because we need
# to boot with OVMF we really should fix this so that classic and
# core VMs are more similar
- qemu-img convert -f qcow2 -O raw "$NESTED_IMAGES_DIR/$(nested_get_image_name classic)" "$NESTED_IMAGES_DIR/$(nested_get_image_name core)"
+ qemu-img convert -f qcow2 -O raw \
+ "$NESTED_IMAGES_DIR/$(nested_get_image_name classic)" \
+ "$NESTED_IMAGES_DIR/$(nested_get_image_name core)"
# and we don't need the classic image anymore
# TODO: uncomment
#rm -f "$NESTED_IMAGES_DIR/$(nested_get_image_name classic)"
@@ -173,23 +193,27 @@ execute: |
# bind mount new seed
remote.exec "sudo mount -o bind /var/lib/snapd/install-seed /var/lib/snapd/seed"
- # push the muinstaller
+ # push and install muinstaller
remote.push "$MUINSTALLER_SNAP"
remote.exec "sudo snap install --classic --dangerous $(basename "$MUINSTALLER_SNAP")"
+ # Run installation
+ install_disk=$(remote.exec "readlink -f /dev/disk/by-id/virtio-target")
+ remote.exec "sudo muinstaller classic \
+ /snap/muinstaller/current/bin/mk-classic-rootfs.sh $install_disk"
- # TODO: use retry
- while true; do
- if remote.exec "sudo snap logs muinstaller" | MATCH "install done"; then
- break
- fi
- sleep 5
- done
remote.exec "sudo sync"
- # boot into the just installed drive
+ # Stop and remove the classic vm now that the attached disk (fake-disk.img)
+ # contains a freshly installed UC image.
tests.nested vm remove
sync
+ if [ "$PARTIAL_GADGET" = true ]; then
+ # Check that the non-UC partition has been respected
+ sfdisk -d fake-disk.img |
+ MATCH '^fake-disk.img1 : start=\s*2048, size=\s*2048, type=21686148-6449-6E6F-744E-656564454649,.*, name="BIOS Boot"'
+ fi
+
# HACK: rename to "core" image because we need to boot with OVMF
# we really should fix this so that classic and core VMs are more similar
mv fake-disk.img "$NESTED_IMAGES_DIR/$IMAGE_NAME"
@@ -200,6 +224,7 @@ execute: |
fatlabel /dev/disk/by-label/ubuntu-seed UBUNTU-SEED
kpartx -d "$IMAGE_PATH"
+ # Start installed image
tests.nested create-vm core --tpm-no-restart
# things look fine
@@ -249,6 +274,8 @@ execute: |
local snap_new_dir=$3
printf "Test installing snap from file %s\n" "$snap_name"
+ # The original blob is just used to locate the original revision assertion
+ # that will be used as a template for the new snap file revision assertion.
fakestore make-refreshable --dir "$STORE_DIR" \
--snap-orig-blob "$snap_orig_blob" --snap-blob "$snap_new_dir" "$snap_name"
remote.exec mkdir -p asserts/
diff --git a/tests/regression/lp-1848567/task.yaml b/tests/regression/lp-1848567/task.yaml
index 79b1085929..6202fd8ae1 100644
--- a/tests/regression/lp-1848567/task.yaml
+++ b/tests/regression/lp-1848567/task.yaml
@@ -27,7 +27,7 @@ execute: |
# caches. Use memory-observe-do to record the maximum resident memory usage
# and store it in a file.
"$TESTSTOOLS"/memory-observe-do -o memory-kb.txt apparmor_parser \
- --skip-read-cache --skip-cache --skip-kernel-load -Ono-expr-simplify \
+ --skip-read-cache --skip-cache --skip-kernel-load \
/var/lib/snapd/apparmor/profiles/snap-update-ns.test-snapd-app
# Without de-duplicating mount rules the compiler would take about 1.5GB on a
# 64 bit system. With the de-duplication logic it took less than 38MB on an