| author | Michael Vogt <mvo@ubuntu.com> | 2022-12-01 21:57:05 +0100 |
|---|---|---|
| committer | Michael Vogt <mvo@ubuntu.com> | 2022-12-01 21:57:05 +0100 |
| commit | 3c85653f315072b838edb4d4ec701cc89099fa15 | |
| tree | 91965ccef0e92871a034432510d17360acad5b72 | |
| parent | 54754e50ed503cf6331cd6d1b520b65ae3f8aa2f | |
| parent | ec8b10b7b312bb390a2f7badcf7a8f05e16f3169 | |
Merge remote-tracking branch 'upstream/master' into changelog-2.58
127 files changed, 5362 insertions, 1331 deletions
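
The largest functional change pulled in by this merge is placeholder support for aspect access patterns (see `aspects/aspects.go` and `aspects/aspects_test.go` in the diff below). As a rough orientation only — this sketch is not part of the commit and simply mirrors the new tests — a placeholder subkey such as `{key}` in an access name is matched against the requested name and substituted into the storage path before the data bag is read or written:

```go
package main

import (
	"fmt"

	"github.com/snapcore/snapd/aspects"
)

func main() {
	// A subkey wrapped in curly brackets ("{key}") is a placeholder: it
	// matches any value in the accessed name and that value is filled
	// into the corresponding placeholder in the path.
	dir, err := aspects.NewAspectDirectory("dir", map[string]interface{}{
		"foo": []map[string]string{
			{"name": "defaults.{key}", "path": "first.{key}.last"},
		},
	}, aspects.NewJSONDataBag(), aspects.NewJSONSchema())
	if err != nil {
		panic(err)
	}

	aspect := dir.Aspect("foo")

	// "defaults.abc" matches the pattern above, so the value is stored
	// under the path "first.abc.last" in the JSON data bag.
	if err := aspect.Set("defaults.abc", "some-value"); err != nil {
		panic(err)
	}

	var value string
	if err := aspect.Get("defaults.abc", &value); err != nil {
		panic(err)
	}
	fmt.Println(value) // some-value
}
```
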
diff --git a/.github/labeler.yml b/.github/labeler.yml index 1594a2fbcf..a08c77bdbd 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -3,6 +3,14 @@ # - https://github.com/actions/labeler/issues/112 # - https://github.com/actions/labeler/issues/104 -# Add 'Run nested' label to either any change on nested lib or nested test -Run nested: - - any: ["tests/lib/nested.sh", "tests/nested/**/*"] +# Add 'Run nested -auto-' label to either any change on nested lib or nested test +Run nested -auto-: + - tests/lib/nested.sh + - tests/nested/**/* + +# Add 'Needs Documentation -auto-' label to indicate a change needs changes in the docs +Needs Documentation -auto-: + - cmd/snap/**/*" + - daemon/**/* + - overlord/hookstate/ctlcmd/**/* + - overlord/configstate/configcore/**/* diff --git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml index 940c534a86..6d90797291 100644 --- a/.github/workflows/labeler.yaml +++ b/.github/workflows/labeler.yaml @@ -4,9 +4,12 @@ on: jobs: triage: + permissions: + contents: read + pull-requests: write runs-on: ubuntu-latest steps: - - uses: actions/labeler@main - with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" - sync-labels: "" + - uses: actions/labeler@v4 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + sync-labels: "true" diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index b284950e0a..22d78075fd 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -469,8 +469,8 @@ jobs: # "pipefail" ensures that a non-zero status from the spread is # propagated; and we use a subshell as this option could trigger # undesired changes elsewhere - echo "Running command: $SPREAD -abend $RUN_TESTS" - (set -o pipefail; $SPREAD -abend $RUN_TESTS | tee spread.log) + echo "Running command: $SPREAD $RUN_TESTS" + (set -o pipefail; $SPREAD $RUN_TESTS | tee spread.log) - name: Discard spread workers if: always() @@ -585,7 +585,7 @@ jobs: - name: Run spread tests # run if the commit is pushed to the release/* branch or there is a 'Run # nested' label set on the PR - if: "contains(github.event.pull_request.labels.*.name, 'Run nested') || contains(github.ref, 'refs/heads/release/')" + if: "contains(github.event.pull_request.labels.*.name, 'Run nested') || contains(github.event.pull_request.labels.*.name, 'Run nested -auto-') || contains(github.ref, 'refs/heads/release/')" env: SPREAD_GOOGLE_KEY: ${{ secrets.SPREAD_GOOGLE_KEY }} run: | @@ -601,7 +601,7 @@ jobs: # "pipefail" ensures that a non-zero status from the spread is # propagated; and we use a subshell as this option could trigger # undesired changes elsewhere - (set -o pipefail; spread -abend $RUN_TESTS | tee spread.log) + (set -o pipefail; spread $RUN_TESTS | tee spread.log) - name: Discard spread workers if: always() @@ -58,11 +58,12 @@ and on [Facebook](https://www.facebook.com/snapcraftio). 
| Service | Status | |-----|:---| -| [Github Actions](https://github.com/actions/) | ![Build Status][actions-image] | +| [Github Actions](https://github.com/actions/) | [![Build Status][actions-image]][actions-url] | | [GoReport](https://goreportcard.com/) | [![Go Report Card][goreportcard-image]][goreportcard-url] | | [Codecov](https://codecov.io/) | [![codecov][codecov-image]][codecov-url] | -[actions-image]: https://github.com/snapcore/snapd/actions +[actions-image]: https://github.com/snapcore/snapd/actions/workflows/test.yaml/badge.svg?branch=master +[actions-url]: https://github.com/snapcore/snapd/actions?query=branch%3Amaster+event%3Apush [goreportcard-image]: https://goreportcard.com/badge/github.com/snapcore/snapd [goreportcard-url]: https://goreportcard.com/report/github.com/snapcore/snapd diff --git a/aspects/aspects.go b/aspects/aspects.go index 79f9e4c592..d66f6a687f 100644 --- a/aspects/aspects.go +++ b/aspects/aspects.go @@ -24,6 +24,7 @@ import ( "encoding/json" "errors" "fmt" + "regexp" "strings" "github.com/snapcore/snapd/jsonutil" @@ -93,10 +94,10 @@ type Directory struct { // and access patterns. func NewAspectDirectory(name string, aspects map[string]interface{}, dataBag DataBag, schema Schema) (*Directory, error) { if len(aspects) == 0 { - return nil, errors.New(`cannot create aspects directory: no aspects`) + return nil, errors.New(`cannot define aspects directory: no aspects`) } - aspectDir := Directory{ + aspectDir := &Directory{ Name: name, dataBag: dataBag, schema: schema, @@ -106,55 +107,126 @@ func NewAspectDirectory(name string, aspects map[string]interface{}, dataBag Dat for name, v := range aspects { aspectPatterns, ok := v.([]map[string]string) if !ok { - return nil, errors.New("cannot create aspect: access patterns should be a list of maps") + return nil, fmt.Errorf("cannot define aspect %q: access patterns should be a list of maps", name) } else if len(aspectPatterns) == 0 { - return nil, errors.New("cannot create aspect without access patterns") + return nil, fmt.Errorf("cannot define aspect %q: no access patterns found", name) } - aspect := &Aspect{ - Name: name, - accessPatterns: make([]*accessPattern, 0, len(aspectPatterns)), - directory: aspectDir, + aspect, err := newAspect(aspectDir, name, aspectPatterns) + if err != nil { + return nil, fmt.Errorf("cannot define aspect %q: %w", name, err) } - for _, aspectPattern := range aspectPatterns { - name, ok := aspectPattern["name"] - if !ok || name == "" { - return nil, errors.New(`cannot create aspect pattern without a "name" field`) - } + aspectDir.aspects[name] = aspect + } - // TODO: either - // * Validate that a path isn't a subset of another - // (possibly somewhere else). Otherwise, we can - // write a user value in a subkey of a path (that - // should be map). - // * Our schema should be able to provide - // allowed/expected types given a path; these should - // guide and take precedence resolving conflicts - // between data in the data bags or written E.g - // possibly return null or empty object if at a path - // were the schema expects an object there is scalar? 
- path, ok := aspectPattern["path"] - if !ok || path == "" { - return nil, errors.New(`cannot create aspect pattern without a "path" field`) - } + return aspectDir, nil +} - access, err := newAccessType(aspectPattern["access"]) - if err != nil { - return nil, fmt.Errorf("cannot create aspect pattern: %w", err) - } +func newAspect(dir *Directory, name string, aspectPatterns []map[string]string) (*Aspect, error) { + aspect := &Aspect{ + Name: name, + accessPatterns: make([]*accessPattern, 0, len(aspectPatterns)), + directory: dir, + } - aspect.accessPatterns = append(aspect.accessPatterns, &accessPattern{ - name: name, - path: path, - access: access, - }) + for _, aspectPattern := range aspectPatterns { + name, ok := aspectPattern["name"] + if !ok || name == "" { + return nil, errors.New(`access patterns must have a "name" field`) } - aspectDir.aspects[name] = aspect + path, ok := aspectPattern["path"] + if !ok || path == "" { + return nil, errors.New(`access patterns must have a "path" field`) + } + + if err := validateNamePathPair(name, path); err != nil { + return nil, err + } + + accPattern, err := newAccessPattern(name, path, aspectPattern["access"]) + if err != nil { + return nil, err + } + + aspect.accessPatterns = append(aspect.accessPatterns, accPattern) } - return &aspectDir, nil + return aspect, nil +} + +// validateNamePathPair checks that: +// * names and paths are composed of valid subkeys (see: validateAspectString) +// * all placeholders in a name are in the path and vice-versa +func validateNamePathPair(name, path string) error { + if err := validateAspectDottedPath(name); err != nil { + return fmt.Errorf("invalid access name %q: %w", name, err) + } + + if err := validateAspectDottedPath(path); err != nil { + return fmt.Errorf("invalid path %q: %w", path, err) + } + + namePlaceholders, pathPlaceholders := getPlaceholders(name), getPlaceholders(path) + if len(namePlaceholders) != len(pathPlaceholders) { + return fmt.Errorf("access name %q and path %q have mismatched placeholders", name, path) + } + + for placeholder := range namePlaceholders { + if !pathPlaceholders[placeholder] { + return fmt.Errorf("placeholder %q from access name %q is absent from path %q", + placeholder, name, path) + } + } + + return nil +} + +var ( + subkeyRegex = "(?:[a-z0-9]+-?)*[a-z](?:-?[a-z0-9])*" + validSubkey = regexp.MustCompile(fmt.Sprintf("^%s$", subkeyRegex)) + validPlaceholder = regexp.MustCompile(fmt.Sprintf("^{%s}$", subkeyRegex)) +) + +// validateAspectDottedPath validates that names/paths in an aspect definition are: +// * composed of non-empty, dot-separated subkeys with optional placeholders ("foo.{bar}") +// * non-placeholder subkeys are made up of lowercase alphanumeric ASCII characters, +// optionally with dashes between alphanumeric characters (e.g., "a-b-c") +// * placeholder subkeys are composed of non-placeholder subkeys wrapped in curly brackets +func validateAspectDottedPath(path string) (err error) { + subkeys := strings.Split(path, ".") + + for _, subkey := range subkeys { + if subkey == "" { + return errors.New("cannot have empty subkeys") + } + + if !(validSubkey.MatchString(subkey) || validPlaceholder.MatchString(subkey)) { + return fmt.Errorf("invalid subkey %q", subkey) + } + } + + return nil +} + +// getPlaceholders returns the set of placeholders in the string or nil, if +// there is none. 
+func getPlaceholders(aspectStr string) map[string]bool { + var placeholders map[string]bool + + subkeys := strings.Split(aspectStr, ".") + for _, subkey := range subkeys { + if subkey[0] == '{' && subkey[len(subkey)-1] == '}' { + if placeholders == nil { + placeholders = make(map[string]bool) + } + + placeholders[subkey] = true + } + } + + return placeholders } // Aspect returns an aspect from the aspect directory. @@ -166,21 +238,28 @@ func (d *Directory) Aspect(aspect string) *Aspect { type Aspect struct { Name string accessPatterns []*accessPattern - directory Directory + directory *Directory } // Set sets the named aspect to a specified value. func (a *Aspect) Set(name string, value interface{}) error { - for _, p := range a.accessPatterns { - if p.name != name { + nameSubkeys := strings.Split(name, ".") + for _, accessPatt := range a.accessPatterns { + placeholders, ok := accessPatt.match(nameSubkeys) + if !ok { continue } - if !p.isWriteable() { + path, err := accessPatt.getPath(placeholders) + if err != nil { + return err + } + + if !accessPatt.isWriteable() { return fmt.Errorf("cannot set %q: path is not writeable", name) } - if err := a.directory.dataBag.Set(p.path, value); err != nil { + if err := a.directory.dataBag.Set(path, value); err != nil { return err } @@ -190,7 +269,6 @@ func (a *Aspect) Set(name string, value interface{}) error { } return a.directory.schema.Validate(data) - } return &NotFoundError{fmt.Sprintf("cannot set %q: name not found", name)} @@ -199,16 +277,23 @@ func (a *Aspect) Set(name string, value interface{}) error { // Get returns the aspect value identified by the name. If either the named aspect // or the corresponding value can't be found, a NotFoundError is returned. func (a *Aspect) Get(name string, value interface{}) error { - for _, p := range a.accessPatterns { - if p.name != name { + subkeys := strings.Split(name, ".") + for _, accessPatt := range a.accessPatterns { + placeholders, ok := accessPatt.match(subkeys) + if !ok { continue } - if !p.isReadable() { + path, err := accessPatt.getPath(placeholders) + if err != nil { + return err + } + + if !accessPatt.isReadable() { return fmt.Errorf("cannot get %q: path is not readable", name) } - if err := a.directory.dataBag.Get(p.path, value); err != nil { + if err := a.directory.dataBag.Get(path, value); err != nil { if errors.Is(err, &NotFoundError{}) { return &NotFoundError{fmt.Sprintf("cannot get %q: %v", name, err)} } @@ -221,13 +306,95 @@ func (a *Aspect) Get(name string, value interface{}) error { return &NotFoundError{fmt.Sprintf("cannot get %q: name not found", name)} } -// accessPattern holds information on how to access an aspect. 
+func newAccessPattern(name, path, accesstype string) (*accessPattern, error) { + accType, err := newAccessType(accesstype) + if err != nil { + return nil, fmt.Errorf("cannot aspect pattern: %w", err) + } + + nameSubkeys := strings.Split(name, ".") + nameMatchers := make([]nameMatcher, 0, len(nameSubkeys)) + for _, subkey := range nameSubkeys { + var patt nameMatcher + if subkey[0] == '{' && subkey[len(subkey)-1] == '}' { + patt = placeholder(subkey[1 : len(subkey)-1]) + } else { + patt = literal(subkey) + } + + nameMatchers = append(nameMatchers, patt) + } + + pathSubkeys := strings.Split(path, ".") + pathWriters := make([]pathWriter, 0, len(pathSubkeys)) + for _, subkey := range pathSubkeys { + var patt pathWriter + if subkey[0] == '{' && subkey[len(subkey)-1] == '}' { + patt = placeholder(subkey[1 : len(subkey)-1]) + } else { + patt = literal(subkey) + } + + pathWriters = append(pathWriters, patt) + } + + return &accessPattern{ + name: nameMatchers, + path: pathWriters, + access: accType, + }, nil +} + +// accessPattern represents an individual aspect access pattern. It can be used +// to match an input name and map it into a corresponding path, potentially with +// placeholders filled in. type accessPattern struct { - name string - path string + name []nameMatcher + path []pathWriter access accessType } +// match takes a list of subkeys and returns true if those subkeys match the pattern's +// name. If the name contains placeholders, those will be mapped to their values in +// the supplied subkeys and set in the map. Example: if pattern.name=["{foo}", "b", "{bar}"], +// and nameSubkeys=["a", "b", "c"], then it returns true and the map will contain +// {"foo": "a", "bar": "c"}. +func (p *accessPattern) match(nameSubkeys []string) (map[string]string, bool) { + if len(p.name) != len(nameSubkeys) { + return nil, false + } + + placeholders := make(map[string]string) + for i, subkey := range nameSubkeys { + if !p.name[i].match(subkey, placeholders) { + return nil, false + } + } + + return placeholders, true +} + +// getPath takes a map of placeholders to their values in the aspect name and +// returns the path with its placeholder values filled in with the map's values. +func (p *accessPattern) getPath(placeholders map[string]string) (string, error) { + sb := &strings.Builder{} + + for _, subkey := range p.path { + if sb.Len() > 0 { + if _, err := sb.WriteRune('.'); err != nil { + return "", err + } + } + + if err := subkey.write(sb, placeholders); err != nil { + return "", err + } + + } + + return sb.String(), nil +} + func (p accessPattern) isReadable() bool { return p.access == readWrite || p.access == read } @@ -236,6 +403,55 @@ func (p accessPattern) isWriteable() bool { return p.access == readWrite || p.access == write } +// pattern is an individual subkey of a dot-separated name or path pattern. It +// can be a literal value of a placeholder delineated by curly brackets. +type nameMatcher interface { + match(subkey string, placeholders map[string]string) bool +} + +type pathWriter interface { + write(sb *strings.Builder, placeholders map[string]string) error +} + +// placeholder represents a subkey of a name/path (e.g., "{foo}") that can match +// with any value and map it from the input name to the path. +type placeholder string + +// match adds a mapping to the placeholders map from this placeholder key to the +// supplied name subkey and returns true (a placeholder matches with any value). 
+func (p placeholder) match(subkey string, placeholders map[string]string) bool { + placeholders[string(p)] = subkey + return true +} + +// write writes the value from the placeholders map corresponding to this placeholder +// key into the strings.Builder. +func (p placeholder) write(sb *strings.Builder, placeholders map[string]string) error { + subkey, ok := placeholders[string(p)] + if !ok { + // the validation at create-time checks for mismatched placeholders so this + // shouldn't be possible save for programmer error + return fmt.Errorf("cannot find path placeholder %q in the aspect name", p) + } + + _, err := sb.WriteString(subkey) + return err +} + +// literal is a non-placeholder name/path subkey. +type literal string + +// match returns true if the subkey is equal to the literal. +func (p literal) match(subkey string, _ map[string]string) bool { + return string(p) == subkey +} + +// write writes the literal subkey into the strings.Builder. +func (p literal) write(sb *strings.Builder, _ map[string]string) error { + _, err := sb.WriteString(string(p)) + return err +} + // JSONDataBag is a simple DataBag implementation that keeps JSON in-memory. type JSONDataBag map[string]json.RawMessage diff --git a/aspects/aspects_test.go b/aspects/aspects_test.go index 47c5ed4b2f..34ac97428f 100644 --- a/aspects/aspects_test.go +++ b/aspects/aspects_test.go @@ -37,31 +37,31 @@ var _ = Suite(&aspectSuite{}) func (*aspectSuite) TestNewAspectDirectory(c *C) { _, err := aspects.NewAspectDirectory("foo", nil, aspects.NewJSONDataBag(), aspects.NewJSONSchema()) - c.Assert(err, ErrorMatches, `cannot create aspects directory: no aspects`) + c.Assert(err, ErrorMatches, `cannot define aspects directory: no aspects`) _, err = aspects.NewAspectDirectory("foo", map[string]interface{}{ "bar": "baz", }, aspects.NewJSONDataBag(), aspects.NewJSONSchema()) - c.Assert(err, ErrorMatches, `cannot create aspect: access patterns should be a list of maps`) + c.Assert(err, ErrorMatches, `cannot define aspect "bar": access patterns should be a list of maps`) _, err = aspects.NewAspectDirectory("foo", map[string]interface{}{ "bar": []map[string]string{}, }, aspects.NewJSONDataBag(), aspects.NewJSONSchema()) - c.Assert(err, ErrorMatches, `cannot create aspect without access patterns`) + c.Assert(err, ErrorMatches, `cannot define aspect "bar": no access patterns found`) _, err = aspects.NewAspectDirectory("foo", map[string]interface{}{ "bar": []map[string]string{ {"path": "foo"}, }, }, aspects.NewJSONDataBag(), aspects.NewJSONSchema()) - c.Assert(err, ErrorMatches, `cannot create aspect pattern without a "name" field`) + c.Assert(err, ErrorMatches, `cannot define aspect "bar": access patterns must have a "name" field`) _, err = aspects.NewAspectDirectory("foo", map[string]interface{}{ "bar": []map[string]string{ {"name": "foo"}, }, }, aspects.NewJSONDataBag(), aspects.NewJSONSchema()) - c.Assert(err, ErrorMatches, `cannot create aspect pattern without a "path" field`) + c.Assert(err, ErrorMatches, `cannot define aspect "bar": access patterns must have a "path" field`) aspectDir, err := aspects.NewAspectDirectory("foo", map[string]interface{}{ "bar": []map[string]string{ @@ -270,3 +270,176 @@ func (s *aspectSuite) TestAspectsAccessControl(c *C) { } } } + +type witnessDataBag struct { + bag aspects.DataBag + getPath, setPath string +} + +func newSpyDataBag(bag aspects.DataBag) *witnessDataBag { + return &witnessDataBag{bag: bag} +} + +func (s *witnessDataBag) Get(path string, value interface{}) error { + s.getPath = path + 
return s.bag.Get(path, value) +} + +func (s *witnessDataBag) Set(path string, value interface{}) error { + s.setPath = path + return s.bag.Set(path, value) +} + +func (s *witnessDataBag) Data() ([]byte, error) { + return s.bag.Data() +} + +// getLastPaths returns the last paths passed into Get and Set and resets them. +func (s *witnessDataBag) getLastPaths() (get, set string) { + get, set = s.getPath, s.setPath + s.getPath, s.setPath = "", "" + return get, set +} + +func (s *aspectSuite) TestAspectAssertionWithPlaceholder(c *C) { + bag := newSpyDataBag(aspects.NewJSONDataBag()) + + aspectDir, err := aspects.NewAspectDirectory("dir", map[string]interface{}{ + "foo": []map[string]string{ + {"name": "defaults.{foo}", "path": "first.{foo}.last"}, + {"name": "{bar}.name", "path": "first.{bar}"}, + {"name": "first.{baz}.last", "path": "{baz}.last"}, + {"name": "first.{foo}.{bar}", "path": "{foo}.mid.{bar}"}, + {"name": "{foo}.mid2.{bar}", "path": "{bar}.mid2.{foo}"}, + {"name": "multi.{foo}", "path": "{foo}.multi.{foo}"}, + }, + }, bag, aspects.NewJSONSchema()) + c.Assert(err, IsNil) + + aspect := aspectDir.Aspect("foo") + + for _, t := range []struct { + testName string + name string + path string + }{ + { + testName: "placeholder last to mid", + name: "defaults.abc", + path: "first.abc.last", + }, + { + testName: "placeholder first to last", + name: "foo.name", + path: "first.foo", + }, + { + testName: "placeholder mid to first", + name: "first.foo.last", + path: "foo.last", + }, + { + testName: "two placeholders in order", + name: "first.one.two", + path: "one.mid.two", + }, + { + testName: "two placeholders out of order", + name: "first2.mid2.two2", + path: "two2.mid2.first2", + }, + { + testName: "one placeholder mapping to several", + name: "multi.firstLast", + path: "firstLast.multi.firstLast", + }, + } { + cmt := Commentf("sub-test %q failed", t.testName) + err := aspect.Set(t.name, "expectedValue") + c.Assert(err, IsNil, cmt) + + var obtainedValue string + err = aspect.Get(t.name, &obtainedValue) + c.Assert(err, IsNil, cmt) + + c.Assert(obtainedValue, Equals, "expectedValue", cmt) + + getPath, setPath := bag.getLastPaths() + c.Assert(getPath, Equals, t.path, cmt) + c.Assert(setPath, Equals, t.path, cmt) + } +} + +func (s *aspectSuite) TestAspectNameAndPathValidation(c *C) { + type testcase struct { + testName string + name string + path string + err string + } + + for _, tc := range []testcase{ + { + testName: "empty subkeys in name", + name: "a..b", path: "a.b", err: `invalid access name "a..b": cannot have empty subkeys`, + }, + { + testName: "empty subkeys in path", + name: "a.b", path: "c..b", err: `invalid path "c..b": cannot have empty subkeys`, + }, + { + testName: "placeholder mismatch (same number)", + name: "bad.{foo}", path: "bad.{bar}", err: `placeholder "{foo}" from access name "bad.{foo}" is absent from path "bad.{bar}"`, + }, + { + testName: "placeholder mismatch (different number)", + name: "{foo}", path: "{foo}.bad.{bar}", err: `access name "{foo}" and path "{foo}.bad.{bar}" have mismatched placeholders`, + }, + { + testName: "invalid character in name: $", + name: "a.b$", path: "bad", err: `invalid access name "a.b$": invalid subkey "b$"`, + }, + { + testName: "invalid character in path: é", + name: "a.b", path: "a.é", err: `invalid path "a.é": invalid subkey "é"`, + }, + { + testName: "invalid character in name: _", + name: "a.b_c", path: "a.b-c", err: `invalid access name "a.b_c": invalid subkey "b_c"`, + }, + { + testName: "invalid leading dash", + name: "-a", path: 
"a", err: `invalid access name "-a": invalid subkey "-a"`, + }, + { + testName: "invalid trailing dash", + name: "a", path: "a-", err: `invalid path "a-": invalid subkey "a-"`, + }, + { + testName: "missing closing curly bracket", + name: "{a{", path: "a", err: `invalid access name "{a{": invalid subkey "{a{"`, + }, + { + testName: "missing opening curly bracket", + name: "a", path: "}a}", err: `invalid path "}a}": invalid subkey "}a}"`, + }, + { + testName: "curly brackets not wrapping subkey", + name: "a", path: "a.b{a}c", err: `invalid path "a.b{a}c": invalid subkey "b{a}c"`, + }, + { + testName: "invalid whitespace character", + name: "a. .c", path: "a.b", err: `invalid access name "a. .c": invalid subkey " "`, + }, + } { + _, err := aspects.NewAspectDirectory("foo", map[string]interface{}{ + "foo": []map[string]string{ + {"name": tc.name, "path": tc.path}, + }, + }, nil, nil) + + cmt := Commentf("sub-test %q failed", tc.testName) + c.Assert(err, Not(IsNil), cmt) + c.Assert(err.Error(), Equals, `cannot define aspect "foo": `+tc.err, cmt) + } +} diff --git a/boot/boot.go b/boot/boot.go index 46b0d56128..41da0f2f98 100644 --- a/boot/boot.go +++ b/boot/boot.go @@ -268,18 +268,11 @@ func fixedInUse(inUse bool) InUseFunc { // InUse returns a checker for whether a given name/revision is used in the // boot environment for snaps of the relevant snap type. func InUse(typ snap.Type, dev snap.Device) (InUseFunc, error) { - if dev.Classic() { - // no boot state on classic - return fixedInUse(false), nil - } if !dev.RunMode() { // ephemeral mode, block manipulations for now return fixedInUse(true), nil } - switch typ { - case snap.TypeKernel, snap.TypeBase, snap.TypeOS: - break - default: + if !SnapTypeParticipatesInBoot(typ, dev) || typ == snap.TypeGadget { return fixedInUse(false), nil } cands := make([]snap.PlaceInfo, 0, 2) diff --git a/boot/boot_test.go b/boot/boot_test.go index 60c6a7df10..8194b47370 100644 --- a/boot/boot_test.go +++ b/boot/boot_test.go @@ -5221,3 +5221,39 @@ func (s *bootenv20Suite) TestCoreParticipant20UndoBaseSnapInstallNewNoReseal(c * // no reseal c.Check(resealCalls, Equals, 0) } + +func (s *bootenv20Suite) TestInUseClassicWithModes(c *C) { + classicWithModesDev := boottest.MockClassicWithModesDevice("", nil) + c.Assert(classicWithModesDev.IsCoreBoot(), Equals, true) + + r := setupUC20Bootenv( + c, + s.bootloader, + &bootenv20Setup{ + modeenv: &boot.Modeenv{ + // gadget is gadget1 + Gadget: s.gadget1.Filename(), + // current kernels is just kern1 + CurrentKernels: []string{s.kern1.Filename()}, + // operating mode is run + Mode: "run", + // RecoverySystem is unset, as it should be during run mode + RecoverySystem: "", + }, + // enabled kernel is kern1 + kern: s.kern1, + // no try kernel enabled + tryKern: nil, + // kernel status is default + kernStatus: boot.DefaultStatus, + }) + defer r() + + inUse, err := boot.InUse(snap.TypeKernel, classicWithModesDev) + c.Check(err, IsNil) + c.Check(inUse(s.kern1.SnapName(), s.kern1.SnapRevision()), Equals, true) + c.Check(inUse(s.kern2.SnapName(), s.kern2.SnapRevision()), Equals, false) + + _, err = boot.InUse(snap.TypeBase, classicWithModesDev) + c.Check(err, IsNil) +} diff --git a/boot/export_test.go b/boot/export_test.go index 440e68fd8a..39b6c79a00 100644 --- a/boot/export_test.go +++ b/boot/export_test.go @@ -64,7 +64,7 @@ var ( NewTrustedAssetsCache = newTrustedAssetsCache ObserveSuccessfulBootWithAssets = observeSuccessfulBootAssets - SealKeyToModeenv = sealKeyToModeenv + SealKeyToModeenv = sealKeyToModeenvImpl 
ResealKeyToModeenv = resealKeyToModeenv RecoveryBootChainsForSystems = recoveryBootChainsForSystems SealKeyModelParams = sealKeyModelParams @@ -77,7 +77,6 @@ var ( type BootAssetsMap = bootAssetsMap type BootCommandLines = bootCommandLines type TrackedAsset = trackedAsset -type SealKeyToModeenvFlags = sealKeyToModeenvFlags func (t *TrackedAsset) Equals(blName, name, hash string) error { equal := t.hash == hash && diff --git a/boot/makebootable.go b/boot/makebootable.go index 1fca5485b7..e3c5c5678d 100644 --- a/boot/makebootable.go +++ b/boot/makebootable.go @@ -32,6 +32,8 @@ import ( "github.com/snapcore/snapd/snap/snapfile" ) +var sealKeyToModeenv = sealKeyToModeenvImpl + // BootableSet represents the boot snaps of a system to be made bootable. type BootableSet struct { Base *snap.Info diff --git a/boot/seal.go b/boot/seal.go index c9cd880a0a..4dac6e3375 100644 --- a/boot/seal.go +++ b/boot/seal.go @@ -90,6 +90,18 @@ func MockResealKeyToModeenv(f func(rootdir string, modeenv *Modeenv, expectResea } } +// MockSealKeyToModeenvFlags is used for testing from other packages. +type MockSealKeyToModeenvFlags = sealKeyToModeenvFlags + +// MockSealKeyToModeenv is used for testing from other packages. +func MockSealKeyToModeenv(f func(key, saveKey keys.EncryptionKey, model *asserts.Model, modeenv *Modeenv, flags MockSealKeyToModeenvFlags) error) (restore func()) { + old := sealKeyToModeenv + sealKeyToModeenv = f + return func() { + sealKeyToModeenv = old + } +} + func bootChainsFileUnder(rootdir string) string { return filepath.Join(dirs.SnapFDEDirUnder(rootdir), "boot-chains") } @@ -109,10 +121,10 @@ type sealKeyToModeenvFlags struct { SnapsDir string } -// sealKeyToModeenv seals the supplied keys to the parameters specified +// sealKeyToModeenvImpl seals the supplied keys to the parameters specified // in modeenv. // It assumes to be invoked in install mode. 
-func sealKeyToModeenv(key, saveKey keys.EncryptionKey, model *asserts.Model, modeenv *Modeenv, flags sealKeyToModeenvFlags) error { +func sealKeyToModeenvImpl(key, saveKey keys.EncryptionKey, model *asserts.Model, modeenv *Modeenv, flags sealKeyToModeenvFlags) error { // make sure relevant locations exist for _, p := range []string{ InitramfsSeedEncryptionKeyDir, diff --git a/boot/seal_test.go b/boot/seal_test.go index 68343cd175..6ee987ab4a 100644 --- a/boot/seal_test.go +++ b/boot/seal_test.go @@ -316,7 +316,7 @@ func (s *sealSuite) TestSealKeyToModeenv(c *C) { }) defer restore() - err = boot.SealKeyToModeenv(myKey, myKey2, model, modeenv, boot.SealKeyToModeenvFlags{ + err = boot.SealKeyToModeenv(myKey, myKey2, model, modeenv, boot.MockSealKeyToModeenvFlags{ FactoryReset: tc.factoryReset, }) c.Check(pcrHandleOfKeyCalls, Equals, tc.expPCRHandleOfKeyCalls) @@ -1665,7 +1665,7 @@ func (s *sealSuite) TestSealToModeenvWithFdeHookHappy(c *C) { key := keys.EncryptionKey{1, 2, 3, 4} saveKey := keys.EncryptionKey{5, 6, 7, 8} - err := boot.SealKeyToModeenv(key, saveKey, model, modeenv, boot.SealKeyToModeenvFlags{HasFDESetupHook: true}) + err := boot.SealKeyToModeenv(key, saveKey, model, modeenv, boot.MockSealKeyToModeenvFlags{HasFDESetupHook: true}) c.Assert(err, IsNil) // check that runFDESetupHook was called the expected way c.Check(runFDESetupHookReqs, DeepEquals, []*fde.SetupRequest{ @@ -1705,7 +1705,7 @@ func (s *sealSuite) TestSealToModeenvWithFdeHookSad(c *C) { saveKey := keys.EncryptionKey{5, 6, 7, 8} model := boottest.MakeMockUC20Model() - err := boot.SealKeyToModeenv(key, saveKey, model, modeenv, boot.SealKeyToModeenvFlags{HasFDESetupHook: true}) + err := boot.SealKeyToModeenv(key, saveKey, model, modeenv, boot.MockSealKeyToModeenvFlags{HasFDESetupHook: true}) c.Assert(err, ErrorMatches, "hook failed") marker := filepath.Join(dirs.SnapFDEDirUnder(filepath.Join(dirs.GlobalRootDir, "/run/mnt/ubuntu-data/system-data")), "sealed-keys") c.Check(marker, testutil.FileAbsent) diff --git a/bootloader/grub.go b/bootloader/grub.go index 1b10205c8f..c77d9e5446 100644 --- a/bootloader/grub.go +++ b/bootloader/grub.go @@ -470,7 +470,11 @@ func (g *grub) getGrubBootAssetsForArch() (*grubBootAssetPath, error) { if g.prepareImageTime { return nil, fmt.Errorf("internal error: retrieving boot assets at prepare image time") } - assets := grubBootAssetsForArch[arch.DpkgArchitecture()] + archi := arch.DpkgArchitecture() + assets, ok := grubBootAssetsForArch[archi] + if !ok { + return nil, fmt.Errorf("cannot find grub assets for %q", archi) + } return &assets, nil } diff --git a/bootloader/grub_test.go b/bootloader/grub_test.go index 667932a458..731e712f42 100644 --- a/bootloader/grub_test.go +++ b/bootloader/grub_test.go @@ -29,6 +29,7 @@ import ( "github.com/mvo5/goconfigparser" . 
"gopkg.in/check.v1" + "github.com/snapcore/snapd/arch" "github.com/snapcore/snapd/arch/archtest" "github.com/snapcore/snapd/bootloader" "github.com/snapcore/snapd/bootloader/assets" @@ -621,6 +622,26 @@ func (s *grubTestSuite) TestKernelExtractionRunImageKernelNoSlashBoot(c *C) { c.Check(exists, Equals, false) } +func (s *grubTestSuite) TestListTrustedAssetsNotForArch(c *C) { + oldArch := arch.DpkgArchitecture() + defer arch.SetArchitecture(arch.ArchitectureType(oldArch)) + arch.SetArchitecture("non-existing-architecture") + + s.makeFakeGrubEFINativeEnv(c, []byte(`this is +some random boot config`)) + + opts := &bootloader.Options{NoSlashBoot: true} + g := bootloader.NewGrub(s.rootdir, opts) + c.Assert(g, NotNil) + + tg, ok := g.(bootloader.TrustedAssetsBootloader) + c.Assert(ok, Equals, true) + + ta, err := tg.TrustedAssets() + c.Check(err, ErrorMatches, `cannot find grub assets for "non-existing-architecture"`) + c.Check(ta, HasLen, 0) +} + func (s *grubTestSuite) TestListManagedAssets(c *C) { s.makeFakeGrubEFINativeEnv(c, []byte(`this is some random boot config`)) diff --git a/build-aux/snap/local/apparmor/af_names.h b/build-aux/snap/local/apparmor/af_names.h new file mode 100644 index 0000000000..27ad978458 --- /dev/null +++ b/build-aux/snap/local/apparmor/af_names.h @@ -0,0 +1,240 @@ +/* + this file was generated on a Ubuntu kinetic install from the upstream + apparmor-3.0.7 release tarball as follows: + + AA_VER=3.0.7 + wget \ + "https://launchpad.net/apparmor/3.0/${AA_VER}/+download/apparmor-${AA_VER}.tar.gz" + tar xf "apparmor-${AA_VER}.tar.gz" + cd "apparmor-${AA_VER}" + make -C parser af_names.h + + */ +#ifndef AF_UNSPEC +# define AF_UNSPEC 0 +#endif +AA_GEN_NET_ENT("unspec", AF_UNSPEC) + +#ifndef AF_UNIX +# define AF_UNIX 1 +#endif +AA_GEN_NET_ENT("unix", AF_UNIX) + +#ifndef AF_INET +# define AF_INET 2 +#endif +AA_GEN_NET_ENT("inet", AF_INET) + +#ifndef AF_AX25 +# define AF_AX25 3 +#endif +AA_GEN_NET_ENT("ax25", AF_AX25) + +#ifndef AF_IPX +# define AF_IPX 4 +#endif +AA_GEN_NET_ENT("ipx", AF_IPX) + +#ifndef AF_APPLETALK +# define AF_APPLETALK 5 +#endif +AA_GEN_NET_ENT("appletalk", AF_APPLETALK) + +#ifndef AF_NETROM +# define AF_NETROM 6 +#endif +AA_GEN_NET_ENT("netrom", AF_NETROM) + +#ifndef AF_BRIDGE +# define AF_BRIDGE 7 +#endif +AA_GEN_NET_ENT("bridge", AF_BRIDGE) + +#ifndef AF_ATMPVC +# define AF_ATMPVC 8 +#endif +AA_GEN_NET_ENT("atmpvc", AF_ATMPVC) + +#ifndef AF_X25 +# define AF_X25 9 +#endif +AA_GEN_NET_ENT("x25", AF_X25) + +#ifndef AF_INET6 +# define AF_INET6 10 +#endif +AA_GEN_NET_ENT("inet6", AF_INET6) + +#ifndef AF_ROSE +# define AF_ROSE 11 +#endif +AA_GEN_NET_ENT("rose", AF_ROSE) + +#ifndef AF_NETBEUI +# define AF_NETBEUI 13 +#endif +AA_GEN_NET_ENT("netbeui", AF_NETBEUI) + +#ifndef AF_SECURITY +# define AF_SECURITY 14 +#endif +AA_GEN_NET_ENT("security", AF_SECURITY) + +#ifndef AF_KEY +# define AF_KEY 15 +#endif +AA_GEN_NET_ENT("key", AF_KEY) + +#ifndef AF_NETLINK +# define AF_NETLINK 16 +#endif +AA_GEN_NET_ENT("netlink", AF_NETLINK) + +#ifndef AF_PACKET +# define AF_PACKET 17 +#endif +AA_GEN_NET_ENT("packet", AF_PACKET) + +#ifndef AF_ASH +# define AF_ASH 18 +#endif +AA_GEN_NET_ENT("ash", AF_ASH) + +#ifndef AF_ECONET +# define AF_ECONET 19 +#endif +AA_GEN_NET_ENT("econet", AF_ECONET) + +#ifndef AF_ATMSVC +# define AF_ATMSVC 20 +#endif +AA_GEN_NET_ENT("atmsvc", AF_ATMSVC) + +#ifndef AF_RDS +# define AF_RDS 21 +#endif +AA_GEN_NET_ENT("rds", AF_RDS) + +#ifndef AF_SNA +# define AF_SNA 22 +#endif +AA_GEN_NET_ENT("sna", AF_SNA) + +#ifndef AF_IRDA +# define AF_IRDA 23 +#endif 
+AA_GEN_NET_ENT("irda", AF_IRDA) + +#ifndef AF_PPPOX +# define AF_PPPOX 24 +#endif +AA_GEN_NET_ENT("pppox", AF_PPPOX) + +#ifndef AF_WANPIPE +# define AF_WANPIPE 25 +#endif +AA_GEN_NET_ENT("wanpipe", AF_WANPIPE) + +#ifndef AF_LLC +# define AF_LLC 26 +#endif +AA_GEN_NET_ENT("llc", AF_LLC) + +#ifndef AF_IB +# define AF_IB 27 +#endif +AA_GEN_NET_ENT("ib", AF_IB) + +#ifndef AF_MPLS +# define AF_MPLS 28 +#endif +AA_GEN_NET_ENT("mpls", AF_MPLS) + +#ifndef AF_CAN +# define AF_CAN 29 +#endif +AA_GEN_NET_ENT("can", AF_CAN) + +#ifndef AF_TIPC +# define AF_TIPC 30 +#endif +AA_GEN_NET_ENT("tipc", AF_TIPC) + +#ifndef AF_BLUETOOTH +# define AF_BLUETOOTH 31 +#endif +AA_GEN_NET_ENT("bluetooth", AF_BLUETOOTH) + +#ifndef AF_IUCV +# define AF_IUCV 32 +#endif +AA_GEN_NET_ENT("iucv", AF_IUCV) + +#ifndef AF_RXRPC +# define AF_RXRPC 33 +#endif +AA_GEN_NET_ENT("rxrpc", AF_RXRPC) + +#ifndef AF_ISDN +# define AF_ISDN 34 +#endif +AA_GEN_NET_ENT("isdn", AF_ISDN) + +#ifndef AF_PHONET +# define AF_PHONET 35 +#endif +AA_GEN_NET_ENT("phonet", AF_PHONET) + +#ifndef AF_IEEE802154 +# define AF_IEEE802154 36 +#endif +AA_GEN_NET_ENT("ieee802154", AF_IEEE802154) + +#ifndef AF_CAIF +# define AF_CAIF 37 +#endif +AA_GEN_NET_ENT("caif", AF_CAIF) + +#ifndef AF_ALG +# define AF_ALG 38 +#endif +AA_GEN_NET_ENT("alg", AF_ALG) + +#ifndef AF_NFC +# define AF_NFC 39 +#endif +AA_GEN_NET_ENT("nfc", AF_NFC) + +#ifndef AF_VSOCK +# define AF_VSOCK 40 +#endif +AA_GEN_NET_ENT("vsock", AF_VSOCK) + +#ifndef AF_KCM +# define AF_KCM 41 +#endif +AA_GEN_NET_ENT("kcm", AF_KCM) + +#ifndef AF_QIPCRTR +# define AF_QIPCRTR 42 +#endif +AA_GEN_NET_ENT("qipcrtr", AF_QIPCRTR) + +#ifndef AF_SMC +# define AF_SMC 43 +#endif +AA_GEN_NET_ENT("smc", AF_SMC) + +#ifndef AF_XDP +# define AF_XDP 44 +#endif +AA_GEN_NET_ENT("xdp", AF_XDP) + +#ifndef AF_MCTP +# define AF_MCTP 45 +#endif +AA_GEN_NET_ENT("mctp", AF_MCTP) + + +#define AA_AF_MAX 46 + diff --git a/build-aux/snap/snapcraft.yaml b/build-aux/snap/snapcraft.yaml index cdd9baecfe..a32dd58c32 100644 --- a/build-aux/snap/snapcraft.yaml +++ b/build-aux/snap/snapcraft.yaml @@ -38,6 +38,7 @@ parts: build-packages: - git - dpkg-dev + after: [apparmor] override-pull: | snapcraftctl pull # set version, this needs dpkg-parsechangelog (from dpkg-dev) and git @@ -160,3 +161,37 @@ parts: cp -a fc-cache-bionic $SNAPCRAFT_PART_INSTALL/bin/fc-cache-v7 prime: - bin/fc-cache-v7 + apparmor: + plugin: autotools + build-packages: [bison, flex, gettext, g++, pkg-config] + source: https://launchpad.net/apparmor/3.0/3.0.7/+download/apparmor-3.0.7.tar.gz + override-build: | + cd $SNAPCRAFT_PART_BUILD/libraries/libapparmor + ./autogen.sh + ./configure --prefix=/usr --disable-man-pages --disable-perl --disable-python --disable-ruby + make -j4 + # place libapparmor into staging area for use by snap-confine + make -C src install DESTDIR=$SNAPCRAFT_STAGE + cd $SNAPCRAFT_PART_BUILD/parser + # copy in a pregenerated list of network address families so that the + # parser gets built to support as many as possible even if glibc in + # the current build environment does not support them + # For some reason, some snapcraft version remove the "build-aux" folder + # and move the contents up when the data is uploaded; this conditional + # manages it. + if [ -d "$SNAPCRAFT_PROJECT_DIR/build-aux" ]; then + cp $SNAPCRAFT_PROJECT_DIR/build-aux/snap/local/apparmor/af_names.h . + else + cp $SNAPCRAFT_PROJECT_DIR/snap/local/apparmor/af_names.h . 
+ fi + make -j4 + mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/snapd + cp -a apparmor_parser $SNAPCRAFT_PART_INSTALL/usr/lib/snapd/ + mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/snapd/apparmor + cp -a parser.conf $SNAPCRAFT_PART_INSTALL/usr/lib/snapd/apparmor/ + cd $SNAPCRAFT_PART_BUILD/profiles + make -j4 + mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/snapd/apparmor.d + cp -a apparmor.d/abi $SNAPCRAFT_PART_INSTALL/usr/lib/snapd/apparmor.d/ + cp -a apparmor.d/abstractions $SNAPCRAFT_PART_INSTALL/usr/lib/snapd/apparmor.d/ + cp -a apparmor.d/tunables $SNAPCRAFT_PART_INSTALL/usr/lib/snapd/apparmor.d/ diff --git a/cmd/configure.ac b/cmd/configure.ac index 4578d711cd..c44fcbd1b9 100644 --- a/cmd/configure.ac +++ b/cmd/configure.ac @@ -86,8 +86,13 @@ AS_IF([test "x$with_unit_tests" = "xyes"], [ # Check if apparmor userspace library is available. AS_IF([test "x$enable_apparmor" = "xyes"], [ - PKG_CHECK_MODULES([APPARMOR], [libapparmor], [ - AC_DEFINE([HAVE_APPARMOR], [1], [Build with apparmor support])]) + # Expect AppArmor3 when building as a snap under snapcraft + AS_IF([test "x$SNAPCRAFT_PROJECT_NAME" = "xsnapd"], [ + PKG_CHECK_MODULES([APPARMOR3], [libapparmor = 3.0.7], [ + AC_DEFINE([HAVE_APPARMOR], [1], [Build with apparmor3 support])], [ + AC_MSG_ERROR([unable to find apparmor3 for snap build of snapd])])], [ + PKG_CHECK_MODULES([APPARMOR], [libapparmor], [ + AC_DEFINE([HAVE_APPARMOR], [1], [Build with apparmor support])])]) ], [ AC_MSG_WARN([ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX diff --git a/cmd/snap/cmd_prepare_image.go b/cmd/snap/cmd_prepare_image.go index afb18950f4..d429411bc1 100644 --- a/cmd/snap/cmd_prepare_image.go +++ b/cmd/snap/cmd_prepare_image.go @@ -51,8 +51,9 @@ type cmdPrepareImage struct { Customize string `long:"customize" hidden:"yes"` // TODO: introduce SnapWithChannel? - Snaps []string `long:"snap" value-name:"<snap>[=<channel>]"` - ExtraSnaps []string `long:"extra-snaps" hidden:"yes"` // DEPRECATED + Snaps []string `long:"snap" value-name:"<snap>[=<channel>]"` + ExtraSnaps []string `long:"extra-snaps" hidden:"yes"` // DEPRECATED + RevisionsFile string `long:"revisions"` } func init() { @@ -85,6 +86,8 @@ For preparing classic images it supports a --classic mode`), // TRANSLATORS: This should not start with a lowercase letter. "extra-snaps": i18n.G("Extra snaps to be installed (DEPRECATED)"), // TRANSLATORS: This should not start with a lowercase letter. + "revisions": i18n.G("Specify a seeds.manifest file referencing the exact revisions of the provided snaps which should be installed"), + // TRANSLATORS: This should not start with a lowercase letter. "channel": i18n.G("The channel to use"), // TRANSLATORS: This should not start with a lowercase letter. 
"customize": i18n.G("Image customizations specified as JSON file."), @@ -104,6 +107,7 @@ For preparing classic images it supports a --classic mode`), } var imagePrepare = image.Prepare +var imageReadSeedManifest = image.ReadSeedManifest func (x *cmdPrepareImage) Execute(args []string) error { opts := &image.Options{ @@ -113,6 +117,14 @@ func (x *cmdPrepareImage) Execute(args []string) error { Architecture: x.Architecture, } + if x.RevisionsFile != "" { + revisions, err := imageReadSeedManifest(x.RevisionsFile) + if err != nil { + return err + } + opts.Revisions = revisions + } + if x.Customize != "" { custo, err := readImageCustomizations(x.Customize) if err != nil { diff --git a/cmd/snap/cmd_prepare_image_test.go b/cmd/snap/cmd_prepare_image_test.go index 63f368aba8..07632ca536 100644 --- a/cmd/snap/cmd_prepare_image_test.go +++ b/cmd/snap/cmd_prepare_image_test.go @@ -26,8 +26,9 @@ import ( . "gopkg.in/check.v1" - snap "github.com/snapcore/snapd/cmd/snap" + cmdsnap "github.com/snapcore/snapd/cmd/snap" "github.com/snapcore/snapd/image" + "github.com/snapcore/snapd/snap" ) type SnapPrepareImageSuite struct { @@ -42,10 +43,10 @@ func (s *SnapPrepareImageSuite) TestPrepareImageCore(c *C) { opts = o return nil } - r := snap.MockImagePrepare(prep) + r := cmdsnap.MockImagePrepare(prep) defer r() - rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"prepare-image", "model", "prepare-dir"}) + rest, err := cmdsnap.Parser(cmdsnap.Client()).ParseArgs([]string{"prepare-image", "model", "prepare-dir"}) c.Assert(err, IsNil) c.Assert(rest, DeepEquals, []string{}) @@ -61,10 +62,10 @@ func (s *SnapPrepareImageSuite) TestPrepareImageClassic(c *C) { opts = o return nil } - r := snap.MockImagePrepare(prep) + r := cmdsnap.MockImagePrepare(prep) defer r() - rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"prepare-image", "--classic", "model", "prepare-dir"}) + rest, err := cmdsnap.Parser(cmdsnap.Client()).ParseArgs([]string{"prepare-image", "--classic", "model", "prepare-dir"}) c.Assert(err, IsNil) c.Assert(rest, DeepEquals, []string{}) @@ -81,10 +82,10 @@ func (s *SnapPrepareImageSuite) TestPrepareImageClassicArch(c *C) { opts = o return nil } - r := snap.MockImagePrepare(prep) + r := cmdsnap.MockImagePrepare(prep) defer r() - rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"prepare-image", "--classic", "--arch", "i386", "model", "prepare-dir"}) + rest, err := cmdsnap.Parser(cmdsnap.Client()).ParseArgs([]string{"prepare-image", "--classic", "--arch", "i386", "model", "prepare-dir"}) c.Assert(err, IsNil) c.Assert(rest, DeepEquals, []string{}) @@ -102,12 +103,12 @@ func (s *SnapPrepareImageSuite) TestPrepareImageClassicWideCohort(c *C) { opts = o return nil } - r := snap.MockImagePrepare(prep) + r := cmdsnap.MockImagePrepare(prep) defer r() os.Setenv("UBUNTU_STORE_COHORT_KEY", "is-six-centuries") - rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"prepare-image", "--classic", "model", "prepare-dir"}) + rest, err := cmdsnap.Parser(cmdsnap.Client()).ParseArgs([]string{"prepare-image", "--classic", "model", "prepare-dir"}) c.Assert(err, IsNil) c.Assert(rest, DeepEquals, []string{}) @@ -127,10 +128,10 @@ func (s *SnapPrepareImageSuite) TestPrepareImageExtraSnaps(c *C) { opts = o return nil } - r := snap.MockImagePrepare(prep) + r := cmdsnap.MockImagePrepare(prep) defer r() - rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"prepare-image", "model", "prepare-dir", "--channel", "candidate", "--snap", "foo", "--snap", "bar=t/edge", "--snap", "local.snap", 
"--extra-snaps", "local2.snap", "--extra-snaps", "store-snap"}) + rest, err := cmdsnap.Parser(cmdsnap.Client()).ParseArgs([]string{"prepare-image", "model", "prepare-dir", "--channel", "candidate", "--snap", "foo", "--snap", "bar=t/edge", "--snap", "local.snap", "--extra-snaps", "local2.snap", "--extra-snaps", "store-snap"}) c.Assert(err, IsNil) c.Assert(rest, DeepEquals, []string{}) @@ -149,7 +150,7 @@ func (s *SnapPrepareImageSuite) TestPrepareImageCustomize(c *C) { opts = o return nil } - r := snap.MockImagePrepare(prep) + r := cmdsnap.MockImagePrepare(prep) defer r() tmpdir := c.MkDir() @@ -160,7 +161,7 @@ func (s *SnapPrepareImageSuite) TestPrepareImageCustomize(c *C) { }`), 0644) c.Assert(err, IsNil) - rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"prepare-image", "model", "prepare-dir", "--customize", customizeFile}) + rest, err := cmdsnap.Parser(cmdsnap.Client()).ParseArgs([]string{"prepare-image", "model", "prepare-dir", "--customize", customizeFile}) c.Assert(err, IsNil) c.Assert(rest, DeepEquals, []string{}) @@ -174,8 +175,37 @@ func (s *SnapPrepareImageSuite) TestPrepareImageCustomize(c *C) { }) } +func (s *SnapPrepareImageSuite) TestReadSeedManifest(c *C) { + var opts *image.Options + prep := func(o *image.Options) error { + opts = o + return nil + } + r := cmdsnap.MockImagePrepare(prep) + defer r() + + var readManifestCalls int + r = cmdsnap.MockImageReadSeedManifest(func(manifestFile string) (map[string]snap.Revision, error) { + readManifestCalls++ + c.Check(manifestFile, Equals, "seed.manifest") + return map[string]snap.Revision{"snapd": snap.R(100)}, nil + }) + defer r() + + rest, err := cmdsnap.Parser(cmdsnap.Client()).ParseArgs([]string{"prepare-image", "model", "prepare-dir", "--revisions", "seed.manifest"}) + c.Assert(err, IsNil) + c.Assert(rest, DeepEquals, []string{}) + + c.Check(readManifestCalls, Equals, 1) + c.Check(opts, DeepEquals, &image.Options{ + ModelFile: "model", + PrepareDir: "prepare-dir", + Revisions: map[string]snap.Revision{"snapd": snap.R(100)}, + }) +} + func (s *SnapPrepareImageSuite) TestPrepareImagePreseedArgError(c *C) { - _, err := snap.Parser(snap.Client()).ParseArgs([]string{"prepare-image", "--preseed-sign-key", "key", "model", "prepare-dir"}) + _, err := cmdsnap.Parser(cmdsnap.Client()).ParseArgs([]string{"prepare-image", "--preseed-sign-key", "key", "model", "prepare-dir"}) c.Assert(err, ErrorMatches, `--preseed-sign-key cannot be used without --preseed`) } @@ -185,10 +215,10 @@ func (s *SnapPrepareImageSuite) TestPrepareImagePreseed(c *C) { opts = o return nil } - r := snap.MockImagePrepare(prep) + r := cmdsnap.MockImagePrepare(prep) defer r() - rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"prepare-image", "--preseed", "--preseed-sign-key", "key", "--apparmor-features-dir", "aafeatures-dir", "--sysfs-overlay", "sys-overlay", "model", "prepare-dir"}) + rest, err := cmdsnap.Parser(cmdsnap.Client()).ParseArgs([]string{"prepare-image", "--preseed", "--preseed-sign-key", "key", "--apparmor-features-dir", "aafeatures-dir", "--sysfs-overlay", "sys-overlay", "model", "prepare-dir"}) c.Assert(err, IsNil) c.Assert(rest, DeepEquals, []string{}) diff --git a/cmd/snap/cmd_snap_op.go b/cmd/snap/cmd_snap_op.go index 04e9634a17..494222722a 100644 --- a/cmd/snap/cmd_snap_op.go +++ b/cmd/snap/cmd_snap_op.go @@ -782,7 +782,13 @@ func (x *cmdRefresh) showRefreshTimes() error { fmt.Fprintf(Stdout, "last: n/a\n") } if !hold.IsZero() { - fmt.Fprintf(Stdout, "hold: %s\n", x.fmtTime(hold)) + // show holds over 100 years as "forever", 
like in the input of 'snap refresh + // --hold', instead of as a distant time (how they're internally represented) + if hold.After(timeNow().Add(100 * 365 * 24 * time.Hour)) { + fmt.Fprintf(Stdout, "hold: forever\n") + } else { + fmt.Fprintf(Stdout, "hold: %s\n", x.fmtTime(hold)) + } } // only show "next" if its after "hold" to not confuse users if !next.IsZero() { diff --git a/cmd/snap/cmd_snap_op_test.go b/cmd/snap/cmd_snap_op_test.go index 66caaf0e2c..67d4df39d9 100644 --- a/cmd/snap/cmd_snap_op_test.go +++ b/cmd/snap/cmd_snap_op_test.go @@ -1377,30 +1377,53 @@ next: 2017-04-26T00:58:00+02:00 } func (s *SnapSuite) TestRefreshTimeShowsHolds(c *check.C) { - n := 0 - s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { - switch n { - case 0: - c.Check(r.Method, check.Equals, "GET") - c.Check(r.URL.Path, check.Equals, "/v2/system-info") - fmt.Fprintln(w, `{"type": "sync", "status-code": 200, "result": {"refresh": {"timer": "0:00-24:00/4", "last": "2017-04-25T17:35:00+02:00", "next": "2017-04-26T00:58:00+02:00", "hold": "2017-04-28T00:00:00+02:00"}}}`) - default: - c.Fatalf("expected to get 1 requests, now on %d", n+1) - } + type testcase struct { + in string + out string + } - n++ - }) - rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"refresh", "--time", "--abs-time"}) + curTime, err := time.Parse(time.RFC3339, "2017-04-27T23:00:00+02:00") c.Assert(err, check.IsNil) - c.Assert(rest, check.DeepEquals, []string{}) - c.Check(s.Stdout(), check.Equals, `timer: 0:00-24:00/4 + restore := snap.MockTimeNow(func() time.Time { + return curTime + }) + defer restore() + + for _, tc := range []testcase{ + {in: "2017-04-28T00:00:00+02:00", out: "2017-04-28T00:00:00+02:00"}, + {in: "2117-04-28T00:00:00+02:00", out: "forever"}, + } { + n := 0 + s.RedirectClientToTestServer(func(w http.ResponseWriter, r *http.Request) { + switch n { + case 0: + c.Check(r.Method, check.Equals, "GET") + c.Check(r.URL.Path, check.Equals, "/v2/system-info") + fmt.Fprintf(w, `{"type": "sync", "status-code": 200, "result": {"refresh": {"timer": "0:00-24:00/4", "last": "2017-04-25T17:35:00+02:00", "next": "2017-04-26T00:58:00+02:00", "hold": %q}}}`, tc.in) + default: + errMsg := fmt.Sprintf("expected to get 1 requests, now on %d", n+1) + c.Error(errMsg) + w.WriteHeader(500) + w.Write([]byte(errMsg)) + } + + n++ + }) + + rest, err := snap.Parser(snap.Client()).ParseArgs([]string{"refresh", "--time", "--abs-time"}) + c.Assert(err, check.IsNil) + c.Assert(rest, check.DeepEquals, []string{}) + expectedOutput := fmt.Sprintf(`timer: 0:00-24:00/4 last: 2017-04-25T17:35:00+02:00 -hold: 2017-04-28T00:00:00+02:00 +hold: %s next: 2017-04-26T00:58:00+02:00 (but held) -`) - c.Check(s.Stderr(), check.Equals, "") - // ensure that the fake server api was actually hit - c.Check(n, check.Equals, 1) +`, tc.out) + c.Check(s.Stdout(), check.Equals, expectedOutput) + c.Check(s.Stderr(), check.Equals, "") + // ensure that the fake server api was actually hit + c.Check(n, check.Equals, 1) + s.ResetStdStreams() + } } func (s *SnapSuite) TestRefreshHoldAllForever(c *check.C) { diff --git a/cmd/snap/export_test.go b/cmd/snap/export_test.go index ed03b7cece..d9168b58b7 100644 --- a/cmd/snap/export_test.go +++ b/cmd/snap/export_test.go @@ -34,6 +34,7 @@ import ( "github.com/snapcore/snapd/snap" "github.com/snapcore/snapd/store" "github.com/snapcore/snapd/store/tooling" + "github.com/snapcore/snapd/testutil" usersessionclient "github.com/snapcore/snapd/usersession/client" ) @@ -470,3 +471,9 @@ func ParseQuotaValues(maxMemory, 
cpuMax, cpuSet, threadsMax, journalSizeMax, jou return quotas.parseQuotas() } + +func MockImageReadSeedManifest(f func(manifestFile string) (map[string]snap.Revision, error)) (restore func()) { + restore = testutil.Backup(&imageReadSeedManifest) + imageReadSeedManifest = f + return restore +} diff --git a/cmd/snapd-apparmor/export_test.go b/cmd/snapd-apparmor/export_test.go index bae0a0dbbf..652326b033 100644 --- a/cmd/snapd-apparmor/export_test.go +++ b/cmd/snapd-apparmor/export_test.go @@ -25,4 +25,5 @@ var ( IsContainer = isContainer IsContainerWithInternalPolicy = isContainerWithInternalPolicy LoadAppArmorProfiles = loadAppArmorProfiles + MockParserSearchPath = mockParserSearchPath ) diff --git a/cmd/snapd-apparmor/main.go b/cmd/snapd-apparmor/main.go index 10d9f2f67e..7576f3d3dc 100644 --- a/cmd/snapd-apparmor/main.go +++ b/cmd/snapd-apparmor/main.go @@ -67,7 +67,6 @@ import ( // container's boot process to experience failed policy loads but the boot // process should continue without any loss of functionality. This is an // unsupported configuration that cannot be properly handled by this function. -// func isContainerWithInternalPolicy() bool { if release.OnWSL { return true @@ -138,6 +137,12 @@ func validateArgs(args []string) error { return nil } +func init() { + if err := logger.SimpleSetup(); err != nil { + fmt.Fprintf(os.Stderr, "WARNING: failed to activate logging: %v\n", err) + } +} + func main() { if err := run(); err != nil { fmt.Fprintf(os.Stderr, "error: %v\n", err) @@ -165,3 +170,7 @@ func run() error { return loadAppArmorProfiles() } + +func mockParserSearchPath(parserSearchPath string) (restore func()) { + return apparmor_sandbox.MockParserSearchPath(parserSearchPath) +} diff --git a/cmd/snapd-apparmor/main_test.go b/cmd/snapd-apparmor/main_test.go index 7886b4ddd2..447528e977 100644 --- a/cmd/snapd-apparmor/main_test.go +++ b/cmd/snapd-apparmor/main_test.go @@ -114,6 +114,8 @@ func (s *mainSuite) TestIsContainerWithInternalPolicy(c *C) { func (s *mainSuite) TestLoadAppArmorProfiles(c *C) { parserCmd := testutil.MockCommand(c, "apparmor_parser", "") defer parserCmd.Restore() + restore := snapd_apparmor.MockParserSearchPath(parserCmd.BinDir()) + defer restore() err := snapd_apparmor.LoadAppArmorProfiles() c.Assert(err, IsNil) // since no profiles to load the parser should not have been called @@ -142,7 +144,10 @@ func (s *mainSuite) TestLoadAppArmorProfiles(c *C) { profile}}) // test error case - testutil.MockCommand(c, "apparmor_parser", "echo mocked parser failed > /dev/stderr; exit 1") + parserCmd = testutil.MockCommand(c, "apparmor_parser", "echo mocked parser failed > /dev/stderr; exit 1") + defer parserCmd.Restore() + restore = snapd_apparmor.MockParserSearchPath(parserCmd.BinDir()) + defer restore() err = snapd_apparmor.LoadAppArmorProfiles() c.Check(err.Error(), Equals, "cannot load apparmor profiles: exit status 1\napparmor_parser output:\nmocked parser failed\n") @@ -232,6 +237,8 @@ func (s *integrationSuite) SetUpTest(c *C) { // simulate a single profile to load s.parserCmd = testutil.MockCommand(c, "apparmor_parser", "") s.AddCleanup(s.parserCmd.Restore) + restore := snapd_apparmor.MockParserSearchPath(s.parserCmd.BinDir()) + s.AddCleanup(restore) err := os.MkdirAll(dirs.SnapAppArmorDir, 0755) c.Assert(err, IsNil) profile := filepath.Join(dirs.SnapAppArmorDir, "foo") diff --git a/daemon/api_quotas.go b/daemon/api_quotas.go index 924c62e266..5a29282c75 100644 --- a/daemon/api_quotas.go +++ b/daemon/api_quotas.go @@ -252,14 +252,18 @@ func postQuotaGroup(c 
*Command, r *http.Request, _ *auth.UserState) Response { } if err == servicestate.ErrQuotaNotFound { // then we need to create the quota - ts, err = servicestateCreateQuota(st, data.GroupName, data.Parent, data.Snaps, resourceLimits) + ts, err = servicestateCreateQuota(st, data.GroupName, servicestate.CreateQuotaOptions{ + ParentName: data.Parent, + Snaps: data.Snaps, + ResourceLimits: resourceLimits, + }) if err != nil { return errToResponse(err, nil, BadRequest, "cannot create quota group: %v") } chgSummary = "Create quota group" } else if err == nil { // the quota group already exists, update it - updateOpts := servicestate.QuotaGroupUpdate{ + updateOpts := servicestate.UpdateQuotaOptions{ AddSnaps: data.Snaps, NewResourceLimits: resourceLimits, } diff --git a/daemon/api_quotas_test.go b/daemon/api_quotas_test.go index 7975c6a3a9..a2856f1f22 100644 --- a/daemon/api_quotas_test.go +++ b/daemon/api_quotas_test.go @@ -147,11 +147,11 @@ func (s *apiQuotaSuite) TestPostQuotaInvalidGroupName(c *check.C) { } func (s *apiQuotaSuite) TestPostEnsureQuotaUnhappy(c *check.C) { - r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, parentName string, snaps []string, resourceLimits quota.Resources) (*state.TaskSet, error) { + r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, createOpts servicestate.CreateQuotaOptions) (*state.TaskSet, error) { c.Check(name, check.Equals, "booze") - c.Check(parentName, check.Equals, "foo") - c.Check(snaps, check.DeepEquals, []string{"bar"}) - c.Check(resourceLimits, check.DeepEquals, quota.NewResourcesBuilder().WithMemoryLimit(quantity.Size(1000)).Build()) + c.Check(createOpts.ParentName, check.Equals, "foo") + c.Check(createOpts.Snaps, check.DeepEquals, []string{"bar"}) + c.Check(createOpts.ResourceLimits, check.DeepEquals, quota.NewResourcesBuilder().WithMemoryLimit(quantity.Size(1000)).Build()) return nil, fmt.Errorf("boom") }) defer r() @@ -175,12 +175,12 @@ func (s *apiQuotaSuite) TestPostEnsureQuotaUnhappy(c *check.C) { func (s *apiQuotaSuite) TestPostEnsureQuotaCreateHappy(c *check.C) { var createCalled int - r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, parentName string, snaps []string, resourceLimits quota.Resources) (*state.TaskSet, error) { + r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, createOpts servicestate.CreateQuotaOptions) (*state.TaskSet, error) { createCalled++ c.Check(name, check.Equals, "booze") - c.Check(parentName, check.Equals, "foo") - c.Check(snaps, check.DeepEquals, []string{"some-snap"}) - c.Check(resourceLimits, check.DeepEquals, quota.NewResourcesBuilder().WithMemoryLimit(quantity.Size(1000)).Build()) + c.Check(createOpts.ParentName, check.Equals, "foo") + c.Check(createOpts.Snaps, check.DeepEquals, []string{"some-snap"}) + c.Check(createOpts.ResourceLimits, check.DeepEquals, quota.NewResourcesBuilder().WithMemoryLimit(quantity.Size(1000)).Build()) ts := state.NewTaskSet(st.NewTask("foo-quota", "...")) return ts, nil }) @@ -205,11 +205,11 @@ func (s *apiQuotaSuite) TestPostEnsureQuotaCreateHappy(c *check.C) { func (s *apiQuotaSuite) TestPostEnsureQuotaCreateQuotaConflicts(c *check.C) { var createCalled int - r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, parentName string, snaps []string, resourceLimits quota.Resources) (*state.TaskSet, error) { + r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, createOpts servicestate.CreateQuotaOptions) (*state.TaskSet, error) { c.Check(name, 
check.Equals, "booze") - c.Check(parentName, check.Equals, "foo") - c.Check(snaps, check.DeepEquals, []string{"some-snap"}) - c.Check(resourceLimits, check.DeepEquals, quota.NewResourcesBuilder().WithMemoryLimit(quantity.Size(1000)).Build()) + c.Check(createOpts.ParentName, check.Equals, "foo") + c.Check(createOpts.Snaps, check.DeepEquals, []string{"some-snap"}) + c.Check(createOpts.ResourceLimits, check.DeepEquals, quota.NewResourcesBuilder().WithMemoryLimit(quantity.Size(1000)).Build()) createCalled++ switch createCalled { @@ -263,12 +263,12 @@ func (s *apiQuotaSuite) TestPostEnsureQuotaCreateQuotaConflicts(c *check.C) { func (s *apiQuotaSuite) TestPostEnsureQuotaCreateJournalRateZeroHappy(c *check.C) { var createCalled int - r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, parentName string, snaps []string, resourceLimits quota.Resources) (*state.TaskSet, error) { + r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, createOpts servicestate.CreateQuotaOptions) (*state.TaskSet, error) { createCalled++ c.Check(name, check.Equals, "booze") - c.Check(parentName, check.Equals, "foo") - c.Check(snaps, check.DeepEquals, []string{"some-snap"}) - c.Check(resourceLimits, check.DeepEquals, quota.NewResourcesBuilder().WithJournalRate(0, 0).Build()) + c.Check(createOpts.ParentName, check.Equals, "foo") + c.Check(createOpts.Snaps, check.DeepEquals, []string{"some-snap"}) + c.Check(createOpts.ResourceLimits, check.DeepEquals, quota.NewResourcesBuilder().WithJournalRate(0, 0).Build()) ts := state.NewTaskSet(st.NewTask("foo-quota", "...")) return ts, nil }) @@ -311,17 +311,17 @@ func (s *apiQuotaSuite) TestPostEnsureQuotaUpdateCpuHappy(c *check.C) { st.Unlock() c.Assert(err, check.IsNil) - r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, parentName string, snaps []string, resourceLimits quota.Resources) (*state.TaskSet, error) { + r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, createOpts servicestate.CreateQuotaOptions) (*state.TaskSet, error) { c.Errorf("should not have called create quota") return nil, fmt.Errorf("broken test") }) defer r() updateCalled := 0 - r = daemon.MockServicestateUpdateQuota(func(st *state.State, name string, opts servicestate.QuotaGroupUpdate) (*state.TaskSet, error) { + r = daemon.MockServicestateUpdateQuota(func(st *state.State, name string, opts servicestate.UpdateQuotaOptions) (*state.TaskSet, error) { updateCalled++ c.Assert(name, check.Equals, "ginger-ale") - c.Assert(opts, check.DeepEquals, servicestate.QuotaGroupUpdate{ + c.Assert(opts, check.DeepEquals, servicestate.UpdateQuotaOptions{ AddSnaps: []string{"some-snap"}, NewResourceLimits: quota.NewResourcesBuilder().WithCPUCount(2).WithCPUPercentage(100).WithThreadLimit(512).Build(), }) @@ -365,17 +365,17 @@ func (s *apiQuotaSuite) TestPostEnsureQuotaUpdateCpu2Happy(c *check.C) { st.Unlock() c.Assert(err, check.IsNil) - r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, parentName string, snaps []string, resourceLimits quota.Resources) (*state.TaskSet, error) { + r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, createOpts servicestate.CreateQuotaOptions) (*state.TaskSet, error) { c.Errorf("should not have called create quota") return nil, fmt.Errorf("broken test") }) defer r() updateCalled := 0 - r = daemon.MockServicestateUpdateQuota(func(st *state.State, name string, opts servicestate.QuotaGroupUpdate) (*state.TaskSet, error) { + r = 
daemon.MockServicestateUpdateQuota(func(st *state.State, name string, opts servicestate.UpdateQuotaOptions) (*state.TaskSet, error) { updateCalled++ c.Assert(name, check.Equals, "ginger-ale") - c.Assert(opts, check.DeepEquals, servicestate.QuotaGroupUpdate{ + c.Assert(opts, check.DeepEquals, servicestate.UpdateQuotaOptions{ AddSnaps: []string{"some-snap"}, NewResourceLimits: quota.NewResourcesBuilder().WithCPUCount(1).WithCPUPercentage(100).WithCPUSet([]int{0, 1}).Build(), }) @@ -421,17 +421,17 @@ func (s *apiQuotaSuite) TestPostEnsureQuotaUpdateMemoryHappy(c *check.C) { st.Unlock() c.Assert(err, check.IsNil) - r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, parentName string, snaps []string, resourceLimits quota.Resources) (*state.TaskSet, error) { + r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, createOpts servicestate.CreateQuotaOptions) (*state.TaskSet, error) { c.Errorf("should not have called create quota") return nil, fmt.Errorf("broken test") }) defer r() updateCalled := 0 - r = daemon.MockServicestateUpdateQuota(func(st *state.State, name string, opts servicestate.QuotaGroupUpdate) (*state.TaskSet, error) { + r = daemon.MockServicestateUpdateQuota(func(st *state.State, name string, opts servicestate.UpdateQuotaOptions) (*state.TaskSet, error) { updateCalled++ c.Assert(name, check.Equals, "ginger-ale") - c.Assert(opts, check.DeepEquals, servicestate.QuotaGroupUpdate{ + c.Assert(opts, check.DeepEquals, servicestate.UpdateQuotaOptions{ AddSnaps: []string{"some-snap"}, NewResourceLimits: quota.NewResourcesBuilder().WithMemoryLimit(9000).Build(), }) @@ -465,17 +465,17 @@ func (s *apiQuotaSuite) TestPostEnsureQuotaUpdateConflicts(c *check.C) { st.Unlock() c.Assert(err, check.IsNil) - r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, parentName string, snaps []string, resourceLimits quota.Resources) (*state.TaskSet, error) { + r := daemon.MockServicestateCreateQuota(func(st *state.State, name string, createOpts servicestate.CreateQuotaOptions) (*state.TaskSet, error) { c.Errorf("should not have called create quota") return nil, fmt.Errorf("broken test") }) defer r() updateCalled := 0 - r = daemon.MockServicestateUpdateQuota(func(st *state.State, name string, opts servicestate.QuotaGroupUpdate) (*state.TaskSet, error) { + r = daemon.MockServicestateUpdateQuota(func(st *state.State, name string, opts servicestate.UpdateQuotaOptions) (*state.TaskSet, error) { updateCalled++ c.Assert(name, check.Equals, "ginger-ale") - c.Assert(opts, check.DeepEquals, servicestate.QuotaGroupUpdate{ + c.Assert(opts, check.DeepEquals, servicestate.UpdateQuotaOptions{ AddSnaps: []string{"some-snap"}, NewResourceLimits: quota.NewResourcesBuilder().WithMemoryLimit(quantity.Size(800 * quantity.SizeKiB)).Build(), }) diff --git a/daemon/api_users.go b/daemon/api_users.go index 53294d035d..0807a4f9d0 100644 --- a/daemon/api_users.go +++ b/daemon/api_users.go @@ -235,7 +235,7 @@ func removeUser(c *Command, username string, opts postUserDeleteData) Response { st.Lock() defer st.Unlock() - u, err := deviceStateRemoveUser(st, username) + u, err := deviceStateRemoveUser(st, username, &devicestate.RemoveUserOptions{}) if err != nil { if _, ok := err.(*devicestate.UserError); ok { return BadRequest(err.Error()) diff --git a/daemon/api_users_test.go b/daemon/api_users_test.go index 1307a0ed50..3afeec44f9 100644 --- a/daemon/api_users_test.go +++ b/daemon/api_users_test.go @@ -86,7 +86,7 @@ func (s *userSuite) SetUpTest(c *check.C) { return nil, 
&devicestate.UserError{Err: fmt.Errorf("unexpected create user %q call", email)} })) - s.AddCleanup(daemon.MockDeviceStateRemoveUser(func(st *state.State, username string) (*auth.UserState, error) { + s.AddCleanup(daemon.MockDeviceStateRemoveUser(func(st *state.State, username string, opts *devicestate.RemoveUserOptions) (*auth.UserState, error) { c.Fatalf("unexpected remove user %q call", username) return nil, &devicestate.UserError{Err: fmt.Errorf("unexpected remove user %q call", username)} })) @@ -642,7 +642,7 @@ func (s *userSuite) testpostUserActionRemoveDelUserErr(c *check.C, internalErr b c.Check(err, check.IsNil) called := 0 - defer daemon.MockDeviceStateRemoveUser(func(st *state.State, username string) (*auth.UserState, error) { + defer daemon.MockDeviceStateRemoveUser(func(st *state.State, username string, opts *devicestate.RemoveUserOptions) (*auth.UserState, error) { called++ if internalErr { return nil, fmt.Errorf("internal error: wat-internal") @@ -672,7 +672,7 @@ func (s *userSuite) TestPostUserActionRemove(c *check.C) { expectedEmail := "email@test.com" called := 0 - defer daemon.MockDeviceStateRemoveUser(func(st *state.State, username string) (*auth.UserState, error) { + defer daemon.MockDeviceStateRemoveUser(func(st *state.State, username string, opts *devicestate.RemoveUserOptions) (*auth.UserState, error) { called++ removedUser := &auth.UserState{ID: expectedID, Username: expectedUsername, Email: expectedEmail} diff --git a/daemon/export_api_quotas_test.go b/daemon/export_api_quotas_test.go index 60609aaac4..92e0bb44a9 100644 --- a/daemon/export_api_quotas_test.go +++ b/daemon/export_api_quotas_test.go @@ -30,7 +30,7 @@ type ( PostQuotaGroupData = postQuotaGroupData ) -func MockServicestateCreateQuota(f func(st *state.State, name string, parentName string, snaps []string, resourceLimits quota.Resources) (*state.TaskSet, error)) func() { +func MockServicestateCreateQuota(f func(st *state.State, name string, createOpts servicestate.CreateQuotaOptions) (*state.TaskSet, error)) func() { old := servicestateCreateQuota servicestateCreateQuota = f return func() { @@ -38,7 +38,7 @@ func MockServicestateCreateQuota(f func(st *state.State, name string, parentName } } -func MockServicestateUpdateQuota(f func(st *state.State, name string, opts servicestate.QuotaGroupUpdate) (*state.TaskSet, error)) func() { +func MockServicestateUpdateQuota(f func(st *state.State, name string, opts servicestate.UpdateQuotaOptions) (*state.TaskSet, error)) func() { old := servicestateUpdateQuota servicestateUpdateQuota = f return func() { diff --git a/daemon/export_api_users_test.go b/daemon/export_api_users_test.go index bc374205c2..6a4aff33de 100644 --- a/daemon/export_api_users_test.go +++ b/daemon/export_api_users_test.go @@ -46,7 +46,7 @@ func MockDeviceStateCreateKnownUsers(createKnownUser func(st *state.State, sudoe return restore } -func MockDeviceStateRemoveUser(removeUser func(st *state.State, username string) (*auth.UserState, error)) (restore func()) { +func MockDeviceStateRemoveUser(removeUser func(st *state.State, username string, opts *devicestate.RemoveUserOptions) (*auth.UserState, error)) (restore func()) { restore = testutil.Backup(&deviceStateRemoveUser) deviceStateRemoveUser = removeUser return restore diff --git a/gadget/gadget_test.go b/gadget/gadget_test.go index 4c06e78bf2..762bb09495 100644 --- a/gadget/gadget_test.go +++ b/gadget/gadget_test.go @@ -2660,14 +2660,10 @@ var mockDeviceLayout = gadget.OnDiskVolume{ // as existing on the disk - the code and test accounts for 
the MBR // structure not being present in the OnDiskVolume { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "BIOS Boot", - Size: 1 * quantity.SizeMiB, - }, - StartOffset: 1 * quantity.OffsetMiB, - }, - Node: "/dev/node1", + Node: "/dev/node1", + Name: "BIOS Boot", + Size: 1 * quantity.SizeMiB, + StartOffset: 1 * quantity.OffsetMiB, }, }, ID: "anything", @@ -2774,15 +2770,11 @@ func (s *gadgetYamlTestSuite) TestLayoutCompatibilityTypeBareStructureAllowedMis // as existing on the disk - the code and test accounts for the MBR // structure not being present in the OnDiskVolume { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "some-filesystem", - Size: 1 * quantity.SizeGiB, - Filesystem: "ext4", - }, - StartOffset: 1*quantity.OffsetMiB + 4096, - }, - Node: "/dev/node1", + Node: "/dev/node1", + Name: "some-filesystem", + Size: 1 * quantity.SizeGiB, + Filesystem: "ext4", + StartOffset: 1*quantity.OffsetMiB + 4096, }, }, ID: "anything", @@ -2849,15 +2841,11 @@ func (s *gadgetYamlTestSuite) TestLayoutCompatibility(c *C) { deviceLayoutWithExtras := mockDeviceLayout deviceLayoutWithExtras.Structure = append(deviceLayoutWithExtras.Structure, gadget.OnDiskStructure{ - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "Extra partition", - Size: 10 * quantity.SizeMiB, - Label: "extra", - }, - StartOffset: 2 * quantity.OffsetMiB, - }, - Node: "/dev/node2", + Node: "/dev/node2", + Name: "Extra partition", + Size: 10 * quantity.SizeMiB, + Label: "extra", + StartOffset: 2 * quantity.OffsetMiB, }, ) // extra structure (should fail) @@ -2892,28 +2880,20 @@ func (s *gadgetYamlTestSuite) TestMBRLayoutCompatibility(c *C) { var mockMBRDeviceLayout = gadget.OnDiskVolume{ Structure: []gadget.OnDiskStructure{ { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - // partition names have no - // meaning in MBR schema - Name: "other", - Size: 440, - }, - StartOffset: 0, - }, Node: "/dev/node1", + // partition names have no + // meaning in MBR schema + Name: "other", + Size: 440, + StartOffset: 0, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - // partition names have no - // meaning in MBR schema - Name: "different BIOS Boot", - Size: 1 * quantity.SizeMiB, - }, - StartOffset: 1 * quantity.OffsetMiB, - }, Node: "/dev/node2", + // partition names have no + // meaning in MBR schema + Name: "different BIOS Boot", + Size: 1 * quantity.SizeMiB, + StartOffset: 1 * quantity.OffsetMiB, }, }, ID: "anything", @@ -2936,18 +2916,14 @@ func (s *gadgetYamlTestSuite) TestMBRLayoutCompatibility(c *C) { deviceLayoutWithExtras := mockMBRDeviceLayout deviceLayoutWithExtras.Structure = append(deviceLayoutWithExtras.Structure, gadget.OnDiskStructure{ - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - // name is ignored with MBR schema - Name: "Extra partition", - Size: 1200 * quantity.SizeMiB, - Label: "extra", - Filesystem: "ext4", - Type: "83", - }, - StartOffset: 2 * quantity.OffsetMiB, - }, Node: "/dev/node2", + // name is ignored with MBR schema + Name: "Extra partition", + Size: 1200 * quantity.SizeMiB, + Label: "extra", + Filesystem: "ext4", + Type: "83", + StartOffset: 2 * quantity.OffsetMiB, }, ) err = gadget.EnsureLayoutCompatibility(gadgetLayoutWithExtras, &deviceLayoutWithExtras, nil) @@ -2968,15 +2944,11 @@ func (s *gadgetYamlTestSuite) 
TestMBRLayoutCompatibility(c *C) { // add another structure that's not part of the gadget deviceLayoutWithExtras.Structure = append(deviceLayoutWithExtras.Structure, gadget.OnDiskStructure{ - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - // name is ignored with MBR schema - Name: "Extra extra partition", - Size: 1 * quantity.SizeMiB, - }, - StartOffset: 1202 * quantity.OffsetMiB, - }, Node: "/dev/node4", + // name is ignored with MBR schema + Name: "Extra extra partition", + Size: 1 * quantity.SizeMiB, + StartOffset: 1202 * quantity.OffsetMiB, }, ) err = gadget.EnsureLayoutCompatibility(gadgetLayoutWithExtras, &deviceLayoutWithExtras, nil) @@ -2991,16 +2963,12 @@ func (s *gadgetYamlTestSuite) TestLayoutCompatibilityWithCreatedPartitions(c *C) // device matches gadget except for the filesystem type deviceLayout.Structure = append(deviceLayout.Structure, gadget.OnDiskStructure{ - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "Writable", - Size: 1200 * quantity.SizeMiB, - Label: "writable", - Filesystem: "something_else", - }, - StartOffset: 2 * quantity.OffsetMiB, - }, - Node: "/dev/node2", + Node: "/dev/node2", + Name: "Writable", + Size: 1200 * quantity.SizeMiB, + Label: "writable", + Filesystem: "something_else", + StartOffset: 2 * quantity.OffsetMiB, }, ) @@ -3071,16 +3039,12 @@ func (s *gadgetYamlTestSuite) TestLayoutCompatibilityWithUnspecifiedGadgetFilesy // device matches, but it has a filesystem deviceLayout.Structure = append(deviceLayout.Structure, gadget.OnDiskStructure{ - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "foobar", - Size: 1200 * quantity.SizeMiB, - Label: "whatever", - Filesystem: "something", - }, - StartOffset: 2 * quantity.OffsetMiB, - }, - Node: "/dev/node2", + Node: "/dev/node2", + Name: "foobar", + Size: 1200 * quantity.SizeMiB, + Label: "whatever", + Filesystem: "something", + StartOffset: 2 * quantity.OffsetMiB, }, ) @@ -3119,26 +3083,18 @@ var mockEncDeviceLayout = gadget.OnDiskVolume{ // as existing on the disk - the code and test accounts for the MBR // structure not being present in the OnDiskVolume { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "BIOS Boot", - Size: 1 * quantity.SizeMiB, - }, - StartOffset: 1 * quantity.OffsetMiB, - }, - Node: "/dev/node1", + Node: "/dev/node1", + Name: "BIOS Boot", + Size: 1 * quantity.SizeMiB, + StartOffset: 1 * quantity.OffsetMiB, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "Writable", - Size: 1200 * quantity.SizeMiB, - Filesystem: "crypto_LUKS", - Label: "Writable-enc", - }, - StartOffset: 2 * quantity.OffsetMiB, - }, - Node: "/dev/node2", + Node: "/dev/node2", + Name: "Writable", + Size: 1200 * quantity.SizeMiB, + Filesystem: "crypto_LUKS", + Label: "Writable-enc", + StartOffset: 2 * quantity.OffsetMiB, }, }, ID: "anything", diff --git a/gadget/gadgettest/examples.go b/gadget/gadgettest/examples.go index 95f93b35bc..fd00fe9a2e 100644 --- a/gadget/gadgettest/examples.go +++ b/gadget/gadgettest/examples.go @@ -1407,50 +1407,32 @@ const UC16YAMLImplicitSystemData = `volumes: var UC16DeviceLayout = gadget.OnDiskVolume{ Structure: []gadget.OnDiskStructure{ { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "BIOS Boot", - Type: "21686148-6449-6E6F-744E-656564454649", - ID: "b2e891ee-b971-4a2b-b874-694bbf9b821a", - Size: 
quantity.SizeMiB, - }, - StartOffset: quantity.OffsetMiB, - }, - DiskIndex: 1, - Node: "/dev/sda1", - Size: quantity.SizeMiB, + Name: "BIOS Boot", + Type: "21686148-6449-6E6F-744E-656564454649", + StartOffset: quantity.OffsetMiB, + DiskIndex: 1, + Node: "/dev/sda1", + Size: quantity.SizeMiB, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "EFI System", - Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", - ID: "a87e9dcb-b1e1-4eab-89cf-1c2fc057b038", - Label: "system-boot", - Filesystem: "vfat", - Size: 52428800, - }, - StartOffset: 2097152, - }, - DiskIndex: 2, - Node: "/dev/sda2", - Size: 52428800, + Name: "EFI System", + Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", + Label: "system-boot", + Filesystem: "vfat", + StartOffset: 2097152, + DiskIndex: 2, + Node: "/dev/sda2", + Size: 52428800, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "writable", - Type: "0FC63DAF-8483-4772-8E79-3D69D8477DE4", - ID: "cba2b8b3-c2e4-4e51-9a57-d35041b7bf9a", - Label: "writable", - Filesystem: "ext4", - Size: 10682875392, - }, - StartOffset: 54525952, - }, - DiskIndex: 3, - Node: "/dev/sda3", - Size: 10682875392, + Name: "writable", + Type: "0FC63DAF-8483-4772-8E79-3D69D8477DE4", + Label: "writable", + Filesystem: "ext4", + Size: 10682875392, + StartOffset: 54525952, + DiskIndex: 3, + Node: "/dev/sda3", }, }, ID: "2a9b0671-4597-433b-b3ad-be99950e8c5e", diff --git a/gadget/gadgettest/gadgettest.go b/gadget/gadgettest/gadgettest.go index b8d92cce0d..24f8376353 100644 --- a/gadget/gadgettest/gadgettest.go +++ b/gadget/gadgettest/gadgettest.go @@ -26,8 +26,10 @@ import ( "path/filepath" "github.com/snapcore/snapd/asserts" + "github.com/snapcore/snapd/boot/boottest" "github.com/snapcore/snapd/gadget" "github.com/snapcore/snapd/gadget/quantity" + "github.com/snapcore/snapd/osutil/disks" ) // LayoutMultiVolumeFromYaml returns all LaidOutVolumes for the given @@ -120,3 +122,90 @@ func (m *ModelCharacteristics) Grade() asserts.ModelGrade { } return asserts.ModelGradeUnset } + +func MakeMockGadget(gadgetRoot, gadgetContent string) error { + if err := os.MkdirAll(filepath.Join(gadgetRoot, "meta"), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(gadgetRoot, "meta", "gadget.yaml"), []byte(gadgetContent), 0644); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(gadgetRoot, "pc-boot.img"), []byte("pc-boot.img content"), 0644); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(gadgetRoot, "pc-core.img"), []byte("pc-core.img content"), 0644); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(gadgetRoot, "grubx64.efi"), []byte("grubx64.efi content"), 0644); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(gadgetRoot, "shim.efi.signed"), []byte("shim.efi.signed content"), 0644); err != nil { + return err + } + + return nil +} + +func MockGadgetPartitionedDisk(gadgetYaml, gadgetRoot string) (ginfo *gadget.Info, laidVols map[string]*gadget.LaidOutVolume, model *asserts.Model, restore func(), err error) { + // TODO test for UC systems too + model = boottest.MakeMockClassicWithModesModel() + + // Create gadget with all files + err = MakeMockGadget(gadgetRoot, gadgetYaml) + if err != nil { + return nil, nil, nil, nil, err + } + _, laidVols, err = gadget.LaidOutVolumesFromGadget(gadgetRoot, "", model) + if err != nil { + return nil, nil, nil, nil, err + } + + ginfo, err = gadget.ReadInfo(gadgetRoot, model) + 
if err != nil { + return nil, nil, nil, nil, err + } + + // "Real" disk data that will be read + vdaSysPath := "/sys/devices/pci0000:00/0000:00:03.0/virtio1/block/vda" + disk := &disks.MockDiskMapping{ + Structure: []disks.Partition{ + { + PartitionLabel: "BIOS\x20Boot", + KernelDeviceNode: "/dev/vda1", + DiskIndex: 1, + }, + { + PartitionLabel: "EFI System partition", + KernelDeviceNode: "/dev/vda2", + DiskIndex: 2, + }, + { + PartitionLabel: "ubuntu-boot", + KernelDeviceNode: "/dev/vda3", + DiskIndex: 3, + }, + { + PartitionLabel: "ubuntu-save", + KernelDeviceNode: "/dev/vda4", + DiskIndex: 4, + }, + { + PartitionLabel: "ubuntu-data", + KernelDeviceNode: "/dev/vda5", + DiskIndex: 5, + }, + }, + DiskHasPartitions: true, + DevNum: "disk1", + DevNode: "/dev/vda", + DevPath: vdaSysPath, + } + diskMapping := map[string]*disks.MockDiskMapping{ + vdaSysPath: disk, + // this simulates a symlink in /sys/block which points to the above path + "/sys/block/vda": disk, + } + restore = disks.MockDevicePathToDiskMapping(diskMapping) + + return ginfo, laidVols, model, restore, nil +} diff --git a/gadget/install/content.go b/gadget/install/content.go index a673372629..6bd32c599f 100644 --- a/gadget/install/content.go +++ b/gadget/install/content.go @@ -23,7 +23,7 @@ import ( "fmt" "os" "path/filepath" - "strconv" + "strings" "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/gadget" @@ -42,6 +42,14 @@ type mkfsParams struct { SectorSize quantity.Size } +// onDiskAndLaidoutStructure puts together the on disk and the laid +// out for a disk structure. +// TODO This is a temporary structure until we include StorageStructure in LaidOutStructure. +type onDiskAndLaidoutStructure struct { + onDisk *gadget.OnDiskStructure + laidOut *gadget.LaidOutStructure +} + // makeFilesystem creates a filesystem on the on-disk structure, according // to the filesystem type defined in the gadget. If sectorSize is specified, // that sector size is used when creating the filesystem, otherwise if it is @@ -70,16 +78,16 @@ func mountFilesystem(fsDevice, fs, mountpoint string) error { // writeContent populates the given on-disk filesystem structure with a // corresponding filesystem device, according to the contents defined in the // gadget. 
-func writeFilesystemContent(ds *gadget.OnDiskStructure, fsDevice string, observer gadget.ContentObserver) (err error) { - mountpoint := filepath.Join(dirs.SnapRunDir, "gadget-install", strconv.Itoa(ds.DiskIndex)) +func writeFilesystemContent(laidOut *gadget.LaidOutStructure, fsDevice string, observer gadget.ContentObserver) (err error) { + mountpoint := filepath.Join(dirs.SnapRunDir, "gadget-install", strings.ReplaceAll(strings.Trim(fsDevice, "/"), "/", "-")) if err := os.MkdirAll(mountpoint, 0755); err != nil { return err } // temporarily mount the filesystem - logger.Debugf("mounting %q in %q (fs type %q)", fsDevice, mountpoint, ds.Filesystem) - if err := sysMount(fsDevice, mountpoint, ds.Filesystem, 0, ""); err != nil { - return fmt.Errorf("cannot mount filesystem %q at %q: %v", ds.Node, mountpoint, err) + logger.Debugf("mounting %q in %q (fs type %q)", fsDevice, mountpoint, laidOut.Filesystem) + if err := sysMount(fsDevice, mountpoint, laidOut.Filesystem, 0, ""); err != nil { + return fmt.Errorf("cannot mount %q at %q: %v", fsDevice, mountpoint, err) } defer func() { errUnmount := sysUnmount(mountpoint, 0) @@ -87,7 +95,7 @@ func writeFilesystemContent(ds *gadget.OnDiskStructure, fsDevice string, observe err = errUnmount } }() - fs, err := gadget.NewMountedFilesystemWriter(&ds.LaidOutStructure, observer) + fs, err := gadget.NewMountedFilesystemWriter(laidOut, observer) if err != nil { return fmt.Errorf("cannot create filesystem image writer: %v", err) } diff --git a/gadget/install/content_test.go b/gadget/install/content_test.go index 3157254c22..41d53b4890 100644 --- a/gadget/install/content_test.go +++ b/gadget/install/content_test.go @@ -23,7 +23,6 @@ import ( "errors" "fmt" "io/ioutil" - "os" "path/filepath" . "gopkg.in/check.v1" @@ -31,6 +30,7 @@ import ( "github.com/snapcore/snapd/boot" "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/gadget" + "github.com/snapcore/snapd/gadget/gadgettest" "github.com/snapcore/snapd/gadget/install" "github.com/snapcore/snapd/gadget/quantity" "github.com/snapcore/snapd/testutil" @@ -64,7 +64,7 @@ func (s *contentTestSuite) SetUpTest(c *C) { s.mockUnmountCalls = nil s.gadgetRoot = c.MkDir() - err := makeMockGadget(s.gadgetRoot, gadgetContent) + err := gadgettest.MakeMockGadget(s.gadgetRoot, gadgetContent) c.Assert(err, IsNil) s.mockMountPoint = c.MkDir() @@ -82,15 +82,9 @@ func (s *contentTestSuite) SetUpTest(c *C) { s.AddCleanup(restore) } -var mockOnDiskStructureSystemSeed = gadget.OnDiskStructure{ - Node: "/dev/node2", - LaidOutStructure: gadget.LaidOutStructure{ +func mockOnDiskStructureSystemSeed(gadgetRoot string) *gadget.LaidOutStructure { + return &gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ - Name: "Recovery", - Size: 1258291200, - Type: "EF,C12A7328-F81F-11D2-BA4B-00A0C93EC93B", - Role: "system-seed", - Label: "ubuntu-seed", Filesystem: "vfat", Content: []gadget.VolumeContent{ { @@ -99,33 +93,17 @@ var mockOnDiskStructureSystemSeed = gadget.OnDiskStructure{ }, }, }, - StartOffset: 2097152, - YamlIndex: 1000, // to demonstrate we do not use the laid out index - }, - DiskIndex: 2, -} - -func makeMockGadget(gadgetRoot, gadgetContent string) error { - if err := os.MkdirAll(filepath.Join(gadgetRoot, "meta"), 0755); err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(gadgetRoot, "meta", "gadget.yaml"), []byte(gadgetContent), 0644); err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(gadgetRoot, "pc-boot.img"), []byte("pc-boot.img content"), 0644); err != nil { - return 
err - } - if err := ioutil.WriteFile(filepath.Join(gadgetRoot, "pc-core.img"), []byte("pc-core.img content"), 0644); err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(gadgetRoot, "grubx64.efi"), []byte("grubx64.efi content"), 0644); err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(gadgetRoot, "shim.efi.signed"), []byte("shim.efi.signed content"), 0644); err != nil { - return err + YamlIndex: 1000, // to demonstrate we do not use the laid out index + ResolvedContent: []gadget.ResolvedContent{ + { + VolumeContent: &gadget.VolumeContent{ + UnresolvedSource: "grubx64.efi", + Target: "EFI/boot/grubx64.efi", + }, + ResolvedSource: filepath.Join(gadgetRoot, "grubx64.efi"), + }, + }, } - - return nil } const gadgetContent = `volumes: @@ -200,7 +178,7 @@ func (s *contentTestSuite) TestWriteFilesystemContent(c *C) { }, { mountErr: errors.New("mount error"), unmountErr: nil, - err: "cannot mount filesystem .*: mount error", + err: "cannot mount .* at .*: mount error", }, { mountErr: nil, unmountErr: errors.New("unmount error"), @@ -215,7 +193,7 @@ func (s *contentTestSuite) TestWriteFilesystemContent(c *C) { restore := install.MockSysMount(func(source, target, fstype string, flags uintptr, data string) error { c.Check(source, Equals, "/dev/node2") c.Check(fstype, Equals, "vfat") - c.Check(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/2")) + c.Check(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/dev-node2")) return tc.mountErr }) defer restore() @@ -226,22 +204,13 @@ func (s *contentTestSuite) TestWriteFilesystemContent(c *C) { defer restore() // copy existing mock - m := mockOnDiskStructureSystemSeed - m.ResolvedContent = []gadget.ResolvedContent{ - { - VolumeContent: &gadget.VolumeContent{ - UnresolvedSource: "grubx64.efi", - Target: "EFI/boot/grubx64.efi", - }, - ResolvedSource: filepath.Join(s.gadgetRoot, "grubx64.efi"), - }, - } + m := mockOnDiskStructureSystemSeed(s.gadgetRoot) obs := &mockWriteObserver{ c: c, observeErr: tc.observeErr, - expectedStruct: &m.LaidOutStructure, + expectedStruct: m, } - err := install.WriteFilesystemContent(&m, "/dev/node2", obs) + err := install.WriteFilesystemContent(m, "/dev/node2", obs) if tc.err == "" { c.Assert(err, IsNil) } else { @@ -250,11 +219,11 @@ func (s *contentTestSuite) TestWriteFilesystemContent(c *C) { if err == nil { // the target file system is mounted on a directory named after the structure index - content, err := ioutil.ReadFile(filepath.Join(dirs.SnapRunDir, "gadget-install/2", "EFI/boot/grubx64.efi")) + content, err := ioutil.ReadFile(filepath.Join(dirs.SnapRunDir, "gadget-install/dev-node2", "EFI/boot/grubx64.efi")) c.Assert(err, IsNil) c.Check(string(content), Equals, "grubx64.efi content") c.Assert(obs.content, DeepEquals, map[string][]*mockContentChange{ - filepath.Join(dirs.SnapRunDir, "gadget-install/2"): { + filepath.Join(dirs.SnapRunDir, "gadget-install/dev-node2"): { { path: "EFI/boot/grubx64.efi", change: &gadget.ContentChange{After: filepath.Join(s.gadgetRoot, "grubx64.efi")}, diff --git a/gadget/install/encrypt.go b/gadget/install/encrypt.go index 4fbf980a23..585e2e6135 100644 --- a/gadget/install/encrypt.go +++ b/gadget/install/encrypt.go @@ -119,8 +119,8 @@ func createEncryptedDeviceWithSetupHook(part *gadget.OnDiskStructure, key keys.E // for roles requiring encryption, the filesystem label is always set to // either the implicit value or a value that has been validated if part.Name != name || part.Label != name { - return nil, fmt.Errorf("cannot 
use partition name %q for an encrypted structure with %v role and filesystem with label %q", - name, part.Role, part.Label) + return nil, fmt.Errorf("cannot use partition name %q for an encrypted structure with partition label %q or filesystem label %q", + name, part.Name, part.Label) } // 1. create linear mapper device with 1Mb of reserved space diff --git a/gadget/install/encrypt_test.go b/gadget/install/encrypt_test.go index f3f8278332..4d84b4450b 100644 --- a/gadget/install/encrypt_test.go +++ b/gadget/install/encrypt_test.go @@ -49,17 +49,11 @@ type encryptSuite struct { var _ = Suite(&encryptSuite{}) var mockDeviceStructure = gadget.OnDiskStructure{ - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Role: gadget.SystemData, - Name: "Test structure", - Label: "some-label", - }, - StartOffset: 0, - YamlIndex: 1, - }, - Size: 3 * quantity.SizeMiB, - Node: "/dev/node1", + Name: "Test structure", + Label: "some-label", + StartOffset: 0, + Size: 3 * quantity.SizeMiB, + Node: "/dev/node1", } func (s *encryptSuite) SetUpTest(c *C) { @@ -135,17 +129,11 @@ func (s *encryptSuite) TestNewEncryptedDeviceLUKS(c *C) { } var mockDeviceStructureForDeviceSetupHook = gadget.OnDiskStructure{ - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Role: gadget.SystemData, - Name: "ubuntu-data", - Label: "ubuntu-data", - }, - StartOffset: 0, - YamlIndex: 1, - }, - Size: 3 * quantity.SizeMiB, - Node: "/dev/node1", + Name: "ubuntu-data", + Label: "ubuntu-data", + StartOffset: 0, + Size: 3 * quantity.SizeMiB, + Node: "/dev/node1", } func (s *encryptSuite) TestCreateEncryptedDeviceWithSetupHook(c *C) { @@ -212,17 +200,11 @@ func (s *encryptSuite) TestCreateEncryptedDeviceWithSetupHook(c *C) { func (s *encryptSuite) TestCreateEncryptedDeviceWithSetupHookPartitionNameCheck(c *C) { mockDeviceStructureBadName := gadget.OnDiskStructure{ - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Role: gadget.SystemData, - Name: "ubuntu-data", - Label: "ubuntu-data", - }, - StartOffset: 0, - YamlIndex: 1, - }, - Size: 3 * quantity.SizeMiB, - Node: "/dev/node1", + Name: "ubuntu-data", + Label: "ubuntu-data", + StartOffset: 0, + Size: 3 * quantity.SizeMiB, + Node: "/dev/node1", } restore := install.MockBootRunFDESetupHook(func(req *fde.SetupRequest) ([]byte, error) { c.Error("unexpected call") @@ -236,7 +218,7 @@ func (s *encryptSuite) TestCreateEncryptedDeviceWithSetupHookPartitionNameCheck( // pass a name that does not match partition name dev, err := install.CreateEncryptedDeviceWithSetupHook(&mockDeviceStructureBadName, s.mockedEncryptionKey, "some-name") - c.Assert(err, ErrorMatches, `cannot use partition name "some-name" for an encrypted structure with system-data role and filesystem with label "ubuntu-data"`) + c.Assert(err, ErrorMatches, `cannot use partition name "some-name" for an encrypted structure with partition label "ubuntu-data" or filesystem label "ubuntu-data"`) c.Check(dev, IsNil) c.Check(mockDmsetup.Calls(), HasLen, 0) // make structure name different than the label, which is set to either @@ -245,7 +227,7 @@ func (s *encryptSuite) TestCreateEncryptedDeviceWithSetupHookPartitionNameCheck( mockDeviceStructureBadName.Name = "bad-name" dev, err = install.CreateEncryptedDeviceWithSetupHook(&mockDeviceStructureBadName, s.mockedEncryptionKey, "bad-name") - c.Assert(err, ErrorMatches, `cannot use partition name "bad-name" for an encrypted structure with system-data role and filesystem with label 
"ubuntu-data"`) + c.Assert(err, ErrorMatches, `cannot use partition name "bad-name" for an encrypted structure with partition label "bad-name" or filesystem label "ubuntu-data"`) c.Check(dev, IsNil) c.Check(mockDmsetup.Calls(), HasLen, 0) } diff --git a/gadget/install/export_test.go b/gadget/install/export_test.go index d94a682415..6bb0d12dab 100644 --- a/gadget/install/export_test.go +++ b/gadget/install/export_test.go @@ -28,6 +28,7 @@ import ( ) type MkfsParams = mkfsParams +type OnDiskAndLaidoutStructure = onDiskAndLaidoutStructure var ( MakeFilesystem = makeFilesystem @@ -38,7 +39,8 @@ var ( RemoveCreatedPartitions = removeCreatedPartitions EnsureNodesExist = ensureNodesExist - CreatedDuringInstall = createdDuringInstall + CreatedDuringInstall = createdDuringInstall + TestCreateMissingPartitions = createMissingPartitions ) func MockSysMount(f func(source, target, fstype string, flags uintptr, data string) error) (restore func()) { @@ -57,7 +59,7 @@ func MockSysUnmount(f func(target string, flags int) error) (restore func()) { } } -func MockEnsureNodesExist(f func(dss []gadget.OnDiskStructure, timeout time.Duration) error) (restore func()) { +func MockEnsureNodesExist(f func(dss []OnDiskAndLaidoutStructure, timeout time.Duration) error) (restore func()) { old := ensureNodesExist ensureNodesExist = f return func() { @@ -81,17 +83,6 @@ func MockSysfsPathForBlockDevice(f func(device string) (string, error)) (restore } } -func BuildEncryptionSetupData(labelToEncDevice map[string]string) *EncryptionSetupData { - esd := &EncryptionSetupData{ - parts: map[string]partEncryptionData{}} - for label, encryptDev := range labelToEncDevice { - esd.parts[label] = partEncryptionData{ - encryptedDevice: encryptDev, - } - } - return esd -} - func CheckEncryptionSetupData(encryptSetup *EncryptionSetupData, labelToEncDevice map[string]string) error { for label, part := range encryptSetup.parts { switch part.role { @@ -111,3 +102,15 @@ func CheckEncryptionSetupData(encryptSetup *EncryptionSetupData, labelToEncDevic return nil } + +func MockOnDiskAndLaidoutStructure(onDisk *gadget.OnDiskStructure, laidOut *gadget.LaidOutStructure) OnDiskAndLaidoutStructure { + return OnDiskAndLaidoutStructure{onDisk, laidOut} +} + +func OnDiskFromOnDiskAndLaidoutStructure(odls OnDiskAndLaidoutStructure) *gadget.OnDiskStructure { + return odls.onDisk +} + +func LaidOutFromOnDiskAndLaidoutStructure(odls OnDiskAndLaidoutStructure) *gadget.LaidOutStructure { + return odls.laidOut +} diff --git a/gadget/install/install.go b/gadget/install/install.go index 2ee8be5aa5..fd31ca5efb 100644 --- a/gadget/install/install.go +++ b/gadget/install/install.go @@ -61,10 +61,10 @@ func diskWithSystemSeed(lv *gadget.LaidOutVolume) (device string, err error) { return "", fmt.Errorf("cannot find role system-seed in gadget") } -func roleOrLabelOrName(part *gadget.OnDiskStructure) string { +func roleOrLabelOrName(role string, part *gadget.OnDiskStructure) string { switch { - case part.Role != "": - return part.Role + case role != "": + return role case part.Label != "": return part.Label case part.Name != "": @@ -96,24 +96,27 @@ func saveStorageTraits(mod gadget.Model, allLaidOutVols map[string]*gadget.LaidO return nil } -func maybeEncryptPartition(part *gadget.OnDiskStructure, encryptionType secboot.EncryptionType, sectorSize quantity.Size, perfTimings timings.Measurer) (fsParams *mkfsParams, encryptionKey keys.EncryptionKey, err error) { +func maybeEncryptPartition(odls *onDiskAndLaidoutStructure, encryptionType secboot.EncryptionType, 
sectorSize quantity.Size, perfTimings timings.Measurer) (fsParams *mkfsParams, encryptionKey keys.EncryptionKey, err error) { mustEncrypt := (encryptionType != secboot.EncryptionTypeNone) - partDisp := roleOrLabelOrName(part) + onDisk := odls.onDisk + laidOut := odls.laidOut // fsParams.Device is the kernel device that carries the // filesystem, which is either the raw /dev/<partition>, or // the mapped LUKS device if the structure is encrypted (if // the latter, it will be filled below in this function). fsParams = &mkfsParams{ - Type: part.Filesystem, - Device: part.Node, - Label: part.Label, - Size: part.Size, + // Filesystem and label are as specified in the gadget + Type: laidOut.Filesystem, + Label: laidOut.Label, + // Rest come from disk data + Device: onDisk.Node, + Size: onDisk.Size, SectorSize: sectorSize, } - if mustEncrypt && roleNeedsEncryption(part.Role) { - timings.Run(perfTimings, fmt.Sprintf("make-key-set[%s]", partDisp), - fmt.Sprintf("Create encryption key set for %s", partDisp), + if mustEncrypt && roleNeedsEncryption(laidOut.Role) { + timings.Run(perfTimings, fmt.Sprintf("make-key-set[%s]", laidOut.Role), + fmt.Sprintf("Create encryption key set for %s", laidOut.Role), func(timings.Measurer) { encryptionKey, err = keys.NewEncryptionKey() if err != nil { @@ -123,24 +126,24 @@ func maybeEncryptPartition(part *gadget.OnDiskStructure, encryptionType secboot. if err != nil { return nil, nil, err } - logger.Noticef("encrypting partition device %v", part.Node) + logger.Noticef("encrypting partition device %v", onDisk.Node) var dataPart encryptedDevice switch encryptionType { case secboot.EncryptionTypeLUKS: - timings.Run(perfTimings, fmt.Sprintf("new-encrypted-device[%s]", partDisp), - fmt.Sprintf("Create encryption device for %s", partDisp), + timings.Run(perfTimings, fmt.Sprintf("new-encrypted-device[%s]", laidOut.Role), + fmt.Sprintf("Create encryption device for %s", laidOut.Role), func(timings.Measurer) { - dataPart, err = newEncryptedDeviceLUKS(part, encryptionKey, part.Label) + dataPart, err = newEncryptedDeviceLUKS(onDisk, encryptionKey, laidOut.Label) }) if err != nil { return nil, nil, err } case secboot.EncryptionTypeDeviceSetupHook: - timings.Run(perfTimings, fmt.Sprintf("new-encrypted-device-setup-hook[%s]", partDisp), - fmt.Sprintf("Create encryption device for %s using device-setup-hook", partDisp), + timings.Run(perfTimings, fmt.Sprintf("new-encrypted-device-setup-hook[%s]", laidOut.Role), + fmt.Sprintf("Create encryption device for %s using device-setup-hook", laidOut.Role), func(timings.Measurer) { - dataPart, err = createEncryptedDeviceWithSetupHook(part, encryptionKey, part.Name) + dataPart, err = createEncryptedDeviceWithSetupHook(onDisk, encryptionKey, laidOut.Name) }) if err != nil { return nil, nil, err @@ -161,9 +164,8 @@ func maybeEncryptPartition(part *gadget.OnDiskStructure, encryptionType secboot. 
return fsParams, encryptionKey, nil } -func createFilesystem(part *gadget.OnDiskStructure, fsParams *mkfsParams, perfTimings timings.Measurer) error { - partDisp := roleOrLabelOrName(part) - +// TODO probably we won't need to pass partDisp when we include storage in laidOut +func createFilesystem(part *gadget.OnDiskStructure, fsParams *mkfsParams, partDisp string, perfTimings timings.Measurer) error { var err error timings.Run(perfTimings, fmt.Sprintf("make-filesystem[%s]", partDisp), fmt.Sprintf("Create filesystem for %s", fsParams.Device), @@ -176,13 +178,13 @@ func createFilesystem(part *gadget.OnDiskStructure, fsParams *mkfsParams, perfTi return nil } -func writePartitionContent(part *gadget.OnDiskStructure, fsDevice string, observer gadget.ContentObserver, perfTimings timings.Measurer) error { - partDisp := roleOrLabelOrName(part) +// TODO probably we won't need to pass partDisp when we include storage in laidOut +func writePartitionContent(laidOut *gadget.LaidOutStructure, fsDevice string, observer gadget.ContentObserver, partDisp string, perfTimings timings.Measurer) error { var err error timings.Run(perfTimings, fmt.Sprintf("write-content[%s]", partDisp), fmt.Sprintf("Write content for %s", partDisp), func(timings.Measurer) { - err = writeFilesystemContent(part, fsDevice, observer) + err = writeFilesystemContent(laidOut, fsDevice, observer) }) if err != nil { return err @@ -190,22 +192,23 @@ func writePartitionContent(part *gadget.OnDiskStructure, fsDevice string, observ return nil } -func installOnePartition(part *gadget.OnDiskStructure, encryptionType secboot.EncryptionType, sectorSize quantity.Size, observer gadget.ContentObserver, perfTimings timings.Measurer) (fsDevice string, encryptionKey keys.EncryptionKey, err error) { +func installOnePartition(odls *onDiskAndLaidoutStructure, encryptionType secboot.EncryptionType, sectorSize quantity.Size, observer gadget.ContentObserver, perfTimings timings.Measurer) (fsDevice string, encryptionKey keys.EncryptionKey, err error) { // 1. Encrypt - partDisp := roleOrLabelOrName(part) - fsParams, encryptionKey, err := maybeEncryptPartition(part, encryptionType, sectorSize, perfTimings) + part := odls.onDisk + role := odls.laidOut.Role + fsParams, encryptionKey, err := maybeEncryptPartition(odls, encryptionType, sectorSize, perfTimings) if err != nil { - return "", nil, fmt.Errorf("cannot encrypt partition %s: %v", partDisp, err) + return "", nil, fmt.Errorf("cannot encrypt partition %s: %v", role, err) } fsDevice = fsParams.Device // 2. Create filesystem - if err := createFilesystem(part, fsParams, perfTimings); err != nil { + if err := createFilesystem(part, fsParams, role, perfTimings); err != nil { return "", nil, err } // 3. Write content - if err := writePartitionContent(part, fsDevice, observer, perfTimings); err != nil { + if err := writePartitionContent(odls.laidOut, fsDevice, observer, role, perfTimings); err != nil { return "", nil, err } @@ -217,7 +220,7 @@ func installOnePartition(part *gadget.OnDiskStructure, encryptionType secboot.En // structures after that, the laidout volumes, and the disk sector // size. 
func createPartitions(model gadget.Model, gadgetRoot, kernelRoot, bootDevice string, options Options, - perfTimings timings.Measurer) (bootVolGadgetName string, created []gadget.OnDiskStructure, allLaidOutVols map[string]*gadget.LaidOutVolume, bootVolSectorSize quantity.Size, err error) { + perfTimings timings.Measurer) (bootVolGadgetName string, created []onDiskAndLaidoutStructure, allLaidOutVols map[string]*gadget.LaidOutVolume, bootVolSectorSize quantity.Size, err error) { logger.Noticef("installing a new system") logger.Noticef(" gadget data from: %v", gadgetRoot) logger.Noticef(" encryption: %v", options.EncryptionType) @@ -273,7 +276,7 @@ func createPartitions(model gadget.Model, gadgetRoot, kernelRoot, bootDevice str opts := &CreateOptions{ GadgetRootDir: gadgetRoot, } - created, err = CreateMissingPartitions(diskLayout, laidOutBootVol, opts) + created, err = createMissingPartitions(diskLayout, laidOutBootVol, opts) }) if err != nil { return "", nil, nil, 0, fmt.Errorf("cannot create the partitions: %v", err) @@ -317,21 +320,19 @@ func Run(model gadget.Model, gadgetRoot, kernelRoot, bootDevice string, options hasSavePartition := false - for _, part := range created { - roleFmt := "" - if part.Role != "" { - roleFmt = fmt.Sprintf("role %v", part.Role) - } - logger.Noticef("created new partition %v for structure %v (size %v) %s", - part.Node, part, part.Size.IECString(), roleFmt) - if part.Role == gadget.SystemSave { + // Note that all partitions here will have a role (see + // gadget.IsCreatableAtInstall() which defines the list) + for _, odls := range created { + laidOut := odls.laidOut + onDisk := odls.onDisk + logger.Noticef("created new partition %v for structure %v (size %v) with role %s", + onDisk.Node, laidOut, laidOut.Size.IECString(), laidOut.Role) + if laidOut.Role == gadget.SystemSave { hasSavePartition = true } - if part.Role != "" { - // keep track of the /dev/<partition> (actual raw - // device) for each role - devicesForRoles[part.Role] = part.Node - } + // keep track of the /dev/<partition> (actual raw + // device) for each role + devicesForRoles[laidOut.Role] = onDisk.Node // use the diskLayout.SectorSize here instead of lv.SectorSize, we check // that if there is a sector-size specified in the gadget that it @@ -341,7 +342,7 @@ func Run(model gadget.Model, gadgetRoot, kernelRoot, bootDevice string, options // for encrypted device the filesystem device it will point to // the mapper device otherwise it's the raw device node - fsDevice, encryptionKey, err := installOnePartition(&part, options.EncryptionType, + fsDevice, encryptionKey, err := installOnePartition(&odls, options.EncryptionType, bootVolSectorSize, observer, perfTimings) if err != nil { return nil, err @@ -351,11 +352,11 @@ func Run(model gadget.Model, gadgetRoot, kernelRoot, bootDevice string, options if keyForRole == nil { keyForRole = map[string]keys.EncryptionKey{} } - keyForRole[part.Role] = encryptionKey - partsEncrypted[part.Name] = createEncryptionParams(options.EncryptionType) + keyForRole[laidOut.Role] = encryptionKey + partsEncrypted[laidOut.Name] = createEncryptionParams(options.EncryptionType) } - if options.Mount && part.Label != "" && part.HasFilesystem() { - if err := mountFilesystem(fsDevice, part.Filesystem, getMntPointForPart(part.VolumeStructure)); err != nil { + if options.Mount && laidOut.Label != "" && laidOut.HasFilesystem() { + if err := mountFilesystem(fsDevice, laidOut.Filesystem, getMntPointForPart(laidOut.VolumeStructure)); err != nil { return nil, err } } @@ -442,7 +443,7 @@ 
func onDiskVolumeFromPartitionSysfsPath(partPath string) (*gadget.OnDiskVolume, // applyLayoutToOnDiskStructure finds the on disk structure from a // partition node and takes the laid out information from laidOutVols // and inserts it there. -func applyLayoutToOnDiskStructure(onDiskVol *gadget.OnDiskVolume, partNode string, laidOutVols map[string]*gadget.LaidOutVolume, gadgetVolName string) (*gadget.OnDiskStructure, error) { +func applyLayoutToOnDiskStructure(onDiskVol *gadget.OnDiskVolume, partNode string, laidOutVols map[string]*gadget.LaidOutVolume, gadgetVolName string) (*onDiskAndLaidoutStructure, error) { onDiskStruct, err := structureFromPartDevice(onDiskVol, partNode) if err != nil { return nil, fmt.Errorf("cannot find partition %q: %v", partNode, err) @@ -453,9 +454,12 @@ func applyLayoutToOnDiskStructure(onDiskVol *gadget.OnDiskVolume, partNode strin return nil, err } // This fills LaidOutStructure, including (importantly) the ResolvedContent field - onDiskStruct.LaidOutStructure = *laidOutStruct + odls := &onDiskAndLaidoutStructure{ + onDisk: onDiskStruct, + laidOut: laidOutStruct, + } - return onDiskStruct, nil + return odls, nil } func deviceForMaybeEncryptedVolume(volStruct *gadget.VolumeStructure, encSetupData *EncryptionSetupData) string { @@ -508,14 +512,15 @@ func WriteContent(onVolumes map[string]*gadget.Volume, allLaidOutVols map[string // TODO: do we need to consider different // sector sizes for the encrypted/unencrypted // cases here? - onDiskStruct, err := applyLayoutToOnDiskStructure(onDiskVol, volStruct.Device, allLaidOutVols, volName) + odls, err := applyLayoutToOnDiskStructure(onDiskVol, volStruct.Device, allLaidOutVols, volName) if err != nil { return nil, fmt.Errorf("cannot retrieve on disk info for %q: %v", volStruct.Device, err) } device := deviceForMaybeEncryptedVolume(&volStruct, encSetupData) logger.Debugf("writing content on partition %s", device) - if err := writePartitionContent(onDiskStruct, device, observer, perfTimings); err != nil { + partDisp := roleOrLabelOrName(odls.laidOut.Role, odls.onDisk) + if err := writePartitionContent(odls.laidOut, device, observer, partDisp, perfTimings); err != nil { return nil, err } } @@ -566,6 +571,7 @@ func MountVolumes(onVolumes map[string]*gadget.Volume, encSetupData *EncryptionS if part.Filesystem == "" { continue } + mntPt := getMntPointForPart(&part) switch part.Role { case gadget.SystemSeed, gadget.SystemSeedNull: @@ -647,7 +653,7 @@ func EncryptPartitions(onVolumes map[string]*gadget.Volume, encryptionType secbo } } // Obtain partition data and link with laid out information - onDiskStruct, err := applyLayoutToOnDiskStructure(onDiskVol, device, allLaidOutVols, volName) + odls, err := applyLayoutToOnDiskStructure(onDiskVol, device, allLaidOutVols, volName) if err != nil { return nil, fmt.Errorf("cannot retrieve on disk info for %q: %v", device, err) } @@ -655,11 +661,11 @@ func EncryptPartitions(onVolumes map[string]*gadget.Volume, encryptionType secbo logger.Debugf("encrypting partition %s", device) fsParams, encryptionKey, err := - maybeEncryptPartition(onDiskStruct, encryptionType, onDiskVol.SectorSize, perfTimings) + maybeEncryptPartition(odls, encryptionType, onDiskVol.SectorSize, perfTimings) if err != nil { return nil, fmt.Errorf("cannot encrypt %q: %v", device, err) } - setupData.parts[onDiskStruct.Name] = partEncryptionData{ + setupData.parts[odls.onDisk.Name] = partEncryptionData{ role: volStruct.Role, device: device, // EncryptedDevice will be /dev/mapper/ubuntu-data, etc. 
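Aside (not part of the diff): the hunks above replace the old pattern of embedding gadget.LaidOutStructure inside gadget.OnDiskStructure with a small pairing type, onDiskAndLaidoutStructure, so that gadget-declared properties and on-disk facts travel together without one containing the other. The following is a minimal, hypothetical Go sketch of that pairing idea; the type and field names are simplified stand-ins, not the real gadget package definitions.

package main

import "fmt"

// Simplified stand-ins for gadget.OnDiskStructure and gadget.LaidOutStructure;
// the real types carry many more fields.
type onDiskStructure struct {
	Node string // kernel device node found on disk, e.g. /dev/vda4
	Size uint64 // size actually present on disk
}

type laidOutStructure struct {
	Role       string // role declared in the gadget, e.g. system-save
	Filesystem string // filesystem requested by the gadget
}

// onDiskAndLaidOut pairs the two views, analogous to the unexported
// onDiskAndLaidoutStructure helper added in gadget/install.
type onDiskAndLaidOut struct {
	onDisk  *onDiskStructure
	laidOut *laidOutStructure
}

func main() {
	p := onDiskAndLaidOut{
		onDisk:  &onDiskStructure{Node: "/dev/vda4", Size: 16 * 1024 * 1024},
		laidOut: &laidOutStructure{Role: "system-save", Filesystem: "ext4"},
	}
	// Gadget-defined properties (role, filesystem) are read from laidOut,
	// while the device node and real size come from onDisk.
	fmt.Printf("would create %s on %s (role %s, %d bytes)\n",
		p.laidOut.Filesystem, p.onDisk.Node, p.laidOut.Role, p.onDisk.Size)
}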
@@ -747,19 +753,19 @@ func FactoryReset(model gadget.Model, gadgetRoot, kernelRoot, bootDevice string, savePart := partitionsWithRolesAndContent(laidOutBootVol, diskLayout, []string{gadget.SystemSave}) hasSavePartition := len(savePart) != 0 if hasSavePartition { - deviceForRole[gadget.SystemSave] = savePart[0].Node + deviceForRole[gadget.SystemSave] = savePart[0].onDisk.Node } rolesToReset := []string{gadget.SystemBoot, gadget.SystemData} partsToReset := partitionsWithRolesAndContent(laidOutBootVol, diskLayout, rolesToReset) for _, part := range partsToReset { + onDisk := part.onDisk + laidOut := part.laidOut logger.Noticef("resetting %v structure %v (size %v) role %v", - part.Node, part, part.Size.IECString(), part.Role) + onDisk.Node, part, onDisk.Size.IECString(), laidOut.Role) - if part.Role != "" { - // keep track of the /dev/<partition> (actual raw - // device) for each role - deviceForRole[part.Role] = part.Node - } + // keep track of the /dev/<partition> (actual raw + // device) for each role + deviceForRole[laidOut.Role] = onDisk.Node fsDevice, encryptionKey, err := installOnePartition(&part, options.EncryptionType, diskLayout.SectorSize, observer, perfTimings) @@ -770,10 +776,10 @@ func FactoryReset(model gadget.Model, gadgetRoot, kernelRoot, bootDevice string, if keyForRole == nil { keyForRole = map[string]keys.EncryptionKey{} } - keyForRole[part.Role] = encryptionKey + keyForRole[laidOut.Role] = encryptionKey } - if options.Mount && part.Label != "" && part.HasFilesystem() { - if err := mountFilesystem(fsDevice, part.Filesystem, filepath.Join(boot.InitramfsRunMntDir, part.Label)); err != nil { + if options.Mount && onDisk.Label != "" && laidOut.HasFilesystem() { + if err := mountFilesystem(fsDevice, laidOut.Filesystem, getMntPointForPart(laidOut.VolumeStructure)); err != nil { return nil, err } } diff --git a/gadget/install/install_dummy.go b/gadget/install/install_dummy.go index 4a9d3d453a..2cdcc2c9dc 100644 --- a/gadget/install/install_dummy.go +++ b/gadget/install/install_dummy.go @@ -47,7 +47,7 @@ func WriteContent(onVolumes map[string]*gadget.Volume, allLaidOutVols map[string return nil, fmt.Errorf("build without secboot support") } -func MountVolumes(onVolumes map[string]*gadget.Volume, encSetupData *EncryptionSetupData) (espMntDir string, unmount func() error, err error) { +func MountVolumes(onVolumes map[string]*gadget.Volume, encSetupData *EncryptionSetupData) (seedMntDir string, unmount func() error, err error) { return "", nil, fmt.Errorf("build without secboot support") } diff --git a/gadget/install/install_test.go b/gadget/install/install_test.go index fcdbed8b28..8a93bad9cc 100644 --- a/gadget/install/install_test.go +++ b/gadget/install/install_test.go @@ -32,9 +32,7 @@ import ( . 
"gopkg.in/check.v1" - "github.com/snapcore/snapd/asserts" "github.com/snapcore/snapd/boot" - "github.com/snapcore/snapd/boot/boottest" "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/gadget" "github.com/snapcore/snapd/gadget/gadgettest" @@ -162,69 +160,85 @@ func (s *installSuite) testInstall(c *C, opts installOpts) { defer mockBlockdev.Restore() } - restore = install.MockEnsureNodesExist(func(dss []gadget.OnDiskStructure, timeout time.Duration) error { + restore = install.MockEnsureNodesExist(func(dss []install.OnDiskAndLaidoutStructure, timeout time.Duration) error { c.Assert(timeout, Equals, 5*time.Second) - c.Assert(dss, DeepEquals, []gadget.OnDiskStructure{ - { - LaidOutStructure: gadget.LaidOutStructure{ + c.Assert(dss, DeepEquals, []install.OnDiskAndLaidoutStructure{ + install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{ + Name: "ubuntu-boot", + Label: "ubuntu-boot", + Type: "0C", + Filesystem: "vfat", + StartOffset: (1 + 1200) * quantity.OffsetMiB, + // note this is YamlIndex + 1, the YamlIndex starts at 0 + DiskIndex: 2, + Node: "/dev/mmcblk0p2", + Size: 750 * quantity.SizeMiB, + }, + &gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ VolumeName: "pi", Name: "ubuntu-boot", Label: "ubuntu-boot", - Size: 750 * quantity.SizeMiB, Type: "0C", Role: gadget.SystemBoot, Filesystem: "vfat", + Size: 750 * quantity.SizeMiB, }, StartOffset: (1 + 1200) * quantity.OffsetMiB, YamlIndex: 1, + }), + install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{ + Name: "ubuntu-save", + Label: "ubuntu-save", + Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", + Filesystem: "ext4", + StartOffset: (1 + 1200 + 750) * quantity.OffsetMiB, + // note this is YamlIndex + 1, the YamlIndex starts at 0 + DiskIndex: 3, + Node: "/dev/mmcblk0p3", + Size: 16 * quantity.SizeMiB, }, - // note this is YamlIndex + 1, the YamlIndex starts at 0 - DiskIndex: 2, - Node: "/dev/mmcblk0p2", - Size: 750 * quantity.SizeMiB, - }, - { - LaidOutStructure: gadget.LaidOutStructure{ + &gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ VolumeName: "pi", Name: "ubuntu-save", Label: "ubuntu-save", - Size: 16 * quantity.SizeMiB, Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", Role: gadget.SystemSave, Filesystem: "ext4", + Size: 16 * quantity.SizeMiB, }, StartOffset: (1 + 1200 + 750) * quantity.OffsetMiB, YamlIndex: 2, + }), + install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{ + Name: "ubuntu-data", + Label: "ubuntu-data", + Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", + Filesystem: "ext4", + StartOffset: (1 + 1200 + 750 + 16) * quantity.OffsetMiB, + // note this is YamlIndex + 1, the YamlIndex starts at 0 + DiskIndex: 4, + Node: "/dev/mmcblk0p4", + Size: (30528 - (1 + 1200 + 750 + 16)) * quantity.SizeMiB, }, - // note this is YamlIndex + 1, the YamlIndex starts at 0 - DiskIndex: 3, - Node: "/dev/mmcblk0p3", - Size: 16 * quantity.SizeMiB, - }, - { - LaidOutStructure: gadget.LaidOutStructure{ + &gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ VolumeName: "pi", Name: "ubuntu-data", Label: "ubuntu-data", - // TODO: this is set from the yaml, not from the actual - // calculated disk size, probably should be updated - // somewhere - Size: 1500 * quantity.SizeMiB, Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", Role: gadget.SystemData, Filesystem: "ext4", + // as set in gadgettest.RaspiSimplifiedYaml + Size: 1500 * quantity.SizeMiB, }, StartOffset: (1 + 1200 + 750 + 16) * quantity.OffsetMiB, YamlIndex: 3, - }, - // note this is YamlIndex + 1, 
the YamlIndex starts at 0 - DiskIndex: 4, - Node: "/dev/mmcblk0p4", - Size: (30528 - (1 + 1200 + 750 + 16)) * quantity.SizeMiB, - }, + }), }) // after ensuring that the nodes exist, we now setup a different, full @@ -298,27 +312,33 @@ func (s *installSuite) testInstall(c *C, opts installOpts) { switch mountCall { case 1: c.Assert(source, Equals, "/dev/mmcblk0p2") - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/2")) + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/dev-mmcblk0p2")) c.Assert(fstype, Equals, "vfat") c.Assert(flags, Equals, uintptr(0)) c.Assert(data, Equals, "") case 2: + var mntPoint string if opts.encryption { c.Assert(source, Equals, "/dev/mapper/ubuntu-save") + mntPoint = "gadget-install/dev-mapper-ubuntu-save" } else { c.Assert(source, Equals, "/dev/mmcblk0p3") + mntPoint = "gadget-install/dev-mmcblk0p3" } - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/3")) + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, mntPoint)) c.Assert(fstype, Equals, "ext4") c.Assert(flags, Equals, uintptr(0)) c.Assert(data, Equals, "") case 3: + var mntPoint string if opts.encryption { c.Assert(source, Equals, "/dev/mapper/ubuntu-data") + mntPoint = "gadget-install/dev-mapper-ubuntu-data" } else { c.Assert(source, Equals, "/dev/mmcblk0p4") + mntPoint = "gadget-install/dev-mmcblk0p4" } - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/4")) + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, mntPoint)) c.Assert(fstype, Equals, "ext4") c.Assert(flags, Equals, uintptr(0)) c.Assert(data, Equals, "") @@ -335,13 +355,21 @@ func (s *installSuite) testInstall(c *C, opts installOpts) { umountCall++ switch umountCall { case 1: - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/2")) + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/dev-mmcblk0p2")) c.Assert(flags, Equals, 0) case 2: - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/3")) + mntPoint := "gadget-install/dev-mmcblk0p3" + if opts.encryption { + mntPoint = "gadget-install/dev-mapper-ubuntu-save" + } + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, mntPoint)) c.Assert(flags, Equals, 0) case 3: - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/4")) + mntPoint := "gadget-install/dev-mmcblk0p4" + if opts.encryption { + mntPoint = "gadget-install/dev-mapper-ubuntu-data" + } + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, mntPoint)) c.Assert(flags, Equals, 0) default: c.Errorf("unexpected umount call (%d)", umountCall) @@ -647,95 +675,117 @@ func (s *installSuite) testFactoryReset(c *C, opts factoryResetOpts) { if opts.encryption { dataDev = "/dev/mapper/ubuntu-data" } - restore = install.MockEnsureNodesExist(func(dss []gadget.OnDiskStructure, timeout time.Duration) error { + restore = install.MockEnsureNodesExist(func(dss []install.OnDiskAndLaidoutStructure, timeout time.Duration) error { c.Assert(timeout, Equals, 5*time.Second) - expectedDss := []gadget.OnDiskStructure{ - { - LaidOutStructure: gadget.LaidOutStructure{ + expectedDss := []install.OnDiskAndLaidoutStructure{ + install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{ + Name: "ubuntu-boot", + Label: "ubuntu-boot", + Size: 750 * quantity.SizeMiB, + Type: "0C", + Filesystem: "vfat", + StartOffset: (1 + 1200) * quantity.OffsetMiB, + // note this is YamlIndex + 1, the YamlIndex starts at 0 + DiskIndex: 2, + Node: "/dev/mmcblk0p2", + }, + 
&gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ VolumeName: "pi", - Name: "ubuntu-boot", - Label: "ubuntu-boot", - Size: 750 * quantity.SizeMiB, - Type: "0C", - Role: gadget.SystemBoot, Filesystem: "vfat", + Size: 750 * quantity.SizeMiB, }, StartOffset: (1 + 1200) * quantity.OffsetMiB, YamlIndex: 1, }, - // note this is YamlIndex + 1, the YamlIndex starts at 0 - DiskIndex: 2, - Node: "/dev/mmcblk0p2", - Size: 750 * quantity.SizeMiB, - }, + ), } if opts.noSave { // just data - expectedDss = append(expectedDss, gadget.OnDiskStructure{ - LaidOutStructure: gadget.LaidOutStructure{ + expectedDss = append(expectedDss, install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{ + Name: "ubuntu-data", + Label: "ubuntu-data", + Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", + Filesystem: "ext4", + StartOffset: (1 + 1200 + 750) * quantity.OffsetMiB, + // note this is YamlIndex + 1, the YamlIndex starts at 0 + DiskIndex: 3, + Node: dataDev, + Size: (30528 - (1 + 1200 + 750)) * quantity.SizeMiB, + }, + &gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ VolumeName: "pi", Name: "ubuntu-data", Label: "ubuntu-data", - // TODO: this is set from the yaml, not from the actual - // calculated disk size, probably should be updated - // somewhere - Size: 1500 * quantity.SizeMiB, Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", Role: gadget.SystemData, Filesystem: "ext4", + Size: (30528 - (1 + 1200 + 750)) * quantity.SizeMiB, }, StartOffset: (1 + 1200 + 750) * quantity.OffsetMiB, YamlIndex: 2, }, - // note this is YamlIndex + 1, the YamlIndex starts at 0 - DiskIndex: 3, - Node: dataDev, - Size: (30528 - (1 + 1200 + 750)) * quantity.SizeMiB, - }) + )) } else { // data + save - expectedDss = append(expectedDss, []gadget.OnDiskStructure{{ - LaidOutStructure: gadget.LaidOutStructure{ + expectedDss = append(expectedDss, install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{ + Name: "ubuntu-save", + Label: "ubuntu-save", + Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", + Filesystem: "ext4", + StartOffset: (1 + 1200 + 750) * quantity.OffsetMiB, + // note this is YamlIndex + 1, the YamlIndex starts at 0 + DiskIndex: 3, + Node: "/dev/mmcblk0p3", + Size: 16 * quantity.SizeMiB, + }, + &gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ VolumeName: "pi", Name: "ubuntu-save", Label: "ubuntu-save", - Size: 16 * quantity.SizeMiB, Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", Role: gadget.SystemSave, Filesystem: "ext4", + Size: 16 * quantity.SizeMiB, }, StartOffset: (1 + 1200 + 750) * quantity.OffsetMiB, YamlIndex: 2, }, - // note this is YamlIndex + 1, the YamlIndex starts at 0 - DiskIndex: 3, - Node: "/dev/mmcblk0p3", - Size: 16 * quantity.SizeMiB, - }, { - LaidOutStructure: gadget.LaidOutStructure{ + )) + expectedDss = append(expectedDss, install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{ + Name: "ubuntu-data", + Label: "ubuntu-data", + Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", + Filesystem: "ext4", + StartOffset: (1 + 1200 + 750 + 16) * quantity.OffsetMiB, + // note this is YamlIndex + 1, the YamlIndex starts at 0 + DiskIndex: 4, + Node: dataDev, + Size: (30528 - (1 + 1200 + 750 + 16)) * quantity.SizeMiB, + }, + &gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ VolumeName: "pi", Name: "ubuntu-data", Label: "ubuntu-data", - // TODO: this is set from the yaml, not from the actual - // calculated disk size, probably should be updated - // somewhere - Size: 1500 * quantity.SizeMiB, Type: 
"83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", Role: gadget.SystemData, Filesystem: "ext4", + // TODO: this is set from the yaml, not from the actual + // calculated disk size, probably should be updated + // somewhere + Size: 1500 * quantity.SizeMiB, }, StartOffset: (1 + 1200 + 750 + 16) * quantity.OffsetMiB, YamlIndex: 3, }, - // note this is YamlIndex + 1, the YamlIndex starts at 0 - DiskIndex: 4, - Node: dataDev, - Size: (30528 - (1 + 1200 + 750 + 16)) * quantity.SizeMiB, - }}...) + )) } c.Assert(dss, DeepEquals, expectedDss) @@ -781,16 +831,20 @@ func (s *installSuite) testFactoryReset(c *C, opts factoryResetOpts) { switch mountCall { case 1: c.Assert(source, Equals, "/dev/mmcblk0p2") - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/2")) + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/dev-mmcblk0p2")) c.Assert(fstype, Equals, "vfat") c.Assert(flags, Equals, uintptr(0)) c.Assert(data, Equals, "") case 2: c.Assert(source, Equals, dataDev) if opts.noSave { - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/3")) + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/dev-mmcblk0p3")) } else { - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/4")) + mntPoint := "gadget-install/dev-mmcblk0p4" + if opts.encryption { + mntPoint = "gadget-install/dev-mapper-ubuntu-data" + } + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, mntPoint)) } c.Assert(fstype, Equals, "ext4") c.Assert(flags, Equals, uintptr(0)) @@ -808,13 +862,17 @@ func (s *installSuite) testFactoryReset(c *C, opts factoryResetOpts) { umountCall++ switch umountCall { case 1: - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/2")) + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/dev-mmcblk0p2")) c.Assert(flags, Equals, 0) case 2: if opts.noSave { - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/3")) + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/dev-mmcblk0p3")) } else { - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, "gadget-install/4")) + mntPoint := "gadget-install/dev-mmcblk0p4" + if opts.encryption { + mntPoint = "gadget-install/dev-mapper-ubuntu-data" + } + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, mntPoint)) } c.Assert(flags, Equals, 0) default: @@ -964,89 +1022,19 @@ func (s *installSuite) TestFactoryResetHappyEncrypted(c *C) { }) } -func mockGadgetPartitionedDisk(c *C) (*gadget.Info, map[string]*gadget.LaidOutVolume, *asserts.Model, string, func()) { - cleanups := []func(){} - addCleanup := func(r func()) { cleanups = append(cleanups, r) } - cleanup := func() { - for _, r := range cleanups { - r() - } - } - - // TODO test for UC systems too - model := boottest.MakeMockClassicWithModesModel() - - // Create gadget with all files - gadgetRoot := filepath.Join(c.MkDir(), "gadget") - err := makeMockGadget(gadgetRoot, gadgettest.SingleVolumeClassicWithModesGadgetYaml) - c.Assert(err, IsNil) - _, allLaidOutVols, err := gadget.LaidOutVolumesFromGadget(gadgetRoot, "", model) - c.Assert(err, IsNil) - - ginfo, err := gadget.ReadInfo(gadgetRoot, model) - c.Assert(err, IsNil) - - vdaSysPath := "/sys/devices/pci0000:00/0000:00:03.0/virtio1/block/vda" - restore := install.MockSysfsPathForBlockDevice(func(device string) (string, error) { - c.Assert(strings.HasPrefix(device, "/dev/vda"), Equals, true) - return filepath.Join(vdaSysPath, filepath.Base(device)), nil - }) - addCleanup(restore) - - // "Real" 
disk data that will be read - disk := &disks.MockDiskMapping{ - Structure: []disks.Partition{ - { - PartitionLabel: "BIOS\x20Boot", - KernelDeviceNode: "/dev/vda1", - DiskIndex: 1, - }, - { - PartitionLabel: "EFI System partition", - KernelDeviceNode: "/dev/vda2", - DiskIndex: 2, - }, - { - PartitionLabel: "ubuntu-boot", - KernelDeviceNode: "/dev/vda3", - DiskIndex: 3, - }, - { - PartitionLabel: "ubuntu-save", - KernelDeviceNode: "/dev/vda4", - DiskIndex: 4, - }, - { - PartitionLabel: "ubuntu-data", - KernelDeviceNode: "/dev/vda5", - DiskIndex: 5, - }, - }, - DiskHasPartitions: true, - DevNum: "disk1", - DevNode: "/dev/vda", - DevPath: vdaSysPath, - } - diskMapping := map[string]*disks.MockDiskMapping{ - vdaSysPath: disk, - // this simulates a symlink in /sys/block which points to the above path - "/sys/block/vda": disk, - } - restore = disks.MockDevicePathToDiskMapping(diskMapping) - addCleanup(restore) - - return ginfo, allLaidOutVols, model, gadgetRoot, cleanup -} - type writeContentOpts struct { encryption bool } func (s *installSuite) testWriteContent(c *C, opts writeContentOpts) { - espMntPt := filepath.Join(dirs.SnapRunDir, "gadget-install/2") - bootMntPt := filepath.Join(dirs.SnapRunDir, "gadget-install/3") - saveMntPt := filepath.Join(dirs.SnapRunDir, "gadget-install/4") - dataMntPt := filepath.Join(dirs.SnapRunDir, "gadget-install/5") + espMntPt := filepath.Join(dirs.SnapRunDir, "gadget-install/dev-vda2") + bootMntPt := filepath.Join(dirs.SnapRunDir, "gadget-install/dev-vda3") + saveMntPt := filepath.Join(dirs.SnapRunDir, "gadget-install/dev-vda4") + dataMntPt := filepath.Join(dirs.SnapRunDir, "gadget-install/dev-vda5") + if opts.encryption { + saveMntPt = filepath.Join(dirs.SnapRunDir, "gadget-install/dev-mapper-ubuntu-save") + dataMntPt = filepath.Join(dirs.SnapRunDir, "gadget-install/dev-mapper-ubuntu-data") + } mountCall := 0 restore := install.MockSysMount(func(source, target, fstype string, flags uintptr, data string) error { mountCall++ @@ -1094,18 +1082,46 @@ func (s *installSuite) testWriteContent(c *C, opts writeContentOpts) { umountCall := 0 restore = install.MockSysUnmount(func(target string, flags int) error { umountCall++ - if umountCall > 4 { + switch umountCall { + case 1: + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, + "gadget-install/dev-vda2")) + case 2: + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, + "gadget-install/dev-vda3")) + case 3: + mntPoint := "gadget-install/dev-vda4" + if opts.encryption { + mntPoint = "gadget-install/dev-mapper-ubuntu-save" + } + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, + mntPoint)) + case 4: + mntPoint := "gadget-install/dev-vda5" + if opts.encryption { + mntPoint = "gadget-install/dev-mapper-ubuntu-data" + } + c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, + mntPoint)) + default: c.Errorf("unexpected umount call (%d)", umountCall) return fmt.Errorf("test broken") } - c.Assert(target, Equals, filepath.Join(dirs.SnapRunDir, - "gadget-install/"+strconv.Itoa(umountCall+1))) c.Assert(flags, Equals, 0) return nil }) defer restore() - ginfo, allLaidOutVols, _, _, restore := mockGadgetPartitionedDisk(c) + vdaSysPath := "/sys/devices/pci0000:00/0000:00:03.0/virtio1/block/vda" + restore = install.MockSysfsPathForBlockDevice(func(device string) (string, error) { + c.Assert(strings.HasPrefix(device, "/dev/vda"), Equals, true) + return filepath.Join(vdaSysPath, filepath.Base(device)), nil + }) + defer restore() + + gadgetRoot := filepath.Join(c.MkDir(), "gadget") + ginfo, allLaidOutVols, _, 
restore, err := gadgettest.MockGadgetPartitionedDisk(gadgettest.SingleVolumeClassicWithModesGadgetYaml, gadgetRoot) + c.Assert(err, IsNil) defer restore() // 10 million mocks later ... @@ -1123,11 +1139,17 @@ func (s *installSuite) testWriteContent(c *C, opts writeContentOpts) { // Fill encrypted partitions if encrypting var esd *install.EncryptionSetupData if opts.encryption { - labelToEncDevice := map[string]string{ - "ubuntu-save": "/dev/mapper/ubuntu-save", - "ubuntu-data": "/dev/mapper/ubuntu-data", + labelToEncData := map[string]*install.MockEncryptedDeviceAndRole{ + "ubuntu-save": { + Role: "system-save", + EncryptedDevice: "/dev/mapper/ubuntu-save", + }, + "ubuntu-data": { + Role: "system-data", + EncryptedDevice: "/dev/mapper/ubuntu-data", + }, } - esd = install.BuildEncryptionSetupData(labelToEncDevice) + esd = install.MockEncryptionSetupData(labelToEncData) } onDiskVols, err := install.WriteContent(ginfo.Volumes, allLaidOutVols, esd, nil, timings.New(nil)) c.Assert(err, IsNil) @@ -1178,7 +1200,16 @@ type encryptPartitionsOpts struct { } func (s *installSuite) testEncryptPartitions(c *C, opts encryptPartitionsOpts) { - ginfo, _, model, gadgetRoot, restore := mockGadgetPartitionedDisk(c) + vdaSysPath := "/sys/devices/pci0000:00/0000:00:03.0/virtio1/block/vda" + restore := install.MockSysfsPathForBlockDevice(func(device string) (string, error) { + c.Assert(strings.HasPrefix(device, "/dev/vda"), Equals, true) + return filepath.Join(vdaSysPath, filepath.Base(device)), nil + }) + defer restore() + + gadgetRoot := filepath.Join(c.MkDir(), "gadget") + ginfo, _, model, restore, err := gadgettest.MockGadgetPartitionedDisk(gadgettest.SingleVolumeClassicWithModesGadgetYaml, gadgetRoot) + c.Assert(err, IsNil) defer restore() mockCryptsetup := testutil.MockCommand(c, "cryptsetup", "") @@ -1222,7 +1253,16 @@ func (s *installSuite) TestInstallEncryptPartitionsLUKSHappy(c *C) { } func (s *installSuite) TestInstallEncryptPartitionsNoDeviceSet(c *C) { - ginfo, _, model, gadgetRoot, restore := mockGadgetPartitionedDisk(c) + vdaSysPath := "/sys/devices/pci0000:00/0000:00:03.0/virtio1/block/vda" + restore := install.MockSysfsPathForBlockDevice(func(device string) (string, error) { + c.Assert(strings.HasPrefix(device, "/dev/vda"), Equals, true) + return filepath.Join(vdaSysPath, filepath.Base(device)), nil + }) + defer restore() + + gadgetRoot := filepath.Join(c.MkDir(), "gadget") + ginfo, _, model, restore, err := gadgettest.MockGadgetPartitionedDisk(gadgettest.SingleVolumeClassicWithModesGadgetYaml, gadgetRoot) + c.Assert(err, IsNil) defer restore() encryptSetup, err := install.EncryptPartitions(ginfo.Volumes, secboot.EncryptionTypeLUKS, model, gadgetRoot, "", timings.New(nil)) diff --git a/gadget/install/params.go b/gadget/install/params.go index aa6cc760b7..6d3c2dcc5b 100644 --- a/gadget/install/params.go +++ b/gadget/install/params.go @@ -50,8 +50,9 @@ type partEncryptionData struct { device string encryptedDevice string - volName string - encryptionKey keys.EncryptionKey + volName string + encryptionKey keys.EncryptionKey + // TODO: this is currently not used encryptedSectorSize quantity.Size encryptionParams gadget.StructureEncryptionParameters } @@ -71,3 +72,26 @@ func (esd *EncryptionSetupData) EncryptedDevices() map[string]string { } return m } + +// MockEncryptedDeviceAndRole is meant to be used for unit tests from other +// packages. 
+type MockEncryptedDeviceAndRole struct { + Role string + EncryptedDevice string +} + +// MockEncryptionSetupData is meant to be used for unit tests from other +// packages. +func MockEncryptionSetupData(labelToEncDevice map[string]*MockEncryptedDeviceAndRole) *EncryptionSetupData { + esd := &EncryptionSetupData{ + parts: map[string]partEncryptionData{}} + for label, encryptData := range labelToEncDevice { + esd.parts[label] = partEncryptionData{ + role: encryptData.Role, + encryptedDevice: encryptData.EncryptedDevice, + encryptionKey: keys.EncryptionKey{1, 2, 3}, + encryptedSectorSize: 512, + } + } + return esd +} diff --git a/gadget/install/partition.go b/gadget/install/partition.go index d7e46dd6a1..9102edb9a8 100644 --- a/gadget/install/partition.go +++ b/gadget/install/partition.go @@ -66,10 +66,25 @@ type CreateOptions struct { CreateAllMissingPartitions bool } -// CreateMissingPartitions creates the partitions listed in the laid out volume +// CreateMissingPartitions calls createMissingPartitions but returns only +// OnDiskStructure, as it is meant to be used externally (i.e. by +// muinstaller). +func CreateMissingPartitions(dl *gadget.OnDiskVolume, pv *gadget.LaidOutVolume, opts *CreateOptions) ([]gadget.OnDiskStructure, error) { + odlsStructures, err := createMissingPartitions(dl, pv, opts) + if err != nil { + return nil, err + } + onDiskStructures := []gadget.OnDiskStructure{} + for _, odls := range odlsStructures { + onDiskStructures = append(onDiskStructures, *odls.onDisk) + } + return onDiskStructures, nil +} + +// createMissingPartitions creates the partitions listed in the laid out volume // pv that are missing from the existing device layout, returning a list of // structures that have been created. -func CreateMissingPartitions(dl *gadget.OnDiskVolume, pv *gadget.LaidOutVolume, opts *CreateOptions) ([]gadget.OnDiskStructure, error) { +func createMissingPartitions(dl *gadget.OnDiskVolume, pv *gadget.LaidOutVolume, opts *CreateOptions) ([]onDiskAndLaidoutStructure, error) { if opts == nil { opts = &CreateOptions{} } @@ -118,7 +133,7 @@ func CreateMissingPartitions(dl *gadget.OnDiskVolume, pv *gadget.LaidOutVolume, // device contents and gadget structure list, in sfdisk dump format, and // returns a partitioning description suitable for sfdisk input and a // list of the partitions to be created. 
-func buildPartitionList(dl *gadget.OnDiskVolume, pv *gadget.LaidOutVolume, opts *CreateOptions) (sfdiskInput *bytes.Buffer, toBeCreated []gadget.OnDiskStructure, err error) { +func buildPartitionList(dl *gadget.OnDiskVolume, pv *gadget.LaidOutVolume, opts *CreateOptions) (sfdiskInput *bytes.Buffer, toBeCreated []onDiskAndLaidoutStructure, err error) { if opts == nil { opts = &CreateOptions{} } @@ -150,6 +165,8 @@ func buildPartitionList(dl *gadget.OnDiskVolume, pv *gadget.LaidOutVolume, opts // Write new partition data in named-fields format buf := &bytes.Buffer{} for _, p := range pv.LaidOutStructure { + // Make loop var per-iter as we store the pointer in the results + p := p if !p.IsPartition() { continue } @@ -180,11 +197,18 @@ func buildPartitionList(dl *gadget.OnDiskVolume, pv *gadget.LaidOutVolume, opts // synthesize the node name and on disk structure node := deviceName(dl.Device, pIndex) - ps := gadget.OnDiskStructure{ - LaidOutStructure: p, - Node: node, - DiskIndex: pIndex, - Size: quantity.Size(newSizeInSectors * sectorSize), + ps := onDiskAndLaidoutStructure{ + onDisk: &gadget.OnDiskStructure{ + Name: p.Name, + Label: p.Label, + Type: p.Type, + Filesystem: p.Filesystem, + StartOffset: p.StartOffset, + Node: node, + DiskIndex: pIndex, + Size: quantity.Size(newSizeInSectors * sectorSize), + }, + laidOut: &p, } // format sfdisk input for creating this partition @@ -282,7 +306,7 @@ func removeCreatedPartitions(gadgetRoot string, lv *gadget.LaidOutVolume, dl *ga return nil } -func partitionsWithRolesAndContent(lv *gadget.LaidOutVolume, dl *gadget.OnDiskVolume, roles []string) []gadget.OnDiskStructure { +func partitionsWithRolesAndContent(lv *gadget.LaidOutVolume, dl *gadget.OnDiskVolume, roles []string) []onDiskAndLaidoutStructure { roleForOffset := map[quantity.Offset]*gadget.LaidOutStructure{} for idx, gs := range lv.LaidOutStructure { if gs.Role != "" { @@ -290,41 +314,46 @@ func partitionsWithRolesAndContent(lv *gadget.LaidOutVolume, dl *gadget.OnDiskVo } } - var parts []gadget.OnDiskStructure + var odloStructures []onDiskAndLaidoutStructure for _, part := range dl.Structure { + // Create per-iter var from loop variable as we store the pointer in odls + part := part gs := roleForOffset[part.StartOffset] if gs == nil || gs.Role == "" || !strutil.ListContains(roles, gs.Role) { continue } - // now that we have a match, override the laid out structure - // such that the fields reflect what was declared in the gadget, - // the on-disk-structure already has the right size as read from - // the partition table - part.LaidOutStructure = *gs - parts = append(parts, part) - } - return parts + // now that we have a match, set the laid out structure such + // that the fields reflect what was declared in the gadget, the + // on-disk-structure already has the right size as read from the + // partition table + odls := onDiskAndLaidoutStructure{ + onDisk: &part, + laidOut: gs, + } + odloStructures = append(odloStructures, odls) + } + return odloStructures } // ensureNodeExists makes sure the device nodes for all device structures are // available and notified to udev, within a specified amount of time. 
-func ensureNodesExistImpl(dss []gadget.OnDiskStructure, timeout time.Duration) error { +func ensureNodesExistImpl(odloStructures []onDiskAndLaidoutStructure, timeout time.Duration) error { t0 := time.Now() - for _, ds := range dss { + for _, odls := range odloStructures { found := false for time.Since(t0) < timeout { - if osutil.FileExists(ds.Node) { + if osutil.FileExists(odls.onDisk.Node) { found = true break } time.Sleep(100 * time.Millisecond) } if found { - if err := udevTrigger(ds.Node); err != nil { + if err := udevTrigger(odls.onDisk.Node); err != nil { return err } } else { - return fmt.Errorf("device %s not available", ds.Node) + return fmt.Errorf("device %s not available", odls.onDisk.Node) } } return nil diff --git a/gadget/install/partition_test.go b/gadget/install/partition_test.go index c499544470..3a16ba2938 100644 --- a/gadget/install/partition_test.go +++ b/gadget/install/partition_test.go @@ -139,8 +139,37 @@ func makeMockDiskMappingIncludingPartitions(num int) *disks.MockDiskMapping { } var mockOnDiskStructureWritable = gadget.OnDiskStructure{ - Node: "/dev/node3", - LaidOutStructure: gadget.LaidOutStructure{ + Node: "/dev/node3", + Name: "Writable", + Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", + Label: "ubuntu-data", + Filesystem: "ext4", + StartOffset: 1260388352, + // Note the DiskIndex appears to be the same as the YamlIndex, but this is + // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a + // yaml structure (the MBR) that does not appear on disk + DiskIndex: 3, + // expanded to fill the disk + Size: 2*quantity.SizeGiB + 845*quantity.SizeMiB + 1031680, +} + +var mockOnDiskAndLaidoutStructureWritable = install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{ + Node: "/dev/node3", + Name: "Writable", + //Size: 1258291200, + Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", + Label: "ubuntu-data", + Filesystem: "ext4", + StartOffset: 1260388352, + // Note the DiskIndex appears to be the same as the YamlIndex, but this is + // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a + // yaml structure (the MBR) that does not appear on disk + DiskIndex: 3, + // expanded to fill the disk + Size: 2*quantity.SizeGiB + 845*quantity.SizeMiB + 1031680, + }, + &gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ VolumeName: "pc", Name: "Writable", @@ -149,43 +178,60 @@ var mockOnDiskStructureWritable = gadget.OnDiskStructure{ Role: "system-data", Label: "ubuntu-data", Filesystem: "ext4", + // Note the DiskIndex appears to be the same as the YamlIndex, but this is + // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a + // yaml structure (the MBR) that does not appear on disk }, StartOffset: 1260388352, YamlIndex: 3, }, - // Note the DiskIndex appears to be the same as the YamlIndex, but this is - // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a - // yaml structure (the MBR) that does not appear on disk - DiskIndex: 3, - // expanded to fill the disk - Size: 2*quantity.SizeGiB + 845*quantity.SizeMiB + 1031680, -} +) -var mockOnDiskStructureSave = gadget.OnDiskStructure{ - Node: "/dev/node3", - LaidOutStructure: gadget.LaidOutStructure{ +var mockOnDiskAndLaidoutStructureSave = install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{ + Node: "/dev/node3", + Name: "Save", + Size: 128 * quantity.SizeMiB, + Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", + Label: "ubuntu-save", + Filesystem: "ext4", + StartOffset: 1260388352, + // Note the DiskIndex appears 
to be the same as the YamlIndex, but this is + // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a + // yaml structure (the MBR) that does not appear on disk + DiskIndex: 3, + }, + &gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ VolumeName: "pc", Name: "Save", + Label: "ubuntu-save", Size: 128 * quantity.SizeMiB, Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", Role: "system-save", - Label: "ubuntu-save", Filesystem: "ext4", }, StartOffset: 1260388352, YamlIndex: 3, }, - // Note the DiskIndex appears to be the same as the YamlIndex, but this is - // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a - // yaml structure (the MBR) that does not appear on disk - DiskIndex: 3, - Size: 128 * quantity.SizeMiB, -} +) -var mockOnDiskStructureWritableAfterSave = gadget.OnDiskStructure{ - Node: "/dev/node4", - LaidOutStructure: gadget.LaidOutStructure{ +var mockOnDiskAndLaidoutStructureWritableAfterSave = install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{ + Node: "/dev/node4", + Name: "Writable", + // expanded to fill the disk + Size: 2*quantity.SizeGiB + 717*quantity.SizeMiB + 1031680, + Type: "83,0FC63DAF-8483-4772-8E79-3D69D8477DE4", + Label: "ubuntu-data", + Filesystem: "ext4", + StartOffset: 1394606080, + // Note the DiskIndex appears to be the same as the YamlIndex, but this is + // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a + // yaml structure (the MBR) that does not appear on disk + DiskIndex: 4, + }, + &gadget.LaidOutStructure{ VolumeStructure: &gadget.VolumeStructure{ VolumeName: "pc", Name: "Writable", @@ -198,13 +244,7 @@ var mockOnDiskStructureWritableAfterSave = gadget.OnDiskStructure{ StartOffset: 1394606080, YamlIndex: 4, }, - // Note the DiskIndex appears to be the same as the YamlIndex, but this is - // because YamlIndex starts at 0 and DiskIndex starts at 1, and there is a - // yaml structure (the MBR) that does not appear on disk - DiskIndex: 4, - // expanded to fill the disk - Size: 2*quantity.SizeGiB + 717*quantity.SizeMiB + 1031680, -} +) type uc20Model struct{} @@ -221,7 +261,7 @@ func (s *partitionTestSuite) TestBuildPartitionList(c *C) { restore := disks.MockDeviceNameToDiskMapping(m) defer restore() - err := makeMockGadget(s.gadgetRoot, gptGadgetContentWithSave) + err := gadgettest.MakeMockGadget(s.gadgetRoot, gptGadgetContentWithSave) c.Assert(err, IsNil) pv, err := gadgettest.MustLayOutSingleVolumeFromGadget(s.gadgetRoot, "", uc20Mod) c.Assert(err, IsNil) @@ -238,7 +278,10 @@ func (s *partitionTestSuite) TestBuildPartitionList(c *C) { /dev/node4 : start= 2723840, size= 5664735, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, name="Writable" `) c.Check(create, NotNil) - c.Assert(create, DeepEquals, []gadget.OnDiskStructure{mockOnDiskStructureSave, mockOnDiskStructureWritableAfterSave}) + c.Assert(create, DeepEquals, []install.OnDiskAndLaidoutStructure{ + mockOnDiskAndLaidoutStructureSave, + mockOnDiskAndLaidoutStructureWritableAfterSave, + }) } func (s *partitionTestSuite) TestBuildPartitionListOnlyCreatablePartitions(c *C) { @@ -255,7 +298,7 @@ func (s *partitionTestSuite) TestBuildPartitionListOnlyCreatablePartitions(c *C) restore := disks.MockDeviceNameToDiskMapping(m) defer restore() - err := makeMockGadget(s.gadgetRoot, gptGadgetContentWithSave) + err := gadgettest.MakeMockGadget(s.gadgetRoot, gptGadgetContentWithSave) c.Assert(err, IsNil) pv, err := gadgettest.MustLayOutSingleVolumeFromGadget(s.gadgetRoot, "", uc20Mod) c.Assert(err, IsNil) @@ -282,15 +325,15 @@ 
func (s *partitionTestSuite) TestCreatePartitions(c *C) { defer cmdUdevadm.Restore() calls := 0 - restore = install.MockEnsureNodesExist(func(ds []gadget.OnDiskStructure, timeout time.Duration) error { + restore = install.MockEnsureNodesExist(func(ds []install.OnDiskAndLaidoutStructure, timeout time.Duration) error { calls++ c.Assert(ds, HasLen, 1) - c.Assert(ds[0].Node, Equals, "/dev/node3") + c.Assert(install.OnDiskFromOnDiskAndLaidoutStructure(ds[0]).Node, Equals, "/dev/node3") return nil }) defer restore() - err := makeMockGadget(s.gadgetRoot, gadgetContent) + err := gadgettest.MakeMockGadget(s.gadgetRoot, gadgetContent) c.Assert(err, IsNil) pv, err := gadgettest.MustLayOutSingleVolumeFromGadget(s.gadgetRoot, "", uc20Mod) c.Assert(err, IsNil) @@ -300,9 +343,9 @@ func (s *partitionTestSuite) TestCreatePartitions(c *C) { opts := &install.CreateOptions{ GadgetRootDir: s.gadgetRoot, } - created, err := install.CreateMissingPartitions(dl, pv, opts) + created, err := install.TestCreateMissingPartitions(dl, pv, opts) c.Assert(err, IsNil) - c.Assert(created, DeepEquals, []gadget.OnDiskStructure{mockOnDiskStructureWritable}) + c.Assert(created, DeepEquals, []install.OnDiskAndLaidoutStructure{mockOnDiskAndLaidoutStructureWritable}) c.Assert(calls, Equals, 1) // Check partition table write @@ -334,22 +377,25 @@ func (s *partitionTestSuite) TestCreatePartitionsNonRolePartitions(c *C) { defer cmdUdevadm.Restore() calls := 0 - restore = install.MockEnsureNodesExist(func(ds []gadget.OnDiskStructure, timeout time.Duration) error { + restore = install.MockEnsureNodesExist(func(ds []install.OnDiskAndLaidoutStructure, timeout time.Duration) error { calls++ c.Assert(ds, HasLen, 3) // Ensure all partitions are created as asked for via // the install.CreateOptions - c.Assert(ds[0].Node, Equals, "/dev/node1") - c.Assert(ds[0].Name, Equals, "BIOS Boot") - c.Assert(ds[1].Node, Equals, "/dev/node2") - c.Assert(ds[1].Name, Equals, "Recovery") - c.Assert(ds[2].Node, Equals, "/dev/node3") - c.Assert(ds[2].Name, Equals, "Writable") + onDisk0 := install.OnDiskFromOnDiskAndLaidoutStructure(ds[0]) + onDisk1 := install.OnDiskFromOnDiskAndLaidoutStructure(ds[1]) + onDisk2 := install.OnDiskFromOnDiskAndLaidoutStructure(ds[2]) + c.Assert(onDisk0.Node, Equals, "/dev/node1") + c.Assert(onDisk0.Name, Equals, "BIOS Boot") + c.Assert(onDisk1.Node, Equals, "/dev/node2") + c.Assert(onDisk1.Name, Equals, "Recovery") + c.Assert(onDisk2.Node, Equals, "/dev/node3") + c.Assert(onDisk2.Name, Equals, "Writable") return nil }) defer restore() - err := makeMockGadget(s.gadgetRoot, gadgetContent) + err := gadgettest.MakeMockGadget(s.gadgetRoot, gadgetContent) c.Assert(err, IsNil) pv, err := gadgettest.MustLayOutSingleVolumeFromGadget(s.gadgetRoot, "", uc20Mod) c.Assert(err, IsNil) @@ -360,7 +406,7 @@ func (s *partitionTestSuite) TestCreatePartitionsNonRolePartitions(c *C) { GadgetRootDir: s.gadgetRoot, CreateAllMissingPartitions: true, } - created, err := install.CreateMissingPartitions(dl, pv, opts) + created, err := install.TestCreateMissingPartitions(dl, pv, opts) c.Assert(err, IsNil) c.Assert(created, HasLen, 3) c.Assert(calls, Equals, 1) @@ -375,7 +421,7 @@ func (s *partitionTestSuite) TestRemovePartitionsTrivial(c *C) { restore := disks.MockDeviceNameToDiskMapping(m) defer restore() - err := makeMockGadget(s.gadgetRoot, gadgetContent) + err := gadgettest.MakeMockGadget(s.gadgetRoot, gadgetContent) c.Assert(err, IsNil) pv, err := gadgettest.MustLayOutSingleVolumeFromGadget(s.gadgetRoot, "", uc20Mod) c.Assert(err, IsNil) @@ -455,7 
+501,7 @@ func (s *partitionTestSuite) TestRemovePartitions(c *C) { dl, err := gadget.OnDiskVolumeFromDevice("/dev/node") c.Assert(err, IsNil) - err = makeMockGadget(s.gadgetRoot, gadgetContent) + err = gadgettest.MakeMockGadget(s.gadgetRoot, gadgetContent) c.Assert(err, IsNil) pv, err := gadgettest.MustLayOutSingleVolumeFromGadget(s.gadgetRoot, "", uc20Mod) c.Assert(err, IsNil) @@ -474,35 +520,22 @@ func (s *partitionTestSuite) TestRemovePartitions(c *C) { // check that the OnDiskVolume was updated as expected c.Assert(dl.Structure, DeepEquals, []gadget.OnDiskStructure{ { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "BIOS Boot", - Size: 1024 * 1024, - Type: "21686148-6449-6E6F-744E-656564454649", - ID: "2E59D969-52AB-430B-88AC-F83873519F6F", - }, - StartOffset: 1024 * 1024, - }, - DiskIndex: 1, - Node: "/dev/node1", - Size: 1024 * 1024, + Name: "BIOS Boot", + Size: 1024 * 1024, + Type: "21686148-6449-6E6F-744E-656564454649", + StartOffset: 1024 * 1024, + DiskIndex: 1, + Node: "/dev/node1", }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Label: "ubuntu-seed", - Name: "Recovery", - Size: 2457600 * 512, - Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", - ID: "44C3D5C3-CAE1-4306-83E8-DF437ACDB32F", - Filesystem: "vfat", - }, - - StartOffset: 1024*1024 + 1024*1024, - }, - DiskIndex: 2, - Node: "/dev/node2", - Size: 2457600 * 512, + Label: "ubuntu-seed", + Name: "Recovery", + Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", + Filesystem: "vfat", + StartOffset: 1024*1024 + 1024*1024, + DiskIndex: 2, + Node: "/dev/node2", + Size: 2457600 * 512, }, }) } @@ -587,7 +620,7 @@ func (s *partitionTestSuite) TestRemovePartitionsWithDeviceRescan(c *C) { dl, err := gadget.OnDiskVolumeFromDevice("/dev/node") c.Assert(err, IsNil) - err = makeMockGadget(s.gadgetRoot, gadgetContent) + err = gadgettest.MakeMockGadget(s.gadgetRoot, gadgetContent) c.Assert(err, IsNil) // add the file to indicate we should do the device/rescan trick @@ -613,35 +646,22 @@ func (s *partitionTestSuite) TestRemovePartitionsWithDeviceRescan(c *C) { // check that the OnDiskVolume was updated as expected c.Assert(dl.Structure, DeepEquals, []gadget.OnDiskStructure{ { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "BIOS Boot", - Size: 1024 * 1024, - Type: "21686148-6449-6E6F-744E-656564454649", - ID: "2E59D969-52AB-430B-88AC-F83873519F6F", - }, - StartOffset: 1024 * 1024, - }, - DiskIndex: 1, - Node: "/dev/node1", - Size: 1024 * 1024, + Name: "BIOS Boot", + Type: "21686148-6449-6E6F-744E-656564454649", + StartOffset: 1024 * 1024, + DiskIndex: 1, + Node: "/dev/node1", + Size: 1024 * 1024, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Label: "ubuntu-seed", - Name: "Recovery", - Size: 2457600 * 512, - Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", - ID: "44C3D5C3-CAE1-4306-83E8-DF437ACDB32F", - Filesystem: "vfat", - }, - - StartOffset: 1024*1024 + 1024*1024, - }, - DiskIndex: 2, - Node: "/dev/node2", - Size: 2457600 * 512, + Label: "ubuntu-seed", + Name: "Recovery", + Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", + Filesystem: "vfat", + StartOffset: 1024*1024 + 1024*1024, + DiskIndex: 2, + Node: "/dev/node2", + Size: 2457600 * 512, }, }) } @@ -746,7 +766,7 @@ func (s *partitionTestSuite) TestRemovePartitionsNonAdjacent(c *C) { dl, err := gadget.OnDiskVolumeFromDevice("/dev/node") c.Assert(err, IsNil) - err = makeMockGadget(s.gadgetRoot, 
gadgetContentDifferentOrder) + err = gadgettest.MakeMockGadget(s.gadgetRoot, gadgetContentDifferentOrder) c.Assert(err, IsNil) pv, err := gadgettest.MustLayOutSingleVolumeFromGadget(s.gadgetRoot, "", uc20Mod) c.Assert(err, IsNil) @@ -761,36 +781,22 @@ func (s *partitionTestSuite) TestRemovePartitionsNonAdjacent(c *C) { // check that the OnDiskVolume was updated as expected c.Assert(dl.Structure, DeepEquals, []gadget.OnDiskStructure{ { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "BIOS Boot", - Size: 1024 * 1024, - Type: "21686148-6449-6E6F-744E-656564454649", - ID: "2E59D969-52AB-430B-88AC-F83873519F6F", - }, - StartOffset: 1024 * 1024, - }, - DiskIndex: 1, - Node: "/dev/node1", - Size: 1024 * 1024, + Name: "BIOS Boot", + Type: "21686148-6449-6E6F-744E-656564454649", + StartOffset: 1024 * 1024, + DiskIndex: 1, + Node: "/dev/node1", + Size: 1024 * 1024, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Label: "ubuntu-seed", - Name: "Recovery", - Size: 2457600 * 512, - Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", - ID: "44C3D5C3-CAE1-4306-83E8-DF437ACDB32F", - Filesystem: "vfat", - }, - - StartOffset: 1024*1024 + 1024*1024 + 2457600*512, - }, - - Node: "/dev/node3", - DiskIndex: 3, - Size: 2457600 * 512, + Label: "ubuntu-seed", + Name: "Recovery", + Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", + Filesystem: "vfat", + StartOffset: 1024*1024 + 1024*1024 + 2457600*512, + Node: "/dev/node3", + DiskIndex: 3, + Size: 2457600 * 512, }, }) } @@ -813,7 +819,12 @@ func (s *partitionTestSuite) TestEnsureNodesExist(c *C) { cmdUdevadm := testutil.MockCommand(c, "udevadm", fmt.Sprintf(mockUdevadmScript, tc.utErr)) defer cmdUdevadm.Restore() - ds := []gadget.OnDiskStructure{{Node: node}} + ds := []install.OnDiskAndLaidoutStructure{ + install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{Node: node}, + &gadget.LaidOutStructure{}, + ), + } err = install.EnsureNodesExist(ds, 10*time.Millisecond) if tc.err == "" { c.Assert(err, IsNil) @@ -832,7 +843,12 @@ func (s *partitionTestSuite) TestEnsureNodesExistTimeout(c *C) { defer cmdUdevadm.Restore() node := filepath.Join(c.MkDir(), "node") - ds := []gadget.OnDiskStructure{{Node: node}} + ds := []install.OnDiskAndLaidoutStructure{ + install.MockOnDiskAndLaidoutStructure( + &gadget.OnDiskStructure{Node: node}, + &gadget.LaidOutStructure{}, + ), + } t := time.Now() timeout := 1 * time.Second err := install.EnsureNodesExist(ds, timeout) @@ -949,7 +965,7 @@ func (s *partitionTestSuite) TestCreatedDuringInstallGPT(c *C) { restore := disks.MockDeviceNameToDiskMapping(m) defer restore() - err := makeMockGadget(s.gadgetRoot, gptGadgetContentWithSave) + err := gadgettest.MakeMockGadget(s.gadgetRoot, gptGadgetContentWithSave) c.Assert(err, IsNil) pv, err := gadgettest.MustLayOutSingleVolumeFromGadget(s.gadgetRoot, "", uc20Mod) c.Assert(err, IsNil) @@ -1074,7 +1090,7 @@ func (s *partitionTestSuite) TestCreatedDuringInstallMBR(c *C) { dl, err := gadget.OnDiskVolumeFromDevice("node") c.Assert(err, IsNil) - err = makeMockGadget(s.gadgetRoot, mbrGadgetContentWithSave) + err = gadgettest.MakeMockGadget(s.gadgetRoot, mbrGadgetContentWithSave) c.Assert(err, IsNil) pv, err := gadgettest.MustLayOutSingleVolumeFromGadget(s.gadgetRoot, "", uc20Mod) c.Assert(err, IsNil) diff --git a/gadget/ondisk.go b/gadget/ondisk.go index f139f38845..0321cbde45 100644 --- a/gadget/ondisk.go +++ b/gadget/ondisk.go @@ -30,7 +30,24 @@ import ( // OnDiskStructure represents a gadget structure laid 
on a block device. type OnDiskStructure struct { - LaidOutStructure + // Name, when non empty, provides the name of the structure + Name string + // Label provides the filesystem label + Label string + // Type of the structure, which can be 2-hex digit MBR partition, + // 36-char GUID partition, comma separated <mbr>,<guid> for hybrid + // partitioning schemes, or 'bare' when the structure is not considered + // a partition. + // + // For backwards compatibility type 'mbr' is also accepted, and the + // structure is treated as if it is of role 'mbr'. + Type string + // Filesystem used for the partition, 'vfat', 'ext4' or 'none' for + // structures of type 'bare' + Filesystem string + // StartOffset defines the start offset of the structure within the + // enclosing volume + StartOffset quantity.Offset // Node identifies the device node of the block device. Node string @@ -154,22 +171,15 @@ func OnDiskStructureFromPartition(p disks.Partition) (OnDiskStructure, error) { return OnDiskStructure{}, fmt.Errorf("cannot decode filesystem label for partition %s: %v", p.KernelDeviceNode, err) } - volStruct := VolumeStructure{ - Name: decodedPartLabel, - Size: quantity.Size(p.SizeInBytes), - Label: decodedFsLabel, - Type: p.PartitionType, - Filesystem: p.FilesystemType, - ID: p.PartitionUUID, - } - + // TODO add ID in second part of the gadget refactoring? return OnDiskStructure{ - LaidOutStructure: LaidOutStructure{ - VolumeStructure: &volStruct, - StartOffset: quantity.Offset(p.StartInBytes), - }, - DiskIndex: int(p.DiskIndex), - Size: quantity.Size(p.SizeInBytes), - Node: p.KernelDeviceNode, + Name: decodedPartLabel, + Label: decodedFsLabel, + Type: p.PartitionType, + Filesystem: p.FilesystemType, + StartOffset: quantity.Offset(p.StartInBytes), + DiskIndex: int(p.DiskIndex), + Size: quantity.Size(p.SizeInBytes), + Node: p.KernelDeviceNode, }, nil } diff --git a/gadget/ondisk_test.go b/gadget/ondisk_test.go index 06d7a7a768..bebb4dc277 100644 --- a/gadget/ondisk_test.go +++ b/gadget/ondisk_test.go @@ -167,36 +167,24 @@ func (s *ondiskTestSuite) TestDeviceInfoGPT(c *C) { Size: quantity.Size(8388575 * 512), Structure: []gadget.OnDiskStructure{ { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "BIOS Boot", - Size: 0x100000, - Label: "", - Type: "21686148-6449-6E6F-744E-656564454649", - ID: "2E59D969-52AB-430B-88AC-F83873519F6F", - Filesystem: "", - }, - StartOffset: 0x100000, - }, - DiskIndex: 1, - Size: 0x100000, - Node: "/dev/node1", + DiskIndex: 1, + Size: 0x100000, + Node: "/dev/node1", + Name: "BIOS Boot", + Label: "", + Type: "21686148-6449-6E6F-744E-656564454649", + Filesystem: "", + StartOffset: 0x100000, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "Recovery", - Size: 0x4b000000, - Label: "ubuntu seed", - Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", - ID: "44C3D5C3-CAE1-4306-83E8-DF437ACDB32F", - Filesystem: "vfat", - }, - StartOffset: 0x200000, - }, - DiskIndex: 2, - Size: 0x4b000000, - Node: "/dev/node2", + DiskIndex: 2, + Size: 0x4b000000, + Node: "/dev/node2", + Name: "Recovery", + Label: "ubuntu seed", + Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", + Filesystem: "vfat", + StartOffset: 0x200000, }, }, }) @@ -254,36 +242,24 @@ func (s *ondiskTestSuite) TestDeviceInfoGPT4096SectorSize(c *C) { Size: quantity.Size(8388575 * 4096), Structure: []gadget.OnDiskStructure{ { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "BIOS Boot", - Size: 
0x800000, - Label: "", - Type: "21686148-6449-6E6F-744E-656564454649", - ID: "2E59D969-52AB-430B-88AC-F83873519F6F", - Filesystem: "", - }, - StartOffset: 0x800000, - }, - DiskIndex: 1, - Size: 0x800000, - Node: "/dev/node1", + DiskIndex: 1, + Size: 0x800000, + Node: "/dev/node1", + Name: "BIOS Boot", + Label: "", + Type: "21686148-6449-6E6F-744E-656564454649", + Filesystem: "", + StartOffset: 0x800000, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "Recovery", - Size: 0x258000000, - Label: "ubuntu-seed", - Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", - ID: "44C3D5C3-CAE1-4306-83E8-DF437ACDB32F", - Filesystem: "vfat", - }, - StartOffset: 0x1000000, - }, - DiskIndex: 2, - Size: 0x258000000, - Node: "/dev/node2", + DiskIndex: 2, + Size: 0x258000000, + Node: "/dev/node2", + Name: "Recovery", + Label: "ubuntu-seed", + Type: "C12A7328-F81F-11D2-BA4B-00A0C93EC93B", + Filesystem: "vfat", + StartOffset: 0x1000000, }, }, }) @@ -370,64 +346,44 @@ func (s *ondiskTestSuite) TestDeviceInfoMBR(c *C) { Size: quantity.Size(12345670 * 512), Structure: []gadget.OnDiskStructure{ { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "ubuntu-seed", - Size: 2457600 * 512, - Label: "ubuntu-seed", - Type: "0C", - Filesystem: "vfat", - }, - StartOffset: 4096 * 512, - }, - DiskIndex: 1, - Size: 2457600 * 512, - Node: "/dev/node1", + DiskIndex: 1, + Size: 2457600 * 512, + Node: "/dev/node1", + Name: "ubuntu-seed", + Label: "ubuntu-seed", + Type: "0C", + Filesystem: "vfat", + StartOffset: 4096 * 512, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "ubuntu-boot", - Size: 1048576 * 512, - Label: "ubuntu-boot", - Type: "0D", - Filesystem: "vfat", - }, - StartOffset: (4096 + 2457600) * 512, - }, - DiskIndex: 2, - Size: 1048576 * 512, - Node: "/dev/node2", + DiskIndex: 2, + Size: 1048576 * 512, + Node: "/dev/node2", + Name: "ubuntu-boot", + Label: "ubuntu-boot", + Type: "0D", + Filesystem: "vfat", + StartOffset: (4096 + 2457600) * 512, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "ubuntu-save", - Size: 1048576 * 512, - Label: "ubuntu-save", - Type: "0D", - Filesystem: "ext4", - }, - StartOffset: (4096 + 2457600 + 1048576) * 512, - }, - DiskIndex: 3, - Size: 1048576 * 512, - Node: "/dev/node3", + DiskIndex: 3, + Size: 1048576 * 512, + Node: "/dev/node3", + Name: "ubuntu-save", + Label: "ubuntu-save", + Type: "0D", + Filesystem: "ext4", + StartOffset: (4096 + 2457600 + 1048576) * 512, }, { - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "ubuntu-data", - Size: 1048576 * 512, - Label: "ubuntu-data", - Type: "0D", - Filesystem: "ext4", - }, - StartOffset: (4096 + 2457600 + 1048576 + 1048576) * 512, - }, - DiskIndex: 4, - Size: 1048576 * 512, - Node: "/dev/node4", + DiskIndex: 4, + Size: 1048576 * 512, + Node: "/dev/node4", + Name: "ubuntu-data", + Label: "ubuntu-data", + Type: "0D", + Filesystem: "ext4", + StartOffset: (4096 + 2457600 + 1048576 + 1048576) * 512, }, }, }) @@ -451,19 +407,13 @@ func (s *ondiskTestSuite) TestOnDiskStructureFromPartition(c *C) { c.Assert(err, IsNil) c.Assert(res, DeepEquals, gadget.OnDiskStructure{ - LaidOutStructure: gadget.LaidOutStructure{ - VolumeStructure: &gadget.VolumeStructure{ - Name: "foobar", - Type: "83", - Label: "foobarfs", - Size: 1024, - ID: "abcdef-01234", - Filesystem: "ext4", - }, - StartOffset: 1024 * 1024, - }, - 
DiskIndex: 2, - Size: 1024, - Node: "/dev/sda2", + DiskIndex: 2, + Size: 1024, + Node: "/dev/sda2", + Name: "foobar", + Type: "83", + Label: "foobarfs", + Filesystem: "ext4", + StartOffset: 1024 * 1024, }) } diff --git a/gadget/update.go b/gadget/update.go index c253b54779..dd9c78589f 100644 --- a/gadget/update.go +++ b/gadget/update.go @@ -327,11 +327,10 @@ func EnsureLayoutCompatibility(gadgetLayout *LaidOutVolume, diskLayout *OnDiskVo opts = &EnsureLayoutCompatibilityOptions{} } eq := func(ds OnDiskStructure, gs LaidOutStructure) (bool, string) { - dv := ds.VolumeStructure gv := gs.VolumeStructure // name mismatch - if gv.Name != dv.Name { + if gv.Name != ds.Name { // partitions have no names in MBR so bypass the name check if gadgetLayout.Schema != "mbr" { // don't return a reason if the names don't match @@ -347,16 +346,16 @@ func EnsureLayoutCompatibility(gadgetLayout *LaidOutVolume, diskLayout *OnDiskVo switch { // on disk size too small - case dv.Size < gv.Size: + case ds.Size < gv.Size: return false, fmt.Sprintf("on disk size %d (%s) is smaller than gadget size %d (%s)", - dv.Size, dv.Size.IECString(), gv.Size, gv.Size.IECString()) + ds.Size, ds.Size.IECString(), gv.Size, gv.Size.IECString()) // on disk size too large - case dv.Size > gv.Size: + case ds.Size > gv.Size: // larger on disk size is allowed specifically only for system-data if gv.Role != SystemData { return false, fmt.Sprintf("on disk size %d (%s) is larger than gadget size %d (%s) (and the role should not be expanded)", - dv.Size, dv.Size.IECString(), gv.Size, gv.Size.IECString()) + ds.Size, ds.Size.IECString(), gv.Size, gv.Size.IECString()) } } @@ -387,12 +386,12 @@ func EnsureLayoutCompatibility(gadgetLayout *LaidOutVolume, diskLayout *OnDiskVo case EncryptionLUKS: // then this partition is expected to have been encrypted, the // filesystem label on disk will need "-enc" appended - if dv.Label != gv.Name+"-enc" { + if ds.Label != gv.Name+"-enc" { return false, fmt.Sprintf("partition %[1]s is expected to be encrypted but is not named %[1]s-enc", gv.Name) } // the filesystem should also be "crypto_LUKS" - if dv.Filesystem != "crypto_LUKS" { + if ds.Filesystem != "crypto_LUKS" { return false, fmt.Sprintf("partition %[1]s is expected to be encrypted but does not have an encrypted filesystem", gv.Name) } @@ -422,14 +421,14 @@ func EnsureLayoutCompatibility(gadgetLayout *LaidOutVolume, diskLayout *OnDiskVo // case we don't care about the filesystem at all because snapd does // not touch it, unless a gadget asset update says to update that // image file with a new binary image file. 
- if gv.Filesystem != "" && gv.Filesystem != dv.Filesystem { + if gv.Filesystem != "" && gv.Filesystem != ds.Filesystem { // use more specific error message for structures that are // not creatable at install when we are not being strict if !IsCreatableAtInstall(gv) && !opts.AssumeCreatablePartitionsCreated { - return false, fmt.Sprintf("filesystems do not match (and the partition is not creatable at install): declared as %s, got %s", gv.Filesystem, dv.Filesystem) + return false, fmt.Sprintf("filesystems do not match (and the partition is not creatable at install): declared as %s, got %s", gv.Filesystem, ds.Filesystem) } // otherwise generic - return false, fmt.Sprintf("filesystems do not match: declared as %s, got %s", gv.Filesystem, dv.Filesystem) + return false, fmt.Sprintf("filesystems do not match: declared as %s, got %s", gv.Filesystem, ds.Filesystem) } } diff --git a/image/image_linux.go b/image/image_linux.go index 51497c23bb..6059b4fe22 100644 --- a/image/image_linux.go +++ b/image/image_linux.go @@ -57,7 +57,7 @@ var ( ) func (custo *Customizations) validate(model *asserts.Model) error { - core20 := model.Grade() != asserts.ModelGradeUnset + hasModes := model.Grade() != asserts.ModelGradeUnset var unsupported []string unsupportedConsoleConfDisable := func() { if custo.ConsoleConf == "disabled" { @@ -72,7 +72,7 @@ func (custo *Customizations) validate(model *asserts.Model) error { kind := "UC16/18" switch { - case core20: + case hasModes: kind = "UC20+" // TODO:UC20: consider supporting these with grade dangerous? unsupportedConsoleConfDisable() @@ -276,12 +276,12 @@ var setupSeed = func(tsto *tooling.ToolingStore, model *asserts.Model, opts *Opt return fmt.Errorf("internal error: classic model but classic mode not set") } - core20 := model.Grade() != asserts.ModelGradeUnset + hasModes := model.Grade() != asserts.ModelGradeUnset var rootDir string var bootRootDir string var seedDir string var label string - if !core20 { + if !hasModes { if opts.Classic { // Classic, PrepareDir is the root dir itself rootDir = opts.PrepareDir @@ -419,6 +419,20 @@ var setupSeed = func(tsto *tooling.ToolingStore, model *asserts.Model, opts *Opt return err } + // Check local snaps again, but now after InfoDerived has been called. InfoDerived + // fills out the snap revisions for the local snaps, and we need this to verify against + // expected revisions. + for _, sn := range localSnaps { + // Its a bit more tricky to deal with local snaps, as we only have that specific revision + // available. Therefore the revision in the local snap must be exactly the revision specified + // in the manifest. If it's not, we fail. 
+ specifiedRevision := opts.Revisions[sn.Info.SnapName()] + if !specifiedRevision.Unset() && specifiedRevision != sn.Info.Revision { + return fmt.Errorf("cannot use snap %s for image, unknown/local revision does not match the value specified by revisions file (%s != %s)", + sn.Path, sn.Info.Revision, specifiedRevision) + } + } + for { toDownload, err := w.SnapsToDownload() if err != nil { @@ -431,7 +445,12 @@ var setupSeed = func(tsto *tooling.ToolingStore, model *asserts.Model, opts *Opt if sn == nil { return "", fmt.Errorf("internal error: downloading unexpected snap %q", info.SnapName()) } - fmt.Fprintf(Stdout, "Fetching %s\n", sn.SnapName()) + rev := opts.Revisions[sn.SnapName()] + if !rev.Unset() { + fmt.Fprintf(Stdout, "Fetching %s (%d)\n", sn.SnapName(), rev) + } else { + fmt.Fprintf(Stdout, "Fetching %s\n", sn.SnapName()) + } if err := w.SetInfo(sn, info); err != nil { return "", err } @@ -442,6 +461,7 @@ var setupSeed = func(tsto *tooling.ToolingStore, model *asserts.Model, opts *Opt byName[sn.SnapName()] = sn snapToDownloadOptions[i].Snap = sn snapToDownloadOptions[i].Channel = sn.Channel + snapToDownloadOptions[i].Revision = opts.Revisions[sn.SnapName()] snapToDownloadOptions[i].CohortKey = opts.WideCohortKey } downloadedSnaps, err := tsto.DownloadMany(snapToDownloadOptions, curSnaps, tooling.DownloadManyOptions{ @@ -518,7 +538,7 @@ var setupSeed = func(tsto *tooling.ToolingStore, model *asserts.Model, opts *Opt // This will need to be handled here eventually too. if opts.Classic { var fpath string - if core20 { + if hasModes { fpath = filepath.Join(seedDir, "systems") } else { fpath = filepath.Join(seedDir, "seed.yaml") @@ -544,7 +564,7 @@ var setupSeed = func(tsto *tooling.ToolingStore, model *asserts.Model, opts *Opt bootWith := &boot.BootableSet{ UnpackedGadgetDir: gadgetUnpackDir, - Recovery: core20, + Recovery: hasModes, } if label != "" { bootWith.RecoverySystemDir = filepath.Join("/systems/", label) @@ -596,7 +616,7 @@ var setupSeed = func(tsto *tooling.ToolingStore, model *asserts.Model, opts *Opt } // early config & cloud-init config (done at install for Core 20) - if !core20 { + if !hasModes { // and the cloud-init things if err := installCloudConfig(rootDir, gadgetUnpackDir); err != nil { return err diff --git a/image/image_test.go b/image/image_test.go index dfcd797cba..512efcade1 100644 --- a/image/image_test.go +++ b/image/image_test.go @@ -3480,3 +3480,405 @@ func (s *imageSuite) TestSetupSeedCore20DelegatedSnap(c *C) { err := image.SetupSeed(s.tsto, model, opts) c.Check(err, IsNil) } + +func (s *imageSuite) testSetupSeedWithMixedSnapsAndRevisions(c *C, revisions map[string]snap.Revision) error { + restore := image.MockTrusted(s.StoreSigning.Trusted) + defer restore() + + rootdir := filepath.Join(c.MkDir(), "image") + s.setupSnaps(c, map[string]string{ + "pc": "canonical", + "pc-kernel": "my-brand", + }, "") + + coreFn := snaptest.MakeTestSnapWithFiles(c, packageCore, [][]string{{"local", ""}}) + requiredSnap1Fn := snaptest.MakeTestSnapWithFiles(c, requiredSnap1, [][]string{{"local", ""}}) + + opts := &image.Options{ + Snaps: []string{ + coreFn, + requiredSnap1Fn, + }, + PrepareDir: filepath.Dir(rootdir), + Customizations: image.Customizations{ + Validation: "ignore", + }, + Revisions: revisions, + } + + if err := image.SetupSeed(s.tsto, s.model, opts); err != nil { + // let each unit test test against this + return err + } + + // check seed + seeddir := filepath.Join(rootdir, "var/lib/snapd/seed") + seedsnapsdir := filepath.Join(seeddir, "snaps") + essSnaps, 
runSnaps, _ := s.loadSeed(c, seeddir) + c.Check(runSnaps, DeepEquals, []*seed.Snap{ + { + Path: filepath.Join(seedsnapsdir, "required-snap1_x1.snap"), + + SideInfo: &snap.SideInfo{ + RealName: "required-snap1", + }, + Required: true, + }, + }) + c.Check(runSnaps[0].Path, testutil.FilePresent) + + // check the essential snaps, we have to do this in runtime instead + // of hardcoding as the information is "calculated". + c.Check(essSnaps, HasLen, 3) + for i, name := range []string{"core_x1.snap", "pc-kernel", "pc"} { + channel := stableChannel + info := s.AssertedSnapInfo(name) + var pinfo snap.PlaceInfo = info + var sideInfo *snap.SideInfo + var snapType snap.Type + if info == nil { + switch name { + case "core_x1.snap": + pinfo = snap.MinimalPlaceInfo("core", snap.R(-1)) + sideInfo = &snap.SideInfo{ + RealName: "core", + } + channel = "" + snapType = snap.TypeOS + } + } else { + sideInfo = &info.SideInfo + snapType = info.Type() + } + + fn := pinfo.Filename() + p := filepath.Join(seedsnapsdir, fn) + c.Check(p, testutil.FilePresent) + c.Check(essSnaps[i], DeepEquals, &seed.Snap{ + Path: p, + + SideInfo: sideInfo, + + EssentialType: snapType, + Essential: true, + Required: true, + + Channel: channel, + }) + } + + l, err := ioutil.ReadDir(seedsnapsdir) + c.Assert(err, IsNil) + c.Check(l, HasLen, 4) + + // check the downloads + c.Check(s.storeActionsBunchSizes, DeepEquals, []int{2}) + c.Check(s.storeActions, DeepEquals, []*store.SnapAction{ + { + Action: "download", + InstanceName: "pc-kernel", + Revision: revisions["pc-kernel"], + Flags: store.SnapActionIgnoreValidation, + }, + { + Action: "download", + InstanceName: "pc", + Revision: revisions["pc"], + Flags: store.SnapActionIgnoreValidation, + }, + }) + return nil +} + +func (s *imageSuite) TestSetupSeedSnapRevisionsWithCorrectLocalSnap(c *C) { + // It doesn't make sense to use a local snap when doing a reproducible build, + // so if a revision is provided, and we are trying to provide that snap locally, + // then we should return an error. + // Our helper creates two local snaps + // 1. core. + // 2. required-snap. + // So lets provide a revision for one of them and it should then fail + err := s.testSetupSeedWithMixedSnapsAndRevisions(c, map[string]snap.Revision{ + "pc-kernel": snap.R(1), + "pc": snap.R(13), + "core": snap.R(-1), + }) + c.Check(err, IsNil) +} + +func (s *imageSuite) TestSetupSeedSnapRevisionsWithLocalSnapFails(c *C) { + // It doesn't make sense to use a local snap when doing a reproducible build, + // so if a revision is provided, and we are trying to provide that snap locally, + // then we should return an error. + // Our helper creates two local snaps + // 1. core. + // 2. required-snap. + // So lets provide a revision for one of them and it should then fail + err := s.testSetupSeedWithMixedSnapsAndRevisions(c, map[string]snap.Revision{ + "pc-kernel": snap.R(1), + "pc": snap.R(13), + "core": snap.R(5), + }) + c.Check(err, ErrorMatches, `cannot use snap .*/snapsrc/core_16.04_all.snap for image, unknown/local revision does not match the value specified by revisions file \(x1 != 5\)`) +} + +func (s *imageSuite) TestSetupSeedSnapRevisionsWithLocalSnapHappy(c *C) { + // Make sure we can still provide specific revisions for snaps that are + // non-local. 
+ err := s.testSetupSeedWithMixedSnapsAndRevisions(c, map[string]snap.Revision{ + "pc-kernel": snap.R(15), + "pc": snap.R(28), + }) + c.Check(err, IsNil) +} + +func (s *imageSuite) TestSetupSeedSnapRevisionsDownloadHappy(c *C) { + bootloader.Force(nil) + restore := image.MockTrusted(s.StoreSigning.Trusted) + defer restore() + + // a model that uses core20 + model := s.makeUC20Model(nil) + prepareDir := c.MkDir() + + // Create a new core20 image with the following snaps: + // snapd, core20, pc-kernel, pc, required20 + // We will use revisions for each of them to guarantee that + // exact revisions will be used when the store action is invoked. + // The revisions provided to s.makeSnap won't matter, and they shouldn't. + // Instead the revision provided in the revisions map should be used instead. + s.makeSnap(c, "snapd", nil, snap.R(1), "") + s.makeSnap(c, "core20", nil, snap.R(20), "") + s.makeSnap(c, "pc-kernel=20", nil, snap.R(1), "") + gadgetContent := [][]string{ + {"uboot.conf", ""}, + {"meta/gadget.yaml", pcUC20GadgetYaml}, + } + s.makeSnap(c, "pc=20", gadgetContent, snap.R(22), "") + s.makeSnap(c, "required20", nil, snap.R(21), "other") + + opts := &image.Options{ + PrepareDir: prepareDir, + Customizations: image.Customizations{ + BootFlags: []string{"factory"}, + Validation: "ignore", + }, + Revisions: map[string]snap.Revision{ + "snapd": snap.R(133), + "core20": snap.R(58), + "pc-kernel": snap.R(15), + "pc": snap.R(12), + "required20": snap.R(59), + }, + } + + err := image.SetupSeed(s.tsto, model, opts) + c.Assert(err, IsNil) + + // check seed + seeddir := filepath.Join(prepareDir, "system-seed") + seedsnapsdir := filepath.Join(seeddir, "snaps") + essSnaps, runSnaps, _ := s.loadSeed(c, seeddir) + c.Check(essSnaps, HasLen, 4) + c.Check(runSnaps, HasLen, 1) + + stableChannel := "latest/stable" + + // check the files are in place + for i, name := range []string{"snapd", "pc-kernel", "core20", "pc"} { + info := s.AssertedSnapInfo(name) + + channel := stableChannel + switch name { + case "pc", "pc-kernel": + channel = "20" + } + + fn := info.Filename() + p := filepath.Join(seedsnapsdir, fn) + c.Check(p, testutil.FilePresent) + c.Check(essSnaps[i], DeepEquals, &seed.Snap{ + Path: p, + SideInfo: &info.SideInfo, + EssentialType: info.Type(), + Essential: true, + Required: true, + Channel: channel, + }) + } + c.Check(runSnaps[0], DeepEquals, &seed.Snap{ + Path: filepath.Join(seedsnapsdir, "required20_21.snap"), + SideInfo: &s.AssertedSnapInfo("required20").SideInfo, + Required: true, + Channel: stableChannel, + }) + c.Check(runSnaps[0].Path, testutil.FilePresent) + + l, err := ioutil.ReadDir(seedsnapsdir) + c.Assert(err, IsNil) + c.Check(l, HasLen, 5) + + // check the downloads + c.Check(s.storeActionsBunchSizes, DeepEquals, []int{5}) + c.Check(s.storeActions[0], DeepEquals, &store.SnapAction{ + Action: "download", + InstanceName: "snapd", + Revision: snap.R(133), + Flags: store.SnapActionIgnoreValidation, + }) + c.Check(s.storeActions[1], DeepEquals, &store.SnapAction{ + Action: "download", + InstanceName: "pc-kernel", + Revision: snap.R(15), + Flags: store.SnapActionIgnoreValidation, + }) + c.Check(s.storeActions[2], DeepEquals, &store.SnapAction{ + Action: "download", + InstanceName: "core20", + Revision: snap.R(58), + Flags: store.SnapActionIgnoreValidation, + }) + c.Check(s.storeActions[3], DeepEquals, &store.SnapAction{ + Action: "download", + InstanceName: "pc", + Revision: snap.R(12), + Flags: store.SnapActionIgnoreValidation, + }) + c.Check(s.storeActions[4], DeepEquals, 
&store.SnapAction{ + Action: "download", + InstanceName: "required20", + Revision: snap.R(59), + Flags: store.SnapActionIgnoreValidation, + }) +} + +func (s *imageSuite) TestLocalSnapRevisionMatchingStoreRevision(c *C) { + restore := image.MockTrusted(s.StoreSigning.Trusted) + defer restore() + + rootdir := filepath.Join(c.MkDir(), "image") + s.setupSnaps(c, map[string]string{ + "pc": "canonical", + "pc-kernel": "my-brand", + }, "") + + opts := &image.Options{ + Snaps: []string{ + s.AssertedSnap("core"), + }, + PrepareDir: filepath.Dir(rootdir), + Customizations: image.Customizations{ + Validation: "enforce", + }, + Revisions: map[string]snap.Revision{ + "core": snap.R(3), + }, + } + + err := image.SetupSeed(s.tsto, s.model, opts) + c.Assert(err, IsNil) + + // check seed + seeddir := filepath.Join(rootdir, "var/lib/snapd/seed") + seedsnapsdir := filepath.Join(seeddir, "snaps") + essSnaps, runSnaps, roDB := s.loadSeed(c, seeddir) + c.Check(essSnaps, HasLen, 3) + c.Check(runSnaps, HasLen, 1) + + // check the files are in place + for i, name := range []string{"core_3.snap", "pc-kernel", "pc"} { + info := s.AssertedSnapInfo(name) + if info == nil { + switch name { + case "core_3.snap": + info = &snap.Info{ + SideInfo: snap.SideInfo{ + RealName: "core", + SnapID: s.AssertedSnapID("core"), + Revision: snap.R(3), + }, + SnapType: snap.TypeOS, + } + default: + c.Errorf("cannot have %s", name) + } + } + + fn := info.Filename() + p := filepath.Join(seedsnapsdir, fn) + c.Check(p, testutil.FilePresent) + c.Check(essSnaps[i], DeepEquals, &seed.Snap{ + Path: p, + SideInfo: &info.SideInfo, + EssentialType: info.Type(), + Essential: true, + Required: true, + Channel: stableChannel, + }) + } + c.Check(runSnaps[0], DeepEquals, &seed.Snap{ + Path: filepath.Join(seedsnapsdir, "required-snap1_3.snap"), + Required: true, + SideInfo: &snap.SideInfo{ + RealName: "required-snap1", + SnapID: s.AssertedSnapID("required-snap1"), + Revision: snap.R(3), + LegacyEditedContact: "mailto:foo@example.com", + }, + Channel: stableChannel, + }) + c.Check(runSnaps[0].Path, testutil.FilePresent) + + l, err := ioutil.ReadDir(seedsnapsdir) + c.Assert(err, IsNil) + c.Check(l, HasLen, 4) + + // check assertions + decls, err := roDB.FindMany(asserts.SnapDeclarationType, nil) + c.Assert(err, IsNil) + c.Check(decls, HasLen, 4) + + // check the bootloader config + m, err := s.bootloader.GetBootVars("snap_kernel", "snap_core") + c.Assert(err, IsNil) + c.Check(m["snap_kernel"], Equals, "pc-kernel_2.snap") + c.Assert(err, IsNil) + c.Check(m["snap_core"], Equals, "core_3.snap") + + c.Check(s.stderr.String(), Equals, "") + + // check the downloads, make sure no core snap downloads are + // present as we are using the local file for this. 
+ c.Check(s.storeActionsBunchSizes, DeepEquals, []int{3}) + c.Check(s.storeActions[0], DeepEquals, &store.SnapAction{ + Action: "download", + InstanceName: "pc-kernel", + Channel: stableChannel, + Flags: store.SnapActionEnforceValidation, + }) + c.Check(s.storeActions[1], DeepEquals, &store.SnapAction{ + Action: "download", + InstanceName: "pc", + Channel: stableChannel, + Flags: store.SnapActionEnforceValidation, + }) + c.Check(s.storeActions[2], DeepEquals, &store.SnapAction{ + Action: "download", + InstanceName: "required-snap1", + Channel: stableChannel, + Flags: store.SnapActionEnforceValidation, + }) + + // Verify that the local file is of correct revision (3) + c.Check(s.curSnaps, HasLen, 1) + c.Check(s.curSnaps[0], DeepEquals, []*store.CurrentSnap{ + { + InstanceName: "core", + SnapID: s.AssertedSnapID("core"), + Revision: snap.R(3), + TrackingChannel: "stable", + Epoch: snap.E("0"), + IgnoreValidation: false, + }, + }) +} diff --git a/image/manifest.go b/image/manifest.go new file mode 100644 index 0000000000..2f59bd97e5 --- /dev/null +++ b/image/manifest.go @@ -0,0 +1,112 @@ +// -*- Mode: Go; indent-tabs-mode: t -*- + +/* + * Copyright (C) 2022 Canonical Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +package image + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "sort" + "strings" + + "github.com/snapcore/snapd/snap" +) + +// The seed.manifest generated by ubuntu-image contains entries in the following +// format: +// <snap-name> <snap-revision> +// The goal in a future iteration of this will be to move the generation of the +// seed.manifest to this package, out of ubuntu-image. +// TODO: Move generation of seed.manifest from ubuntu-image to here + +// ReadSeedManifest reads a seed.manifest generated by ubuntu-image, and returns +// a map containing the snap names and their revisions. +func ReadSeedManifest(manifestFile string) (map[string]snap.Revision, error) { + f, err := os.Open(manifestFile) + if err != nil { + return nil, err + } + defer f.Close() + + revisions := make(map[string]snap.Revision) + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "#") { + continue + } + if strings.HasPrefix(line, " ") { + return nil, fmt.Errorf("line cannot start with any spaces: %q", line) + } + + tokens := strings.Fields(line) + // Expect exactly two tokens + if len(tokens) != 2 { + return nil, fmt.Errorf("line is illegally formatted: %q", line) + } + + snapName := tokens[0] + revString := tokens[1] + if err := snap.ValidateName(snapName); err != nil { + return nil, err + } + + rev, err := snap.ParseRevision(revString) + if err != nil { + return nil, err + } + + // Values that are higher than 0 indicate the revision comes from the store, and values + // lower than 0 indicate the snap was sourced locally. We allow both in the seed.manifest as + // long as the user can provide us with the correct snaps. The only number we won't accept is + // 0. 
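// For illustration, a manifest accepted by this check could contain both store and
// local ("x") revisions, e.g. lines such as the following (taken from the tests below):
//
//   core22 275
//   one-snap x6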
+ if rev.Unset() { + return nil, fmt.Errorf("cannot use revision %d for snap %q: revision must not be 0", rev, snapName) + } + revisions[snapName] = rev + } + return revisions, nil +} + +// WriteSeedManifest generates the seed.manifest contents from the provided map of +// snaps and their revisions, and stores them in the given file path +func WriteSeedManifest(filePath string, revisions map[string]snap.Revision) error { + if len(revisions) == 0 { + return nil + } + + keys := make([]string, 0, len(revisions)) + for k := range revisions { + keys = append(keys, k) + } + sort.Strings(keys) + + buf := bytes.NewBuffer(nil) + for _, key := range keys { + rev := revisions[key] + if rev.N == 0 { + return fmt.Errorf("revision must not be 0 for snap %q", key) + } + fmt.Fprintf(buf, "%s %s\n", key, rev) + } + return ioutil.WriteFile(filePath, buf.Bytes(), 0755) +} diff --git a/image/manifest_test.go b/image/manifest_test.go new file mode 100644 index 0000000000..f7c6aa6765 --- /dev/null +++ b/image/manifest_test.go @@ -0,0 +1,122 @@ +// -*- Mode: Go; indent-tabs-mode: t -*- + +/* + * Copyright (C) 2022 Canonical Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +package image_test + +import ( + "io/ioutil" + "path/filepath" + + . 
"gopkg.in/check.v1" + + "github.com/snapcore/snapd/image" + "github.com/snapcore/snapd/osutil" + "github.com/snapcore/snapd/snap" + "github.com/snapcore/snapd/testutil" +) + +type manifestSuite struct { + testutil.BaseTest + root string +} + +var _ = Suite(&manifestSuite{}) + +func (s *manifestSuite) SetUpTest(c *C) { + s.BaseTest.SetUpTest(c) + s.root = c.MkDir() +} + +func (s *manifestSuite) writeSeedManifest(c *C, contents string) string { + manifestFile := filepath.Join(s.root, "seed.manifest") + err := ioutil.WriteFile(manifestFile, []byte(contents), 0644) + c.Assert(err, IsNil) + return manifestFile +} + +func (s *manifestSuite) TestReadSeedManifestFull(c *C) { + // Include two entries that end on .snap as ubuntu-image + // once produced entries looking like this + manifestFile := s.writeSeedManifest(c, `# test line should not match +core22 275 +pc 128 +snapd 16681 +one-snap x6 +`) + snapRevs, err := image.ReadSeedManifest(manifestFile) + c.Assert(err, IsNil) + c.Check(snapRevs, DeepEquals, map[string]snap.Revision{ + "core22": snap.R(275), + "pc": snap.R(128), + "snapd": snap.R(16681), + "one-snap": snap.R(-6), + }) +} + +func (s *manifestSuite) TestReadSeedManifestParseFails(c *C) { + tests := []struct { + contents string + err string + }{ + {"my/invalid&name 33\n", `invalid snap name: "my/invalid&name"`}, + {"core 0\n", `invalid snap revision: "0"`}, + {"core\n", `line is illegally formatted: "core"`}, + {" test\n", `line cannot start with any spaces: " test"`}, + {"core 14 14\n", `line is illegally formatted: "core 14 14"`}, + } + + for _, t := range tests { + manifestFile := s.writeSeedManifest(c, t.contents) + _, err := image.ReadSeedManifest(manifestFile) + c.Check(err, ErrorMatches, t.err) + } +} + +func (s *manifestSuite) TestReadSeedManifestNoFile(c *C) { + snapRevs, err := image.ReadSeedManifest("noexists.manifest") + c.Assert(err, NotNil) + c.Check(snapRevs, IsNil) + c.Check(err, ErrorMatches, `open noexists.manifest: no such file or directory`) +} + +func (s *manifestSuite) testWriteSeedManifest(c *C, revisions map[string]snap.Revision) string { + manifestFile := filepath.Join(s.root, "seed.manifest") + err := image.WriteSeedManifest(manifestFile, revisions) + c.Assert(err, IsNil) + return manifestFile +} + +func (s *manifestSuite) TestWriteSeedManifestNoFile(c *C) { + filePath := s.testWriteSeedManifest(c, map[string]snap.Revision{}) + c.Check(osutil.FileExists(filePath), Equals, false) +} + +func (s *manifestSuite) TestWriteSeedManifest(c *C) { + filePath := s.testWriteSeedManifest(c, map[string]snap.Revision{"core": snap.R(12), "test": snap.R(-4)}) + contents, err := ioutil.ReadFile(filePath) + c.Assert(err, IsNil) + c.Check(string(contents), Equals, `core 12 +test x4 +`) +} + +func (s *manifestSuite) TestWriteSeedManifestInvalidRevision(c *C) { + err := image.WriteSeedManifest("", map[string]snap.Revision{"core": {}}) + c.Assert(err, ErrorMatches, `revision must not be 0 for snap "core"`) +} diff --git a/image/options.go b/image/options.go index 8eb3545243..9a29bc9b9f 100644 --- a/image/options.go +++ b/image/options.go @@ -19,6 +19,8 @@ package image +import "github.com/snapcore/snapd/snap" + type Options struct { ModelFile string Classic bool @@ -45,6 +47,7 @@ type Options struct { // TODO: use OptionsSnap directly here? 
Snaps []string SnapChannels map[string]string + Revisions map[string]snap.Revision // WideCohortKey can be used to supply a cohort covering all // the snaps in the image, there is no generally suppported API diff --git a/interfaces/apparmor/backend.go b/interfaces/apparmor/backend.go index e272335f65..d313a921fb 100644 --- a/interfaces/apparmor/backend.go +++ b/interfaces/apparmor/backend.go @@ -853,16 +853,20 @@ func (b *Backend) addContent(securityTag string, snapInfo *snap.Info, cmdName st %[2]s `, usrLibSnapdConfineTransitionRule, nonBaseCoreTransitionSnippet) + case "###INCLUDE_IF_EXISTS_SNAP_TUNING###": + features, _ := parserFeatures() + if strutil.ListContains(features, "include-if-exists") { + return `#include if exists "/var/lib/snapd/apparmor/snap-tuning"` + } + return "" case "###VAR###": return templateVariables(snapInfo, securityTag, cmdName) case "###PROFILEATTACH###": return fmt.Sprintf("profile \"%s\"", securityTag) case "###CHANGEPROFILE_RULE###": features, _ := parserFeatures() - for _, f := range features { - if f == "unsafe" { - return "change_profile unsafe /**," - } + if strutil.ListContains(features, "unsafe") { + return "change_profile unsafe /**," } return "change_profile," case "###SNIPPETS###": diff --git a/interfaces/apparmor/backend_test.go b/interfaces/apparmor/backend_test.go index 558e663795..0d5d787d43 100644 --- a/interfaces/apparmor/backend_test.go +++ b/interfaces/apparmor/backend_test.go @@ -1113,6 +1113,44 @@ func (s *backendSuite) TestCombineSnippetsChangeProfile(c *C) { } } +func (s *backendSuite) TestCombineSnippetsIncludeIfExists(c *C) { + restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) + defer restore() + restore = osutil.MockIsHomeUsingNFS(func() (bool, error) { return false, nil }) + defer restore() + restore = osutil.MockIsRootWritableOverlay(func() (string, error) { return "", nil }) + defer restore() + + restoreTemplate := apparmor.MockTemplate("###INCLUDE_IF_EXISTS_SNAP_TUNING###") + defer restoreTemplate() + + type includeIfExistsScenario struct { + features []string + expected string + } + + var includeIfExistsScenarios = []includeIfExistsScenario{{ + features: []string{}, + expected: "", + }, { + features: []string{"include-if-exists"}, + expected: `#include if exists "/var/lib/snapd/apparmor/snap-tuning"`, + }} + + for i, scenario := range includeIfExistsScenarios { + restore = apparmor.MockParserFeatures(func() ([]string, error) { return scenario.features, nil }) + defer restore() + + snapInfo := s.InstallSnap(c, interfaces.ConfinementOptions{}, "", ifacetest.SambaYamlV1, 1) + profile := filepath.Join(dirs.SnapAppArmorDir, "snap.samba.smbd") + c.Check(profile, testutil.FileEquals, scenario.expected, Commentf("scenario %d: %#v", i, scenario)) + stat, err := os.Stat(profile) + c.Assert(err, IsNil) + c.Check(stat.Mode(), Equals, os.FileMode(0644)) + s.RemoveSnap(c, snapInfo) + } +} + func (s *backendSuite) TestParallelInstallCombineSnippets(c *C) { restore := apparmor_sandbox.MockLevel(apparmor_sandbox.Full) defer restore() diff --git a/interfaces/apparmor/template.go b/interfaces/apparmor/template.go index ced8ebf12d..8ceac2c735 100644 --- a/interfaces/apparmor/template.go +++ b/interfaces/apparmor/template.go @@ -66,6 +66,8 @@ var templateCommon = ` #include <tunables/global> +###INCLUDE_IF_EXISTS_SNAP_TUNING### + # snapd supports the concept of 'parallel installs' where snaps with the same # name are differentiated by '_<instance>' such that foo, foo_bar and foo_baz # may all be installed on the system. 
To support this, SNAP_NAME is set to the diff --git a/interfaces/builtin/network_manager.go b/interfaces/builtin/network_manager.go index 5c67634a59..b95e9f8aab 100644 --- a/interfaces/builtin/network_manager.go +++ b/interfaces/builtin/network_manager.go @@ -177,6 +177,10 @@ dbus (receive, send) dbus (bind) bus=system name="org.freedesktop.NetworkManager", +# Allow binding OpenVPN names +dbus (bind) + bus=system + name="org.freedesktop.NetworkManager.openvpn.*", # Allow traffic to/from our path and interface with any method for unconfined # clients to talk to our service. @@ -381,6 +385,10 @@ const networkManagerPermanentSlotDBus = ` --> <allow own="org.freedesktop.NetworkManager.dnsmasq"/> <allow send_destination="org.freedesktop.NetworkManager.dnsmasq"/> + + <!-- VPN support --> + <allow own_prefix="org.freedesktop.NetworkManager.openvpn"/> + <allow send_destination="org.freedesktop.NetworkManager.openvpn"/> </policy> <policy context="default"> @@ -486,6 +494,10 @@ const networkManagerPermanentSlotDBus = ` <deny own="org.freedesktop.NetworkManager.dnsmasq"/> <deny send_destination="org.freedesktop.NetworkManager.dnsmasq"/> + + <!-- VPN support --> + <deny own_prefix="org.freedesktop.NetworkManager.openvpn"/> + <deny send_destination="org.freedesktop.NetworkManager.openvpn"/> </policy> <limit name="max_replies_per_connection">1024</limit> diff --git a/osutil/user.go b/osutil/user.go index 13d37dfb9f..1c29038b5a 100644 --- a/osutil/user.go +++ b/osutil/user.go @@ -261,6 +261,7 @@ func AddUser(name string, opts *AddUserOptions) error { type DelUserOptions struct { ExtraUsers bool + Force bool } // DelUser removes a "regular login user" from the system, including their @@ -275,6 +276,9 @@ func DelUser(name string, opts *DelUserOptions) error { if opts.ExtraUsers { cmdStr = append(cmdStr, "--extrausers") } + if opts.Force { + cmdStr = append(cmdStr, "--force") + } cmdStr = append(cmdStr, name) if output, err := exec.Command("userdel", cmdStr...).CombinedOutput(); err != nil { diff --git a/osutil/user_test.go b/osutil/user_test.go index 91b038add1..7cd079f2c4 100644 --- a/osutil/user_test.go +++ b/osutil/user_test.go @@ -313,6 +313,17 @@ func (s *delUserSuite) TestDelUser(c *check.C) { c.Assert(s.mockUserDel.Calls(), check.DeepEquals, [][]string{s.expectedCmd("u1")}) } +func (s *delUserSuite) TestDelUserForce(c *check.C) { + c.Assert(osutil.DelUser("u1", &osutil.DelUserOptions{Force: false}), check.IsNil) + c.Assert(osutil.DelUser("u2", &osutil.DelUserOptions{Force: true}), check.IsNil) + + // validity check + c.Check(s.mockUserDel.Calls(), check.DeepEquals, [][]string{ + {"userdel", "--remove", "u1"}, + {"userdel", "--remove", "--force", "u2"}, + }) +} + func (s *delUserSuite) TestDelUserRemovesSudoersIfPresent(c *check.C) { f1 := osutil.SudoersFile("u1") diff --git a/overlord/README.md b/overlord/README.md new file mode 100644 index 0000000000..1f70f68899 --- /dev/null +++ b/overlord/README.md @@ -0,0 +1,81 @@ +Notes on state and changes +=========================== + +State is central to the consistency and integrity of any snap system. It’s maintained by snapd by managing the external on-disk state of snap installations and its own persistent working state of metadata, expectations, and in-progress operations. + +Working persistent state is implemented by `overlord/state.State` with a global lock, `State.Lock/Unlock`, to govern updates. If state is modified after acquiring the lock, it’s atomically updated to disk when the lock is released. 
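As a minimal sketch of that pattern (the `"seeded"` key and the nil backend here are purely illustrative; in practice `overlord.Overlord` constructs the `state.State` with a backend that persists it):

```
st := state.New(nil)

st.Lock()
st.Set("seeded", true) // mutate working state while holding the lock

var seeded bool
if err := st.Get("seeded", &seeded); err != nil {
	// state.ErrNoState is returned for keys that were never set
}
st.Unlock() // modifications are written back atomically once the lock is released
```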
+ +State managers +--------------- +State managers are used to manage both the working state and the on-disk snap state. They all implement the `overlord.StateManager` interface. Code-wise, together with a few other auxiliary components, they live in `overlord` and its subpackages. `overlord.Overlord` itself is responsible for the wiring and coordination of all of these. + +In broad terms, each state manager is assigned responsibility for a different subsystem, and these areas are mostly orthogonal. The managers then participate in the management and bookkeeping of the state via various mechanisms. + +During startup, and after construction, an optional `StartUp` method is invoked on each state manager. This is followed by the activation of an *ensure loop* which calls a state manager's corresponding `Ensure` method at least once every 5 minutes. + +The *ensure loop* is intended to initiate any automatic state management and corresponding transitions, state repair, and any other consistency-maintaining operations. + +`state.Change` +--------------- +A `state.Change` is a graph of `state.Task` structs and their inter-dependencies as edges. The purpose of both a `state.Change` and a `state.Task` is identified by their kind (which should be an explanatory string value). + +Time-consuming and user-initiated operations, usually initiated from the API provided by the `daemon` package, should be performed using the `state.Change` functionality. + +`state.Change` and `state.Task` instances persist via the working state, and they can carry input parameters as well as their own state, accessible with the `Get` and `Set` methods. + +The `state.Change` mechanism is designed so that operations survive restarts and reboots and so that, on error, snapd tries to bring the external state back to a previous good state if possible. + +`state.TaskRunner` +------------------- +The `state.TaskRunner` is responsible for `state.Change` and `state.Task` execution, and their state management. The do and undo logic of a `state.Task` is defined per `Task` kind using `TaskRunner.AddHandler`. + +During execution, a `Task` goes through a series of statuses. These are represented by `state.Status` and will finish in a ready status of either `DoneStatus`, `UndoneStatus`, `ErrorStatus` or `HoldStatus`. + +If errors are encountered, the `TaskRunner` will normally try to recursively execute the undo logic of any previously depended-upon `Task`s, with the exception of the `Task` that generated the error; that task's own cleanup is instead expected to be part of its error paths. + +Different `Change`s and independent `Task`s are normally executed concurrently. + +`Task`s and `State` locking and consistency +-------------------------------------------- +Currently, the `Task` do and undo handlers are started without holding the `State` lock, but consistency is easier to maintain if a `Task` executes while holding the `State` lock. + +Strictly, the `State` lock must only be released when performing slow operations, such as: +- copying, compressing or uncompressing large amounts of on-disk data +- network operations + +So in practice, most handler code should start with: + +``` +st.Lock() +defer st.Unlock() +``` + +where `st` is the runtime `state.State` instance, accessible via `Task.State()` or the handler's manager. + +The deferred `Unlock` will implicitly commit any working state mutations at the end of the handler.
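For example, a do handler hooked up with `TaskRunner.AddHandler` would look roughly like the sketch below; the `"example-kind"` kind, the `"count"` key and the slow operation in the middle are made up for illustration, `runner` is assumed to be a `*state.TaskRunner`, and the `errors`, `state` and `gopkg.in/tomb.v2` imports are assumed to be in place:

```
runner.AddHandler("example-kind",
	func(t *state.Task, _ *tomb.Tomb) error {
		st := t.State()
		st.Lock()
		defer st.Unlock() // commits working state mutations when the handler returns

		var count int
		if err := t.Get("count", &count); err != nil && !errors.Is(err, state.ErrNoState) {
			return err
		}
		t.Set("count", count+1)

		// Release the lock only around genuinely slow work (network, large copies).
		st.Unlock()
		// ... slow operation ...
		st.Lock()

		return nil
	},
	nil) // no undo handler in this sketch
```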
+ +Due to potential restarts, the do or undo handler logic in a `Task` may be re-executed if it hasn't already completed. This necessitates the following considerations: +- on-disk/external state manipulation should be idempotent or equivalent +- working state manipulation should either be idempotent or designed to combine working state mutations with setting the next status of the task; this approach currently requires using `Task.SetStatus` before returning from the handler + +If slow operations need to be performed, the required `Unlock/Lock` should happen before any working state manipulation. + +If the `State` lock is released and reacquired in a handler, the code needs to consider that other code could have manipulated some relevant working state. There may also be cases where it's neither possible nor desirable to hold the `State` lock for the entirety of a state manipulation, such as when a manipulation spans multiple subsystems, and so spans multiple tasks. For all such cases, and to simplify reasoning, snapd offers other coordination mechanisms with granularity different from the `State` lock. + +See also the comment in `overlord/snapstate/handlers.go` about state locking. + +Conflicts and `Task` precondition blocking +------------------------------------------- +At a higher level, it may be appropriate and simpler to ensure that at most one `Change`/sequence of `Task`s is operating on a given snap at a time. This could, for example, stop the system from connecting an interface on a snap that's being removed, or prevent service manipulation while its snap is being installed. + +While creating a new `Change` that will operate on a snap, snapd checks whether there are already any in-progress operations for the snap. If there are, a conflict error is returned rather than initiating the `Change`. + +The central logic for such checking lives in `overlord/snapstate/conflict.go`. + +Some tasks, or families of tasks, need to release the `State` lock but cannot run together with some other tasks. Such tasks include: +- hook tasks, where at most one should be running at a time for a given snap +- interface-related tasks that might touch more than one snap at a time, beyond what conflicts can take care of, so preferably at most one of them should be running at a time + +To address this, precondition predicates can be hooked into the `TaskRunner` via `TaskRunner.AddBlocked`. + +Before a task is run, the precondition predicates are invoked and, if none of them returns true, the task proceeds. The input for these predicates is any candidate-for-running task and the set of currently running tasks. diff --git a/overlord/devicestate/devicemgr.go b/overlord/devicestate/devicemgr.go index 2a4d9e7786..da06f71fb5 100644 --- a/overlord/devicestate/devicemgr.go +++ b/overlord/devicestate/devicemgr.go @@ -1618,7 +1618,9 @@ func (m *DeviceManager) ensureExpiredUsersRemoved() error { if !user.HasExpired() { continue } - if _, err := RemoveUser(st, user.Username); err != nil { + // Force the removal of the user as it's possible to block this expiration + // otherwise by the user having left a process or service running.
+ if _, err := RemoveUser(st, user.Username, &RemoveUserOptions{Force: true}); err != nil { return err } } diff --git a/overlord/devicestate/devicestate.go b/overlord/devicestate/devicestate.go index ff92785cdf..8bf07debb1 100644 --- a/overlord/devicestate/devicestate.go +++ b/overlord/devicestate/devicestate.go @@ -888,6 +888,14 @@ func Remodel(st *state.State, new *asserts.Model) (*state.Change, error) { return nil, fmt.Errorf("cannot remodel to different series yet") } + // don't allow remodel on classic for now + if current.Classic() { + return nil, fmt.Errorf("cannot remodel from classic model") + } + if current.Classic() != new.Classic() { + return nil, fmt.Errorf("cannot remodel across classic and non-classic models") + } + // TODO:UC20: ensure we never remodel to a lower // grade diff --git a/overlord/devicestate/devicestate_bootconfig_test.go b/overlord/devicestate/devicestate_bootconfig_test.go index 9aea425a46..f0dec03a15 100644 --- a/overlord/devicestate/devicestate_bootconfig_test.go +++ b/overlord/devicestate/devicestate_bootconfig_test.go @@ -159,8 +159,9 @@ func (s *deviceMgrBootconfigSuite) testBootConfigUpdateRun(c *C, updateAttempted if errMatch == "" && applied { // we log on success log := tsk.Log() - c.Assert(log, HasLen, 1) + c.Assert(log, HasLen, 2) c.Check(log[0], Matches, ".* updated boot config assets") + c.Check(log[1], Matches, ".* Requested system restart") // update was applied, thus a restart was requested c.Check(s.restartRequests, DeepEquals, []restart.RestartType{restart.RestartSystemNow}) } else { diff --git a/overlord/devicestate/devicestate_gadget_test.go b/overlord/devicestate/devicestate_gadget_test.go index 3434b3d5dd..a36c917c1d 100644 --- a/overlord/devicestate/devicestate_gadget_test.go +++ b/overlord/devicestate/devicestate_gadget_test.go @@ -1204,14 +1204,12 @@ func (s *deviceMgrGadgetSuite) testGadgetCommandlineUpdateRun(c *C, fromFiles, t // we log on success log := tsk.Log() if logMatch != "" { - if !isClassic { - c.Assert(log, HasLen, 1) - } else { - c.Assert(log, HasLen, 2) - } + c.Assert(log, HasLen, 2) c.Check(log[0], Matches, fmt.Sprintf(".* %v", logMatch)) if isClassic { c.Check(log[1], Matches, ".* Task set to wait until a manual system restart allows to continue") + } else { + c.Check(log[1], Matches, ".* Requested system restart") } } else { c.Check(log, HasLen, 0) @@ -1608,9 +1606,11 @@ func (s *deviceMgrGadgetSuite) TestGadgetCommandlineUpdateUndo(c *C) { c.Check(chg.Err(), ErrorMatches, "(?s)cannot perform the following tasks.*total undo.*") c.Check(tsk.Status(), Equals, state.UndoneStatus) log := tsk.Log() - c.Assert(log, HasLen, 2) + c.Assert(log, HasLen, 4) c.Check(log[0], Matches, ".* Updated kernel command line") - c.Check(log[1], Matches, ".* Reverted kernel command line change") + c.Check(log[1], Matches, ".* Requested system restart") + c.Check(log[2], Matches, ".* Reverted kernel command line change") + c.Check(log[3], Matches, ".* Requested system restart") // update was applied and then undone c.Check(s.restartRequests, DeepEquals, []restart.RestartType{restart.RestartSystemNow, restart.RestartSystemNow}) c.Check(restartCount, Equals, 2) diff --git a/overlord/devicestate/devicestate_install_api_test.go b/overlord/devicestate/devicestate_install_api_test.go new file mode 100644 index 0000000000..2c8c32ac24 --- /dev/null +++ b/overlord/devicestate/devicestate_install_api_test.go @@ -0,0 +1,554 @@ +// -*- Mode: Go; indent-tabs-mode: t -*- +//go:build !nosecboot +// +build !nosecboot + +/* + * Copyright (C) 2022 Canonical 
Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +package devicestate_test + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + + . "gopkg.in/check.v1" + + "github.com/snapcore/snapd/arch" + "github.com/snapcore/snapd/asserts" + "github.com/snapcore/snapd/asserts/assertstest" + "github.com/snapcore/snapd/asserts/sysdb" + "github.com/snapcore/snapd/boot" + "github.com/snapcore/snapd/bootloader" + "github.com/snapcore/snapd/dirs" + "github.com/snapcore/snapd/gadget" + "github.com/snapcore/snapd/gadget/gadgettest" + "github.com/snapcore/snapd/gadget/install" + "github.com/snapcore/snapd/osutil" + "github.com/snapcore/snapd/overlord/devicestate" + "github.com/snapcore/snapd/release" + "github.com/snapcore/snapd/secboot" + "github.com/snapcore/snapd/secboot/keys" + "github.com/snapcore/snapd/seed" + "github.com/snapcore/snapd/seed/seedtest" + "github.com/snapcore/snapd/snap" + "github.com/snapcore/snapd/testutil" + "github.com/snapcore/snapd/timings" +) + +type deviceMgrInstallAPISuite struct { + deviceMgrBaseSuite + *seedtest.TestingSeed20 +} + +var _ = Suite(&deviceMgrInstallAPISuite{}) + +func (s *deviceMgrInstallAPISuite) SetUpTest(c *C) { + classic := true + s.deviceMgrBaseSuite.setupBaseTest(c, classic) + + // We uncompress a gadget with grub, and prefer not to mock in this case + bootloader.Force(nil) + + restore := devicestate.MockSystemForPreseeding(func() (string, error) { + return "fake system label", nil + }) + s.AddCleanup(restore) + + s.TestingSeed20 = &seedtest.TestingSeed20{} + s.SeedDir = dirs.SnapSeedDir + + s.state.Lock() + defer s.state.Unlock() + s.state.Set("seeded", true) +} + +func unpackSnap(snapBlob, targetDir string) error { + if out, err := exec.Command("unsquashfs", "-d", targetDir, "-f", snapBlob).CombinedOutput(); err != nil { + return fmt.Errorf("cannot unsquashfs: %v", osutil.OutputErr(out, err)) + } + return nil +} + +func (s *deviceMgrInstallAPISuite) setupSystemSeed(c *C, sysLabel, gadgetYaml string, isClassic bool) *asserts.Model { + s.StoreSigning = assertstest.NewStoreStack("can0nical", nil) + s.AddCleanup(sysdb.InjectTrusted(s.StoreSigning.Trusted)) + + s.Brands = assertstest.NewSigningAccounts(s.StoreSigning) + s.Brands.Register("my-brand", brandPrivKey, nil) + + // now create a minimal seed dir with snaps/assertions + testSeed := &seedtest.TestingSeed20{ + SeedSnaps: seedtest.SeedSnaps{ + StoreSigning: s.StoreSigning, + Brands: s.Brands, + }, + SeedDir: dirs.SnapSeedDir, + } + + restore := seed.MockTrusted(testSeed.StoreSigning.Trusted) + s.AddCleanup(restore) + + assertstest.AddMany(s.StoreSigning.Database, s.Brands.AccountsAndKeys("my-brand")...) 
+ + s.MakeAssertedSnap(c, seedtest.SampleSnapYaml["snapd"], nil, snap.R(1), "my-brand", s.StoreSigning.Database) + s.MakeAssertedSnap(c, seedtest.SampleSnapYaml["pc-kernel=22"], + [][]string{{"kernel.efi", ""}}, snap.R(1), "my-brand", s.StoreSigning.Database) + s.MakeAssertedSnap(c, seedtest.SampleSnapYaml["core22"], nil, snap.R(1), "my-brand", s.StoreSigning.Database) + s.MakeAssertedSnap(c, seedtest.SampleSnapYaml["pc=22"], + [][]string{ + {"meta/gadget.yaml", gadgetYaml}, + {"pc-boot.img", ""}, {"pc-core.img", ""}, {"grubx64.efi", ""}, + {"shim.efi.signed", ""}, {"grub.conf", ""}}, + snap.R(1), "my-brand", s.StoreSigning.Database) + + model := map[string]interface{}{ + "display-name": "my model", + "architecture": "amd64", + "base": "core22", + "grade": "dangerous", + "snaps": []interface{}{ + map[string]interface{}{ + "name": "pc-kernel", + "id": s.AssertedSnapID("pc-kernel"), + "type": "kernel", + "default-channel": "20", + }, + map[string]interface{}{ + "name": "pc", + "id": s.AssertedSnapID("pc"), + "type": "gadget", + "default-channel": "20", + }, + map[string]interface{}{ + "name": "snapd", + "id": s.AssertedSnapID("snapd"), + "type": "snapd", + }, + map[string]interface{}{ + "name": "core22", + "id": s.AssertedSnapID("core22"), + "type": "base", + }, + }, + } + if isClassic { + model["classic"] = "true" + model["distribution"] = "ubuntu" + } + + return s.MakeSeed(c, sysLabel, "my-brand", "my-model", model, nil) +} + +type finishStepOpts struct { + encrypted bool + isClassic bool +} + +func (s *deviceMgrInstallAPISuite) mockSystemSeedWithLabel(c *C, label string, isClassic bool) (gadgetSnapPath, kernelSnapPath string, ginfo *gadget.Info, mountCmd *testutil.MockCmd) { + // Mock partitioned disk + gadgetYaml := gadgettest.SingleVolumeClassicWithModesGadgetYaml + gadgetRoot := filepath.Join(c.MkDir(), "gadget") + ginfo, _, _, restore, err := gadgettest.MockGadgetPartitionedDisk(gadgetYaml, gadgetRoot) + c.Assert(err, IsNil) + s.AddCleanup(restore) + + // now create a label with snaps/assertions + // TODO This should be "gadgetYaml" instead of SingleVolumeUC20GadgetYaml, + // but we have to do it this way as otherwise snap pack will complain + // while validating, as it does not have information about the model at + // that time. When that is fixed this must change to gadgetYaml. 
+ model := s.setupSystemSeed(c, label, gadgettest.SingleVolumeUC20GadgetYaml, isClassic) + c.Check(model, NotNil) + + // Create fake seed that will return information from the label we created + // (TODO: needs to be in sync with setupSystemSeed, fix that) + kernelSnapPath = filepath.Join(s.SeedDir, "snaps", "pc-kernel_1.snap") + baseSnapPath := filepath.Join(s.SeedDir, "snaps", "core22_1.snap") + gadgetSnapPath = filepath.Join(s.SeedDir, "snaps", "pc_1.snap") + restore = devicestate.MockSeedOpen(func(seedDir, label string) (seed.Seed, error) { + return &fakeSeed{ + essentialSnaps: []*seed.Snap{ + { + Path: kernelSnapPath, + SideInfo: &snap.SideInfo{RealName: "pc-kernel", Revision: snap.R(1), SnapID: s.SeedSnaps.AssertedSnapID("pc-kernel")}, + EssentialType: snap.TypeKernel, + }, + { + Path: baseSnapPath, + SideInfo: &snap.SideInfo{RealName: "core22", Revision: snap.R(1), SnapID: s.SeedSnaps.AssertedSnapID("core22")}, + EssentialType: snap.TypeBase, + }, + { + Path: gadgetSnapPath, + SideInfo: &snap.SideInfo{RealName: "pc", Revision: snap.R(1), SnapID: s.SeedSnaps.AssertedSnapID("pc")}, + EssentialType: snap.TypeGadget, + }, + }, + model: model, + }, nil + }) + s.AddCleanup(restore) + + // Mock calls to systemd-mount, which is used to mount snaps from the system label + mountCmd = testutil.MockCommand(c, "systemd-mount", "") + s.AddCleanup(func() { mountCmd.Restore() }) + + return gadgetSnapPath, kernelSnapPath, ginfo, mountCmd +} + +// TODO encryption case for the finish step is not tested yet, it needs more mocking +func (s *deviceMgrInstallAPISuite) testInstallFinishStep(c *C, opts finishStepOpts) { + // TODO UC case when supported + restore := release.MockOnClassic(opts.isClassic) + s.AddCleanup(restore) + + // only amd64/arm64 have trusted boot assets + oldArch := arch.DpkgArchitecture() + defer arch.SetArchitecture(arch.ArchitectureType(oldArch)) + arch.SetArchitecture("amd64") + + // Mock label + label := "classic" + gadgetSnapPath, kernelSnapPath, ginfo, mountCmd := s.mockSystemSeedWithLabel(c, label, opts.isClassic) + + // Unpack gadget snap from seed where it would have been mounted + gadgetDir := filepath.Join(dirs.SnapRunDir, "snap-content/gadget") + err := os.MkdirAll(gadgetDir, 0755) + c.Assert(err, IsNil) + err = unpackSnap(filepath.Join(s.SeedDir, "snaps/pc_1.snap"), gadgetDir) + c.Assert(err, IsNil) + + // Mock writing of contents + writeContentCalls := 0 + restore = devicestate.MockInstallWriteContent(func(onVolumes map[string]*gadget.Volume, allLaidOutVols map[string]*gadget.LaidOutVolume, encSetupData *install.EncryptionSetupData, observer gadget.ContentObserver, perfTimings timings.Measurer) ([]*gadget.OnDiskVolume, error) { + writeContentCalls++ + if opts.encrypted { + c.Check(encSetupData, NotNil) + + // Make sure we "observe" grub from boot partition + mockRunBootStruct := &gadget.LaidOutStructure{ + VolumeStructure: &gadget.VolumeStructure{ + Role: gadget.SystemBoot, + }, + } + writeChange := &gadget.ContentChange{ + // file that contains the data of the installed file + After: filepath.Join(dirs.RunDir, "mnt/ubuntu-boot/EFI/boot/grubx64.efi"), + // there is no original file in place + Before: "", + } + action, err := observer.Observe(gadget.ContentWrite, mockRunBootStruct, + filepath.Join(dirs.RunDir, "mnt/ubuntu-boot/"), + "EFI/boot/grubx64.efi", writeChange) + c.Check(err, IsNil) + c.Check(action, Equals, gadget.ChangeApply) + } else { + c.Check(encSetupData, IsNil) + } + return nil, nil + }) + s.AddCleanup(restore) + + seedDir := filepath.Join(dirs.RunDir, 
"mnt/ubuntu-seed") + + // Mock mounting of partitions + mountVolsCalls := 0 + restore = devicestate.MockInstallMountVolumes(func(onVolumes map[string]*gadget.Volume, encSetupData *install.EncryptionSetupData) (seedMntDir string, unmount func() error, err error) { + mountVolsCalls++ + return seedDir, func() error { return nil }, nil + }) + s.AddCleanup(restore) + + // Mock saving of traits + saveStorageTraitsCalls := 0 + restore = devicestate.MockInstallSaveStorageTraits(func(model gadget.Model, allLaidOutVols map[string]*gadget.LaidOutVolume, encryptSetupData *install.EncryptionSetupData) error { + saveStorageTraitsCalls++ + return nil + }) + s.AddCleanup(restore) + + // Insert encryption data when enabled + if opts.encrypted { + // Mock TPM and sealing + restore := devicestate.MockSecbootCheckTPMKeySealingSupported(func() error { return nil }) + s.AddCleanup(restore) + restore = boot.MockSealKeyToModeenv(func(key, saveKey keys.EncryptionKey, model *asserts.Model, modeenv *boot.Modeenv, flags boot.MockSealKeyToModeenvFlags) error { + c.Check(model.Classic(), Equals, opts.isClassic) + // Note that we cannot compare the full structure and we check + // separately bits as the types for these are not exported. + c.Check(len(modeenv.CurrentTrustedBootAssets), Equals, 1) + c.Check(modeenv.CurrentTrustedBootAssets["grubx64.efi"], DeepEquals, + []string{"0c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2ac3713831264adb47fb6bd1e058d5f004"}) + c.Check(len(modeenv.CurrentTrustedRecoveryBootAssets), Equals, 2) + c.Check(modeenv.CurrentTrustedRecoveryBootAssets["bootx64.efi"], DeepEquals, []string{"0c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2ac3713831264adb47fb6bd1e058d5f004"}) + c.Check(modeenv.CurrentTrustedRecoveryBootAssets["grubx64.efi"], DeepEquals, []string{"0c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2ac3713831264adb47fb6bd1e058d5f004"}) + c.Check(len(modeenv.CurrentKernelCommandLines), Equals, 1) + c.Check(modeenv.CurrentKernelCommandLines[0], Equals, + "snapd_recovery_mode=run console=ttyS0,115200n8 console=tty1 panic=-1") + return nil + }) + s.AddCleanup(restore) + + // Insert encryption set-up data in state cache + restore = devicestate.MockEncryptionSetupDataInCache(s.state, label) + s.AddCleanup(restore) + + // Write expected boot assets needed when creating bootchain + seedBootDir := filepath.Join(dirs.RunDir, "mnt/ubuntu-seed/EFI/boot/") + c.Assert(os.MkdirAll(seedBootDir, 0755), IsNil) + + for _, p := range []string{ + filepath.Join(seedBootDir, "bootx64.efi"), + filepath.Join(seedBootDir, "grubx64.efi"), + } { + c.Assert(ioutil.WriteFile(p, []byte{}, 0755), IsNil) + } + + bootDir := filepath.Join(dirs.RunDir, "mnt/ubuntu-boot/EFI/boot/") + c.Assert(os.MkdirAll(bootDir, 0755), IsNil) + c.Assert(ioutil.WriteFile(filepath.Join(bootDir, "grubx64.efi"), []byte{}, 0755), IsNil) + } + + s.state.Lock() + defer s.state.Unlock() + + // Create change + chg := s.state.NewChange("install-step-finish", "finish setup of run system") + finishTask := s.state.NewTask("install-finish", "install API finish step") + finishTask.Set("system-label", label) + finishTask.Set("on-volumes", ginfo.Volumes) + + chg.AddTask(finishTask) + + // now let the change run - some checks will happen in the mocked functions + s.state.Unlock() + defer s.state.Lock() + + s.settle(c) + + s.state.Lock() + defer s.state.Unlock() + c.Assert(chg.Err(), IsNil) + + // Checks now + kernelDir := filepath.Join(dirs.SnapRunDir, "snap-content/kernel") + c.Check(mountCmd.Calls(), 
DeepEquals, [][]string{ + {"systemd-mount", gadgetSnapPath, gadgetDir}, + {"systemd-mount", kernelSnapPath, kernelDir}, + {"systemd-mount", "--umount", gadgetDir}, + {"systemd-mount", "--umount", kernelDir}, + }) + c.Check(writeContentCalls, Equals, 1) + c.Check(mountVolsCalls, Equals, 1) + c.Check(saveStorageTraitsCalls, Equals, 1) + + expectedFiles := []string{ + filepath.Join(seedDir, "EFI/ubuntu/grub.cfg"), + filepath.Join(seedDir, "EFI/ubuntu/grubenv"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-boot/EFI/ubuntu/grub.cfg"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-boot/EFI/ubuntu/grubenv"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-boot/EFI/ubuntu/pc-kernel_1.snap/kernel.efi"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-boot/EFI/ubuntu/kernel.efi"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-boot/device/model"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-data/var/lib/snapd/modeenv"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-data/var/lib/snapd/snaps/core22_1.snap"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-data/var/lib/snapd/snaps/pc_1.snap"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-data/var/lib/snapd/snaps/pc-kernel_1.snap"), + } + if opts.encrypted { + expectedFiles = append(expectedFiles, dirs.RunDir, + filepath.Join(dirs.RunDir, "mnt/ubuntu-data/var/lib/snapd/device/fde/marker"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-data/var/lib/snapd/device/fde/ubuntu-save.key"), + filepath.Join(dirs.RunDir, "mnt/ubuntu-save/device/fde/marker")) + } + for _, f := range expectedFiles { + c.Check(f, testutil.FilePresent) + } +} + +func (s *deviceMgrInstallAPISuite) TestInstallFinishNoEncryptionHappy(c *C) { + s.testInstallFinishStep(c, finishStepOpts{encrypted: false, isClassic: true}) +} + +func (s *deviceMgrInstallAPISuite) TestInstallFinishEncryptionHappy(c *C) { + s.testInstallFinishStep(c, finishStepOpts{encrypted: true, isClassic: true}) +} + +func (s *deviceMgrInstallAPISuite) TestInstallFinishNoLabel(c *C) { + // Mock partitioned disk, but there will be no label in the system + gadgetYaml := gadgettest.SingleVolumeClassicWithModesGadgetYaml + gadgetRoot := filepath.Join(c.MkDir(), "gadget") + ginfo, _, _, restore, err := gadgettest.MockGadgetPartitionedDisk(gadgetYaml, gadgetRoot) + c.Assert(err, IsNil) + s.AddCleanup(restore) + + s.state.Lock() + defer s.state.Unlock() + + // Create change + label := "classic" + chg := s.state.NewChange("install-step-finish", "finish setup of run system") + finishTask := s.state.NewTask("install-finish", "install API finish step") + finishTask.Set("system-label", label) + finishTask.Set("on-volumes", ginfo.Volumes) + + chg.AddTask(finishTask) + + // now let the change run - some checks will happen in the mocked functions + s.state.Unlock() + defer s.state.Lock() + + s.settle(c) + + s.state.Lock() + defer s.state.Unlock() + + // Checks now + c.Check(chg.Err(), ErrorMatches, `cannot perform the following tasks: +- install API finish step \(cannot load assertions for label "classic": no seed assertions\)`) +} + +func (s *deviceMgrInstallAPISuite) testInstallSetupStorageEncryption(c *C, hasTPM bool) { + // Mock label + label := "classic" + isClassic := true + gadgetSnapPath, kernelSnapPath, ginfo, mountCmd := s.mockSystemSeedWithLabel(c, label, isClassic) + + // Simulate system with TPM + if hasTPM { + restore := devicestate.MockSecbootCheckTPMKeySealingSupported(func() error { return nil }) + s.AddCleanup(restore) + } + + // Mock encryption of partitions + encrytpPartCalls := 0 + restore := devicestate.MockInstallEncryptPartitions(func(onVolumes 
map[string]*gadget.Volume, encryptionType secboot.EncryptionType, model *asserts.Model, gadgetRoot, kernelRoot string, perfTimings timings.Measurer) (*install.EncryptionSetupData, error) { + encrytpPartCalls++ + c.Check(encryptionType, Equals, secboot.EncryptionTypeLUKS) + saveFound := false + dataFound := false + for _, strct := range onVolumes["pc"].Structure { + switch strct.Role { + case "system-save": + saveFound = true + case "system-data": + dataFound = true + } + } + c.Check(saveFound, Equals, true) + c.Check(dataFound, Equals, true) + return &install.EncryptionSetupData{}, nil + }) + s.AddCleanup(restore) + + s.state.Lock() + defer s.state.Unlock() + + // Create change + chg := s.state.NewChange("install-step-setup-storage-encryption", + "Setup storage encryption") + encryptTask := s.state.NewTask("install-setup-storage-encryption", + "install API set-up encryption step") + encryptTask.Set("system-label", label) + encryptTask.Set("on-volumes", ginfo.Volumes) + chg.AddTask(encryptTask) + + // now let the change run - some checks will happen in the mocked functions + s.state.Unlock() + defer s.state.Lock() + + s.settle(c) + + // Make sure that if anything was stored in cache it is removed after test is run + s.AddCleanup(func() { + devicestate.CleanUpEncryptionSetupDataInCache(s.state, label) + }) + + s.state.Lock() + defer s.state.Unlock() + + // Checks now + if !hasTPM { + c.Check(chg.Err(), ErrorMatches, `.* +.*encryption unavailable on this device: not encrypting device storage as checking TPM gave: .*`) + return + } + + c.Check(chg.Err(), IsNil) + gadgetDir := filepath.Join(dirs.SnapRunDir, "snap-content/gadget") + kernelDir := filepath.Join(dirs.SnapRunDir, "snap-content/kernel") + c.Check(mountCmd.Calls(), DeepEquals, [][]string{ + {"systemd-mount", gadgetSnapPath, gadgetDir}, + {"systemd-mount", kernelSnapPath, kernelDir}, + {"systemd-mount", "--umount", gadgetDir}, + {"systemd-mount", "--umount", kernelDir}, + }) + c.Check(encrytpPartCalls, Equals, 1) + // Check that some data has been stored in the change + apiData := make(map[string]interface{}) + c.Check(chg.Get("api-data", &apiData), IsNil) + _, ok := apiData["encrypted-devices"] + c.Check(ok, Equals, true) + // Check that state has been stored in the cache + c.Check(devicestate.CheckEncryptionSetupDataFromCache(s.state, label), IsNil) +} + +func (s *deviceMgrInstallAPISuite) TestInstallSetupStorageEncryptionHappy(c *C) { + s.testInstallSetupStorageEncryption(c, true) +} + +func (s *deviceMgrInstallAPISuite) TestInstallSetupStorageEncryptionNoCrypto(c *C) { + s.testInstallSetupStorageEncryption(c, false) +} + +func (s *deviceMgrInstallAPISuite) TestInstallSetupStorageEncryptionNoLabel(c *C) { + // Mock partitioned disk, but there will be no label in the system + gadgetYaml := gadgettest.SingleVolumeClassicWithModesGadgetYaml + gadgetRoot := filepath.Join(c.MkDir(), "gadget") + ginfo, _, _, restore, err := gadgettest.MockGadgetPartitionedDisk(gadgetYaml, gadgetRoot) + c.Assert(err, IsNil) + s.AddCleanup(restore) + + s.state.Lock() + defer s.state.Unlock() + + // Create change + label := "classic" + chg := s.state.NewChange("install-step-setup-storage-encryption", + "Setup storage encryption") + encryptTask := s.state.NewTask("install-setup-storage-encryption", + "install API set-up encryption step") + encryptTask.Set("system-label", label) + encryptTask.Set("on-volumes", ginfo.Volumes) + chg.AddTask(encryptTask) + + // now let the change run - some checks will happen in the mocked functions + s.state.Unlock() + defer 
s.state.Lock() + + s.settle(c) + + s.state.Lock() + defer s.state.Unlock() + + // Checks now + c.Check(chg.Err(), ErrorMatches, `cannot perform the following tasks: +- install API set-up encryption step \(cannot load assertions for label "classic": no seed assertions\)`) +} diff --git a/overlord/devicestate/devicestate_install_mode_test.go b/overlord/devicestate/devicestate_install_mode_test.go index 3d408480ce..4b212dfe6b 100644 --- a/overlord/devicestate/devicestate_install_mode_test.go +++ b/overlord/devicestate/devicestate_install_mode_test.go @@ -528,14 +528,15 @@ func (s *deviceMgrInstallModeSuite) TestInstallRestoresPreseedArtifactError(c *C type fakeSeed struct { modeSnaps []*seed.Snap essentialSnaps []*seed.Snap + model *asserts.Model } func (fakeSeed) LoadAssertions(db asserts.RODatabase, commitTo func(*asserts.Batch) error) error { return nil } -func (fakeSeed) Model() *asserts.Model { - return nil +func (fs fakeSeed) Model() *asserts.Model { + return fs.model } func (fakeSeed) Brand() (*asserts.Account, error) { diff --git a/overlord/devicestate/devicestate_remodel_test.go b/overlord/devicestate/devicestate_remodel_test.go index 68633e3906..6e4720cb54 100644 --- a/overlord/devicestate/devicestate_remodel_test.go +++ b/overlord/devicestate/devicestate_remodel_test.go @@ -155,6 +155,7 @@ func (s *deviceMgrRemodelSuite) TestRemodelUnhappy(c *C) { {map[string]interface{}{"base": "core18"}, "cannot remodel from core to bases yet"}, // pre-UC20 to UC20 {map[string]interface{}{"base": "core20", "kernel": nil, "gadget": nil, "snaps": mockCore20ModelSnaps}, `cannot remodel from pre-UC20 to UC20\+ models`}, + {map[string]interface{}{"base": "core20", "kernel": nil, "gadget": nil, "classic": "true", "distribution": "ubuntu", "snaps": mockCore20ModelSnaps}, `cannot remodel across classic and non-classic models`}, } { mergeMockModelHeaders(cur, t.new) new := s.brands.Model(t.new["brand"].(string), t.new["model"].(string), t.new) @@ -164,6 +165,41 @@ func (s *deviceMgrRemodelSuite) TestRemodelUnhappy(c *C) { } } +func (s *deviceMgrRemodelSuite) TestRemodelFromClassicUnhappy(c *C) { + s.state.Lock() + defer s.state.Unlock() + s.state.Set("seeded", true) + + // set a model assertion + cur := map[string]interface{}{ + "brand": "canonical", + "model": "pc-model", + "architecture": "amd64", + "classic": "true", + "gadget": "pc", + } + s.makeModelAssertionInState(c, cur["brand"].(string), cur["model"].(string), map[string]interface{}{ + "architecture": cur["architecture"], + "gadget": cur["gadget"], + "classic": cur["classic"], + }) + s.makeSerialAssertionInState(c, cur["brand"].(string), cur["model"].(string), "orig-serial") + devicestatetest.SetDevice(s.state, &auth.DeviceState{ + Brand: cur["brand"].(string), + Model: cur["model"].(string), + Serial: "orig-serial", + }) + + new := s.brands.Model(cur["brand"].(string), "new-model", map[string]interface{}{ + "architecture": cur["architecture"], + "gadget": cur["gadget"], + "classic": cur["classic"], + }) + + _, err := devicestate.Remodel(s.state, new) + c.Check(err, ErrorMatches, `cannot remodel from classic model`) +} + func (s *deviceMgrRemodelSuite) TestRemodelCheckGrade(c *C) { s.state.Lock() defer s.state.Unlock() diff --git a/overlord/devicestate/export_test.go b/overlord/devicestate/export_test.go index ab49e6e519..5af253bed9 100644 --- a/overlord/devicestate/export_test.go +++ b/overlord/devicestate/export_test.go @@ -21,6 +21,7 @@ package devicestate import ( "context" + "fmt" "net/http" "os/user" "time" @@ -366,6 +367,38 @@ func 
MockInstallFactoryReset(f func(model gadget.Model, gadgetRoot, kernelRoot, return restore } +func MockInstallWriteContent(f func(onVolumes map[string]*gadget.Volume, allLaidOutVols map[string]*gadget.LaidOutVolume, encSetupData *install.EncryptionSetupData, observer gadget.ContentObserver, perfTimings timings.Measurer) ([]*gadget.OnDiskVolume, error)) (restore func()) { + old := installWriteContent + installWriteContent = f + return func() { + installWriteContent = old + } +} + +func MockInstallMountVolumes(f func(onVolumes map[string]*gadget.Volume, encSetupData *install.EncryptionSetupData) (espMntDir string, unmount func() error, err error)) (restore func()) { + old := installMountVolumes + installMountVolumes = f + return func() { + installMountVolumes = old + } +} + +func MockInstallEncryptPartitions(f func(onVolumes map[string]*gadget.Volume, encryptionType secboot.EncryptionType, model *asserts.Model, gadgetRoot, kernelRoot string, perfTimings timings.Measurer) (*install.EncryptionSetupData, error)) (restore func()) { + old := installEncryptPartitions + installEncryptPartitions = f + return func() { + installEncryptPartitions = old + } +} + +func MockInstallSaveStorageTraits(f func(model gadget.Model, allLaidOutVols map[string]*gadget.LaidOutVolume, encryptSetupData *install.EncryptionSetupData) error) (restore func()) { + old := installSaveStorageTraits + installSaveStorageTraits = f + return func() { + installSaveStorageTraits = old + } +} + func MockSecbootStageEncryptionKeyChange(f func(node string, key keys.EncryptionKey) error) (restore func()) { restore = testutil.Backup(&secbootStageEncryptionKeyChange) secbootStageEncryptionKeyChange = f @@ -491,3 +524,40 @@ func MockCreateAllKnownSystemUsers(createAllUsers func(state *state.State, asser createAllKnownSystemUsers = createAllUsers return restore } + +func MockEncryptionSetupDataInCache(st *state.State, label string) (restore func()) { + st.Lock() + defer st.Unlock() + var esd *install.EncryptionSetupData + labelToEncData := map[string]*install.MockEncryptedDeviceAndRole{ + "ubuntu-save": { + Role: "system-save", + EncryptedDevice: "/dev/mapper/ubuntu-save", + }, + "ubuntu-data": { + Role: "system-data", + EncryptedDevice: "/dev/mapper/ubuntu-data", + }, + } + esd = install.MockEncryptionSetupData(labelToEncData) + st.Cache(encryptionSetupDataKey{label}, esd) + return func() { CleanUpEncryptionSetupDataInCache(st, label) } +} + +func CheckEncryptionSetupDataFromCache(st *state.State, label string) error { + cached := st.Cached(encryptionSetupDataKey{label}) + if cached == nil { + return fmt.Errorf("no EncryptionSetupData found in cache") + } + if _, ok := cached.(*install.EncryptionSetupData); !ok { + return fmt.Errorf("wrong data type under encryptionSetupDataKey") + } + return nil +} + +func CleanUpEncryptionSetupDataInCache(st *state.State, label string) { + st.Lock() + defer st.Unlock() + key := encryptionSetupDataKey{label} + st.Cache(key, nil) +} diff --git a/overlord/devicestate/firstboot.go b/overlord/devicestate/firstboot.go index 362e11aa6e..f2df2fcdb9 100644 --- a/overlord/devicestate/firstboot.go +++ b/overlord/devicestate/firstboot.go @@ -141,7 +141,8 @@ func populateStateFromSeedImpl(st *state.State, opts *populateStateFromSeedOptio timings.Run(tm, "load-verified-snap-metadata", "load verified snap metadata from seed", func(nested timings.Measurer) { err = deviceSeed.LoadMeta(mode, nil, nested) }) - if release.OnClassic && err == seed.ErrNoMeta { + // ErrNoMeta can happen only with Core 16/18-style seeds + if 
err == seed.ErrNoMeta && release.OnClassic { if preseed { return nil, fmt.Errorf("no snaps to preseed") } diff --git a/overlord/devicestate/firstboot20_test.go b/overlord/devicestate/firstboot20_test.go index 8005077a36..139753aec1 100644 --- a/overlord/devicestate/firstboot20_test.go +++ b/overlord/devicestate/firstboot20_test.go @@ -230,6 +230,17 @@ func stripSnapNamesWithChannels(snaps []string) []string { return names } +func (s *firstBoot20Suite) updateModel(c *C, sysLabel string, model *asserts.Model, modelUpdater func(c *C, headers map[string]interface{})) *asserts.Model { + if modelUpdater != nil { + hdrs := model.Headers() + modelUpdater(c, hdrs) + model = s.Brands.Model(model.BrandID(), model.Model(), hdrs) + modelFn := filepath.Join(s.SeedDir, "systems", sysLabel, "model") + seedtest.WriteAssertions(modelFn, model) + } + return model +} + func checkSnapstateDevModeFlags(c *C, tsAll []*state.TaskSet, snapsWithDevModeFlag ...string) { allDevModeSnaps := stripSnapNamesWithChannels(snapsWithDevModeFlag) @@ -913,7 +924,7 @@ func (s *firstBoot20Suite) TestPopulateFromSeedClassicWithModesRunModeNoKernelAn }}) } -func (s *firstBoot20Suite) testPopulateFromSeedClassicWithModesRunModeNoKernelAndGadgetClassicSnap(c *C, modelGrade asserts.ModelGrade, expectedErr string) { +func (s *firstBoot20Suite) testPopulateFromSeedClassicWithModesRunModeNoKernelAndGadgetClassicSnap(c *C, modelGrade asserts.ModelGrade, modelUpdater func(*C, map[string]interface{}), expectedErr string) { defer release.MockReleaseInfo(&release.OS{ID: "ubuntu", VersionID: "20.04"})() // XXX this shouldn't be needed defer release.MockOnClassic(true)() @@ -921,7 +932,7 @@ func (s *firstBoot20Suite) testPopulateFromSeedClassicWithModesRunModeNoKernelAn m := &boot.Modeenv{ Mode: "run", - RecoverySystem: "20191018", + RecoverySystem: "20221129", Base: "core20_1.snap", Classic: true, } @@ -953,6 +964,8 @@ apps: // validity check that our returned model has the expected grade c.Assert(model.Grade(), Equals, modelGrade) + s.updateModel(c, sysLabel, model, modelUpdater) + s.startOverlord(c) opts := devicestate.PopulateStateFromSeedOptions{ @@ -1093,7 +1106,7 @@ func (s *firstBoot20Suite) TestPopulateFromSeedClassicWithModesDangerousRunModeN "modes": []interface{}{"run"}, } - s.testPopulateFromSeedClassicWithModesRunModeNoKernelAndGadgetClassicSnap(c, asserts.ModelDangerous, "") + s.testPopulateFromSeedClassicWithModesRunModeNoKernelAndGadgetClassicSnap(c, asserts.ModelDangerous, nil, "") } func (s *firstBoot20Suite) TestPopulateFromSeedClassicWithModesSignedRunModeNoKernelAndGadgetClassicSnap(c *C) { @@ -1103,15 +1116,23 @@ func (s *firstBoot20Suite) TestPopulateFromSeedClassicWithModesSignedRunModeNoKe "modes": []interface{}{"run"}, } - s.testPopulateFromSeedClassicWithModesRunModeNoKernelAndGadgetClassicSnap(c, asserts.ModelSigned, "") + s.testPopulateFromSeedClassicWithModesRunModeNoKernelAndGadgetClassicSnap(c, asserts.ModelSigned, nil, "") } func (s *firstBoot20Suite) TestPopulateFromSeedClassicWithModesSignedRunModeNoKernelAndGadgetClassicSnapImplicitFails(c *C) { // classic snaps must be declared explicitly for non-dangerous models, // not doing so results in a seeding error + + // to evade the seedwriter checks to test the firstboot ones + // create the system with model grade dangerous and then + // switch/rewrite the model to be grade signed s.extraSnapModelDetails["classic-installer"] = map[string]interface{}{ "modes": []interface{}{"run"}, } - s.testPopulateFromSeedClassicWithModesRunModeNoKernelAndGadgetClassicSnap(c, 
asserts.ModelSigned, `snap "classic-installer" requires classic confinement`) + switchToSigned := func(_ *C, modHeaders map[string]interface{}) { + modHeaders["grade"] = string(asserts.ModelSigned) + } + + s.testPopulateFromSeedClassicWithModesRunModeNoKernelAndGadgetClassicSnap(c, asserts.ModelDangerous, switchToSigned, `snap "classic-installer" requires classic confinement`) } diff --git a/overlord/devicestate/handlers_install.go b/overlord/devicestate/handlers_install.go index a8bfbe5aad..c3c9d80a44 100644 --- a/overlord/devicestate/handlers_install.go +++ b/overlord/devicestate/handlers_install.go @@ -75,6 +75,7 @@ var ( installMountVolumes = install.MountVolumes installWriteContent = install.WriteContent installEncryptPartitions = install.EncryptPartitions + installSaveStorageTraits = install.SaveStorageTraits secbootStageEncryptionKeyChange = secboot.StageEncryptionKeyChange secbootTransitionEncryptionKeyChange = secboot.TransitionEncryptionKeyChange @@ -1289,7 +1290,6 @@ func (m *DeviceManager) loadAndMountSystemLabelSnaps(systemLabel string) ( // - install gadget assets // - install kernel.efi // - make system bootable (including writing modeenv) -// TODO this needs unit tests func (m *DeviceManager) doInstallFinish(t *state.Task, _ *tomb.Tomb) error { var err error st := t.State() @@ -1367,7 +1367,7 @@ func (m *DeviceManager) doInstallFinish(t *state.Task, _ *tomb.Tomb) error { } defer unmountParts() - if err := install.SaveStorageTraits(sys.Model, allLaidOutVols, encryptSetupData); err != nil { + if err := installSaveStorageTraits(sys.Model, allLaidOutVols, encryptSetupData); err != nil { return err } diff --git a/overlord/devicestate/systems.go b/overlord/devicestate/systems.go index 49e99562d0..b6ca4432b7 100644 --- a/overlord/devicestate/systems.go +++ b/overlord/devicestate/systems.go @@ -82,7 +82,7 @@ func systemFromSeed(label string, current *currentSystem) (*System, error) { } func loadSeedAndSystem(label string, current *currentSystem) (seed.Seed, *System, error) { - s, err := seed.Open(dirs.SnapSeedDir, label) + s, err := seedOpen(dirs.SnapSeedDir, label) if err != nil { return nil, nil, fmt.Errorf("cannot open: %v", err) } diff --git a/overlord/devicestate/users.go b/overlord/devicestate/users.go index 6c189456e2..7da67bb986 100644 --- a/overlord/devicestate/users.go +++ b/overlord/devicestate/users.go @@ -53,6 +53,10 @@ func (e *UserError) Error() string { return e.Err.Error() } +type RemoveUserOptions struct { + Force bool +} + // CreatedUser holds the results from a create user operation. type CreatedUser struct { Username string @@ -111,8 +115,11 @@ func CreateKnownUsers(st *state.State, sudoer bool, email string) ([]*CreatedUse } // RemoveUser removes linux user account of passed username. 
-func RemoveUser(st *state.State, username string) (*auth.UserState, error) { +func RemoveUser(st *state.State, username string, opts *RemoveUserOptions) (*auth.UserState, error) { // TODO: allow to remove user entries by email as well + if opts == nil { + opts = &RemoveUserOptions{} + } // catch silly errors if username == "" { @@ -129,7 +136,11 @@ func RemoveUser(st *state.State, username string) (*auth.UserState, error) { } // first remove the system user - if err := osutilDelUser(username, &osutil.DelUserOptions{ExtraUsers: !release.OnClassic}); err != nil { + delUseropts := &osutil.DelUserOptions{ + ExtraUsers: !release.OnClassic, + Force: opts.Force, + } + if err := osutilDelUser(username, delUseropts); err != nil { return nil, err } diff --git a/overlord/devicestate/users_test.go b/overlord/devicestate/users_test.go index f9a1e70d9f..0f714352b2 100644 --- a/overlord/devicestate/users_test.go +++ b/overlord/devicestate/users_test.go @@ -206,7 +206,7 @@ func (s *usersSuite) TestUserActionRemoveDelUserErr(c *check.C) { })() s.state.Lock() - userState, userErr := devicestate.RemoveUser(s.state, "some-user") + userState, userErr := devicestate.RemoveUser(s.state, "some-user", &devicestate.RemoveUserOptions{}) s.state.Unlock() c.Check(userErr, check.NotNil) c.Check(s.errorIsInternal(err), check.Equals, true) @@ -215,6 +215,32 @@ func (s *usersSuite) TestUserActionRemoveDelUserErr(c *check.C) { c.Check(called, check.Equals, 1) } +func (s *usersSuite) TestUserActionRemoveDelUserForce(c *check.C) { + s.state.Lock() + _, err := auth.NewUser(s.state, auth.NewUserParams{ + Username: "some-user", + Email: "email@test.com", + Macaroon: "macaroon", + Discharges: []string{"discharge"}, + }) + s.state.Unlock() + c.Check(err, check.IsNil) + + calls := 0 + defer devicestate.MockOsutilDelUser(func(username string, opts *osutil.DelUserOptions) error { + calls++ + c.Check(username, check.Equals, "some-user") + c.Check(opts.Force, check.Equals, true) + return nil + })() + + s.state.Lock() + _, err = devicestate.RemoveUser(s.state, "some-user", &devicestate.RemoveUserOptions{Force: true}) + s.state.Unlock() + c.Check(err, check.IsNil) + c.Check(calls, check.Equals, 1) +} + func (s *usersSuite) TestUserActionRemoveStateErr(c *check.C) { s.state.Lock() s.state.Set("auth", 42) // breaks auth @@ -227,7 +253,7 @@ func (s *usersSuite) TestUserActionRemoveStateErr(c *check.C) { })() s.state.Lock() - userState, err := devicestate.RemoveUser(s.state, "some-user") + userState, err := devicestate.RemoveUser(s.state, "some-user", &devicestate.RemoveUserOptions{}) s.state.Unlock() c.Check(err, check.NotNil) @@ -246,7 +272,7 @@ func (s *usersSuite) TestUserActionRemoveNoUserInState(c *check.C) { }) s.state.Lock() - userState, err := devicestate.RemoveUser(s.state, "some-user") + userState, err := devicestate.RemoveUser(s.state, "some-user", &devicestate.RemoveUserOptions{}) s.state.Unlock() c.Check(err, check.NotNil) @@ -275,7 +301,7 @@ func (s *usersSuite) TestUserActionRemove(c *check.C) { })() s.state.Lock() - userState, err := devicestate.RemoveUser(s.state, "some-user") + userState, err := devicestate.RemoveUser(s.state, "some-user", &devicestate.RemoveUserOptions{}) s.state.Unlock() c.Check(err, check.IsNil) @@ -293,7 +319,7 @@ func (s *usersSuite) TestUserActionRemove(c *check.C) { func (s *usersSuite) TestUserActionRemoveNoUsername(c *check.C) { - userState, err := devicestate.RemoveUser(s.state, "") + userState, err := devicestate.RemoveUser(s.state, "", &devicestate.RemoveUserOptions{}) c.Check(err, 
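
RemoveUser now takes an options pointer and treats nil as "all defaults", which lets new flags such as Force be added without forcing every caller to care about them. A generic sketch of that idiom under illustrative names:

package main

import "fmt"

// RemoveOptions mirrors the shape of RemoveUserOptions: a struct of
// optional flags that can grow without changing the function signature.
type RemoveOptions struct {
	Force bool
}

func Remove(name string, opts *RemoveOptions) error {
	// A nil options pointer means "use the defaults".
	if opts == nil {
		opts = &RemoveOptions{}
	}
	if name == "" {
		return fmt.Errorf("need a name to remove")
	}
	fmt.Printf("removing %q (force=%v)\n", name, opts.Force)
	return nil
}

func main() {
	_ = Remove("some-user", nil)                         // defaults: Force is false
	_ = Remove("some-user", &RemoveOptions{Force: true}) // forced removal
}
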
check.NotNil) c.Check(err.Error(), check.Matches, "need a username to remove") c.Check(s.errorIsInternal(err), check.Equals, false) diff --git a/overlord/managers_test.go b/overlord/managers_test.go index 770320b912..875cde60fe 100644 --- a/overlord/managers_test.go +++ b/overlord/managers_test.go @@ -508,8 +508,9 @@ func (s *baseMgrsSuite) makeSerialAssertionInState(c *C, st *state.State, brandI } // XXX: We have some very similar code in hookstate/ctlcmd/is_connected_test.go -// should this be moved to overlord/snapstate/snapstatetest as a common -// helper +// +// should this be moved to overlord/snapstate/snapstatetest as a common +// helper func (ms *baseMgrsSuite) mockInstalledSnapWithFiles(c *C, snapYaml string, files [][]string) *snap.Info { return ms.mockInstalledSnapWithRevAndFiles(c, snapYaml, snap.R(1), files) } @@ -841,8 +842,10 @@ apps: tr.Commit() // add the snap to a quota group - ts, err := servicestate.CreateQuota(st, "grp", "", []string{"foo"}, - quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build()) + ts, err := servicestate.CreateQuota(st, "grp", servicestate.CreateQuotaOptions{ + Snaps: []string{"foo"}, + ResourceLimits: quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build(), + }) c.Assert(err, IsNil) quotaUpdateChg := st.NewChange("update-quota", "...") quotaUpdateChg.AddAll(ts) diff --git a/overlord/restart/restart.go b/overlord/restart/restart.go index f91d0e65c8..2fc7de6aec 100644 --- a/overlord/restart/restart.go +++ b/overlord/restart/restart.go @@ -333,6 +333,8 @@ func FinishTaskWithRestart(task *state.Task, status state.Status, rt RestartType task.Logf("Skipped automatic system restart on classic system when undoing changes back to previous state") return nil } + } else { + task.Logf("Requested system restart") } } task.SetStatus(status) diff --git a/overlord/restart/restart_test.go b/overlord/restart/restart_test.go index a50abcd9cc..e367e5ca22 100644 --- a/overlord/restart/restart_test.go +++ b/overlord/restart/restart_test.go @@ -228,15 +228,16 @@ func (s *restartSuite) TestFinishTaskWithRestart(c *C) { classic bool restart bool wait bool + log string }{ {initial: state.DoStatus, final: state.DoneStatus, restartType: restart.RestartDaemon, classic: false, restart: true}, {initial: state.DoStatus, final: state.DoneStatus, restartType: restart.RestartDaemon, classic: true, restart: true}, {initial: state.UndoStatus, final: state.UndoneStatus, restartType: restart.RestartDaemon, classic: false, restart: true}, - {initial: state.DoStatus, final: state.DoneStatus, restartType: restart.RestartSystem, classic: false, restart: true}, - {initial: state.DoStatus, final: state.DoneStatus, restartType: restart.RestartSystem, classic: true, restart: false, wait: true}, - {initial: state.DoStatus, final: state.DoneStatus, restartType: restart.RestartSystemNow, classic: true, restart: false, wait: true}, - {initial: state.UndoStatus, final: state.UndoneStatus, restartType: restart.RestartSystem, classic: true, restart: false}, - {initial: state.UndoStatus, final: state.UndoneStatus, restartType: restart.RestartSystem, classic: false, restart: true}, + {initial: state.DoStatus, final: state.DoneStatus, restartType: restart.RestartSystem, classic: false, restart: true, log: ".* INFO Requested system restart"}, + {initial: state.DoStatus, final: state.DoneStatus, restartType: restart.RestartSystem, classic: true, restart: false, wait: true, log: ".* INFO Task set to wait until a manual system restart allows to continue"}, + {initial: 
state.DoStatus, final: state.DoneStatus, restartType: restart.RestartSystemNow, classic: true, restart: false, wait: true, log: ".* INFO Task set to wait until a manual system restart allows to continue"}, + {initial: state.UndoStatus, final: state.UndoneStatus, restartType: restart.RestartSystem, classic: true, restart: false, log: ".* INFO Skipped automatic system restart on classic system when undoing changes back to previous state"}, + {initial: state.UndoStatus, final: state.UndoneStatus, restartType: restart.RestartSystem, classic: false, restart: true, log: ".* INFO Requested system restart"}, } chg := st.NewChange("chg", "...") @@ -280,6 +281,12 @@ func (s *restartSuite) TestFinishTaskWithRestart(c *C) { } c.Check(ok, Equals, false) } + if t.log == "" { + c.Check(task.Log(), HasLen, 0) + } else { + c.Check(task.Log(), HasLen, 1) + c.Check(task.Log()[0], Matches, t.log) + } } } diff --git a/overlord/servicestate/quota_control.go b/overlord/servicestate/quota_control.go index d42c9cf7f4..ae449354de 100644 --- a/overlord/servicestate/quota_control.go +++ b/overlord/servicestate/quota_control.go @@ -82,10 +82,24 @@ func quotaGroupsAvailable(st *state.State) error { return nil } +// CreateQuotaOptions reflects all of options available when creating new quota +// groups. +type CreateQuotaOptions struct { + // ParentName is the name of the parent quota group, the group should be + // placed under. + ParentName string + + // Snaps is the set of snaps to add to the quota group. These are + // instance names of snaps. + Snaps []string + + // ResourceLimits is the resource limits to be used for the quota group. + ResourceLimits quota.Resources +} + // CreateQuota attempts to create the specified quota group with the specified // snaps in it. -// TODO: should this use something like QuotaGroupUpdate with fewer fields? 
-func CreateQuota(st *state.State, name string, parentName string, snaps []string, resourceLimits quota.Resources) (*state.TaskSet, error) { +func CreateQuota(st *state.State, name string, createOpts CreateQuotaOptions) (*state.TaskSet, error) { if err := quotaGroupsAvailable(st); err != nil { return nil, err } @@ -101,23 +115,23 @@ func CreateQuota(st *state.State, name string, parentName string, snaps []string } // validate the resource limits for the group - if err := resourceLimits.Validate(); err != nil { + if err := createOpts.ResourceLimits.Validate(); err != nil { return nil, fmt.Errorf("cannot create quota group %q: %v", name, err) } // validate that the system has the features needed for this resource - if err := resourcesCheckFeatureRequirements(&resourceLimits); err != nil { + if err := resourcesCheckFeatureRequirements(&createOpts.ResourceLimits); err != nil { return nil, fmt.Errorf("cannot create quota group %q: %v", name, err) } // make sure the specified snaps exist and aren't currently in another group - if err := validateSnapForAddingToGroup(st, snaps, name, allGrps); err != nil { + if err := validateSnapForAddingToGroup(st, createOpts.Snaps, name, allGrps); err != nil { return nil, err } if err := CheckQuotaChangeConflictMany(st, []string{name}); err != nil { return nil, err } - if err := snapstate.CheckChangeConflictMany(st, snaps, ""); err != nil { + if err := snapstate.CheckChangeConflictMany(st, createOpts.Snaps, ""); err != nil { return nil, err } @@ -125,9 +139,9 @@ func CreateQuota(st *state.State, name string, parentName string, snaps []string qc := QuotaControlAction{ Action: "create", QuotaName: name, - ResourceLimits: resourceLimits, - AddSnaps: snaps, - ParentName: parentName, + ResourceLimits: createOpts.ResourceLimits, + AddSnaps: createOpts.Snaps, + ParentName: createOpts.ParentName, } ts := state.NewTaskSet() @@ -188,9 +202,9 @@ func RemoveQuota(st *state.State, name string) (*state.TaskSet, error) { return ts, nil } -// QuotaGroupUpdate reflects all of the modifications that can be performed on +// UpdateQuotaOptions reflects all of the modifications that can be performed on // a quota group in one operation. -type QuotaGroupUpdate struct { +type UpdateQuotaOptions struct { // AddSnaps is the set of snaps to add to the quota group. These are // instance names of snaps, and are appended to the existing snaps in // the quota group @@ -205,7 +219,7 @@ type QuotaGroupUpdate struct { // TODO: this should support more kinds of updates such as moving groups between // parents, removing sub-groups from their parents, and removing snaps from // the group. 
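
The CreateQuota change replaces a growing list of positional parameters with a single options struct, so call sites name only the fields they need and new fields can be added without touching every caller. A standalone before/after sketch, with stub types standing in for snapd's:

package main

import "fmt"

// Resources stands in for quota.Resources in this sketch.
type Resources struct{ MemoryLimit uint64 }

// CreateQuotaOptions mirrors the new options-struct style: fields are
// named at the call site and zero values mean "unset".
type CreateQuotaOptions struct {
	ParentName     string
	Snaps          []string
	ResourceLimits Resources
}

// Old style: every caller passes every argument in order, including the
// ones it does not care about.
func createQuotaOld(name, parentName string, snaps []string, limits Resources) {
	fmt.Println("old:", name, parentName, snaps, limits)
}

// New style: callers construct only the fields they need.
func createQuota(name string, opts CreateQuotaOptions) {
	fmt.Println("new:", name, opts.ParentName, opts.Snaps, opts.ResourceLimits)
}

func main() {
	createQuotaOld("foo", "", []string{"test-snap"}, Resources{MemoryLimit: 1 << 30})
	createQuota("foo", CreateQuotaOptions{
		Snaps:          []string{"test-snap"},
		ResourceLimits: Resources{MemoryLimit: 1 << 30},
	})
}
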
-func UpdateQuota(st *state.State, name string, updateOpts QuotaGroupUpdate) (*state.TaskSet, error) { +func UpdateQuota(st *state.State, name string, updateOpts UpdateQuotaOptions) (*state.TaskSet, error) { if err := quotaGroupsAvailable(st); err != nil { return nil, err } diff --git a/overlord/servicestate/quota_control_test.go b/overlord/servicestate/quota_control_test.go index 998191b893..97467b2551 100644 --- a/overlord/servicestate/quota_control_test.go +++ b/overlord/servicestate/quota_control_test.go @@ -251,7 +251,9 @@ func (s *quotaControlSuite) TestCreateQuotaNotEnabled(c *C) { tr.Commit() // try to create an empty quota group - _, err := servicestate.CreateQuota(s.state, "foo", "", nil, quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build()) + _, err := servicestate.CreateQuota(s.state, "foo", servicestate.CreateQuotaOptions{ + ResourceLimits: quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build(), + }) c.Assert(err, ErrorMatches, `experimental feature disabled - test it by setting 'experimental.quota-groups' to true`) } @@ -264,7 +266,9 @@ func (s *quotaControlSuite) TestCreateQuotaSystemdTooOld(c *C) { servicestate.CheckSystemdVersion() - _, err := servicestate.CreateQuota(s.state, "foo", "", nil, quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build()) + _, err := servicestate.CreateQuota(s.state, "foo", servicestate.CreateQuotaOptions{ + ResourceLimits: quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build(), + }) c.Assert(err, ErrorMatches, `cannot use quotas with incompatible systemd: systemd version 229 is too old \(expected at least 230\)`) } @@ -288,8 +292,10 @@ func (s *quotaControlSuite) TestCreateQuotaPrecond(c *C) { } for _, t := range tests { - testConstraints := quota.NewResourcesBuilder().WithMemoryLimit(t.mem).Build() - _, err := servicestate.CreateQuota(st, t.name, "", t.snaps, testConstraints) + _, err := servicestate.CreateQuota(st, t.name, servicestate.CreateQuotaOptions{ + Snaps: t.snaps, + ResourceLimits: quota.NewResourcesBuilder().WithMemoryLimit(t.mem).Build(), + }) c.Check(err, ErrorMatches, t.err) } } @@ -306,7 +312,10 @@ func (s *quotaControlSuite) TestRemoveQuotaPreseeding(c *C) { snaptest.MockSnapCurrent(c, testYaml, s.testSnapSideInfo) // create a quota group - ts, err := servicestate.CreateQuota(s.state, "foo", "", []string{"test-snap"}, quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build()) + ts, err := servicestate.CreateQuota(s.state, "foo", servicestate.CreateQuotaOptions{ + Snaps: []string{"test-snap"}, + ResourceLimits: quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build(), + }) c.Assert(err, IsNil) chg := st.NewChange("quota-control", "...") @@ -360,7 +369,10 @@ func (s *quotaControlSuite) TestCreateUnhappyCheckFeatureReqs(c *C) { quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() // create the quota group - _, err := servicestate.CreateQuota(st, "foo", "", []string{"test-snap"}, quotaConstraits) + _, err := servicestate.CreateQuota(st, "foo", servicestate.CreateQuotaOptions{ + Snaps: []string{"test-snap"}, + ResourceLimits: quotaConstraits, + }) c.Check(err, ErrorMatches, `cannot create quota group "foo": check feature requirements error`) } @@ -383,7 +395,10 @@ func (s *quotaControlSuite) TestUpdateUnhappyCheckFeatureReqs(c *C) { quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() // create the quota group - ts, err := servicestate.CreateQuota(st, "foo", "", 
[]string{"test-snap"}, quotaConstraits) + ts, err := servicestate.CreateQuota(st, "foo", servicestate.CreateQuotaOptions{ + Snaps: []string{"test-snap"}, + ResourceLimits: quotaConstraits, + }) c.Assert(err, IsNil) chg := st.NewChange("quota-control", "...") chg.AddAll(ts) @@ -400,7 +415,7 @@ func (s *quotaControlSuite) TestUpdateUnhappyCheckFeatureReqs(c *C) { }) defer r() - _, err = servicestate.UpdateQuota(st, "foo", servicestate.QuotaGroupUpdate{NewResourceLimits: quotaConstraits}) + _, err = servicestate.UpdateQuota(st, "foo", servicestate.UpdateQuotaOptions{NewResourceLimits: quotaConstraits}) c.Check(err, ErrorMatches, `cannot update group "foo": check feature requirements error`) } @@ -438,7 +453,10 @@ func (s *quotaControlSuite) TestCreateUpdateRemoveQuotaHappy(c *C) { quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() // create the quota group - ts, err := servicestate.CreateQuota(st, "foo", "", []string{"test-snap"}, quotaConstraits) + ts, err := servicestate.CreateQuota(st, "foo", servicestate.CreateQuotaOptions{ + Snaps: []string{"test-snap"}, + ResourceLimits: quotaConstraits, + }) c.Assert(err, IsNil) c.Check(resCheckFeatureRequirementsCalled, Equals, 1) @@ -471,7 +489,7 @@ func (s *quotaControlSuite) TestCreateUpdateRemoveQuotaHappy(c *C) { // increase the memory limit newConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB * 2).Build() - ts, err = servicestate.UpdateQuota(st, "foo", servicestate.QuotaGroupUpdate{NewResourceLimits: newConstraits}) + ts, err = servicestate.UpdateQuota(st, "foo", servicestate.UpdateQuotaOptions{NewResourceLimits: newConstraits}) c.Assert(err, IsNil) c.Check(resCheckFeatureRequirementsCalled, Equals, 2) @@ -565,7 +583,10 @@ func (s *quotaControlSuite) TestEnsureSnapAbsentFromQuotaGroup(c *C) { // create a quota group quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() - ts, err := servicestate.CreateQuota(s.state, "foo", "", []string{"test-snap", "test-snap2"}, quotaConstraits) + ts, err := servicestate.CreateQuota(s.state, "foo", servicestate.CreateQuotaOptions{ + Snaps: []string{"test-snap", "test-snap2"}, + ResourceLimits: quotaConstraits, + }) c.Assert(err, IsNil) chg := st.NewChange("quota-control", "...") @@ -633,7 +654,7 @@ func (s *quotaControlSuite) TestUpdateQuotaGroupNotEnabled(c *C) { tr.Set("core", "experimental.quota-groups", false) tr.Commit() - opts := servicestate.QuotaGroupUpdate{} + opts := servicestate.UpdateQuotaOptions{} _, err := servicestate.UpdateQuota(s.state, "foo", opts) c.Assert(err, ErrorMatches, `experimental feature disabled - test it by setting 'experimental.quota-groups' to true`) } @@ -650,12 +671,12 @@ func (s *quotaControlSuite) TestUpdateQuotaPrecond(c *C) { newConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() tests := []struct { name string - opts servicestate.QuotaGroupUpdate + opts servicestate.UpdateQuotaOptions err string }{ - {"what", servicestate.QuotaGroupUpdate{}, `group "what" does not exist`}, - {"foo", servicestate.QuotaGroupUpdate{NewResourceLimits: newConstraits}, `cannot update group "foo": cannot decrease memory limit, remove and re-create it to decrease the limit`}, - {"foo", servicestate.QuotaGroupUpdate{AddSnaps: []string{"baz"}}, `cannot use snap "baz" in group "foo": snap "baz" is not installed`}, + {"what", servicestate.UpdateQuotaOptions{}, `group "what" does not exist`}, + {"foo", servicestate.UpdateQuotaOptions{NewResourceLimits: newConstraits}, `cannot 
update group "foo": cannot decrease memory limit, remove and re-create it to decrease the limit`}, + {"foo", servicestate.UpdateQuotaOptions{AddSnaps: []string{"baz"}}, `cannot use snap "baz" in group "foo": snap "baz" is not installed`}, } for _, t := range tests { @@ -685,7 +706,10 @@ func (s *quotaControlSuite) TestRemoveQuotaPrecond(c *C) { } func (s *quotaControlSuite) createQuota(c *C, name string, limits quota.Resources, snaps ...string) { - ts, err := servicestate.CreateQuota(s.state, name, "", snaps, limits) + ts, err := servicestate.CreateQuota(s.state, name, servicestate.CreateQuotaOptions{ + Snaps: snaps, + ResourceLimits: limits, + }) c.Assert(err, IsNil) chg := s.state.NewChange("quota-control", "...") @@ -733,7 +757,7 @@ func (s *quotaControlSuite) TestSnapOpUpdateQuotaConflict(c *C) { chg1 := s.state.NewChange("disable", "...") chg1.AddAll(ts) - _, err = servicestate.UpdateQuota(st, "foo", servicestate.QuotaGroupUpdate{AddSnaps: []string{"test-snap2"}}) + _, err = servicestate.UpdateQuota(st, "foo", servicestate.UpdateQuotaOptions{AddSnaps: []string{"test-snap2"}}) c.Assert(err, ErrorMatches, `snap "test-snap2" has "disable" change in progress`) } @@ -752,7 +776,10 @@ func (s *quotaControlSuite) TestSnapOpCreateQuotaConflict(c *C) { chg1.AddAll(ts) quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() - _, err = servicestate.CreateQuota(s.state, "foo", "", []string{"test-snap"}, quotaConstraits) + _, err = servicestate.CreateQuota(s.state, "foo", servicestate.CreateQuotaOptions{ + Snaps: []string{"test-snap"}, + ResourceLimits: quotaConstraits, + }) c.Assert(err, ErrorMatches, `snap "test-snap" has "disable" change in progress`) } @@ -795,7 +822,10 @@ func (s *quotaControlSuite) TestCreateQuotaSnapOpConflict(c *C) { snaptest.MockSnapCurrent(c, testYaml, s.testSnapSideInfo) quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() - ts, err := servicestate.CreateQuota(s.state, "foo", "", []string{"test-snap"}, quotaConstraits) + ts, err := servicestate.CreateQuota(s.state, "foo", servicestate.CreateQuotaOptions{ + Snaps: []string{"test-snap"}, + ResourceLimits: quotaConstraits, + }) c.Assert(err, IsNil) chg1 := s.state.NewChange("quota-control", "...") chg1.AddAll(ts) @@ -834,7 +864,7 @@ func (s *quotaControlSuite) TestUpdateQuotaSnapOpConflict(c *C) { quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() s.createQuota(c, "foo", quotaConstraits, "test-snap") - ts, err := servicestate.UpdateQuota(st, "foo", servicestate.QuotaGroupUpdate{AddSnaps: []string{"test-snap2"}}) + ts, err := servicestate.UpdateQuota(st, "foo", servicestate.UpdateQuotaOptions{AddSnaps: []string{"test-snap2"}}) c.Assert(err, IsNil) chg1 := s.state.NewChange("quota-control", "...") chg1.AddAll(ts) @@ -942,13 +972,13 @@ func (s *quotaControlSuite) TestUpdateQuotaUpdateQuotaConflict(c *C) { quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() s.createQuota(c, "foo", quotaConstraits, "test-snap") - ts, err := servicestate.UpdateQuota(st, "foo", servicestate.QuotaGroupUpdate{AddSnaps: []string{"test-snap2"}}) + ts, err := servicestate.UpdateQuota(st, "foo", servicestate.UpdateQuotaOptions{AddSnaps: []string{"test-snap2"}}) c.Assert(err, IsNil) chg1 := s.state.NewChange("quota-control", "...") chg1.AddAll(ts) newConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB * 2).Build() - _, err = servicestate.UpdateQuota(st, "foo", 
servicestate.QuotaGroupUpdate{NewResourceLimits: newConstraits}) + _, err = servicestate.UpdateQuota(st, "foo", servicestate.UpdateQuotaOptions{NewResourceLimits: newConstraits}) c.Assert(err, ErrorMatches, `quota group "foo" has "quota-control" change in progress`) } @@ -982,7 +1012,7 @@ func (s *quotaControlSuite) TestUpdateQuotaRemoveQuotaConflict(c *C) { quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() s.createQuota(c, "foo", quotaConstraits, "test-snap") - ts, err := servicestate.UpdateQuota(st, "foo", servicestate.QuotaGroupUpdate{AddSnaps: []string{"test-snap2"}}) + ts, err := servicestate.UpdateQuota(st, "foo", servicestate.UpdateQuotaOptions{AddSnaps: []string{"test-snap2"}}) c.Assert(err, IsNil) chg1 := s.state.NewChange("quota-control", "...") chg1.AddAll(ts) @@ -1026,7 +1056,7 @@ func (s *quotaControlSuite) TestRemoveQuotaUpdateQuotaConflict(c *C) { chg1 := s.state.NewChange("quota-control", "...") chg1.AddAll(ts) - _, err = servicestate.UpdateQuota(st, "foo", servicestate.QuotaGroupUpdate{AddSnaps: []string{"test-snap2"}}) + _, err = servicestate.UpdateQuota(st, "foo", servicestate.UpdateQuotaOptions{AddSnaps: []string{"test-snap2"}}) c.Assert(err, ErrorMatches, `quota group "foo" has "quota-control" change in progress`) } @@ -1050,13 +1080,18 @@ func (s *quotaControlSuite) TestCreateQuotaCreateQuotaConflict(c *C) { snaptest.MockSnapCurrent(c, testYaml2, si2) quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() - ts, err := servicestate.CreateQuota(st, "foo", "", []string{"test-snap"}, quotaConstraits) + ts, err := servicestate.CreateQuota(st, "foo", servicestate.CreateQuotaOptions{ + Snaps: []string{"test-snap"}, + ResourceLimits: quotaConstraits, + }) c.Assert(err, IsNil) chg1 := s.state.NewChange("quota-control", "...") chg1.AddAll(ts) - newConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB * 2).Build() - _, err = servicestate.CreateQuota(st, "foo", "", []string{"test-snap2"}, newConstraits) + _, err = servicestate.CreateQuota(st, "foo", servicestate.CreateQuotaOptions{ + Snaps: []string{"test-snap2"}, + ResourceLimits: quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB * 2).Build(), + }) c.Assert(err, ErrorMatches, `quota group "foo" has "quota-control" change in progress`) } @@ -1073,7 +1108,7 @@ func (s *quotaControlSuite) TestUpdateQuotaModifyExistingMixable(c *C) { c.Assert(err, IsNil) // try to update a quota value, this must fail - _, err = servicestate.UpdateQuota(st, "mixed-grp", servicestate.QuotaGroupUpdate{ + _, err = servicestate.UpdateQuota(st, "mixed-grp", servicestate.UpdateQuotaOptions{ NewResourceLimits: quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB * 2).Build(), }) c.Assert(err, ErrorMatches, `quota group "mixed-grp" has mixed snaps and sub-groups, which is no longer supported; removal and re-creation is necessary to modify it`) @@ -1109,8 +1144,10 @@ func (s *quotaControlSuite) TestAddSnapToQuotaGroupQuotaConflict(c *C) { snapstate.Set(s.state, "test-snap2", snapst2) snaptest.MockSnapCurrent(c, testYaml2, si2) - quotaConstraits := quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build() - ts, err := servicestate.CreateQuota(st, "foo", "", []string{"test-snap"}, quotaConstraits) + ts, err := servicestate.CreateQuota(st, "foo", servicestate.CreateQuotaOptions{ + Snaps: []string{"test-snap"}, + ResourceLimits: quota.NewResourcesBuilder().WithMemoryLimit(quantity.SizeGiB).Build(), + }) c.Assert(err, IsNil) chg1 := 
s.state.NewChange("quota-control", "...") chg1.AddAll(ts) diff --git a/overlord/servicestate/quota_handlers_test.go b/overlord/servicestate/quota_handlers_test.go index ede68d4416..af7f0fefc0 100644 --- a/overlord/servicestate/quota_handlers_test.go +++ b/overlord/servicestate/quota_handlers_test.go @@ -1860,7 +1860,7 @@ func (s *quotaHandlersSuite) TestDoQuotaAddSnapSnapConflict(c *C) { chg1.SetStatus(state.DoingStatus) // Create a change that has a quota-control task in it for quota group foo - _, err = servicestate.UpdateQuota(st, "foo2", servicestate.QuotaGroupUpdate{ + _, err = servicestate.UpdateQuota(st, "foo2", servicestate.UpdateQuotaOptions{ AddSnaps: []string{"test-snap"}, }) c.Assert(err.Error(), Equals, "snap \"test-snap\" has \"snap-install\" change in progress") diff --git a/overlord/snapstate/backend/setup.go b/overlord/snapstate/backend/setup.go index 0a6c223e37..09ff9c2e0d 100644 --- a/overlord/snapstate/backend/setup.go +++ b/overlord/snapstate/backend/setup.go @@ -83,7 +83,8 @@ func (b Backend) SetupSnap(snapFilePath, instanceName string, sideInfo *snap.Sid } } - // in uc20 run mode, all snaps must be on the same device + // in uc20+ and classic with modes run mode, all snaps must be on the + // same device opts := &snap.InstallOptions{} if dev.HasModeenv() && dev.RunMode() { opts.MustNotCrossDevices = true diff --git a/overlord/snapstate/handlers.go b/overlord/snapstate/handlers.go index 590203ca74..9ae5cca714 100644 --- a/overlord/snapstate/handlers.go +++ b/overlord/snapstate/handlers.go @@ -1964,7 +1964,6 @@ func (m *SnapManager) finishTaskWithMaybeRestart(t *state.Task, status state.Sta st := t.State() if restartPoss.RebootRequired { - t.Logf("Requested system restart.") return FinishTaskWithRestart(t, status, restart.RestartSystem, &restartPoss.RebootInfo) } diff --git a/overlord/snapstate/handlers_link_test.go b/overlord/snapstate/handlers_link_test.go index 94efa0d0f9..30431ed5e9 100644 --- a/overlord/snapstate/handlers_link_test.go +++ b/overlord/snapstate/handlers_link_test.go @@ -903,9 +903,8 @@ func (s *linkSnapSuite) TestDoLinkSnapSuccessRebootForKernelClassicWithModes(c * c.Check(t.Status(), Equals, state.WaitStatus) c.Check(s.restartRequested, HasLen, 0) - // XXX avoid logging Requested system restart? - c.Assert(t.Log(), HasLen, 2) - c.Check(t.Log()[1], Matches, `.*INFO Task set to wait until a manual system restart allows to continue`) + c.Assert(t.Log(), HasLen, 1) + c.Check(t.Log()[0], Matches, `.*INFO Task set to wait until a manual system restart allows to continue`) } func (s *linkSnapSuite) TestDoLinkSnapSuccessRebootForCoreBaseSystemRestartImmediate(c *C) { diff --git a/sandbox/apparmor/apparmor.go b/sandbox/apparmor/apparmor.go index c3640e4938..b63b8fe732 100644 --- a/sandbox/apparmor/apparmor.go +++ b/sandbox/apparmor/apparmor.go @@ -32,6 +32,7 @@ import ( "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/osutil" + "github.com/snapcore/snapd/snapdtool" "github.com/snapcore/snapd/strutil" ) @@ -149,8 +150,8 @@ func ParserMtime() int64 { var mtime int64 mtime = 0 - if path, err := findAppArmorParser(); err == nil { - if fi, err := os.Stat(path); err == nil { + if cmd, _, err := AppArmorParser(); err == nil { + if fi, err := os.Stat(cmd.Path); err == nil { mtime = fi.ModTime().Unix() } } @@ -286,7 +287,11 @@ func (aaa *appArmorAssess) doAssess() (level LevelType, summary string) { } // If we got here then all features are available and supported. 
- return Full, "apparmor is enabled and all features are available" + note := "" + if strutil.SortedListContains(parserFeatures, "snapd-internal") { + note = " (using snapd provided apparmor_parser)" + } + return Full, "apparmor is enabled and all features are available" + note } type appArmorProbe struct { @@ -335,49 +340,117 @@ func probeKernelFeatures() ([]string, error) { } func probeParserFeatures() ([]string, error) { - parser, err := findAppArmorParser() + var featureProbes = []struct { + feature string + probe string + }{ + { + feature: "unsafe", + probe: "change_profile unsafe /**,", + }, + { + feature: "include-if-exists", + probe: `#include if exists "/foo"`, + }, + { + feature: "qipcrtr-socket", + probe: "network qipcrtr dgram,", + }, + { + feature: "mqueue", + probe: "mqueue,", + }, + { + feature: "cap-bpf", + probe: "capability bpf,", + }, + { + feature: "cap-audit-read", + probe: "capability audit_read,", + }, + { + feature: "xdp", + probe: "network xdp,", + }, + } + _, internal, err := AppArmorParser() if err != nil { return []string{}, err } - features := make([]string, 0, 4) - if tryAppArmorParserFeature(parser, "change_profile unsafe /**,") { - features = append(features, "unsafe") - } - if tryAppArmorParserFeature(parser, "network qipcrtr dgram,") { - features = append(features, "qipcrtr-socket") - } - if tryAppArmorParserFeature(parser, "capability bpf,") { - features = append(features, "cap-bpf") - } - if tryAppArmorParserFeature(parser, "capability audit_read,") { - features = append(features, "cap-audit-read") - } - if tryAppArmorParserFeature(parser, "mqueue,") { - features = append(features, "mqueue") + features := make([]string, 0, len(featureProbes)+1) + for _, fp := range featureProbes { + // recreate the Cmd each time so we can exec it each time + cmd, _, _ := AppArmorParser() + if tryAppArmorParserFeature(cmd, fp.probe) { + features = append(features, fp.feature) + } } - if tryAppArmorParserFeature(parser, "network xdp,") { - features = append(features, "xdp") + if internal { + features = append(features, "snapd-internal") } sort.Strings(features) return features, nil } -// findAppArmorParser returns the path of the apparmor_parser binary if one is found. 
-func findAppArmorParser() (string, error) { +func snapdAppArmorSupportsReexecImpl() bool { + hostInfoDir := filepath.Join(dirs.GlobalRootDir, dirs.CoreLibExecDir) + _, flags, err := snapdtool.SnapdVersionFromInfoFile(hostInfoDir) + return err == nil && flags["SNAPD_APPARMOR_REEXEC"] == "1" +} + +var snapdAppArmorSupportsReexec = snapdAppArmorSupportsReexecImpl + +// AppArmorParser returns an exec.Cmd for the apparmor_parser binary, and a +// boolean to indicate whether this is internal to snapd (ie is provided by +// snapd) +func AppArmorParser() (cmd *exec.Cmd, internal bool, err error) { + // first see if we have our own internal copy which could come from the + // snapd snap (likely) or be part of the snapd distro package (unlikely) + // - but only use the internal one when we know that the system + // installed snapd-apparmor support re-exec + + // TODO:apparmor-vendoring + // disabled until the test failures: + // - ubuntu-core-18-64:tests/core/snapd-refresh-vs-services + // and similar are fixed + /* + if path, err := snapdtool.InternalToolPath("apparmor_parser"); err == nil { + if osutil.IsExecutable(path) && snapdAppArmorSupportsReexec() { + prefix := strings.TrimSuffix(path, "apparmor_parser") + // when using the internal apparmor_parser also use + // its own configuration and includes etc plus + // also ensure we use the 3.0 feature ABI to get + // the widest array of policy features across the + // widest array of kernel versions + args := []string{ + "--config-file", filepath.Join(prefix, "/apparmor/parser.conf"), + "--base", filepath.Join(prefix, "/apparmor.d"), + "--policy-features", filepath.Join(prefix, "/apparmor.d/abi/3.0"), + } + return exec.Command(path, args...), true, nil + } + } + */ + + // now search for one in the configured parserSearchPath for _, dir := range filepath.SplitList(parserSearchPath) { path := filepath.Join(dir, "apparmor_parser") if _, err := os.Stat(path); err == nil { - return path, nil + return exec.Command(path), false, nil } } - return "", os.ErrNotExist + + return nil, false, os.ErrNotExist } // tryAppArmorParserFeature attempts to pre-process a bit of apparmor syntax with a given parser. -func tryAppArmorParserFeature(parser, rule string) bool { - cmd := exec.Command(parser, "--preprocess") +func tryAppArmorParserFeature(cmd *exec.Cmd, rule string) bool { + cmd.Args = append(cmd.Args, "--preprocess") cmd.Stdin = bytes.NewBufferString(fmt.Sprintf("profile snap-test {\n %s\n}", rule)) - if err := cmd.Run(); err != nil { + output, err := cmd.CombinedOutput() + // older versions of apparmor_parser can exit with success even + // though they fail to parse + if err != nil || strings.Contains(string(output), "parser error") { return false } return true @@ -479,3 +552,11 @@ func MockFeatures(kernelFeatures []string, kernelError error, parserFeatures []s } } + +func MockParserSearchPath(new string) (restore func()) { + oldAppArmorParserSearchPath := parserSearchPath + parserSearchPath = new + return func() { + parserSearchPath = oldAppArmorParserSearchPath + } +} diff --git a/sandbox/apparmor/apparmor_test.go b/sandbox/apparmor/apparmor_test.go index d28ddb800e..e16aeb9382 100644 --- a/sandbox/apparmor/apparmor_test.go +++ b/sandbox/apparmor/apparmor_test.go @@ -24,8 +24,10 @@ import ( "fmt" "io" "io/ioutil" + "math" "os" "path/filepath" + "sort" "testing" . 
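
tryAppArmorParserFeature above feeds a tiny profile to apparmor_parser --preprocess and treats a non-zero exit, or "parser error" in the combined output, as "feature unsupported". A reduced standalone version of that probe; the profile wrapper and the "parser error" check follow the diff, the rest is illustrative:

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

// supportsRule reports whether the given apparmor_parser binary accepts a
// single policy rule, by wrapping it in a throwaway profile and
// pre-processing it.
func supportsRule(parserPath, rule string) bool {
	cmd := exec.Command(parserPath, "--preprocess")
	cmd.Stdin = bytes.NewBufferString(fmt.Sprintf("profile snap-test {\n %s\n}", rule))
	output, err := cmd.CombinedOutput()
	// Older parsers may exit 0 even when they fail to parse, so the
	// output is checked for "parser error" as well.
	return err == nil && !strings.Contains(string(output), "parser error")
}

func main() {
	for _, rule := range []string{"mqueue,", "network xdp,"} {
		fmt.Printf("%-14q supported: %v\n", rule, supportsRule("apparmor_parser", rule))
	}
}
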
"gopkg.in/check.v1" @@ -33,6 +35,7 @@ import ( "github.com/snapcore/snapd/dirs" "github.com/snapcore/snapd/osutil" "github.com/snapcore/snapd/sandbox/apparmor" + "github.com/snapcore/snapd/snapdtool" "github.com/snapcore/snapd/testutil" ) @@ -48,14 +51,63 @@ var _ = Suite(&apparmorSuite{}) func (s *apparmorSuite) SetUpTest(c *C) { s.BaseTest.SetUpTest(c) + + dirs.SetRootDir(c.MkDir()) + s.AddCleanup(func() { dirs.SetRootDir("") }) + s.AddCleanup(func() { configFile := filepath.Join(dirs.GlobalRootDir, "/etc/apparmor.d/tunables/home.d/snapd") - if err := os.Remove(configFile); err != nil { - c.Assert(os.IsNotExist(err), Equals, true) + if err := os.RemoveAll(configFile); err != nil { + panic(err) } }) } +func (*apparmorSuite) TestAppArmorParser(c *C) { + mockParserCmd := testutil.MockCommand(c, "apparmor_parser", "") + defer mockParserCmd.Restore() + restore := apparmor.MockParserSearchPath(mockParserCmd.BinDir()) + defer restore() + restore = apparmor.MockSnapdAppArmorSupportsReexec(func() bool { return false }) + defer restore() + cmd, internal, err := apparmor.AppArmorParser() + c.Check(cmd.Path, Equals, mockParserCmd.Exe()) + c.Check(cmd.Args, DeepEquals, []string{mockParserCmd.Exe()}) + c.Check(internal, Equals, false) + c.Check(err, Equals, nil) +} + +func (*apparmorSuite) TestAppArmorInternalAppArmorParser(c *C) { + // TODO:apparmor-vendoring + /* + fakeroot := c.MkDir() + dirs.SetRootDir(fakeroot) + + d := filepath.Join(dirs.SnapMountDir, "/snapd/42", "/usr/lib/snapd") + c.Assert(os.MkdirAll(d, 0755), IsNil) + p := filepath.Join(d, "apparmor_parser") + c.Assert(ioutil.WriteFile(p, nil, 0755), IsNil) + restore := snapdtool.MockOsReadlink(func(path string) (string, error) { + c.Assert(path, Equals, "/proc/self/exe") + return filepath.Join(d, "snapd"), nil + }) + defer restore() + restore = apparmor.MockSnapdAppArmorSupportsReexec(func() bool { return true }) + defer restore() + + cmd, internal, err := apparmor.AppArmorParser() + c.Check(err, IsNil) + c.Check(cmd.Path, Equals, p) + c.Check(cmd.Args, DeepEquals, []string{ + p, + "--config-file", filepath.Join(d, "/apparmor/parser.conf"), + "--base", filepath.Join(d, "/apparmor.d"), + "--policy-features", filepath.Join(d, "/apparmor.d/abi/3.0"), + }) + c.Check(internal, Equals, true) + */ +} + func (*apparmorSuite) TestAppArmorLevelTypeStringer(c *C) { c.Check(apparmor.Unknown.String(), Equals, "unknown") c.Check(apparmor.Unsupported.String(), Equals, "none") @@ -208,46 +260,25 @@ func (s *apparmorSuite) TestProbeAppArmorKernelFeatures(c *C) { } func (s *apparmorSuite) TestProbeAppArmorParserFeatures(c *C) { - - var testcases = []struct { - exitCodes []int - expFeatures []string - }{ - { - exitCodes: []int{1, 1, 1, 1, 1, 1}, - }, - { - exitCodes: []int{1, 0, 1, 1, 1, 1}, - expFeatures: []string{"qipcrtr-socket"}, - }, - { - exitCodes: []int{0, 1, 1, 1, 1, 1}, - expFeatures: []string{"unsafe"}, - }, - { - exitCodes: []int{1, 1, 1, 0, 1, 1}, - expFeatures: []string{"cap-audit-read"}, - }, - { - exitCodes: []int{0, 0, 1, 1, 1, 1}, - expFeatures: []string{"qipcrtr-socket", "unsafe"}, - }, - { - exitCodes: []int{0, 0, 0, 0, 0, 1}, - expFeatures: []string{"cap-audit-read", "cap-bpf", "mqueue", "qipcrtr-socket", "unsafe"}, - }, - { - exitCodes: []int{0, 0, 0, 0, 0, 0}, - expFeatures: []string{"cap-audit-read", "cap-bpf", "mqueue", "qipcrtr-socket", "unsafe", "xdp"}, - }, - } - - for _, t := range testcases { + var features = []string{"unsafe", "include-if-exists", "qipcrtr-socket", "mqueue", "cap-bpf", "cap-audit-read", "xdp"} + // test all 
combinations of features + for i := 0; i < int(math.Pow(2, float64(len(features)))); i++ { + expFeatures := []string{} d := c.MkDir() contents := "" - for _, code := range t.exitCodes { + var expectedCalls [][]string + for j, f := range features { + code := 0 + if i&(1<<j) == 0 { + expFeatures = append([]string{f}, expFeatures...) + } else { + code = 1 + } + expectedCalls = append(expectedCalls, []string{"apparmor_parser", "--preprocess"}) contents += fmt.Sprintf("%d ", code) } + // probeParserFeatures() sorts the features + sort.Strings(expFeatures) err := ioutil.WriteFile(filepath.Join(d, "codes"), []byte(contents), 0755) c.Assert(err, IsNil) mockParserCmd := testutil.MockCommand(c, "apparmor_parser", fmt.Sprintf(` @@ -265,16 +296,12 @@ exit "$EXIT_CODE" features, err := apparmor.ProbeParserFeatures() c.Assert(err, IsNil) - if len(t.expFeatures) == 0 { + if len(expFeatures) == 0 { c.Check(features, HasLen, 0) } else { - c.Check(features, DeepEquals, t.expFeatures) + c.Check(features, DeepEquals, expFeatures) } - var expectedCalls [][]string - for range t.exitCodes { - expectedCalls = append(expectedCalls, []string{"apparmor_parser", "--preprocess"}) - } c.Check(mockParserCmd.Calls(), DeepEquals, expectedCalls) data, err := ioutil.ReadFile(filepath.Join(d, "stdin")) c.Assert(err, IsNil) @@ -282,16 +309,19 @@ exit "$EXIT_CODE" change_profile unsafe /**, } profile snap-test { + #include if exists "/foo" +} +profile snap-test { network qipcrtr dgram, } profile snap-test { - capability bpf, + mqueue, } profile snap-test { - capability audit_read, + capability bpf, } profile snap-test { - mqueue, + capability audit_read, } profile snap-test { network xdp, @@ -305,6 +335,30 @@ profile snap-test { features, err := apparmor.ProbeParserFeatures() c.Check(err, Equals, os.ErrNotExist) c.Check(features, DeepEquals, []string{}) + + // pretend we have an internal apparmor_parser + fakeroot := c.MkDir() + dirs.SetRootDir(fakeroot) + + d := filepath.Join(dirs.SnapMountDir, "/snapd/42", "/usr/lib/snapd") + c.Assert(os.MkdirAll(d, 0755), IsNil) + p := filepath.Join(d, "apparmor_parser") + c.Assert(ioutil.WriteFile(p, nil, 0755), IsNil) + restore = snapdtool.MockOsReadlink(func(path string) (string, error) { + c.Assert(path, Equals, "/proc/self/exe") + return filepath.Join(d, "snapd"), nil + }) + defer restore() + restore = apparmor.MockSnapdAppArmorSupportsReexec(func() bool { return true }) + defer restore() + + // TODO:apparmor-vendoring + // disabled until the spread test failures are fixed + /* + features, err = apparmor.ProbeParserFeatures() + c.Check(err, Equals, nil) + c.Check(features, DeepEquals, []string{"snapd-internal"}) + */ } func (s *apparmorSuite) TestInterfaceSystemKey(c *C) { @@ -328,7 +382,7 @@ func (s *apparmorSuite) TestInterfaceSystemKey(c *C) { c.Check(features, DeepEquals, []string{"network", "policy"}) features, err = apparmor.ParserFeatures() c.Assert(err, IsNil) - c.Check(features, DeepEquals, []string{"cap-audit-read", "cap-bpf", "mqueue", "qipcrtr-socket", "unsafe", "xdp"}) + c.Check(features, DeepEquals, []string{"cap-audit-read", "cap-bpf", "include-if-exists", "mqueue", "qipcrtr-socket", "unsafe", "xdp"}) } func (s *apparmorSuite) TestAppArmorParserMtime(c *C) { @@ -368,7 +422,7 @@ func (s *apparmorSuite) TestFeaturesProbedOnce(c *C) { c.Check(features, DeepEquals, []string{"network", "policy"}) features, err = apparmor.ParserFeatures() c.Assert(err, IsNil) - c.Check(features, DeepEquals, []string{"cap-audit-read", "cap-bpf", "mqueue", "qipcrtr-socket", "unsafe", "xdp"}) + 
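
The rewritten test covers every combination of parser features by iterating a bitmask: each bit of the loop counter toggles one feature in the expected set. A compact standalone illustration of the same subset enumeration:

package main

import "fmt"

func main() {
	features := []string{"unsafe", "mqueue", "xdp"}

	// There are 2^len(features) subsets; bit j of i selects features[j].
	for i := 0; i < 1<<len(features); i++ {
		var subset []string
		for j, f := range features {
			if i&(1<<j) != 0 {
				subset = append(subset, f)
			}
		}
		fmt.Printf("%03b -> %v\n", i, subset)
	}
}
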
c.Check(features, DeepEquals, []string{"cap-audit-read", "cap-bpf", "include-if-exists", "mqueue", "qipcrtr-socket", "unsafe", "xdp"}) // this makes probing fails but is not done again err = os.RemoveAll(d) @@ -427,6 +481,9 @@ func (s *apparmorSuite) TestUpdateHomedirsTunableWriteFail(c *C) { } func (s *apparmorSuite) TestUpdateHomedirsTunableHappy(c *C) { + fakeroot := c.MkDir() + dirs.SetRootDir(fakeroot) + err := apparmor.UpdateHomedirsTunable([]string{"/home/a", "/dir2"}) c.Assert(err, IsNil) configFile := filepath.Join(dirs.GlobalRootDir, "/etc/apparmor.d/tunables/home.d/snapd") @@ -442,3 +499,23 @@ func (s *apparmorSuite) TestUpdateHomedirsTunableHappyNoDirs(c *C) { configFile := filepath.Join(dirs.GlobalRootDir, "/etc/apparmor.d/tunables/home.d/snapd") c.Check(osutil.FileExists(configFile), Equals, false) } + +func (s *apparmorSuite) TestSnapdAppArmorSupportsReexecImpl(c *C) { + fakeroot := c.MkDir() + dirs.SetRootDir(fakeroot) + + // with no info file should indicate it does not support reexec + c.Check(apparmor.SnapdAppArmorSupportsRexecImpl(), Equals, false) + + d := filepath.Join(dirs.GlobalRootDir, dirs.CoreLibExecDir) + c.Assert(os.MkdirAll(d, 0755), IsNil) + infoFile := filepath.Join(d, "info") + c.Assert(ioutil.WriteFile(infoFile, []byte("VERSION=foo"), 0644), IsNil) + c.Check(apparmor.SnapdAppArmorSupportsRexecImpl(), Equals, false) + c.Assert(ioutil.WriteFile(infoFile, []byte("VERSION=foo\nSNAPD_APPARMOR_REEXEC=0"), 0644), IsNil) + c.Check(apparmor.SnapdAppArmorSupportsRexecImpl(), Equals, false) + c.Assert(ioutil.WriteFile(infoFile, []byte("VERSION=foo\nSNAPD_APPARMOR_REEXEC=foo"), 0644), IsNil) + c.Check(apparmor.SnapdAppArmorSupportsRexecImpl(), Equals, false) + c.Assert(ioutil.WriteFile(infoFile, []byte("VERSION=foo\nSNAPD_APPARMOR_REEXEC=1"), 0644), IsNil) + c.Check(apparmor.SnapdAppArmorSupportsRexecImpl(), Equals, true) +} diff --git a/sandbox/apparmor/export_test.go b/sandbox/apparmor/export_test.go index 4eebc0ac06..81d8c272d6 100644 --- a/sandbox/apparmor/export_test.go +++ b/sandbox/apparmor/export_test.go @@ -79,12 +79,10 @@ func MockFsRootPath(path string) (restorer func()) { } } -func MockParserSearchPath(new string) (restore func()) { - oldAppArmorParserSearchPath := parserSearchPath - parserSearchPath = new - return func() { - parserSearchPath = oldAppArmorParserSearchPath - } +func MockSnapdAppArmorSupportsReexec(new func() bool) (restore func()) { + restore = testutil.Backup(&snapdAppArmorSupportsReexec) + snapdAppArmorSupportsReexec = new + return restore } var ( @@ -95,6 +93,8 @@ var ( RequiredParserFeatures = requiredParserFeatures PreferredKernelFeatures = preferredKernelFeatures PreferredParserFeatures = preferredParserFeatures + + SnapdAppArmorSupportsRexecImpl = snapdAppArmorSupportsReexecImpl ) func FreshAppArmorAssessment() { diff --git a/sandbox/apparmor/profile.go b/sandbox/apparmor/profile.go index e6c2f867d0..bec3aed281 100644 --- a/sandbox/apparmor/profile.go +++ b/sandbox/apparmor/profile.go @@ -23,7 +23,6 @@ import ( "fmt" "io" "os" - "os/exec" "path" "path/filepath" "runtime" @@ -99,10 +98,20 @@ var LoadProfiles = func(fnames []string, cacheDir string, flags AaParserFlags) e if !osutil.GetenvBool("SNAPD_DEBUG") { args = append(args, "--quiet") } - args = append(args, fnames...) - output, err := exec.Command("apparmor_parser", args...).CombinedOutput() + cmd, _, err := AppArmorParser() if err != nil { + return err + } + + cmd.Args = append(cmd.Args, args...) + cmd.Args = append(cmd.Args, fnames...) 
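
The reexec check exercised by the test above reads a snapd "info" file of simple KEY=VALUE lines and looks for SNAPD_APPARMOR_REEXEC=1. The real code delegates the parsing to snapdtool.SnapdVersionFromInfoFile; the following is a simplified, stdlib-only stand-in for what that check amounts to:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// supportsReexec reports whether the info file declares
// SNAPD_APPARMOR_REEXEC=1; a missing or unreadable file counts as "no".
func supportsReexec(infoPath string) bool {
	f, err := os.Open(infoPath)
	if err != nil {
		return false
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		key, value, ok := strings.Cut(scanner.Text(), "=")
		if ok && key == "SNAPD_APPARMOR_REEXEC" {
			return value == "1"
		}
	}
	return false
}

func main() {
	fmt.Println(supportsReexec("/usr/lib/snapd/info"))
}
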
+ output, err := cmd.CombinedOutput() + if err != nil || strings.Contains(string(output), "parser error") { + if err == nil { + // ensure we have an error to report + err = fmt.Errorf("exit status 0 with parser error") + } return fmt.Errorf("cannot load apparmor profiles: %s\napparmor_parser output:\n%s", err, string(output)) } return nil diff --git a/sandbox/apparmor/profile_test.go b/sandbox/apparmor/profile_test.go index 78b7460a1f..5b2976d433 100644 --- a/sandbox/apparmor/profile_test.go +++ b/sandbox/apparmor/profile_test.go @@ -54,6 +54,8 @@ func (s *appArmorSuite) SetUpTest(c *C) { func (s *appArmorSuite) TestLoadProfilesRunsAppArmorParserReplace(c *C) { cmd := testutil.MockCommand(c, "apparmor_parser", "") defer cmd.Restore() + restore := apparmor.MockParserSearchPath(cmd.BinDir()) + defer restore() err := apparmor.LoadProfiles([]string{"/path/to/snap.samba.smbd"}, apparmor.CacheDir, 0) c.Assert(err, IsNil) c.Assert(cmd.Calls(), DeepEquals, [][]string{ @@ -64,6 +66,8 @@ func (s *appArmorSuite) TestLoadProfilesRunsAppArmorParserReplace(c *C) { func (s *appArmorSuite) TestLoadProfilesMany(c *C) { cmd := testutil.MockCommand(c, "apparmor_parser", "") defer cmd.Restore() + restore := apparmor.MockParserSearchPath(cmd.BinDir()) + defer restore() err := apparmor.LoadProfiles([]string{"/path/to/snap.samba.smbd", "/path/to/another.profile"}, apparmor.CacheDir, 0) c.Assert(err, IsNil) c.Assert(cmd.Calls(), DeepEquals, [][]string{ @@ -74,6 +78,8 @@ func (s *appArmorSuite) TestLoadProfilesMany(c *C) { func (s *appArmorSuite) TestLoadProfilesNone(c *C) { cmd := testutil.MockCommand(c, "apparmor_parser", "") defer cmd.Restore() + restore := apparmor.MockParserSearchPath(cmd.BinDir()) + defer restore() err := apparmor.LoadProfiles([]string{}, apparmor.CacheDir, 0) c.Assert(err, IsNil) c.Check(cmd.Calls(), HasLen, 0) @@ -82,6 +88,8 @@ func (s *appArmorSuite) TestLoadProfilesNone(c *C) { func (s *appArmorSuite) TestLoadProfilesReportsErrors(c *C) { cmd := testutil.MockCommand(c, "apparmor_parser", "exit 42") defer cmd.Restore() + restore := apparmor.MockParserSearchPath(cmd.BinDir()) + defer restore() err := apparmor.LoadProfiles([]string{"/path/to/snap.samba.smbd"}, apparmor.CacheDir, 0) c.Assert(err.Error(), Equals, `cannot load apparmor profiles: exit status 42 apparmor_parser output: @@ -91,11 +99,28 @@ apparmor_parser output: }) } +func (s *appArmorSuite) TestLoadProfilesReportsErrorWithZeroExitStatus(c *C) { + cmd := testutil.MockCommand(c, "apparmor_parser", "echo parser error; exit 0") + defer cmd.Restore() + restore := apparmor.MockParserSearchPath(cmd.BinDir()) + defer restore() + err := apparmor.LoadProfiles([]string{"/path/to/snap.samba.smbd"}, apparmor.CacheDir, 0) + c.Assert(err.Error(), Equals, `cannot load apparmor profiles: exit status 0 with parser error +apparmor_parser output: +parser error +`) + c.Assert(cmd.Calls(), DeepEquals, [][]string{ + {"apparmor_parser", "--replace", "--write-cache", "-O", "no-expr-simplify", "--cache-loc=/var/cache/apparmor", "--quiet", "/path/to/snap.samba.smbd"}, + }) +} + func (s *appArmorSuite) TestLoadProfilesRunsAppArmorParserReplaceWithSnapdDebug(c *C) { os.Setenv("SNAPD_DEBUG", "1") defer os.Unsetenv("SNAPD_DEBUG") cmd := testutil.MockCommand(c, "apparmor_parser", "") defer cmd.Restore() + restore := apparmor.MockParserSearchPath(cmd.BinDir()) + defer restore() err := apparmor.LoadProfiles([]string{"/path/to/snap.samba.smbd"}, apparmor.CacheDir, 0) c.Assert(err, IsNil) c.Assert(cmd.Calls(), DeepEquals, [][]string{ @@ -118,6 +143,8 @@ func (s 
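
The profile tests above stub out apparmor_parser by dropping a fake executable into a directory and pointing the parser search path at it. A rough, stdlib-only sketch of that arrangement (testutil.MockCommand additionally records calls; this sketch does not):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// mockCommand writes a fake executable shell script into a temporary
// directory and prepends that directory to PATH, returning a restore
// function that undoes both.
func mockCommand(name, script string) (restore func(), err error) {
	dir, err := os.MkdirTemp("", "mock-"+name)
	if err != nil {
		return nil, err
	}
	path := filepath.Join(dir, name)
	if err := os.WriteFile(path, []byte("#!/bin/sh\n"+script+"\n"), 0o755); err != nil {
		return nil, err
	}
	oldPath := os.Getenv("PATH")
	os.Setenv("PATH", dir+string(os.PathListSeparator)+oldPath)
	return func() {
		os.Setenv("PATH", oldPath)
		os.RemoveAll(dir)
	}, nil
}

func main() {
	restore, err := mockCommand("apparmor_parser", "echo mocked; exit 0")
	if err != nil {
		panic(err)
	}
	defer restore()

	out, err := exec.Command("apparmor_parser", "--preprocess").CombinedOutput()
	fmt.Printf("output=%q err=%v\n", out, err)
}
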
*appArmorSuite) TestUnloadProfilesNone(c *C) { func (s *appArmorSuite) TestUnloadRemovesCachedProfile(c *C) { cmd := testutil.MockCommand(c, "apparmor_parser", "") defer cmd.Restore() + restore := apparmor.MockParserSearchPath(cmd.BinDir()) + defer restore() dirs.SetRootDir(c.MkDir()) defer dirs.SetRootDir("") @@ -135,6 +162,8 @@ func (s *appArmorSuite) TestUnloadRemovesCachedProfile(c *C) { func (s *appArmorSuite) TestUnloadRemovesCachedProfileInForest(c *C) { cmd := testutil.MockCommand(c, "apparmor_parser", "") defer cmd.Restore() + restore := apparmor.MockParserSearchPath(cmd.BinDir()) + defer restore() dirs.SetRootDir(c.MkDir()) defer dirs.SetRootDir("") diff --git a/seed/seedtest/sample.go b/seed/seedtest/sample.go index e121093f4b..18d6323b58 100644 --- a/seed/seedtest/sample.go +++ b/seed/seedtest/sample.go @@ -114,6 +114,19 @@ base: core20 version: 1.0 confinement: devmode `, + "core22": `name: core22 +type: base +version: 1.0 +`, + "pc-kernel=22": `name: pc-kernel +type: kernel +version: 1.0 +`, + "pc=22": `name: pc +type: gadget +base: core22 +version: 1.0 +`, } func MergeSampleSnapYaml(snapYaml ...map[string]string) map[string]string { diff --git a/seed/seedtest/seedtest.go b/seed/seedtest/seedtest.go index 52d18b965c..ce52ec360a 100644 --- a/seed/seedtest/seedtest.go +++ b/seed/seedtest/seedtest.go @@ -212,7 +212,7 @@ func (s *TestingSeed16) WriteAssertions(fn string, assertions ...asserts.Asserti } func WriteAssertions(fn string, assertions ...asserts.Assertion) { - f, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY, 0644) + f, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { panic(err) } diff --git a/seed/seedwriter/seed16.go b/seed/seedwriter/seed16.go index 968bebe2f0..597eeb048a 100644 --- a/seed/seedwriter/seed16.go +++ b/seed/seedwriter/seed16.go @@ -60,6 +60,11 @@ func (pol *policy16) checkSnapChannel(_ channel.Channel, whichSnap string) error return nil } +func (pol *policy16) checkClassicSnap(_ *SeedSnap) error { + // Core 16/18 have no constraints on classic snaps + return nil +} + func makeSystemSnap(snapName string) *asserts.ModelSnap { return internal.MakeSystemSnap(snapName, "", []string{"run"}) } diff --git a/seed/seedwriter/seed20.go b/seed/seedwriter/seed20.go index 3037f796f9..9b742b0e70 100644 --- a/seed/seedwriter/seed20.go +++ b/seed/seedwriter/seed20.go @@ -61,6 +61,21 @@ func (pol *policy20) checkSnapChannel(ch channel.Channel, whichSnap string) erro return pol.checkAllowedDangerous() } +func (pol *policy20) checkClassicSnap(sn *SeedSnap) error { + if pol.model.Grade() == asserts.ModelDangerous { + // implicit classic snaps are accepted + return nil + } + modSnap, ok := sn.SnapRef.(*asserts.ModelSnap) + if !ok { + return fmt.Errorf("internal error: extra snap with non-dangerous grade") + } + if !modSnap.Classic { + return fmt.Errorf("cannot use classic snap %q with a model of grade higher than dangerous that does not allow it explicitly (missing classic: true in snap stanza)", modSnap.Name) + } + return nil +} + func (pol *policy20) systemSnap() *asserts.ModelSnap { return internal.MakeSystemSnap("snapd", "latest/stable", []string{"run", "ephemeral"}) } diff --git a/seed/seedwriter/writer.go b/seed/seedwriter/writer.go index c3dc12027a..d463eb24fe 100644 --- a/seed/seedwriter/writer.go +++ b/seed/seedwriter/writer.go @@ -213,6 +213,8 @@ type policy interface { checkAvailable(snpRef naming.SnapRef, modes []string, availableByMode map[string]*naming.SnapSet) bool + checkClassicSnap(sn *SeedSnap) error + 
needsImplicitSnaps(availableByMode map[string]*naming.SnapSet) (bool, error) implicitSnaps(availableByMode map[string]*naming.SnapSet) []*asserts.ModelSnap implicitExtraSnaps(availableByMode map[string]*naming.SnapSet) []*OptionsSnap @@ -570,7 +572,7 @@ func (w *Writer) InfoDerived() error { // SetInfo sets Info of the SeedSnap and possibly computes its // destination Path. func (w *Writer) SetInfo(sn *SeedSnap, info *snap.Info) error { - if info.Confinement == snap.DevModeConfinement { + if info.NeedsDevMode() { if err := w.policy.allowsDangerousFeatures(); err != nil { return err } @@ -912,8 +914,13 @@ func (w *Writer) downloaded(seedSnaps []*SeedSnap) error { } needsClassic := info.NeedsClassic() - if needsClassic && !w.model.Classic() { - return fmt.Errorf("cannot use classic snap %q in a core system", info.SnapName()) + if needsClassic { + if !w.model.Classic() { + return fmt.Errorf("cannot use classic snap %q in a core system", info.SnapName()) + } + if err := w.policy.checkClassicSnap(sn); err != nil { + return err + } } modes := sn.modes() diff --git a/seed/seedwriter/writer_test.go b/seed/seedwriter/writer_test.go index 6b628f1817..ff4f05713b 100644 --- a/seed/seedwriter/writer_test.go +++ b/seed/seedwriter/writer_test.go @@ -3364,3 +3364,72 @@ func (s *writerSuite) TestSeedSnapsWriteCore20ErrWhenDirExists(c *C) { c.Assert(err, ErrorMatches, `system "1234" already exists`) c.Assert(seedwriter.IsSytemDirectoryExistsError(err), Equals, true) } + +func (s *writerSuite) testDownloadedCore20CheckClassic(c *C, modelGrade asserts.ModelGrade, classicFlag bool) error { + classicSnap := map[string]interface{}{ + "name": "classic-snap", + "id": s.AssertedSnapID("classic-snap"), + "modes": []interface{}{"run"}, + } + if classicFlag { + classicSnap["classic"] = "true" + } + model := s.Brands.Model("my-brand", "my-model", map[string]interface{}{ + "display-name": "my model", + "architecture": "amd64", + "store": "my-store", + "base": "core20", + "classic": "true", + "distribution": "ubuntu", + "grade": string(modelGrade), + "snaps": []interface{}{ + map[string]interface{}{ + "name": "pc-kernel", + "id": s.AssertedSnapID("pc-kernel"), + "type": "kernel", + "default-channel": "20", + }, + map[string]interface{}{ + "name": "pc", + "id": s.AssertedSnapID("pc"), + "type": "gadget", + "default-channel": "20", + }, + map[string]interface{}{ + "name": "core", + "id": s.AssertedSnapID("core"), + "type": "core", + }, + classicSnap, + }, + }) + + // validity + c.Assert(model.Grade(), Equals, modelGrade) + + s.makeSnap(c, "snapd", "") + s.makeSnap(c, "core20", "") + s.makeSnap(c, "core", "") + s.makeSnap(c, "pc-kernel=20", "") + s.makeSnap(c, "pc=20", "") + s.makeSnap(c, "classic-snap", "developerid") + + s.opts.Label = "20221125" + _, _, err := s.upToDownloaded(c, model, s.fillDownloadedSnap) + return err +} + +func (s *writerSuite) TestDownloadedCore20CheckClassicDangerous(c *C) { + err := s.testDownloadedCore20CheckClassic(c, asserts.ModelDangerous, false) + c.Check(err, IsNil) +} + +func (s *writerSuite) TestDownloadedCore20CheckClassicSignedNoFlag(c *C) { + err := s.testDownloadedCore20CheckClassic(c, asserts.ModelSigned, false) + c.Check(err, ErrorMatches, `cannot use classic snap "classic-snap" with a model of grade higher than dangerous that does not allow it explicitly \(missing classic: true in snap stanza\)`) +} + +func (s *writerSuite) TestDownloadedCore20CheckClassicSignedWithFlag(c *C) { + err := s.testDownloadedCore20CheckClassic(c, asserts.ModelSigned, true) + c.Check(err, IsNil) +} diff 
--git a/spread.yaml b/spread.yaml index c5c18d2862..9a673eaba7 100644 --- a/spread.yaml +++ b/spread.yaml @@ -124,11 +124,11 @@ backends: workers: 6 - ubuntu-core-20-64: image: ubuntu-20.04-64 - workers: 6 + workers: 8 storage: 20G - ubuntu-core-22-64: image: ubuntu-22.04-64 - workers: 6 + workers: 8 storage: 20G - ubuntu-secboot-20.04-64: image: ubuntu-20.04-64 @@ -280,6 +280,7 @@ backends: password: ubuntu qemu: + memory: 4G systems: - ubuntu-14.04-32: username: ubuntu @@ -526,7 +527,8 @@ debug-each: | if tests.nested is-nested; then echo '# nested VM status' tests.nested vm status - tests.nested get serial-log + # filter out ^[ to ensure that the debug output gets not messed up + tests.nested get serial-log | sed 's/\x1b//g' # add another echo in case the serial log is missing a newline echo diff --git a/store/tooling/tooling.go b/store/tooling/tooling.go index 792048860c..8fa270d4af 100644 --- a/store/tooling/tooling.go +++ b/store/tooling/tooling.go @@ -259,6 +259,7 @@ func (tsto *ToolingStore) snapDownload(targetFn string, sar *store.SnapActionRes type SnapToDownload struct { Snap naming.SnapRef Channel string + Revision snap.Revision CohortKey string } @@ -313,10 +314,18 @@ func (tsto *ToolingStore) DownloadMany(toDownload []SnapToDownload, curSnaps []* actions := make([]*store.SnapAction, 0, len(toDownload)) for _, sn := range toDownload { + // One cannot specify both a channel and specific revision. The store + // will return an error if do this. + channel := sn.Channel + if !sn.Revision.Unset() { + channel = "" + } + actions = append(actions, &store.SnapAction{ Action: "download", InstanceName: sn.Snap.SnapName(), // XXX consider using snap-id first - Channel: sn.Channel, + Channel: channel, + Revision: sn.Revision, CohortKey: sn.CohortKey, Flags: actionFlag, }) diff --git a/tests/lib/fakeinstaller/mk-classic-rootfs.sh b/tests/lib/fakeinstaller/mk-classic-rootfs.sh deleted file mode 100755 index 4b7dae4da5..0000000000 --- a/tests/lib/fakeinstaller/mk-classic-rootfs.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash - -set -e - -# uncomment for better debug messages -#set -x -#exec > /tmp/mk-classic-rootfs.sh.log -#exec 2>&1 - - -# XXX: merge with the work from alfonso in -# tests/nested/manual/fde-on-classic/mk-image.sh (PR:12102) -create_classic_rootfs() { - set -x - local DESTDIR="$1" - - - # Create basic devices to be able to install packages - [ -e "$DESTDIR"/dev/null ] || sudo mknod -m 666 "$DESTDIR"/dev/null c 1 3 - [ -e "$DESTDIR"/dev/zero ] || sudo mknod -m 666 "$DESTDIR"/dev/zero c 1 5 - [ -e "$DESTDIR"/dev/random ] || sudo mknod -m 666 "$DESTDIR"/dev/random c 1 8 - [ -e "$DESTDIR"/dev/urandom ] || sudo mknod -m 666 "$DESTDIR"/dev/urandom c 1 9 - # ensure resolving works inside the chroot - echo "nameserver 8.8.8.8" | sudo tee -a "$DESTDIR"/etc/resolv.conf - # install additional packages - sudo chroot "$DESTDIR" /usr/bin/sh -c "DEBIAN_FRONTEND=noninteractive apt update" - local pkgs="snapd ssh openssh-server sudo iproute2 iputils-ping isc-dhcp-client netplan.io vim-tiny kmod cloud-init" - sudo chroot "$DESTDIR" /usr/bin/sh -c \ - "DEBIAN_FRONTEND=noninteractive apt install --no-install-recommends -y $pkgs" - # netplan config - cat <<'EOF' | sudo tee "$DESTDIR"/etc/netplan/00-ethernet.yaml -network: - ethernets: - any: - match: - name: e* - dhcp4: true - version: 2 -EOF - - # ensure we can login - sudo chroot "$DESTDIR" /usr/sbin/adduser --disabled-password --gecos "" user1 - printf "ubuntu\nubuntu\n" | sudo chroot "$DESTDIR" /usr/bin/passwd user1 - echo "user1 ALL=(ALL) 
NOPASSWD:ALL" | sudo tee -a "$DESTDIR"/etc/sudoers - - # set password for root user - sudo chroot "$DESTDIR" /usr/bin/sh -c 'echo root:root | chpasswd' - sudo tee -a "$DESTDIR/etc/ssh/sshd_config" <<'EOF' -PermitRootLogin yes -PasswordAuthentication yes -EOF - - # install the current in-development version of snapd when available, - # this will give us seeding support - GOPATH="${GOPATH:-./}" - package=$(find "$GOPATH" -maxdepth 1 -name "snapd_*.deb") - if [ -e "$package" ]; then - cp "$package" "$DESTDIR"/var/cache/apt/archives - sudo chroot "$DESTDIR" /usr/bin/sh -c \ - "DEBIAN_FRONTEND=noninteractive apt install -y /var/cache/apt/archives/$(basename "$package")" - fi - - # ensure that we have a mount point for the bind mount below - sudo mkdir -p "$DESTDIR"/boot/grub - # This is done by the the-modeenv script that is called by the - # populate-writable service from initramfs on UC20+, but we don't - # run it on classic. - sudo tee -a "$DESTDIR/etc/fstab" <<'EOF' -/run/mnt/ubuntu-boot/EFI/ubuntu /boot/grub none bind -EOF -} - -# get target dir from user -DST="$1" -if [ ! -d "$DST" ]; then - echo "target dir $DST is not a directory" - exit 1 -fi - -# extract the base -if [ -f /cdrom/casper/base.squashfs ]; then - sudo unsquashfs -f -d "$DST" /cdrom/casper/base.squashfs - # TODO: find out why the squashfs is preseeded - /usr/lib/snapd/snap-preseed --reset "$DST" -else - BASETAR=ubuntu-base.tar.gz - wget -c http://cdimage.ubuntu.com/ubuntu-base/releases/22.04/release/ubuntu-base-22.04.1-base-amd64.tar.gz -O "$BASETAR" - sudo tar -C "$DST" -xf "$BASETAR" -fi - -# create minimal rootfs -create_classic_rootfs "$DST" diff --git a/tests/lib/fakestore/store/store.go b/tests/lib/fakestore/store/store.go index 445de49eaf..c3891ec6c2 100644 --- a/tests/lib/fakestore/store/store.go +++ b/tests/lib/fakestore/store/store.go @@ -586,6 +586,7 @@ type snapAction struct { InstanceKey string `json:"instance-key"` SnapID string `json:"snap-id"` Name string `json:"name"` + Revision int `json:"revision,omitempty"` } type snapActionRequest struct { @@ -710,6 +711,7 @@ func (s *Store) snapActionEndpoint(w http.ResponseWriter, req *http.Request) { Base: essInfo.Base, }, } + logger.Debugf("requested snap %q revision %d", essInfo.Name, a.Revision) res.Snap.Publisher.ID = essInfo.DeveloperID res.Snap.Publisher.Username = essInfo.DevelName res.Snap.Download.URL = fmt.Sprintf("%s/download/%s", s.URL(), filepath.Base(fn)) diff --git a/tests/lib/muinstaller/go.mod b/tests/lib/muinstaller/go.mod new file mode 100644 index 0000000000..759191d77c --- /dev/null +++ b/tests/lib/muinstaller/go.mod @@ -0,0 +1,6 @@ +module github.com/snapcore/snapd/tests/lib/muinstaller + +go 1.18 + +require github.com/snapcore/snapd v0.0.0-20220929103851-d41483655caf + diff --git a/tests/lib/fakeinstaller/main.go b/tests/lib/muinstaller/main.go index e003a42822..4e5c5b9a2c 100644 --- a/tests/lib/fakeinstaller/main.go +++ b/tests/lib/muinstaller/main.go @@ -23,9 +23,12 @@ import ( "bytes" "errors" "fmt" + "io/ioutil" "os" "os/exec" "path/filepath" + "sort" + "strings" "time" "github.com/snapcore/snapd/client" @@ -36,9 +39,72 @@ import ( "github.com/snapcore/snapd/osutil" "github.com/snapcore/snapd/osutil/disks" "github.com/snapcore/snapd/osutil/mkfs" - "github.com/snapcore/snapd/overlord/state" ) +func waitForDevice() string { + for { + devices, err := emptyFixedBlockDevices() + if err != nil { + logger.Noticef("cannot list devices: %v", err) + } + switch len(devices) { + case 0: + logger.Noticef("cannot use automatic mode, no empty disk 
found") + case 1: + // found exactly one target + return devices[0] + default: + logger.Noticef("cannot use automatic mode, multiple empty disks found: %v", devices) + } + time.Sleep(5 * time.Second) + } +} + +// emptyFixedBlockDevices finds any non-removable physical disk that has +// no partitions. It will exclude loop devices. +func emptyFixedBlockDevices() (devices []string, err error) { + // eg. /sys/block/sda/removable + removable, err := filepath.Glob(filepath.Join(dirs.GlobalRootDir, "/sys/block/*/removable")) + if err != nil { + return nil, err + } +devicesLoop: + for _, removableAttr := range removable { + val, err := ioutil.ReadFile(removableAttr) + if err != nil || string(val) != "0\n" { + // removable, ignore + continue + } + dev := filepath.Base(filepath.Dir(removableAttr)) + if strings.HasPrefix(dev, "loop") { + // is loop device, ignore + continue + } + // let's see if it has partitions + pattern := fmt.Sprintf(filepath.Join(dirs.GlobalRootDir, "/sys/block/%s/%s*/partition"), dev, dev) + // eg. /sys/block/sda/sda1/partition + partitionAttrs, _ := filepath.Glob(pattern) + if len(partitionAttrs) != 0 { + // has partitions, ignore + continue + } + // check that there was no previous filesystem + devNode := fmt.Sprintf("/dev/%s", dev) + output, err := exec.Command("lsblk", "--output", "fstype", "--noheadings", devNode).CombinedOutput() + if err != nil { + return nil, osutil.OutputErr(output, err) + } + if strings.TrimSpace(string(output)) != "" { + // found a filesystem, ignore + continue devicesLoop + } + + devices = append(devices, devNode) + } + sort.Strings(devices) + return devices, nil +} + func firstVol(volumes map[string]*gadget.Volume) *gadget.Volume { for _, vol := range volumes { return vol @@ -125,7 +191,7 @@ func createPartitions(bootDevice string, volumes map[string]*gadget.Volume) ([]g } func runMntFor(label string) string { - return filepath.Join(dirs.GlobalRootDir, "/run/fakeinstaller-mnt/", label) + return filepath.Join(dirs.GlobalRootDir, "/run/muinstaller-mnt/", label) } func postSystemsInstallSetupStorageEncryption(cli *client.Client, @@ -170,7 +236,7 @@ func postSystemsInstallSetupStorageEncryption(cli *client.Client, } var encryptedDevices = make(map[string]string) - if err := chg.Get("encrypted-devices", &encryptedDevices); err != nil && !errors.Is(err, state.ErrNoState) { + if err := chg.Get("encrypted-devices", &encryptedDevices); err != nil { return nil, fmt.Errorf("cannot get encrypted-devices from change: %v", err) } @@ -254,7 +320,7 @@ func createAndMountFilesystems(bootDevice string, volumes map[string]*gadget.Vol var mountPoints []string for _, volStruct := range vol.Structure { - if volStruct.Label == "" || volStruct.Filesystem == "" { + if volStruct.Filesystem == "" { continue } @@ -266,7 +332,7 @@ func createAndMountFilesystems(bootDevice string, volumes map[string]*gadget.Vol } partNode = encryptedDevice } else { - part, err := disk.FindMatchingPartitionWithPartLabel(volStruct.Label) + part, err := disk.FindMatchingPartitionWithPartLabel(volStruct.Name) if err != nil { return nil, err } @@ -351,14 +417,7 @@ func detectStorageEncryption(seedLabel string) (bool, error) { return details.StorageEncryption.Support == client.StorageEncryptionSupportAvailable, nil } -func run(seedLabel, bootDevice, rootfsCreator string) error { - if len(os.Args) != 4 { - // xxx: allow installing real UC without a classic-rootfs later - return fmt.Errorf("need seed-label, target-device and classic-rootfs as argument\n") - } - os.Setenv("SNAPD_DEBUG", "1") - 
logger.SimpleSetup() - +func run(seedLabel, rootfsCreator, bootDevice string) error { logger.Noticef("installing on %q", bootDevice) cli := client.New(nil) @@ -404,13 +463,26 @@ func run(seedLabel, bootDevice, rootfsCreator string) error { } func main() { + if len(os.Args) != 4 { + // XXX: allow installing real UC without a classic-rootfs later + fmt.Fprintf(os.Stderr, "need seed-label, target-device and classic-rootfs as argument\n") + os.Exit(1) + } + logger.SimpleSetup() + seedLabel := os.Args[1] - bootDevice := os.Args[2] - rootfsCreator := os.Args[3] + rootfsCreator := os.Args[2] + bootDevice := os.Args[3] + if bootDevice == "auto" { + bootDevice = waitForDevice() + } - if err := run(seedLabel, bootDevice, rootfsCreator); err != nil { + if err := run(seedLabel, rootfsCreator, bootDevice); err != nil { fmt.Fprintf(os.Stderr, "%s\n", err) os.Exit(1) } - logger.Noticef("install done, please reboot") + + msg := "install done, please remove installation media and reboot" + fmt.Println(msg) + exec.Command("wall", msg).Run() } diff --git a/tests/lib/muinstaller/mk-classic-rootfs.sh b/tests/lib/muinstaller/mk-classic-rootfs.sh new file mode 100755 index 0000000000..327820dfa6 --- /dev/null +++ b/tests/lib/muinstaller/mk-classic-rootfs.sh @@ -0,0 +1,109 @@ +#!/bin/bash + +set -e + +# uncomment for better debug messages +#set -x +#exec > /tmp/mk-classic-rootfs.sh.log +#exec 2>&1 + +# tests/nested/manual/fde-on-classic/mk-image.sh (PR:12102) +prepare_classic_rootfs() { + set -x + local DESTDIR="$1" + local ROLE="$2" + + if [ "$ROLE" = "" ]; then + echo "internal error: prepare_classic_rootfs called without 'ROLE'" + exit 1 + fi + + # Create basic devices to be able to install packages + [ -e "$DESTDIR"/dev/null ] || sudo mknod -m 666 "$DESTDIR"/dev/null c 1 3 + [ -e "$DESTDIR"/dev/zero ] || sudo mknod -m 666 "$DESTDIR"/dev/zero c 1 5 + [ -e "$DESTDIR"/dev/random ] || sudo mknod -m 666 "$DESTDIR"/dev/random c 1 8 + [ -e "$DESTDIR"/dev/urandom ] || sudo mknod -m 666 "$DESTDIR"/dev/urandom c 1 9 + + if [ "$ROLE" = spread ]; then + # ensure resolving works inside the chroot + echo "nameserver 8.8.8.8" | sudo tee -a "$DESTDIR"/etc/resolv.conf + + # install additional packages + sudo chroot "$DESTDIR" /usr/bin/sh -c "DEBIAN_FRONTEND=noninteractive apt update" + local pkgs="snapd ssh openssh-server sudo iproute2 iputils-ping isc-dhcp-client netplan.io vim-tiny kmod cloud-init cryptsetup" + sudo chroot "$DESTDIR" /usr/bin/sh -c \ + "DEBIAN_FRONTEND=noninteractive apt install --no-install-recommends -y $pkgs" + # netplan config + cat <<'EOF' | sudo tee "$DESTDIR"/etc/netplan/00-ethernet.yaml +network: + ethernets: + any: + match: + name: e* + dhcp4: true + version: 2 +EOF + # set password for root user + sudo chroot "$DESTDIR" /usr/bin/sh -c 'echo root:root | chpasswd' + sudo mkdir -p "$DESTDIR/etc/ssh" + sudo tee -a "$DESTDIR/etc/ssh/sshd_config" <<'EOF' +PermitRootLogin yes +PasswordAuthentication yes +EOF + + # install the current in-development version of snapd when available, + # this will give us seeding support + # + # TODO: find a better way to do this? 
+ GOPATH="${GOPATH:-/var/lib/snapd}" + package=$(find "$GOPATH" -maxdepth 1 -name "snapd_*.deb") + if [ -e "$package" ]; then + cp "$package" "$DESTDIR"/var/cache/apt/archives + sudo chroot "$DESTDIR" /usr/bin/sh -c \ + "DEBIAN_FRONTEND=noninteractive apt install -y /var/cache/apt/archives/$(basename "$package")" + fi + fi + + # ensure we can login + sudo chroot "$DESTDIR" /usr/sbin/adduser --disabled-password --gecos "" user1 + printf "ubuntu\nubuntu\n" | sudo chroot "$DESTDIR" /usr/bin/passwd user1 + echo "user1 ALL=(ALL) NOPASSWD:ALL" | sudo tee -a "$DESTDIR"/etc/sudoers + + # ensure that we have a mount point for the bind mount below + sudo mkdir -p "$DESTDIR"/boot/grub + # This is done by the the-modeenv script that is called by the + # populate-writable service from initramfs on UC20+, but we don't + # run it on classic. + sudo tee -a "$DESTDIR/etc/fstab" <<'EOF' +/run/mnt/ubuntu-boot/EFI/ubuntu /boot/grub none bind +EOF +} + +# get target dir from user +DST="$1" +if [ ! -d "$DST" ]; then + echo "target dir $DST is not a directory" + exit 1 +fi + +# This script is either used as part of an installer image which will have +# a "base.squashfs". Here very little additional setup is needed or as part +# of a spread test in which case the installer needs to prepare the system +# to be used from spread. The "ROLE" var will be set accordingly so that +# the "prepare_classic_rootfs" knows what to do. +ROLE="" +if [ -f /cdrom/casper/base.squashfs ]; then + sudo unsquashfs -f -d "$DST" /cdrom/casper/base.squashfs + # TODO: find out why the squashfs is preseeded + /usr/lib/snapd/snap-preseed --reset "$DST" + ROLE=installer +else + BASETAR=ubuntu-base.tar.gz + # important to use "-q" to avoid journalctl suppressing log output + wget -q -c http://cdimage.ubuntu.com/ubuntu-base/releases/22.04/release/ubuntu-base-22.04.1-base-amd64.tar.gz -O "$BASETAR" + sudo tar -C "$DST" -xf "$BASETAR" + ROLE=spread +fi + +# create minimal rootfs +prepare_classic_rootfs "$DST" "$ROLE" diff --git a/tests/lib/muinstaller/snapcraft.yaml b/tests/lib/muinstaller/snapcraft.yaml new file mode 100644 index 0000000000..2861f8c9da --- /dev/null +++ b/tests/lib/muinstaller/snapcraft.yaml @@ -0,0 +1,26 @@ +name: muinstaller +version: "0.1" +summary: Minimal Unattended Installer +description: | + Minimal Unattended Installer (muinstaller) is a minimal installer + for Ubuntu Core +confinement: classic +base: core22 + +apps: + muinstaller: + command: bin/muinstaller classic $SNAP/bin/mk-classic-rootfs.sh auto + daemon: simple + cli: + command: bin/muinstaller + +# TODO: add spread test that builds the muinstaller from snapd to ensure +# we don't accidentally break it +parts: + muinstaller: + plugin: go + source: . 
+ build-snaps: [go/1.13/stable] + override-build: | + snapcraftctl build + cp -a mk-classic-rootfs.sh $SNAPCRAFT_PART_INSTALL/bin diff --git a/tests/lib/nested.sh b/tests/lib/nested.sh index 1d11b30795..1c5ce7ef9a 100644 --- a/tests/lib/nested.sh +++ b/tests/lib/nested.sh @@ -436,7 +436,7 @@ nested_download_image() { local IMAGE_URL=$1 local IMAGE_NAME=$2 - curl -L -o "${NESTED_IMAGES_DIR}/${IMAGE_NAME}" "$IMAGE_URL" + curl -C - -L -o "${NESTED_IMAGES_DIR}/${IMAGE_NAME}" "$IMAGE_URL" if [[ "$IMAGE_URL" == *.img.xz ]]; then mv "${NESTED_IMAGES_DIR}/${IMAGE_NAME}" "${NESTED_IMAGES_DIR}/${IMAGE_NAME}.xz" @@ -1009,6 +1009,7 @@ nested_start_core_vm_unit() { PARAM_TRACE="-d cpu_reset" PARAM_LOG="-D $NESTED_LOGS_DIR/qemu.log" PARAM_RTC="${NESTED_PARAM_RTC:-}" + PARAM_EXTRA="${NESTED_PARAM_EXTRA:-}" # Open port 7777 on the host so that failures in the nested VM (e.g. to # create users) can be debugged interactively via @@ -1098,9 +1099,12 @@ nested_start_core_vm_unit() { if nested_is_tpm_enabled; then if snap list test-snapd-swtpm >/dev/null; then - # reset the tpm state - rm /var/snap/test-snapd-swtpm/current/tpm2-00.permall - snap restart test-snapd-swtpm > /dev/null + if [ -z "$NESTED_TPM_NO_RESTART" ]; then + # reset the tpm state + snap stop test-snapd-swtpm > /dev/null + rm /var/snap/test-snapd-swtpm/current/tpm2-00.permall + snap start test-snapd-swtpm > /dev/null + fi else snap install test-snapd-swtpm --edge fi @@ -1145,7 +1149,8 @@ nested_start_core_vm_unit() { ${PARAM_SERIAL} \ ${PARAM_MONITOR} \ ${PARAM_USB} \ - ${PARAM_CD} " "${PARAM_REEXEC_ON_FAILURE}" + ${PARAM_CD} \ + ${PARAM_EXTRA} " "${PARAM_REEXEC_ON_FAILURE}" # wait for the $NESTED_VM service to appear active wait_for_service "$NESTED_VM" @@ -1322,7 +1327,20 @@ nested_start_classic_vm() { PARAM_CPU="" PARAM_CD="${NESTED_PARAM_CD:-}" PARAM_RANDOM="-object rng-random,id=rng0,filename=/dev/urandom -device virtio-rng-pci,rng=rng0" - PARAM_SNAPSHOT="-snapshot" + # TODO: can this be removed? we create a "pristine" copy above? + #PARAM_SNAPSHOT="-snapshot" + PARAM_SNAPSHOT="" + PARAM_EXTRA="${NESTED_PARAM_EXTRA:-}" + + # XXX: duplicated from nested core vm + # Set kvm attribute + local ATTR_KVM + ATTR_KVM="" + if nested_is_kvm_enabled; then + ATTR_KVM=",accel=kvm" + # CPU can be defined just when kvm is enabled + PARAM_CPU="-cpu host" + fi local PARAM_MACHINE PARAM_IMAGE PARAM_SEED PARAM_SERIAL PARAM_BIOS PARAM_TPM if [[ "$SPREAD_BACKEND" = google-nested* ]]; then @@ -1383,6 +1401,7 @@ nested_start_classic_vm() { ${PARAM_SERIAL} \ ${PARAM_MONITOR} \ ${PARAM_USB} \ + ${PARAM_EXTRA} \ ${PARAM_CD} " nested_wait_for_ssh diff --git a/tests/lib/tools/suite/tests.session-support/task.yaml b/tests/lib/tools/suite/tests.session-support/task.yaml index af8b28435f..4adf8d2513 100644 --- a/tests/lib/tools/suite/tests.session-support/task.yaml +++ b/tests/lib/tools/suite/tests.session-support/task.yaml @@ -27,12 +27,12 @@ execute: | tests.session has-session-systemd-and-dbus | MATCH 'ok' tests.session has-session-systemd-and-dbus ;; - ubuntu-core-1[68]-*) + ubuntu-core-16-*) tests.session has-system-systemd-and-dbus | MATCH 'ok' tests.session has-system-systemd-and-dbus - # Ubuntu Core 16 and Ubuntu Core 18 did not support user sessions. - # Note that Ubuntu Core 20 is in the default case down below, and - # does support this feature. + # Ubuntu Core 16 did not support user sessions. + # Note that Ubuntu Core 18 and later are in the default case down + # below, and do support this feature. 
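Among the nested.sh tweaks above, image downloads now pass `-C -` to curl so an interrupted transfer resumes instead of starting over. As a rough illustration of what resuming means at the HTTP level (not snapd or test code; the URL below is made up), a Go helper that asks the server to continue from the bytes it already has:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// resumeDownload appends to dst, sending a Range header for the bytes that
// are already on disk, which is roughly what curl's "-C -" does.
func resumeDownload(url, dst string) error {
	f, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return err
	}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	if info.Size() > 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", info.Size()))
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusPartialContent:
		// server honoured the Range header, keep appending
	case http.StatusOK:
		// server sent the whole file, start from scratch
		if err := f.Truncate(0); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unexpected status %s", resp.Status)
	}
	_, err = io.Copy(f, resp.Body)
	return err
}

func main() {
	// hypothetical image URL, only to show the call
	if err := resumeDownload("https://example.com/ubuntu-core.img.xz", "ubuntu-core.img.xz"); err != nil {
		fmt.Println("download failed:", err)
	}
}
```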
tests.session has-session-systemd-and-dbus | MATCH 'no user dbus.socket' not tests.session has-session-systemd-and-dbus ;; diff --git a/tests/main/apparmor-batch-reload/bin/apparmor_parser.fake b/tests/main/apparmor-batch-reload/bin/apparmor_parser.fake index 3a13b1fe6e..7928ee7415 100755 --- a/tests/main/apparmor-batch-reload/bin/apparmor_parser.fake +++ b/tests/main/apparmor-batch-reload/bin/apparmor_parser.fake @@ -17,7 +17,7 @@ while [ -n "$1" ]; do --quiet|--replace|--remove|--skip-read-cache) # Ignore ;; - -O) + -O|--config-file|--base|--policy-features) # Ignore, discard argument shift ;; diff --git a/tests/main/interfaces-libvirt/task.yaml b/tests/main/interfaces-libvirt/task.yaml index e806c4d07b..a9af748316 100644 --- a/tests/main/interfaces-libvirt/task.yaml +++ b/tests/main/interfaces-libvirt/task.yaml @@ -33,6 +33,9 @@ prepare: | echo "And a snap declaring a plug on the libvirt interface is installed" snap install --edge test-snapd-libvirt-consumer + # Temporary workaround until the updated test snap is released + mount -o bind "$TESTSLIB/snaps/store/test-snapd-libvirt-consumer/vm/ping-unikernel.xml" \ + /snap/test-snapd-libvirt-consumer/current/vm/ping-unikernel.xml echo "And the required tap interface is in place" ip tuntap add tap100 mode tap diff --git a/tests/main/lxd/task.yaml b/tests/main/lxd/task.yaml index 264e70cee8..0fd3215228 100644 --- a/tests/main/lxd/task.yaml +++ b/tests/main/lxd/task.yaml @@ -83,7 +83,7 @@ execute: | lxd.lxc config set core.proxy_http "$http_proxy" fi if [ -n "${https_proxy:-}" ]; then - lxd.lxc config set core.proxy_https "$http_proxy" + lxd.lxc config set core.proxy_https "$https_proxy" fi # The snapd package we build as part of the tests will only run on the diff --git a/tests/main/prepare-image-classic/task.yaml b/tests/main/prepare-image-classic/task.yaml index 36eafb4a47..b97bceea29 100644 --- a/tests/main/prepare-image-classic/task.yaml +++ b/tests/main/prepare-image-classic/task.yaml @@ -57,6 +57,8 @@ execute: | # prepare-image ran as user so it warns about the ownership MATCH 'WARNING: ensure that the contents under .* are owned by root:root in the \(final\) image' < stderr + # But there are not other warnings/errors on stderr + wc -l < stderr | MATCH "^1$" echo Verifying the result systemid="$(date +%Y%m%d)" diff --git a/tests/main/prepare-image-reproducible/task.yaml b/tests/main/prepare-image-reproducible/task.yaml new file mode 100644 index 0000000000..888d36776f --- /dev/null +++ b/tests/main/prepare-image-reproducible/task.yaml @@ -0,0 +1,147 @@ +summary: Verify that we are able to request specific revisions of snaps from the store through prepare-image + +# autopkgtest run only a subset of tests that deals with the integration +# with the distro +backends: [-autopkgtest] + +# disable the following distributions +# ubuntu-14, lack of systemd-run +# ubuntu-20.04-arm*, because we use pc kernel and gadget. +systems: +- -ubuntu-14.04-* +- -ubuntu-20.04-arm-* + +environment: + ROOT: /home/test/tmp/ + IMAGE: /home/test/tmp/image + GADGET: /home/test/tmp/gadget + STORE_DIR: $(pwd)/fake-store-blobdir + STORE_ADDR: localhost:11028 + +prepare: | + if [ "$TRUST_TEST_KEYS" = "false" ]; then + echo "This test needs test keys to be trusted" + exit + fi + + mkdir -p "$ROOT" + chown test:test "$ROOT" + +restore: | + if [ "$TRUST_TEST_KEYS" = "false" ]; then + echo "This test needs test keys to be trusted" + exit + fi + + #shellcheck source=tests/lib/store.sh + . 
"$TESTSLIB"/store.sh + teardown_fake_store "$STORE_DIR" + + rm -rf "$ROOT" + +execute: | + if [ "$TRUST_TEST_KEYS" = "false" ]; then + echo "This test needs test keys to be trusted" + exit + fi + + install_snap_to_fakestore() { + local SNAP_NAME="$1" + local SNAP_REVISION="$2" + local SNAP_PATH + + SNAP_PATH=$("$TESTSTOOLS"/snaps-state pack-local "$SNAP_NAME") + make_snap_installable "$STORE_DIR" "$SNAP_PATH" + cat > snap-"$SNAP_NAME"-decl.json <<EOF + { + "type": "snap-declaration", + "format": "1", + "revision": "1", + "snap-name": "$SNAP_NAME", + "snap-id": "$SNAP_NAME-id", + "plugs": { + "snapd-control": { + "allow-installation": "true", + "allow-auto-connection": "true" + } + } + } + EOF + cat > snap-"$SNAP_NAME"-rev.json <<EOF + { + "type": "snap-revision", + "snap-id": "$SNAP_NAME-id", + "snap-revision": "$SNAP_REVISION" + } + EOF + fakestore new-snap-declaration --dir "${STORE_DIR}" "$SNAP_PATH" --snap-decl-json snap-"$SNAP_NAME"-decl.json + fakestore new-snap-revision --dir "${STORE_DIR}" "$SNAP_PATH" --snap-rev-json snap-"$SNAP_NAME"-rev.json + } + + # Ensure the following snaps are available on the host snap to avoid the + # store script trying to fetch these after setting up the fake store. + echo Installing required snaps we will need + snap install core + snap install remarshal + snap install jq + + echo Download snaps needed for the the model + snap download core + snap download pc + snap download pc-kernel + + #shellcheck source=tests/lib/store.sh + . "$TESTSLIB"/store.sh + + echo "Configure daemon to point to the fake store" + setup_fake_store "$STORE_DIR" + + echo Expose the needed assertions through the fakestore + cp "$TESTSLIB"/assertions/testrootorg-store.account-key "$STORE_DIR/asserts" + cp "$TESTSLIB"/assertions/developer1.account "$STORE_DIR/asserts" + cp "$TESTSLIB"/assertions/developer1.account-key "$STORE_DIR/asserts" + + # It is not enough to copy the assertions, we must also ack them otherwise we + # will get an error about not being able to resolve the account key + snap ack "$STORE_DIR/asserts/testrootorg-store.account-key" + snap ack "$STORE_DIR/asserts/developer1.account" + snap ack "$STORE_DIR/asserts/developer1.account-key" + + # We now add all the required snaps to the fake store. It hardly matter which revision + # we give to them, as the fake store does not handle requests of specific revisions. Currently + # the fake-store will just return whatever revision there is. + echo "Adding snaps to the fake store" + make_snap_installable_with_id "$STORE_DIR" "$(ls core_*.snap)" 99T7MUlRhtI3U0QFgl5mXXESAiSwt776 + make_snap_installable_with_id "$STORE_DIR" "$(ls pc_*.snap)" UqFziVZDHLSyO3TqSWgNBoAdHbLI4dAH + make_snap_installable_with_id "$STORE_DIR" "$(ls pc-kernel_*.snap)" pYVQrBcKmBa0mZ4CCN7ExT6jH8rY1hza + install_snap_to_fakestore test-snapd-sh 23 + + # Write the lists of revisions we want to get from the fake-store. We have purposely + # only put these revisions into the store. + cat > "$ROOT/seed.manifest" <<EOF + core 1 + pc 1 + pc-kernel 1 + test-snapd-sh 23 + EOF + + # Now we test, as mentioned the store does not really support getting revisions, and it + # doesn't make sense to implement this and also do a fail-version as we are only verifying + # our own behavior in that case, and those tests would never fail (and then what was the point?). + # Instead, until the official store supports asking for specific revisions, we should instead just + # verify that we are able to send a specific revision to the store. 
+ export SNAPPY_FORCE_API_URL=http://$STORE_ADDR + echo Running prepare-image + snap prepare-image --channel edge --snap test-snapd-sh --revisions "$ROOT"/seed.manifest "$TESTSLIB"/assertions/developer1-pc.model $ROOT + + echo Verifying the expected revisions were retrieved + test -e "$IMAGE/var/lib/snapd/seed/snaps/core_1.snap" + test -e "$IMAGE/var/lib/snapd/seed/snaps/pc_1.snap" + test -e "$IMAGE/var/lib/snapd/seed/snaps/pc-kernel_1.snap" + test -e "$IMAGE/var/lib/snapd/seed/snaps/test-snapd-sh_23.snap" + + echo Verifying that the store acknowledged we requested the expected revisions + journalctl -u fakestore | grep 'requested snap "core" revision 1' + journalctl -u fakestore | grep 'requested snap "pc" revision 1' + journalctl -u fakestore | grep 'requested snap "pc-kernel" revision 1' + journalctl -u fakestore | grep 'requested snap "test-snapd-sh" revision 23' diff --git a/tests/main/snap-quota-memory/task.yaml b/tests/main/snap-quota-memory/task.yaml index c8f22cdf31..7e1722b14d 100644 --- a/tests/main/snap-quota-memory/task.yaml +++ b/tests/main/snap-quota-memory/task.yaml @@ -178,7 +178,7 @@ execute: | # minimum amount of accounting memory for an empty cgroup, which is observed # to be up to 12KiB due to cached memory. case "$snapdSaysMemUsage" in - null|0|4096|12288) + null|0|4096|8192|12288) # expected ;; *) @@ -188,7 +188,7 @@ execute: | snapdSaysMemUsage="$(sudo snap run test-snapd-curl.curl --unix-socket /run/snapd.socket http://localhost/v2/quotas/group-four | jq -r '.result.current.memory')" case "$snapdSaysMemUsage" in - null|0|4096|12288) + null|0|4096|8192|12288) # expected ;; *) diff --git a/tests/main/snap-quota/task.yaml b/tests/main/snap-quota/task.yaml index 635b1acf23..0eb5d09488 100644 --- a/tests/main/snap-quota/task.yaml +++ b/tests/main/snap-quota/task.yaml @@ -53,7 +53,7 @@ execute: | # it is omitted entirely, or it could be either 4096 or 12.3kB on some systems where # empty cgroups have memory usage even on empty cgroups MATCH " 4\s+group-three\s+group-top1\s+memory=15.0MB(\s*|\s*memory=[0-9.a-zA-Z]+)\s*$" < quotas.txt - MATCH " 5\s+group-sub-three\s+group-three\s+memory=4.00MB(\s*|\s*memory=4096B|\s*memory=12.3kB)\s*$" < quotas.txt + MATCH " 5\s+group-sub-three\s+group-three\s+memory=4.00MB(\s*|\s*memory=(4096B|8.19kB|12.3kB))\s*$" < quotas.txt MATCH " 6\s+group-sub-sub-three\s+group-sub-three\s+memory=1.00MB\s*$" < quotas.txt MATCH " 7\s+group-two\s+group-top1\s+memory=2.00MB\s*$" < quotas.txt MATCH " 8\s+group-top2\s+memory=500MB\s*$" < quotas.txt diff --git a/tests/main/snap-refresh-hold/task.yaml b/tests/main/snap-refresh-hold/task.yaml index 7c48a63910..49938b75bb 100644 --- a/tests/main/snap-refresh-hold/task.yaml +++ b/tests/main/snap-refresh-hold/task.yaml @@ -11,6 +11,11 @@ restore: | snap refresh --unhold test-snapd-tools || true snap remove --purge test-snapd-tools || true +debug: | + snap changes + # show last 3 changes in detail + snap changes | tail -n 4 | awk '{ if (NF != 0) system("snap change " $1) }' + execute: | reset() { snap refresh --channel=latest/stable test-snapd-tools @@ -27,7 +32,7 @@ execute: | "$TESTSTOOLS"/snapd-state force-autorefresh systemctl start snapd.{socket,service} - if retry -n 10 --quiet sh -c 'snap changes | tail -2 | grep "Done.*Auto-refresh"'; then + if retry -n 15 --quiet sh -c 'snap changes | tail -2 | grep "(Done|Doing).*Auto-refresh"'; then echo "expected 'snap refresh --hold' to prevent auto-refresh" exit 1 fi @@ -36,13 +41,14 @@ execute: | reset snap refresh --hold test-snapd-tools + CHANGE_ID=$(snap 
changes | tail -n 2 | head -n 1 | awk '{print $1}') systemctl stop snapd.{service,socket} "$TESTSTOOLS"/snapd-state change-snap-channel test-snapd-tools edge "$TESTSTOOLS"/snapd-state change-snap-channel test-snapd-tools_instance edge "$TESTSTOOLS"/snapd-state force-autorefresh systemctl start snapd.{socket,service} - if ! retry -n 10 --quiet sh -c 'snap changes | tail -2 | grep "Done.*Auto-refresh snap \"test-snapd-tools_instance\""'; then + if ! "$TESTSTOOLS"/snapd-state wait-for-snap-autorefresh test-snapd-tools_instance "$CHANGE_ID"; then echo 'expected "test-snapd-tools_instance" to have been auto-refreshed' exit 1 fi diff --git a/tests/main/snapd-apparmor/task.yaml b/tests/main/snapd-apparmor/task.yaml index b565d38207..ebbaeb2760 100644 --- a/tests/main/snapd-apparmor/task.yaml +++ b/tests/main/snapd-apparmor/task.yaml @@ -54,6 +54,9 @@ execute: | echo "And restart snapd.apparmor.service" systemctl restart snapd.apparmor.service + # check that logging from snapd-apparmor works + journalctl -u snapd.apparmor | MATCH "Loading profiles " + # get the set of profiles which now exist grep -v / /sys/kernel/security/apparmor/profiles | cut -f1 -d" " | sort > profiles_after_reload.txt diff --git a/tests/main/snapd-snap/task.yaml b/tests/main/snapd-snap/task.yaml index ebca6537bc..e07498a05d 100644 --- a/tests/main/snapd-snap/task.yaml +++ b/tests/main/snapd-snap/task.yaml @@ -32,12 +32,14 @@ systems: # Start early as it takes a long time. priority: 100 +kill-timeout: 35m environment: # the destructive build mode runs only on xenial, but we also run lxd on # xenial to make sure it builds there too SNAPCRAFT_BUILD_ENVIRONMENT/destructive: host SNAPCRAFT_BUILD_ENVIRONMENT/lxd: lxd + CONSUMER_SNAP: test-snapd-policy-app-consumer prepare: | # shellcheck source=tests/lib/systems.sh @@ -132,6 +134,14 @@ prepare: | fi fi +debug: | + # get the snapd sandbox features + snap debug sandbox-features + + # get the full journal to see any out-of-memory errors + # shellcheck disable=SC2119 + "$TESTSTOOLS"/journal-state get-log + execute: | # shellcheck source=tests/lib/systems.sh . "$TESTSLIB/systems.sh" @@ -149,8 +159,35 @@ execute: | exit 0 fi + if [ "$SPREAD_REBOOT" != "0" ]; then + if os.query is-trusty; then + # seems we have to kick trusty along + systemctl restart snapd + fi + snap list | MATCH snapd + systemctl status snapd.service + if ! os.query is-trusty && ! 
os.query is-xenial; then + systemctl status snapd.apparmor.service + fi + cat /var/lib/snapd/system-key + echo "Rebooted successfully" + + exit 0 + fi # shellcheck disable=SC2164 pushd "$PROJECT_PATH" + + echo "Ensure we use the correct version when host is 14.04" + if os.query is-trusty; then + # prepare-restore.sh will only have updated + # packaging/ubuntu-14.04/changelog so make sure this is also done for + # the ubuntu-16.04 packaging as well as this is what will be used to + # build the snapd snap + + # Use fake version to ensure we are always bigger than anything else + dch --changelog packaging/ubuntu-16.04/changelog --newversion "1337.$(dpkg-parsechangelog --show-field Version)" "testing build" + fi + echo "Build the snap" snap run snapcraft snap --output=snapd_spread-test.snap popd @@ -192,3 +229,194 @@ execute: | echo "Check /usr/lib/snapd/info" test -f squashfs-root/usr/lib/snapd/info MATCH SNAPD_ASSERTS_FORMATS < squashfs-root/usr/lib/snapd/info + + unsquashfs -ll snapd_spread-test.snap | MATCH libc.so + + # TODO:apparmor-vendoring + # remove this "exit 0" once apparmor-vendoring is ready + exit 0 + + echo "Ensure we have apparmor_parser" + unsquashfs -ll snapd_spread-test.snap | MATCH usr/lib/snapd/apparmor_parser + + echo "Ensure we can install the snapd snap" + snap install --dangerous snapd_spread-test.snap + cat >> snapd-cleanup.sh <<EOF + #!/bin/sh + if [ $(find /snap/snapd/ -maxdepth 1 -type d 2>/dev/null | wc -l) -gt 2 ]; then + snap revert snapd + fi + EOF + chmod +x snapd-cleanup.sh + tests.cleanup defer sh -c "$PWD/snapd-cleanup.sh" + + echo "Ensure we restarted into the snapd snap" + "$TESTSTOOLS"/journal-state match-log 'restarting into "/snap/snapd/' + + echo "Ensure sandbox-features shows the internal apparmor_parser" + snap debug sandbox-features --required apparmor:parser:snapd-internal + + echo "Then we should be able to compile policy using the internal apparmor_parser" + /snap/snapd/current/usr/lib/snapd/apparmor_parser \ + --config-file /snap/snapd/current/usr/lib/snapd/apparmor/parser.conf \ + -b /snap/snapd/current/usr/lib/snapd/apparmor.d \ + --policy-features /snap/snapd/current/usr/lib/snapd/apparmor.d/abi/3.0 \ + -r /var/lib/snapd/apparmor/profiles/snap.snapcraft.snapcraft + + echo "Then we should be able to successfully install a snap" + snap install hello-world + tests.cleanup defer snap remove --purge hello-world + + echo "Then hello-world.evil should fail due to snap confinement" + if hello-world.evil; then + echo "hello-world.evil should fail due to snap confinement" + exit 1 + fi + + echo "Then the profile should support include-if-exists" + MATCH "#include if exists \"/var/lib/snapd/apparmor/snap-tuning\"" < /var/lib/snapd/apparmor/profiles/snap.hello-world.evil + + # Replicate the tests from tests/main/interfaces-many-core-provided so + # we can exercise the vendored appamor_parser etc within the snapd snap + + # We remove the shared-memory plug and interface in trusty because it fails with the + # following error since adding private /dev/shm support to shared-memory interface: + # shared-memory plug with "private: true" cannot be connected if "/dev/shm" is a symlink) + if os.query is-trusty; then + cp -r "$TESTSLIB/snaps/$CONSUMER_SNAP" . + sed -e '/shared-memory:/,+2d' -i $CONSUMER_SNAP/meta/snap.yaml + fi + + echo "Given a snap is installed" + "$TESTSTOOLS"/snaps-state install-local "$CONSUMER_SNAP" + tests.cleanup defer snap remove --purge "$CONSUMER_SNAP" + + # If possible, prepare a session for the test user. 
On many systems this + # will allow running all tests as the unprivileged user. This shields us + # from accidentally triggering any additional processes from run in the + # session of the root user and stay behind after this test terminates. + if tests.session has-session-systemd-and-dbus; then + tests.session -u test prepare + tests.cleanup defer tests.session -u test restore + fi + + echo "For each core-provided slot" + SNAP_MOUNT_DIR="$(os.paths snap-mount-dir)" + for plugcmd in "$SNAP_MOUNT_DIR"/bin/"$CONSUMER_SNAP".* ; do + + # Only connect 1/8 of the interfaces on xenial - the Xenial GA + # kernel has a bad memory leak (LP: #1939915) triggered by + # reloading apparmor profiles too much and the test fails + # (kill-timeout) trying either to remove interfaces or removing the + # snap whilst waiting for apparmor_parser to finish, so avoid this + # by only touching some of the interfaces overall - there also + # seems to be another smaller leak on Bionic etc too so only do + # half the interfaces on other systems + if os.query is-xenial && [ "$((RANDOM % 8))" != 0 ] || [ "$((RANDOM % 2))" != 0 ]; then + echo "skipping plug: $plugcmd" + continue + fi + plugcmd_bn=$(basename "$plugcmd") + plug_iface=$(echo "$plugcmd_bn" | tr '.' ':') + #shellcheck disable=SC2001 + slot_iface=$(echo "$plug_iface" | sed "s/$CONSUMER_SNAP//") + + # we test browser-support two different ways, so account for that + if [ "$plug_iface" = "$CONSUMER_SNAP:browser-sandbox" ]; then + slot_iface=":browser-support" + fi + + CONNECTED_PATTERN="$slot_iface +.*$CONSUMER_SNAP" + DISCONNECTED_PATTERN="$slot_iface +-" + if [ "$SNAPCRAFT_BUILD_ENVIRONMENT" = "lxd" ]; then + # with lxd snap installed these slot will be connected to the lxd + # snap already + if [ "$slot_iface" = ":lxd-support" ] || [ "$slot_iface" = ":system-observe" ]; then + DISCONNECTED_PATTERN="$slot_iface +lxd" + fi + fi + # Skip any interfaces that core doesn't ship + if ! snap interfaces | grep -E -q "$slot_iface +"; then + echo "$slot_iface not present, skipping" + continue + fi + + if [ "$plug_iface" = "$CONSUMER_SNAP:mount-control" ] && os.query is-trusty ; then + # systemd version is too old, skipping + snap connect "$plug_iface" "$slot_iface" 2>&1 | MATCH "systemd version 204 is too old \\(expected at least 209\\)" + continue + fi + + echo "When slot $slot_iface is connected" + if snap interfaces | grep -E -q "$DISCONNECTED_PATTERN"; then + if [ "$SNAPCRAFT_BUILD_ENVIRONMENT" = "lxd" ]; then + # we have the lxd snap installed so it provides the lxd slot - + # connect to that explicitly + if [ "$slot_iface" = ":lxd" ]; then + slot_iface="lxd:lxd" + fi + fi + if [ "$slot_iface" = ":broadcom-asic-control" ] || [ "$slot_iface" = ":firewall-control" ] || [ "$slot_iface" = ":kubernetes-support" ] || [ "$slot_iface" = ":openvswitch-support" ] || [ "$slot_iface" = ":ppp" ]; then + # TODO: when the kmod backend no longer fails on missing + # modules, we can remove this + snap connect "$plug_iface" "$slot_iface" || true + else + snap connect "$plug_iface" "$slot_iface" + fi + fi + snap interfaces | MATCH "$CONNECTED_PATTERN" + + echo "Then $plugcmd should succeed" + if tests.session has-session-systemd-and-dbus; then + tests.session -u test exec "$plugcmd" | MATCH PASS + else + # If we cannot run the plug command as the test user, in the + # relative safety of the user session which gets torn down, then + # run the test directly EXCEPT when testing the desktop interface. 
+ # + # The desktop interface causes, at minimum, XDG document portal to + # activate in the root users's session, which is not cleaned up. + # Since that interface will only be used in a real session, leaving + # it out is acceptable. + if [ "$plugcmd" != "${CONSUMER_SNAP}.desktop" ]; then + "$plugcmd" | MATCH PASS + else + echo "skipping $plugcmd on an unsupported system" + fi + fi + + echo "Finally disconnect the interface" + if snap interfaces | grep -E -q "$CONNECTED_PATTERN"; then + if [ "$plug_iface" = "$CONSUMER_SNAP:browser-sandbox" ]; then + snap disconnect "$CONSUMER_SNAP:browser-support" "$slot_iface" + else + snap disconnect "$plug_iface" "$slot_iface" + fi + fi + done + + # also check that snapd-apparmor service works + if ! os.query is-trusty && ! os.query is-xenial; then + systemctl status snapd.apparmor.service + fi + + # reboot to ensure snapd.apparmor still works then too + if [ "$SPREAD_REBOOT" = "0" ]; then + # downgrade the snapd deb from the distro package to test that we can + # still handle the generated apparmor profiles etc from the snapd snap + echo "Downgrading snapd to distro packaged version..." + # this fails on ubuntu-16.04 and 14.04 since the distro installed + # version of snapd comes from esm.ubuntu.com but the spread instances do + # not have credentials for esm.ubuntu.com + if ! os.query is-trusty && ! os.query is-xenial; then + apt install -y --allow-downgrades "snapd/$(lsb_release -sc)" + tests.cleanup defer apt install -y "$PROJECT_PATH/../"snapd_1337.*_"$(dpkg-architecture -qDEB_HOST_ARCH)".deb + + # check snapd.apparmor is still working after downgrade + if ! os.query is-trusty && ! os.query is-xenial; then + systemctl status snapd.apparmor.service + fi + echo "Rebooting to re-generate system-key..." + REBOOT + fi + fi diff --git a/tests/nested/manual/core20-auto-remove-user/defaults.yaml b/tests/nested/manual/core20-auto-remove-user/defaults.yaml new file mode 100644 index 0000000000..30292153b8 --- /dev/null +++ b/tests/nested/manual/core20-auto-remove-user/defaults.yaml @@ -0,0 +1,6 @@ +defaults: + system: + refresh: + hold: "HOLD-TIME" + journal: + persistent: true diff --git a/tests/nested/manual/core20-auto-remove-user/prepare-device b/tests/nested/manual/core20-auto-remove-user/prepare-device new file mode 100755 index 0000000000..357c0850fa --- /dev/null +++ b/tests/nested/manual/core20-auto-remove-user/prepare-device @@ -0,0 +1,3 @@ +#!/bin/sh +# 10.0.2.2 is the host from a nested VM +snapctl set device-service.url=http://10.0.2.2:11029 diff --git a/tests/nested/manual/core20-auto-remove-user/task.yaml b/tests/nested/manual/core20-auto-remove-user/task.yaml new file mode 100644 index 0000000000..160bbbf947 --- /dev/null +++ b/tests/nested/manual/core20-auto-remove-user/task.yaml @@ -0,0 +1,237 @@ +summary: Verify that snapd correctly removes expired users created with assertions. 
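The new core20-auto-remove-user test below exercises removal of users whose system-user assertion carries an `until` timestamp. The timestamps are plain RFC 3339 strings, so the decision the test waits for reduces to a comparison like this sketch (illustrative only, not snapd's user-removal code):

```go
package main

import (
	"fmt"
	"time"
)

// expired reports whether a user created from an assertion with the given
// "until" timestamp should already have been removed at time now.
func expired(until string, now time.Time) (bool, error) {
	t, err := time.Parse(time.RFC3339, until)
	if err != nil {
		return false, err
	}
	return now.After(t), nil
}

func main() {
	until := time.Now().Add(time.Hour).Format(time.RFC3339)
	gone, err := expired(until, time.Now().Add(2*time.Hour)) // pretend the clock jumped ahead
	if err != nil {
		panic(err)
	}
	fmt.Println(gone) // true
}
```

The test drives this by jumping the VM clock ahead with `date -s 'next hour'` and then waiting for the account to vanish.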
+ +systems: [ubuntu-20.04-64, ubuntu-22.04-64] + +environment: + # use snapd from the spread run so that we have testkeys trusted in the + # snapd run + NESTED_BUILD_SNAPD_FROM_CURRENT: true + + # use secure boot and TPM to enable encryption + NESTED_ENABLE_TPM: true + NESTED_ENABLE_SECURE_BOOT: true + + # don't use cloud-init it doesn't work with grade secured + NESTED_USE_CLOUD_INIT: false + + # sign all the snaps we build for the image with fakestore + NESTED_SIGN_SNAPS_FAKESTORE: true + + NESTED_CUSTOM_AUTO_IMPORT_ASSERTION: $TESTSLIB/assertions/developer1-{VERSION}-auto-import.assert + NESTED_CUSTOM_MODEL: $TESTSLIB/assertions/developer1-{VERSION}-secured.model + + # for the fake store + NESTED_FAKESTORE_BLOB_DIR: $(pwd)/fake-store-blobdir + NESTED_UBUNTU_IMAGE_SNAPPY_FORCE_SAS_URL: http://localhost:11028 + + # unset this otherwise ubuntu-image complains about overriding the channel for + # a model with grade higher than dangerous when building the image + NESTED_CORE_CHANNEL: "" + +prepare: | + if [ "$TRUST_TEST_KEYS" = "false" ]; then + echo "This test needs test keys to be trusted" + exit + fi + + snap install remarshal jq + tests.cleanup defer snap remove remarshal + tests.cleanup defer snap remove jq + + # set the expiration of the assert to 60 minutes, and replace the version + # with the version we are testing + VERSION="$(tests.nested show version)" + EXPIRATION_1HR=$(date --rfc-3339=seconds -d "$(date) + 60 minute" | sed 's/ /T/') + EXPIRATION_2HR=$(date --rfc-3339=seconds -d "$(date) + 120 minute" | sed 's/ /T/') + EXPIRATION_3HR=$(date --rfc-3339=seconds -d "$(date) + 180 minute" | sed 's/ /T/') + + #shellcheck disable=SC2002 + cat ./user2.json | jq --arg date "$EXPIRATION_1HR" '. + {until: $date}' > ./user2-final.json + sed -i "s/{VERSION}/$VERSION/g" ./user2-final.json + #shellcheck disable=SC2002 + cat ./user2-2.json | jq --arg date "$EXPIRATION_2HR" '. + {until: $date}' > ./user2-2-final.json + sed -i "s/{VERSION}/$VERSION/g" ./user2-2-final.json + #shellcheck disable=SC2002 + cat ./user3.json | jq --arg date "$EXPIRATION_3HR" '. + {until: $date}' > ./user3-final.json + sed -i "s/{VERSION}/$VERSION/g" ./user3-final.json + + # sign the users using gendeveloper1 + gendeveloper1 sign-model < ./user2-final.json > user2.assert + gendeveloper1 sign-model < ./user2-2-final.json > user2-2.assert + gendeveloper1 sign-model < ./user3-final.json > user3.assert + + #shellcheck source=tests/lib/nested.sh + . "$TESTSLIB/nested.sh" + + #shellcheck source=tests/lib/store.sh + . "$TESTSLIB"/store.sh + + # Setup the fake-store for ubuntu-image to use when creating our core image. + # We immediately tear down the staging store, to make sure snapd is not pointed + # towards this once we invoke ubuntu-image. + # We also need to point NESTED_UBUNTU_IMAGE_SNAPPY_FORCE_SAS_URL towards the newly + # setup fake-store to actually have it use it. 
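The prepare section above builds the assertions by computing `EXPIRATION_*` timestamps with `date --rfc-3339=seconds` and splicing them into the user JSON through `jq '. + {until: $date}'`. A rough Go equivalent of that splice, using a trimmed-down stand-in for user2.json:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// stand-in for user2.json; the real headers carry brand, models, etc.
	raw := []byte(`{"type":"system-user","username":"user2","user-presence":"until-expiration"}`)

	var headers map[string]interface{}
	if err := json.Unmarshal(raw, &headers); err != nil {
		panic(err)
	}
	// same effect as: jq --arg date "$EXPIRATION_1HR" '. + {until: $date}'
	headers["until"] = time.Now().Add(time.Hour).Format(time.RFC3339)

	out, err := json.MarshalIndent(headers, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```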
+ setup_fake_store "$NESTED_FAKESTORE_BLOB_DIR" + teardown_staging_store + + echo "Expose the needed assertions through the fakestore" + cp "$TESTSLIB"/assertions/developer1.account "$NESTED_FAKESTORE_BLOB_DIR/asserts" + cp "$TESTSLIB"/assertions/developer1.account-key "$NESTED_FAKESTORE_BLOB_DIR/asserts" + + KEY_NAME=$(tests.nested download snakeoil-key) + SNAKEOIL_KEY="$PWD/$KEY_NAME.key" + SNAKEOIL_CERT="$PWD/$KEY_NAME.pem" + + # Get the nested system version + VERSION="$(tests.nested show version)" + + echo "Grab and prepare the gadget snap" + snap download --basename=pc --channel="$VERSION/edge" pc + unsquashfs -d pc-gadget pc.snap + + echo "Sign the shim binary" + tests.nested secboot-sign gadget pc-gadget "$SNAKEOIL_KEY" "$SNAKEOIL_CERT" + + echo "Add the prepare-device hook" + mkdir -p pc-gadget/meta/hooks + cp prepare-device pc-gadget/meta/hooks/prepare-device + + # delay all refreshes for a week from now, as otherwise refreshes for our + # snaps (which are asserted by the testrootorg authority-id) may happen, which + # will break things because the signing keys won't match, etc. and + # specifically snap-bootstrap in the kernel snap from the store won't trust + # the seed keys to unlock the encrypted data partition in the initramfs + sed defaults.yaml -e "s/HOLD-TIME/$(date --date="next week" +%Y-%m-%dT%H:%M:%S%:z)/" >> \ + pc-gadget/meta/gadget.yaml + + snap pack pc-gadget/ "$(tests.nested get extra-snaps-path)" + + #shellcheck disable=SC2148 + systemd-run --collect --unit fakedevicesvc fakedevicesvc localhost:11029 + + tests.nested build-image core + tests.nested create-vm core + +restore: | + if [ "$TRUST_TEST_KEYS" = "false" ]; then + echo "This test needs test keys to be trusted" + exit + fi + + systemctl stop fakedevicesvc + + #shellcheck source=tests/lib/store.sh + . "$TESTSLIB"/store.sh + teardown_fake_store "$NESTED_FAKESTORE_BLOB_DIR" + +execute: | + if [ "$TRUST_TEST_KEYS" = "false" ]; then + echo "This test needs test keys to be trusted" + exit + fi + + #shellcheck source=tests/lib/nested.sh + . "$TESTSLIB/nested.sh" + + # wait until device is initialized and has a serial + tests.nested wait-for device-initialized + + setup_ramdisk() { + local ASSERT="$1" + if ! remote.exec "test -e /dev/ram0"; then + remote.exec "sudo mknod -m 660 /dev/ram0 b 1 0" + remote.exec "sudo chown root.disk /dev/ram0" + fi + remote.exec "sudo mkfs.ext3 /dev/ram0" + remote.exec "sudo mount /dev/ram0 /mnt" + remote.push "$ASSERT" + remote.exec "sudo cp $ASSERT /mnt/auto-import.assert" + remote.exec "sync" + } + teardown_ramdisk() { + remote.exec "sudo umount /mnt" + } + + # Setup a ramdisk with the revision 1 of user2 + setup_ramdisk ./user2.assert + + # The system is already managed, which means we need to delete user1 from the auth + # state to allow auto-importing a new user. The new user has an expiration and will + # be removed automatically, so we only need to do this hacking once. 
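The commands that follow stop snapd, pull state.json, and strip the authenticated users with `jq 'del(.data.auth.users)'` so that a fresh system-user assertion can be auto-imported. For readers who do not speak jq, the same operation written in Go (field names taken from the test, purely illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// dropAuthUsers removes data.auth.users from a snapd state document,
// mirroring: jq 'del(.data.auth.users)' state.json
func dropAuthUsers(state []byte) ([]byte, error) {
	var doc map[string]interface{}
	if err := json.Unmarshal(state, &doc); err != nil {
		return nil, err
	}
	if data, ok := doc["data"].(map[string]interface{}); ok {
		if auth, ok := data["auth"].(map[string]interface{}); ok {
			delete(auth, "users")
		}
	}
	return json.Marshal(doc)
}

func main() {
	in := []byte(`{"data":{"auth":{"users":[{"id":1}],"device":{}}}}`)
	out, err := dropAuthUsers(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"data":{"auth":{"device":{}}}}
}
```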
+ remote.exec "sudo systemctl stop snapd.socket snapd.service" + remote.exec "sudo cat /var/lib/snapd/state.json" > state.json + #shellcheck disable=SC2002 + cat ./state.json | jq 'del(.data.auth.users)' > ./state-updated.json + remote.push ./state-updated.json + remote.exec "sudo mv ./state-updated.json /var/lib/snapd/state.json" + remote.exec "sudo rm -rf /var/lib/snapd/assertions/asserts-v0/system-user/developer1" + remote.exec "sudo systemctl start snapd.socket snapd.service" + + echo "$(snap auto-import) imports assertions from the mounted ramdisk" + remote.exec "sudo snap auto-import" + + # ensure we are looking at the correct user and revision + remote.exec "snap known system-user" | MATCH "format: 2" + remote.exec "snap known system-user" | MATCH "revision: 1" + remote.exec "snap known system-user" | MATCH "name: user2" + remote.exec "snap known system-user" | MATCH "user-presence: until-expiration" + + # check the user exists in the list of users + remote.exec "getent passwd user2" | MATCH 'user2:' + + # remove the ramdisk + teardown_ramdisk + + # lets put the system time ahead by 1h + remote.exec "sudo timedatectl set-ntp false" + remote.exec "sudo date -s 'next hour'" + + # run ensure state to have the user removed + remote.exec "sudo snap debug ensure-state-soon" + + # wait for user to get removed + retry -n 30 --wait 1 sh -c "getent passwd user2 | NOMATCH 'user2:'" + + # next step is to verify we can reimport the user, but with a higher revision + # and a new expiration date. + setup_ramdisk ./user2-2.assert + + echo "$(snap auto-import) imports assertions from the mounted ramdisk" + remote.exec "sudo snap auto-import" + + # ensure we are looking at the correct user and revision + remote.exec "snap known system-user" | MATCH "format: 2" + remote.exec "snap known system-user" | MATCH "revision: 2" + remote.exec "snap known system-user" | MATCH "name: user2" + remote.exec "snap known system-user" | MATCH "user-presence: until-expiration" + + # remove the ramdisk + teardown_ramdisk + + # lets put the system time ahead by 1h + remote.exec "sudo date -s 'next hour'" + + # run ensure state to have the user removed + remote.exec "sudo snap debug ensure-state-soon" + + # wait for user to get removed + retry -n 30 --wait 1 sh -c "getent passwd user2 | NOMATCH 'user2:'" + + # last, we want to see if we can import a new system-user assertion which + # has nothing to do with user2. 
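Earlier in this scenario the test waits for the account to disappear with `retry -n 30 --wait 1 sh -c "getent passwd user2 | NOMATCH 'user2:'"`. The same polling loop written out in Go for illustration (it assumes `getent` exits non-zero once the entry is gone):

```go
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// waitUserRemoved polls getent until the user no longer resolves, or gives up.
func waitUserRemoved(user string, attempts int, wait time.Duration) error {
	for i := 0; i < attempts; i++ {
		if err := exec.Command("getent", "passwd", user).Run(); err != nil {
			// getent exits non-zero when the entry does not exist
			return nil
		}
		time.Sleep(wait)
	}
	return fmt.Errorf("user %q still present after %d attempts", user, attempts)
}

func main() {
	if err := waitUserRemoved("user2", 30, time.Second); err != nil {
		fmt.Println(err)
	}
}
```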
+ setup_ramdisk ./user3.assert + + echo "$(snap auto-import) imports assertions from the mounted ramdisk" + remote.exec "sudo snap auto-import" + + # ensure we are looking at the correct user, and we could import a new user + # that wasn't user2 + remote.exec "snap known system-user" | MATCH "format: 2" + remote.exec "snap known system-user" | MATCH "name: user3" + remote.exec "snap known system-user" | MATCH "user-presence: until-expiration" + + # check the user exists in the list of users + remote.exec "getent passwd user3" | MATCH 'user3:' + + # remove the ramdisk + teardown_ramdisk diff --git a/tests/nested/manual/core20-auto-remove-user/user2-2.json b/tests/nested/manual/core20-auto-remove-user/user2-2.json new file mode 100644 index 0000000000..271aef6a4b --- /dev/null +++ b/tests/nested/manual/core20-auto-remove-user/user2-2.json @@ -0,0 +1,20 @@ +{ + "type": "system-user", + "format": "2", + "revision": "2", + "authority-id": "developer1", + "series": [ + "16" + ], + "brand-id": "developer1", + "email": "snappy-dev@lists.launchpad.net", + "models": [ + "testkeys-snapd-signed-core-{VERSION}-amd64", + "testkeys-snapd-secured-core-{VERSION}-amd64" + ], + "name": "user2", + "username": "user2", + "password": "$6$o5er943Y$cngsJHutSgACVbR65WAnhaUPC9.vENj8locb50hvMdMRMK8cQ3Zbu6WPh5Al2JrnHzpR63osPCwE/IFG/2s6K1", + "user-presence": "until-expiration", + "since": "2020-05-16T18:06:04+00:00" +} diff --git a/tests/nested/manual/core20-auto-remove-user/user2.json b/tests/nested/manual/core20-auto-remove-user/user2.json new file mode 100644 index 0000000000..bfa66c28cf --- /dev/null +++ b/tests/nested/manual/core20-auto-remove-user/user2.json @@ -0,0 +1,20 @@ +{ + "type": "system-user", + "format": "2", + "revision": "1", + "authority-id": "developer1", + "series": [ + "16" + ], + "brand-id": "developer1", + "email": "snappy-dev@lists.launchpad.net", + "models": [ + "testkeys-snapd-signed-core-{VERSION}-amd64", + "testkeys-snapd-secured-core-{VERSION}-amd64" + ], + "name": "user2", + "username": "user2", + "password": "$6$o5er943Y$cngsJHutSgACVbR65WAnhaUPC9.vENj8locb50hvMdMRMK8cQ3Zbu6WPh5Al2JrnHzpR63osPCwE/IFG/2s6K1", + "user-presence": "until-expiration", + "since": "2020-05-16T18:06:04+00:00" +} diff --git a/tests/nested/manual/core20-auto-remove-user/user3.json b/tests/nested/manual/core20-auto-remove-user/user3.json new file mode 100644 index 0000000000..fdcb17782e --- /dev/null +++ b/tests/nested/manual/core20-auto-remove-user/user3.json @@ -0,0 +1,20 @@ +{ + "type": "system-user", + "format": "2", + "revision": "1", + "authority-id": "developer1", + "series": [ + "16" + ], + "brand-id": "developer1", + "email": "snappy-dev2@lists.launchpad.net", + "models": [ + "testkeys-snapd-signed-core-{VERSION}-amd64", + "testkeys-snapd-secured-core-{VERSION}-amd64" + ], + "name": "user3", + "username": "user3", + "password": "$6$o5er943Y$cngsJHutSgACVbR65WAnhaUPC9.vENj8locb50hvMdMRMK8cQ3Zbu6WPh5Al2JrnHzpR63osPCwE/IFG/2s6K1", + "user-presence": "until-expiration", + "since": "2020-05-16T18:06:04+00:00" +} diff --git a/tests/nested/manual/fde-on-classic/classic-model.assert b/tests/nested/manual/fde-on-classic/classic-model.assert index 2c392090ec..1a462d54d1 100644 --- a/tests/nested/manual/fde-on-classic/classic-model.assert +++ b/tests/nested/manual/fde-on-classic/classic-model.assert @@ -8,6 +8,8 @@ base: core22 classic: true distribution: ubuntu grade: dangerous +serial-authority: + - generic snaps: - default-channel: 22/edge @@ -30,13 +32,13 @@ snaps: timestamp: 2022-03-31T12:00:00.0Z 
sign-key-sha3-384: JGh2mJNy5UeNKQ05MNvphnlDcgteasy0WiJFgk-aJ9XvPyqcKwIm8zomJtWwb-mT -AcLBcwQAAQoAHRYhBAho4l7L210CwlP5EEjPuvv/MykzBQJi0Y/TAAoJEEjPuvv/MykzFLsP/3YO -sTN4B6q0PWe8SNb1qPMidKryoZ86dO6GmI4RVnRzN678J6j09bBqgYB9N61BtYGYCUHYlVsZAsVh -W1BQNzd0PVZHPSz3eI/yzBna8buRtBLO/vfAVIaLrtMGy9YGlL8uRPF+LqUbvwm+0MUoy4FOsVCw -kBR6A4jl++AiSJPucVncqs62CskB2RbIBWnk1jUlBeT11CRZB7jjjxlc7dPjJwvtcwpIGj3VMaz8 -azqxSBvOnfOBLe/eBeGxZwbmMA5t/p41aOP5eCVlDFDhagvUveAG6cUeAmXIhR6QM639UJQR75Qq -6/ITVUvbFtFGJBpV06701xfDCDgO4/OTeYSimgJ+dXzQnn+rAmhRI6HmPkDyNnKScMN1O+Ak5GZt -u6uueWKQyRtXz2WL2Z5/xmfoPWRPOON8jhzs5HQ7+zW5I7pf3Ri1K6gfnymrcOG9ArluWjZrYBTh -MdkpgY7cvxVvWDPiVBD9zdMVx4QaT0FxivYRoFDJ3N+yAM3MTBZkD6cvQavfHsIaCdYfTZYiHrrK -D+XtyoRDl0kQtww139XsM+FST+eMHrrguX5EdaNUw6p1uSjO3zJCtjENH5pTf3R/MUafN9LT48kl -qfhX1/GA71seKspQoAz1yPfyfn0wJUqzQelgVEMDLaV82Fp54qZmSQ3zS6txaaMdkKyDEFRE +AcLBcwQAAQoAHRYhBAho4l7L210CwlP5EEjPuvv/MykzBQJjhJAOAAoJEEjPuvv/MykzyRkQAK/+ +e5UOBHhpfYZdI+ulEecGES45tT+F/tm3zWxiEqw98rx5yAiPOvzcxeJkWybVOWZAWZuKhxEtr9mr +mnYLXurtFIz/kDZsF/qbnOR0/qVXw8nSemGJW9eTAoUpgI20AEuIuNcBFWvZbM9QpJzEARIbxUHD +R/tzeAIFD3rd5qth9O8DRpIKZzx4kZvXzDs5aIoteDCCjqY2WHNbcHtZqR4NJ52VuRfCWjHelbLA ++DRZssZjih5ni0OAYVl0xuwHGDJuOecw0x2Hgb148N1mM+Ehd2pmjs1JjlmswlLqeA9byw8SakDo +eT3fn4PD9a3guPw7Ct5YRJFa3BL2xG8XtK71pmRRbxwEYsJPUbRyHPaqnhVYjdqg8YUgCjyQRD/c +EuF8SsYeGmeVnUgVk5GGl1e/TmHA63RhThwnJV0VdUHIzqjkOafr79F0tv+ijOK48pAbrr2vfLCw +mIewlj96WhhKGchbr2B+YAtREDJpRcnkDoib7bzi4nE5Km4nLka0SaG4ALj/A59yE52GZlfSWCs8 +d0lJNaKMmjeDaZLBpYJaMIKagFMrOhWYbKAlE/inXEGnSSNgkq9LM5QvYtuH1tChcmS/vupJcUYj +yr1ywdw9jQ3SHQalskqG9+hp0yz1Xwe3U3QvQZFAOqxkqVTplP0TFIZ/dUTc+ZSB+ZPHCy1d diff --git a/tests/nested/manual/fde-on-classic/classic-model.json b/tests/nested/manual/fde-on-classic/classic-model.json index a76ed73347..2a7ce043bc 100644 --- a/tests/nested/manual/fde-on-classic/classic-model.json +++ b/tests/nested/manual/fde-on-classic/classic-model.json @@ -10,6 +10,9 @@ "grade": "dangerous", "classic": "true", "distribution": "ubuntu", + "serial-authority": [ + "generic" + ], "snaps": [ { "name": "pc", diff --git a/tests/nested/manual/fde-on-classic/task.yaml b/tests/nested/manual/fde-on-classic/task.yaml index 411fb466ab..f17c3c2bcb 100644 --- a/tests/nested/manual/fde-on-classic/task.yaml +++ b/tests/nested/manual/fde-on-classic/task.yaml @@ -57,6 +57,12 @@ execute: | # no reboot required remote.exec not test -f /run/reboot-required + # Check for the generic serial assertion + retry -n 100 sh -c 'remote.exec "snap changes" | MATCH "Done.*Initialize device"' + remote.exec "snap model --serial --assertion" | MATCH "authority-id: generic" + remote.exec "snap model --serial --assertion" | MATCH "brand-id: tcMZ22pMaY5EVwoLozfjM4fR31bko4yj" + remote.exec "snap model --serial --assertion" | MATCH "model: ubuntu-core-22-pc-amd64" + # refresh kernel snap refresh_snap_and_reboot() { diff --git a/tests/nested/manual/muinstaller-real/task.yaml b/tests/nested/manual/muinstaller-real/task.yaml new file mode 100644 index 0000000000..6cf9b93566 --- /dev/null +++ b/tests/nested/manual/muinstaller-real/task.yaml @@ -0,0 +1,246 @@ +summary: End-to-end test for install via muinstaller + +systems: [ubuntu-22.04-64] + +environment: + # Test both encrypted and unencrypted install using the muinstaller + NESTED_ENABLE_TPM/encrypted: true + NESTED_ENABLE_SECURE_BOOT/encrypted: true + + # unencrypted case + NESTED_ENABLE_TPM/plain: false + NESTED_ENABLE_SECURE_BOOT/plain: false + + # ensure we use our latest code + NESTED_BUILD_SNAPD_FROM_CURRENT: true + NESTED_REPACK_KERNEL_SNAP: 
true + NESTED_ENABLE_OVMF: true + # store related setup + STORE_ADDR: localhost:11028 + STORE_DIR: $(pwd)/fake-store-blobdir + # image + IMAGE_MOUNTPOINT: /mnt/cloudimg + +prepare: | + if [ "$TRUST_TEST_KEYS" = "false" ]; then + echo "This test needs test keys to be trusted" + exit + fi + snap install jq + #shellcheck source=tests/lib/store.sh + . "$TESTSLIB"/store.sh + setup_fake_store "$STORE_DIR" + +restore: | + #shellcheck source=tests/lib/store.sh + . "$TESTSLIB"/store.sh + teardown_fake_store "$STORE_DIR" + rm -rf ./classic-root + +execute: | + # shellcheck source=tests/lib/prepare.sh + . "$TESTSLIB/prepare.sh" + #shellcheck source=tests/lib/nested.sh + . "$TESTSLIB"/nested.sh + + # install the snapd deb from spread so we are using the same version to + # validate the seed as well as call preseed, etc. + dpkg -i "$SPREAD_PATH"/../snapd_*.deb + + echo Expose the needed assertions through the fakestore + cp "$TESTSLIB"/assertions/developer1.account "$STORE_DIR/asserts" + cp "$TESTSLIB"/assertions/developer1.account-key "$STORE_DIR/asserts" + cp "$TESTSLIB"/assertions/testrootorg-store.account-key "$STORE_DIR/asserts" + export SNAPPY_FORCE_SAS_URL=http://$STORE_ADDR + + version="$(nested_get_version)" + + # build updated shim + version=22 + snap download --basename=pc --channel="$version/edge" pc + unsquashfs -d pc-gadget pc.snap + echo 'console=ttyS0 systemd.journald.forward_to_console=1' > pc-gadget/cmdline.extra + # use the system-seed-null classic role + sed -i 's/role: system-seed/role: system-seed-null/' pc-gadget/meta/gadget.yaml + echo "Sign the shim binary" + KEY_NAME=$(tests.nested download snakeoil-key) + SNAKEOIL_KEY="$PWD/$KEY_NAME.key" + SNAKEOIL_CERT="$PWD/$KEY_NAME.pem" + tests.nested secboot-sign gadget pc-gadget "$SNAKEOIL_KEY" "$SNAKEOIL_CERT" + snap pack --filename=pc.snap pc-gadget/ + + # get an updated kernel + snap download --basename=pc-kernel --channel="$version/edge" pc-kernel + uc20_build_initramfs_kernel_snap "$PWD/pc-kernel.snap" "$NESTED_ASSETS_DIR" + mv "${NESTED_ASSETS_DIR}"/pc-kernel_*.snap pc-kernel.snap + + # prepare a classic seed + # TODO: + # - create pc-classic custom gadget + # - repacked snapd snap + # (should be as simple as adding "--snap=./local-gadget.snap ...") + gendeveloper1 sign-model < "$TESTSLIB"/assertions/developer1-22-classic-dangerous.json > my.model + snap prepare-image --classic \ + --channel=edge \ + --snap ./pc-kernel.snap \ + --snap ./pc.snap \ + my.model \ + ./classic-seed + # make the seed label more predictable for fake-installer auto-mode + LABEL=classic + mv ./classic-seed/system-seed/systems/* ./classic-seed/system-seed/systems/"$LABEL" + cp -a ./classic-seed/system-seed/ /var/lib/snapd/seed + + # we don't need the fakestore anymore + teardown_fake_store "$STORE_DIR" + + + # build the fake-installer snap + snap install snapcraft --candidate --classic + snap install lxd --candidate + snap set lxd waitready.timeout=240 + lxd waitready + lxd init --auto + echo "Setting up proxy for lxc" + if [ -n "${http_proxy:-}" ]; then + lxd.lxc config set core.proxy_http "$http_proxy" + fi + if [ -n "${https_proxy:-}" ]; then + lxd.lxc config set core.proxy_https "$http_proxy" + fi + (cd "$TESTSLIB"/muinstaller && snapcraft) + MUINSTALLER_SNAP="$(find "$TESTSLIB"/muinstaller/ -maxdepth 1 -name '*.snap')" + echo "found $MUINSTALLER_SNAP" + + # create new disk for the installer to work on and attach to VM + truncate --size=4G fake-disk.img + + NESTED_PARAM_EXTRA="-drive file=$(pwd)/fake-disk.img,if=virtio,snapshot=off" + export 
NESTED_PARAM_EXTRA
+
+ # create a VM and mount a cloud image
+ tests.nested build-image classic
+
+ # TODO: nested classic images do not support secure boot today so
+ # this will not work to test the secure boot installer. So for
+ # now the workaround is to boot classic to create user/ssh
+ # keys, shut it down, convert the disk from qcow2->raw and rename
+ # from classic->core and use nested_start_core_vm (like below)
+ #
+ # start it so that cloud-init creates ssh keys and user
+ nested_start_classic_vm
+
+ # make sure classic image is bootable with snakeoil keys
+ # TODO: move to nested_create_classic_image
+ # XXX: use assets from gadget instead?
+ for s in BOOT/BOOTX64.EFI ubuntu/shimx64.efi; do
+ remote.exec "sudo cp -a /boot/efi/EFI/$s /tmp"
+ remote.exec "sudo chmod 755 /tmp/$(basename $s)"
+ remote.pull /tmp/"$(basename $s)" .
+ nested_secboot_sign_file "$(basename $s)" "$SNAKEOIL_KEY" "$SNAKEOIL_CERT"
+ remote.push "$(basename $s)"
+ remote.exec "sudo mv $(basename $s) /boot/efi/EFI/$s"
+ done
+
+ # push our snap down
+ # TODO: this abuses /var/lib/snapd to store the deb so that mk-initramfs-classic
+ # can pick it up. The real installer will also need a very recent snapd
+ # in its on-disk image to support seeding
+ remote.push "$SPREAD_PATH"/../snapd_*.deb
+ remote.exec "sudo mv snapd_*.deb /var/lib/snapd/"
+ remote.exec "sudo apt install -y /var/lib/snapd/snapd_*.deb"
+
+ # push our seed down
+ # TODO: merge with classic /var/lib/snapd/seed eventually
+ # XXX: port scp -r to remote.push
+ #remote.push ./classic-seed/system-seed/ '~/'
+ sshpass -p ubuntu scp -r -P 8022 -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ./classic-seed/system-seed/ user1@localhost:~/install-seed
+ remote.exec "sudo mv /home/user1/install-seed /var/lib/snapd/"
+
+ # XXX: the code in DeviceManager.SystemAndGadgetInfo() will only work on
+ # classic systems with modeenv right now (which is something we may need
+ # to fix to work from the classic installer).
+ # For now pretend we have a modeenv + remote.exec 'echo "mode=run" | sudo tee -a /var/lib/snapd/modeenv' + remote.exec 'sudo systemctl restart snapd' + + # shutdown the classic vm to install with a core VM that supports + # secboot/tpm + nested_shutdown + sync + + # HACK: convert "classic" qcow2 to raw "core" image because we need + # to boot with OVMF we really should fix this so that classic and + # core VMs are more similar + qemu-img convert -f qcow2 -O raw "$NESTED_IMAGES_DIR/$(nested_get_image_name classic)" "$NESTED_IMAGES_DIR/$(nested_get_image_name core)" + # and we don't need the classic image anymore + # TODO: uncomment + #rm -f "$NESTED_IMAGES_DIR/$(nested_get_image_name classic)" + # TODO: this prevents "nested_prepare_ssh" inside nested_start_core_vm + # from running, we already have a user so this is not needed + IMAGE_NAME="$(nested_get_image_name core)" + touch "$NESTED_IMAGES_DIR/$IMAGE_NAME.configured" + nested_start_core_vm + + # bind mount new seed + remote.exec "sudo mount -o bind /var/lib/snapd/install-seed /var/lib/snapd/seed" + # push the muinstaller + remote.push "$MUINSTALLER_SNAP" + remote.exec "sudo snap install --classic --dangerous $(basename "$MUINSTALLER_SNAP")" + + # TODO: use retry + while true; do + if remote.exec "sudo snap logs muinstaller" | MATCH "install done"; then + break + fi + sleep 5 + done + remote.exec "sudo sync" + + # boot into the just installed drive + nested_shutdown + sync + + # HACK: rename to "core" image because we need to boot with OVMF + # we really should fix this so that classic and core VMs are more similar + mv fake-disk.img "$NESTED_IMAGES_DIR/$IMAGE_NAME" + unset NESTED_PARAM_EXTRA + + # remove cached image + # TODO: find a more elegant way + rm -f "$NESTED_IMAGES_DIR"/ubuntu-core-current.img + + # and start again + # TODO: make this nicer + export NESTED_TPM_NO_RESTART=1 + nested_start_core_vm + unset NESTED_TPM_NO_RESTART + + # things look fine + remote.exec "cat /etc/os-release" | MATCH 'NAME="Ubuntu"' + remote.exec "snap changes" | MATCH "Done.* Initialize system state" + remote.exec "snap list" | MATCH pc-kernel + + # check encryption + if [ "$NESTED_ENABLE_TPM" = true ]; then + remote.exec "sudo test -d /var/lib/snapd/device/fde" + remote.exec "sudo test -e /var/lib/snapd/device/fde/marker" + remote.exec "sudo test -e /var/lib/snapd/device/fde/marker" + remote.exec "sudo blkid /dev/disk/by-label/ubuntu-data-enc" | MATCH crypto_LUKS + + echo "Ensure recovery keys are available on classic FDE too" + remote.exec "sudo snap recovery --show-keys" > show-keys.out + MATCH 'recovery:\s+[0-9]{5}-[0-9]{5}-[0-9]{5}-[0-9]{5}-[0-9]{5}-[0-9]{5}-[0-9]{5}-[0-9]{5}' < show-keys.out + + # check disk mappings + remote.exec "sudo snap install jq" + # TODO: no ubuntu-save right now because: + # "ERROR cannot store device key pair: internal error: cannot access device keypair manager if ubuntu-save is unavailable" + #DISK_MAPPINGS=(/run/mnt/ubuntu-save/device/disk-mapping.json + # /run/mnt/data/var/lib/snapd/device/disk-mapping.json) + DISK_MAPPINGS=(/run/mnt/data/var/lib/snapd/device/disk-mapping.json) + for DM in "${DISK_MAPPINGS[@]}"; do + remote.exec "sudo cat $DM | jq '.pc.\"structure-encryption\".\"ubuntu-save\".method'" | MATCH '"LUKS"' + remote.exec "sudo cat $DM | jq '.pc.\"structure-encryption\".\"ubuntu-data\".method'" | MATCH '"LUKS"' + done + fi diff --git a/tests/nested/manual/fakeinstaller/task.yaml b/tests/nested/manual/muinstaller/task.yaml index 02d2374b11..de3984f7d6 100644 --- 
a/tests/nested/manual/fakeinstaller/task.yaml +++ b/tests/nested/manual/muinstaller/task.yaml @@ -91,8 +91,7 @@ execute: | # TODO: # - create pc-classic custom gadget # - repacked snapd snap - # (should be as simple as addinga "--snap=./local-gadget.snap ...") - LABEL="$(date +%Y%m%d)" + # (should be as simple as adding "--snap=./local-gadget.snap ...") gendeveloper1 sign-model < "$TESTSLIB"/assertions/developer1-22-classic-dangerous.json > my.model snap prepare-image --classic \ --channel=edge \ @@ -100,6 +99,9 @@ execute: | --snap ./pc-new.snap \ my.model \ ./classic-seed + # make the seed label more predictable for fake-installer auto-mode + LABEL=classic + mv ./classic-seed/system-seed/systems/* ./classic-seed/system-seed/systems/"$LABEL" cp -a ./classic-seed/system-seed/ /var/lib/snapd/seed # do some light checking that the system is valid @@ -107,14 +109,14 @@ execute: | test-snapd-curl.curl -s --unix-socket /run/snapd.socket http://localhost/v2/systems/"$LABEL" > system jq '.result.model.distribution' system | MATCH "ubuntu" - # build fakeinstaller and put in place - go build -o fakeinstaller "$TESTSLIB"/fakeinstaller/main.go + # build muinstaller and put in place + go build -o muinstaller "$TESTSLIB"/muinstaller/main.go # create fake disk for the installer to work on truncate --size=4G fake-disk.img loop_device=$(losetup --show -f ./fake-disk.img) # and "install" the current seed to the fake disk - ./fakeinstaller "$LABEL" "$loop_device" "$TESTSLIB"/fakeinstaller/mk-classic-rootfs.sh + ./muinstaller "$LABEL" "$TESTSLIB"/muinstaller/mk-classic-rootfs.sh "$loop_device" # validate that the fake installer created the expected partitions sfdisk -d "$loop_device" > fdisk_output MATCH "${loop_device}p1 .* name=\"BIOS Boot\"" < fdisk_output diff --git a/testutil/containschecker.go b/testutil/containschecker.go index 187c269db2..644ef1583b 100644 --- a/testutil/containschecker.go +++ b/testutil/containschecker.go @@ -179,39 +179,63 @@ func (c *deepUnsortedMatchChecker) Check(params []interface{}, _ []string) (bool return false, fmt.Sprintf("containers are of different types: %s != %s", container1.Kind(), container2.Kind()) } - if container1.Kind() != reflect.Map && container1.Kind() != reflect.Slice && container1.Kind() != reflect.Array { - return false, fmt.Sprintf("'%s' is not a supported type: must be slice, array, map or nil", container1.Kind().String()) + switch container1.Kind() { + case reflect.Array, reflect.Slice: + return deepSequenceMatch(container1, container2) + case reflect.Map: + return deepMapMatch(container1, container2) + default: + return false, fmt.Sprintf("'%s' is not a supported type: must be slice, array or map", container1.Kind().String()) } +} - if container1.Type().Comparable() && params[0] == params[1] { - return true, "" +func deepMapMatch(container1, container2 reflect.Value) (bool, string) { + if valid, output := validateContainerTypesAndLengths(container1, container2); !valid { + return false, output } - if container1.Len() != container2.Len() { - return false, fmt.Sprintf("containers have different lengths: %d != %d", container1.Len(), container2.Len()) + switch container1.Type().Elem().Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + // only run the unsorted match if the map values are containers + default: + if !reflect.DeepEqual(container1.Interface(), container2.Interface()) { + return false, "maps don't match" + } + return true, "" } - switch container1.Kind() { - case reflect.Array, reflect.Slice: - return deepSequenceMatch(container1, 
container2) + for _, key := range container1.MapKeys() { + el1 := container1.MapIndex(key) + el2 := container2.MapIndex(key) - case reflect.Map: - map1 := container1.Interface() - map2 := container2.Interface() - if !reflect.DeepEqual(map1, map2) { - return false, "maps don't match" + absent := el2 == reflect.Value{} + if absent { + return false, fmt.Sprintf("key %q from one map is absent from the other map", key) } - return true, "" + var ok bool + var msg string + switch el1.Kind() { + case reflect.Array, reflect.Slice: + ok, msg = deepSequenceMatch(el1, el2) + case reflect.Map: + ok, msg = deepMapMatch(el1, el2) + } - default: - return false, fmt.Sprintf("%T is not a supported container. Must be a slice, an array or a map", container1) + if !ok { + return false, msg + } } + + return true, "" } -func deepSequenceMatch(container1 reflect.Value, container2 reflect.Value) (bool, string) { - matched := make([]bool, container1.Len()) +func deepSequenceMatch(container1, container2 reflect.Value) (bool, string) { + if valid, output := validateContainerTypesAndLengths(container1, container2); !valid { + return false, output + } + matched := make([]bool, container1.Len()) out: for i := 0; i < container1.Len(); i++ { el1 := container1.Index(i).Interface() @@ -232,3 +256,20 @@ out: return true, "" } + +func validateContainerTypesAndLengths(container1, container2 reflect.Value) (bool, string) { + if container1.Len() != container2.Len() { + return false, fmt.Sprintf("containers have different lengths: %d != %d", container1.Len(), container2.Len()) + } else if container1.Type().Elem() != container2.Type().Elem() { + return false, fmt.Sprintf("containers have different element types: %s != %s", container1.Type().Elem(), container2.Type().Elem()) + } + + if container1.Kind() == reflect.Map && container2.Kind() == reflect.Map { + keyType1, keyType2 := container1.Type().Key(), container2.Type().Key() + if keyType1 != keyType2 { + return false, fmt.Sprintf("maps have different key types: %s != %s", keyType1, keyType2) + } + } + + return true, "" +} diff --git a/testutil/containschecker_test.go b/testutil/containschecker_test.go index f15b2a6547..38fe30d4ca 100644 --- a/testutil/containschecker_test.go +++ b/testutil/containschecker_test.go @@ -267,6 +267,17 @@ func (*containsCheckerSuite) TestDeepUnsortedMatchesMapSuccess(c *check.C) { c.Check(map2, DeepUnsortedMatches, map1) } +func (*containsCheckerSuite) TestDeepUnsortedMatchesMapStructFail(c *check.C) { + map1 := map[string]example{ + "a": {a: "a", b: map[string]int{"a": 2, "b": 1}}, + } + map2 := map[string]example{ + "a": {a: "a", b: map[string]int{"a": 1, "b": 2}}, + } + + testCheck(c, DeepUnsortedMatches, false, "maps don't match", map1, map2) +} + func (*containsCheckerSuite) TestDeepUnsortedMatchesMapUnmatchedKeyFailure(c *check.C) { map1 := map[string]int{"a": 1, "c": 2} map2 := map[string]int{"a": 1, "b": 2} @@ -287,16 +298,12 @@ func (*containsCheckerSuite) TestDeepUnsortedMatchesDifferentTypeFailure(c *chec testCheck(c, DeepUnsortedMatches, false, "containers are of different types: slice != array", []int{}, [1]int{}) } -func (*containsCheckerSuite) TestDeepUnsortedMatchesDifferentLengthFailure(c *check.C) { - testCheck(c, DeepUnsortedMatches, false, "containers have different lengths: 1 != 2", []int{1}, []int{1, 1}) -} - -func (*containsCheckerSuite) TestDeepUnsortedMatchesUnsupportedTypeFailure(c *check.C) { - testCheck(c, DeepUnsortedMatches, false, "'int' is not a supported type: must be slice, array, map or nil", 1, 2) +func 
(*containsCheckerSuite) TestDeepUnsortedMatchesDifferentElementType(c *check.C) { + testCheck(c, DeepUnsortedMatches, false, "containers have different element types: int != string", []int{1}, []string{"a"}) } -func (*containsCheckerSuite) TestDeepUnsortedMatchesUnsupportedPointerType(c *check.C) { - testCheck(c, DeepUnsortedMatches, false, "'ptr' is not a supported type: must be slice, array, map or nil", &[]string{"a", "b"}, &[]string{"b", "a"}) +func (*containsCheckerSuite) TestDeepUnsortedMatchesDifferentLengthFailure(c *check.C) { + testCheck(c, DeepUnsortedMatches, false, "containers have different lengths: 1 != 2", []int{1}, []int{1, 1}) } func (*containsCheckerSuite) TestDeepUnsortedMatchesNilArgFailure(c *check.C) { @@ -306,3 +313,60 @@ func (*containsCheckerSuite) TestDeepUnsortedMatchesNilArgFailure(c *check.C) { func (*containsCheckerSuite) TestDeepUnsortedMatchesBothNilArgSuccess(c *check.C) { c.Check(nil, DeepUnsortedMatches, nil) } + +func (*containsCheckerSuite) TestDeepUnsortedMatchesNonContainerValues(c *check.C) { + testCheck(c, DeepUnsortedMatches, false, "'string' is not a supported type: must be slice, array or map", "a", "a") + testCheck(c, DeepUnsortedMatches, false, "'int' is not a supported type: must be slice, array or map", 1, 2) + testCheck(c, DeepUnsortedMatches, false, "'bool' is not a supported type: must be slice, array or map", true, false) + testCheck(c, DeepUnsortedMatches, false, "'ptr' is not a supported type: must be slice, array or map", &[]string{"a", "b"}, &[]string{"a", "b"}) + testCheck(c, DeepUnsortedMatches, false, "'func' is not a supported type: must be slice, array or map", func() {}, func() {}) +} + +func (*containsCheckerSuite) TestDeepUnsortedMatchesMapsOfSlices(c *check.C) { + map1 := map[string][]string{"a": {"foo", "bar"}, "b": {"foo", "bar"}} + map2 := map[string][]string{"a": {"bar", "foo"}, "b": {"bar", "foo"}} + + c.Check(map1, DeepUnsortedMatches, map2) +} + +func (*containsCheckerSuite) TestDeepUnsortedMatchesMapsDifferentKeyTypes(c *check.C) { + map1 := map[string][]string{"a": {"foo", "bar"}} + map2 := map[int][]string{1: {"bar", "foo"}} + + testCheck(c, DeepUnsortedMatches, false, "maps have different key types: string != int", map1, map2) +} + +func (*containsCheckerSuite) TestDeepUnsortedMatchesMapsDifferentValueTypes(c *check.C) { + map1 := map[string][]string{"a": {"foo", "bar"}} + map2 := map[string][2]string{"a": {"foo", "bar"}} + + testCheck(c, DeepUnsortedMatches, false, "containers have different element types: []string != [2]string", map1, map2) +} + +func (*containsCheckerSuite) TestDeepUnsortedMatchesMapsDifferentLengths(c *check.C) { + map1 := map[string][]string{"a": {"foo", "bar"}, "b": {"foo", "bar"}} + map2 := map[string][]string{"a": {"bar", "foo"}} + + testCheck(c, DeepUnsortedMatches, false, "containers have different lengths: 2 != 1", map1, map2) +} + +func (*containsCheckerSuite) TestDeepUnsortedMatchesMapsMissingKey(c *check.C) { + map1 := map[string][]string{"a": {"foo", "bar"}} + map2 := map[string][]string{"b": {"bar", "foo"}} + + testCheck(c, DeepUnsortedMatches, false, "key \"a\" from one map is absent from the other map", map1, map2) +} + +func (*containsCheckerSuite) TestDeepUnsortedMatchesNestedMaps(c *check.C) { + map1 := map[string]map[string][]string{"a": {"b": []string{"foo", "bar"}}} + map2 := map[string]map[string][]string{"a": {"b": []string{"bar", "foo"}}} + c.Check(map1, DeepUnsortedMatches, map2) + + map1 = map[string]map[string][]string{"a": {"b": []string{"foo", "bar"}}} + map2 = 
map[string]map[string][]string{"a": {"c": []string{"bar", "foo"}}} + testCheck(c, DeepUnsortedMatches, false, "key \"b\" from one map is absent from the other map", map1, map2) + + map1 = map[string]map[string][]string{"a": {"b": []string{"foo", "bar"}}, "c": {"b": []string{"foo"}}} + map2 = map[string]map[string][]string{"a": {"b": []string{"bar", "foo"}}, "c": {"b": []string{"bar"}}} + testCheck(c, DeepUnsortedMatches, false, "element [0]=foo was unmatched in the second container", map1, map2) +} |
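The reworked DeepUnsortedMatches checker above now recurses into map values that are themselves slices, arrays or maps, while still requiring matching key types, element types and lengths. A minimal usage sketch from an external test suite follows; it assumes the checker is consumed as testutil.DeepUnsortedMatches together with gopkg.in/check.v1, as in snapd's own test suites, and the package, suite and test names here are made up for illustration.

```go
package example_test

import (
	"testing"

	. "gopkg.in/check.v1"

	"github.com/snapcore/snapd/testutil"
)

// hook gocheck into "go test"
func Test(t *testing.T) { TestingT(t) }

type exampleSuite struct{}

var _ = Suite(&exampleSuite{})

func (s *exampleSuite) TestInterfaceNamesOrderIgnored(c *C) {
	// order inside the slices is ignored, but keys, lengths and
	// element types must match, as exercised by the tests above
	got := map[string][]string{"plugs": {"network", "home"}, "slots": {"x11"}}
	want := map[string][]string{"plugs": {"home", "network"}, "slots": {"x11"}}
	c.Check(got, testutil.DeepUnsortedMatches, want)
}
```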
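For the encryption check in the muinstaller-real task above, the jq expressions imply that disk-mapping.json maps a volume name ("pc") to per-structure encryption metadata. A rough Go sketch of reading that file follows; the schema is only inferred from the jq path used in the test (.pc."structure-encryption"."ubuntu-data".method), not taken from the actual snapd source, and the file path is the one the test queries.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// shape inferred from the jq query in the test:
//   .pc."structure-encryption"."ubuntu-data".method == "LUKS"
type diskMapping map[string]struct {
	StructureEncryption map[string]struct {
		Method string `json:"method"`
	} `json:"structure-encryption"`
}

func main() {
	data, err := os.ReadFile("/run/mnt/data/var/lib/snapd/device/disk-mapping.json")
	if err != nil {
		panic(err)
	}
	var m diskMapping
	if err := json.Unmarshal(data, &m); err != nil {
		panic(err)
	}
	for _, part := range []string{"ubuntu-data", "ubuntu-save"} {
		// on an encrypted install the test expects "LUKS" here
		fmt.Printf("%s: %s\n", part, m["pc"].StructureEncryption[part].Method)
	}
}
```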
