diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 000000000000..b8f14b2b16c3 Binary files /dev/null and b/.DS_Store differ diff --git a/client/client_test.go b/client/client_test.go index 49b2fe7b6752..ae46e93236fe 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -38,6 +38,7 @@ import ( "github.com/containerd/containerd/snapshots" "github.com/containerd/continuity/fs/fstest" "github.com/distribution/reference" + intotov1 "github.com/in-toto/attestation/go/v1" intoto "github.com/in-toto/in-toto-golang/in_toto" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" @@ -71,6 +72,7 @@ import ( "golang.org/x/crypto/ssh/agent" "golang.org/x/sync/errgroup" "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/structpb" ) func init() { @@ -8171,7 +8173,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox) { require.Equal(t, len(att.Layers), len(att.Img.RootFS.DiffIDs)) require.Equal(t, len(att.Img.History), 0) - var attest intoto.Statement + var attest intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) purls := map[string]string{} @@ -8190,7 +8192,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox) { require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType) require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate) - subjects := []intoto.Subject{ + subjects := []intotov1.ResourceDescriptor{ { Name: purls[targets[0]], Digest: map[string]string{ @@ -8206,13 +8208,13 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox) { } require.Equal(t, subjects, attest.Subject) - var attest2 intoto.Statement + var attest2 intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[1], &attest2)) require.Equal(t, "https://in-toto.io/Statement/v0.1", attest2.Type) require.Equal(t, 
"https://example.com/attestations2/v1.0", attest2.PredicateType) require.Nil(t, attest2.Predicate) - subjects = []intoto.Subject{{ + subjects = []intotov1.ResourceDescriptor{{ Name: "/attestation.json", Digest: map[string]string{ "sha256": successDigest.Encoded(), @@ -8253,7 +8255,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) for _, p := range ps { - var attest intoto.Statement + var attest intotov1.Statement dt, err := os.ReadFile(path.Join(dir, strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation.json")) require.NoError(t, err) require.NoError(t, json.Unmarshal(dt, &attest)) @@ -8262,12 +8264,12 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox) { require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType) require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate) - require.Equal(t, []intoto.Subject{{ + require.Equal(t, []intotov1.ResourceDescriptor{{ Name: "greeting", Digest: result.ToDigestMap(digest.Canonical.FromString("hello " + platforms.Format(p) + "!")), }}, attest.Subject) - var attest2 intoto.Statement + var attest2 intotov1.Statement dt, err = os.ReadFile(path.Join(dir, strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation2.json")) require.NoError(t, err) require.NoError(t, json.Unmarshal(dt, &attest2)) @@ -8275,7 +8277,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox) { require.Equal(t, "https://in-toto.io/Statement/v0.1", attest2.Type) require.Equal(t, "https://example.com/attestations2/v1.0", attest2.PredicateType) require.Nil(t, attest2.Predicate) - subjects := []intoto.Subject{{ + subjects := []intotov1.ResourceDescriptor{{ Name: "/attestation.json", Digest: map[string]string{ "sha256": successDigest.Encoded(), @@ -8311,7 +8313,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) for _, p := range ps { - var attest intoto.Statement + var attest 
intotov1.Statement item := m[path.Join(strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation.json")] require.NotNil(t, item) require.NoError(t, json.Unmarshal(item.Data, &attest)) @@ -8320,12 +8322,12 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox) { require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType) require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate) - require.Equal(t, []intoto.Subject{{ + require.Equal(t, []intotov1.ResourceDescriptor{{ Name: "greeting", Digest: result.ToDigestMap(digest.Canonical.FromString("hello " + platforms.Format(p) + "!")), }}, attest.Subject) - var attest2 intoto.Statement + var attest2 intotov1.Statement item = m[path.Join(strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation2.json")] require.NotNil(t, item) require.NoError(t, json.Unmarshal(item.Data, &attest2)) @@ -8333,7 +8335,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox) { require.Equal(t, "https://in-toto.io/Statement/v0.1", attest2.Type) require.Equal(t, "https://example.com/attestations2/v1.0", attest2.PredicateType) require.Nil(t, attest2.Predicate) - subjects := []intoto.Subject{{ + subjects := []intotov1.ResourceDescriptor{{ Name: "/attestation.json", Digest: map[string]string{ "sha256": successDigest.Encoded(), @@ -8464,7 +8466,7 @@ func testAttestationDefaultSubject(t *testing.T, sb integration.Sandbox) { atts := imgs.Filter("unknown/unknown") require.Equal(t, len(ps), len(atts.Images)) for i, att := range atts.Images { - var attest intoto.Statement + var attest intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) @@ -8472,7 +8474,7 @@ func testAttestationDefaultSubject(t *testing.T, sb integration.Sandbox) { require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate) name := 
fmt.Sprintf("pkg:docker/%s/buildkit/testattestationsemptysubject@latest?platform=%s", url.QueryEscape(registry), url.QueryEscape(platforms.Format(ps[i]))) - subjects := []intoto.Subject{{ + subjects := []intotov1.ResourceDescriptor{{ Name: name, Digest: map[string]string{ "sha256": bases[i].Desc.Digest.Encoded(), @@ -8531,14 +8533,13 @@ func testAttestationBundle(t *testing.T, sb integration.Sandbox) { } res.AddRef(pk, ref) - stmt := intoto.Statement{ - StatementHeader: intoto.StatementHeader{ - Type: intoto.StatementInTotoV01, - PredicateType: "https://example.com/attestations/v1.0", - }, - Predicate: map[string]interface{}{ - "foo": "1", - }, + pred, _ := structpb.NewStruct(map[string]interface{}{ + "foo": "1", + }) + stmt := &intotov1.Statement{ + Type: intotov1.StatementTypeUri, + PredicateType: "https://example.com/attestations/v1.0", + Predicate: pred, } buff := bytes.NewBuffer(nil) enc := json.NewEncoder(buff) @@ -8617,13 +8618,13 @@ func testAttestationBundle(t *testing.T, sb integration.Sandbox) { require.Equal(t, len(ps)*1, len(atts.Images)) for i, att := range atts.Images { require.Equal(t, 1, len(att.LayersRaw)) - var attest intoto.Statement + var attest intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType) require.Equal(t, map[string]interface{}{"foo": "1"}, attest.Predicate) name := fmt.Sprintf("pkg:docker/%s/buildkit/testattestationsbundle@latest?platform=%s", url.QueryEscape(registry), url.QueryEscape(platforms.Format(ps[i]))) - subjects := []intoto.Subject{{ + subjects := []intotov1.ResourceDescriptor{{ Name: name, Digest: map[string]string{ "sha256": bases[i].Desc.Digest.Encoded(), @@ -8840,7 +8841,7 @@ EOF require.Equal(t, 2, len(imgs.Images)) att := imgs.Find("unknown/unknown") - attest := intoto.Statement{} + attest := intotov1.Statement{} require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) require.Equal(t, 
"https://in-toto.io/Statement/v0.1", attest.Type) require.Equal(t, intoto.PredicateSPDX, attest.PredicateType) @@ -8872,7 +8873,7 @@ EOF require.Equal(t, 2, len(imgs.Images)) att = imgs.Find("unknown/unknown") - attest = intoto.Statement{} + attest = intotov1.Statement{} require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) require.Equal(t, intoto.PredicateSPDX, attest.PredicateType) @@ -8904,7 +8905,7 @@ EOF require.Equal(t, 2, len(imgs.Images)) att = imgs.Find("unknown/unknown") - attest = intoto.Statement{} + attest = intotov1.Statement{} require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) require.Equal(t, intoto.PredicateSPDX, attest.PredicateType) @@ -9070,7 +9071,7 @@ EOF att := imgs.Find("unknown/unknown") require.NotNil(t, att) - attest := intoto.Statement{} + attest := intotov1.Statement{} require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) require.Equal(t, intoto.PredicateSPDX, attest.PredicateType) @@ -9213,7 +9214,7 @@ func testSBOMSupplements(t *testing.T, sb integration.Sandbox) { att := imgs.Find("unknown/unknown") attest := struct { - intoto.StatementHeader + intotov1.Statement Predicate spdx.Document }{} require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) diff --git a/docs/attestations/attestation-storage.md b/docs/attestations/attestation-storage.md index 18095d05e13a..cfeb077dc5fd 100644 --- a/docs/attestations/attestation-storage.md +++ b/docs/attestations/attestation-storage.md @@ -60,7 +60,7 @@ The contents of each layer will be a blob dependent on its `mediaType`. 
```json { - "_type": "https://in-toto.io/Statement/v0.1", + "_type": "https://in-toto.io/Statement/v1", "subject": [ { "name": "", @@ -198,7 +198,7 @@ Attestation body containing the SBOM data listing the packages used during the b ```json { - "_type": "https://in-toto.io/Statement/v0.1", + "_type": "https://in-toto.io/Statement/v1", "predicateType": "https://spdx.dev/Document", "subject": [ { diff --git a/docs/attestations/sbom.md b/docs/attestations/sbom.md index 9cb008775a36..6e2bf0009428 100644 --- a/docs/attestations/sbom.md +++ b/docs/attestations/sbom.md @@ -109,7 +109,7 @@ the following SBOM: ```json { - "_type": "https://in-toto.io/Statement/v0.1", + "_type": "https://in-toto.io/Statement/v1", "predicateType": "https://spdx.dev/Document", "subject": [ { diff --git a/docs/attestations/slsa-definitions.md b/docs/attestations/slsa-definitions.md index 313d8114f440..ddcf094f27ba 100644 --- a/docs/attestations/slsa-definitions.md +++ b/docs/attestations/slsa-definitions.md @@ -400,7 +400,7 @@ in a provenance attestation similar to the following, for a `mode=min` build: ```json { - "_type": "https://in-toto.io/Statement/v0.1", + "_type": "https://in-toto.io/Statement/v1", "predicateType": "https://slsa.dev/provenance/v0.2", "subject": [ { @@ -463,7 +463,7 @@ For a similar build, but with `mode=max`: ```json { - "_type": "https://in-toto.io/Statement/v0.1", + "_type": "https://in-toto.io/Statement/v1", "predicateType": "https://slsa.dev/provenance/v0.2", "subject": [ { diff --git a/exporter/attestation/make.go b/exporter/attestation/make.go index 8ed910c1e8d3..3240a45c3e7c 100644 --- a/exporter/attestation/make.go +++ b/exporter/attestation/make.go @@ -6,7 +6,7 @@ import ( "os" "github.com/containerd/continuity/fs" - intoto "github.com/in-toto/in-toto-golang/in_toto" + intotov1 "github.com/in-toto/attestation/go/v1" "github.com/moby/buildkit/exporter" gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/session" @@ -14,6 +14,7 @@ 
import ( "github.com/moby/buildkit/solver/result" "github.com/pkg/errors" "golang.org/x/sync/errgroup" + "google.golang.org/protobuf/types/known/structpb" ) // ReadAll reads the content of an attestation. @@ -56,9 +57,9 @@ func ReadAll(ctx context.Context, s session.Group, att exporter.Attestation) ([] // MakeInTotoStatements iterates over all provided result attestations and // generates intoto attestation statements. -func MakeInTotoStatements(ctx context.Context, s session.Group, attestations []exporter.Attestation, defaultSubjects []intoto.Subject) ([]intoto.Statement, error) { +func MakeInTotoStatements(ctx context.Context, s session.Group, attestations []exporter.Attestation, defaultSubjects []*intotov1.ResourceDescriptor) ([]*intotov1.Statement, error) { eg, ctx := errgroup.WithContext(ctx) - statements := make([]intoto.Statement, len(attestations)) + statements := make([]*intotov1.Statement, len(attestations)) for i, att := range attestations { i, att := i, att @@ -74,7 +75,7 @@ func MakeInTotoStatements(ctx context.Context, s session.Group, attestations []e if err != nil { return err } - statements[i] = *stmt + statements[i] = stmt case gatewaypb.AttestationKindBundle: return errors.New("bundle attestation kind must be un-bundled first") } @@ -87,13 +88,13 @@ func MakeInTotoStatements(ctx context.Context, s session.Group, attestations []e return statements, nil } -func makeInTotoStatement(ctx context.Context, content []byte, attestation exporter.Attestation, defaultSubjects []intoto.Subject) (*intoto.Statement, error) { +func makeInTotoStatement(ctx context.Context, content []byte, attestation exporter.Attestation, defaultSubjects []*intotov1.ResourceDescriptor) (*intotov1.Statement, error) { if len(attestation.InToto.Subjects) == 0 { attestation.InToto.Subjects = []result.InTotoSubject{{ Kind: gatewaypb.InTotoSubjectKindSelf, }} } - subjects := []intoto.Subject{} + subjects := []*intotov1.ResourceDescriptor{} for _, subject := range 
attestation.InToto.Subjects { subjectName := "_" if subject.Name != "" { @@ -110,14 +111,14 @@ func makeInTotoStatement(ctx context.Context, content []byte, attestation export } for _, name := range subjectNames { - subjects = append(subjects, intoto.Subject{ + subjects = append(subjects, &intotov1.ResourceDescriptor{ Name: name, Digest: defaultSubject.Digest, }) } } case gatewaypb.InTotoSubjectKindRaw: - subjects = append(subjects, intoto.Subject{ + subjects = append(subjects, &intotov1.ResourceDescriptor{ Name: subjectName, Digest: result.ToDigestMap(subject.Digest...), }) @@ -126,13 +127,21 @@ func makeInTotoStatement(ctx context.Context, content []byte, attestation export } } - stmt := intoto.Statement{ - StatementHeader: intoto.StatementHeader{ - Type: intoto.StatementInTotoV01, - PredicateType: attestation.InToto.PredicateType, - Subject: subjects, - }, - Predicate: json.RawMessage(content), + var pred map[string]interface{} + err := json.Unmarshal(content, &pred) + if err != nil { + return nil, errors.Wrap(err, "failed to unmarshal attestation predicate") + } + predicate, err := structpb.NewStruct(pred) + if err != nil { + return nil, errors.Wrap(err, "failed to convert attestation predicate to struct") + } + + stmt := intotov1.Statement{ + Type: intotov1.StatementTypeUri, + Subject: subjects, + PredicateType: attestation.InToto.PredicateType, + Predicate: predicate, } return &stmt, nil } diff --git a/exporter/attestation/unbundle.go b/exporter/attestation/unbundle.go index a2120d7975e1..0cdc1e597725 100644 --- a/exporter/attestation/unbundle.go +++ b/exporter/attestation/unbundle.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/containerd/continuity/fs" - intoto "github.com/in-toto/in-toto-golang/in_toto" + intotov1 "github.com/in-toto/attestation/go/v1" "github.com/moby/buildkit/exporter" gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/session" @@ -137,7 +137,7 @@ func unbundle(ctx context.Context, root string, bundle 
exporter.Attestation) ([] return nil, err } dec := json.NewDecoder(f) - var stmt intoto.Statement + var stmt intotov1.Statement if err := dec.Decode(&stmt); err != nil { return nil, errors.Wrap(err, "cannot decode in-toto statement") } diff --git a/exporter/containerimage/writer.go b/exporter/containerimage/writer.go index b43761aeda85..983906ecbdd7 100644 --- a/exporter/containerimage/writer.go +++ b/exporter/containerimage/writer.go @@ -14,6 +14,7 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/labels" "github.com/containerd/containerd/platforms" + intotov1 "github.com/in-toto/attestation/go/v1" intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" @@ -248,7 +249,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session return nil, err } - var defaultSubjects []intoto.Subject + var defaultSubjects []*intotov1.ResourceDescriptor for _, name := range strings.Split(opts.ImageName, ",") { if name == "" { continue @@ -257,7 +258,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session if err != nil { return nil, err } - defaultSubjects = append(defaultSubjects, intoto.Subject{ + defaultSubjects = append(defaultSubjects, &intotov1.ResourceDescriptor{ Name: pl, Digest: result.ToDigestMap(desc.Digest), }) @@ -482,7 +483,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima }, &configDesc, nil } -func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *ImageCommitOpts, p exptypes.Platform, target string, statements []intoto.Statement) (*ocispecs.Descriptor, error) { +func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *ImageCommitOpts, p exptypes.Platform, target string, statements []*intotov1.Statement) (*ocispecs.Descriptor, error) { var ( manifestType = ocispecs.MediaTypeImageManifest configType = 
ocispecs.MediaTypeImageConfig diff --git a/exporter/local/fs.go b/exporter/local/fs.go index b24d6aacda7a..d37b0e1a6b5a 100644 --- a/exporter/local/fs.go +++ b/exporter/local/fs.go @@ -13,7 +13,7 @@ import ( "time" "github.com/docker/docker/pkg/idtools" - intoto "github.com/in-toto/in-toto-golang/in_toto" + intotov1 "github.com/in-toto/attestation/go/v1" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/exporter" "github.com/moby/buildkit/exporter/attestation" @@ -145,7 +145,7 @@ func CreateFS(ctx context.Context, sessionID string, k string, ref cache.Immutab return nil, nil, err } if len(attestations) > 0 { - subjects := []intoto.Subject{} + subjects := []*intotov1.ResourceDescriptor{} err = outputFS.Walk(ctx, "", func(path string, entry fs.DirEntry, err error) error { if err != nil { return err @@ -162,7 +162,7 @@ func CreateFS(ctx context.Context, sessionID string, k string, ref cache.Immutab if _, err := io.Copy(d.Hash(), f); err != nil { return err } - subjects = append(subjects, intoto.Subject{ + subjects = append(subjects, &intotov1.ResourceDescriptor{ Name: path, Digest: result.ToDigestMap(d.Digest()), }) diff --git a/frontend/dockerfile/dockerfile_provenance_test.go b/frontend/dockerfile/dockerfile_provenance_test.go index e66fff771b7a..9c3ad09b2469 100644 --- a/frontend/dockerfile/dockerfile_provenance_test.go +++ b/frontend/dockerfile/dockerfile_provenance_test.go @@ -20,7 +20,7 @@ import ( "github.com/containerd/containerd/content/proxy" "github.com/containerd/containerd/platforms" "github.com/containerd/continuity/fs/fstest" - intoto "github.com/in-toto/in-toto-golang/in_toto" + intotov1 "github.com/in-toto/attestation/go/v1" provenanceCommon "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client" @@ -118,9 +118,9 @@ RUN echo "ok" > /foo require.NotNil(t, att) require.Equal(t, att.Desc.Annotations["vnd.docker.reference.digest"], 
string(img.Desc.Digest)) require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") - var attest intoto.Statement + var attest intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) - require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://in-toto.io/Statement/v1", attest.Type) require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const type stmtT struct { @@ -314,9 +314,9 @@ COPY myapp.Dockerfile / require.NotNil(t, att) require.Equal(t, att.Desc.Annotations["vnd.docker.reference.digest"], string(img.Desc.Digest)) require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") - var attest intoto.Statement + var attest intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) - require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://in-toto.io/Statement/v1", attest.Type) require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const type stmtT struct { @@ -449,9 +449,9 @@ RUN echo "ok-$TARGETARCH" > /foo att := imgs.FindAttestation(p) require.NotNil(t, att) require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") - var attest intoto.Statement + var attest intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) - require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://in-toto.io/Statement/v1", attest.Type) require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const type stmtT struct { @@ -630,9 +630,9 @@ func testClientFrontendProvenance(t *testing.T, sb integration.Sandbox) { att := imgs.FindAttestation("linux/arm64") require.NotNil(t, att) require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") - var attest intoto.Statement + 
var attest intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) - require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://in-toto.io/Statement/v1", attest.Type) require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const type stmtT struct { @@ -663,9 +663,9 @@ func testClientFrontendProvenance(t *testing.T, sb integration.Sandbox) { att = imgs.FindAttestation("linux/amd64") require.NotNil(t, att) require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") - attest = intoto.Statement{} + attest = intotov1.Statement{} require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) - require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://in-toto.io/Statement/v1", attest.Type) require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const stmt = stmtT{} @@ -775,9 +775,9 @@ func testClientLLBProvenance(t *testing.T, sb integration.Sandbox) { att := imgs.FindAttestation(nativePlatform) require.NotNil(t, att) require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") - var attest intoto.Statement + var attest intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) - require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://in-toto.io/Statement/v1", attest.Type) require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const type stmtT struct { diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index 342e11de4b26..8028335fabdf 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "fmt" + intotov1 "github.com/in-toto/attestation/go/v1" "io" "net/http" "net/http/httptest" @@ -6234,7 +6235,7 @@ EOF 
att := imgs.Find("unknown/unknown") require.Equal(t, 1, len(att.LayersRaw)) - var attest intoto.Statement + var attest intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) require.Equal(t, intoto.PredicateSPDX, attest.PredicateType) @@ -6353,7 +6354,7 @@ FROM base att := imgs.Find("unknown/unknown") require.Equal(t, 1, len(att.LayersRaw)) - var attest intoto.Statement + var attest intotov1.Statement require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) require.Subset(t, attest.Predicate, map[string]interface{}{"name": "core"}) @@ -6418,9 +6419,9 @@ ARG BUILDKIT_SBOM_SCAN_STAGE=true require.Equal(t, 4, len(att.LayersRaw)) extraCount := 0 for _, l := range att.LayersRaw { - var attest intoto.Statement + var attest intotov1.Statement require.NoError(t, json.Unmarshal(l, &attest)) - att := attest.Predicate.(map[string]interface{}) + att := attest.Predicate.AsMap() switch att["name"] { case "core": case "extra": diff --git a/go.mod b/go.mod index 76120d052a43..17638c2ee459 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/hashicorp/go-immutable-radix v1.3.1 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v0.5.4 - github.com/in-toto/in-toto-golang v0.5.0 + github.com/in-toto/in-toto-golang v0.9.1-0.20230919171745-f55a6fe48c49 github.com/klauspost/compress v1.17.2 github.com/mitchellh/hashstructure/v2 v2.0.2 github.com/moby/locker v1.0.1 @@ -150,7 +150,7 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/vbatts/tar-split v0.11.2 // indirect go.opencensus.io v0.24.0 // indirect @@ -164,3 +164,5 @@ require ( 
gopkg.in/yaml.v3 v3.0.1 // indirect kernel.org/pub/linux/libs/security/libcap/psx v1.2.67 // indirect ) + +require github.com/in-toto/attestation v0.1.1-0.20230828220013-11b7a1a4ca51 diff --git a/go.sum b/go.sum index 3223b3ee4995..5e96973efe82 100644 --- a/go.sum +++ b/go.sum @@ -785,8 +785,10 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= -github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE= +github.com/in-toto/attestation v0.1.1-0.20230828220013-11b7a1a4ca51 h1:79cutIt/QsUDEWEPKUdC9OiI0C9fYxRuU1VvYTGYTuo= +github.com/in-toto/attestation v0.1.1-0.20230828220013-11b7a1a4ca51/go.mod h1:hCR5COCuENh5+VfojEkJnt7caOymbEgvyZdKifD6pOw= +github.com/in-toto/in-toto-golang v0.9.1-0.20230919171745-f55a6fe48c49 h1:TFiXUZ3zvU/gg+E0Aj3zLBbSZjI2+gX91nWCzTE54ew= +github.com/in-toto/in-toto-golang v0.9.1-0.20230919171745-f55a6fe48c49/go.mod h1:UeWt1pGX1GFm5Dd+KsdQp6FCHYFefdQnQ3hUifk5LxM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= @@ -1110,8 +1112,8 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang 
v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= -github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= +github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= +github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE= github.com/securego/gosec v0.0.0-20200401082031-e946c8c39989/go.mod h1:i9l/TNj+yDFh9SZXUTvspXTjbFXgZGP/UvhU1S65A4A= github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfFmZYotn8ME= diff --git a/vendor/github.com/in-toto/attestation/LICENSE b/vendor/github.com/in-toto/attestation/LICENSE new file mode 100644 index 000000000000..702a3365c066 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/LICENSE @@ -0,0 +1,13 @@ +Copyright 2021 in-toto Developers + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go new file mode 100644 index 000000000000..5ebeea35d71f --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go @@ -0,0 +1,18 @@ +/* +Wrapper APIs for in-toto attestation ResourceDescriptor protos. +*/ + +package v1 + +import "errors" + +var ErrRDRequiredField = errors.New("at least one of name, URI, or digest are required") + +func (d *ResourceDescriptor) Validate() error { + // at least one of name, URI or digest are required + if d.GetName() == "" && d.GetUri() == "" && len(d.GetDigest()) == 0 { + return ErrRDRequiredField + } + + return nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go new file mode 100644 index 000000000000..4a14b10a9276 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go @@ -0,0 +1,233 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: in_toto_attestation/v1/resource_descriptor.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the in-toto v1 ResourceDescriptor. +// https://github.com/in-toto/attestation/blob/main/spec/v1/resource_descriptor.md +// Validation of all fields is left to the users of this proto. 
+type ResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` + Digest map[string]string `protobuf:"bytes,3,rep,name=digest,proto3" json:"digest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"` + DownloadLocation string `protobuf:"bytes,5,opt,name=download_location,json=downloadLocation,proto3" json:"download_location,omitempty"` + MediaType string `protobuf:"bytes,6,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // Per the Struct protobuf spec, this type corresponds to + // a JSON Object, which is truly a map under the hood. + // So, the Struct a) is still consistent with our specification for + // the `annotations` field, and b) has native support in some language + // bindings making their use easier in implementations. 
+ // See: https://pkg.go.dev/google.golang.org/protobuf/types/known/structpb#Struct + Annotations *structpb.Struct `protobuf:"bytes,7,opt,name=annotations,proto3" json:"annotations,omitempty"` +} + +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. 
+func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ResourceDescriptor) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *ResourceDescriptor) GetDigest() map[string]string { + if x != nil { + return x.Digest + } + return nil +} + +func (x *ResourceDescriptor) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *ResourceDescriptor) GetDownloadLocation() string { + if x != nil { + return x.DownloadLocation + } + return "" +} + +func (x *ResourceDescriptor) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ResourceDescriptor) GetAnnotations() *structpb.Struct { + if x != nil { + return x.Annotations + } + return nil +} + +var File_in_toto_attestation_v1_resource_descriptor_proto protoreflect.FileDescriptor + +var file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x30, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x16, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, + 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 
0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x4e, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, + 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x77, 0x6e, + 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x47, 0x0a, 0x1f, 0x69, 
0x6f, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x69, + 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x69, 0x6e, 0x2d, 0x74, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce sync.Once + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData = file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc +) + +func file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP() []byte { + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce.Do(func() { + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData) + }) + return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData +} + +var file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = []interface{}{ + (*ResourceDescriptor)(nil), // 0: in_toto_attestation.v1.ResourceDescriptor + nil, // 1: in_toto_attestation.v1.ResourceDescriptor.DigestEntry + (*structpb.Struct)(nil), // 2: google.protobuf.Struct +} +var file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = []int32{ + 1, // 0: in_toto_attestation.v1.ResourceDescriptor.digest:type_name -> in_toto_attestation.v1.ResourceDescriptor.DigestEntry + 2, // 1: in_toto_attestation.v1.ResourceDescriptor.annotations:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for 
extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_in_toto_attestation_v1_resource_descriptor_proto_init() } +func file_in_toto_attestation_v1_resource_descriptor_proto_init() { + if File_in_toto_attestation_v1_resource_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_in_toto_attestation_v1_resource_descriptor_proto_goTypes, + DependencyIndexes: file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs, + MessageInfos: file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes, + }.Build() + File_in_toto_attestation_v1_resource_descriptor_proto = out.File + file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc = nil + file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = nil + file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.go b/vendor/github.com/in-toto/attestation/go/v1/statement.go new file mode 100644 index 000000000000..f63d5f0d7475 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.go @@ -0,0 +1,50 @@ +/* +Wrapper APIs for in-toto attestation Statement layer protos. 
+*/ + +package v1 + +import "errors" + +const StatementTypeUri = "https://in-toto.io/Statement/v1" + +var ( + ErrInvalidStatementType = errors.New("wrong statement type") + ErrSubjectRequired = errors.New("at least one subject required") + ErrDigestRequired = errors.New("at least one digest required") + ErrPredicateTypeRequired = errors.New("predicate type required") + ErrPredicateRequired = errors.New("predicate object required") +) + +func (s *Statement) Validate() error { + if s.GetType() != StatementTypeUri { + return ErrInvalidStatementType + } + + if s.GetSubject() == nil || len(s.GetSubject()) == 0 { + return ErrSubjectRequired + } + + // check all resource descriptors in the subject + subject := s.GetSubject() + for _, rd := range subject { + if err := rd.Validate(); err != nil { + return err + } + + // v1 statements require the digest to be set in the subject + if len(rd.GetDigest()) == 0 { + return ErrDigestRequired + } + } + + if s.GetPredicateType() == "" { + return ErrPredicateTypeRequired + } + + if s.GetPredicate() == nil { + return ErrPredicateRequired + } + + return nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go new file mode 100644 index 000000000000..cf304fcdac27 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: in_toto_attestation/v1/statement.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the in-toto v1 Statement. +// https://github.com/in-toto/attestation/tree/main/spec/v1 +// Validation of all fields is left to the users of this proto. +type Statement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Expected to always be "https://in-toto.io/Statement/v1" + Type string `protobuf:"bytes,1,opt,name=type,json=_type,proto3" json:"type,omitempty"` + Subject []*ResourceDescriptor `protobuf:"bytes,2,rep,name=subject,proto3" json:"subject,omitempty"` + PredicateType string `protobuf:"bytes,3,opt,name=predicate_type,json=predicateType,proto3" json:"predicate_type,omitempty"` + Predicate *structpb.Struct `protobuf:"bytes,4,opt,name=predicate,proto3" json:"predicate,omitempty"` +} + +func (x *Statement) Reset() { + *x = Statement{} + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Statement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Statement) ProtoMessage() {} + +func (x *Statement) ProtoReflect() protoreflect.Message { + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Statement.ProtoReflect.Descriptor instead. 
+func (*Statement) Descriptor() ([]byte, []int) { + return file_in_toto_attestation_v1_statement_proto_rawDescGZIP(), []int{0} +} + +func (x *Statement) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Statement) GetSubject() []*ResourceDescriptor { + if x != nil { + return x.Subject + } + return nil +} + +func (x *Statement) GetPredicateType() string { + if x != nil { + return x.PredicateType + } + return "" +} + +func (x *Statement) GetPredicate() *structpb.Struct { + if x != nil { + return x.Predicate + } + return nil +} + +var File_in_toto_attestation_v1_statement_proto protoreflect.FileDescriptor + +var file_in_toto_attestation_v1_statement_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, + 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x30, + 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xc4, 0x01, 0x0a, 0x09, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x13, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, + 0x74, 0x74, 
0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x35, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x70, 0x72, + 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x47, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x74, 0x74, 0x65, + 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x6e, 0x2d, 0x74, 0x6f, 0x74, 0x6f, 0x2f, 0x61, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_in_toto_attestation_v1_statement_proto_rawDescOnce sync.Once + file_in_toto_attestation_v1_statement_proto_rawDescData = file_in_toto_attestation_v1_statement_proto_rawDesc +) + +func file_in_toto_attestation_v1_statement_proto_rawDescGZIP() []byte { + file_in_toto_attestation_v1_statement_proto_rawDescOnce.Do(func() { + file_in_toto_attestation_v1_statement_proto_rawDescData = protoimpl.X.CompressGZIP(file_in_toto_attestation_v1_statement_proto_rawDescData) + }) + return file_in_toto_attestation_v1_statement_proto_rawDescData +} + +var file_in_toto_attestation_v1_statement_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var 
file_in_toto_attestation_v1_statement_proto_goTypes = []interface{}{ + (*Statement)(nil), // 0: in_toto_attestation.v1.Statement + (*ResourceDescriptor)(nil), // 1: in_toto_attestation.v1.ResourceDescriptor + (*structpb.Struct)(nil), // 2: google.protobuf.Struct +} +var file_in_toto_attestation_v1_statement_proto_depIdxs = []int32{ + 1, // 0: in_toto_attestation.v1.Statement.subject:type_name -> in_toto_attestation.v1.ResourceDescriptor + 2, // 1: in_toto_attestation.v1.Statement.predicate:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_in_toto_attestation_v1_statement_proto_init() } +func file_in_toto_attestation_v1_statement_proto_init() { + if File_in_toto_attestation_v1_statement_proto != nil { + return + } + file_in_toto_attestation_v1_resource_descriptor_proto_init() + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_v1_statement_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Statement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_in_toto_attestation_v1_statement_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_in_toto_attestation_v1_statement_proto_goTypes, + DependencyIndexes: file_in_toto_attestation_v1_statement_proto_depIdxs, + MessageInfos: file_in_toto_attestation_v1_statement_proto_msgTypes, + }.Build() + File_in_toto_attestation_v1_statement_proto = out.File + file_in_toto_attestation_v1_statement_proto_rawDesc = nil + 
file_in_toto_attestation_v1_statement_proto_goTypes = nil + file_in_toto_attestation_v1_statement_proto_depIdxs = nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go new file mode 100644 index 000000000000..b9ec4b0d60df --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go @@ -0,0 +1,133 @@ +package in_toto + +import ( + ita1 "github.com/in-toto/attestation/go/v1" + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1" + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1" +) + +const ( + // StatementInTotoV01 is the statement type for the generalized link format + // containing statements. This is constant for all predicate types. + StatementInTotoV01 = "https://in-toto.io/Statement/v0.1" + + // StatementInTotoV1 is the type URI for ITE-6 v1 Statements. + // This is constant for all predicate types. + StatementInTotoV1 = ita1.StatementTypeUri + + // PredicateSPDX represents a SBOM using the SPDX standard. + // The SPDX mandates 'spdxVersion' field, so predicate type can omit + // version. + PredicateSPDX = "https://spdx.dev/Document" + // PredicateCycloneDX represents a CycloneDX SBOM + PredicateCycloneDX = "https://cyclonedx.org/bom" + // PredicateLinkV1 represents an in-toto 0.9 link. + PredicateLinkV1 = "https://in-toto.io/Link/v1" +) + +// Subject describes the set of software artifacts the statement applies to. +// +// Deprecated: This implementation of Subject exists for historical +// compatibility and should not be used. This implementation has been +// superseded by a ResourceDescriptor struct generated from the Protobuf +// definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/v1. 
+// To generate an ITE-6 v1 Statement subject, use the ResourceDescriptor Go +// APIs provided in https://github.com/in-toto/attestation/tree/main/go/v1. +type Subject struct { + Name string `json:"name"` + Digest common.DigestSet `json:"digest"` +} + +// StatementHeader defines the common fields for all statements +// +// Deprecated: This implementation of StatementHeader exists for historical +// compatibility and should not be used. This implementation has been +// superseded by the Statement struct generated from the Protobuf +// definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/v1. +// To generate an ITE-6 v1 Statement, use the Go APIs provided in +// https://github.com/in-toto/attestation/tree/main/go/v1. +type StatementHeader struct { + Type string `json:"_type"` + PredicateType string `json:"predicateType"` + Subject []Subject `json:"subject"` +} + +/* +Statement binds the attestation to a particular subject and identifies the type +of the predicate. This struct represents a generic statement. +*/ +// Deprecated: This implementation of Statement exists for historical +// compatibility and should not be used. This implementation has been +// superseded by the Statement struct generated from the Protobuf +// definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/v1. +// To generate an ITE-6 v1 Statement, use the Go APIs provided in +// https://github.com/in-toto/attestation/tree/main/go/v1. +type Statement struct { + StatementHeader + // Predicate contains type specific metadata. + Predicate interface{} `json:"predicate"` +} + +// ProvenanceStatementSLSA01 is the definition for an entire provenance statement with SLSA 0.1 predicate. +type ProvenanceStatementSLSA01 struct { + StatementHeader + Predicate slsa01.ProvenancePredicate `json:"predicate"` +} + +// ProvenanceStatementSLSA02 is the definition for an entire provenance statement with SLSA 0.2 predicate. 
+type ProvenanceStatementSLSA02 struct { + StatementHeader + Predicate slsa02.ProvenancePredicate `json:"predicate"` +} + +// ProvenanceStatementSLSA1 is the definition for an entire provenance statement with SLSA 1.0 predicate. +// +// Deprecated: ProvenanceStatementSLSA1 exists for historical +// compatibility and should not be used. To generate an ITE-6 v1 Statement +// with an ITE-9 Provenance v1 predicate, use the Go APIs provided in +// https://github.com/in-toto/attestation/tree/main/go. +type ProvenanceStatementSLSA1 struct { + StatementHeader + Predicate slsa1.ProvenancePredicate `json:"predicate"` +} + +// ProvenanceStatement is the definition for an entire provenance statement with SLSA 0.2 predicate. +// Deprecated: Only version-specific provenance structs will be maintained (ProvenanceStatementSLSA01, ProvenanceStatementSLSA02). +type ProvenanceStatement struct { + StatementHeader + Predicate slsa02.ProvenancePredicate `json:"predicate"` +} + +// LinkStatement is the definition for an entire link statement. +type LinkStatement struct { + StatementHeader + Predicate Link `json:"predicate"` +} + +/* +SPDXStatement is the definition for an entire SPDX statement. +This is currently not implemented. Some tooling exists here: +https://github.com/spdx/tools-golang, but this software is still in +early state. +This struct is the same as the generic Statement struct but is added for +completeness +*/ +type SPDXStatement struct { + StatementHeader + Predicate interface{} `json:"predicate"` +} + +/* +CycloneDXStatement defines a cyclonedx sbom in the predicate. It is not +currently serialized just as its SPDX counterpart. It is an empty +interface, like the generic Statement. 
+*/ +type CycloneDXStatement struct { + StatementHeader + Predicate interface{} `json:"predicate"` +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go new file mode 100644 index 000000000000..2c8afff1f75a --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go @@ -0,0 +1,166 @@ +package in_toto + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/secure-systems-lab/go-securesystemslib/signerverifier" +) + +// PayloadType is the payload type used for links and layouts. +const PayloadType = "application/vnd.in-toto+json" + +// ErrInvalidPayloadType indicates that the envelope used an unknown payload type +var ErrInvalidPayloadType = errors.New("unknown payload type") + +type Envelope struct { + envelope *dsse.Envelope + payload any +} + +func loadEnvelope(env *dsse.Envelope) (*Envelope, error) { + e := &Envelope{envelope: env} + + contentBytes, err := env.DecodeB64Payload() + if err != nil { + return nil, err + } + + payload, err := loadPayload(contentBytes) + if err != nil { + return nil, err + } + e.payload = payload + + return e, nil +} + +func (e *Envelope) SetPayload(payload any) error { + encodedBytes, err := cjson.EncodeCanonical(payload) + if err != nil { + return err + } + + e.payload = payload + e.envelope = &dsse.Envelope{ + Payload: base64.StdEncoding.EncodeToString(encodedBytes), + PayloadType: PayloadType, + } + + return nil +} + +func (e *Envelope) GetPayload() any { + return e.payload +} + +func (e *Envelope) VerifySignature(key Key) error { + verifier, err := getSignerVerifierFromKey(key) + if err != nil { + return err + } + + ev, err := dsse.NewEnvelopeVerifier(verifier) + if err != nil { + return err + } + + _, err = ev.Verify(context.Background(), e.envelope) + return 
err +} + +func (e *Envelope) Sign(key Key) error { + signer, err := getSignerVerifierFromKey(key) + if err != nil { + return err + } + + es, err := dsse.NewEnvelopeSigner(signer) + if err != nil { + return err + } + + payload, err := e.envelope.DecodeB64Payload() + if err != nil { + return err + } + + env, err := es.SignPayload(context.Background(), e.envelope.PayloadType, payload) + if err != nil { + return err + } + + e.envelope = env + return nil +} + +func (e *Envelope) Sigs() []Signature { + sigs := []Signature{} + for _, s := range e.envelope.Signatures { + sigs = append(sigs, Signature{ + KeyID: s.KeyID, + Sig: s.Sig, + }) + } + return sigs +} + +func (e *Envelope) GetSignatureForKeyID(keyID string) (Signature, error) { + for _, s := range e.Sigs() { + if s.KeyID == keyID { + return s, nil + } + } + + return Signature{}, fmt.Errorf("no signature found for key '%s'", keyID) +} + +func (e *Envelope) Dump(path string) error { + jsonBytes, err := json.MarshalIndent(e.envelope, "", " ") + if err != nil { + return err + } + + // Write JSON bytes to the passed path with permissions (-rw-r--r--) + err = os.WriteFile(path, jsonBytes, 0644) + if err != nil { + return err + } + + return nil +} + +func getSignerVerifierFromKey(key Key) (dsse.SignerVerifier, error) { + sslibKey := getSSLibKeyFromKey(key) + + switch sslibKey.KeyType { + case signerverifier.RSAKeyType: + return signerverifier.NewRSAPSSSignerVerifierFromSSLibKey(&sslibKey) + case signerverifier.ED25519KeyType: + return signerverifier.NewED25519SignerVerifierFromSSLibKey(&sslibKey) + case signerverifier.ECDSAKeyType: + return signerverifier.NewECDSASignerVerifierFromSSLibKey(&sslibKey) + } + + return nil, ErrUnsupportedKeyType +} + +func getSSLibKeyFromKey(key Key) signerverifier.SSLibKey { + return signerverifier.SSLibKey{ + KeyType: key.KeyType, + KeyIDHashAlgorithms: key.KeyIDHashAlgorithms, + KeyID: key.KeyID, + Scheme: key.Scheme, + KeyVal: signerverifier.KeyVal{ + Public: key.KeyVal.Public, + Private: 
key.KeyVal.Private, + Certificate: key.KeyVal.Certificate, + }, + } +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go index 7de482821ad4..52429ca44bee 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go @@ -13,7 +13,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "strings" @@ -325,7 +324,7 @@ func (k *Key) LoadKeyReader(r io.Reader, scheme string, KeyIDHashAlgorithms []st return ErrNoPEMBlock } // Read key bytes - pemBytes, err := ioutil.ReadAll(r) + pemBytes, err := io.ReadAll(r) if err != nil { return err } @@ -344,7 +343,7 @@ func (k *Key) LoadKeyReaderDefaults(r io.Reader) error { return ErrNoPEMBlock } // Read key bytes - pemBytes, err := ioutil.ReadAll(r) + pemBytes, err := io.ReadAll(r) if err != nil { return err } @@ -366,7 +365,7 @@ func (k *Key) LoadKeyReaderDefaults(r io.Reader) error { func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms []string, err error) { keyIDHashAlgorithms = []string{"sha256", "sha512"} - switch key.(type) { + switch k := key.(type) { case *rsa.PublicKey, *rsa.PrivateKey: scheme = rsassapsssha256Scheme case ed25519.PrivateKey, ed25519.PublicKey: @@ -374,7 +373,7 @@ func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms [] case *ecdsa.PrivateKey, *ecdsa.PublicKey: scheme = ecdsaSha2nistp256 case *x509.Certificate: - return getDefaultKeyScheme(key.(*x509.Certificate).PublicKey) + return getDefaultKeyScheme(k.PublicKey) default: err = ErrUnsupportedKeyType } @@ -382,11 +381,10 @@ func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms [] return scheme, keyIDHashAlgorithms, err } -func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error { - - switch key.(type) { +func (k *Key) loadKey(keyObj interface{}, pemData *pem.Block, scheme string, 
keyIDHashAlgorithms []string) error { + switch key := keyObj.(type) { case *rsa.PublicKey: - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PublicKey)) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key) if err != nil { return err } @@ -396,7 +394,7 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH case *rsa.PrivateKey: // Note: RSA Public Keys will get stored as X.509 SubjectPublicKeyInfo (RFC5280) // This behavior is consistent to the securesystemslib - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PrivateKey).Public()) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.Public()) if err != nil { return err } @@ -404,16 +402,16 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH return err } case ed25519.PublicKey: - if err := k.setKeyComponents(key.(ed25519.PublicKey), []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + if err := k.setKeyComponents(key, []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { return err } case ed25519.PrivateKey: - pubKeyBytes := key.(ed25519.PrivateKey).Public() - if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key.(ed25519.PrivateKey), ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + pubKeyBytes := key.Public() + if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { return err } case *ecdsa.PrivateKey: - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PrivateKey).Public()) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.Public()) if err != nil { return err } @@ -421,7 +419,7 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH return err } case *ecdsa.PublicKey: - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PublicKey)) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key) if err != nil { return err } @@ -429,7 +427,7 @@ func (k *Key) loadKey(key interface{}, 
pemData *pem.Block, scheme string, keyIDH return err } case *x509.Certificate: - err := k.loadKey(key.(*x509.Certificate).PublicKey, pemData, scheme, keyIDHashAlgorithms) + err := k.loadKey(key.PublicKey, pemData, scheme, keyIDHashAlgorithms) if err != nil { return err } diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go index e22b79da320e..f56b784ea0c0 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go @@ -7,7 +7,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "reflect" "regexp" @@ -15,10 +14,6 @@ import ( "strings" "time" - "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" - slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1" - slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" - "github.com/secure-systems-lab/go-securesystemslib/cjson" "github.com/secure-systems-lab/go-securesystemslib/dsse" ) @@ -30,7 +25,7 @@ and private keys in PEM format stored as strings. For public keys the Private field may be an empty string. */ type KeyVal struct { - Private string `json:"private"` + Private string `json:"private,omitempty"` Public string `json:"public"` Certificate string `json:"certificate,omitempty"` } @@ -48,9 +43,6 @@ type Key struct { Scheme string `json:"scheme"` } -// PayloadType is the payload type used for links and layouts. -const PayloadType = "application/vnd.in-toto+json" - // ErrEmptyKeyField will be thrown if a field in our Key struct is empty. 
var ErrEmptyKeyField = errors.New("empty field in key") @@ -73,23 +65,6 @@ var ErrNoPublicKey = errors.New("the given key is not a public key") // for example: curve size = "521" and scheme = "ecdsa-sha2-nistp224" var ErrCurveSizeSchemeMismatch = errors.New("the scheme does not match the curve size") -const ( - // StatementInTotoV01 is the statement type for the generalized link format - // containing statements. This is constant for all predicate types. - StatementInTotoV01 = "https://in-toto.io/Statement/v0.1" - // PredicateSPDX represents a SBOM using the SPDX standard. - // The SPDX mandates 'spdxVersion' field, so predicate type can omit - // version. - PredicateSPDX = "https://spdx.dev/Document" - // PredicateCycloneDX represents a CycloneDX SBOM - PredicateCycloneDX = "https://cyclonedx.org/bom" - // PredicateLinkV1 represents an in-toto 0.9 link. - PredicateLinkV1 = "https://in-toto.io/Link/v1" -) - -// ErrInvalidPayloadType indicates that the envelope used an unkown payload type -var ErrInvalidPayloadType = errors.New("unknown payload type") - /* matchEcdsaScheme checks if the scheme suffix, matches the ecdsa key curve size. 
We do not need a full regex match here, because @@ -702,6 +677,67 @@ func validateLayout(layout Layout) error { return nil } +type Metadata interface { + Sign(Key) error + VerifySignature(Key) error + GetPayload() any + Sigs() []Signature + GetSignatureForKeyID(string) (Signature, error) + Dump(string) error +} + +func LoadMetadata(path string) (Metadata, error) { + jsonBytes, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var rawData map[string]*json.RawMessage + if err := json.Unmarshal(jsonBytes, &rawData); err != nil { + return nil, err + } + + if _, ok := rawData["payloadType"]; ok { + dsseEnv := &dsse.Envelope{} + if rawData["payload"] == nil || rawData["signatures"] == nil { + return nil, fmt.Errorf("in-toto metadata envelope requires 'payload' and 'signatures' parts") + } + + if err := json.Unmarshal(jsonBytes, dsseEnv); err != nil { + return nil, err + } + + if dsseEnv.PayloadType != PayloadType { + return nil, ErrInvalidPayloadType + } + + return loadEnvelope(dsseEnv) + } + + mb := &Metablock{} + + // Error out on missing `signed` or `signatures` field or if + // one of them has a `null` value, which would lead to a nil pointer + // dereference in Unmarshal below. + if rawData["signed"] == nil || rawData["signatures"] == nil { + return nil, fmt.Errorf("in-toto metadata requires 'signed' and 'signatures' parts") + } + + // Fully unmarshal signatures part + if err := json.Unmarshal(*rawData["signatures"], &mb.Signatures); err != nil { + return nil, err + } + + payload, err := loadPayload(*rawData["signed"]) + if err != nil { + return nil, err + } + + mb.Signed = payload + + return mb, nil +} + /* Metablock is a generic container for signable in-toto objects such as Layout or Link. It has two fields, one that contains the signable object and one that @@ -767,17 +803,13 @@ func checkRequiredJSONFields(obj map[string]interface{}, Load parses JSON formatted metadata at the passed path into the Metablock object on which it was called. 
It returns an error if it cannot parse a valid JSON formatted Metablock that contains a Link or Layout. + +Deprecated: Use LoadMetadata for a signature wrapper agnostic way to load an +envelope. */ func (mb *Metablock) Load(path string) error { - // Open file and close before returning - jsonFile, err := os.Open(path) - if err != nil { - return err - } - defer jsonFile.Close() - // Read entire file - jsonBytes, err := ioutil.ReadAll(jsonFile) + jsonBytes, err := os.ReadFile(path) if err != nil { return err } @@ -803,54 +835,14 @@ func (mb *Metablock) Load(path string) error { return err } - // Temporarily copy signed to opaque map to inspect the `_type` of signed - // and create link or layout accordingly - var signed map[string]interface{} - if err := json.Unmarshal(*rawMb["signed"], &signed); err != nil { + payload, err := loadPayload(*rawMb["signed"]) + if err != nil { return err } - if signed["_type"] == "link" { - var link Link - if err := checkRequiredJSONFields(signed, reflect.TypeOf(link)); err != nil { - return err - } - - data, err := rawMb["signed"].MarshalJSON() - if err != nil { - return err - } - decoder := json.NewDecoder(strings.NewReader(string(data))) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&link); err != nil { - return err - } - mb.Signed = link - - } else if signed["_type"] == "layout" { - var layout Layout - if err := checkRequiredJSONFields(signed, reflect.TypeOf(layout)); err != nil { - return err - } - - data, err := rawMb["signed"].MarshalJSON() - if err != nil { - return err - } - decoder := json.NewDecoder(strings.NewReader(string(data))) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&layout); err != nil { - return err - } - - mb.Signed = layout - - } else { - return fmt.Errorf("the '_type' field of the 'signed' part of in-toto" + - " metadata must be one of 'link' or 'layout'") - } + mb.Signed = payload - return jsonFile.Close() + return nil } /* @@ -866,7 +858,7 @@ func (mb *Metablock) Dump(path 
string) error { } // Write JSON bytes to the passed path with permissions (-rw-r--r--) - err = ioutil.WriteFile(path, jsonBytes, 0644) + err = os.WriteFile(path, jsonBytes, 0644) if err != nil { return err } @@ -883,6 +875,14 @@ func (mb *Metablock) GetSignableRepresentation() ([]byte, error) { return cjson.EncodeCanonical(mb.Signed) } +func (mb *Metablock) GetPayload() any { + return mb.Signed +} + +func (mb *Metablock) Sigs() []Signature { + return mb.Signatures +} + /* VerifySignature verifies the first signature, corresponding to the passed Key, that it finds in the Signatures field of the Metablock on which it was called. @@ -965,109 +965,3 @@ func (mb *Metablock) Sign(key Key) error { mb.Signatures = append(mb.Signatures, newSignature) return nil } - -// Subject describes the set of software artifacts the statement applies to. -type Subject struct { - Name string `json:"name"` - Digest common.DigestSet `json:"digest"` -} - -// StatementHeader defines the common fields for all statements -type StatementHeader struct { - Type string `json:"_type"` - PredicateType string `json:"predicateType"` - Subject []Subject `json:"subject"` -} - -/* -Statement binds the attestation to a particular subject and identifies the -of the predicate. This struct represents a generic statement. -*/ -type Statement struct { - StatementHeader - // Predicate contains type speficic metadata. - Predicate interface{} `json:"predicate"` -} - -// ProvenanceStatementSLSA01 is the definition for an entire provenance statement with SLSA 0.1 predicate. -type ProvenanceStatementSLSA01 struct { - StatementHeader - Predicate slsa01.ProvenancePredicate `json:"predicate"` -} - -// ProvenanceStatementSLSA02 is the definition for an entire provenance statement with SLSA 0.2 predicate. 
-type ProvenanceStatementSLSA02 struct { - StatementHeader - Predicate slsa02.ProvenancePredicate `json:"predicate"` -} - -// ProvenanceStatement is the definition for an entire provenance statement with SLSA 0.2 predicate. -// Deprecated: Only version-specific provenance structs will be maintained (ProvenanceStatementSLSA01, ProvenanceStatementSLSA02). -type ProvenanceStatement struct { - StatementHeader - Predicate slsa02.ProvenancePredicate `json:"predicate"` -} - -// LinkStatement is the definition for an entire link statement. -type LinkStatement struct { - StatementHeader - Predicate Link `json:"predicate"` -} - -/* -SPDXStatement is the definition for an entire SPDX statement. -This is currently not implemented. Some tooling exists here: -https://github.com/spdx/tools-golang, but this software is still in -early state. -This struct is the same as the generic Statement struct but is added for -completeness -*/ -type SPDXStatement struct { - StatementHeader - Predicate interface{} `json:"predicate"` -} - -/* -CycloneDXStatement defines a cyclonedx sbom in the predicate. It is not -currently serialized just as its SPDX counterpart. It is an empty -interface, like the generic Statement. -*/ -type CycloneDXStatement struct { - StatementHeader - Predicate interface{} `json:"predicate"` -} - -/* -DSSESigner provides signature generation and validation based on the SSL -Signing Spec: https://github.com/secure-systems-lab/signing-spec -as describe by: https://github.com/MarkLodato/ITE/tree/media-type/ITE/5 -It wraps the generic SSL envelope signer and enforces the correct payload -type both during signature generation and validation. -*/ -type DSSESigner struct { - signer *dsse.EnvelopeSigner -} - -func NewDSSESigner(p ...dsse.SignVerifier) (*DSSESigner, error) { - es, err := dsse.NewEnvelopeSigner(p...) 
- if err != nil { - return nil, err - } - - return &DSSESigner{ - signer: es, - }, nil -} - -func (s *DSSESigner) SignPayload(body []byte) (*dsse.Envelope, error) { - return s.signer.SignPayload(PayloadType, body) -} - -func (s *DSSESigner) Verify(e *dsse.Envelope) error { - if e.PayloadType != PayloadType { - return ErrInvalidPayloadType - } - - _, err := s.signer.Verify(e) - return err -} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go index 87e690507011..d897cf7ceeb7 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "os" "os/exec" "path/filepath" @@ -44,7 +44,7 @@ normalized to Unix-style line separators (LF) before hashing file contents. func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) { supportedHashMappings := getHashMapping() // Read file from passed path - contents, err := ioutil.ReadFile(path) + contents, err := os.ReadFile(path) hashedContentsMap := make(map[string]interface{}) if err != nil { return nil, err @@ -92,12 +92,22 @@ the following format: If recording an artifact fails the first return value is nil and the second return value is the error. 
*/ -func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (evalArtifacts map[string]interface{}, err error) { +func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (evalArtifacts map[string]interface{}, err error) { // Make sure to initialize a fresh hashset for every RecordArtifacts call visitedSymlinks = NewSet() - evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) - // pass result and error through - return evalArtifacts, err + evalArtifactsUnnormalized, err := recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) + if err != nil { + return nil, err + } + + // Normalize all paths in evalArtifactsUnnormalized. + evalArtifacts = make(map[string]interface{}, len(evalArtifactsUnnormalized)) + for key, value := range evalArtifactsUnnormalized { + // Convert windows filepath to unix filepath. + evalArtifacts[filepath.ToSlash(key)] = value + } + + return evalArtifacts, nil } /* @@ -118,7 +128,7 @@ the following format: If recording an artifact fails the first return value is nil and the second return value is the error. 
*/ -func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (map[string]interface{}, error) { +func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (map[string]interface{}, error) { artifacts := make(map[string]interface{}) for _, path := range paths { err := filepath.Walk(path, @@ -160,18 +170,35 @@ func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns if err != nil { return err } + info, err := os.Stat(evalSym) + if err != nil { + return err + } + targetIsDir := false + if info.IsDir() { + if !followSymlinkDirs { + // We don't follow symlinked directories + return nil + } + targetIsDir = true + } // add symlink to visitedSymlinks set // this way, we know which link we have visited already // if we visit a symlink twice, we have detected a symlink cycle visitedSymlinks.Add(path) - // We recursively call RecordArtifacts() to follow + // We recursively call recordArtifacts() to follow // the new path. 
- evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if evalErr != nil { return evalErr } for key, value := range evalArtifacts { - artifacts[key] = value + if targetIsDir { + symlinkPath := filepath.Join(path, strings.TrimPrefix(key, evalSym)) + artifacts[symlinkPath] = value + } else { + artifacts[path] = value + } } return nil } @@ -189,8 +216,7 @@ func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns } } // Check if path is unique - _, existingPath := artifacts[path] - if existingPath { + if _, exists := artifacts[path]; exists { return fmt.Errorf("left stripping has resulted in non unique dictionary key: %s", path) } artifacts[path] = artifact @@ -273,8 +299,8 @@ func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) } // TODO: duplicate stdout, stderr - stdout, _ := ioutil.ReadAll(stdoutPipe) - stderr, _ := ioutil.ReadAll(stderrPipe) + stdout, _ := io.ReadAll(stdoutPipe) + stderr, _ := io.ReadAll(stderrPipe) retVal := waitErrToExitCode(cmd.Wait()) @@ -293,14 +319,10 @@ and materials at the passed materialPaths. The returned link is wrapped in a Metablock object. If command execution or artifact recording fails the first return value is an empty Metablock and the second return value is the error. 
*/ -func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string, - cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string, - lStripPaths []string, lineNormalization bool) (Metablock, error) { - var linkMb Metablock - - materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) +func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string, cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) { + materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } // make sure that we only run RunCommand if cmdArgs is not nil or empty @@ -308,16 +330,16 @@ func InTotoRun(name string, runDir string, materialPaths []string, productPaths if len(cmdArgs) != 0 { byProducts, err = RunCommand(cmdArgs, runDir) if err != nil { - return linkMb, err + return nil, err } } - products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } - linkMb.Signed = Link{ + link := Link{ Type: "link", Name: name, Materials: materials, @@ -327,14 +349,25 @@ func InTotoRun(name string, runDir string, materialPaths []string, productPaths Environment: map[string]interface{}{}, } - linkMb.Signatures = []Signature{} - // We use a new feature from Go1.13 here, to check the key struct. 
- // IsZero() will return True, if the key hasn't been initialized + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(link); err != nil { + return nil, err + } + + if !reflect.ValueOf(key).IsZero() { + if err := env.Sign(key); err != nil { + return nil, err + } + } - // with other values than the default ones. + return env, nil + } + + linkMb := &Metablock{Signed: link, Signatures: []Signature{}} if !reflect.ValueOf(key).IsZero() { if err := linkMb.Sign(key); err != nil { - return linkMb, err + return nil, err } } @@ -347,14 +380,13 @@ in order to provide evidence for supply chain steps that cannot be carries out by a single command. InTotoRecordStart collects the hashes of the materials before any commands are run, signs the unfinished link, and returns the link. */ -func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) { - var linkMb Metablock - materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) +func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) { + materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } - linkMb.Signed = Link{ + link := Link{ Type: "link", Name: name, Materials: materials, @@ -364,9 +396,26 @@ func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorit Environment: map[string]interface{}{}, } + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(link); err != nil { + return nil, err + } + + if !reflect.ValueOf(key).IsZero() { + if err := env.Sign(key); err != nil { + return nil, err + } + } + + return env, nil + } + + 
linkMb := &Metablock{Signed: link, Signatures: []Signature{}} + linkMb.Signatures = []Signature{} if !reflect.ValueOf(key).IsZero() { if err := linkMb.Sign(key); err != nil { - return linkMb, err + return nil, err } } @@ -380,25 +429,39 @@ created by InTotoRecordStart and records the hashes of any products creted by commands run between InTotoRecordStart and InTotoRecordStop. The resultant finished link metablock is then signed by the provided key and returned. */ -func InTotoRecordStop(prelimLinkMb Metablock, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) { - var linkMb Metablock - if err := prelimLinkMb.VerifySignature(key); err != nil { - return linkMb, err +func InTotoRecordStop(prelimLinkEnv Metadata, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) { + if err := prelimLinkEnv.VerifySignature(key); err != nil { + return nil, err } - link, ok := prelimLinkMb.Signed.(Link) + link, ok := prelimLinkEnv.GetPayload().(Link) if !ok { - return linkMb, errors.New("invalid metadata block") + return nil, errors.New("invalid metadata block") } - products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } link.Products = products - linkMb.Signed = link + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(link); err != nil { + return nil, err + } + + if !reflect.ValueOf(key).IsZero() { + if err := env.Sign(key); err != nil { + return nil, err + } + } + + return env, nil + } + + linkMb := &Metablock{Signed: link, Signatures: []Signature{}} if !reflect.ValueOf(key).IsZero() { if err := linkMb.Sign(key); 
err != nil { return linkMb, err @@ -407,3 +470,77 @@ func InTotoRecordStop(prelimLinkMb Metablock, productPaths []string, key Key, ha return linkMb, nil } + +/* +InTotoMatchProducts checks if local artifacts match products in passed link. + +NOTE: Does not check integrity or authenticity of passed link! +*/ +func InTotoMatchProducts(link *Link, paths []string, hashAlgorithms []string, excludePatterns []string, lstripPaths []string) ([]string, []string, []string, error) { + if len(paths) == 0 { + paths = append(paths, ".") + } + + artifacts, err := RecordArtifacts(paths, hashAlgorithms, excludePatterns, lstripPaths, false, false) + if err != nil { + return nil, nil, nil, err + } + + artifactNames := []string{} + for name := range artifacts { + artifactNames = append(artifactNames, name) + } + artifactsSet := NewSet(artifactNames...) + + productNames := []string{} + for name := range link.Products { + productNames = append(productNames, name) + } + productsSet := NewSet(productNames...) + + onlyInProductsSet := productsSet.Difference(artifactsSet) + onlyInProducts := []string{} + for name := range onlyInProductsSet { + onlyInProducts = append(onlyInProducts, name) + } + + notInProductsSet := artifactsSet.Difference(productsSet) + notInProducts := []string{} + for name := range notInProductsSet { + notInProducts = append(notInProducts, name) + } + + inBothSet := artifactsSet.Intersection(productsSet) + differ := []string{} + for name := range inBothSet { + linkHashes := map[string]string{} + switch hashObj := link.Products[name].(type) { + case map[string]any: + for alg, val := range hashObj { + linkHashes[alg] = val.(string) + } + case map[string]string: + for alg, val := range hashObj { + linkHashes[alg] = val + } + } + + artifactHashes := map[string]string{} + switch hashObj := artifacts[name].(type) { + case map[string]any: + for alg, val := range hashObj { + artifactHashes[alg] = val.(string) + } + case map[string]string: + for alg, val := range hashObj { + 
artifactHashes[alg] = val + } + } + + if !reflect.DeepEqual(linkHashes, artifactHashes) { + differ = append(differ, name) + } + } + + return onlyInProducts, notInProducts, differ, nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go index 5fca7abb7326..40416e29a85a 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go @@ -11,6 +11,13 @@ const ( PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.2" ) +// These are type aliases to common to avoid backwards incompatible changes. +type ( + DigestSet = common.DigestSet + ProvenanceBuilder = common.ProvenanceBuilder + ProvenanceMaterial = common.ProvenanceMaterial +) + // ProvenancePredicate is the provenance predicate definition. type ProvenancePredicate struct { // Builder identifies the entity that executed the invocation, which is trusted to have diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go new file mode 100644 index 000000000000..1e99880277e0 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go @@ -0,0 +1,183 @@ +package v1 + +import ( + "time" + + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" +) + +const ( + // PredicateSLSAProvenance represents a build provenance for an artifact. + PredicateSLSAProvenance = "https://slsa.dev/provenance/v1" +) + +// ProvenancePredicate is the provenance predicate definition. +// +// Deprecated: ProvenancePredicate exists for historical compatibility +// and should not be used. 
This implementation has been superseded by the +// Provenance struct generated from the Protobuf definition provided +// by the in-toto Attestation Framework. +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/predicates/provenance/v1. +type ProvenancePredicate struct { + // The BuildDefinition describes all of the inputs to the build. The + // accuracy and completeness are implied by runDetails.builder.id. + // + // It SHOULD contain all the information necessary and sufficient to + // initialize the build and begin execution. + BuildDefinition ProvenanceBuildDefinition `json:"buildDefinition"` + + // Details specific to this particular execution of the build. + RunDetails ProvenanceRunDetails `json:"runDetails"` +} + +// ProvenanceBuildDefinition describes the inputs to the build. +// +// Deprecated: ProvenanceBuildDefinition exists for historical compatibility +// and should not be used. This implementation has been superseded by the +// BuildDefinition struct generated from the Protobuf definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/predicates/provenance/v1. +type ProvenanceBuildDefinition struct { + // Identifies the template for how to perform the build and interpret the + // parameters and dependencies. + + // The URI SHOULD resolve to a human-readable specification that includes: + // overall description of the build type; schema for externalParameters and + // systemParameters; unambiguous instructions for how to initiate the build + // given this BuildDefinition, and a complete example. + BuildType string `json:"buildType"` + + // The parameters that are under external control, such as those set by a + // user or tenant of the build system. They MUST be complete at SLSA Build + // L3, meaning that that there is no additional mechanism for an external + // party to influence the build. (At lower SLSA Build levels, the + // completeness MAY be best effort.) 
+ + // The build system SHOULD be designed to minimize the size and complexity + // of externalParameters, in order to reduce fragility and ease + // verification. Consumers SHOULD have an expectation of what “good” looks + // like; the more information that they need to check, the harder that task + // becomes. + ExternalParameters interface{} `json:"externalParameters"` + + // The parameters that are under the control of the entity represented by + // builder.id. The primary intention of this field is for debugging, + // incident response, and vulnerability management. The values here MAY be + // necessary for reproducing the build. There is no need to verify these + // parameters because the build system is already trusted, and in many cases + // it is not practical to do so. + InternalParameters interface{} `json:"internalParameters,omitempty"` + + // Unordered collection of artifacts needed at build time. Completeness is + // best effort, at least through SLSA Build L3. For example, if the build + // script fetches and executes “example.com/foo.sh”, which in turn fetches + // “example.com/bar.tar.gz”, then both “foo.sh” and “bar.tar.gz” SHOULD be + // listed here. + ResolvedDependencies []ResourceDescriptor `json:"resolvedDependencies,omitempty"` +} + +// ProvenanceRunDetails includes details specific to a particular execution of a +// build. +// +// Deprecated: ProvenanceRunDetails exists for historical compatibility +// and should not be used. This implementation has been superseded by the +// RunDetails struct generated from the Protobuf definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/predicates/provenance/v1. +type ProvenanceRunDetails struct { + // Identifies the entity that executed the invocation, which is trusted to + // have correctly performed the operation and populated this provenance. + // + // This field is REQUIRED for SLSA Build 1 unless id is implicit from the + // attestation envelope. 
+ Builder Builder `json:"builder"` + + // Metadata about this particular execution of the build. + BuildMetadata BuildMetadata `json:"metadata,omitempty"` + + // Additional artifacts generated during the build that are not considered + // the “output” of the build but that might be needed during debugging or + // incident response. For example, this might reference logs generated + // during the build and/or a digest of the fully evaluated build + // configuration. + // + // In most cases, this SHOULD NOT contain all intermediate files generated + // during the build. Instead, this SHOULD only contain files that are + // likely to be useful later and that cannot be easily reproduced. + Byproducts []ResourceDescriptor `json:"byproducts,omitempty"` +} + +// ResourceDescriptor describes a particular software artifact or resource +// (mutable or immutable). +// See https://github.com/in-toto/attestation/blob/main/spec/v1.0/resource_descriptor.md +// +// Deprecated: This implementation of ResoureDescriptor exists for +// historical compatibility and should not be used. This struct has been +// superseded by the ResourceDescriptor struct generated from the Protobuf +// definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/v1. +type ResourceDescriptor struct { + // A URI used to identify the resource or artifact globally. This field is + // REQUIRED unless either digest or content is set. + URI string `json:"uri,omitempty"` + + // A set of cryptographic digests of the contents of the resource or + // artifact. This field is REQUIRED unless either uri or content is set. + Digest common.DigestSet `json:"digest,omitempty"` + + // TMachine-readable identifier for distinguishing between descriptors. + Name string `json:"name,omitempty"` + + // The location of the described resource or artifact, if different from the + // uri. 
+ DownloadLocation string `json:"downloadLocation,omitempty"` + + // The MIME Type (i.e., media type) of the described resource or artifact. + MediaType string `json:"mediaType,omitempty"` + + // The contents of the resource or artifact. This field is REQUIRED unless + // either uri or digest is set. + Content []byte `json:"content,omitempty"` + + // This field MAY be used to provide additional information or metadata + // about the resource or artifact that may be useful to the consumer when + // evaluating the attestation against a policy. + Annotations map[string]interface{} `json:"annotations,omitempty"` +} + +// Builder represents the transitive closure of all the entities that are, by +// necessity, trusted to faithfully run the build and record the provenance. +// +// Deprecated: This implementation of Builder exists for historical +// compatibility and should not be used. This implementation has been +// superseded by the Builder struct generated from the Protobuf definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/predicates/provenance/v1. +type Builder struct { + // URI indicating the transitive closure of the trusted builder. + ID string `json:"id"` + + // Version numbers of components of the builder. + Version map[string]string `json:"version,omitempty"` + + // Dependencies used by the orchestrator that are not run within the + // workload and that do not affect the build, but might affect the + // provenance generation or security guarantees. + BuilderDependencies []ResourceDescriptor `json:"builderDependencies,omitempty"` +} + +// Deprecated: This implementation of BuildMetadata exists for historical +// compatibility and should not be used. This implementation has been +// superseded by the BuildMetadata struct generated from the Protobuf +// definition in +// https://github.com/in-toto/attestation/tree/main/protos/in_toto_attestation/predicates/provenance/v1. 
+type BuildMetadata struct { + // Identifies this particular build invocation, which can be useful for + // finding associated logs or other ad-hoc analysis. The exact meaning and + // format is defined by builder.id; by default it is treated as opaque and + // case-sensitive. The value SHOULD be globally unique. + InvocationID string `json:"invocationID,omitempty"` + + // The timestamp of when the build started. + StartedOn *time.Time `json:"startedOn,omitempty"` + + // The timestamp of when the build completed. + FinishedOn *time.Time `json:"finishedOn,omitempty"` +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go index 59cba86eb52c..5c36dede13d1 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go @@ -1,9 +1,15 @@ package in_toto import ( + "encoding/json" + "errors" "fmt" + "reflect" + "strings" ) +var ErrUnknownMetadataType = errors.New("unknown metadata type encountered: not link or layout") + /* Set represents a data structure for set operations. See `NewSet` for how to create a Set, and available Set receivers for useful set operations. 
@@ -145,3 +151,40 @@ func (s Set) IsSubSet(subset Set) bool { } return true } + +func loadPayload(payloadBytes []byte) (any, error) { + var payload map[string]any + if err := json.Unmarshal(payloadBytes, &payload); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + if payload["_type"] == "link" { + var link Link + if err := checkRequiredJSONFields(payload, reflect.TypeOf(link)); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + decoder := json.NewDecoder(strings.NewReader(string(payloadBytes))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&link); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + return link, nil + } else if payload["_type"] == "layout" { + var layout Layout + if err := checkRequiredJSONFields(payload, reflect.TypeOf(layout)); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + decoder := json.NewDecoder(strings.NewReader(string(payloadBytes))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&layout); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + return layout, nil + } + + return nil, ErrUnknownMetadataType +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go index 2302040f4600..2564bd47eb2b 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go @@ -12,7 +12,6 @@ import ( "io" "os" "path" - osPath "path" "path/filepath" "reflect" "regexp" @@ -23,6 +22,8 @@ import ( // ErrInspectionRunDirIsSymlink gets thrown if the runDir is a symlink var ErrInspectionRunDirIsSymlink = errors.New("runDir is a symlink. 
This is a security risk") +var ErrNotLayout = errors.New("verification workflow passed a non-layout") + /* RunInspections iteratively executes the command in the Run field of all inspections of the passed layout, creating unsigned link metadata that records @@ -41,8 +42,8 @@ If executing the inspection command fails, or if the executed command has a non-zero exit code, the first return value is an empty Metablock map and the second return value is the error. */ -func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[string]Metablock, error) { - inspectionMetadata := make(map[string]Metablock) +func RunInspections(layout Layout, runDir string, lineNormalization bool, useDSSE bool) (map[string]Metadata, error) { + inspectionMetadata := make(map[string]Metadata) for _, inspection := range layout.Inspect { @@ -51,14 +52,14 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[s paths = []string{runDir} } - linkMb, err := InTotoRun(inspection.Name, runDir, paths, paths, - inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization) + linkEnv, err := InTotoRun(inspection.Name, runDir, paths, paths, + inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization, false, useDSSE) if err != nil { return nil, err } - retVal := linkMb.Signed.(Link).ByProducts["return-value"] + retVal := linkEnv.GetPayload().(Link).ByProducts["return-value"] if retVal != float64(0) { return nil, fmt.Errorf("inspection command '%s' of inspection '%s'"+ " returned a non-zero value: %d", inspection.Run, inspection.Name, @@ -67,11 +68,11 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[s // Dump inspection link to cwd using the short link name format linkName := fmt.Sprintf(LinkNameFormatShort, inspection.Name) - if err := linkMb.Dump(linkName); err != nil { + if err := linkEnv.Dump(linkName); err != nil { fmt.Printf("JSON serialization or writing failed: %s", err) } - 
inspectionMetadata[inspection.Name] = linkMb + inspectionMetadata[inspection.Name] = linkEnv } return inspectionMetadata, nil } @@ -80,10 +81,10 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[s // type MATCH. See VerifyArtifacts for more details. func verifyMatchRule(ruleData map[string]string, srcArtifacts map[string]interface{}, srcArtifactQueue Set, - itemsMetadata map[string]Metablock) Set { + itemsMetadata map[string]Metadata) Set { consumed := NewSet() // Get destination link metadata - dstLinkMb, exists := itemsMetadata[ruleData["dstName"]] + dstLinkEnv, exists := itemsMetadata[ruleData["dstName"]] if !exists { // Destination link does not exist, rule can't consume any // artifacts @@ -94,9 +95,9 @@ func verifyMatchRule(ruleData map[string]string, var dstArtifacts map[string]interface{} switch ruleData["dstType"] { case "materials": - dstArtifacts = dstLinkMb.Signed.(Link).Materials + dstArtifacts = dstLinkEnv.GetPayload().(Link).Materials case "products": - dstArtifacts = dstLinkMb.Signed.(Link).Products + dstArtifacts = dstLinkEnv.GetPayload().(Link).Products } // cleanup paths in pattern and artifact maps @@ -140,7 +141,7 @@ func verifyMatchRule(ruleData map[string]string, // Construct corresponding destination artifact path, i.e. // an optional destination prefix plus the source base path - dstPath := path.Clean(osPath.Join(ruleData["dstPrefix"], srcBasePath)) + dstPath := path.Clean(path.Join(ruleData["dstPrefix"], srcBasePath)) // Try to find the corresponding destination artifact dstArtifact, exists := dstArtifacts[dstPath] @@ -180,7 +181,7 @@ DISALLOW rule to fail overall verification, if artifacts are left in the queue that should have been consumed by preceding rules. 
*/ func VerifyArtifacts(items []interface{}, - itemsMetadata map[string]Metablock) error { + itemsMetadata map[string]Metadata) error { // Verify artifact rules for each item in the layout for _, itemI := range items { // The layout item (interface) must be a Link or an Inspection we are only @@ -207,7 +208,7 @@ func VerifyArtifacts(items []interface{}, } // Use the item's name to extract the corresponding link - srcLinkMb, exists := itemsMetadata[itemName] + srcLinkEnv, exists := itemsMetadata[itemName] if !exists { return fmt.Errorf("VerifyArtifacts could not find metadata"+ " for item '%s', got: '%s'", itemName, itemsMetadata) @@ -215,8 +216,8 @@ func VerifyArtifacts(items []interface{}, // Create shortcuts to materials and products (including hashes) reported // by the item's link, required to verify "match" rules - materials := srcLinkMb.Signed.(Link).Materials - products := srcLinkMb.Signed.(Link).Products + materials := srcLinkEnv.GetPayload().(Link).Materials + products := srcLinkEnv.GetPayload().(Link).Products // All other rules only require the material or product paths (without // hashes). We extract them from the corresponding maps and store them as @@ -364,9 +365,9 @@ Products, the first return value is an empty Metablock map and the second return value is the error. 
*/ func ReduceStepsMetadata(layout Layout, - stepsMetadata map[string]map[string]Metablock) (map[string]Metablock, + stepsMetadata map[string]map[string]Metadata) (map[string]Metadata, error) { - stepsMetadataReduced := make(map[string]Metablock) + stepsMetadataReduced := make(map[string]Metadata) for _, step := range layout.Steps { linksPerStep, ok := stepsMetadata[step.Name] @@ -379,16 +380,16 @@ func ReduceStepsMetadata(layout Layout, // Get the first link (could be any link) for the current step, which will // serve as reference link for below comparisons var referenceKeyID string - var referenceLinkMb Metablock - for keyID, linkMb := range linksPerStep { - referenceLinkMb = linkMb + var referenceLinkEnv Metadata + for keyID, linkEnv := range linksPerStep { + referenceLinkEnv = linkEnv referenceKeyID = keyID break } // Only one link, nothing to reduce, take the reference link if len(linksPerStep) == 1 { - stepsMetadataReduced[step.Name] = referenceLinkMb + stepsMetadataReduced[step.Name] = referenceLinkEnv // Multiple links, reduce but first check } else { @@ -396,11 +397,11 @@ func ReduceStepsMetadata(layout Layout, // TODO: What should we do if there are more links, than the // threshold requires, but not all of them are equal? Right now we would // also error. 
- for keyID, linkMb := range linksPerStep { - if !reflect.DeepEqual(linkMb.Signed.(Link).Materials, - referenceLinkMb.Signed.(Link).Materials) || - !reflect.DeepEqual(linkMb.Signed.(Link).Products, - referenceLinkMb.Signed.(Link).Products) { + for keyID, linkEnv := range linksPerStep { + if !reflect.DeepEqual(linkEnv.GetPayload().(Link).Materials, + referenceLinkEnv.GetPayload().(Link).Materials) || + !reflect.DeepEqual(linkEnv.GetPayload().(Link).Products, + referenceLinkEnv.GetPayload().(Link).Products) { return nil, fmt.Errorf("link '%s' and '%s' have different"+ " artifacts", fmt.Sprintf(LinkNameFormat, step.Name, referenceKeyID), @@ -408,7 +409,7 @@ func ReduceStepsMetadata(layout Layout, } } // We haven't errored out, so we can reduce (i.e take the reference link) - stepsMetadataReduced[step.Name] = referenceLinkMb + stepsMetadataReduced[step.Name] = referenceLinkEnv } } return stepsMetadataReduced, nil @@ -421,7 +422,7 @@ command, as per the layout. Soft verification means that, in case a command does not align, a warning is issued. */ func VerifyStepCommandAlignment(layout Layout, - stepsMetadata map[string]map[string]Metablock) { + stepsMetadata map[string]map[string]Metadata) { for _, step := range layout.Steps { linksPerStep, ok := stepsMetadata[step.Name] // We should never get here, layout verification must fail earlier @@ -430,9 +431,9 @@ func VerifyStepCommandAlignment(layout Layout, "', no link metadata found.") } - for signerKeyID, linkMb := range linksPerStep { + for signerKeyID, linkEnv := range linksPerStep { expectedCommandS := strings.Join(step.ExpectedCommand, " ") - executedCommandS := strings.Join(linkMb.Signed.(Link).Command, " ") + executedCommandS := strings.Join(linkEnv.GetPayload().(Link).Command, " ") if expectedCommandS != executedCommandS { linkName := fmt.Sprintf(LinkNameFormat, step.Name, signerKeyID) @@ -502,11 +503,11 @@ return value is an empty map of Metablock maps and the second return value is the error. 
*/ func VerifyLinkSignatureThesholds(layout Layout, - stepsMetadata map[string]map[string]Metablock, rootCertPool, intermediateCertPool *x509.CertPool) ( - map[string]map[string]Metablock, error) { + stepsMetadata map[string]map[string]Metadata, rootCertPool, intermediateCertPool *x509.CertPool) ( + map[string]map[string]Metadata, error) { // This will stores links with valid signature from an authorized functionary // for all steps - stepsMetadataVerified := make(map[string]map[string]Metablock) + stepsMetadataVerified := make(map[string]map[string]Metadata) // Try to find enough (>= threshold) links each with a valid signature from // distinct authorized functionaries for each step @@ -515,7 +516,7 @@ func VerifyLinkSignatureThesholds(layout Layout, // This will store links with valid signature from an authorized // functionary for the given step - linksPerStepVerified := make(map[string]Metablock) + linksPerStepVerified := make(map[string]Metadata) // Check if there are any links at all for the given step linksPerStep, ok := stepsMetadata[step.Name] @@ -528,12 +529,12 @@ func VerifyLinkSignatureThesholds(layout Layout, // verification passes. Only good links are stored, to verify thresholds // below. isAuthorizedSignature := false - for signerKeyID, linkMb := range linksPerStep { + for signerKeyID, linkEnv := range linksPerStep { for _, authorizedKeyID := range step.PubKeys { if signerKeyID == authorizedKeyID { if verifierKey, ok := layout.Keys[authorizedKeyID]; ok { - if err := linkMb.VerifySignature(verifierKey); err == nil { - linksPerStepVerified[signerKeyID] = linkMb + if err := linkEnv.VerifySignature(verifierKey); err == nil { + linksPerStepVerified[signerKeyID] = linkEnv isAuthorizedSignature = true break } @@ -544,7 +545,7 @@ func VerifyLinkSignatureThesholds(layout Layout, // If the signer's key wasn't in our step's pubkeys array, check the cert pool to // see if the key is known to us. 
if !isAuthorizedSignature { - sig, err := linkMb.GetSignatureForKeyID(signerKeyID) + sig, err := linkEnv.GetSignatureForKeyID(signerKeyID) if err != nil { stepErr = err continue @@ -563,13 +564,13 @@ func VerifyLinkSignatureThesholds(layout Layout, continue } - err = linkMb.VerifySignature(cert) + err = linkEnv.VerifySignature(cert) if err != nil { stepErr = err continue } - linksPerStepVerified[signerKeyID] = linkMb + linksPerStepVerified[signerKeyID] = linkEnv } } @@ -614,30 +615,30 @@ ignored. Only a preliminary threshold check is performed, that is, if there aren't at least Threshold links for any given step, the first return value is an empty map of Metablock maps and the second return value is the error. */ -func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metablock, error) { - stepsMetadata := make(map[string]map[string]Metablock) +func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metadata, error) { + stepsMetadata := make(map[string]map[string]Metadata) for _, step := range layout.Steps { - linksPerStep := make(map[string]Metablock) + linksPerStep := make(map[string]Metadata) // Since we can verify against certificates belonging to a CA, we need to // load any possible links - linkFiles, err := filepath.Glob(osPath.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name))) + linkFiles, err := filepath.Glob(path.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name))) if err != nil { return nil, err } for _, linkPath := range linkFiles { - var linkMb Metablock - if err := linkMb.Load(linkPath); err != nil { + linkEnv, err := LoadMetadata(linkPath) + if err != nil { continue } // To get the full key from the metadata's signatures, we have to check // for one with the same short id... 
signerShortKeyID := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(linkPath), step.Name+"."), ".link") - for _, sig := range linkMb.Signatures { + for _, sig := range linkEnv.Sigs() { if strings.HasPrefix(sig.KeyID, signerShortKeyID) { - linksPerStep[sig.KeyID] = linkMb + linksPerStep[sig.KeyID] = linkEnv break } } @@ -677,14 +678,14 @@ Signatures and keys are associated by key id. If the key map is empty, or the Metablock's Signature field does not have a signature for one or more of the passed keys, or a matching signature is invalid, an error is returned. */ -func VerifyLayoutSignatures(layoutMb Metablock, +func VerifyLayoutSignatures(layoutEnv Metadata, layoutKeys map[string]Key) error { if len(layoutKeys) < 1 { return fmt.Errorf("layout verification requires at least one key") } for _, key := range layoutKeys { - if err := layoutMb.VerifySignature(key); err != nil { + if err := layoutEnv.VerifySignature(key); err != nil { return err } } @@ -700,29 +701,35 @@ NOTE: The assumption is that the steps mentioned in the layout are to be performed sequentially. So, the first step mentioned in the layout denotes what comes into the supply chain and the last step denotes what goes out. 
*/ -func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metablock, - stepName string) (Metablock, error) { +func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metadata, + stepName string, useDSSE bool) (Metadata, error) { var summaryLink Link - var result Metablock if len(layout.Steps) > 0 { firstStepLink := stepsMetadataReduced[layout.Steps[0].Name] lastStepLink := stepsMetadataReduced[layout.Steps[len(layout.Steps)-1].Name] - summaryLink.Materials = firstStepLink.Signed.(Link).Materials + summaryLink.Materials = firstStepLink.GetPayload().(Link).Materials summaryLink.Name = stepName - summaryLink.Type = firstStepLink.Signed.(Link).Type + summaryLink.Type = firstStepLink.GetPayload().(Link).Type - summaryLink.Products = lastStepLink.Signed.(Link).Products - summaryLink.ByProducts = lastStepLink.Signed.(Link).ByProducts + summaryLink.Products = lastStepLink.GetPayload().(Link).Products + summaryLink.ByProducts = lastStepLink.GetPayload().(Link).ByProducts // Using the last command of the sublayout as the command // of the summary link can be misleading. Is it necessary to // include all the commands executed as part of sublayout? - summaryLink.Command = lastStepLink.Signed.(Link).Command + summaryLink.Command = lastStepLink.GetPayload().(Link).Command } - result.Signed = summaryLink + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(summaryLink); err != nil { + return nil, err + } - return result, nil + return env, nil + } + + return &Metablock{Signed: summaryLink}, nil } /* @@ -731,11 +738,11 @@ so, recursively resolves it and replaces it with a summary link summarizing the steps carried out in the sublayout. 
*/ func VerifySublayouts(layout Layout, - stepsMetadataVerified map[string]map[string]Metablock, - superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metablock, error) { + stepsMetadataVerified map[string]map[string]Metadata, + superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metadata, error) { for stepName, linkData := range stepsMetadataVerified { for keyID, metadata := range linkData { - if _, ok := metadata.Signed.(Layout); ok { + if _, ok := metadata.GetPayload().(Layout); ok { layoutKeys := make(map[string]Key) layoutKeys[keyID] = layout.Keys[keyID] @@ -861,55 +868,60 @@ Metablock object. NOTE: Artifact rules of type "create", "modify" and "delete" are currently not supported. */ -func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, +func InTotoVerify(layoutEnv Metadata, layoutKeys map[string]Key, linkDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( - Metablock, error) { - - var summaryLink Metablock - var err error + Metadata, error) { // Verify root signatures - if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil { - return summaryLink, err + if err := VerifyLayoutSignatures(layoutEnv, layoutKeys); err != nil { + return nil, err + } + + useDSSE := false + if _, ok := layoutEnv.(*Envelope); ok { + useDSSE = true } - // Extract the layout from its Metablock container (for further processing) - layout := layoutMb.Signed.(Layout) + // Extract the layout from its Metadata container (for further processing) + layout, ok := layoutEnv.GetPayload().(Layout) + if !ok { + return nil, ErrNotLayout + } // Verify layout expiration if err := VerifyLayoutExpiration(layout); err != nil { - return summaryLink, err + return nil, err } // Substitute parameters in layout - layout, err = SubstituteParameters(layout, parameterDictionary) + layout, err := 
SubstituteParameters(layout, parameterDictionary) if err != nil { - return summaryLink, err + return nil, err } rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems) if err != nil { - return summaryLink, err + return nil, err } // Load links for layout stepsMetadata, err := LoadLinksForLayout(layout, linkDir) if err != nil { - return summaryLink, err + return nil, err } // Verify link signatures stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout, stepsMetadata, rootCertPool, intermediateCertPool) if err != nil { - return summaryLink, err + return nil, err } // Verify and resolve sublayouts stepsSublayoutVerified, err := VerifySublayouts(layout, stepsMetadataVerified, linkDir, intermediatePems, lineNormalization) if err != nil { - return summaryLink, err + return nil, err } // Verify command alignment (WARNING only) @@ -922,18 +934,18 @@ func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, stepsMetadataReduced, err := ReduceStepsMetadata(layout, stepsSublayoutVerified) if err != nil { - return summaryLink, err + return nil, err } // Verify artifact rules if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), stepsMetadataReduced); err != nil { - return summaryLink, err + return nil, err } - inspectionMetadata, err := RunInspections(layout, "", lineNormalization) + inspectionMetadata, err := RunInspections(layout, "", lineNormalization, useDSSE) if err != nil { - return summaryLink, err + return nil, err } // Add steps metadata to inspection metadata, because inspection artifact @@ -944,51 +956,48 @@ func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), inspectionMetadata); err != nil { - return summaryLink, err + return nil, err } - summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + summaryLink, err := GetSummaryLink(layout, stepsMetadataReduced, stepName, useDSSE) if err != nil { - return summaryLink, err 
+ return nil, err } return summaryLink, nil } /* -InTotoVerifyWithDirectory provides the same functionality as IntotoVerify, but +InTotoVerifyWithDirectory provides the same functionality as InTotoVerify, but adds the possibility to select a local directory from where the inspections are run. */ -func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, +func InTotoVerifyWithDirectory(layoutEnv Metadata, layoutKeys map[string]Key, linkDir string, runDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( - Metablock, error) { - - var summaryLink Metablock - var err error + Metadata, error) { // runDir sanity checks // check if path exists info, err := os.Stat(runDir) if err != nil { - return Metablock{}, err + return nil, err } // check if runDir is a symlink if info.Mode()&os.ModeSymlink == os.ModeSymlink { - return Metablock{}, ErrInspectionRunDirIsSymlink + return nil, ErrInspectionRunDirIsSymlink } // check if runDir is writable and a directory err = isWritable(runDir) if err != nil { - return Metablock{}, err + return nil, err } // check if runDir is empty (we do not want to overwrite files) // We abuse File.Readdirnames for this action. 
f, err := os.Open(runDir) if err != nil { - return Metablock{}, err + return nil, err } defer f.Close() // We use Readdirnames(1) for performance reasons, one child node @@ -996,55 +1005,63 @@ func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, _, err = f.Readdirnames(1) // if io.EOF gets returned as error the directory is empty if err == io.EOF { - return Metablock{}, err + return nil, err } err = f.Close() if err != nil { - return Metablock{}, err + return nil, err } // Verify root signatures - if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil { - return summaryLink, err + if err := VerifyLayoutSignatures(layoutEnv, layoutKeys); err != nil { + return nil, err } - // Extract the layout from its Metablock container (for further processing) - layout := layoutMb.Signed.(Layout) + useDSSE := false + if _, ok := layoutEnv.(*Envelope); ok { + useDSSE = true + } + + // Extract the layout from its Metadata container (for further processing) + layout, ok := layoutEnv.GetPayload().(Layout) + if !ok { + return nil, ErrNotLayout + } // Verify layout expiration if err := VerifyLayoutExpiration(layout); err != nil { - return summaryLink, err + return nil, err } // Substitute parameters in layout layout, err = SubstituteParameters(layout, parameterDictionary) if err != nil { - return summaryLink, err + return nil, err } rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems) if err != nil { - return summaryLink, err + return nil, err } // Load links for layout stepsMetadata, err := LoadLinksForLayout(layout, linkDir) if err != nil { - return summaryLink, err + return nil, err } // Verify link signatures stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout, stepsMetadata, rootCertPool, intermediateCertPool) if err != nil { - return summaryLink, err + return nil, err } // Verify and resolve sublayouts stepsSublayoutVerified, err := VerifySublayouts(layout, stepsMetadataVerified, linkDir, 
intermediatePems, lineNormalization) if err != nil { - return summaryLink, err + return nil, err } // Verify command alignment (WARNING only) @@ -1057,18 +1074,18 @@ func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, stepsMetadataReduced, err := ReduceStepsMetadata(layout, stepsSublayoutVerified) if err != nil { - return summaryLink, err + return nil, err } // Verify artifact rules if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), stepsMetadataReduced); err != nil { - return summaryLink, err + return nil, err } - inspectionMetadata, err := RunInspections(layout, runDir, lineNormalization) + inspectionMetadata, err := RunInspections(layout, runDir, lineNormalization, useDSSE) if err != nil { - return summaryLink, err + return nil, err } // Add steps metadata to inspection metadata, because inspection artifact @@ -1079,12 +1096,12 @@ func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), inspectionMetadata); err != nil { - return summaryLink, err + return nil, err } - summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + summaryLink, err := GetSummaryLink(layout, stepsMetadataReduced, stepName, useDSSE) if err != nil { - return summaryLink, err + return nil, err } return summaryLink, nil diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go index fb1d5918b282..abc860a491bf 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go @@ -6,8 +6,8 @@ import ( "errors" "fmt" "reflect" - "regexp" "sort" + "strings" ) /* @@ -18,8 +18,12 @@ escaping backslashes ("\") and double quotes (") and wrapping the resulting string in double quotes ("). 
*/ func encodeCanonicalString(s string) string { - re := regexp.MustCompile(`([\"\\])`) - return fmt.Sprintf("\"%s\"", re.ReplaceAllString(s, "\\$1")) + // Escape backslashes + s = strings.ReplaceAll(s, "\\", "\\\\") + // Escape double quotes + s = strings.ReplaceAll(s, "\"", "\\\"") + // Wrap with double quotes + return fmt.Sprintf("\"%s\"", s) } /* @@ -28,16 +32,7 @@ object according to the OLPC canonical JSON specification (see http://wiki.laptop.org/go/Canonical_JSON) and write it to the passed *bytes.Buffer. If canonicalization fails it returns an error. */ -func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) { - // Since this function is called recursively, we use panic if an error occurs - // and recover in a deferred function, which is always called before - // returning. There we set the error that is returned eventually. - defer func() { - if r := recover(); r != nil { - err = errors.New(r.(string)) - } - }() - +func encodeCanonical(obj interface{}, result *strings.Builder) (err error) { switch objAsserted := obj.(type) { case string: result.WriteString(encodeCanonicalString(objAsserted)) @@ -90,10 +85,9 @@ func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) { // Canonicalize map for i, key := range mapKeys { - // Note: `key` must be a `string` (see `case map[string]interface{}`) and - // canonicalization of strings cannot err out (see `case string`), thus - // no error handling is needed here. - encodeCanonical(key, result) + if err := encodeCanonical(key, result); err != nil { + return err + } result.WriteString(":") if err := encodeCanonical(objAsserted[key], result); err != nil { @@ -120,7 +114,16 @@ slice. It uses the OLPC canonical JSON specification (see http://wiki.laptop.org/go/Canonical_JSON). If canonicalization fails the byte slice is nil and the second return value contains the error. 
*/ -func EncodeCanonical(obj interface{}) ([]byte, error) { +func EncodeCanonical(obj interface{}) (out []byte, err error) { + // We use panic if an error occurs and recover in a deferred function, + // which is always called before returning. + // There we set the error that is returned eventually. + defer func() { + if r := recover(); r != nil { + err = errors.New(r.(string)) + } + }() + // FIXME: Terrible hack to turn the passed struct into a map, converting // the struct's variable names to the json key names defined in the struct data, err := json.Marshal(obj) @@ -136,10 +139,13 @@ func EncodeCanonical(obj interface{}) ([]byte, error) { } // Create a buffer and write the canonicalized JSON bytes to it - var result bytes.Buffer + var result strings.Builder + // Allocate output result buffer with the input size. + result.Grow(len(data)) + // Recursively encode the jsonmap if err := encodeCanonical(jsonMap, &result); err != nil { return nil, err } - return result.Bytes(), nil + return []byte(result.String()), nil } diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go new file mode 100644 index 000000000000..ed223e90b5a1 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go @@ -0,0 +1,64 @@ +package dsse + +import ( + "encoding/base64" + "fmt" +) + +/* +Envelope captures an envelope as described by the DSSE specification. See here: +https://github.com/secure-systems-lab/dsse/blob/master/envelope.md +*/ +type Envelope struct { + PayloadType string `json:"payloadType"` + Payload string `json:"payload"` + Signatures []Signature `json:"signatures"` +} + +/* +DecodeB64Payload returns the serialized body, decoded from the envelope's +payload field. A flexible decoder is used, first trying standard base64, then +URL-encoded base64. 
+*/ +func (e *Envelope) DecodeB64Payload() ([]byte, error) { + return b64Decode(e.Payload) +} + +/* +Signature represents a generic in-toto signature that contains the identifier +of the key which was used to create the signature. +The used signature scheme has to be agreed upon by the signer and verifer +out of band. +The signature is a base64 encoding of the raw bytes from the signature +algorithm. +*/ +type Signature struct { + KeyID string `json:"keyid"` + Sig string `json:"sig"` +} + +/* +PAE implementes the DSSE Pre-Authentic Encoding +https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition +*/ +func PAE(payloadType string, payload []byte) []byte { + return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s", + len(payloadType), payloadType, + len(payload), payload)) +} + +/* +Both standard and url encoding are allowed: +https://github.com/secure-systems-lab/dsse/blob/master/envelope.md +*/ +func b64Decode(s string) ([]byte, error) { + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b, err = base64.URLEncoding.DecodeString(s) + if err != nil { + return nil, fmt.Errorf("unable to base64 decode payload (is payload in the right format?)") + } + } + + return b, nil +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go index 3dc05a4294e1..85aed102d4b2 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go @@ -5,111 +5,35 @@ https://github.com/secure-systems-lab/dsse package dsse import ( + "context" "encoding/base64" "errors" - "fmt" ) -// ErrUnknownKey indicates that the implementation does not recognize the -// key. -var ErrUnknownKey = errors.New("unknown key") - -// ErrNoSignature indicates that an envelope did not contain any signatures. 
-var ErrNoSignature = errors.New("no signature found") - // ErrNoSigners indicates that no signer was provided. var ErrNoSigners = errors.New("no signers provided") -/* -Envelope captures an envelope as described by the Secure Systems Lab -Signing Specification. See here: -https://github.com/secure-systems-lab/signing-spec/blob/master/envelope.md -*/ -type Envelope struct { - PayloadType string `json:"payloadType"` - Payload string `json:"payload"` - Signatures []Signature `json:"signatures"` -} - -/* -DecodeB64Payload returns the serialized body, decoded -from the envelope's payload field. A flexible -decoder is used, first trying standard base64, then -URL-encoded base64. -*/ -func (e *Envelope) DecodeB64Payload() ([]byte, error) { - return b64Decode(e.Payload) -} - -/* -Signature represents a generic in-toto signature that contains the identifier -of the key which was used to create the signature. -The used signature scheme has to be agreed upon by the signer and verifer -out of band. -The signature is a base64 encoding of the raw bytes from the signature -algorithm. -*/ -type Signature struct { - KeyID string `json:"keyid"` - Sig string `json:"sig"` -} - -/* -PAE implementes the DSSE Pre-Authentic Encoding -https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition -*/ -func PAE(payloadType string, payload []byte) []byte { - return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s", - len(payloadType), payloadType, - len(payload), payload)) -} - -/* -Signer defines the interface for an abstract signing algorithm. -The Signer interface is used to inject signature algorithm implementations -into the EnevelopeSigner. This decoupling allows for any signing algorithm -and key management system can be used. -The full message is provided as the parameter. If the signature algorithm -depends on hashing of the message prior to signature calculation, the -implementor of this interface must perform such hashing. 
-The function must return raw bytes representing the calculated signature -using the current algorithm, and the key used (if applicable). -For an example see EcdsaSigner in sign_test.go. -*/ -type Signer interface { - Sign(data []byte) ([]byte, error) - KeyID() (string, error) -} - -// SignVerifer provides both the signing and verification interface. -type SignVerifier interface { - Signer - Verifier -} - // EnvelopeSigner creates signed Envelopes. type EnvelopeSigner struct { - providers []SignVerifier - ev *EnvelopeVerifier + providers []SignerVerifier } /* -NewEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer -algorithms to sign the data. -Creates a verifier with threshold=1, at least one of the providers must validate signitures successfully. +NewEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer algorithms to +sign the data. Creates a verifier with threshold=1, at least one of the +providers must validate signatures successfully. */ -func NewEnvelopeSigner(p ...SignVerifier) (*EnvelopeSigner, error) { +func NewEnvelopeSigner(p ...SignerVerifier) (*EnvelopeSigner, error) { return NewMultiEnvelopeSigner(1, p...) } /* NewMultiEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer -algorithms to sign the data. -Creates a verifier with threshold. -threashold indicates the amount of providers that must validate the envelope. +algorithms to sign the data. Creates a verifier with threshold. Threshold +indicates the amount of providers that must validate the envelope. 
*/ -func NewMultiEnvelopeSigner(threshold int, p ...SignVerifier) (*EnvelopeSigner, error) { - var providers []SignVerifier +func NewMultiEnvelopeSigner(threshold int, p ...SignerVerifier) (*EnvelopeSigner, error) { + var providers []SignerVerifier for _, sv := range p { if sv != nil { @@ -121,19 +45,8 @@ func NewMultiEnvelopeSigner(threshold int, p ...SignVerifier) (*EnvelopeSigner, return nil, ErrNoSigners } - evps := []Verifier{} - for _, p := range providers { - evps = append(evps, p.(Verifier)) - } - - ev, err := NewMultiEnvelopeVerifier(threshold, evps...) - if err != nil { - return nil, err - } - return &EnvelopeSigner{ providers: providers, - ev: ev, }, nil } @@ -143,7 +56,7 @@ Returned is an envelope as defined here: https://github.com/secure-systems-lab/dsse/blob/master/envelope.md One signature will be added for each Signer in the EnvelopeSigner. */ -func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelope, error) { +func (es *EnvelopeSigner) SignPayload(ctx context.Context, payloadType string, body []byte) (*Envelope, error) { var e = Envelope{ Payload: base64.StdEncoding.EncodeToString(body), PayloadType: payloadType, @@ -152,7 +65,7 @@ func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelop paeEnc := PAE(payloadType, body) for _, signer := range es.providers { - sig, err := signer.Sign(paeEnc) + sig, err := signer.Sign(ctx, paeEnc) if err != nil { return nil, err } @@ -169,29 +82,3 @@ func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelop return &e, nil } - -/* -Verify decodes the payload and verifies the signature. -Any domain specific validation such as parsing the decoded body and -validating the payload type is left out to the caller. -Verify returns a list of accepted keys each including a keyid, public and signiture of the accepted provider keys. 
-*/ -func (es *EnvelopeSigner) Verify(e *Envelope) ([]AcceptedKey, error) { - return es.ev.Verify(e) -} - -/* -Both standard and url encoding are allowed: -https://github.com/secure-systems-lab/dsse/blob/master/envelope.md -*/ -func b64Decode(s string) ([]byte, error) { - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b, err = base64.URLEncoding.DecodeString(s) - if err != nil { - return nil, err - } - } - - return b, nil -} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/signerverifier.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/signerverifier.go new file mode 100644 index 000000000000..99d03c7df9b6 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/signerverifier.go @@ -0,0 +1,43 @@ +package dsse + +import ( + "context" + "crypto" +) + +/* +Signer defines the interface for an abstract signing algorithm. The Signer +interface is used to inject signature algorithm implementations into the +EnvelopeSigner. This decoupling allows for any signing algorithm and key +management system can be used. The full message is provided as the parameter. +If the signature algorithm depends on hashing of the message prior to signature +calculation, the implementor of this interface must perform such hashing. The +function must return raw bytes representing the calculated signature using the +current algorithm, and the key used (if applicable). +*/ +type Signer interface { + Sign(ctx context.Context, data []byte) ([]byte, error) + KeyID() (string, error) +} + +/* +Verifier verifies a complete message against a signature and key. If the message +was hashed prior to signature generation, the verifier must perform the same +steps. If KeyID returns successfully, only signature matching the key ID will be +verified. 
+*/ +type Verifier interface { + Verify(ctx context.Context, data, sig []byte) error + KeyID() (string, error) + Public() crypto.PublicKey +} + +// SignerVerifier provides both the signing and verification interface. +type SignerVerifier interface { + Signer + Verifier +} + +// Deprecated: switch to renamed SignerVerifier. This is currently aliased for +// backwards compatibility. +type SignVerifier = SignerVerifier diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go index ead1c32ca80b..a36146b82a7d 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go @@ -1,6 +1,7 @@ package dsse import ( + "context" "crypto" "errors" "fmt" @@ -8,17 +9,8 @@ import ( "golang.org/x/crypto/ssh" ) -/* -Verifier verifies a complete message against a signature and key. -If the message was hashed prior to signature generation, the verifier -must perform the same steps. -If KeyID returns successfully, only signature matching the key ID will be verified. -*/ -type Verifier interface { - Verify(data, sig []byte) error - KeyID() (string, error) - Public() crypto.PublicKey -} +// ErrNoSignature indicates that an envelope did not contain any signatures. 
+var ErrNoSignature = errors.New("no signature found") type EnvelopeVerifier struct { providers []Verifier @@ -31,7 +23,7 @@ type AcceptedKey struct { Sig Signature } -func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) { +func (ev *EnvelopeVerifier) Verify(ctx context.Context, e *Envelope) ([]AcceptedKey, error) { if e == nil { return nil, errors.New("cannot verify a nil envelope") } @@ -78,7 +70,7 @@ func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) { continue } - err = v.Verify(paeEnc, sig) + err = v.Verify(ctx, paeEnc, sig) if err != nil { continue } @@ -104,11 +96,11 @@ func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) { // Sanity if with some reflect magic this happens. if ev.threshold <= 0 || ev.threshold > len(ev.providers) { - return nil, errors.New("Invalid threshold") + return nil, errors.New("invalid threshold") } if len(usedKeyids) < ev.threshold { - return acceptedKeys, errors.New(fmt.Sprintf("Accepted signatures do not match threshold, Found: %d, Expected %d", len(acceptedKeys), ev.threshold)) + return acceptedKeys, fmt.Errorf("accepted signatures do not match threshold, Found: %d, Expected %d", len(acceptedKeys), ev.threshold) } return acceptedKeys, nil @@ -119,15 +111,15 @@ func NewEnvelopeVerifier(v ...Verifier) (*EnvelopeVerifier, error) { } func NewMultiEnvelopeVerifier(threshold int, p ...Verifier) (*EnvelopeVerifier, error) { - if threshold <= 0 || threshold > len(p) { - return nil, errors.New("Invalid threshold") + return nil, errors.New("invalid threshold") } ev := EnvelopeVerifier{ providers: p, threshold: threshold, } + return &ev, nil } diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go new file mode 100644 index 000000000000..578d6a5483d5 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go @@ -0,0 +1,111 
@@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/sha256" + "crypto/sha512" + "fmt" + "os" +) + +const ECDSAKeyType = "ecdsa" + +// ECDSASignerVerifier is a dsse.SignerVerifier compliant interface to sign and +// verify signatures using ECDSA keys. +type ECDSASignerVerifier struct { + keyID string + curveSize int + private *ecdsa.PrivateKey + public *ecdsa.PublicKey +} + +// NewECDSASignerVerifierFromSSLibKey creates an ECDSASignerVerifier from an +// SSLibKey. +func NewECDSASignerVerifierFromSSLibKey(key *SSLibKey) (*ECDSASignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + _, publicParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Public)) + if err != nil { + return nil, fmt.Errorf("unable to create ECDSA signerverifier: %w", err) + } + + sv := &ECDSASignerVerifier{ + keyID: key.KeyID, + curveSize: publicParsedKey.(*ecdsa.PublicKey).Params().BitSize, + public: publicParsedKey.(*ecdsa.PublicKey), + private: nil, + } + + if len(key.KeyVal.Private) > 0 { + _, privateParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Private)) + if err != nil { + return nil, fmt.Errorf("unable to create ECDSA signerverifier: %w", err) + } + + sv.private = privateParsedKey.(*ecdsa.PrivateKey) + } + + return sv, nil +} + +// Sign creates a signature for `data`. +func (sv *ECDSASignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if sv.private == nil { + return nil, ErrNotPrivateKey + } + + hashedData := getECDSAHashedData(data, sv.curveSize) + + return ecdsa.SignASN1(rand.Reader, sv.private, hashedData) +} + +// Verify verifies the `sig` value passed in against `data`. 
+func (sv *ECDSASignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + hashedData := getECDSAHashedData(data, sv.curveSize) + + if ok := ecdsa.VerifyASN1(sv.public, hashedData, sig); !ok { + return ErrSignatureVerificationFailed + } + + return nil +} + +// KeyID returns the identifier of the key used to create the +// ECDSASignerVerifier instance. +func (sv *ECDSASignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// ECDSASignerVerifier instance. +func (sv *ECDSASignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadECDSAKeyFromFile returns an SSLibKey instance for an ECDSA key stored in +// a file in the custom securesystemslib format. +func LoadECDSAKeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load ECDSA key from file: %w", err) + } + + return loadKeyFromSSLibBytes(contents) +} + +func getECDSAHashedData(data []byte, curveSize int) []byte { + switch { + case curveSize <= 256: + return hashBeforeSigning(data, sha256.New()) + case 256 < curveSize && curveSize <= 384: + return hashBeforeSigning(data, sha512.New384()) + case curveSize > 384: + return hashBeforeSigning(data, sha512.New()) + } + return []byte{} +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go new file mode 100644 index 000000000000..c71d313a75dc --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go @@ -0,0 +1,98 @@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/ed25519" + "encoding/hex" + "fmt" + "os" +) + +const ED25519KeyType = "ed25519" + +// ED25519SignerVerifier is a dsse.SignerVerifier compliant interface to sign +// and verify signatures using ED25519 keys. 
+type ED25519SignerVerifier struct { + keyID string + private ed25519.PrivateKey + public ed25519.PublicKey +} + +// NewED25519SignerVerifierFromSSLibKey creates an Ed25519SignerVerifier from an +// SSLibKey. +func NewED25519SignerVerifierFromSSLibKey(key *SSLibKey) (*ED25519SignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + public, err := hex.DecodeString(key.KeyVal.Public) + if err != nil { + return nil, fmt.Errorf("unable to create ED25519 signerverifier: %w", err) + } + + var private []byte + if len(key.KeyVal.Private) > 0 { + private, err = hex.DecodeString(key.KeyVal.Private) + if err != nil { + return nil, fmt.Errorf("unable to create ED25519 signerverifier: %w", err) + } + + // python-securesystemslib provides an interface to generate ed25519 + // keys but it differs slightly in how it serializes the key to disk. + // Specifically, the keyval.private field includes _only_ the private + // portion of the key while libraries such as crypto/ed25519 also expect + // the public portion. So, if the private portion is half of what we + // expect, we append the public portion as well. + if len(private) == ed25519.PrivateKeySize/2 { + private = append(private, public...) + } + } + + return &ED25519SignerVerifier{ + keyID: key.KeyID, + public: ed25519.PublicKey(public), + private: ed25519.PrivateKey(private), + }, nil +} + +// Sign creates a signature for `data`. +func (sv *ED25519SignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if len(sv.private) == 0 { + return nil, ErrNotPrivateKey + } + + signature := ed25519.Sign(sv.private, data) + return signature, nil +} + +// Verify verifies the `sig` value passed in against `data`. 
+func (sv *ED25519SignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + if ok := ed25519.Verify(sv.public, data, sig); ok { + return nil + } + return ErrSignatureVerificationFailed +} + +// KeyID returns the identifier of the key used to create the +// ED25519SignerVerifier instance. +func (sv *ED25519SignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// ED25519SignerVerifier instance. +func (sv *ED25519SignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadED25519KeyFromFile returns an SSLibKey instance for an ED25519 key stored +// in a file in the custom securesystemslib format. +func LoadED25519KeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load ED25519 key from file: %w", err) + } + + return loadKeyFromSSLibBytes(contents) +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go new file mode 100644 index 000000000000..3612f28a4b2f --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go @@ -0,0 +1,141 @@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "fmt" + "os" + "strings" +) + +const ( + RSAKeyType = "rsa" + RSAKeyScheme = "rsassa-pss-sha256" + RSAPrivateKeyPEM = "RSA PRIVATE KEY" +) + +// RSAPSSSignerVerifier is a dsse.SignerVerifier compliant interface to sign and +// verify signatures using RSA keys following the RSA-PSS scheme. +type RSAPSSSignerVerifier struct { + keyID string + private *rsa.PrivateKey + public *rsa.PublicKey +} + +// NewRSAPSSSignerVerifierFromSSLibKey creates an RSAPSSSignerVerifier from an +// SSLibKey. 
+func NewRSAPSSSignerVerifierFromSSLibKey(key *SSLibKey) (*RSAPSSSignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + _, publicParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Public)) + if err != nil { + return nil, fmt.Errorf("unable to create RSA-PSS signerverifier: %w", err) + } + + if len(key.KeyVal.Private) > 0 { + _, privateParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Private)) + if err != nil { + return nil, fmt.Errorf("unable to create RSA-PSS signerverifier: %w", err) + } + + return &RSAPSSSignerVerifier{ + keyID: key.KeyID, + public: publicParsedKey.(*rsa.PublicKey), + private: privateParsedKey.(*rsa.PrivateKey), + }, nil + } + + return &RSAPSSSignerVerifier{ + keyID: key.KeyID, + public: publicParsedKey.(*rsa.PublicKey), + private: nil, + }, nil +} + +// Sign creates a signature for `data`. +func (sv *RSAPSSSignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if sv.private == nil { + return nil, ErrNotPrivateKey + } + + hashedData := hashBeforeSigning(data, sha256.New()) + + return rsa.SignPSS(rand.Reader, sv.private, crypto.SHA256, hashedData, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}) +} + +// Verify verifies the `sig` value passed in against `data`. +func (sv *RSAPSSSignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + hashedData := hashBeforeSigning(data, sha256.New()) + + if err := rsa.VerifyPSS(sv.public, crypto.SHA256, hashedData, sig, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}); err != nil { + return ErrSignatureVerificationFailed + } + + return nil +} + +// KeyID returns the identifier of the key used to create the +// RSAPSSSignerVerifier instance. +func (sv *RSAPSSSignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// RSAPSSSignerVerifier instance. 
+func (sv *RSAPSSSignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadRSAPSSKeyFromFile returns an SSLibKey instance for an RSA key stored in a +// file. +func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + + pemData, keyObj, err := decodeAndParsePEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + + key := &SSLibKey{ + KeyType: RSAKeyType, + Scheme: RSAKeyScheme, + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyVal: KeyVal{}, + } + + switch k := keyObj.(type) { + case *rsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyVal.Public = strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))) + + case *rsa.PrivateKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k.Public()) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyVal.Public = strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))) + key.KeyVal.Private = strings.TrimSpace(string(generatePEMBlock(pemData.Bytes, RSAPrivateKeyPEM))) + } + + if len(key.KeyID) == 0 { + keyID, err := calculateKeyID(key) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyID = keyID + } + + return key, nil +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go new file mode 100644 index 000000000000..5f510f7be571 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go @@ -0,0 +1,34 @@ +package signerverifier + +import ( + "errors" +) + +var KeyIDHashAlgorithms = []string{"sha256", 
"sha512"} + +var ( + ErrNotPrivateKey = errors.New("loaded key is not a private key") + ErrSignatureVerificationFailed = errors.New("failed to verify signature") + ErrUnknownKeyType = errors.New("unknown key type") + ErrInvalidThreshold = errors.New("threshold is either less than 1 or greater than number of provided public keys") + ErrInvalidKey = errors.New("key object has no value") +) + +const ( + PublicKeyPEM = "PUBLIC KEY" + PrivateKeyPEM = "PRIVATE KEY" +) + +type SSLibKey struct { + KeyIDHashAlgorithms []string `json:"keyid_hash_algorithms"` + KeyType string `json:"keytype"` + KeyVal KeyVal `json:"keyval"` + Scheme string `json:"scheme"` + KeyID string `json:"keyid"` +} + +type KeyVal struct { + Private string `json:"private,omitempty"` + Public string `json:"public"` + Certificate string `json:"certificate,omitempty"` +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go new file mode 100644 index 000000000000..73aaa77d46a2 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go @@ -0,0 +1,150 @@ +package signerverifier + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "hash" + "testing" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" +) + +/* +Credits: Parts of this file were originally authored for in-toto-golang. +*/ + +var ( + // ErrNoPEMBlock gets triggered when there is no PEM block in the provided file + ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)") + // ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails + ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type") +) + +// loadKeyFromSSLibBytes returns a pointer to a Key instance created from the +// contents of the bytes. 
The key contents are expected to be in the custom +// securesystemslib format. +func loadKeyFromSSLibBytes(contents []byte) (*SSLibKey, error) { + var key *SSLibKey + if err := json.Unmarshal(contents, &key); err != nil { + return nil, err + } + + if len(key.KeyID) == 0 { + keyID, err := calculateKeyID(key) + if err != nil { + return nil, err + } + key.KeyID = keyID + } + + return key, nil +} + +func calculateKeyID(k *SSLibKey) (string, error) { + key := map[string]any{ + "keytype": k.KeyType, + "scheme": k.Scheme, + "keyid_hash_algorithms": k.KeyIDHashAlgorithms, + "keyval": map[string]string{ + "public": k.KeyVal.Public, + }, + } + canonical, err := cjson.EncodeCanonical(key) + if err != nil { + return "", err + } + digest := sha256.Sum256(canonical) + return hex.EncodeToString(digest[:]), nil +} + +/* +generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType. +If successful it returns a PEM block as []byte slice. This function should always +succeed, if keyBytes is empty the PEM block will have an empty byte block. +Therefore only header and footer will exist. +*/ +func generatePEMBlock(keyBytes []byte, pemType string) []byte { + // construct PEM block + pemBlock := &pem.Block{ + Type: pemType, + Headers: nil, + Bytes: keyBytes, + } + return pem.EncodeToMemory(pemBlock) +} + +/* +decodeAndParsePEM receives potential PEM bytes decodes them via pem.Decode +and pushes them to parseKey. If any error occurs during this process, +the function will return nil and an error (either ErrFailedPEMParsing +or ErrNoPEMBlock). On success it will return the decoded pemData, the +key object interface and nil as error. We need the decoded pemData, +because LoadKey relies on decoded pemData for operating system +interoperability. +*/ +func decodeAndParsePEM(pemBytes []byte) (*pem.Block, any, error) { + // pem.Decode returns the parsed pem block and a rest. + // The rest is everything, that could not be parsed as PEM block. 
+ // Therefore we can drop this via using the blank identifier "_" + data, _ := pem.Decode(pemBytes) + if data == nil { + return nil, nil, ErrNoPEMBlock + } + + // Try to load private key, if this fails try to load + // key as public key + key, err := parsePEMKey(data.Bytes) + if err != nil { + return nil, nil, err + } + return data, key, nil +} + +/* +parseKey tries to parse a PEM []byte slice. Using the following standards +in the given order: + + - PKCS8 + - PKCS1 + - PKIX + +On success it returns the parsed key and nil. +On failure it returns nil and the error ErrFailedPEMParsing +*/ +func parsePEMKey(data []byte) (any, error) { + key, err := x509.ParsePKCS8PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKCS1PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKIXPublicKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParseECPrivateKey(data) + if err == nil { + return key, nil + } + return nil, ErrFailedPEMParsing +} + +func hashBeforeSigning(data []byte, h hash.Hash) []byte { + h.Write(data) + return h.Sum(nil) +} + +func hexDecode(t *testing.T, data string) []byte { + t.Helper() + b, err := hex.DecodeString(data) + if err != nil { + t.Fatal(err) + } + return b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 1049ea7d92ac..783fccc7008d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -606,12 +606,16 @@ github.com/hashicorp/go-retryablehttp # github.com/hashicorp/golang-lru v0.5.4 ## explicit; go 1.12 github.com/hashicorp/golang-lru/simplelru -# github.com/in-toto/in-toto-golang v0.5.0 -## explicit; go 1.17 +# github.com/in-toto/attestation v0.1.1-0.20230828220013-11b7a1a4ca51 +## explicit; go 1.20 +github.com/in-toto/attestation/go/v1 +# github.com/in-toto/in-toto-golang v0.9.1-0.20230919171745-f55a6fe48c49 +## explicit; go 1.20 github.com/in-toto/in-toto-golang/in_toto github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common 
github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1 github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2 +github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1 # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath @@ -718,10 +722,11 @@ github.com/prometheus/procfs/internal/util # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 -# github.com/secure-systems-lab/go-securesystemslib v0.4.0 -## explicit; go 1.17 +# github.com/secure-systems-lab/go-securesystemslib v0.7.0 +## explicit; go 1.20 github.com/secure-systems-lab/go-securesystemslib/cjson github.com/secure-systems-lab/go-securesystemslib/dsse +github.com/secure-systems-lab/go-securesystemslib/signerverifier # github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 ## explicit github.com/serialx/hashring