diff --git a/client/client_test.go b/client/client_test.go index d501f37c99de..758b74b1dd91 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -34,6 +34,7 @@ import ( "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/snapshots" "github.com/containerd/continuity/fs/fstest" + intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/exporter/containerimage/exptypes" gateway "github.com/moby/buildkit/frontend/gateway/client" @@ -43,6 +44,7 @@ import ( "github.com/moby/buildkit/session/sshforward/sshprovider" "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/attestation" binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/entitlements" @@ -50,6 +52,7 @@ import ( "github.com/moby/buildkit/util/testutil/echoserver" "github.com/moby/buildkit/util/testutil/httpserver" "github.com/moby/buildkit/util/testutil/integration" + digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/stretchr/testify/require" @@ -167,6 +170,7 @@ func TestIntegration(t *testing.T) { testCallInfo, testPullWithLayerLimit, testExportAnnotations, + testExportAttestations, ) tests = append(tests, diffOpTestCases()...) 
integration.Run(t, tests, mirrors) @@ -6253,6 +6257,195 @@ func testExportAnnotations(t *testing.T, sb integration.Sandbox) { } } +func testExportAttestations(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + + ps := []ocispecs.Platform{ + platforms.MustParse("linux/amd64"), + platforms.MustParse("linux/arm64"), + } + + success := []byte(`{"success": true}`) + successDigest := digest.SHA256.FromBytes(success) + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + expPlatforms := &exptypes.Platforms{} + + for _, p := range ps { + pk := platforms.Format(p) + expPlatforms.Platforms = append(expPlatforms.Platforms, exptypes.Platform{ID: pk, Platform: p}) + + // build image + st := llb.Scratch().File( + llb.Mkfile("/greeting", 0600, []byte(fmt.Sprintf("hello %s!", pk))), + ) + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddRef(pk, ref) + + // build attestations + st = llb.Scratch(). + File(llb.Mkfile("/attestation.json", 0600, success)). 
+ File(llb.Mkfile("/attestation2.json", 0600, []byte{})) + def, err = st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err = c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + refAttest, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddAttestation(pk, &gateway.InTotoAttestation{ + PredicateRef: refAttest, + PredicatePath: "/attestation.json", + PredicateType: "https://example.com/attestations/v1.0", + Subjects: []attestation.InTotoSubject{ + &attestation.InTotoSubjectSelf{}, + }, + }) + res.AddAttestation(pk, &gateway.InTotoAttestation{ + PredicateRef: refAttest, + PredicatePath: "/attestation2.json", + PredicateType: "https://example.com/attestations2/v1.0", + Subjects: []attestation.InTotoSubject{ + &attestation.InTotoSubjectRaw{ + Name: "/attestation.json", + Digest: []digest.Digest{successDigest}, + }, + }, + }) + } + + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + return res, nil + } + + target := registry + "/buildkit/testattestations:latest" + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, "", frontend, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + + index, err := testutil.ReadIndex(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, len(ps)*2, len(index)) + + var imgs []*testutil.ImageInfo + for _, p := range ps { + pk := platforms.Format(p) + img := index.Find(pk) + require.NotNil(t, img) + require.Equal(t, pk, platforms.Format(*img.Desc.Platform)) + require.Equal(t, 1, len(img.Layers)) + require.Equal(t, []byte(fmt.Sprintf("hello %s!", pk)), img.Layers[0]["greeting"].Data) + imgs = append(imgs, 
img) + } + + atts := index.Filter("unknown/unknown") + require.Equal(t, len(ps), len(atts)) + for i, att := range atts { + require.Equal(t, "unknown/unknown", platforms.Format(*att.Desc.Platform)) + require.Equal(t, "unknown/unknown", att.Img.OS+"/"+att.Img.Architecture) + require.Equal(t, attestation.DockerAnnotationReferenceTypeDefault, att.Desc.Annotations[attestation.DockerAnnotationReferenceType]) + require.Equal(t, imgs[i].Desc.Digest.String(), att.Desc.Annotations[attestation.DockerAnnotationReferenceDigest]) + require.Equal(t, 2, len(att.Layers)) + require.Equal(t, len(att.Layers), len(att.Img.RootFS.DiffIDs)) + require.Equal(t, len(att.Img.History), 0) + + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType) + require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate) + subjects := []intoto.Subject{{ + Name: "_", + Digest: map[string]string{ + "sha256": imgs[i].Desc.Digest.Encoded(), + }, + }} + require.Equal(t, subjects, attest.Subject) + + var attest2 intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[1], &attest2)) + + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest2.Type) + require.Equal(t, "https://example.com/attestations2/v1.0", attest2.PredicateType) + require.Nil(t, attest2.Predicate) + subjects = []intoto.Subject{{ + Name: "/attestation.json", + Digest: map[string]string{ + "sha256": successDigest.Encoded(), + }, + }} + require.Equal(t, subjects, attest2.Subject) + } + + cdAddress := sb.ContainerdAddress() + if cdAddress == "" { + return + } + client, err := containerd.New(cdAddress) + require.NoError(t, err) + defer client.Close() + ctx := namespaces.WithNamespace(sb.Context(), "buildkit") + + err = client.ImageService().Delete(ctx, target, images.SynchronousDelete()) + require.NoError(t, err) + + 
checkAllReleasable(t, c, sb, true) +} + func makeSSHAgentSock(t *testing.T, agent agent.Agent) (p string, err error) { tmpDir, err := integration.Tmpdir(t) if err != nil { diff --git a/exporter/containerimage/attestations.go b/exporter/containerimage/attestations.go new file mode 100644 index 000000000000..89b56567302d --- /dev/null +++ b/exporter/containerimage/attestations.go @@ -0,0 +1,10 @@ +package containerimage + +import ( + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +var intotoPlatform ocispecs.Platform = ocispecs.Platform{ + Architecture: "unknown", + OS: "unknown", +} diff --git a/exporter/containerimage/writer.go b/exporter/containerimage/writer.go index ee82ae8f529d..d9af277dbc93 100644 --- a/exporter/containerimage/writer.go +++ b/exporter/containerimage/writer.go @@ -5,6 +5,8 @@ import ( "context" "encoding/json" "fmt" + "os" + "path" "strings" "time" @@ -12,6 +14,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" + intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/exporter" @@ -19,6 +22,7 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/buildinfo" binfotypes "github.com/moby/buildkit/util/buildinfo/types" @@ -132,6 +136,8 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, sessionI labels := map[string]string{} + var attestationManifests []ocispecs.Descriptor + for i, p := range p.Platforms { r, ok := inp.Refs[p.ID] if !ok { @@ -158,6 +164,25 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, sessionI idx.Manifests = append(idx.Manifests, *desc) labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = 
desc.Digest.String() + + if attestations, ok := inp.Attestations[p.ID]; ok { + inTotos, err := ic.extractAttestations(ctx, session.NewGroup(sessionID), desc, attestations...) + if err != nil { + return nil, err + } + + desc, err := ic.commitAttestationsManifest(ctx, p, desc.Digest.String(), opts.OCITypes, inTotos) + if err != nil { + return nil, err + } + desc.Platform = &intotoPlatform + attestationManifests = append(attestationManifests, *desc) + } + } + + for i, mfst := range attestationManifests { + idx.Manifests = append(idx.Manifests, mfst) + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", len(p.Platforms)+i)] = mfst.Digest.String() } idxBytes, err := json.MarshalIndent(idx, "", " ") @@ -219,10 +244,72 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefC return out, err } +func (ic *ImageWriter) extractAttestations(ctx context.Context, s session.Group, desc *ocispecs.Descriptor, attestations ...exporter.Attestation) ([]intoto.Statement, error) { + eg, ctx := errgroup.WithContext(ctx) + statements := make([]intoto.Statement, len(attestations)) + + for i, att := range attestations { + i, att := i, att + eg.Go(func() error { + switch att := att.(type) { + case *exporter.InTotoAttestation: + mount, err := att.PredicateRef.Mount(ctx, true, s) + if err != nil { + return err + } + + lm := snapshot.LocalMounter(mount) + src, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() + + predicate, err := os.ReadFile(path.Join(src, att.PredicatePath)) + if err != nil { + return err + } + if len(predicate) == 0 { + predicate = nil + } + statements[i] = intoto.Statement{ + StatementHeader: intoto.StatementHeader{ + Type: intoto.StatementInTotoV01, + PredicateType: att.PredicateType, + }, + Predicate: json.RawMessage(predicate), + } + for _, subject := range att.Subjects { + switch subject2 := subject.(type) { + case *attestation.InTotoSubjectSelf: + statements[i].Subject = append(statements[i].Subject, intoto.Subject{ 
+ Name: "_", + Digest: attestation.DigestToDigestMap(desc.Digest), + }) + case *attestation.InTotoSubjectRaw: + statements[i].Subject = append(statements[i].Subject, intoto.Subject{ + Name: subject2.Name, + Digest: subject2.DigestMap(), + }) + default: + return errors.Errorf("unknown attestation subject type %T", subject) + } + } + } + return nil + }) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + return statements, nil +} + func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, oci bool, inlineCache []byte, buildInfo []byte) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { if len(config) == 0 { var err error - config, err = emptyImageConfig() + config, err = defaultImageConfig() if err != nil { return nil, nil, err } @@ -284,19 +371,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache } for i, desc := range remote.Descriptors { - // oci supports annotations but don't export internal annotations - if oci { - delete(desc.Annotations, "containerd.io/uncompressed") - delete(desc.Annotations, "buildkit/createdat") - for k := range desc.Annotations { - if strings.HasPrefix(k, "containerd.io/distribution.source.") { - delete(desc.Annotations, k) - } - } - } else { - desc.Annotations = nil - } - + removeInternalLayerAnnotations(&desc, oci) mfst.Layers = append(mfst.Layers, desc) labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String() } @@ -338,6 +413,112 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache }, &configDesc, nil } +func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, p exptypes.Platform, target string, oci bool, statements []intoto.Statement) (*ocispecs.Descriptor, error) { + var ( + manifestType = ocispecs.MediaTypeImageManifest + configType = ocispecs.MediaTypeImageConfig + ) + if !oci { + manifestType = 
images.MediaTypeDockerSchema2Manifest + configType = images.MediaTypeDockerSchema2Config + } + + layers := make([]ocispecs.Descriptor, len(statements)) + for i, statement := range statements { + i, statement := i, statement + + data, err := json.Marshal(statement) + if err != nil { + return nil, err + } + digest := digest.FromBytes(data) + desc := ocispecs.Descriptor{ + MediaType: attestation.MediaTypeDockerSchema2AttestationType, + Digest: digest, + Size: int64(len(data)), + Annotations: map[string]string{ + "containerd.io/uncompressed": digest.String(), + "in-toto.io/predicate-type": statement.PredicateType, + }, + } + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, digest.String(), bytes.NewReader(data), desc); err != nil { + return nil, errors.Wrapf(err, "error writing data blob %s", digest) + } + layers[i] = desc + } + + config, err := attestationsConfig(layers) + if err != nil { + return nil, err + } + configDigest := digest.FromBytes(config) + configDesc := ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + } + + mfst := struct { + // MediaType is reserved in the OCI spec but + // excluded from go types. 
+ MediaType string `json:"mediaType,omitempty"` + + ocispecs.Manifest + }{ + MediaType: manifestType, + Manifest: ocispecs.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + }, + }, + } + + labels := map[string]string{ + "containerd.io/gc.ref.content.0": configDigest.String(), + } + for i, desc := range layers { + removeInternalLayerAnnotations(&desc, oci) + mfst.Layers = append(mfst.Layers, desc) + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String() + } + + mfstJSON, err := json.MarshalIndent(mfst, "", " ") + if err != nil { + return nil, errors.Wrap(err, "failed to marshal manifest") + } + + mfstDigest := digest.FromBytes(mfstJSON) + mfstDesc := ocispecs.Descriptor{ + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + } + + done := progress.OneOff(ctx, "exporting attestation manifest "+mfstDigest.String()) + if err := content.WriteBlob(ctx, ic.opt.ContentStore, mfstDigest.String(), bytes.NewReader(mfstJSON), mfstDesc, content.WithLabels((labels))); err != nil { + return nil, done(errors.Wrapf(err, "error writing manifest blob %s", mfstDigest)) + } + if err := content.WriteBlob(ctx, ic.opt.ContentStore, configDigest.String(), bytes.NewReader(config), configDesc); err != nil { + return nil, done(errors.Wrap(err, "error writing config blob")) + } + done(nil) + + return &ocispecs.Descriptor{ + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + MediaType: manifestType, + Annotations: map[string]string{ + attestation.DockerAnnotationReferenceType: attestation.DockerAnnotationReferenceTypeDefault, + attestation.DockerAnnotationReferenceDigest: target, + }, + }, nil +} + func (ic *ImageWriter) ContentStore() content.Store { return ic.opt.ContentStore } @@ -350,22 +531,13 @@ func (ic *ImageWriter) Applier() diff.Applier { return ic.opt.Applier } -func emptyImageConfig() ([]byte, error) { +func defaultImageConfig() 
([]byte, error) { pl := platforms.Normalize(platforms.DefaultSpec()) - type image struct { - ocispecs.Image - - // Variant defines platform variant. To be added to OCI. - Variant string `json:"variant,omitempty"` - } - - img := image{ - Image: ocispecs.Image{ - Architecture: pl.Architecture, - OS: pl.OS, - }, - Variant: pl.Variant, + img := ocispecs.Image{ + Architecture: pl.Architecture, + OS: pl.OS, + Variant: pl.Variant, } img.RootFS.Type = "layers" img.Config.WorkingDir = "/" @@ -374,6 +546,22 @@ func emptyImageConfig() ([]byte, error) { return dt, errors.Wrap(err, "failed to create empty image config") } +func attestationsConfig(layers []ocispecs.Descriptor) ([]byte, error) { + img := ocispecs.Image{ + Architecture: intotoPlatform.Architecture, + OS: intotoPlatform.OS, + OSVersion: intotoPlatform.OSVersion, + OSFeatures: intotoPlatform.OSFeatures, + Variant: intotoPlatform.Variant, + } + img.RootFS.Type = "layers" + for _, layer := range layers { + img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, digest.Digest(layer.Annotations["containerd.io/uncompressed"])) + } + dt, err := json.Marshal(img) + return dt, errors.Wrap(err, "failed to create attestations image config") +} + func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) { var config struct { History []ocispecs.History @@ -530,6 +718,21 @@ func normalizeLayersAndHistory(ctx context.Context, remote *solver.Remote, histo return remote, history } +func removeInternalLayerAnnotations(desc *ocispecs.Descriptor, oci bool) { + if oci { + // oci supports annotations but don't export internal annotations + delete(desc.Annotations, "containerd.io/uncompressed") + delete(desc.Annotations, "buildkit/createdat") + for k := range desc.Annotations { + if strings.HasPrefix(k, "containerd.io/distribution.source.") { + delete(desc.Annotations, k) + } + } + } else { + desc.Annotations = nil + } +} + type refMetadata struct { description string createdAt *time.Time diff --git a/exporter/exporter.go 
b/exporter/exporter.go index 8b79e440c0ba..aca156f77bf3 100644 --- a/exporter/exporter.go +++ b/exporter/exporter.go @@ -4,6 +4,7 @@ import ( "context" "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/compression" ) @@ -22,7 +23,21 @@ type Config struct { } type Source struct { - Ref cache.ImmutableRef - Refs map[string]cache.ImmutableRef - Metadata map[string][]byte + Ref cache.ImmutableRef + Refs map[string]cache.ImmutableRef + Metadata map[string][]byte + Attestations map[string][]Attestation } + +type Attestation interface { + isExporterAttestation() +} + +type InTotoAttestation struct { + PredicateType string + PredicateRef cache.ImmutableRef + PredicatePath string + Subjects []attestation.InTotoSubject +} + +func (a *InTotoAttestation) isExporterAttestation() {} diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index 511c4f20b458..d1c80cf96c93 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -3953,14 +3953,14 @@ COPY --from=base arch / desc, provider, err := contentutil.ProviderFromRef(target + "-img") require.NoError(t, err) - imgMap, err := readIndex(sb.Context(), provider, desc) + imgMap, err := testutil.ReadIndex(sb.Context(), provider, desc) require.NoError(t, err) require.Equal(t, 2, len(imgMap)) - require.Equal(t, "amd64", string(imgMap["linux/amd64"].layers[1]["arch"].Data)) - dtamd := imgMap["linux/amd64"].layers[0]["unique"].Data - dtarm := imgMap["linux/arm/v7"].layers[0]["unique"].Data + require.Equal(t, "amd64", string(imgMap.Find("linux/amd64").Layers[1]["arch"].Data)) + dtamd := imgMap.Find("linux/amd64").Layers[0]["unique"].Data + dtarm := imgMap.Find("linux/arm/v7").Layers[0]["unique"].Data require.NotEqual(t, dtamd, dtarm) for i := 0; i < 2; i++ { @@ -3993,14 +3993,14 @@ COPY --from=base arch / require.Equal(t, desc.Digest, desc2.Digest) - imgMap, err = readIndex(sb.Context(), provider, 
desc2) + imgMap, err = testutil.ReadIndex(sb.Context(), provider, desc2) require.NoError(t, err) require.Equal(t, 2, len(imgMap)) - require.Equal(t, "arm", string(imgMap["linux/arm/v7"].layers[1]["arch"].Data)) - dtamd2 := imgMap["linux/amd64"].layers[0]["unique"].Data - dtarm2 := imgMap["linux/arm/v7"].layers[0]["unique"].Data + require.Equal(t, "arm", string(imgMap.Find("linux/arm/v7").Layers[1]["arch"].Data)) + dtamd2 := imgMap.Find("linux/amd64").Layers[0]["unique"].Data + dtarm2 := imgMap.Find("linux/arm/v7").Layers[0]["unique"].Data require.Equal(t, string(dtamd), string(dtamd2)) require.Equal(t, string(dtarm), string(dtarm2)) } @@ -5307,7 +5307,7 @@ RUN echo foo >> /test desc, provider, err := contentutil.ProviderFromRef(target) require.NoError(t, err) - img, err := readImage(sb.Context(), provider, desc) + img, err := testutil.ReadImage(sb.Context(), provider, desc) require.NoError(t, err) dirDerived, err := integration.Tmpdir( @@ -5339,11 +5339,11 @@ RUN echo foo >> /test desc, provider, err = contentutil.ProviderFromRef(targetDerived) require.NoError(t, err) - imgDerived, err := readImage(sb.Context(), provider, desc) + imgDerived, err := testutil.ReadImage(sb.Context(), provider, desc) require.NoError(t, err) - require.NotEqual(t, img.img.Created, imgDerived.img.Created) - diff := imgDerived.img.Created.Sub(*img.img.Created) + require.NotEqual(t, img.Img.Created, imgDerived.Img.Created) + diff := imgDerived.Img.Created.Sub(*img.Img.Created) require.Greater(t, diff, time.Duration(0)) require.Less(t, diff, 10*time.Minute) } @@ -6043,64 +6043,3 @@ func fixedWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser return wc, nil } } - -type imageInfo struct { - desc ocispecs.Descriptor - img ocispecs.Image - layers []map[string]*testutil.TarItem -} - -func readIndex(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (map[string]*imageInfo, error) { - dt, err := content.ReadBlob(ctx, p, desc) - if err != nil { - return nil, err - 
} - var idx ocispecs.Index - if err := json.Unmarshal(dt, &idx); err != nil { - return nil, err - } - - mi := map[string]*imageInfo{} - - for _, m := range idx.Manifests { - img, err := readImage(ctx, p, m) - if err != nil { - return nil, err - } - mi[platforms.Format(*m.Platform)] = img - } - return mi, nil -} -func readImage(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*imageInfo, error) { - ii := &imageInfo{desc: desc} - - dt, err := content.ReadBlob(ctx, p, desc) - if err != nil { - return nil, err - } - var mfst ocispecs.Manifest - if err := json.Unmarshal(dt, &mfst); err != nil { - return nil, err - } - - dt, err = content.ReadBlob(ctx, p, mfst.Config) - if err != nil { - return nil, err - } - if err := json.Unmarshal(dt, &ii.img); err != nil { - return nil, err - } - - for _, l := range mfst.Layers { - dt, err := content.ReadBlob(ctx, p, l) - if err != nil { - return nil, err - } - m, err := testutil.ReadTarToMap(dt, true) - if err != nil { - return nil, err - } - ii.layers = append(ii.layers, m) - } - return ii, nil -} diff --git a/frontend/gateway/client/result.go b/frontend/gateway/client/result.go index bd5422847822..613308eea548 100644 --- a/frontend/gateway/client/result.go +++ b/frontend/gateway/client/result.go @@ -4,16 +4,31 @@ import ( "context" "sync" + "github.com/moby/buildkit/util/attestation" "github.com/pkg/errors" ) type BuildFunc func(context.Context, Client) (*Result, error) +type Attestation interface { + isClientAttestation() +} + +type InTotoAttestation struct { + PredicateType string + PredicateRef Reference + PredicatePath string + Subjects []attestation.InTotoSubject +} + +func (a *InTotoAttestation) isClientAttestation() {} + type Result struct { - mu sync.Mutex - Ref Reference - Refs map[string]Reference - Metadata map[string][]byte + mu sync.Mutex + Ref Reference + Refs map[string]Reference + Metadata map[string][]byte + Attestations map[string][]Attestation } func NewResult() *Result { @@ -38,6 +53,15 @@ 
func (r *Result) AddRef(k string, ref Reference) { r.mu.Unlock() } +func (r *Result) AddAttestation(k string, v Attestation) { + r.mu.Lock() + if r.Attestations == nil { + r.Attestations = map[string][]Attestation{} + } + r.Attestations[k] = append(r.Attestations[k], v) + r.mu.Unlock() +} + func (r *Result) SetRef(ref Reference) { r.Ref = ref } diff --git a/frontend/gateway/forwarder/forward.go b/frontend/gateway/forwarder/forward.go index c4b32e961b0e..986cd40ba8a0 100644 --- a/frontend/gateway/forwarder/forward.go +++ b/frontend/gateway/forwarder/forward.go @@ -18,6 +18,7 @@ import ( llberrdefs "github.com/moby/buildkit/solver/llbsolver/errdefs" opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" + "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -83,6 +84,16 @@ func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*cli c.refs = append(c.refs, rr) cRes.SetRef(rr) } + for k, as := range res.Attestations { + for _, a := range as { + att, rrs, err := c.newAttestation(a, session.NewGroup(c.sid)) + if err != nil { + return nil, err + } + c.refs = append(c.refs, rrs...) 
+ cRes.AddAttestation(k, att) + } + } c.mu.Unlock() cRes.Metadata = res.Metadata @@ -203,6 +214,43 @@ func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, err res.Ref = rr.acquireResultProxy() } res.Metadata = r.Metadata + if r.Attestations != nil { + res.Attestations = make(map[string][]frontend.Attestation) + for k, as := range r.Attestations { + for _, a := range as { + switch a := a.(type) { + case *client.InTotoAttestation: + rr, ok := a.PredicateRef.(*ref) + if !ok { + return nil, errors.Errorf("invalid reference type for forward %T", r) + } + + subjects := make([]attestation.InTotoSubject, len(a.Subjects)) + for i, s := range a.Subjects { + switch s := s.(type) { + case *attestation.InTotoSubjectSelf: + subjects[i] = &attestation.InTotoSubjectSelf{} + case *attestation.InTotoSubjectRaw: + subjects[i] = &attestation.InTotoSubjectRaw{ + Name: s.Name, + Digest: s.Digest, + } + default: + return nil, errors.Errorf("unknown attestation subject type %T", s) + } + } + res.Attestations[k] = append(res.Attestations[k], &frontend.InTotoAttestation{ + PredicateRef: rr.acquireResultProxy(), + PredicatePath: a.PredicatePath, + PredicateType: a.PredicateType, + Subjects: subjects, + }) + default: + return nil, errors.Errorf("unknown attestation type %T", a) + } + } + } + } return res, nil } @@ -302,6 +350,43 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return ctr, nil } +func (c *bridgeClient) newRef(r solver.ResultProxy, s session.Group) (*ref, error) { + return &ref{resultProxy: r, session: s, c: c}, nil +} + +func (c *bridgeClient) newAttestation(a frontend.Attestation, s session.Group) (client.Attestation, []*ref, error) { + switch a := a.(type) { + case *frontend.InTotoAttestation: + rr, err := c.newRef(a.PredicateRef, session.NewGroup(c.sid)) + if err != nil { + return nil, nil, err + } + + subjects := make([]attestation.InTotoSubject, len(a.Subjects)) + for i, subject := range a.Subjects { + switch 
subject := subject.(type) { + case *attestation.InTotoSubjectSelf: + subjects[i] = &attestation.InTotoSubjectSelf{} + case *attestation.InTotoSubjectRaw: + subjects[i] = &attestation.InTotoSubjectRaw{ + Name: subject.Name, + Digest: subject.Digest, + } + default: + return nil, nil, errors.Errorf("unknown attestation subject type %T", subject) + } + } + return &client.InTotoAttestation{ + PredicateType: a.PredicateType, + PredicateRef: rr, + PredicatePath: a.PredicatePath, + Subjects: subjects, + }, []*ref{rr}, nil + default: + return nil, nil, errors.Errorf("unknown attestation type %T", a) + } +} + type ref struct { resultProxy solver.ResultProxy resultProxyClones []solver.ResultProxy @@ -310,10 +395,6 @@ type ref struct { c *bridgeClient } -func (c *bridgeClient) newRef(r solver.ResultProxy, s session.Group) (*ref, error) { - return &ref{resultProxy: r, session: s, c: c}, nil -} - func (r *ref) acquireResultProxy() solver.ResultProxy { s1, s2 := solver.SplitResultProxy(r.resultProxy) r.resultProxy = s1 diff --git a/frontend/gateway/gateway.go b/frontend/gateway/gateway.go index b983d37a60be..698e40a82994 100644 --- a/frontend/gateway/gateway.go +++ b/frontend/gateway/gateway.go @@ -634,6 +634,7 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) var defaultID string lbf.mu.Lock() + if res.Refs != nil { ids := make(map[string]string, len(res.Refs)) defs := make(map[string]*opspb.Definition, len(res.Refs)) @@ -696,6 +697,33 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: id} } } + + if res.Attestations != nil { + attestations := map[string]*pb.Attestations{} + for k, atts := range res.Attestations { + for _, att := range atts { + switch att := att.(type) { + case *frontend.InTotoAttestation: + ref := att.PredicateRef + id := identity.NewID() + def := ref.Definition() + lbf.refs[id] = ref + + pbAtt, err := pb.ToInTotoPB(att.PredicateType, &pb.Ref{Id: 
id, Def: def}, att.PredicatePath, att.Subjects...) + if err != nil { + return nil, err + } + if attestations[k] == nil { + attestations[k] = &pb.Attestations{} + } + attestations[k].Attestation = append(attestations[k].Attestation, &pb.Attestations_Attestation{ + Attestation: pbAtt, + }) + default: + return nil, errors.Errorf("unknown attestation type %T", att) + } + } + } + pbRes.Attestations = attestations + } + lbf.mu.Unlock() // compatibility mode for older clients @@ -890,6 +918,20 @@ func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest) } r.Refs = m } + + if in.Result.Attestations != nil { + r.Attestations = map[string][]frontend.Attestation{} + for k, pbAtts := range in.Result.Attestations { + for _, pbAtt := range pbAtts.Attestation { + att, err := lbf.convertAttestation(pbAtt) + if err != nil { + return nil, err + } + r.Attestations[k] = append(r.Attestations[k], att) + } + } + } + return lbf.setResult(r, nil) } @@ -1398,6 +1440,29 @@ func (lbf *llbBridgeForwarder) cloneRef(id string) (solver.ResultProxy, error) { return s2, nil } +func (lbf *llbBridgeForwarder) convertAttestation(att *pb.Attestations_Attestation) (frontend.Attestation, error) { + switch att := att.Attestation.(type) { + case *pb.Attestations_Attestation_Intoto: + predicateType, predicateRef, predicatePath, subjects, err := pb.FromInTotoPB(att) + if err != nil { + return nil, err + } + + ref, err := lbf.cloneRef(predicateRef.Id) + if err != nil { + return nil, err + } + return &frontend.InTotoAttestation{ + PredicateType: predicateType, + PredicateRef: ref, + PredicatePath: predicatePath, + Subjects: subjects, + }, nil + default: + return nil, errors.Errorf("unknown attestation type %T", att) + } +} + func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { go func() { <-ctx.Done() diff --git a/frontend/gateway/grpcclient/client.go b/frontend/gateway/grpcclient/client.go index 3f5545f0f594..1f67875506b8 100644 --- a/frontend/gateway/grpcclient/client.go +++ b/frontend/gateway/grpcclient/client.go @@ -95,6 +95,26 @@ 
func convertRef(ref client.Reference) (*pb.Ref, error) { return &pb.Ref{Id: r.id, Def: r.def}, nil } +func convertAttestation(att client.Attestation) (*pb.Attestations_Attestation, error) { + switch att := att.(type) { + case *client.InTotoAttestation: + pbRef, err := convertRef(att.PredicateRef) + if err != nil { + return nil, err + } + + attestation, err := pb.ToInTotoPB(att.PredicateType, pbRef, att.PredicatePath, att.Subjects...) + if err != nil { + return nil, err + } + return &pb.Attestations_Attestation{ + Attestation: attestation, + }, nil + default: + return nil, errors.Errorf("unknown attestation type %T", att) + } +} + func RunFromEnvironment(ctx context.Context, f client.BuildFunc) error { client, err := current() if err != nil { @@ -160,6 +180,25 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro } } } + + if res.Attestations != nil { + attestations := map[string]*pb.Attestations{} + for k, as := range res.Attestations { + for _, a := range as { + pbAtt, err := convertAttestation(a) + if err != nil { + retError = err + continue + } + if attestations[k] == nil { + attestations[k] = &pb.Attestations{} + } + attestations[k].Attestation = append(attestations[k].Attestation, pbAtt) + } + } + pbRes.Attestations = attestations + } + if retError == nil { req.Result = pbRes } @@ -443,6 +482,19 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * res.AddRef(k, ref) } } + + if resp.Result.Attestations != nil { + res.Attestations = map[string][]client.Attestation{} + for p, as := range resp.Result.Attestations { + for _, a := range as.Attestation { + att, err := newAttestation(c, a) + res.AddAttestation(p, att) + if err != nil { + return nil, err + } + } + } + } } return res, nil @@ -1069,6 +1121,29 @@ func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fsty return resp.Stat, nil } +func newAttestation(c *grpcClient, att *pb.Attestations_Attestation) (client.Attestation, error) 
{ + switch att := att.Attestation.(type) { + case *pb.Attestations_Attestation_Intoto: + predicateType, predicateRef, predicatePath, subjects, err := pb.FromInTotoPB(att) + if err != nil { + return nil, err + } + ref, err := newReference(c, predicateRef) + if err != nil { + return nil, err + } + + return &client.InTotoAttestation{ + PredicateType: predicateType, + PredicateRef: ref, + PredicatePath: predicatePath, + Subjects: subjects, + }, nil + default: + return nil, errors.Errorf("unknown attestation type %T", att) + } +} + func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, error) { dialOpt := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { return stdioConn(), nil diff --git a/frontend/gateway/pb/attestations.go b/frontend/gateway/pb/attestations.go new file mode 100644 index 000000000000..82f82ae996d4 --- /dev/null +++ b/frontend/gateway/pb/attestations.go @@ -0,0 +1,60 @@ +package moby_buildkit_v1_frontend //nolint:revive + +import ( + "github.com/moby/buildkit/util/attestation" + "github.com/pkg/errors" +) + +func ToInTotoPB(predicateType string, predicateRef *Ref, predicatePath string, subjects ...attestation.InTotoSubject) (*Attestations_Attestation_Intoto, error) { + pbSubjects := []*InToto_Subject{} + for _, subject := range subjects { + switch s := subject.(type) { + case *attestation.InTotoSubjectRaw: + pbSubjects = append(pbSubjects, &InToto_Subject{ + Subject: &InToto_Subject_Raw{ + Raw: &InToto_Subject_RawSubject{ + Name: s.Name, + Digest: s.Digest, + }, + }, + }) + case *attestation.InTotoSubjectSelf: + pbSubjects = append(pbSubjects, &InToto_Subject{ + Subject: &InToto_Subject_Self{ + Self: &InToto_Subject_SelfSubject{}, + }, + }) + default: + return nil, errors.Errorf("unknown in toto subject type %T", s) + } + } + + intoto := &InToto{ + PredicateType: predicateType, + PredicatePath: predicatePath, + PredicateRef: predicateRef, + Subjects: pbSubjects, + } + + return 
&Attestations_Attestation_Intoto{ + Intoto: intoto, + }, nil +} + +func FromInTotoPB(att *Attestations_Attestation_Intoto) (string, *Ref, string, []attestation.InTotoSubject, error) { + subjects := []attestation.InTotoSubject{} + for _, pbSubject := range att.Intoto.Subjects { + switch pbSubject := pbSubject.Subject.(type) { + case *InToto_Subject_Raw: + subjects = append(subjects, &attestation.InTotoSubjectRaw{ + Name: pbSubject.Raw.Name, + Digest: pbSubject.Raw.Digest, + }) + case *InToto_Subject_Self: + subjects = append(subjects, &attestation.InTotoSubjectSelf{}) + default: + return "", nil, "", nil, errors.Errorf("unknown in toto subject type %T", pbSubject) + } + } + return att.Intoto.PredicateType, att.Intoto.PredicateRef, att.Intoto.PredicatePath, subjects, nil +} diff --git a/frontend/gateway/pb/gateway.pb.go b/frontend/gateway/pb/gateway.pb.go index 743ad2ec526d..a315e41fd6b5 100644 --- a/frontend/gateway/pb/gateway.pb.go +++ b/frontend/gateway/pb/gateway.pb.go @@ -39,11 +39,12 @@ type Result struct { // *Result_RefsDeprecated // *Result_Ref // *Result_Refs - Result isResult_Result `protobuf_oneof:"result"` - Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Result isResult_Result `protobuf_oneof:"result"` + Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Attestations map[string]*Attestations `protobuf:"bytes,11,rep,name=attestations,proto3" json:"attestations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` } func (m *Result) Reset() { *m = Result{} } @@ -145,6 +146,13 @@ func (m *Result) GetMetadata() map[string][]byte { return nil } +func (m *Result) GetAttestations() map[string]*Attestations { + if m != nil { + return m.Attestations + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. func (*Result) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -304,6 +312,374 @@ func (m *RefMap) GetRefs() map[string]*Ref { return nil } +type Attestations struct { + Attestation []*Attestations_Attestation `protobuf:"bytes,1,rep,name=attestation,proto3" json:"attestation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attestations) Reset() { *m = Attestations{} } +func (m *Attestations) String() string { return proto.CompactTextString(m) } +func (*Attestations) ProtoMessage() {} +func (*Attestations) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{4} +} +func (m *Attestations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Attestations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Attestations.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Attestations) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attestations.Merge(m, src) +} +func (m *Attestations) XXX_Size() int { + return m.Size() +} +func (m *Attestations) XXX_DiscardUnknown() { + xxx_messageInfo_Attestations.DiscardUnknown(m) +} + +var xxx_messageInfo_Attestations proto.InternalMessageInfo + +func (m *Attestations) GetAttestation() []*Attestations_Attestation { + if m != nil { + return m.Attestation + } + return nil +} + +type Attestations_Attestation struct { + // Types that are valid to be assigned to Attestation: + // 
*Attestations_Attestation_Intoto + Attestation isAttestations_Attestation_Attestation `protobuf_oneof:"Attestation"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attestations_Attestation) Reset() { *m = Attestations_Attestation{} } +func (m *Attestations_Attestation) String() string { return proto.CompactTextString(m) } +func (*Attestations_Attestation) ProtoMessage() {} +func (*Attestations_Attestation) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{4, 0} +} +func (m *Attestations_Attestation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Attestations_Attestation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Attestations_Attestation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Attestations_Attestation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attestations_Attestation.Merge(m, src) +} +func (m *Attestations_Attestation) XXX_Size() int { + return m.Size() +} +func (m *Attestations_Attestation) XXX_DiscardUnknown() { + xxx_messageInfo_Attestations_Attestation.DiscardUnknown(m) +} + +var xxx_messageInfo_Attestations_Attestation proto.InternalMessageInfo + +type isAttestations_Attestation_Attestation interface { + isAttestations_Attestation_Attestation() + MarshalTo([]byte) (int, error) + Size() int +} + +type Attestations_Attestation_Intoto struct { + Intoto *InToto `protobuf:"bytes,1,opt,name=intoto,proto3,oneof" json:"intoto,omitempty"` +} + +func (*Attestations_Attestation_Intoto) isAttestations_Attestation_Attestation() {} + +func (m *Attestations_Attestation) GetAttestation() isAttestations_Attestation_Attestation { + if m != nil { + return m.Attestation + } + return nil +} + +func (m *Attestations_Attestation) GetIntoto() *InToto { + 
if x, ok := m.GetAttestation().(*Attestations_Attestation_Intoto); ok { + return x.Intoto + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Attestations_Attestation) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Attestations_Attestation_Intoto)(nil), + } +} + +type InToto struct { + PredicateType string `protobuf:"bytes,1,opt,name=predicateType,proto3" json:"predicateType,omitempty"` + PredicateRef *Ref `protobuf:"bytes,2,opt,name=predicateRef,proto3" json:"predicateRef,omitempty"` + PredicatePath string `protobuf:"bytes,3,opt,name=predicatePath,proto3" json:"predicatePath,omitempty"` + Subjects []*InToto_Subject `protobuf:"bytes,4,rep,name=subjects,proto3" json:"subjects,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InToto) Reset() { *m = InToto{} } +func (m *InToto) String() string { return proto.CompactTextString(m) } +func (*InToto) ProtoMessage() {} +func (*InToto) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{5} +} +func (m *InToto) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InToto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InToto.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InToto) XXX_Merge(src proto.Message) { + xxx_messageInfo_InToto.Merge(m, src) +} +func (m *InToto) XXX_Size() int { + return m.Size() +} +func (m *InToto) XXX_DiscardUnknown() { + xxx_messageInfo_InToto.DiscardUnknown(m) +} + +var xxx_messageInfo_InToto proto.InternalMessageInfo + +func (m *InToto) GetPredicateType() string { + if m != nil { + return m.PredicateType + } + return "" +} + +func (m *InToto) GetPredicateRef() *Ref { + if m != nil { + return m.PredicateRef + } + return nil 
+} + +func (m *InToto) GetPredicatePath() string { + if m != nil { + return m.PredicatePath + } + return "" +} + +func (m *InToto) GetSubjects() []*InToto_Subject { + if m != nil { + return m.Subjects + } + return nil +} + +type InToto_Subject struct { + // Types that are valid to be assigned to Subject: + // *InToto_Subject_Self + // *InToto_Subject_Raw + Subject isInToto_Subject_Subject `protobuf_oneof:"Subject"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InToto_Subject) Reset() { *m = InToto_Subject{} } +func (m *InToto_Subject) String() string { return proto.CompactTextString(m) } +func (*InToto_Subject) ProtoMessage() {} +func (*InToto_Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{5, 0} +} +func (m *InToto_Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InToto_Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InToto_Subject.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InToto_Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_InToto_Subject.Merge(m, src) +} +func (m *InToto_Subject) XXX_Size() int { + return m.Size() +} +func (m *InToto_Subject) XXX_DiscardUnknown() { + xxx_messageInfo_InToto_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_InToto_Subject proto.InternalMessageInfo + +type isInToto_Subject_Subject interface { + isInToto_Subject_Subject() + MarshalTo([]byte) (int, error) + Size() int +} + +type InToto_Subject_Self struct { + Self *InToto_Subject_SelfSubject `protobuf:"bytes,1,opt,name=self,proto3,oneof" json:"self,omitempty"` +} +type InToto_Subject_Raw struct { + Raw *InToto_Subject_RawSubject `protobuf:"bytes,2,opt,name=raw,proto3,oneof" json:"raw,omitempty"` +} + +func (*InToto_Subject_Self) 
isInToto_Subject_Subject() {} +func (*InToto_Subject_Raw) isInToto_Subject_Subject() {} + +func (m *InToto_Subject) GetSubject() isInToto_Subject_Subject { + if m != nil { + return m.Subject + } + return nil +} + +func (m *InToto_Subject) GetSelf() *InToto_Subject_SelfSubject { + if x, ok := m.GetSubject().(*InToto_Subject_Self); ok { + return x.Self + } + return nil +} + +func (m *InToto_Subject) GetRaw() *InToto_Subject_RawSubject { + if x, ok := m.GetSubject().(*InToto_Subject_Raw); ok { + return x.Raw + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*InToto_Subject) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*InToto_Subject_Self)(nil), + (*InToto_Subject_Raw)(nil), + } +} + +type InToto_Subject_SelfSubject struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InToto_Subject_SelfSubject) Reset() { *m = InToto_Subject_SelfSubject{} } +func (m *InToto_Subject_SelfSubject) String() string { return proto.CompactTextString(m) } +func (*InToto_Subject_SelfSubject) ProtoMessage() {} +func (*InToto_Subject_SelfSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{5, 0, 0} +} +func (m *InToto_Subject_SelfSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InToto_Subject_SelfSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InToto_Subject_SelfSubject.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InToto_Subject_SelfSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_InToto_Subject_SelfSubject.Merge(m, src) +} +func (m *InToto_Subject_SelfSubject) XXX_Size() int { + return m.Size() +} +func (m *InToto_Subject_SelfSubject) XXX_DiscardUnknown() { + 
xxx_messageInfo_InToto_Subject_SelfSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_InToto_Subject_SelfSubject proto.InternalMessageInfo + +type InToto_Subject_RawSubject struct { + Digest []github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,rep,name=Digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"Digest"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InToto_Subject_RawSubject) Reset() { *m = InToto_Subject_RawSubject{} } +func (m *InToto_Subject_RawSubject) String() string { return proto.CompactTextString(m) } +func (*InToto_Subject_RawSubject) ProtoMessage() {} +func (*InToto_Subject_RawSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{5, 0, 1} +} +func (m *InToto_Subject_RawSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InToto_Subject_RawSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InToto_Subject_RawSubject.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InToto_Subject_RawSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_InToto_Subject_RawSubject.Merge(m, src) +} +func (m *InToto_Subject_RawSubject) XXX_Size() int { + return m.Size() +} +func (m *InToto_Subject_RawSubject) XXX_DiscardUnknown() { + xxx_messageInfo_InToto_Subject_RawSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_InToto_Subject_RawSubject proto.InternalMessageInfo + +func (m *InToto_Subject_RawSubject) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type ReturnRequest struct { Result *Result `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` Error *rpc.Status 
`protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` @@ -316,7 +692,7 @@ func (m *ReturnRequest) Reset() { *m = ReturnRequest{} } func (m *ReturnRequest) String() string { return proto.CompactTextString(m) } func (*ReturnRequest) ProtoMessage() {} func (*ReturnRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{4} + return fileDescriptor_f1a937782ebbded5, []int{6} } func (m *ReturnRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -369,7 +745,7 @@ func (m *ReturnResponse) Reset() { *m = ReturnResponse{} } func (m *ReturnResponse) String() string { return proto.CompactTextString(m) } func (*ReturnResponse) ProtoMessage() {} func (*ReturnResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{5} + return fileDescriptor_f1a937782ebbded5, []int{7} } func (m *ReturnResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -408,7 +784,7 @@ func (m *InputsRequest) Reset() { *m = InputsRequest{} } func (m *InputsRequest) String() string { return proto.CompactTextString(m) } func (*InputsRequest) ProtoMessage() {} func (*InputsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{6} + return fileDescriptor_f1a937782ebbded5, []int{8} } func (m *InputsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -448,7 +824,7 @@ func (m *InputsResponse) Reset() { *m = InputsResponse{} } func (m *InputsResponse) String() string { return proto.CompactTextString(m) } func (*InputsResponse) ProtoMessage() {} func (*InputsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{7} + return fileDescriptor_f1a937782ebbded5, []int{9} } func (m *InputsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +876,7 @@ func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigReq func (m *ResolveImageConfigRequest) String() string { return proto.CompactTextString(m) } func 
(*ResolveImageConfigRequest) ProtoMessage() {} func (*ResolveImageConfigRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{8} + return fileDescriptor_f1a937782ebbded5, []int{10} } func (m *ResolveImageConfigRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +959,7 @@ func (m *ResolveImageConfigResponse) Reset() { *m = ResolveImageConfigRe func (m *ResolveImageConfigResponse) String() string { return proto.CompactTextString(m) } func (*ResolveImageConfigResponse) ProtoMessage() {} func (*ResolveImageConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{9} + return fileDescriptor_f1a937782ebbded5, []int{11} } func (m *ResolveImageConfigResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -644,7 +1020,7 @@ func (m *SolveRequest) Reset() { *m = SolveRequest{} } func (m *SolveRequest) String() string { return proto.CompactTextString(m) } func (*SolveRequest) ProtoMessage() {} func (*SolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{10} + return fileDescriptor_f1a937782ebbded5, []int{12} } func (m *SolveRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -756,7 +1132,7 @@ func (m *CacheOptionsEntry) Reset() { *m = CacheOptionsEntry{} } func (m *CacheOptionsEntry) String() string { return proto.CompactTextString(m) } func (*CacheOptionsEntry) ProtoMessage() {} func (*CacheOptionsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{11} + return fileDescriptor_f1a937782ebbded5, []int{13} } func (m *CacheOptionsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -813,7 +1189,7 @@ func (m *SolveResponse) Reset() { *m = SolveResponse{} } func (m *SolveResponse) String() string { return proto.CompactTextString(m) } func (*SolveResponse) ProtoMessage() {} func (*SolveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{12} + return 
fileDescriptor_f1a937782ebbded5, []int{14} } func (m *SolveResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -869,7 +1245,7 @@ func (m *ReadFileRequest) Reset() { *m = ReadFileRequest{} } func (m *ReadFileRequest) String() string { return proto.CompactTextString(m) } func (*ReadFileRequest) ProtoMessage() {} func (*ReadFileRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{13} + return fileDescriptor_f1a937782ebbded5, []int{15} } func (m *ReadFileRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -931,7 +1307,7 @@ func (m *FileRange) Reset() { *m = FileRange{} } func (m *FileRange) String() string { return proto.CompactTextString(m) } func (*FileRange) ProtoMessage() {} func (*FileRange) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{14} + return fileDescriptor_f1a937782ebbded5, []int{16} } func (m *FileRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -985,7 +1361,7 @@ func (m *ReadFileResponse) Reset() { *m = ReadFileResponse{} } func (m *ReadFileResponse) String() string { return proto.CompactTextString(m) } func (*ReadFileResponse) ProtoMessage() {} func (*ReadFileResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{15} + return fileDescriptor_f1a937782ebbded5, []int{17} } func (m *ReadFileResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1034,7 +1410,7 @@ func (m *ReadDirRequest) Reset() { *m = ReadDirRequest{} } func (m *ReadDirRequest) String() string { return proto.CompactTextString(m) } func (*ReadDirRequest) ProtoMessage() {} func (*ReadDirRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{16} + return fileDescriptor_f1a937782ebbded5, []int{18} } func (m *ReadDirRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1095,7 +1471,7 @@ func (m *ReadDirResponse) Reset() { *m = ReadDirResponse{} } func (m *ReadDirResponse) String() string { 
return proto.CompactTextString(m) } func (*ReadDirResponse) ProtoMessage() {} func (*ReadDirResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{17} + return fileDescriptor_f1a937782ebbded5, []int{19} } func (m *ReadDirResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1143,7 +1519,7 @@ func (m *StatFileRequest) Reset() { *m = StatFileRequest{} } func (m *StatFileRequest) String() string { return proto.CompactTextString(m) } func (*StatFileRequest) ProtoMessage() {} func (*StatFileRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{18} + return fileDescriptor_f1a937782ebbded5, []int{20} } func (m *StatFileRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1197,7 +1573,7 @@ func (m *StatFileResponse) Reset() { *m = StatFileResponse{} } func (m *StatFileResponse) String() string { return proto.CompactTextString(m) } func (*StatFileResponse) ProtoMessage() {} func (*StatFileResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{19} + return fileDescriptor_f1a937782ebbded5, []int{21} } func (m *StatFileResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1243,7 +1619,7 @@ func (m *PingRequest) Reset() { *m = PingRequest{} } func (m *PingRequest) String() string { return proto.CompactTextString(m) } func (*PingRequest) ProtoMessage() {} func (*PingRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{20} + return fileDescriptor_f1a937782ebbded5, []int{22} } func (m *PingRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1285,7 +1661,7 @@ func (m *PongResponse) Reset() { *m = PongResponse{} } func (m *PongResponse) String() string { return proto.CompactTextString(m) } func (*PongResponse) ProtoMessage() {} func (*PongResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{21} + return fileDescriptor_f1a937782ebbded5, []int{23} } func (m 
*PongResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1352,7 +1728,7 @@ func (m *WarnRequest) Reset() { *m = WarnRequest{} } func (m *WarnRequest) String() string { return proto.CompactTextString(m) } func (*WarnRequest) ProtoMessage() {} func (*WarnRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{22} + return fileDescriptor_f1a937782ebbded5, []int{24} } func (m *WarnRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1433,7 +1809,7 @@ func (m *WarnResponse) Reset() { *m = WarnResponse{} } func (m *WarnResponse) String() string { return proto.CompactTextString(m) } func (*WarnResponse) ProtoMessage() {} func (*WarnResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{23} + return fileDescriptor_f1a937782ebbded5, []int{25} } func (m *WarnResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1479,7 +1855,7 @@ func (m *NewContainerRequest) Reset() { *m = NewContainerRequest{} } func (m *NewContainerRequest) String() string { return proto.CompactTextString(m) } func (*NewContainerRequest) ProtoMessage() {} func (*NewContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{24} + return fileDescriptor_f1a937782ebbded5, []int{26} } func (m *NewContainerRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1560,7 +1936,7 @@ func (m *NewContainerResponse) Reset() { *m = NewContainerResponse{} } func (m *NewContainerResponse) String() string { return proto.CompactTextString(m) } func (*NewContainerResponse) ProtoMessage() {} func (*NewContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{25} + return fileDescriptor_f1a937782ebbded5, []int{27} } func (m *NewContainerResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1600,7 +1976,7 @@ func (m *ReleaseContainerRequest) Reset() { *m = ReleaseContainerRequest func (m *ReleaseContainerRequest) 
String() string { return proto.CompactTextString(m) } func (*ReleaseContainerRequest) ProtoMessage() {} func (*ReleaseContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{26} + return fileDescriptor_f1a937782ebbded5, []int{28} } func (m *ReleaseContainerRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1646,7 +2022,7 @@ func (m *ReleaseContainerResponse) Reset() { *m = ReleaseContainerRespon func (m *ReleaseContainerResponse) String() string { return proto.CompactTextString(m) } func (*ReleaseContainerResponse) ProtoMessage() {} func (*ReleaseContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{27} + return fileDescriptor_f1a937782ebbded5, []int{29} } func (m *ReleaseContainerResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1695,7 +2071,7 @@ func (m *ExecMessage) Reset() { *m = ExecMessage{} } func (m *ExecMessage) String() string { return proto.CompactTextString(m) } func (*ExecMessage) ProtoMessage() {} func (*ExecMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{28} + return fileDescriptor_f1a937782ebbded5, []int{30} } func (m *ExecMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1851,7 +2227,7 @@ func (m *InitMessage) Reset() { *m = InitMessage{} } func (m *InitMessage) String() string { return proto.CompactTextString(m) } func (*InitMessage) ProtoMessage() {} func (*InitMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{29} + return fileDescriptor_f1a937782ebbded5, []int{31} } func (m *InitMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1927,7 +2303,7 @@ func (m *ExitMessage) Reset() { *m = ExitMessage{} } func (m *ExitMessage) String() string { return proto.CompactTextString(m) } func (*ExitMessage) ProtoMessage() {} func (*ExitMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{30} + return 
fileDescriptor_f1a937782ebbded5, []int{32} } func (m *ExitMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1980,7 +2356,7 @@ func (m *StartedMessage) Reset() { *m = StartedMessage{} } func (m *StartedMessage) String() string { return proto.CompactTextString(m) } func (*StartedMessage) ProtoMessage() {} func (*StartedMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{31} + return fileDescriptor_f1a937782ebbded5, []int{33} } func (m *StartedMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2019,7 +2395,7 @@ func (m *DoneMessage) Reset() { *m = DoneMessage{} } func (m *DoneMessage) String() string { return proto.CompactTextString(m) } func (*DoneMessage) ProtoMessage() {} func (*DoneMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{32} + return fileDescriptor_f1a937782ebbded5, []int{34} } func (m *DoneMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2061,7 +2437,7 @@ func (m *FdMessage) Reset() { *m = FdMessage{} } func (m *FdMessage) String() string { return proto.CompactTextString(m) } func (*FdMessage) ProtoMessage() {} func (*FdMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{33} + return fileDescriptor_f1a937782ebbded5, []int{35} } func (m *FdMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2123,7 +2499,7 @@ func (m *ResizeMessage) Reset() { *m = ResizeMessage{} } func (m *ResizeMessage) String() string { return proto.CompactTextString(m) } func (*ResizeMessage) ProtoMessage() {} func (*ResizeMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{34} + return fileDescriptor_f1a937782ebbded5, []int{36} } func (m *ResizeMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2179,7 +2555,7 @@ func (m *SignalMessage) Reset() { *m = SignalMessage{} } func (m *SignalMessage) String() string { return proto.CompactTextString(m) } func 
(*SignalMessage) ProtoMessage() {} func (*SignalMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{35} + return fileDescriptor_f1a937782ebbded5, []int{37} } func (m *SignalMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2217,12 +2593,19 @@ func (m *SignalMessage) GetName() string { func init() { proto.RegisterType((*Result)(nil), "moby.buildkit.v1.frontend.Result") + proto.RegisterMapType((map[string]*Attestations)(nil), "moby.buildkit.v1.frontend.Result.AttestationsEntry") proto.RegisterMapType((map[string][]byte)(nil), "moby.buildkit.v1.frontend.Result.MetadataEntry") proto.RegisterType((*RefMapDeprecated)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated") proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated.RefsEntry") proto.RegisterType((*Ref)(nil), "moby.buildkit.v1.frontend.Ref") proto.RegisterType((*RefMap)(nil), "moby.buildkit.v1.frontend.RefMap") proto.RegisterMapType((map[string]*Ref)(nil), "moby.buildkit.v1.frontend.RefMap.RefsEntry") + proto.RegisterType((*Attestations)(nil), "moby.buildkit.v1.frontend.Attestations") + proto.RegisterType((*Attestations_Attestation)(nil), "moby.buildkit.v1.frontend.Attestations.Attestation") + proto.RegisterType((*InToto)(nil), "moby.buildkit.v1.frontend.InToto") + proto.RegisterType((*InToto_Subject)(nil), "moby.buildkit.v1.frontend.InToto.Subject") + proto.RegisterType((*InToto_Subject_SelfSubject)(nil), "moby.buildkit.v1.frontend.InToto.Subject.SelfSubject") + proto.RegisterType((*InToto_Subject_RawSubject)(nil), "moby.buildkit.v1.frontend.InToto.Subject.RawSubject") proto.RegisterType((*ReturnRequest)(nil), "moby.buildkit.v1.frontend.ReturnRequest") proto.RegisterType((*ReturnResponse)(nil), "moby.buildkit.v1.frontend.ReturnResponse") proto.RegisterType((*InputsRequest)(nil), "moby.buildkit.v1.frontend.InputsRequest") @@ -2264,138 +2647,152 @@ func init() { func init() { proto.RegisterFile("gateway.proto", 
fileDescriptor_f1a937782ebbded5) } var fileDescriptor_f1a937782ebbded5 = []byte{ - // 2086 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xcd, 0x6f, 0xe3, 0xc6, - 0x15, 0x37, 0xad, 0xef, 0xa7, 0x8f, 0x75, 0x26, 0x69, 0xca, 0x10, 0xc1, 0xc6, 0x61, 0x53, 0x57, - 0xbb, 0x71, 0xa8, 0xd4, 0x9b, 0xc0, 0x5b, 0x6f, 0x91, 0x74, 0xfd, 0x05, 0x2b, 0xb1, 0xbd, 0xee, - 0x38, 0xc5, 0x02, 0x41, 0x0a, 0x94, 0x16, 0x47, 0x5a, 0x62, 0x69, 0x92, 0x1d, 0x8e, 0xd6, 0xeb, - 0xe4, 0xd2, 0xde, 0x7a, 0xec, 0xa9, 0xd7, 0x02, 0xfd, 0x0b, 0x7a, 0xea, 0xb1, 0xe7, 0x1c, 0x7b, - 0x29, 0x50, 0xf4, 0x10, 0x14, 0xfb, 0x47, 0x14, 0xe8, 0xad, 0x78, 0x33, 0x43, 0x89, 0x92, 0x65, - 0x4a, 0x42, 0x4e, 0x9a, 0x79, 0x7c, 0xbf, 0x37, 0xef, 0x6b, 0xde, 0x7b, 0x23, 0x68, 0x0e, 0x5c, - 0xc1, 0xae, 0xdc, 0x6b, 0x27, 0xe6, 0x91, 0x88, 0xc8, 0x5b, 0x97, 0xd1, 0xc5, 0xb5, 0x73, 0x31, - 0xf4, 0x03, 0xef, 0xb9, 0x2f, 0x9c, 0x17, 0x3f, 0x75, 0xfa, 0x3c, 0x0a, 0x05, 0x0b, 0x3d, 0xeb, - 0x83, 0x81, 0x2f, 0x9e, 0x0d, 0x2f, 0x9c, 0x5e, 0x74, 0xd9, 0x19, 0x44, 0x83, 0xa8, 0x23, 0x11, - 0x17, 0xc3, 0xbe, 0xdc, 0xc9, 0x8d, 0x5c, 0x29, 0x49, 0xd6, 0xd6, 0x34, 0xfb, 0x20, 0x8a, 0x06, - 0x01, 0x73, 0x63, 0x3f, 0xd1, 0xcb, 0x0e, 0x8f, 0x7b, 0x9d, 0x44, 0xb8, 0x62, 0x98, 0x68, 0xcc, - 0x66, 0x06, 0x83, 0x8a, 0x74, 0x52, 0x45, 0x3a, 0x49, 0x14, 0xbc, 0x60, 0xbc, 0x13, 0x5f, 0x74, - 0xa2, 0x38, 0xe5, 0xee, 0xdc, 0xca, 0xed, 0xc6, 0x7e, 0x47, 0x5c, 0xc7, 0x2c, 0xe9, 0x5c, 0x45, - 0xfc, 0x39, 0xe3, 0x1a, 0xf0, 0xe0, 0x56, 0xc0, 0x50, 0xf8, 0x01, 0xa2, 0x7a, 0x6e, 0x9c, 0xe0, - 0x21, 0xf8, 0xab, 0x41, 0x59, 0xb3, 0x45, 0x14, 0xfa, 0x89, 0xf0, 0xfd, 0x81, 0xdf, 0xe9, 0x27, - 0x12, 0xa3, 0x4e, 0x41, 0x23, 0x14, 0xbb, 0xfd, 0x87, 0x02, 0x94, 0x29, 0x4b, 0x86, 0x81, 0x20, - 0x1b, 0xd0, 0xe4, 0xac, 0xbf, 0xcf, 0x62, 0xce, 0x7a, 0xae, 0x60, 0x9e, 0x69, 0xac, 0x1b, 0xed, - 0xda, 0xd1, 0x0a, 0x9d, 0x24, 0x93, 0x5f, 0x41, 0x8b, 0xb3, 0x7e, 0x92, 0x61, 0x5c, 0x5d, 0x37, - 0xda, 
0xf5, 0xad, 0xf7, 0x9d, 0x5b, 0x83, 0xe1, 0x50, 0xd6, 0x3f, 0x71, 0xe3, 0x31, 0xe4, 0x68, - 0x85, 0x4e, 0x09, 0x21, 0x5b, 0x50, 0xe0, 0xac, 0x6f, 0x16, 0xa4, 0xac, 0xbb, 0xf9, 0xb2, 0x8e, - 0x56, 0x28, 0x32, 0x93, 0x6d, 0x28, 0xa2, 0x14, 0xb3, 0x28, 0x41, 0xef, 0xce, 0x55, 0xe0, 0x68, - 0x85, 0x4a, 0x00, 0xf9, 0x1c, 0xaa, 0x97, 0x4c, 0xb8, 0x9e, 0x2b, 0x5c, 0x13, 0xd6, 0x0b, 0xed, - 0xfa, 0x56, 0x27, 0x17, 0x8c, 0x0e, 0x72, 0x4e, 0x34, 0xe2, 0x20, 0x14, 0xfc, 0x9a, 0x8e, 0x04, - 0x58, 0x8f, 0xa0, 0x39, 0xf1, 0x89, 0xac, 0x41, 0xe1, 0x39, 0xbb, 0x56, 0xfe, 0xa3, 0xb8, 0x24, - 0x6f, 0x40, 0xe9, 0x85, 0x1b, 0x0c, 0x99, 0x74, 0x55, 0x83, 0xaa, 0xcd, 0xce, 0xea, 0x43, 0x63, - 0xb7, 0x0a, 0x65, 0x2e, 0xc5, 0xdb, 0x7f, 0x32, 0x60, 0x6d, 0xda, 0x4f, 0xa4, 0xab, 0x2d, 0x34, - 0xa4, 0x92, 0x1f, 0x2f, 0xe1, 0x62, 0x24, 0x24, 0x4a, 0x55, 0x29, 0xc2, 0xda, 0x86, 0xda, 0x88, - 0x34, 0x4f, 0xc5, 0x5a, 0x46, 0x45, 0x7b, 0x1b, 0x0a, 0x94, 0xf5, 0x49, 0x0b, 0x56, 0x7d, 0x9d, - 0x14, 0x74, 0xd5, 0xf7, 0xc8, 0x3a, 0x14, 0x3c, 0xd6, 0xd7, 0xc1, 0x6f, 0x39, 0xf1, 0x85, 0xb3, - 0xcf, 0xfa, 0x7e, 0xe8, 0x0b, 0x3f, 0x0a, 0x29, 0x7e, 0xb2, 0xff, 0x62, 0x60, 0x72, 0xa1, 0x5a, - 0xe4, 0xd3, 0x09, 0x3b, 0xe6, 0xa7, 0xca, 0x0d, 0xed, 0x9f, 0xe6, 0x6b, 0xff, 0x51, 0x56, 0xfb, - 0xb9, 0xf9, 0x93, 0xb5, 0x4e, 0x40, 0x93, 0x32, 0x31, 0xe4, 0x21, 0x65, 0xbf, 0x1d, 0xb2, 0x44, - 0x90, 0x9f, 0xa5, 0x11, 0x91, 0xf2, 0xe7, 0xa5, 0x15, 0x32, 0x52, 0x0d, 0x20, 0x6d, 0x28, 0x31, - 0xce, 0x23, 0xae, 0xb5, 0x20, 0x8e, 0xaa, 0x1c, 0x0e, 0x8f, 0x7b, 0xce, 0xb9, 0xac, 0x1c, 0x54, - 0x31, 0xd8, 0x6b, 0xd0, 0x4a, 0x4f, 0x4d, 0xe2, 0x28, 0x4c, 0x98, 0x7d, 0x07, 0x9a, 0xdd, 0x30, - 0x1e, 0x8a, 0x44, 0xeb, 0x61, 0xff, 0xdd, 0x80, 0x56, 0x4a, 0x51, 0x3c, 0xe4, 0x2b, 0xa8, 0x8f, - 0x7d, 0x9c, 0x3a, 0x73, 0x27, 0x47, 0xbf, 0x49, 0x7c, 0x26, 0x40, 0xda, 0xb7, 0x59, 0x71, 0xd6, - 0x29, 0xac, 0x4d, 0x33, 0xcc, 0xf0, 0xf4, 0x7b, 0x93, 0x9e, 0x9e, 0x0e, 0x7c, 0xc6, 0xb3, 0xff, - 0x34, 0xe0, 0x2d, 0xca, 0x64, 0x29, 0xec, 
0x5e, 0xba, 0x03, 0xb6, 0x17, 0x85, 0x7d, 0x7f, 0x90, - 0xba, 0x79, 0x4d, 0x66, 0x55, 0x2a, 0x19, 0x13, 0xac, 0x0d, 0xd5, 0xb3, 0xc0, 0x15, 0xfd, 0x88, - 0x5f, 0x6a, 0xe1, 0x0d, 0x14, 0x9e, 0xd2, 0xe8, 0xe8, 0x2b, 0x59, 0x87, 0xba, 0x16, 0x7c, 0x12, - 0x79, 0x4c, 0xd6, 0x8c, 0x1a, 0xcd, 0x92, 0x88, 0x09, 0x95, 0xe3, 0x68, 0x70, 0xea, 0x5e, 0x32, - 0x59, 0x1c, 0x6a, 0x34, 0xdd, 0x12, 0x1b, 0x1a, 0x9a, 0x91, 0x7f, 0x71, 0x1d, 0x33, 0xb3, 0xb4, - 0x6e, 0xb4, 0x4b, 0x74, 0x82, 0x46, 0xde, 0x86, 0xda, 0x39, 0x4b, 0x12, 0x3f, 0x0a, 0xbb, 0xfb, - 0x66, 0x59, 0xe2, 0xc7, 0x04, 0xfb, 0x77, 0x06, 0x58, 0xb3, 0xec, 0xd2, 0x41, 0xfa, 0x0c, 0xca, - 0xfb, 0xfe, 0x80, 0x25, 0x2a, 0x7f, 0x6a, 0xbb, 0x5b, 0xdf, 0x7e, 0xf7, 0xce, 0xca, 0xbf, 0xbf, - 0x7b, 0xe7, 0x7e, 0xa6, 0x32, 0x47, 0x31, 0x0b, 0x7b, 0x51, 0x28, 0x5c, 0x3f, 0x64, 0x1c, 0x1b, - 0xcc, 0x07, 0x9e, 0x84, 0x38, 0x0a, 0x49, 0xb5, 0x04, 0xf2, 0x26, 0x94, 0x95, 0x74, 0x5d, 0x38, - 0xf4, 0xce, 0xfe, 0x5b, 0x09, 0x1a, 0xe7, 0xa8, 0x40, 0xea, 0x4d, 0x07, 0x60, 0x1c, 0x04, 0x9d, - 0xb8, 0xd3, 0xa1, 0xc9, 0x70, 0x10, 0x0b, 0xaa, 0x87, 0x3a, 0x49, 0xf4, 0x85, 0x1f, 0xed, 0xc9, - 0x97, 0x50, 0x4f, 0xd7, 0x4f, 0x62, 0x61, 0x16, 0x64, 0x96, 0x3d, 0xcc, 0xc9, 0xb2, 0xac, 0x26, - 0x4e, 0x06, 0xaa, 0x73, 0x2c, 0x43, 0x21, 0x9b, 0xf0, 0x9a, 0x1b, 0x04, 0xd1, 0x95, 0xbe, 0x38, - 0xf2, 0x0a, 0xc8, 0x10, 0x54, 0xe9, 0xcd, 0x0f, 0xe4, 0x43, 0x78, 0x3d, 0x43, 0x7c, 0xcc, 0xb9, - 0x7b, 0x8d, 0x39, 0x53, 0x96, 0xfc, 0xb3, 0x3e, 0x61, 0x15, 0x3b, 0xf4, 0x43, 0x37, 0x30, 0x41, - 0xf2, 0xa8, 0x0d, 0xc6, 0xfc, 0xe0, 0x65, 0x1c, 0x71, 0xc1, 0xf8, 0x63, 0x21, 0xb8, 0x59, 0x97, - 0xce, 0x9c, 0xa0, 0x91, 0x33, 0x68, 0xec, 0xb9, 0xbd, 0x67, 0xac, 0x7b, 0x89, 0xc4, 0xc4, 0x6c, - 0x48, 0xb3, 0x37, 0x73, 0xcc, 0x96, 0xec, 0x4f, 0xe2, 0xcc, 0x75, 0x9a, 0x90, 0x40, 0x7a, 0xd0, - 0x4a, 0x4d, 0x57, 0xf7, 0xd0, 0x6c, 0x4a, 0x99, 0x8f, 0x96, 0x75, 0xa5, 0x42, 0xab, 0x23, 0xa6, - 0x44, 0x62, 0x20, 0x0f, 0xf0, 0xca, 0xb9, 0x82, 0x99, 0x2d, 0x69, 0xf3, 0x68, 
0x6f, 0x7d, 0x02, - 0x6b, 0xd3, 0xd1, 0x58, 0xa6, 0xf0, 0x5b, 0xbf, 0x84, 0xd7, 0x67, 0xa8, 0xf0, 0xbd, 0x6a, 0xc2, - 0x5f, 0x0d, 0x78, 0xed, 0x86, 0xdf, 0x08, 0x81, 0xa2, 0xbc, 0x8b, 0x4a, 0xa4, 0x5c, 0x93, 0x13, - 0x28, 0x61, 0x5c, 0x12, 0x73, 0x55, 0x3a, 0x6d, 0x7b, 0x99, 0x40, 0x38, 0x12, 0xa9, 0x1c, 0xa6, - 0xa4, 0x58, 0x0f, 0x01, 0xc6, 0xc4, 0xa5, 0xda, 0xdf, 0x57, 0xd0, 0xd4, 0x51, 0xd1, 0x17, 0x7c, - 0x4d, 0x4d, 0x2a, 0x1a, 0x8c, 0x73, 0xc8, 0xb8, 0x65, 0x14, 0x96, 0x6c, 0x19, 0xf6, 0x37, 0x70, - 0x87, 0x32, 0xd7, 0x3b, 0xf4, 0x03, 0x76, 0x7b, 0x65, 0xc4, 0xdb, 0xea, 0x07, 0xec, 0xcc, 0x15, - 0xcf, 0x46, 0xb7, 0x55, 0xef, 0xc9, 0x0e, 0x94, 0xa8, 0x1b, 0x0e, 0x98, 0x3e, 0xfa, 0xbd, 0x9c, - 0xa3, 0xe5, 0x21, 0xc8, 0x4b, 0x15, 0xc4, 0x7e, 0x04, 0xb5, 0x11, 0x0d, 0x6b, 0xcd, 0x93, 0x7e, - 0x3f, 0x61, 0xaa, 0x6e, 0x15, 0xa8, 0xde, 0x21, 0xfd, 0x98, 0x85, 0x03, 0x7d, 0x74, 0x81, 0xea, - 0x9d, 0xbd, 0x81, 0xe3, 0x4a, 0xaa, 0xb9, 0x76, 0x0d, 0x81, 0xe2, 0x3e, 0xce, 0x54, 0x86, 0xbc, - 0x60, 0x72, 0x6d, 0x7b, 0xd8, 0xea, 0x5c, 0x6f, 0xdf, 0xe7, 0xb7, 0x1b, 0x68, 0x42, 0x65, 0xdf, - 0xe7, 0x19, 0xfb, 0xd2, 0x2d, 0xd9, 0xc0, 0x26, 0xd8, 0x0b, 0x86, 0x1e, 0x5a, 0x2b, 0x18, 0x0f, - 0x75, 0xb5, 0x9f, 0xa2, 0xda, 0x9f, 0x2a, 0x3f, 0xca, 0x53, 0xb4, 0x32, 0x9b, 0x50, 0x61, 0xa1, - 0xe0, 0x3e, 0x4b, 0x3b, 0x25, 0x71, 0xd4, 0x18, 0xec, 0xc8, 0x31, 0x58, 0x76, 0x64, 0x9a, 0xb2, - 0xd8, 0xdb, 0x70, 0x07, 0x09, 0xf9, 0x81, 0x20, 0x50, 0xcc, 0x28, 0x29, 0xd7, 0xf6, 0x0e, 0xac, - 0x8d, 0x81, 0xfa, 0xe8, 0x0d, 0x28, 0xe2, 0x90, 0xad, 0x0b, 0xf1, 0xac, 0x73, 0xe5, 0x77, 0xbb, - 0x09, 0xf5, 0x33, 0x3f, 0x4c, 0x7b, 0xa2, 0xfd, 0xca, 0x80, 0xc6, 0x59, 0x14, 0x8e, 0x7b, 0xc9, - 0x19, 0xdc, 0x49, 0x6f, 0xe0, 0xe3, 0xb3, 0xee, 0x9e, 0x1b, 0xa7, 0xa6, 0xac, 0xdf, 0x0c, 0xb3, - 0x7e, 0x0f, 0x38, 0x8a, 0x71, 0xb7, 0x88, 0x6d, 0x87, 0x4e, 0xc3, 0xc9, 0x2f, 0xa0, 0x72, 0x7c, - 0xbc, 0x2b, 0x25, 0xad, 0x2e, 0x25, 0x29, 0x85, 0x91, 0x4f, 0xa0, 0xf2, 0x54, 0x3e, 0x53, 0x12, - 0xdd, 0x1a, 0x66, 
0xa4, 0x9c, 0x32, 0x54, 0xb1, 0x51, 0xd6, 0x8b, 0xb8, 0x47, 0x53, 0x90, 0xfd, - 0x5f, 0x03, 0xea, 0x4f, 0xdd, 0xf1, 0xbc, 0xf5, 0x19, 0x94, 0xbd, 0xef, 0xdd, 0x2f, 0xd5, 0x16, - 0x6f, 0x71, 0xc0, 0x5e, 0xb0, 0x40, 0xa7, 0xaa, 0xda, 0x20, 0x35, 0x79, 0x16, 0x71, 0x75, 0x3b, - 0x1b, 0x54, 0x6d, 0x30, 0xaf, 0x3d, 0x26, 0x5c, 0x3f, 0x30, 0x8b, 0xeb, 0x05, 0xec, 0xad, 0x6a, - 0x87, 0x51, 0x1f, 0xf2, 0x40, 0x36, 0xa5, 0x1a, 0xc5, 0x25, 0xb1, 0xa1, 0xe8, 0x87, 0xfd, 0x48, - 0xf6, 0x1d, 0x5d, 0xdd, 0xce, 0xa3, 0x21, 0xef, 0xb1, 0x6e, 0xd8, 0x8f, 0xa8, 0xfc, 0x46, 0xde, - 0x85, 0x32, 0xc7, 0x6b, 0x94, 0x98, 0x15, 0xe9, 0x94, 0x1a, 0x72, 0xa9, 0xcb, 0xa6, 0x3f, 0xd8, - 0x2d, 0x68, 0x28, 0xbb, 0xf5, 0xc4, 0xf7, 0xc7, 0x55, 0x78, 0xfd, 0x94, 0x5d, 0xed, 0xa5, 0x76, - 0xa5, 0x0e, 0x59, 0x87, 0xfa, 0x88, 0xd6, 0xdd, 0xd7, 0xe9, 0x97, 0x25, 0xe1, 0x61, 0x27, 0xd1, - 0x30, 0x14, 0x69, 0x0c, 0xe5, 0x61, 0x92, 0x42, 0xf5, 0x07, 0xf2, 0x63, 0xa8, 0x9c, 0x32, 0x81, - 0xef, 0x49, 0x69, 0x75, 0x6b, 0xab, 0x8e, 0x3c, 0xa7, 0x4c, 0xe0, 0x78, 0x44, 0xd3, 0x6f, 0x38, - 0x73, 0xc5, 0xe9, 0xcc, 0x55, 0x9c, 0x35, 0x73, 0xa5, 0x5f, 0xc9, 0x36, 0xd4, 0x7b, 0x51, 0x98, - 0x08, 0xee, 0xfa, 0x78, 0x70, 0x49, 0x32, 0xff, 0x00, 0x99, 0x55, 0x60, 0xf7, 0xc6, 0x1f, 0x69, - 0x96, 0x93, 0xdc, 0x07, 0x60, 0x2f, 0x05, 0x77, 0x8f, 0xa2, 0x44, 0x24, 0x66, 0x59, 0x2a, 0x0c, - 0x88, 0x43, 0x42, 0xf7, 0x8c, 0x66, 0xbe, 0xda, 0x6f, 0xc2, 0x1b, 0x93, 0x1e, 0xd1, 0xae, 0x7a, - 0x04, 0x3f, 0xa4, 0x2c, 0x60, 0x6e, 0xc2, 0x96, 0xf7, 0x96, 0x6d, 0x81, 0x79, 0x13, 0xac, 0x05, - 0xff, 0xaf, 0x00, 0xf5, 0x83, 0x97, 0xac, 0x77, 0xc2, 0x92, 0xc4, 0x1d, 0xc8, 0xc9, 0xef, 0x8c, - 0x47, 0x3d, 0x96, 0x24, 0x23, 0x59, 0x63, 0x02, 0xf9, 0x39, 0x14, 0xbb, 0xa1, 0x2f, 0x74, 0x9b, - 0xdb, 0xc8, 0x1d, 0xbc, 0x7d, 0xa1, 0x65, 0xe2, 0xa3, 0x13, 0xb7, 0x64, 0x07, 0x8a, 0x58, 0x24, - 0x16, 0x29, 0xd4, 0x5e, 0x06, 0x8b, 0x18, 0xb2, 0x2b, 0x9f, 0xe9, 0xfe, 0xd7, 0x4c, 0x47, 0xa9, - 0x9d, 0xdf, 0x61, 0xfc, 0xaf, 0xd9, 0x58, 0x82, 0x46, 
0x92, 0x03, 0xa8, 0x9c, 0x0b, 0x97, 0xe3, - 0x8b, 0x5d, 0x45, 0xef, 0x5e, 0xde, 0x20, 0xa2, 0x38, 0xc7, 0x52, 0x52, 0x2c, 0x3a, 0xe1, 0xe0, - 0xa5, 0x2f, 0xf4, 0x6d, 0xc8, 0x73, 0x02, 0xb2, 0x65, 0x0c, 0xc1, 0x2d, 0xa2, 0xf7, 0xa3, 0x90, - 0x99, 0x95, 0xb9, 0x68, 0x64, 0xcb, 0xa0, 0x71, 0x8b, 0x6e, 0x38, 0xf7, 0x07, 0x38, 0xdf, 0x55, - 0xe7, 0xba, 0x41, 0x31, 0x66, 0xdc, 0xa0, 0x08, 0xbb, 0x15, 0x28, 0xc9, 0x69, 0xc6, 0xfe, 0xb3, - 0x01, 0xf5, 0x4c, 0x9c, 0x16, 0xb8, 0x77, 0x6f, 0x43, 0x11, 0x5f, 0xfa, 0x3a, 0xfe, 0x55, 0x79, - 0xeb, 0x98, 0x70, 0xa9, 0xa4, 0x62, 0xe1, 0x38, 0xf4, 0x54, 0x51, 0x6c, 0x52, 0x5c, 0x22, 0xe5, - 0x0b, 0x71, 0x2d, 0x43, 0x56, 0xa5, 0xb8, 0x24, 0x9b, 0x50, 0x3d, 0x67, 0xbd, 0x21, 0xf7, 0xc5, - 0xb5, 0x0c, 0x42, 0x6b, 0x6b, 0x4d, 0x96, 0x13, 0x4d, 0x93, 0x97, 0x73, 0xc4, 0x61, 0x7f, 0x8e, - 0xc9, 0x39, 0x56, 0x90, 0x40, 0x71, 0x0f, 0xdf, 0x3b, 0xa8, 0x59, 0x93, 0xca, 0x35, 0x3e, 0x39, - 0x0f, 0xe6, 0x3d, 0x39, 0x0f, 0xd2, 0x27, 0xe7, 0x64, 0x50, 0xb1, 0xfb, 0x64, 0x9c, 0x6c, 0x3f, - 0x86, 0xda, 0x28, 0xf1, 0xf0, 0xb5, 0x7f, 0xe8, 0xe9, 0x93, 0x56, 0x0f, 0x3d, 0x34, 0xe5, 0xe0, - 0xc9, 0xa1, 0x3c, 0xa5, 0x4a, 0x71, 0x39, 0xea, 0xf5, 0x85, 0x4c, 0xaf, 0xdf, 0xc6, 0xc7, 0x74, - 0x26, 0xfb, 0x90, 0x89, 0x46, 0x57, 0x49, 0xaa, 0x32, 0xae, 0x95, 0x19, 0x41, 0x22, 0x65, 0x49, - 0x33, 0x82, 0xc4, 0xfe, 0x11, 0x34, 0x27, 0xe2, 0x85, 0x4c, 0xf2, 0xf5, 0xa6, 0x47, 0x42, 0x5c, - 0x6f, 0xfd, 0xab, 0x06, 0xb5, 0xe3, 0xe3, 0xdd, 0x5d, 0xee, 0x7b, 0x03, 0x46, 0x7e, 0x6f, 0x00, - 0xb9, 0xf9, 0x0c, 0x23, 0x1f, 0xe5, 0xdf, 0x8c, 0xd9, 0xaf, 0x51, 0xeb, 0xe3, 0x25, 0x51, 0xba, - 0x3f, 0x7f, 0x09, 0x25, 0x39, 0x1b, 0x92, 0x9f, 0x2c, 0x38, 0xd3, 0x5b, 0xed, 0xf9, 0x8c, 0x5a, - 0x76, 0x0f, 0xaa, 0xe9, 0x7c, 0x45, 0xee, 0xe7, 0xaa, 0x37, 0x31, 0x3e, 0x5a, 0xef, 0x2f, 0xc4, - 0xab, 0x0f, 0xf9, 0x0d, 0x54, 0xf4, 0xd8, 0x44, 0xee, 0xcd, 0xc1, 0x8d, 0x07, 0x38, 0xeb, 0xfe, - 0x22, 0xac, 0x63, 0x33, 0xd2, 0xf1, 0x28, 0xd7, 0x8c, 0xa9, 0xe1, 0x2b, 0xd7, 0x8c, 0x1b, 
0xf3, - 0xd6, 0x53, 0x28, 0xe2, 0x1c, 0x45, 0xf2, 0xea, 0x49, 0x66, 0xd0, 0xb2, 0xf2, 0xc2, 0x35, 0x31, - 0x80, 0xfd, 0x1a, 0xeb, 0xae, 0x7c, 0x8b, 0xe6, 0x57, 0xdc, 0xcc, 0x1f, 0x48, 0xd6, 0xbd, 0x05, - 0x38, 0xc7, 0xe2, 0xf5, 0x3b, 0xae, 0xbd, 0xc0, 0xbf, 0x38, 0xf3, 0xc5, 0x4f, 0xfd, 0x5f, 0x14, - 0x41, 0x23, 0xdb, 0x4e, 0x89, 0x93, 0x03, 0x9d, 0x31, 0x89, 0x58, 0x9d, 0x85, 0xf9, 0xf5, 0x81, - 0xdf, 0xe0, 0x9b, 0x60, 0xb2, 0xd5, 0x92, 0xad, 0x5c, 0x77, 0xcc, 0x6c, 0xea, 0xd6, 0x83, 0xa5, - 0x30, 0xfa, 0x70, 0x57, 0xb5, 0x72, 0xdd, 0xae, 0x49, 0x7e, 0x67, 0x1a, 0xb5, 0x7c, 0x6b, 0x41, - 0xbe, 0xb6, 0xf1, 0xa1, 0x81, 0x79, 0x86, 0x23, 0x5c, 0xae, 0xec, 0xcc, 0x6c, 0x9b, 0x9b, 0x67, - 0xd9, 0x59, 0x70, 0xb7, 0xf1, 0xed, 0xab, 0xbb, 0xc6, 0x3f, 0x5e, 0xdd, 0x35, 0xfe, 0xf3, 0xea, - 0xae, 0x71, 0x51, 0x96, 0x7f, 0xce, 0x3f, 0xf8, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x29, 0xd9, - 0x1c, 0x06, 0xee, 0x18, 0x00, 0x00, + // 2318 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x8a, 0x14, 0x3f, 0x1e, 0x3f, 0xac, 0x4c, 0xd2, 0x94, 0x59, 0x04, 0x8e, 0xb2, 0x75, + 0x1d, 0xd9, 0x71, 0x96, 0xa9, 0x6c, 0x43, 0xae, 0xdd, 0x26, 0x35, 0xf5, 0x01, 0x31, 0x96, 0x6c, + 0x75, 0xe4, 0xc0, 0x68, 0x90, 0x02, 0x5d, 0x71, 0x87, 0xf4, 0xd6, 0xab, 0x9d, 0xed, 0xec, 0xd0, + 0xb2, 0x92, 0x4b, 0x7b, 0xed, 0xa9, 0xa7, 0x5e, 0x0b, 0xf4, 0xd4, 0x63, 0x4f, 0x3d, 0x16, 0x3d, + 0x06, 0xe8, 0xa5, 0x97, 0x02, 0x45, 0x0f, 0x41, 0xe1, 0x3f, 0xa2, 0x40, 0x6f, 0xc5, 0x9b, 0x9d, + 0x25, 0x87, 0x14, 0xbd, 0xa4, 0xe0, 0x93, 0x66, 0xde, 0xbe, 0xf7, 0x9b, 0x79, 0xdf, 0x6f, 0x28, + 0x68, 0x0c, 0x3c, 0xc9, 0x4e, 0xbd, 0x33, 0x37, 0x16, 0x5c, 0x72, 0xf2, 0xce, 0x09, 0x3f, 0x3e, + 0x73, 0x8f, 0x87, 0x41, 0xe8, 0x3f, 0x0b, 0xa4, 0xfb, 0xfc, 0x07, 0x6e, 0x5f, 0xf0, 0x48, 0xb2, + 0xc8, 0xb7, 0x3f, 0x1a, 0x04, 0xf2, 0xe9, 0xf0, 0xd8, 0xed, 0xf1, 0x93, 0xf6, 0x80, 0x0f, 0x78, + 0x5b, 0x49, 0x1c, 0x0f, 0xfb, 0x6a, 0xa7, 
0x36, 0x6a, 0x95, 0x22, 0xd9, 0x1b, 0xd3, 0xec, 0x03, + 0xce, 0x07, 0x21, 0xf3, 0xe2, 0x20, 0xd1, 0xcb, 0xb6, 0x88, 0x7b, 0xed, 0x44, 0x7a, 0x72, 0x98, + 0x68, 0x99, 0x1b, 0x86, 0x0c, 0x5e, 0xa4, 0x9d, 0x5d, 0xa4, 0x9d, 0xf0, 0xf0, 0x39, 0x13, 0xed, + 0xf8, 0xb8, 0xcd, 0xe3, 0x8c, 0xbb, 0xfd, 0x4a, 0x6e, 0x2f, 0x0e, 0xda, 0xf2, 0x2c, 0x66, 0x49, + 0xfb, 0x94, 0x8b, 0x67, 0x4c, 0x68, 0x81, 0x9b, 0xaf, 0x14, 0x18, 0xca, 0x20, 0x44, 0xa9, 0x9e, + 0x17, 0x27, 0x78, 0x08, 0xfe, 0xd5, 0x42, 0xa6, 0xda, 0x92, 0x47, 0x41, 0x22, 0x83, 0x60, 0x10, + 0xb4, 0xfb, 0x89, 0x92, 0x49, 0x4f, 0x41, 0x25, 0x52, 0x76, 0xe7, 0xef, 0x45, 0x28, 0x51, 0x96, + 0x0c, 0x43, 0x49, 0xae, 0x42, 0x43, 0xb0, 0xfe, 0x36, 0x8b, 0x05, 0xeb, 0x79, 0x92, 0xf9, 0x2d, + 0x6b, 0xcd, 0x5a, 0xaf, 0xee, 0x2d, 0xd1, 0x49, 0x32, 0xf9, 0x1c, 0x9a, 0x82, 0xf5, 0x13, 0x83, + 0x71, 0x79, 0xcd, 0x5a, 0xaf, 0x6d, 0x7c, 0xe8, 0xbe, 0xd2, 0x19, 0x2e, 0x65, 0xfd, 0x03, 0x2f, + 0x1e, 0x8b, 0xec, 0x2d, 0xd1, 0x29, 0x10, 0xb2, 0x01, 0x05, 0xc1, 0xfa, 0xad, 0x82, 0xc2, 0xba, + 0x9c, 0x8f, 0xb5, 0xb7, 0x44, 0x91, 0x99, 0x6c, 0x42, 0x11, 0x51, 0x5a, 0x45, 0x25, 0xf4, 0xfe, + 0xdc, 0x0b, 0xec, 0x2d, 0x51, 0x25, 0x40, 0x1e, 0x40, 0xe5, 0x84, 0x49, 0xcf, 0xf7, 0xa4, 0xd7, + 0x82, 0xb5, 0xc2, 0x7a, 0x6d, 0xa3, 0x9d, 0x2b, 0x8c, 0x06, 0x72, 0x0f, 0xb4, 0xc4, 0x4e, 0x24, + 0xc5, 0x19, 0x1d, 0x01, 0x90, 0x27, 0x50, 0xf7, 0xa4, 0x64, 0x68, 0xd5, 0x80, 0x47, 0x49, 0xab, + 0xa6, 0x00, 0x6f, 0xce, 0x07, 0xbc, 0x6f, 0x48, 0xa5, 0xa0, 0x13, 0x40, 0xf6, 0x3d, 0x68, 0x4c, + 0x9c, 0x49, 0x56, 0xa1, 0xf0, 0x8c, 0x9d, 0xa5, 0x8e, 0xa1, 0xb8, 0x24, 0x6f, 0xc1, 0xca, 0x73, + 0x2f, 0x1c, 0x32, 0xe5, 0x83, 0x3a, 0x4d, 0x37, 0x77, 0x97, 0xef, 0x58, 0xf6, 0x53, 0x78, 0xe3, + 0x1c, 0xfe, 0x0c, 0x80, 0x1f, 0x9b, 0x00, 0xb5, 0x8d, 0x0f, 0x72, 0x6e, 0x6d, 0xc2, 0x19, 0x27, + 0x75, 0x2a, 0x50, 0x12, 0x4a, 0x21, 0xe7, 0xf7, 0x16, 0xac, 0x4e, 0xbb, 0x9a, 0x74, 0xb5, 0x93, + 0x2c, 0x65, 0x96, 0xdb, 0x17, 0x88, 0x12, 0x24, 0x68, 0xc3, 0x28, 0x08, 0x7b, 
0x13, 0xaa, 0x23, + 0xd2, 0x3c, 0x63, 0x54, 0x8d, 0x2b, 0x3a, 0x9b, 0x50, 0xa0, 0xac, 0x4f, 0x9a, 0xb0, 0x1c, 0xe8, + 0xb8, 0xa6, 0xcb, 0x81, 0x4f, 0xd6, 0xa0, 0xe0, 0xb3, 0xbe, 0x56, 0xbd, 0xe9, 0xc6, 0xc7, 0xee, + 0x36, 0xeb, 0x07, 0x51, 0x80, 0x2a, 0x52, 0xfc, 0xe4, 0xfc, 0xd1, 0xc2, 0xfc, 0xc0, 0x6b, 0x91, + 0x4f, 0x27, 0xf4, 0x98, 0x1f, 0xed, 0xe7, 0x6e, 0xff, 0x24, 0xff, 0xf6, 0xb7, 0x26, 0x3d, 0x31, + 0x27, 0x05, 0x4c, 0xed, 0xfe, 0x66, 0x41, 0xdd, 0x74, 0x0e, 0xf9, 0x1c, 0x6a, 0x46, 0x20, 0xe9, + 0x1b, 0xdf, 0x5c, 0xd0, 0xb5, 0xe6, 0x86, 0x9a, 0x38, 0xf6, 0xcf, 0xa0, 0x66, 0x7c, 0x23, 0xf7, + 0xa0, 0x14, 0x44, 0x92, 0x4b, 0xae, 0xb4, 0xc8, 0xcf, 0xbf, 0x6e, 0xf4, 0x98, 0x4b, 0xbe, 0xb7, + 0x44, 0xb5, 0x48, 0xa7, 0x31, 0x81, 0xe5, 0xfc, 0xb6, 0x08, 0xa5, 0x94, 0x87, 0x5c, 0x81, 0x46, + 0x2c, 0x98, 0x1f, 0x60, 0x08, 0x3c, 0x3e, 0x8b, 0x99, 0xb6, 0xd1, 0x24, 0x91, 0x74, 0xa0, 0x3e, + 0x22, 0xd0, 0x91, 0x0f, 0xe7, 0x19, 0x6d, 0x42, 0x66, 0xe2, 0xa4, 0x43, 0x4f, 0x3e, 0x55, 0xc5, + 0xc7, 0x3c, 0x09, 0x89, 0x64, 0x07, 0x2a, 0xc9, 0xf0, 0xf8, 0x97, 0xac, 0x27, 0xb1, 0xd0, 0xa0, + 0x25, 0xaf, 0xcd, 0x55, 0xd4, 0x3d, 0x4a, 0x25, 0xe8, 0x48, 0xd4, 0xfe, 0xd3, 0x32, 0x94, 0x35, + 0x95, 0x3c, 0x80, 0x62, 0xc2, 0xc2, 0xbe, 0xb6, 0xdb, 0xed, 0x85, 0xe1, 0xdc, 0x23, 0x16, 0xf6, + 0xf5, 0x1a, 0x6b, 0x19, 0x82, 0x90, 0x3d, 0x28, 0x08, 0xef, 0x54, 0x1b, 0xe0, 0xd6, 0xe2, 0x58, + 0xd4, 0x3b, 0x1d, 0x43, 0x21, 0x84, 0xdd, 0x80, 0x9a, 0x71, 0x80, 0x1d, 0x02, 0x8c, 0x79, 0xc8, + 0x67, 0x50, 0xda, 0x0e, 0x06, 0x2c, 0x91, 0x2a, 0x9c, 0xaa, 0x9d, 0x8d, 0x6f, 0xbe, 0x7d, 0x6f, + 0xe9, 0xdf, 0xdf, 0xbe, 0x77, 0xdd, 0x68, 0x38, 0x3c, 0x66, 0x51, 0x8f, 0x47, 0xd2, 0x0b, 0x22, + 0x26, 0xb0, 0x6f, 0x7e, 0xe4, 0x2b, 0x11, 0x37, 0x95, 0xa4, 0x1a, 0x81, 0x10, 0x28, 0x46, 0xde, + 0x49, 0x96, 0xa7, 0x6a, 0xdd, 0xa9, 0x8e, 0xcc, 0xe3, 0x48, 0x68, 0x50, 0x26, 0x87, 0x22, 0xa2, + 0xec, 0x57, 0x43, 0xe4, 0xff, 0x61, 0x56, 0x61, 0x16, 0x88, 0xb4, 0xb4, 0xb6, 0x52, 0x2d, 0x40, + 0xd6, 0x61, 0x85, 
0x09, 0xc1, 0x85, 0xb6, 0x0f, 0x71, 0xd3, 0x66, 0xee, 0x8a, 0xb8, 0xe7, 0x1e, + 0xa9, 0x66, 0x4e, 0x53, 0x06, 0x67, 0x15, 0x9a, 0xd9, 0xa9, 0x49, 0xcc, 0xa3, 0x84, 0x39, 0x97, + 0xa0, 0xd1, 0x8d, 0xe2, 0xa1, 0x4c, 0xf4, 0x3d, 0x9c, 0xbf, 0x5a, 0xd0, 0xcc, 0x28, 0x29, 0x0f, + 0xf9, 0x12, 0x6a, 0xe3, 0x9a, 0x91, 0x15, 0x87, 0xbb, 0xb9, 0x5e, 0x30, 0xe5, 0x8d, 0x82, 0xa3, + 0x6b, 0x85, 0x09, 0x67, 0x3f, 0x84, 0xd5, 0x69, 0x86, 0x19, 0x95, 0xe3, 0xca, 0x64, 0xe5, 0x98, + 0x2e, 0x64, 0x46, 0xa5, 0xf8, 0xa7, 0x05, 0xef, 0x50, 0xa6, 0xa6, 0x93, 0xee, 0x89, 0x37, 0x60, + 0x5b, 0x3c, 0xea, 0x07, 0x83, 0xcc, 0xcc, 0xab, 0xaa, 0x4a, 0x66, 0xc8, 0x98, 0x21, 0xeb, 0x50, + 0x39, 0x0c, 0x3d, 0xd9, 0xe7, 0xe2, 0x44, 0x83, 0xd7, 0x11, 0x3c, 0xa3, 0xd1, 0xd1, 0x57, 0xb2, + 0x06, 0x35, 0x0d, 0x7c, 0xc0, 0x7d, 0xa6, 0x33, 0xc9, 0x24, 0x91, 0x16, 0x94, 0xf7, 0xf9, 0xe0, + 0x21, 0xfa, 0xbd, 0xa8, 0xbe, 0x66, 0x5b, 0xe2, 0x40, 0x5d, 0x33, 0x0a, 0x95, 0xf0, 0x2b, 0x6b, + 0xd6, 0xfa, 0x0a, 0x9d, 0xa0, 0x91, 0x77, 0xa1, 0x7a, 0xc4, 0x92, 0x24, 0xe0, 0x51, 0x77, 0xbb, + 0x55, 0x52, 0xf2, 0x63, 0x82, 0xf3, 0x6b, 0x0b, 0xec, 0x59, 0x7a, 0x69, 0x27, 0x99, 0xb1, 0x6b, + 0xbd, 0x66, 0xec, 0xbe, 0x0d, 0xa5, 0x14, 0x5d, 0xb7, 0x5c, 0xbd, 0x73, 0xfe, 0xb2, 0x02, 0xf5, + 0x23, 0xbc, 0x40, 0x66, 0x4d, 0x17, 0x60, 0xec, 0x04, 0x1d, 0xb8, 0xd3, 0xae, 0x31, 0x38, 0x88, + 0x0d, 0x95, 0x5d, 0x1d, 0x24, 0x3a, 0x31, 0x46, 0x7b, 0xf2, 0x05, 0xd4, 0xb2, 0xf5, 0xa3, 0x58, + 0xb6, 0x0a, 0x2a, 0xca, 0xee, 0xe4, 0x44, 0x99, 0x79, 0x13, 0xd7, 0x10, 0xd5, 0x31, 0x66, 0x50, + 0xc8, 0x0d, 0x78, 0xc3, 0x0b, 0x43, 0x7e, 0xaa, 0x13, 0x47, 0xa5, 0x80, 0x72, 0x41, 0x85, 0x9e, + 0xff, 0x40, 0x3e, 0x86, 0x37, 0x0d, 0xe2, 0x7d, 0x21, 0xbc, 0x33, 0x8c, 0x99, 0x92, 0xe2, 0x9f, + 0xf5, 0x09, 0xbb, 0xf2, 0x6e, 0x10, 0x79, 0x61, 0x0b, 0x14, 0x4f, 0xba, 0x41, 0x9f, 0xef, 0xbc, + 0x88, 0xb9, 0x90, 0x4c, 0xdc, 0x97, 0x52, 0xb4, 0x6a, 0xca, 0x98, 0x13, 0x34, 0x72, 0x08, 0xf5, + 0x2d, 0xaf, 0xf7, 0x94, 0x75, 0x4f, 0x90, 0x98, 0xb4, 
0xea, 0x4a, 0xed, 0x1b, 0x39, 0x6a, 0x2b, + 0xf6, 0x47, 0xb1, 0x39, 0x51, 0x99, 0x08, 0xa4, 0x07, 0xcd, 0x4c, 0xf5, 0x34, 0x0f, 0x5b, 0x0d, + 0x85, 0x79, 0xef, 0xa2, 0xa6, 0x4c, 0xa5, 0xd3, 0x23, 0xa6, 0x20, 0xd1, 0x91, 0x3b, 0x98, 0x72, + 0x9e, 0x64, 0xad, 0xa6, 0xd2, 0x79, 0xb4, 0xb7, 0x3f, 0x81, 0xd5, 0x69, 0x6f, 0x5c, 0x64, 0x90, + 0xb1, 0x7f, 0x0a, 0x6f, 0xce, 0xb8, 0xc2, 0x6b, 0xd5, 0x84, 0x3f, 0x5b, 0xf0, 0xc6, 0x39, 0xbb, + 0x61, 0x89, 0x36, 0x9a, 0xaf, 0x5a, 0x93, 0x03, 0x58, 0x41, 0xbf, 0x24, 0xad, 0x65, 0x65, 0xb4, + 0xcd, 0x8b, 0x38, 0xc2, 0x55, 0x92, 0xa9, 0xc1, 0x52, 0x14, 0xfb, 0x0e, 0xc0, 0x98, 0x78, 0xa1, + 0x71, 0xee, 0x4b, 0x68, 0x68, 0xaf, 0xe8, 0x04, 0x5f, 0x4d, 0x1f, 0x0f, 0x5a, 0x18, 0x9f, 0x06, + 0xe3, 0x96, 0x51, 0xb8, 0x60, 0xcb, 0x70, 0xbe, 0x86, 0x4b, 0x94, 0x79, 0xfe, 0x6e, 0x10, 0xb2, + 0x57, 0x57, 0x46, 0xcc, 0xd6, 0x20, 0x4c, 0xc7, 0x86, 0x2c, 0x5b, 0xf5, 0x9e, 0xdc, 0x85, 0x15, + 0xea, 0x45, 0x03, 0xa6, 0x8f, 0xbe, 0x92, 0x73, 0xb4, 0x3a, 0x04, 0x79, 0x69, 0x2a, 0xe2, 0xdc, + 0x83, 0xea, 0x88, 0x86, 0xb5, 0xe6, 0x51, 0xbf, 0x9f, 0xb0, 0xb4, 0x6e, 0x15, 0xa8, 0xde, 0x21, + 0x7d, 0x9f, 0x45, 0x03, 0x7d, 0x74, 0x81, 0xea, 0x9d, 0x73, 0x15, 0xc7, 0xef, 0xec, 0xe6, 0xda, + 0x34, 0x04, 0x8a, 0xdb, 0xf8, 0xcc, 0xb1, 0x54, 0x82, 0xa9, 0xb5, 0xe3, 0x63, 0xab, 0xf3, 0xfc, + 0xed, 0x40, 0xbc, 0x5a, 0xc1, 0x16, 0x94, 0xb7, 0x03, 0x61, 0xe8, 0x97, 0x6d, 0xc9, 0x55, 0x6c, + 0x82, 0xbd, 0x70, 0xe8, 0xa3, 0xb6, 0x92, 0x89, 0x48, 0x57, 0xfb, 0x29, 0xaa, 0xf3, 0x69, 0x6a, + 0x47, 0x75, 0x8a, 0xbe, 0xcc, 0x0d, 0x28, 0xb3, 0x48, 0x8a, 0x80, 0x65, 0x9d, 0x92, 0xb8, 0xe9, + 0xcb, 0xd4, 0x55, 0x2f, 0x53, 0xd5, 0x91, 0x69, 0xc6, 0xe2, 0x6c, 0xc2, 0x25, 0x24, 0xe4, 0x3b, + 0x82, 0x40, 0xd1, 0xb8, 0xa4, 0x5a, 0x3b, 0x77, 0x61, 0x75, 0x2c, 0xa8, 0x8f, 0xbe, 0x0a, 0x45, + 0x1c, 0x36, 0x75, 0x21, 0x9e, 0x75, 0xae, 0xfa, 0xee, 0x34, 0xa0, 0x76, 0x18, 0x44, 0x59, 0x4f, + 0x74, 0x5e, 0x5a, 0x50, 0x3f, 0xe4, 0xd1, 0xb8, 0x97, 0x1c, 0xc2, 0xa5, 0x2c, 0x03, 0xef, 
0x1f, + 0x76, 0xb7, 0xbc, 0x38, 0x53, 0x65, 0xed, 0xbc, 0x9b, 0xf5, 0x13, 0xdd, 0x4d, 0x19, 0x3b, 0x45, + 0x6c, 0x3b, 0x74, 0x5a, 0x9c, 0xfc, 0x04, 0xca, 0xfb, 0xfb, 0x1d, 0x85, 0xb4, 0x7c, 0x21, 0xa4, + 0x4c, 0x8c, 0x7c, 0x02, 0xe5, 0x27, 0xea, 0x97, 0x83, 0x44, 0xb7, 0x86, 0x19, 0x21, 0x97, 0x2a, + 0x9a, 0xb2, 0x51, 0xd6, 0xe3, 0xc2, 0xa7, 0x99, 0x90, 0xf3, 0x5f, 0x0b, 0x6a, 0x4f, 0xbc, 0xf1, + 0xbc, 0xf5, 0x19, 0x94, 0xfc, 0xd7, 0xee, 0x97, 0xe9, 0x16, 0xb3, 0x38, 0x64, 0xcf, 0x59, 0xa8, + 0x43, 0x35, 0xdd, 0x20, 0x35, 0x79, 0xca, 0x45, 0x9a, 0x9d, 0x75, 0x9a, 0x6e, 0x30, 0xae, 0x7d, + 0x26, 0xbd, 0x20, 0x54, 0x83, 0x76, 0x9d, 0xea, 0x1d, 0x7a, 0x7d, 0x28, 0x42, 0xd5, 0x94, 0xaa, + 0x14, 0x97, 0xc4, 0x81, 0x62, 0x10, 0xf5, 0xb9, 0xea, 0x3b, 0xba, 0xba, 0x1d, 0xf1, 0xa1, 0xe8, + 0xb1, 0x6e, 0xd4, 0xe7, 0x54, 0x7d, 0x23, 0xef, 0x43, 0x49, 0x60, 0x1a, 0x25, 0xad, 0xb2, 0x32, + 0x4a, 0x15, 0xb9, 0xd2, 0x64, 0xd3, 0x1f, 0x9c, 0x26, 0xd4, 0x53, 0xbd, 0xf5, 0xc4, 0xf7, 0xbb, + 0x65, 0x78, 0xf3, 0x21, 0x3b, 0xdd, 0xca, 0xf4, 0xca, 0x0c, 0xb2, 0x06, 0xb5, 0x11, 0xad, 0xbb, + 0xad, 0xc3, 0xcf, 0x24, 0xe1, 0x61, 0x07, 0x7c, 0x18, 0xc9, 0xcc, 0x87, 0xea, 0x30, 0x45, 0xa1, + 0xfa, 0x03, 0xf9, 0x3e, 0x94, 0x1f, 0x32, 0x79, 0xca, 0xc5, 0x33, 0xa5, 0x75, 0x73, 0xa3, 0x86, + 0x3c, 0x0f, 0x99, 0xc4, 0xf1, 0x88, 0x66, 0xdf, 0x70, 0xe6, 0x8a, 0xb3, 0x99, 0xab, 0x38, 0x6b, + 0xe6, 0xca, 0xbe, 0x92, 0x4d, 0xa8, 0xf5, 0x78, 0x94, 0x48, 0xe1, 0x05, 0x78, 0xf0, 0x8a, 0x62, + 0xfe, 0x0e, 0x32, 0xa7, 0x8e, 0xdd, 0x1a, 0x7f, 0xa4, 0x26, 0x27, 0xb9, 0x0e, 0xc0, 0x5e, 0x48, + 0xe1, 0xed, 0xf1, 0x44, 0x26, 0xad, 0x92, 0xba, 0x30, 0xa0, 0x1c, 0x12, 0xba, 0x87, 0xd4, 0xf8, + 0xea, 0xbc, 0x0d, 0x6f, 0x4d, 0x5a, 0x44, 0x9b, 0xea, 0x1e, 0x7c, 0x97, 0xb2, 0x90, 0x79, 0x09, + 0xbb, 0xb8, 0xb5, 0x1c, 0x1b, 0x5a, 0xe7, 0x85, 0x35, 0xf0, 0xff, 0x0a, 0x50, 0xdb, 0x79, 0xc1, + 0x7a, 0x07, 0x2c, 0x49, 0xbc, 0x81, 0x9a, 0xfc, 0x0e, 0x05, 0xef, 0xb1, 0x24, 0x19, 0x61, 0x8d, + 0x09, 0xe4, 0x47, 0x50, 0xec, 
0x46, 0x81, 0xd4, 0x6d, 0xee, 0x6a, 0xee, 0xe0, 0x1d, 0x48, 0x8d, + 0x89, 0x6f, 0x27, 0xdc, 0x92, 0xbb, 0x50, 0xc4, 0x22, 0xb1, 0x48, 0xa1, 0xf6, 0x0d, 0x59, 0x94, + 0x21, 0x1d, 0xf5, 0xcb, 0x59, 0xf0, 0x15, 0xd3, 0x5e, 0x5a, 0xcf, 0xef, 0x30, 0xc1, 0x57, 0x6c, + 0x8c, 0xa0, 0x25, 0xc9, 0x0e, 0x94, 0x8f, 0xa4, 0x27, 0x24, 0xf3, 0xb5, 0xf7, 0xf2, 0x9e, 0x96, + 0x9a, 0x73, 0x8c, 0x92, 0xc9, 0xa2, 0x11, 0x76, 0x5e, 0x04, 0x52, 0x67, 0x43, 0x9e, 0x11, 0x90, + 0xcd, 0x50, 0x04, 0xb7, 0x28, 0xbd, 0xcd, 0x23, 0xd6, 0x2a, 0xcf, 0x95, 0x46, 0x36, 0x43, 0x1a, + 0xb7, 0x68, 0x86, 0xa3, 0x60, 0x80, 0xf3, 0x5d, 0x65, 0xae, 0x19, 0x52, 0x46, 0xc3, 0x0c, 0x29, + 0xa1, 0x53, 0x86, 0x15, 0x35, 0xcd, 0x38, 0x7f, 0xb0, 0xa0, 0x66, 0xf8, 0x69, 0x81, 0xbc, 0x7b, + 0x17, 0x8a, 0x07, 0x4c, 0x7a, 0xda, 0xff, 0x15, 0x95, 0x75, 0x4c, 0x7a, 0x54, 0x51, 0xb1, 0x70, + 0xec, 0xfa, 0x69, 0x51, 0x6c, 0x50, 0x5c, 0x22, 0xe5, 0xb1, 0x3c, 0x53, 0x2e, 0xab, 0x50, 0x5c, + 0x92, 0x1b, 0x50, 0x39, 0x62, 0xbd, 0xa1, 0x08, 0xe4, 0x99, 0x72, 0x42, 0x73, 0x63, 0x55, 0x95, + 0x13, 0x4d, 0x53, 0xc9, 0x39, 0xe2, 0x70, 0x1e, 0x60, 0x70, 0x8e, 0x2f, 0x48, 0xa0, 0xb8, 0x85, + 0xef, 0x1d, 0xbc, 0x59, 0x83, 0xaa, 0x35, 0x3e, 0x39, 0x77, 0xe6, 0x3d, 0x39, 0x77, 0xb2, 0x27, + 0xe7, 0xa4, 0x53, 0xb1, 0xfb, 0x18, 0x46, 0x76, 0xee, 0x43, 0x75, 0x14, 0x78, 0xa4, 0x09, 0xcb, + 0xbb, 0xbe, 0x3e, 0x69, 0x79, 0xd7, 0x47, 0x55, 0x76, 0x1e, 0xed, 0xaa, 0x53, 0x2a, 0x14, 0x97, + 0xa3, 0x5e, 0x5f, 0x30, 0x7a, 0xfd, 0x26, 0x3e, 0xa6, 0x8d, 0xe8, 0x43, 0x26, 0xca, 0x4f, 0x93, + 0xec, 0xca, 0xb8, 0x4e, 0xd5, 0x08, 0x13, 0x85, 0xa5, 0xd4, 0x08, 0x13, 0xe7, 0x7b, 0xd0, 0x98, + 0xf0, 0x17, 0x32, 0xa9, 0xd7, 0x9b, 0x1e, 0x09, 0x71, 0xbd, 0xf1, 0xaf, 0x2a, 0x54, 0xf7, 0xf7, + 0x3b, 0x1d, 0x11, 0xf8, 0x03, 0x46, 0x7e, 0x63, 0x01, 0x39, 0xff, 0x0c, 0x23, 0xb7, 0xf2, 0x33, + 0x63, 0xf6, 0x6b, 0xd4, 0xbe, 0x7d, 0x41, 0x29, 0xdd, 0x9f, 0xbf, 0x80, 0x15, 0x35, 0x1b, 0x92, + 0x0f, 0x16, 0x9c, 0xe9, 0xed, 0xf5, 0xf9, 0x8c, 0x1a, 0xbb, 0x07, 
0x95, 0x6c, 0xbe, 0x22, 0xd7, + 0x73, 0xaf, 0x37, 0x31, 0x3e, 0xda, 0x1f, 0x2e, 0xc4, 0xab, 0x0f, 0xf9, 0x05, 0x94, 0xf5, 0xd8, + 0x44, 0xae, 0xcd, 0x91, 0x1b, 0x0f, 0x70, 0xf6, 0xf5, 0x45, 0x58, 0xc7, 0x6a, 0x64, 0xe3, 0x51, + 0xae, 0x1a, 0x53, 0xc3, 0x57, 0xae, 0x1a, 0xe7, 0xe6, 0xad, 0x27, 0x50, 0xc4, 0x39, 0x8a, 0xe4, + 0xd5, 0x13, 0x63, 0xd0, 0xb2, 0xf3, 0xdc, 0x35, 0x31, 0x80, 0xfd, 0x1c, 0xeb, 0xae, 0x7a, 0x8b, + 0xe6, 0x57, 0x5c, 0xe3, 0x07, 0x24, 0xfb, 0xda, 0x02, 0x9c, 0x63, 0x78, 0xfd, 0x8e, 0x5b, 0x5f, + 0xe0, 0x57, 0x9c, 0xf9, 0xf0, 0x53, 0xbf, 0x17, 0x71, 0xa8, 0x9b, 0xed, 0x94, 0xb8, 0x39, 0xa2, + 0x33, 0x26, 0x11, 0xbb, 0xbd, 0x30, 0xbf, 0x3e, 0xf0, 0x6b, 0x7c, 0x13, 0x4c, 0xb6, 0x5a, 0xb2, + 0x91, 0x6b, 0x8e, 0x99, 0x4d, 0xdd, 0xbe, 0x79, 0x21, 0x19, 0x7d, 0xb8, 0x97, 0xb6, 0x72, 0xdd, + 0xae, 0x49, 0x7e, 0x67, 0x1a, 0xb5, 0x7c, 0x7b, 0x41, 0xbe, 0x75, 0xeb, 0x63, 0x0b, 0xe3, 0x0c, + 0x47, 0xb8, 0x5c, 0x6c, 0x63, 0xb6, 0xcd, 0x8d, 0x33, 0x73, 0x16, 0xec, 0xd4, 0xbf, 0x79, 0x79, + 0xd9, 0xfa, 0xc7, 0xcb, 0xcb, 0xd6, 0x7f, 0x5e, 0x5e, 0xb6, 0x8e, 0x4b, 0xea, 0xff, 0x65, 0x37, + 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x5f, 0xee, 0x6d, 0x81, 0x1c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -2945,6 +3342,32 @@ func (m *Result) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Attestations) > 0 { + for k := range m.Attestations { + v := m.Attestations[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } if len(m.Metadata) > 0 { for k := range m.Metadata { v := m.Metadata[k] @@ -3200,7 +3623,7 @@ func (m *RefMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { +func (m *Attestations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3210,12 +3633,12 @@ func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReturnRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *Attestations) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReturnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Attestations) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3224,34 +3647,24 @@ func (m *ReturnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.Error != nil { - { - size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Result != nil { - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - 
return 0, err + if len(m.Attestation) > 0 { + for iNdEx := len(m.Attestation) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attestation[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { +func (m *Attestations_Attestation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3261,12 +3674,12 @@ func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReturnResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *Attestations_Attestation) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReturnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Attestations_Attestation) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3275,37 +3688,40 @@ func (m *ReturnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil -} - -func (m *InputsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.Attestation != nil { + { + size := m.Attestation.Size() + i -= size + if _, err := m.Attestation.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } } - return dAtA[:n], nil + return len(dAtA) - i, nil } -func (m *InputsRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *Attestations_Attestation_Intoto) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *InputsRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Attestations_Attestation_Intoto) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Intoto != nil { + { + size, err := m.Intoto.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } - -func (m *InputsResponse) Marshal() (dAtA []byte, err error) { +func (m *InToto) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3315,12 +3731,12 @@ func (m *InputsResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InputsResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *InToto) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *InputsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InToto) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3329,36 +3745,50 @@ func (m *InputsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Definitions) > 0 { - for k := range m.Definitions { - v := m.Definitions[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - 
dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x22 + } + } + if len(m.PredicatePath) > 0 { + i -= len(m.PredicatePath) + copy(dAtA[i:], m.PredicatePath) + i = encodeVarintGateway(dAtA, i, uint64(len(m.PredicatePath))) + i-- + dAtA[i] = 0x1a + } + if m.PredicateRef != nil { + { + size, err := m.PredicateRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 + } + if len(m.PredicateType) > 0 { + i -= len(m.PredicateType) + copy(dAtA[i:], m.PredicateType) + i = encodeVarintGateway(dAtA, i, uint64(len(m.PredicateType))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ResolveImageConfigRequest) Marshal() (dAtA []byte, err error) { +func (m *InToto_Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3368,12 +3798,12 @@ func (m *ResolveImageConfigRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResolveImageConfigRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *InToto_Subject) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InToto_Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3382,35 +3812,49 @@ func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, erro i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.SessionID) > 0 { - i -= len(m.SessionID) - copy(dAtA[i:], m.SessionID) - i = encodeVarintGateway(dAtA, i, uint64(len(m.SessionID))) - i-- - dAtA[i] = 0x32 - } - if m.ResolverType != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.ResolverType)) - i-- - dAtA[i] = 0x28 - } - if len(m.LogName) > 0 { - i -= len(m.LogName) - copy(dAtA[i:], m.LogName) 
- i = encodeVarintGateway(dAtA, i, uint64(len(m.LogName))) - i-- - dAtA[i] = 0x22 + if m.Subject != nil { + { + size := m.Subject.Size() + i -= size + if _, err := m.Subject.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } } - if len(m.ResolveMode) > 0 { - i -= len(m.ResolveMode) - copy(dAtA[i:], m.ResolveMode) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ResolveMode))) + return len(dAtA) - i, nil +} + +func (m *InToto_Subject_Self) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InToto_Subject_Self) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Self != nil { + { + size, err := m.Self.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa } - if m.Platform != nil { + return len(dAtA) - i, nil +} +func (m *InToto_Subject_Raw) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InToto_Subject_Raw) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Raw != nil { { - size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Raw.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3420,17 +3864,9 @@ func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, erro i-- dAtA[i] = 0x12 } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } - -func (m *ResolveImageConfigResponse) Marshal() (dAtA []byte, err error) { +func (m *InToto_Subject_SelfSubject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3440,12 +3876,12 @@ func (m *ResolveImageConfigResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m 
*ResolveImageConfigResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *InToto_Subject_SelfSubject) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResolveImageConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InToto_Subject_SelfSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3454,24 +3890,10 @@ func (m *ResolveImageConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, err i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Config) > 0 { - i -= len(m.Config) - copy(dAtA[i:], m.Config) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Config))) - i-- - dAtA[i] = 0x12 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *SolveRequest) Marshal() (dAtA []byte, err error) { +func (m *InToto_Subject_RawSubject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3481,12 +3903,12 @@ func (m *SolveRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *InToto_Subject_RawSubject) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InToto_Subject_RawSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3495,122 +3917,64 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.Evaluate { + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Name))) i-- - if m.Evaluate { - dAtA[i] = 1 - } else { - 
dAtA[i] = 0 - } - i-- - dAtA[i] = 0x70 + dAtA[i] = 0x12 } - if len(m.FrontendInputs) > 0 { - for k := range m.FrontendInputs { - v := m.FrontendInputs[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) + if len(m.Digest) > 0 { + for iNdEx := len(m.Digest) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Digest[iNdEx]) + copy(dAtA[i:], m.Digest[iNdEx]) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest[iNdEx]))) i-- dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x6a - } - } - if len(m.CacheImports) > 0 { - for iNdEx := len(m.CacheImports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.CacheImports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } - } - if len(m.ExporterAttr) > 0 { - i -= len(m.ExporterAttr) - copy(dAtA[i:], m.ExporterAttr) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ExporterAttr))) - i-- - dAtA[i] = 0x5a - } - if m.Final { - i-- - if m.Final { - dAtA[i] = 1 - } else { - dAtA[i] = 0 } - i-- - dAtA[i] = 0x50 } - if m.AllowResultArrayRef { - i-- - if m.AllowResultArrayRef { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 + return len(dAtA) - i, nil +} + +func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.AllowResultReturn { - i-- - if m.AllowResultReturn { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + return dAtA[:n], nil +} + +func (m *ReturnRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ReturnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.FrontendOpt) > 0 { - for k := range m.FrontendOpt { - v := m.FrontendOpt[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } - } - if len(m.Frontend) > 0 { - i -= len(m.Frontend) - copy(dAtA[i:], m.Frontend) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Frontend))) i-- dAtA[i] = 0x12 } - if m.Definition != nil { + if m.Result != nil { { - size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3623,7 +3987,7 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { +func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3633,12 +3997,12 @@ func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) { +func (m *ReturnResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReturnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3647,36 +4011,10 @@ 
func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Attrs) > 0 { - for k := range m.Attrs { - v := m.Attrs[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *SolveResponse) Marshal() (dAtA []byte, err error) { +func (m *InputsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3686,12 +4024,12 @@ func (m *SolveResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *InputsRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InputsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3700,29 +4038,10 @@ func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.Result != nil { - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *ReadFileRequest) Marshal() (dAtA []byte, err error) { 
+func (m *InputsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3732,12 +4051,12 @@ func (m *ReadFileRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReadFileRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *InputsResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReadFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InputsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3746,36 +4065,36 @@ func (m *ReadFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.Range != nil { - { - size, err := m.Range.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Definitions) > 0 { + for k := range m.Definitions { + v := m.Definitions[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x1a - } - if len(m.FilePath) > 0 { - i -= len(m.FilePath) - copy(dAtA[i:], m.FilePath) - i = encodeVarintGateway(dAtA, i, uint64(len(m.FilePath))) - i-- - dAtA[i] = 0x12 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *FileRange) Marshal() (dAtA []byte, err error) { +func (m *ResolveImageConfigRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) @@ -3785,12 +4104,12 @@ func (m *FileRange) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *FileRange) MarshalTo(dAtA []byte) (int, error) { +func (m *ResolveImageConfigRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *FileRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3799,20 +4118,55 @@ func (m *FileRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.Length != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Length)) + if len(m.SessionID) > 0 { + i -= len(m.SessionID) + copy(dAtA[i:], m.SessionID) + i = encodeVarintGateway(dAtA, i, uint64(len(m.SessionID))) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x32 } - if m.Offset != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Offset)) + if m.ResolverType != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.ResolverType)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x28 + } + if len(m.LogName) > 0 { + i -= len(m.LogName) + copy(dAtA[i:], m.LogName) + i = encodeVarintGateway(dAtA, i, uint64(len(m.LogName))) + i-- + dAtA[i] = 0x22 + } + if len(m.ResolveMode) > 0 { + i -= len(m.ResolveMode) + copy(dAtA[i:], m.ResolveMode) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ResolveMode))) + i-- + dAtA[i] = 0x1a + } + if m.Platform != nil { + { + size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReadFileResponse) Marshal() (dAtA []byte, err error) { +func (m *ResolveImageConfigResponse) Marshal() (dAtA []byte, 
err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3822,12 +4176,12 @@ func (m *ReadFileResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReadFileResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *ResolveImageConfigResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReadFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResolveImageConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3836,17 +4190,24 @@ func (m *ReadFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Data))) + if len(m.Config) > 0 { + i -= len(m.Config) + copy(dAtA[i:], m.Config) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Config))) + i-- + dAtA[i] = 0x12 + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReadDirRequest) Marshal() (dAtA []byte, err error) { +func (m *SolveRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3856,12 +4217,12 @@ func (m *ReadDirRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReadDirRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReadDirRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3870,31 +4231,135 @@ func (m *ReadDirRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.IncludePattern) > 0 { - i -= len(m.IncludePattern) - copy(dAtA[i:], m.IncludePattern) - i = encodeVarintGateway(dAtA, i, uint64(len(m.IncludePattern))) + if m.Evaluate { i-- - dAtA[i] = 0x1a + if m.Evaluate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 } - if len(m.DirPath) > 0 { - i -= len(m.DirPath) - copy(dAtA[i:], m.DirPath) - i = encodeVarintGateway(dAtA, i, uint64(len(m.DirPath))) + if len(m.FrontendInputs) > 0 { + for k := range m.FrontendInputs { + v := m.FrontendInputs[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.CacheImports) > 0 { + for iNdEx := len(m.CacheImports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CacheImports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + if len(m.ExporterAttr) > 0 { + i -= len(m.ExporterAttr) + copy(dAtA[i:], m.ExporterAttr) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ExporterAttr))) + i-- + dAtA[i] = 0x5a + } + if m.Final { + i-- + if m.Final { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.AllowResultArrayRef { + i-- + if m.AllowResultArrayRef { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.AllowResultReturn { + i-- + if m.AllowResultReturn { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.FrontendOpt) > 0 { + for k := range m.FrontendOpt { + v := m.FrontendOpt[k] + baseI := i + i -= len(v) 
+ copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Frontend) > 0 { + i -= len(m.Frontend) + copy(dAtA[i:], m.Frontend) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Frontend))) i-- dAtA[i] = 0x12 } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + if m.Definition != nil { + { + size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReadDirResponse) Marshal() (dAtA []byte, err error) { +func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3904,12 +4369,12 @@ func (m *ReadDirResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReadDirResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReadDirResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3918,24 +4383,36 @@ func (m *ReadDirResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Entries) > 0 { - for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } + if len(m.Attrs) > 0 { + for k := range m.Attrs { + v := m.Attrs[k] 
+ baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) i-- dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *StatFileRequest) Marshal() (dAtA []byte, err error) { +func (m *SolveResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3945,12 +4422,12 @@ func (m *StatFileRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StatFileRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StatFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3959,12 +4436,271 @@ func (m *StatFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Path))) + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadFileRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadFileRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Range != nil { + { + size, err := m.Range.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.FilePath) > 0 { + i -= len(m.FilePath) + copy(dAtA[i:], m.FilePath) + i = encodeVarintGateway(dAtA, i, uint64(len(m.FilePath))) + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FileRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReadFileResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { 
+ return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadFileResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadDirRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadDirRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadDirRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.IncludePattern) > 0 { + i -= len(m.IncludePattern) + copy(dAtA[i:], m.IncludePattern) + i = encodeVarintGateway(dAtA, i, uint64(len(m.IncludePattern))) + i-- + dAtA[i] = 0x1a + } + if len(m.DirPath) > 0 { + i -= len(m.DirPath) + copy(dAtA[i:], m.DirPath) + i = encodeVarintGateway(dAtA, i, uint64(len(m.DirPath))) + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadDirResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadDirResponse) MarshalTo(dAtA []byte) (int, error) { 
+ size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadDirResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StatFileRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatFileRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 } if len(m.Ref) > 0 { i -= len(m.Ref) @@ -4635,20 +5371,20 @@ func (m *InitMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x20 } if len(m.Fds) > 0 { - dAtA26 := make([]byte, len(m.Fds)*10) - var j25 int + dAtA31 := make([]byte, len(m.Fds)*10) + var j30 int for _, num := range m.Fds { for num >= 1<<7 { - dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80) + dAtA31[j30] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j25++ + j30++ } - dAtA26[j25] = uint8(num) - j25++ + dAtA31[j30] = uint8(num) + j30++ } - i -= j25 - copy(dAtA[i:], dAtA26[:j25]) - i = encodeVarintGateway(dAtA, i, uint64(j25)) + i -= j30 + copy(dAtA[i:], dAtA31[:j30]) + i = 
encodeVarintGateway(dAtA, i, uint64(j30)) i-- dAtA[i] = 0x1a } @@ -4924,7 +5660,20 @@ func (m *Result) Size() (n int) { n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) } } - if m.XXX_unrecognized != nil { + if len(m.Attestations) > 0 { + for k, v := range m.Attestations { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGateway(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n @@ -5041,6 +5790,154 @@ func (m *RefMap) Size() (n int) { return n } +func (m *Attestations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attestation) > 0 { + for _, e := range m.Attestation { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Attestations_Attestation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Attestation != nil { + n += m.Attestation.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Attestations_Attestation_Intoto) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Intoto != nil { + l = m.Intoto.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *InToto) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PredicateType) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.PredicateRef != nil { + l = m.PredicateRef.Size() + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.PredicatePath) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InToto_Subject) Size() (n int) { 
+ if m == nil { + return 0 + } + var l int + _ = l + if m.Subject != nil { + n += m.Subject.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InToto_Subject_Self) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Self != nil { + l = m.Self.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *InToto_Subject_Raw) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Raw != nil { + l = m.Raw.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *InToto_Subject_SelfSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InToto_Subject_RawSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Digest) > 0 { + for _, s := range m.Digest { + l = len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *ReturnRequest) Size() (n int) { if m == nil { return 0 @@ -6119,6 +7016,135 @@ func (m *Result) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attestations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attestations == nil { + m.Attestations = make(map[string]*Attestations) + } + var mapkey string 
+ var mapvalue *Attestations + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGateway + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGateway + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Attestations{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
m.Attestations[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -6295,7 +7321,662 @@ func (m *RefMapDeprecated) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Refs[mapkey] = mapvalue + m.Refs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ref) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ref: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ref: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Def == nil { + m.Def = &pb.Definition{} + } + if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefMap) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefMap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefMap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Refs == nil { + m.Refs = make(map[string]*Ref) + } + var mapkey string + var mapvalue *Ref + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGateway + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGateway + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Ref{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Refs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Attestations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Attestations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Attestations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attestation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attestation = append(m.Attestation, &Attestations_Attestation{}) + if err := m.Attestation[len(m.Attestation)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Attestations_Attestation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Attestation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Attestation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Intoto", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &InToto{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Attestation = &Attestations_Attestation_Intoto{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InToto) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InToto: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InToto: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PredicateType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PredicateType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PredicateRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.PredicateRef == nil { + m.PredicateRef = &Ref{} + } + if err := m.PredicateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PredicatePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PredicatePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, &InToto_Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -6319,7 +8000,7 @@ func (m *RefMapDeprecated) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ref) Unmarshal(dAtA []byte) error { +func (m *InToto_Subject) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6342,17 +8023,17 @@ func (m *Ref) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) 
wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ref: wiretype end group for non-group") + return fmt.Errorf("proto: Subject: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ref: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Self", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6362,27 +8043,30 @@ func (m *Ref) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - m.Id = string(dAtA[iNdEx:postIndex]) + v := &InToto_Subject_SelfSubject{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Subject = &InToto_Subject_Self{v} iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6409,12 +8093,11 @@ func (m *Ref) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Def == nil { - m.Def = &pb.Definition{} - } - if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &InToto_Subject_RawSubject{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Subject = &InToto_Subject_Raw{v} iNdEx = postIndex 
default: iNdEx = preIndex @@ -6438,7 +8121,7 @@ func (m *Ref) Unmarshal(dAtA []byte) error { } return nil } -func (m *RefMap) Unmarshal(dAtA []byte) error { +func (m *InToto_Subject_SelfSubject) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6461,17 +8144,68 @@ func (m *RefMap) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefMap: wiretype end group for non-group") + return fmt.Errorf("proto: SelfSubject: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefMap: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SelfSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InToto_Subject_RawSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawSubject: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6481,120 +8215,55 @@ func (m *RefMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Refs == nil { - m.Refs = make(map[string]*Ref) + m.Digest = append(m.Digest, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var mapkey string - var mapvalue *Ref - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGateway - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGateway - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGateway - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Ref{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.Refs[mapkey] = mapvalue + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/frontend/gateway/pb/gateway.proto b/frontend/gateway/pb/gateway.proto index 66c0396e1a6a..97cb93a29774 100644 --- a/frontend/gateway/pb/gateway.proto +++ b/frontend/gateway/pb/gateway.proto @@ -48,6 +48,7 @@ message Result { RefMap refs = 4; } map metadata = 10; + map attestations = 11; } message RefMapDeprecated { @@ -63,6 +64,36 @@ message RefMap { map refs = 1; } +message Attestations { + message Attestation { + oneof Attestation { + InToto intoto = 1; + } + } + repeated Attestation attestation = 1; +} + +message InToto { + string predicateType = 1; + Ref predicateRef = 2; + string predicatePath = 3; + + message Subject { + message SelfSubject { + } + message RawSubject { + repeated string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string name = 2; + } + oneof Subject { + SelfSubject self = 1; + RawSubject raw = 2; + } + } + repeated Subject subjects = 4; +} + + message ReturnRequest { Result result = 1; google.rpc.Status error = 2; diff --git a/frontend/result.go b/frontend/result.go index 5afc10c9f89f..8f979e22bea4 100644 --- a/frontend/result.go +++ b/frontend/result.go @@ -2,12 +2,27 @@ package frontend import ( "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/attestation" ) +type Attestation interface { + isFrontendAttestation() +} + +type InTotoAttestation struct { + PredicateType string + PredicateRef solver.ResultProxy + PredicatePath string + Subjects []attestation.InTotoSubject +} + +func (a *InTotoAttestation) isFrontendAttestation() {} 
+ type Result struct { - Ref solver.ResultProxy - Refs map[string]solver.ResultProxy - Metadata map[string][]byte + Ref solver.ResultProxy + Refs map[string]solver.ResultProxy + Metadata map[string][]byte + Attestations map[string][]Attestation } func (r *Result) EachRef(fn func(solver.ResultProxy) error) (err error) { @@ -21,5 +36,15 @@ func (r *Result) EachRef(fn func(solver.ResultProxy) error) (err error) { } } } + for _, as := range r.Attestations { + for _, a := range as { + switch a := a.(type) { + case *InTotoAttestation: + if err1 := fn(a.PredicateRef); err1 != nil && err == nil { + err = err1 + } + } + } + } return err } diff --git a/go.mod b/go.mod index 5ed51d324ee7..2fffcc88c48a 100644 --- a/go.mod +++ b/go.mod @@ -37,6 +37,7 @@ require ( github.com/hashicorp/go-immutable-radix v1.3.1 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v0.5.3 + github.com/in-toto/in-toto-golang v0.3.3 github.com/klauspost/compress v1.15.7 github.com/mitchellh/hashstructure/v2 v2.0.2 github.com/moby/locker v1.0.1 @@ -129,6 +130,7 @@ require ( github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/russross/blackfriday/v2 v2.0.1 // indirect + github.com/shibumi/go-pathspec v1.2.0 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect github.com/vbatts/tar-split v0.11.2 // indirect go.opencensus.io v0.23.0 // indirect diff --git a/go.sum b/go.sum index e3fb8f3aadbe..5cdceec409f2 100644 --- a/go.sum +++ b/go.sum @@ -245,6 +245,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod 
h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -303,6 +304,8 @@ github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= @@ -849,6 +852,8 @@ github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/in-toto/in-toto-golang v0.3.3 h1:tkkEBU5i09UEeWKnrp6Rq4fXKAfpVXYMLRO5mDfnb3I= +github.com/in-toto/in-toto-golang v0.3.3/go.mod h1:dbXecHGZSqRubmm5TXtvDSZT5JyaKD7ebVTiC2aMLWY= github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= @@ -904,6 +909,7 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -925,6 +931,7 @@ github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -978,6 +985,7 @@ github.com/mitchellh/iochan v1.0.0/go.mod 
h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= @@ -1122,6 +1130,7 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.5.0 h1:042Buzk+NhDI+DeSAA62RwJL8VAuZUMQZUjCsRz1Mug= github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1209,6 +1218,8 @@ github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfF github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ= github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= +github.com/shibumi/go-pathspec v1.2.0 h1:KVKEDHYk7bQolRMs7nfzjT3SBOCgcXFJzccnj9bsGbA= +github.com/shibumi/go-pathspec 
v1.2.0/go.mod h1:bDxCftD0fST3qXIlHoQ/fChsU4mWMVklXp1yPErQaaY= github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= @@ -1237,6 +1248,7 @@ github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVj github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -1244,6 +1256,7 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1255,6 +1268,7 @@ github.com/spf13/viper v1.3.2/go.mod 
h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1459,6 +1473,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1595,6 +1610,7 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= @@ -1719,6 +1735,7 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210909193231-528a39cd75f3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1882,6 +1899,7 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine 
v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2015,6 +2033,7 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKW gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= diff --git a/solver/llbsolver/ops/build.go b/solver/llbsolver/ops/build.go index 39d2a7707571..4ada980dce88 100644 --- a/solver/llbsolver/ops/build.go +++ b/solver/llbsolver/ops/build.go @@ -130,9 +130,12 @@ func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Res return nil, err } - for _, r := range newRes.Refs { - r.Release(context.TODO()) - } + newRes.EachRef(func(ref solver.ResultProxy) error { + if ref == newRes.Ref { + return nil + } + return ref.Release(context.TODO()) + }) r, err := newRes.Ref.Result(ctx) if err != nil { diff --git a/solver/llbsolver/solver.go b/solver/llbsolver/solver.go index 61220b475f2b..86e0937dd04b 100644 --- a/solver/llbsolver/solver.go +++ b/solver/llbsolver/solver.go @@ -19,6 +19,7 @@ import ( "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/entitlements" @@ -241,6 +242,47 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro } inp.Refs = m } + if res.Attestations != nil { + m := make(map[string][]exporter.Attestation, 
len(res.Attestations)) + for k, as := range res.Attestations { + for _, a := range as { + switch a := a.(type) { + case *frontend.InTotoAttestation: + r, err := a.PredicateRef.Result(ctx) + if err != nil { + return nil, err + } + workerRef, ok := r.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference: %T", r.Sys()) + } + + subjects := make([]attestation.InTotoSubject, len(a.Subjects)) + for i, s := range a.Subjects { + switch s := s.(type) { + case *attestation.InTotoSubjectSelf: + subjects[i] = &attestation.InTotoSubjectSelf{} + case *attestation.InTotoSubjectRaw: + subjects[i] = &attestation.InTotoSubjectRaw{ + Name: s.Name, + Digest: s.Digest, + } + default: + return nil, errors.Errorf("unknown attestation subject type %T", s) + } + } + + m[k] = append(m[k], &exporter.InTotoAttestation{ + PredicateType: a.PredicateType, + PredicateRef: workerRef.ImmutableRef, + PredicatePath: a.PredicatePath, + Subjects: subjects, + }) + } + } + } + inp.Attestations = m + } if _, ok := asInlineCache(exp.CacheExporter); ok { if err := inBuilderContext(ctx, j, "preparing layers for inline cache", j.SessionID+"-cache-inline", func(ctx context.Context, _ session.Group) error { if cr != nil { diff --git a/solver/pb/caps.go b/solver/pb/caps.go index 1553bfabfb46..b7094c8f08b0 100644 --- a/solver/pb/caps.go +++ b/solver/pb/caps.go @@ -79,7 +79,8 @@ const ( CapMergeOp apicaps.CapID = "mergeop" CapDiffOp apicaps.CapID = "diffop" - CapAnnotations apicaps.CapID = "exporter.image.annotations" + CapAnnotations apicaps.CapID = "exporter.image.annotations" + CapAttestations apicaps.CapID = "exporter.image.attestations" ) func init() { @@ -440,4 +441,11 @@ func init() { Enabled: true, Status: apicaps.CapStatusExperimental, }) + + // FIXME: enable once attestations gateway api is stable + // Caps.Init(apicaps.Cap{ + // ID: CapAttestations, + // Enabled: true, + // Status: apicaps.CapStatusExperimental, + // }) } diff --git a/util/attestation/attestations.go 
b/util/attestation/attestations.go new file mode 100644 index 000000000000..35f4404cd627 --- /dev/null +++ b/util/attestation/attestations.go @@ -0,0 +1,11 @@ +package attestation + +const ( + MediaTypeDockerSchema2AttestationType = "application/vnd.in-toto+json" + + DockerAnnotationReferenceType = "vnd.docker.reference.type" + DockerAnnotationReferenceDigest = "vnd.docker.reference.digest" + DockerAnnotationReferenceDescription = "vnd.docker.reference.description" + + DockerAnnotationReferenceTypeDefault = "attestation-manifest" +) diff --git a/util/attestation/subject.go b/util/attestation/subject.go new file mode 100644 index 000000000000..ebd327604567 --- /dev/null +++ b/util/attestation/subject.go @@ -0,0 +1,34 @@ +package attestation + +import ( + digest "github.com/opencontainers/go-digest" +) + +type InTotoSubject interface { + isInTotoSubject() +} + +type InTotoSubjectSelf struct{} + +func (as *InTotoSubjectSelf) isInTotoSubject() {} + +type InTotoSubjectRaw struct { + Name string + Digest []digest.Digest +} + +func (as *InTotoSubjectRaw) isInTotoSubject() {} + +func (as *InTotoSubjectRaw) DigestMap() map[string]string { + m := map[string]string{} + for _, d := range as.Digest { + m[d.Algorithm().String()] = d.Encoded() + } + return m +} + +func DigestToDigestMap(d digest.Digest) map[string]string { + return map[string]string{ + d.Algorithm().String(): d.Encoded(), + } +} diff --git a/util/imageutil/config.go b/util/imageutil/config.go index cfb9d417b38b..bdf8a961fd33 100644 --- a/util/imageutil/config.go +++ b/util/imageutil/config.go @@ -13,6 +13,7 @@ import ( "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" + "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/resolver/limited" "github.com/moby/buildkit/util/resolver/retryhandler" @@ -159,7 +160,8 @@ func childrenConfigHandler(provider 
content.Provider, platform platforms.MatchCo } else { descs = append(descs, index.Manifests...) } - case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType: + case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType, + attestation.MediaTypeDockerSchema2AttestationType: // childless data types. return nil, nil default: diff --git a/util/push/push.go b/util/push/push.go index 660b2025fa2d..881b2fd86f16 100644 --- a/util/push/push.go +++ b/util/push/push.go @@ -15,6 +15,7 @@ import ( "github.com/containerd/containerd/remotes/docker" "github.com/docker/distribution/reference" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/util/progress" @@ -248,7 +249,8 @@ func childrenHandler(provider content.Provider) images.HandlerFunc { } case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, - ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip: + ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip, + attestation.MediaTypeDockerSchema2AttestationType: // childless data types. 
return nil, nil default: diff --git a/util/testutil/imageinfo.go b/util/testutil/imageinfo.go new file mode 100644 index 000000000000..52e2c2501325 --- /dev/null +++ b/util/testutil/imageinfo.go @@ -0,0 +1,101 @@ +package testutil + +import ( + "context" + "encoding/json" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ImageInfo struct { + Desc ocispecs.Descriptor + Img ocispecs.Image + Layers []map[string]*TarItem + LayersRaw [][]byte + descPlatform string +} + +type ImageInfos []*ImageInfo + +func (infos ImageInfos) Find(platform string) *ImageInfo { + result := infos.Filter(platform) + if len(result) == 0 { + return nil + } + return result[0] +} + +func (infos ImageInfos) Filter(platform string) ImageInfos { + result := ImageInfos{} + for _, info := range infos { + if info.descPlatform == platform { + result = append(result, info) + } + } + return result +} + +func ReadIndex(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (ImageInfos, error) { + infos := ImageInfos{} + + dt, err := content.ReadBlob(ctx, p, desc) + if err != nil { + return nil, err + } + var idx ocispecs.Index + if err := json.Unmarshal(dt, &idx); err != nil { + return nil, err + } + + for _, m := range idx.Manifests { + img, err := ReadImage(ctx, p, m) + if err != nil { + return nil, err + } + img.descPlatform = platforms.Format(*m.Platform) + infos = append(infos, img) + } + return infos, nil +} + +func ReadImage(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*ImageInfo, error) { + ii := &ImageInfo{Desc: desc} + + dt, err := content.ReadBlob(ctx, p, desc) + if err != nil { + return nil, err + } + var mfst ocispecs.Manifest + if err := json.Unmarshal(dt, &mfst); err != nil { + return nil, err + } + + dt, err = content.ReadBlob(ctx, p, mfst.Config) + if err != nil { + return nil, err + } + if err 
:= json.Unmarshal(dt, &ii.Img); err != nil { + return nil, err + } + + ii.Layers = make([]map[string]*TarItem, len(mfst.Layers)) + ii.LayersRaw = make([][]byte, len(mfst.Layers)) + for i, l := range mfst.Layers { + dt, err := content.ReadBlob(ctx, p, l) + if err != nil { + return nil, err + } + ii.LayersRaw[i] = dt + if images.IsLayerType(l.MediaType) { + m, err := ReadTarToMap(dt, true) + if err != nil { + return nil, err + } + ii.Layers[i] = m + } + } + return ii, nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/LICENSE b/vendor/github.com/in-toto/in-toto-golang/LICENSE new file mode 100644 index 000000000000..963ee949e8e1 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/LICENSE @@ -0,0 +1,13 @@ +Copyright 2018 New York University + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/canonicaljson.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/canonicaljson.go new file mode 100644 index 000000000000..961756e5a09b --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/canonicaljson.go @@ -0,0 +1,145 @@ +package in_toto + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "regexp" + "sort" +) + +/* +encodeCanonicalString is a helper function to canonicalize the passed string +according to the OLPC canonical JSON specification for strings (see +http://wiki.laptop.org/go/Canonical_JSON). 
String canonicalization consists of +escaping backslashes ("\") and double quotes (") and wrapping the resulting +string in double quotes ("). +*/ +func encodeCanonicalString(s string) string { + re := regexp.MustCompile(`([\"\\])`) + return fmt.Sprintf("\"%s\"", re.ReplaceAllString(s, "\\$1")) +} + +/* +encodeCanonical is a helper function to recursively canonicalize the passed +object according to the OLPC canonical JSON specification (see +http://wiki.laptop.org/go/Canonical_JSON) and write it to the passed +*bytes.Buffer. If canonicalization fails it returns an error. +*/ +func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) { + // Since this function is called recursively, we use panic if an error occurs + // and recover in a deferred function, which is always called before + // returning. There we set the error that is returned eventually. + defer func() { + if r := recover(); r != nil { + err = errors.New(r.(string)) + } + }() + + switch objAsserted := obj.(type) { + case string: + result.WriteString(encodeCanonicalString(objAsserted)) + + case bool: + if objAsserted { + result.WriteString("true") + } else { + result.WriteString("false") + } + + // The wrapping `EncodeCanonical` function decodes the passed json data with + // `decoder.UseNumber` so that any numeric value is stored as `json.Number` + // (instead of the default `float64`). This allows us to assert that it is a + // non-floating point number, which are the only numbers allowed by the used + // canonicalization specification. 
+ case json.Number: + if _, err := objAsserted.Int64(); err != nil { + panic(fmt.Sprintf("Can't canonicalize floating point number '%s'", + objAsserted)) + } + result.WriteString(objAsserted.String()) + + case nil: + result.WriteString("null") + + // Canonicalize slice + case []interface{}: + result.WriteString("[") + for i, val := range objAsserted { + if err := encodeCanonical(val, result); err != nil { + return err + } + if i < (len(objAsserted) - 1) { + result.WriteString(",") + } + } + result.WriteString("]") + + case map[string]interface{}: + result.WriteString("{") + + // Make a list of keys + var mapKeys []string + for key := range objAsserted { + mapKeys = append(mapKeys, key) + } + // Sort keys + sort.Strings(mapKeys) + + // Canonicalize map + for i, key := range mapKeys { + // Note: `key` must be a `string` (see `case map[string]interface{}`) and + // canonicalization of strings cannot err out (see `case string`), thus + // no error handling is needed here. + encodeCanonical(key, result) + + result.WriteString(":") + if err := encodeCanonical(objAsserted[key], result); err != nil { + return err + } + if i < (len(mapKeys) - 1) { + result.WriteString(",") + } + i++ + } + result.WriteString("}") + + default: + // We recover in a deferred function defined above + panic(fmt.Sprintf("Can't canonicalize '%s' of type '%s'", + objAsserted, reflect.TypeOf(objAsserted))) + } + return nil +} + +/* +EncodeCanonical JSON canonicalizes the passed object and returns it as a byte +slice. It uses the OLPC canonical JSON specification (see +http://wiki.laptop.org/go/Canonical_JSON). If canonicalization fails the byte +slice is nil and the second return value contains the error. 
+*/ +func EncodeCanonical(obj interface{}) ([]byte, error) { + // FIXME: Terrible hack to turn the passed struct into a map, converting + // the struct's variable names to the json key names defined in the struct + data, err := json.Marshal(obj) + if err != nil { + return nil, err + } + var jsonMap interface{} + + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + if err := dec.Decode(&jsonMap); err != nil { + return nil, err + } + + // Create a buffer and write the canonicalized JSON bytes to it + var result bytes.Buffer + if err := encodeCanonical(jsonMap, &result); err != nil { + return nil, err + } + + return result.Bytes(), nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go new file mode 100644 index 000000000000..9b1de12b182d --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go @@ -0,0 +1,156 @@ +package in_toto + +import ( + "crypto/x509" + "fmt" + "net/url" +) + +const ( + AllowAllConstraint = "*" +) + +// CertificateConstraint defines the attributes a certificate must have to act as a functionary. +// A wildcard `*` allows any value in the specified attribute, where as an empty array or value +// asserts that the certificate must have nothing for that attribute. A certificate must have +// every value defined in a constraint to match. 
+type CertificateConstraint struct { + CommonName string `json:"common_name"` + DNSNames []string `json:"dns_names"` + Emails []string `json:"emails"` + Organizations []string `json:"organizations"` + Roots []string `json:"roots"` + URIs []string `json:"uris"` +} + +// checkResult is a data structure used to hold +// certificate constraint errors +type checkResult struct { + errors []error +} + +// newCheckResult initializes a new checkResult +func newCheckResult() *checkResult { + return &checkResult{ + errors: make([]error, 0), + } +} + +// evaluate runs a constraint check on a certificate +func (cr *checkResult) evaluate(cert *x509.Certificate, constraintCheck func(*x509.Certificate) error) *checkResult { + err := constraintCheck(cert) + if err != nil { + cr.errors = append(cr.errors, err) + } + return cr +} + +// error reduces all of the errors into one error with a +// combined error message. If there are no errors, nil +// will be returned. +func (cr *checkResult) error() error { + if len(cr.errors) == 0 { + return nil + } + return fmt.Errorf("cert failed constraints check: %+q", cr.errors) +} + +// Check tests the provided certificate against the constraint. An error is returned if the certificate +// fails any of the constraints. nil is returned if the certificate passes all of the constraints. +func (cc CertificateConstraint) Check(cert *x509.Certificate, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error { + return newCheckResult(). + evaluate(cert, cc.checkCommonName). + evaluate(cert, cc.checkDNSNames). + evaluate(cert, cc.checkEmails). + evaluate(cert, cc.checkOrganizations). + evaluate(cert, cc.checkRoots(rootCAIDs, rootCertPool, intermediateCertPool)). + evaluate(cert, cc.checkURIs). + error() +} + +// checkCommonName verifies that the certificate's common name matches the constraint. 
+func (cc CertificateConstraint) checkCommonName(cert *x509.Certificate) error { + return checkCertConstraint("common name", []string{cc.CommonName}, []string{cert.Subject.CommonName}) +} + +// checkDNSNames verifies that the certificate's dns names matches the constraint. +func (cc CertificateConstraint) checkDNSNames(cert *x509.Certificate) error { + return checkCertConstraint("dns name", cc.DNSNames, cert.DNSNames) +} + +// checkEmails verifies that the certificate's emails matches the constraint. +func (cc CertificateConstraint) checkEmails(cert *x509.Certificate) error { + return checkCertConstraint("email", cc.Emails, cert.EmailAddresses) +} + +// checkOrganizations verifies that the certificate's organizations matches the constraint. +func (cc CertificateConstraint) checkOrganizations(cert *x509.Certificate) error { + return checkCertConstraint("organization", cc.Organizations, cert.Subject.Organization) +} + +// checkRoots verifies that the certificate's roots matches the constraint. +// The certificates trust chain must also be verified. +func (cc CertificateConstraint) checkRoots(rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) func(*x509.Certificate) error { + return func(cert *x509.Certificate) error { + _, err := VerifyCertificateTrust(cert, rootCertPool, intermediateCertPool) + if err != nil { + return fmt.Errorf("failed to verify roots: %w", err) + } + return checkCertConstraint("root", cc.Roots, rootCAIDs) + } +} + +// checkURIs verifies that the certificate's URIs matches the constraint. 
+func (cc CertificateConstraint) checkURIs(cert *x509.Certificate) error { + return checkCertConstraint("uri", cc.URIs, urisToStrings(cert.URIs)) +} + +// urisToStrings is a helper that converts a list of URL objects to the string that represents them +func urisToStrings(uris []*url.URL) []string { + res := make([]string, 0, len(uris)) + for _, uri := range uris { + res = append(res, uri.String()) + } + + return res +} + +// checkCertConstraint tests that the provided test values match the allowed values of the constraint. +// All allowed values must be met one-to-one to be considered a successful match. +func checkCertConstraint(attributeName string, constraints, values []string) error { + // If the only constraint is to allow all, the check succeeds + if len(constraints) == 1 && constraints[0] == AllowAllConstraint { + return nil + } + + if len(constraints) == 1 && constraints[0] == "" { + constraints = []string{} + } + + if len(values) == 1 && values[0] == "" { + values = []string{} + } + + // If no constraints are specified, but the certificate has values for the attribute, then the check fails + if len(constraints) == 0 && len(values) > 0 { + return fmt.Errorf("not expecting any %s(s), but cert has %d %s(s)", attributeName, len(values), attributeName) + } + + unmet := NewSet(constraints...) + for _, v := range values { + // if the cert has a value we didn't expect, fail early + if !unmet.Has(v) { + return fmt.Errorf("cert has an unexpected %s %s given constraints %+q", attributeName, v, constraints) + } + + // consider the constraint met + unmet.Remove(v) + } + + // if we have any unmet left after going through each test value, fail. 
+ if len(unmet) > 0 { + return fmt.Errorf("cert with %s(s) %+q did not pass all constraints %+q", attributeName, values, constraints) + } + + return nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go new file mode 100644 index 000000000000..bdfc65d69f99 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go @@ -0,0 +1,30 @@ +package in_toto + +import ( + "crypto/sha256" + "crypto/sha512" + "hash" +) + +/* +getHashMapping returns a mapping from hash algorithm to supported hash +interface. +*/ +func getHashMapping() map[string]func() hash.Hash { + return map[string]func() hash.Hash{ + "sha256": sha256.New, + "sha512": sha512.New, + "sha384": sha512.New384, + } +} + +/* +hashToHex calculates the hash over data based on hash algorithm h. +*/ +func hashToHex(h hash.Hash, data []byte) []byte { + h.Write(data) + // We need to use h.Sum(nil) here, because otherwise hash.Sum() appends + // the hash to the passed data. 
// ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails.
var ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type")

// ErrNoPEMBlock gets triggered when there is no PEM block in the provided file.
var ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)")

// ErrUnsupportedKeyType is returned when we are dealing with a key type different to ed25519 or RSA.
var ErrUnsupportedKeyType = errors.New("unsupported key type")

// ErrInvalidSignature is returned when the signature is invalid.
var ErrInvalidSignature = errors.New("invalid signature")

// ErrInvalidKey is returned when a given key is none of RSA, ECDSA or ED25519.
var ErrInvalidKey = errors.New("invalid key")

// Supported key-type names, signature-scheme names and PEM block headers.
const (
    rsaKeyType            string = "rsa"
    ecdsaKeyType          string = "ecdsa"
    ed25519KeyType        string = "ed25519"
    rsassapsssha256Scheme string = "rsassa-pss-sha256"
    ecdsaSha2nistp224     string = "ecdsa-sha2-nistp224"
    ecdsaSha2nistp256     string = "ecdsa-sha2-nistp256"
    ecdsaSha2nistp384     string = "ecdsa-sha2-nistp384"
    ecdsaSha2nistp521     string = "ecdsa-sha2-nistp521"
    ed25519Scheme         string = "ed25519"
    pemPublicKey          string = "PUBLIC KEY"
    pemPrivateKey         string = "PRIVATE KEY"
    pemRSAPrivateKey      string = "RSA PRIVATE KEY"
)
+KeyIDHashAlgorithms. We need to use this function instead of a constant, +because Go does not support global constant slices. +*/ +func getSupportedKeyIDHashAlgorithms() Set { + return NewSet("sha256", "sha512") +} + +/* +getSupportedRSASchemes returns a string slice of supported RSA Key schemes. +We need to use this function instead of a constant because Go does not support +global constant slices. +*/ +func getSupportedRSASchemes() []string { + return []string{rsassapsssha256Scheme} +} + +/* +getSupportedEcdsaSchemes returns a string slice of supported ecdsa Key schemes. +We need to use this function instead of a constant because Go does not support +global constant slices. +*/ +func getSupportedEcdsaSchemes() []string { + return []string{ecdsaSha2nistp224, ecdsaSha2nistp256, ecdsaSha2nistp384, ecdsaSha2nistp521} +} + +/* +getSupportedEd25519Schemes returns a string slice of supported ed25519 Key +schemes. We need to use this function instead of a constant because Go does +not support global constant slices. +*/ +func getSupportedEd25519Schemes() []string { + return []string{ed25519Scheme} +} + +/* +generateKeyID creates a partial key map and generates the key ID +based on the created partial key map via the SHA256 method. +The resulting keyID will be directly saved in the corresponding key object. +On success generateKeyID will return nil, in case of errors while encoding +there will be an error. +*/ +func (k *Key) generateKeyID() error { + // Create partial key map used to create the keyid + // Unfortunately, we can't use the Key object because this also carries + // yet unwanted fields, such as KeyID and KeyVal.Private and therefore + // produces a different hash. We generate the keyID exactly as we do in + // the securesystemslib to keep interoperability between other in-toto + // implementations. 
/*
generateKeyID creates a partial key map and generates the key ID
based on the created partial key map via the SHA256 method.
The resulting keyID will be directly saved in the corresponding key object.
On success generateKeyID will return nil; in case of errors while encoding
there will be an error.
*/
func (k *Key) generateKeyID() error {
    // Create partial key map used to create the keyid.
    // Unfortunately, we can't use the Key object because this also carries
    // yet unwanted fields, such as KeyID and KeyVal.Private, and therefore
    // produces a different hash. We generate the keyID exactly as we do in
    // the securesystemslib to keep interoperability between other in-toto
    // implementations.
    var keyToBeHashed = map[string]interface{}{
        "keytype":               k.KeyType,
        "scheme":                k.Scheme,
        "keyid_hash_algorithms": k.KeyIDHashAlgorithms,
        "keyval": map[string]string{
            "public": k.KeyVal.Public,
        },
    }
    // Canonical JSON encoding guarantees a stable byte representation to hash.
    keyCanonical, err := EncodeCanonical(keyToBeHashed)
    if err != nil {
        return err
    }
    // calculate sha256 and store the hex string representation as keyID
    keyHashed := sha256.Sum256(keyCanonical)
    k.KeyID = fmt.Sprintf("%x", keyHashed)
    err = validateKey(*k)
    if err != nil {
        return err
    }
    return nil
}

/*
generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType.
If successful it returns a PEM block as []byte slice. This function should always
succeed; if keyBytes is empty the PEM block will have an empty byte block,
so only header and footer will exist.
*/
func generatePEMBlock(keyBytes []byte, pemType string) []byte {
    // construct PEM block
    pemBlock := &pem.Block{
        Type:    pemType,
        Headers: nil,
        Bytes:   keyBytes,
    }
    return pem.EncodeToMemory(pemBlock)
}
/*
setKeyComponents sets all components in our key object.
Furthermore it makes sure to remove any trailing and leading whitespaces or newlines.
We treat key types differently for interoperability reasons with the in-toto python
implementation and the securesystemslib.
*/
func (k *Key) setKeyComponents(pubKeyBytes []byte, privateKeyBytes []byte, keyType string, scheme string, KeyIDHashAlgorithms []string) error {
    // assume we have a privateKey if the key size is bigger than 0
    switch keyType {
    case rsaKeyType:
        // RSA private keys keep the legacy "RSA PRIVATE KEY" PEM header.
        if len(privateKeyBytes) > 0 {
            k.KeyVal = KeyVal{
                Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemRSAPrivateKey))),
                Public:  strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
            }
        } else {
            k.KeyVal = KeyVal{
                Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
            }
        }
    case ecdsaKeyType:
        // ECDSA private keys use the generic "PRIVATE KEY" PEM header.
        if len(privateKeyBytes) > 0 {
            k.KeyVal = KeyVal{
                Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemPrivateKey))),
                Public:  strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
            }
        } else {
            k.KeyVal = KeyVal{
                Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
            }
        }
    case ed25519KeyType:
        // ed25519 keys are stored hex encoded rather than as PEM, matching
        // the python implementation.
        if len(privateKeyBytes) > 0 {
            k.KeyVal = KeyVal{
                Private: strings.TrimSpace(hex.EncodeToString(privateKeyBytes)),
                Public:  strings.TrimSpace(hex.EncodeToString(pubKeyBytes)),
            }
        } else {
            k.KeyVal = KeyVal{
                Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)),
            }
        }
    default:
        return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, keyType)
    }
    k.KeyType = keyType
    k.Scheme = scheme
    k.KeyIDHashAlgorithms = KeyIDHashAlgorithms
    // Recompute the key ID now that all components are in place.
    if err := k.generateKeyID(); err != nil {
        return err
    }
    return nil
}
+On failure it returns nil and the error ErrFailedPEMParsing +*/ +func parseKey(data []byte) (interface{}, error) { + key, err := x509.ParsePKCS8PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKCS1PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKIXPublicKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParseCertificate(data) + if err == nil { + return key, nil + } + key, err = x509.ParseECPrivateKey(data) + if err == nil { + return key, nil + } + return nil, ErrFailedPEMParsing +} + +/* +decodeAndParse receives potential PEM bytes decodes them via pem.Decode +and pushes them to parseKey. If any error occurs during this process, +the function will return nil and an error (either ErrFailedPEMParsing +or ErrNoPEMBlock). On success it will return the decoded pemData, the +key object interface and nil as error. We need the decoded pemData, +because LoadKey relies on decoded pemData for operating system +interoperability. +*/ +func decodeAndParse(pemBytes []byte) (*pem.Block, interface{}, error) { + // pem.Decode returns the parsed pem block and a rest. + // The rest is everything, that could not be parsed as PEM block. + // Therefore we can drop this via using the blank identifier "_" + data, _ := pem.Decode(pemBytes) + if data == nil { + return nil, nil, ErrNoPEMBlock + } + + // Try to load private key, if this fails try to load + // key as public key + key, err := parseKey(data.Bytes) + if err != nil { + return nil, nil, err + } + return data, key, nil +} + +/* +LoadKey loads the key file at specified file path into the key object. +It automatically derives the PEM type and the key type. 
+Right now the following PEM types are supported: + + * PKCS1 for private keys + * PKCS8 for private keys + * PKIX for public keys + +The following key types are supported and will be automatically assigned to +the key type field: + + * ed25519 + * rsa + * ecdsa + +The following schemes are supported: + + * ed25519 -> ed25519 + * rsa -> rsassa-pss-sha256 + * ecdsa -> ecdsa-sha256-nistp256 + +Note that, this behavior is consistent with the securesystemslib, except for +ecdsa. We do not use the scheme string as key type in in-toto-golang. +Instead we are going with a ecdsa/ecdsa-sha2-nistp256 pair. + +On success it will return nil. The following errors can happen: + + * path not found or not readable + * no PEM block in the loaded file + * no valid PKCS8/PKCS1 private key or PKIX public key + * errors while marshalling + * unsupported key types +*/ +func (k *Key) LoadKey(path string, scheme string, KeyIDHashAlgorithms []string) error { + pemFile, err := os.Open(path) + if err != nil { + return err + } + defer pemFile.Close() + + err = k.LoadKeyReader(pemFile, scheme, KeyIDHashAlgorithms) + if err != nil { + return err + } + + return pemFile.Close() +} + +func (k *Key) LoadKeyDefaults(path string) error { + pemFile, err := os.Open(path) + if err != nil { + return err + } + defer pemFile.Close() + + err = k.LoadKeyReaderDefaults(pemFile) + if err != nil { + return err + } + + return pemFile.Close() +} + +// LoadKeyReader loads the key from a supplied reader. The logic matches LoadKey otherwise. 
+func (k *Key) LoadKeyReader(r io.Reader, scheme string, KeyIDHashAlgorithms []string) error { + if r == nil { + return ErrNoPEMBlock + } + // Read key bytes + pemBytes, err := ioutil.ReadAll(r) + if err != nil { + return err + } + // decodeAndParse returns the pemData for later use + // and a parsed key object (for operations on that key, like extracting the public Key) + pemData, key, err := decodeAndParse(pemBytes) + if err != nil { + return err + } + + return k.loadKey(key, pemData, scheme, KeyIDHashAlgorithms) +} + +func (k *Key) LoadKeyReaderDefaults(r io.Reader) error { + if r == nil { + return ErrNoPEMBlock + } + // Read key bytes + pemBytes, err := ioutil.ReadAll(r) + if err != nil { + return err + } + // decodeAndParse returns the pemData for later use + // and a parsed key object (for operations on that key, like extracting the public Key) + pemData, key, err := decodeAndParse(pemBytes) + if err != nil { + return err + } + + scheme, keyIDHashAlgorithms, err := getDefaultKeyScheme(key) + if err != nil { + return err + } + + return k.loadKey(key, pemData, scheme, keyIDHashAlgorithms) +} + +func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms []string, err error) { + keyIDHashAlgorithms = []string{"sha256", "sha512"} + + switch key.(type) { + case *rsa.PublicKey, *rsa.PrivateKey: + scheme = rsassapsssha256Scheme + case ed25519.PrivateKey, ed25519.PublicKey: + scheme = ed25519Scheme + case *ecdsa.PrivateKey, *ecdsa.PublicKey: + scheme = ecdsaSha2nistp256 + case *x509.Certificate: + return getDefaultKeyScheme(key.(*x509.Certificate).PublicKey) + default: + err = ErrUnsupportedKeyType + } + + return scheme, keyIDHashAlgorithms, err +} + +func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error { + + switch key.(type) { + case *rsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PublicKey)) + if err != nil { + return err + } + if err := 
// loadKey fills the key object from an already parsed key. The original PEM
// block is kept so private keys and certificates can be stored verbatim.
func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error {

    switch key.(type) {
    case *rsa.PublicKey:
        pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PublicKey))
        if err != nil {
            return err
        }
        if err := k.setKeyComponents(pubKeyBytes, []byte{}, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
            return err
        }
    case *rsa.PrivateKey:
        // Note: RSA Public Keys will get stored as X.509 SubjectPublicKeyInfo (RFC5280)
        // This behavior is consistent with the securesystemslib
        pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PrivateKey).Public())
        if err != nil {
            return err
        }
        if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
            return err
        }
    case ed25519.PublicKey:
        // ed25519 keys are passed through as raw bytes, not marshalled.
        if err := k.setKeyComponents(key.(ed25519.PublicKey), []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil {
            return err
        }
    case ed25519.PrivateKey:
        pubKeyBytes := key.(ed25519.PrivateKey).Public()
        if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key.(ed25519.PrivateKey), ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil {
            return err
        }
    case *ecdsa.PrivateKey:
        pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PrivateKey).Public())
        if err != nil {
            return err
        }
        if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
            return err
        }
    case *ecdsa.PublicKey:
        pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PublicKey))
        if err != nil {
            return err
        }
        if err := k.setKeyComponents(pubKeyBytes, []byte{}, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
            return err
        }
    case *x509.Certificate:
        // Load the certificate's embedded public key, then keep the whole
        // certificate PEM alongside it.
        err := k.loadKey(key.(*x509.Certificate).PublicKey, pemData, scheme, keyIDHashAlgorithms)
        if err != nil {
            return err
        }

        k.KeyVal.Certificate = string(pem.EncodeToMemory(pemData))

    default:
        // We should never get here, because we implement all Key types Go supports.
        return errors.New("unexpected Error in LoadKey function")
    }

    return nil
}
/*
GenerateSignature will automatically detect the key type and sign the signable data
with the provided key. If everything goes right GenerateSignature will return
a signature valid for the key and err=nil. If something goes wrong it will
return an uninitialized signature and an error. Possible errors are:

  * ErrNoPEMBlock
  * ErrUnsupportedKeyType

Currently supported is only one scheme per key.

Note that in-toto-golang has different requirements for an ecdsa key.
In in-toto-golang we use the string 'ecdsa' as string for the key type.
In the key scheme we use: ecdsa-sha2-nistp256.
*/
func GenerateSignature(signable []byte, key Key) (Signature, error) {
    err := validateKey(key)
    if err != nil {
        return Signature{}, err
    }
    var signature Signature
    var signatureBuffer []byte
    hashMapping := getHashMapping()
    // The following switch block is needed for keeping interoperability
    // with the securesystemslib and the python implementation
    // in which we are storing RSA keys in PEM format, but ed25519 keys hex encoded.
    switch key.KeyType {
    case rsaKeyType:
        // We do not need the pemData here, so we can throw it away via '_'
        _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
        if err != nil {
            return Signature{}, err
        }
        parsedKey, ok := parsedKey.(*rsa.PrivateKey)
        if !ok {
            return Signature{}, ErrKeyKeyTypeMismatch
        }
        switch key.Scheme {
        case rsassapsssha256Scheme:
            hashed := hashToHex(hashMapping["sha256"](), signable)
            // We use rand.Reader as secure random source for rsa.SignPSS()
            signatureBuffer, err = rsa.SignPSS(rand.Reader, parsedKey.(*rsa.PrivateKey), crypto.SHA256, hashed,
                &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
            if err != nil {
                return signature, err
            }
        default:
            // supported key schemes will get checked in validateKey
            panic("unexpected Error in GenerateSignature function")
        }
    case ecdsaKeyType:
        // We do not need the pemData here, so we can throw it away via '_'
        _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
        if err != nil {
            return Signature{}, err
        }
        parsedKey, ok := parsedKey.(*ecdsa.PrivateKey)
        if !ok {
            return Signature{}, ErrKeyKeyTypeMismatch
        }
        curveSize := parsedKey.(*ecdsa.PrivateKey).Curve.Params().BitSize
        var hashed []byte
        if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {
            return Signature{}, ErrCurveSizeSchemeMismatch
        }
        // implement https://tools.ietf.org/html/rfc5656#section-6.2.1
        // We determine the curve size and choose the correct hashing
        // method based on the curveSize
        switch {
        case curveSize <= 256:
            hashed = hashToHex(hashMapping["sha256"](), signable)
        case 256 < curveSize && curveSize <= 384:
            hashed = hashToHex(hashMapping["sha384"](), signable)
        case curveSize > 384:
            hashed = hashToHex(hashMapping["sha512"](), signable)
        default:
            panic("unexpected Error in GenerateSignature function")
        }
        // Generate the ecdsa signature the same way as the securesystemslib:
        // the ecdsaSignature struct is marshalled as ASN.1 INTEGER SEQUENCES
        // into an ASN.1 Object.
        signatureBuffer, err = ecdsa.SignASN1(rand.Reader, parsedKey.(*ecdsa.PrivateKey), hashed[:])
        if err != nil {
            return signature, err
        }
    case ed25519KeyType:
        // We do not need a scheme switch here, because ed25519
        // only consists of sha256 and curve25519.
        privateHex, err := hex.DecodeString(key.KeyVal.Private)
        if err != nil {
            return signature, ErrInvalidHexString
        }
        // Note: We can directly use the key for signing and do not
        // need to use ed25519.NewKeyFromSeed().
        signatureBuffer = ed25519.Sign(privateHex, signable)
    default:
        // We should never get here, because we call validateKey in the first
        // line of the function.
        panic("unexpected Error in GenerateSignature function")
    }
    signature.Sig = hex.EncodeToString(signatureBuffer)
    signature.KeyID = key.KeyID
    signature.Certificate = key.KeyVal.Certificate
    return signature, nil
}
/*
VerifySignature will verify unverified byte data via a passed key and signature.
Supported key types are:

  * rsa
  * ed25519
  * ecdsa

When encountering an RSA key, VerifySignature will decode the PEM block in the key
and will call rsa.VerifyPSS() for verifying the RSA signature.
When encountering an ed25519 key, VerifySignature will decode the hex string encoded
public key and will use ed25519.Verify() for verifying the ed25519 signature.
When the given key is an ecdsa key, VerifySignature will unmarshal the ASN1 object
and will use the retrieved ecdsa components 'r' and 's' for verifying the signature.
On success it will return nil. In case of an unsupported key type or any other error
it will return an error.

Note that in-toto-golang has different requirements for an ecdsa key.
In in-toto-golang we use the string 'ecdsa' as string for the key type.
In the key scheme we use: ecdsa-sha2-nistp256.
*/
func VerifySignature(key Key, sig Signature, unverified []byte) error {
    err := validateKey(key)
    if err != nil {
        return err
    }
    sigBytes, err := hex.DecodeString(sig.Sig)
    if err != nil {
        return err
    }
    hashMapping := getHashMapping()
    switch key.KeyType {
    case rsaKeyType:
        // We do not need the pemData here, so we can throw it away via '_'
        _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
        if err != nil {
            return err
        }
        parsedKey, ok := parsedKey.(*rsa.PublicKey)
        if !ok {
            return ErrKeyKeyTypeMismatch
        }
        switch key.Scheme {
        case rsassapsssha256Scheme:
            hashed := hashToHex(hashMapping["sha256"](), unverified)
            err = rsa.VerifyPSS(parsedKey.(*rsa.PublicKey), crypto.SHA256, hashed, sigBytes, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
            if err != nil {
                return fmt.Errorf("%w: %s", ErrInvalidSignature, err)
            }
        default:
            // supported key schemes will get checked in validateKey
            panic("unexpected Error in VerifySignature function")
        }
    case ecdsaKeyType:
        // We do not need the pemData here, so we can throw it away via '_'
        _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
        if err != nil {
            return err
        }
        parsedKey, ok := parsedKey.(*ecdsa.PublicKey)
        if !ok {
            return ErrKeyKeyTypeMismatch
        }
        curveSize := parsedKey.(*ecdsa.PublicKey).Curve.Params().BitSize
        var hashed []byte
        if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {
            return ErrCurveSizeSchemeMismatch
        }
        // implement https://tools.ietf.org/html/rfc5656#section-6.2.1
        // We determine the curve size and choose the correct hashing
        // method based on the curveSize
        switch {
        case curveSize <= 256:
            hashed = hashToHex(hashMapping["sha256"](), unverified)
        case 256 < curveSize && curveSize <= 384:
            hashed = hashToHex(hashMapping["sha384"](), unverified)
        case curveSize > 384:
            hashed = hashToHex(hashMapping["sha512"](), unverified)
        default:
            panic("unexpected Error in VerifySignature function")
        }
        if ok := ecdsa.VerifyASN1(parsedKey.(*ecdsa.PublicKey), hashed[:], sigBytes); !ok {
            return ErrInvalidSignature
        }
    case ed25519KeyType:
        // We do not need a scheme switch here, because ed25519
        // only consists of sha256 and curve25519.
        pubHex, err := hex.DecodeString(key.KeyVal.Public)
        if err != nil {
            return ErrInvalidHexString
        }
        if ok := ed25519.Verify(pubHex, unverified, sigBytes); !ok {
            return fmt.Errorf("%w: ed25519", ErrInvalidSignature)
        }
    default:
        // We should never get here, because we call validateKey in the first
        // line of the function.
        panic("unexpected Error in VerifySignature function")
    }
    return nil
}

/*
VerifyCertificateTrust verifies that the certificate has a chain of trust
to a root in rootCertPool, possibly using any intermediates in
intermediateCertPool.
*/
func VerifyCertificateTrust(cert *x509.Certificate, rootCertPool, intermediateCertPool *x509.CertPool) ([][]*x509.Certificate, error) {
    verifyOptions := x509.VerifyOptions{
        Roots:         rootCertPool,
        Intermediates: intermediateCertPool,
    }
    chains, err := cert.Verify(verifyOptions)
    if len(chains) == 0 || err != nil {
        return nil, fmt.Errorf("cert cannot be verified by provided roots and intermediates")
    }
    return chains, nil
}

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found at https://golang.org/LICENSE.

// This is a modified version of path.Match that removes handling of path separators.

// errBadPattern indicates a pattern was malformed.
var errBadPattern = errors.New("syntax error in pattern")
// match reports whether name matches the shell pattern.
// The pattern syntax is:
//
//	pattern:
//		{ term }
//	term:
//		'*'         matches any sequence of non-/ characters
//		'?'         matches any single non-/ character
//		'[' [ '^' ] { character-range } ']'
//		            character class (must be non-empty)
//		c           matches character c (c != '*', '?', '\\', '[')
//		'\\' c      matches character c
//
//	character-range:
//		c           matches character c (c != '\\', '-', ']')
//		'\\' c      matches character c
//		lo '-' hi   matches character c for lo <= c <= hi
//
// Match requires pattern to match all of name, not just a substring.
// The only possible returned error is errBadPattern, when pattern
// is malformed.
func match(pattern, name string) (matched bool, err error) {
Pattern:
    for len(pattern) > 0 {
        var star bool
        var chunk string
        star, chunk, pattern = scanChunk(pattern)
        if star && chunk == "" {
            // Trailing * matches everything
            return true, nil
        }
        // Look for match at current position.
        t, ok, err := matchChunk(chunk, name)
        // if we're the last chunk, make sure we've exhausted the name
        // otherwise we'll give a false result even if we could still match
        // using the star
        if ok && (len(t) == 0 || len(pattern) > 0) {
            name = t
            continue
        }
        if err != nil {
            return false, err
        }
        if star {
            // Look for match skipping i+1 bytes.
            for i := 0; i < len(name); i++ {
                t, ok, err := matchChunk(chunk, name[i+1:])
                if ok {
                    // if we're the last chunk, make sure we exhausted the name
                    if len(pattern) == 0 && len(t) > 0 {
                        continue
                    }
                    name = t
                    continue Pattern
                }
                if err != nil {
                    return false, err
                }
            }
        }
        // Before returning false with no error,
        // check that the remainder of the pattern is syntactically valid.
        for len(pattern) > 0 {
            _, chunk, pattern = scanChunk(pattern)
            if _, _, err := matchChunk(chunk, ""); err != nil {
                return false, err
            }
        }
        return false, nil
    }
    // An exhausted pattern only matches an exhausted name.
    return len(name) == 0, nil
}
// scanChunk gets the next segment of pattern, which is a non-star string
// possibly preceded by a star.
func scanChunk(pattern string) (star bool, chunk, rest string) {
    // Consume any leading stars; they collapse into a single star flag.
    for len(pattern) > 0 && pattern[0] == '*' {
        pattern = pattern[1:]
        star = true
    }
    inrange := false
    var i int
Scan:
    for i = 0; i < len(pattern); i++ {
        switch pattern[i] {
        case '\\':
            // error check handled in matchChunk: bad pattern.
            if i+1 < len(pattern) {
                i++
            }
        case '[':
            inrange = true
        case ']':
            inrange = false
        case '*':
            // A star inside a character class is a literal; outside it ends
            // the chunk.
            if !inrange {
                break Scan
            }
        }
    }
    return star, pattern[0:i], pattern[i:]
}

// matchChunk checks whether chunk matches the beginning of s.
// If so, it returns the remainder of s (after the match).
// Chunk is all single-character operators: literals, char classes, and ?.
func matchChunk(chunk, s string) (rest string, ok bool, err error) {
    // failed records whether the match has failed.
    // After the match fails, the loop continues on processing chunk,
    // checking that the pattern is well-formed but no longer reading s.
    failed := false
    for len(chunk) > 0 {
        if !failed && len(s) == 0 {
            failed = true
        }
        switch chunk[0] {
        case '[':
            // character class
            var r rune
            if !failed {
                var n int
                r, n = utf8.DecodeRuneInString(s)
                s = s[n:]
            }
            chunk = chunk[1:]
            // possibly negated
            negated := false
            if len(chunk) > 0 && chunk[0] == '^' {
                negated = true
                chunk = chunk[1:]
            }
            // parse all ranges
            match := false
            nrange := 0
            for {
                if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
                    chunk = chunk[1:]
                    break
                }
                var lo, hi rune
                if lo, chunk, err = getEsc(chunk); err != nil {
                    return "", false, err
                }
                hi = lo
                if chunk[0] == '-' {
                    if hi, chunk, err = getEsc(chunk[1:]); err != nil {
                        return "", false, err
                    }
                }
                if lo <= r && r <= hi {
                    match = true
                }
                nrange++
            }
            if match == negated {
                failed = true
            }

        case '?':
            // ? consumes exactly one rune of s.
            if !failed {
                _, n := utf8.DecodeRuneInString(s)
                s = s[n:]
            }
            chunk = chunk[1:]

        case '\\':
            chunk = chunk[1:]
            if len(chunk) == 0 {
                return "", false, errBadPattern
            }
            fallthrough

        default:
            // Literal byte comparison.
            if !failed {
                if chunk[0] != s[0] {
                    failed = true
                }
                s = s[1:]
            }
            chunk = chunk[1:]
        }
    }
    if failed {
        return "", false, nil
    }
    return s, true, nil
}
// getEsc gets a possibly-escaped character from chunk, for a character class.
func getEsc(chunk string) (r rune, nchunk string, err error) {
    // A range cannot start at end-of-chunk, on '-' or on ']'.
    if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
        err = errBadPattern
        return
    }
    if chunk[0] == '\\' {
        chunk = chunk[1:]
        if len(chunk) == 0 {
            err = errBadPattern
            return
        }
    }
    r, n := utf8.DecodeRuneInString(chunk)
    if r == utf8.RuneError && n == 1 {
        err = errBadPattern
    }
    nchunk = chunk[n:]
    // A character class must still be closed after this character.
    if len(nchunk) == 0 {
        err = errBadPattern
    }
    return
}

/*
KeyVal contains the actual values of a key, as opposed to key metadata such as
a key identifier or key type. For RSA keys, the key value is a pair of public
and private keys in PEM format stored as strings. For public keys the Private
field may be an empty string.
*/
type KeyVal struct {
    Private     string `json:"private"`
    Public      string `json:"public"`
    Certificate string `json:"certificate,omitempty"`
}
/*
Key represents a generic in-toto key that contains key metadata, such as an
identifier, supported hash algorithms to create the identifier, the key type
and the supported signature scheme, and the actual key value.
*/
type Key struct {
    KeyID               string   `json:"keyid"`
    KeyIDHashAlgorithms []string `json:"keyid_hash_algorithms"`
    KeyType             string   `json:"keytype"`
    KeyVal              KeyVal   `json:"keyval"`
    Scheme              string   `json:"scheme"`
}

// PayloadType is the payload type used for links and layouts.
const PayloadType = "application/vnd.in-toto+json"

// ErrEmptyKeyField will be thrown if a field in our Key struct is empty.
var ErrEmptyKeyField = errors.New("empty field in key")

// ErrInvalidHexString will be thrown, if a string doesn't match a hex string.
var ErrInvalidHexString = errors.New("invalid hex string")

// ErrSchemeKeyTypeMismatch will be thrown, if the given scheme and key type are not supported together.
var ErrSchemeKeyTypeMismatch = errors.New("the scheme and key type are not supported together")

// ErrUnsupportedKeyIDHashAlgorithms will be thrown, if the specified KeyIDHashAlgorithms is not supported.
var ErrUnsupportedKeyIDHashAlgorithms = errors.New("the given keyID hash algorithm is not supported")

// ErrKeyKeyTypeMismatch will be thrown, if the specified keyType does not match the key.
var ErrKeyKeyTypeMismatch = errors.New("the given key does not match its key type")

// ErrNoPublicKey gets returned when the private key value is not empty.
var ErrNoPublicKey = errors.New("the given key is not a public key")

// ErrCurveSizeSchemeMismatch gets returned when the scheme and curve size are incompatible,
// for example: curve size = "521" and scheme = "ecdsa-sha2-nistp224".
var ErrCurveSizeSchemeMismatch = errors.New("the scheme does not match the curve size")

const (
    // StatementInTotoV01 is the statement type for the generalized link format
    // containing statements. This is constant for all predicate types.
    StatementInTotoV01 = "https://in-toto.io/Statement/v0.1"
    // PredicateSPDX represents a SBOM using the SPDX standard.
    // The SPDX mandates 'spdxVersion' field, so predicate type can omit
    // version.
    PredicateSPDX = "https://spdx.dev/Document"
    // PredicateLinkV1 represents an in-toto 0.9 link.
    PredicateLinkV1 = "https://in-toto.io/Link/v1"
    // PredicateSLSAProvenanceV01 represents a build provenance for an artifact.
    PredicateSLSAProvenanceV01 = "https://slsa.dev/provenance/v0.1"
)
// ErrInvalidPayloadType indicates that the envelope used an unknown payload type.
var ErrInvalidPayloadType = errors.New("unknown payload type")

/*
matchEcdsaScheme checks if the scheme suffix matches the ecdsa key
curve size. We do not need a full regex match here, because
our validateKey functions are already checking for a valid scheme string.
*/
func matchEcdsaScheme(curveSize int, scheme string) error {
    if !strings.HasSuffix(scheme, strconv.Itoa(curveSize)) {
        return ErrCurveSizeSchemeMismatch
    }
    return nil
}

/*
validateHexString is used to validate that a string passed to it contains
only valid hexadecimal characters.
*/
func validateHexString(str string) error {
    formatCheck, _ := regexp.MatchString("^[a-fA-F0-9]+$", str)
    if !formatCheck {
        return fmt.Errorf("%w: %s", ErrInvalidHexString, str)
    }
    return nil
}

/*
validateKeyVal validates the KeyVal struct. In case of an ed25519 key,
it will check for a hex string for private and public key. In any other
case, validateKeyVal will try to decode the PEM block. If this succeeds,
we have a valid PEM block in our KeyVal struct. On success it will return nil;
on failure it will return the corresponding error. This can be either
an ErrInvalidHexString, an ErrNoPEMBlock or an ErrUnsupportedKeyType
if the KeyType is unknown.
*/
func validateKeyVal(key Key) error {
    switch key.KeyType {
    case ed25519KeyType:
        // We cannot use matchPublicKeyKeyType or matchPrivateKeyKeyType here,
        // because we retrieve the key not from PEM. Hence we are dealing with
        // plain ed25519 key bytes. These bytes can't be typechecked like in the
        // matchKeyKeytype functions.
        err := validateHexString(key.KeyVal.Public)
        if err != nil {
            return err
        }
        if key.KeyVal.Private != "" {
            err := validateHexString(key.KeyVal.Private)
            if err != nil {
                return err
            }
        }
    case rsaKeyType, ecdsaKeyType:
        // We do not need the pemData here, so we can throw it away via '_'
        _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
        if err != nil {
            return err
        }
        err = matchPublicKeyKeyType(parsedKey, key.KeyType)
        if err != nil {
            return err
        }
        if key.KeyVal.Private != "" {
            // We do not need the pemData here, so we can throw it away via '_'
            _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
            if err != nil {
                return err
            }
            err = matchPrivateKeyKeyType(parsedKey, key.KeyType)
            if err != nil {
                return err
            }
        }
    default:
        return ErrUnsupportedKeyType
    }
    return nil
}

/*
matchPublicKeyKeyType validates an interface if it can be asserted to
the RSA or ECDSA public key type. We can only check RSA and ECDSA this way,
because we are storing them in PEM format. Ed25519 keys are stored as plain
ed25519 keys encoded as hex strings, thus we have no metadata for them.
This function will return nil on success. If the key type does not match
it will return an ErrKeyKeyTypeMismatch.
*/
func matchPublicKeyKeyType(key interface{}, keyType string) error {
    switch key.(type) {
    case *rsa.PublicKey:
        if keyType != rsaKeyType {
            return ErrKeyKeyTypeMismatch
        }
    case *ecdsa.PublicKey:
        if keyType != ecdsaKeyType {
            return ErrKeyKeyTypeMismatch
        }
    default:
        return ErrInvalidKey
    }
    return nil
}
If the key type does not match +it will return an ErrKeyKeyTypeMismatch. +*/ +func matchPrivateKeyKeyType(key interface{}, keyType string) error { + // we can only check RSA and ECDSA this way, because we are storing them in PEM + // format. ed25519 keys are stored as plain ed25519 keys encoded as hex strings + // so we have no metadata for them. + switch key.(type) { + case *rsa.PrivateKey: + if keyType != rsaKeyType { + return ErrKeyKeyTypeMismatch + } + case *ecdsa.PrivateKey: + if keyType != ecdsaKeyType { + return ErrKeyKeyTypeMismatch + } + default: + return ErrInvalidKey + } + return nil +} + +/* +matchKeyTypeScheme checks if the specified scheme matches our specified +keyType. If the keyType is not supported it will return an +ErrUnsupportedKeyType. If the keyType and scheme do not match it will return +an ErrSchemeKeyTypeMismatch. If the specified keyType and scheme are +compatible matchKeyTypeScheme will return nil. +*/ +func matchKeyTypeScheme(key Key) error { + switch key.KeyType { + case rsaKeyType: + for _, scheme := range getSupportedRSASchemes() { + if key.Scheme == scheme { + return nil + } + } + case ed25519KeyType: + for _, scheme := range getSupportedEd25519Schemes() { + if key.Scheme == scheme { + return nil + } + } + case ecdsaKeyType: + for _, scheme := range getSupportedEcdsaSchemes() { + if key.Scheme == scheme { + return nil + } + } + default: + return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, key.KeyType) + } + return ErrSchemeKeyTypeMismatch +} + +/* +validateKey checks the outer key object (everything, except the KeyVal struct). +It verifies the keyID for being a hex string and checks for empty fields. +On success it will return nil, on error it will return the corresponding error. +Either: ErrEmptyKeyField or ErrInvalidHexString. 
+*/ +func validateKey(key Key) error { + err := validateHexString(key.KeyID) + if err != nil { + return err + } + // This probably can be done more elegant with reflection + // but we care about performance, do we?! + if key.KeyType == "" { + return fmt.Errorf("%w: keytype", ErrEmptyKeyField) + } + if key.KeyVal.Public == "" && key.KeyVal.Certificate == "" { + return fmt.Errorf("%w: keyval.public and keyval.certificate cannot both be blank", ErrEmptyKeyField) + } + if key.Scheme == "" { + return fmt.Errorf("%w: scheme", ErrEmptyKeyField) + } + err = matchKeyTypeScheme(key) + if err != nil { + return err + } + // only check for supported KeyIDHashAlgorithms, if the variable has been set + if key.KeyIDHashAlgorithms != nil { + supportedKeyIDHashAlgorithms := getSupportedKeyIDHashAlgorithms() + if !supportedKeyIDHashAlgorithms.IsSubSet(NewSet(key.KeyIDHashAlgorithms...)) { + return fmt.Errorf("%w: %#v, supported are: %#v", ErrUnsupportedKeyIDHashAlgorithms, key.KeyIDHashAlgorithms, getSupportedKeyIDHashAlgorithms()) + } + } + return nil +} + +/* +validatePublicKey is a wrapper around validateKey. It test if the private key +value in the key is empty and then validates the key via calling validateKey. +On success it will return nil, on error it will return an ErrNoPublicKey error. +*/ +func validatePublicKey(key Key) error { + if key.KeyVal.Private != "" { + return ErrNoPublicKey + } + err := validateKey(key) + if err != nil { + return err + } + return nil +} + +/* +Signature represents a generic in-toto signature that contains the identifier +of the Key, which was used to create the signature and the signature data. The +used signature scheme is found in the corresponding Key. +*/ +type Signature struct { + KeyID string `json:"keyid"` + Sig string `json:"sig"` + Certificate string `json:"cert,omitempty"` +} + +// GetCertificate returns the parsed x509 certificate attached to the signature, +// if it exists. 
+func (sig Signature) GetCertificate() (Key, error) { + key := Key{} + if len(sig.Certificate) == 0 { + return key, errors.New("Signature has empty Certificate") + } + + err := key.LoadKeyReaderDefaults(strings.NewReader(sig.Certificate)) + return key, err +} + +/* +validateSignature is a function used to check if a passed signature is valid, +by inspecting the key ID and the signature itself. +*/ +func validateSignature(signature Signature) error { + if err := validateHexString(signature.KeyID); err != nil { + return err + } + if err := validateHexString(signature.Sig); err != nil { + return err + } + return nil +} + +/* +validateSliceOfSignatures is a helper function used to validate multiple +signatures stored in a slice. +*/ +func validateSliceOfSignatures(slice []Signature) error { + for _, signature := range slice { + if err := validateSignature(signature); err != nil { + return err + } + } + return nil +} + +/* +Link represents the evidence of a supply chain step performed by a functionary. +It should be contained in a generic Metablock object, which provides +functionality for signing and signature verification, and reading from and +writing to disk. +*/ +type Link struct { + Type string `json:"_type"` + Name string `json:"name"` + Materials map[string]interface{} `json:"materials"` + Products map[string]interface{} `json:"products"` + ByProducts map[string]interface{} `json:"byproducts"` + Command []string `json:"command"` + Environment map[string]interface{} `json:"environment"` +} + +/* +validateArtifacts is a general function used to validate products and materials. 
+*/ +func validateArtifacts(artifacts map[string]interface{}) error { + for artifactName, artifact := range artifacts { + artifactValue := reflect.ValueOf(artifact).MapRange() + for artifactValue.Next() { + value := artifactValue.Value().Interface().(string) + hashType := artifactValue.Key().Interface().(string) + if err := validateHexString(value); err != nil { + return fmt.Errorf("in artifact '%s', %s hash value: %s", + artifactName, hashType, err.Error()) + } + } + } + return nil +} + +/* +validateLink is a function used to ensure that a passed item of type Link +matches the necessary format. +*/ +func validateLink(link Link) error { + if link.Type != "link" { + return fmt.Errorf("invalid type for link '%s': should be 'link'", + link.Name) + } + + if err := validateArtifacts(link.Materials); err != nil { + return fmt.Errorf("in materials of link '%s': %s", link.Name, + err.Error()) + } + + if err := validateArtifacts(link.Products); err != nil { + return fmt.Errorf("in products of link '%s': %s", link.Name, + err.Error()) + } + + return nil +} + +/* +LinkNameFormat represents a format string used to create the filename for a +signed Link (wrapped in a Metablock). It consists of the name of the link and +the first 8 characters of the signing key id. E.g.: + fmt.Sprintf(LinkNameFormat, "package", + "2f89b9272acfc8f4a0a0f094d789fdb0ba798b0fe41f2f5f417c12f0085ff498") + // returns "package.2f89b9272.link" +*/ +const LinkNameFormat = "%s.%.8s.link" +const PreliminaryLinkNameFormat = ".%s.%.8s.link-unfinished" + +/* +LinkNameFormatShort is for links that are not signed, e.g.: + fmt.Sprintf(LinkNameFormatShort, "unsigned") + // returns "unsigned.link" +*/ +const LinkNameFormatShort = "%s.link" +const LinkGlobFormat = "%s.????????.link" + +/* +SublayoutLinkDirFormat represents the format of the name of the directory for +sublayout links during the verification workflow. 
+*/ +const SublayoutLinkDirFormat = "%s.%.8s" + +/* +SupplyChainItem summarizes common fields of the two available supply chain +item types, Inspection and Step. +*/ +type SupplyChainItem struct { + Name string `json:"name"` + ExpectedMaterials [][]string `json:"expected_materials"` + ExpectedProducts [][]string `json:"expected_products"` +} + +/* +validateArtifactRule calls UnpackRule to validate that the passed rule conforms +with any of the available rule formats. +*/ +func validateArtifactRule(rule []string) error { + if _, err := UnpackRule(rule); err != nil { + return err + } + return nil +} + +/* +validateSliceOfArtifactRules iterates over passed rules to validate them. +*/ +func validateSliceOfArtifactRules(rules [][]string) error { + for _, rule := range rules { + if err := validateArtifactRule(rule); err != nil { + return err + } + } + return nil +} + +/* +validateSupplyChainItem is used to validate the common elements found in both +steps and inspections. Here, the function primarily ensures that the name of +a supply chain item isn't empty. +*/ +func validateSupplyChainItem(item SupplyChainItem) error { + if item.Name == "" { + return fmt.Errorf("name cannot be empty") + } + + if err := validateSliceOfArtifactRules(item.ExpectedMaterials); err != nil { + return fmt.Errorf("invalid material rule: %s", err) + } + if err := validateSliceOfArtifactRules(item.ExpectedProducts); err != nil { + return fmt.Errorf("invalid product rule: %s", err) + } + return nil +} + +/* +Inspection represents an in-toto supply chain inspection, whose command in the +Run field is executed during final product verification, generating unsigned +link metadata. Materials and products used/produced by the inspection are +constrained by the artifact rules in the inspection's ExpectedMaterials and +ExpectedProducts fields. 
+*/ +type Inspection struct { + Type string `json:"_type"` + Run []string `json:"run"` + SupplyChainItem +} + +/* +validateInspection ensures that a passed inspection is valid and matches the +necessary format of an inspection. +*/ +func validateInspection(inspection Inspection) error { + if err := validateSupplyChainItem(inspection.SupplyChainItem); err != nil { + return fmt.Errorf("inspection %s", err.Error()) + } + if inspection.Type != "inspection" { + return fmt.Errorf("invalid Type value for inspection '%s': should be "+ + "'inspection'", inspection.SupplyChainItem.Name) + } + return nil +} + +/* +Step represents an in-toto step of the supply chain performed by a functionary. +During final product verification in-toto looks for corresponding Link +metadata, which is used as signed evidence that the step was performed +according to the supply chain definition. Materials and products used/produced +by the step are constrained by the artifact rules in the step's +ExpectedMaterials and ExpectedProducts fields. +*/ +type Step struct { + Type string `json:"_type"` + PubKeys []string `json:"pubkeys"` + CertificateConstraints []CertificateConstraint `json:"cert_constraints,omitempty"` + ExpectedCommand []string `json:"expected_command"` + Threshold int `json:"threshold"` + SupplyChainItem +} + +// CheckCertConstraints returns true if the provided certificate matches at least one +// of the constraints for this step. 
+func (s Step) CheckCertConstraints(key Key, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error { + if len(s.CertificateConstraints) == 0 { + return fmt.Errorf("no constraints found") + } + + _, possibleCert, err := decodeAndParse([]byte(key.KeyVal.Certificate)) + if err != nil { + return err + } + + cert, ok := possibleCert.(*x509.Certificate) + if !ok { + return fmt.Errorf("not a valid certificate") + } + + for _, constraint := range s.CertificateConstraints { + err = constraint.Check(cert, rootCAIDs, rootCertPool, intermediateCertPool) + if err == nil { + return nil + } + } + if err != nil { + return err + } + + // this should not be reachable since there is at least one constraint, and the for loop only saw err != nil + return fmt.Errorf("unknown certificate constraint error") +} + +/* +validateStep ensures that a passed step is valid and matches the +necessary format of an step. +*/ +func validateStep(step Step) error { + if err := validateSupplyChainItem(step.SupplyChainItem); err != nil { + return fmt.Errorf("step %s", err.Error()) + } + if step.Type != "step" { + return fmt.Errorf("invalid Type value for step '%s': should be 'step'", + step.SupplyChainItem.Name) + } + for _, keyID := range step.PubKeys { + if err := validateHexString(keyID); err != nil { + return err + } + } + return nil +} + +/* +ISO8601DateSchema defines the format string of a timestamp following the +ISO 8601 standard. +*/ +const ISO8601DateSchema = "2006-01-02T15:04:05Z" + +/* +Layout represents the definition of a software supply chain. It lists the +sequence of steps required in the software supply chain and the functionaries +authorized to perform these steps. Functionaries are identified by their +public keys. In addition, the layout may list a sequence of inspections that +are executed during in-toto supply chain verification. 
A layout should be +contained in a generic Metablock object, which provides functionality for +signing and signature verification, and reading from and writing to disk. +*/ +type Layout struct { + Type string `json:"_type"` + Steps []Step `json:"steps"` + Inspect []Inspection `json:"inspect"` + Keys map[string]Key `json:"keys"` + RootCas map[string]Key `json:"rootcas,omitempty"` + IntermediateCas map[string]Key `json:"intermediatecas,omitempty"` + Expires string `json:"expires"` + Readme string `json:"readme"` +} + +// Go does not allow to pass `[]T` (slice with certain type) to a function +// that accepts `[]interface{}` (slice with generic type) +// We have to manually create the interface slice first, see +// https://golang.org/doc/faq#convert_slice_of_interface +// TODO: Is there a better way to do polymorphism for steps and inspections? +func (l *Layout) stepsAsInterfaceSlice() []interface{} { + stepsI := make([]interface{}, len(l.Steps)) + for i, v := range l.Steps { + stepsI[i] = v + } + return stepsI +} +func (l *Layout) inspectAsInterfaceSlice() []interface{} { + inspectionsI := make([]interface{}, len(l.Inspect)) + for i, v := range l.Inspect { + inspectionsI[i] = v + } + return inspectionsI +} + +// RootCAIDs returns a slice of all of the Root CA IDs +func (l *Layout) RootCAIDs() []string { + rootCAIDs := make([]string, 0, len(l.RootCas)) + for rootCAID := range l.RootCas { + rootCAIDs = append(rootCAIDs, rootCAID) + } + return rootCAIDs +} + +func validateLayoutKeys(keys map[string]Key) error { + for keyID, key := range keys { + if key.KeyID != keyID { + return fmt.Errorf("invalid key found") + } + err := validatePublicKey(key) + if err != nil { + return err + } + } + + return nil +} + +/* +validateLayout is a function used to ensure that a passed item of type Layout +matches the necessary format. 
+*/ +func validateLayout(layout Layout) error { + if layout.Type != "layout" { + return fmt.Errorf("invalid Type value for layout: should be 'layout'") + } + + if _, err := time.Parse(ISO8601DateSchema, layout.Expires); err != nil { + return fmt.Errorf("expiry time parsed incorrectly - date either" + + " invalid or of incorrect format") + } + + if err := validateLayoutKeys(layout.Keys); err != nil { + return err + } + + if err := validateLayoutKeys(layout.RootCas); err != nil { + return err + } + + if err := validateLayoutKeys(layout.IntermediateCas); err != nil { + return err + } + + var namesSeen = make(map[string]bool) + for _, step := range layout.Steps { + if namesSeen[step.Name] { + return fmt.Errorf("non unique step or inspection name found") + } + + namesSeen[step.Name] = true + + if err := validateStep(step); err != nil { + return err + } + } + for _, inspection := range layout.Inspect { + if namesSeen[inspection.Name] { + return fmt.Errorf("non unique step or inspection name found") + } + + namesSeen[inspection.Name] = true + } + return nil +} + +/* +Metablock is a generic container for signable in-toto objects such as Layout +or Link. It has two fields, one that contains the signable object and one that +contains corresponding signatures. Metablock also provides functionality for +signing and signature verification, and reading from and writing to disk. +*/ +type Metablock struct { + // NOTE: Whenever we want to access an attribute of `Signed` we have to + // perform type assertion, e.g. `metablock.Signed.(Layout).Keys` + // Maybe there is a better way to store either Layouts or Links in `Signed`? 
+ // The notary folks seem to have separate container structs: + // https://github.com/theupdateframework/notary/blob/master/tuf/data/root.go#L10-L14 + // https://github.com/theupdateframework/notary/blob/master/tuf/data/targets.go#L13-L17 + // I implemented it this way, because there will be several functions that + // receive or return a Metablock, where the type of Signed has to be inferred + // on runtime, e.g. when iterating over links for a layout, and a link can + // turn out to be a layout (sublayout) + Signed interface{} `json:"signed"` + Signatures []Signature `json:"signatures"` +} + +type jsonField struct { + name string + omitempty bool +} + +/* +checkRequiredJSONFields checks that the passed map (obj) has keys for each of +the json tags in the passed struct type (typ), and returns an error otherwise. +Any json tags that contain the "omitempty" option be allowed to be optional. +*/ +func checkRequiredJSONFields(obj map[string]interface{}, + typ reflect.Type) error { + + // Create list of json tags, e.g. `json:"_type"` + attributeCount := typ.NumField() + allFields := make([]jsonField, 0) + for i := 0; i < attributeCount; i++ { + fieldStr := typ.Field(i).Tag.Get("json") + field := jsonField{ + name: fieldStr, + omitempty: false, + } + + if idx := strings.Index(fieldStr, ","); idx != -1 { + field.name = fieldStr[:idx] + field.omitempty = strings.Contains(fieldStr[idx+1:], "omitempty") + } + + allFields = append(allFields, field) + } + + // Assert that there's a key in the passed map for each tag + for _, field := range allFields { + if _, ok := obj[field.name]; !ok && !field.omitempty { + return fmt.Errorf("required field %s missing", field.name) + } + } + return nil +} + +/* +Load parses JSON formatted metadata at the passed path into the Metablock +object on which it was called. It returns an error if it cannot parse +a valid JSON formatted Metablock that contains a Link or Layout. 
+*/ +func (mb *Metablock) Load(path string) error { + // Open file and close before returning + jsonFile, err := os.Open(path) + if err != nil { + return err + } + defer jsonFile.Close() + + // Read entire file + jsonBytes, err := ioutil.ReadAll(jsonFile) + if err != nil { + return err + } + + // Unmarshal JSON into a map of raw messages (signed and signatures) + // We can't fully unmarshal immediately, because we need to inspect the + // type (link or layout) to decide which data structure to use + var rawMb map[string]*json.RawMessage + if err := json.Unmarshal(jsonBytes, &rawMb); err != nil { + return err + } + + // Error out on missing `signed` or `signatures` field or if + // one of them has a `null` value, which would lead to a nil pointer + // dereference in Unmarshal below. + if rawMb["signed"] == nil || rawMb["signatures"] == nil { + return fmt.Errorf("in-toto metadata requires 'signed' and" + + " 'signatures' parts") + } + + // Fully unmarshal signatures part + if err := json.Unmarshal(*rawMb["signatures"], &mb.Signatures); err != nil { + return err + } + + // Temporarily copy signed to opaque map to inspect the `_type` of signed + // and create link or layout accordingly + var signed map[string]interface{} + if err := json.Unmarshal(*rawMb["signed"], &signed); err != nil { + return err + } + + if signed["_type"] == "link" { + var link Link + if err := checkRequiredJSONFields(signed, reflect.TypeOf(link)); err != nil { + return err + } + + data, err := rawMb["signed"].MarshalJSON() + if err != nil { + return err + } + decoder := json.NewDecoder(strings.NewReader(string(data))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&link); err != nil { + return err + } + mb.Signed = link + + } else if signed["_type"] == "layout" { + var layout Layout + if err := checkRequiredJSONFields(signed, reflect.TypeOf(layout)); err != nil { + return err + } + + data, err := rawMb["signed"].MarshalJSON() + if err != nil { + return err + } + decoder := 
json.NewDecoder(strings.NewReader(string(data))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&layout); err != nil { + return err + } + + mb.Signed = layout + + } else { + return fmt.Errorf("the '_type' field of the 'signed' part of in-toto" + + " metadata must be one of 'link' or 'layout'") + } + + return jsonFile.Close() +} + +/* +Dump JSON serializes and writes the Metablock on which it was called to the +passed path. It returns an error if JSON serialization or writing fails. +*/ +func (mb *Metablock) Dump(path string) error { + // JSON encode Metablock formatted with newlines and indentation + // TODO: parametrize format + jsonBytes, err := json.MarshalIndent(mb, "", " ") + if err != nil { + return err + } + + // Write JSON bytes to the passed path with permissions (-rw-r--r--) + err = ioutil.WriteFile(path, jsonBytes, 0644) + if err != nil { + return err + } + + return nil +} + +/* +GetSignableRepresentation returns the canonical JSON representation of the +Signed field of the Metablock on which it was called. If canonicalization +fails the first return value is nil and the second return value is the error. +*/ +func (mb *Metablock) GetSignableRepresentation() ([]byte, error) { + return EncodeCanonical(mb.Signed) +} + +/* +VerifySignature verifies the first signature, corresponding to the passed Key, +that it finds in the Signatures field of the Metablock on which it was called. +It returns an error if Signatures does not contain a Signature corresponding to +the passed Key, the object in Signed cannot be canonicalized, or the Signature +is invalid. 
+*/ +func (mb *Metablock) VerifySignature(key Key) error { + sig, err := mb.GetSignatureForKeyID(key.KeyID) + if err != nil { + return err + } + + dataCanonical, err := mb.GetSignableRepresentation() + if err != nil { + return err + } + + if err := VerifySignature(key, sig, dataCanonical); err != nil { + return err + } + return nil +} + +// GetSignatureForKeyID returns the signature that was created by the provided keyID, if it exists. +func (mb *Metablock) GetSignatureForKeyID(keyID string) (Signature, error) { + for _, s := range mb.Signatures { + if s.KeyID == keyID { + return s, nil + } + } + + return Signature{}, fmt.Errorf("no signature found for key '%s'", keyID) +} + +/* +ValidateMetablock ensures that a passed Metablock object is valid. It indirectly +validates the Link or Layout that the Metablock object contains. +*/ +func ValidateMetablock(mb Metablock) error { + switch mbSignedType := mb.Signed.(type) { + case Layout: + if err := validateLayout(mb.Signed.(Layout)); err != nil { + return err + } + case Link: + if err := validateLink(mb.Signed.(Link)); err != nil { + return err + } + default: + return fmt.Errorf("unknown type '%s', should be 'layout' or 'link'", + mbSignedType) + } + + if err := validateSliceOfSignatures(mb.Signatures); err != nil { + return err + } + + return nil +} + +/* +Sign creates a signature over the signed portion of the metablock using the Key +object provided. It then appends the resulting signature to the signatures +field as provided. It returns an error if the Signed object cannot be +canonicalized, or if the key is invalid or not supported. +*/ +func (mb *Metablock) Sign(key Key) error { + + dataCanonical, err := mb.GetSignableRepresentation() + if err != nil { + return err + } + + newSignature, err := GenerateSignature(dataCanonical, key) + if err != nil { + return err + } + + mb.Signatures = append(mb.Signatures, newSignature) + return nil +} + +/* +DigestSet contains a set of digests. 
It is represented as a map from +algorithm name to lowercase hex-encoded value. +*/ +type DigestSet map[string]string + +// Subject describes the set of software artifacts the statement applies to. +type Subject struct { + Name string `json:"name"` + Digest DigestSet `json:"digest"` +} + +// StatementHeader defines the common fields for all statements +type StatementHeader struct { + Type string `json:"_type"` + PredicateType string `json:"predicateType"` + Subject []Subject `json:"subject"` +} + +/* +Statement binds the attestation to a particular subject and identifies the +of the predicate. This struct represents a generic statement. +*/ +type Statement struct { + StatementHeader + // Predicate contains type speficic metadata. + Predicate interface{} `json:"predicate"` +} + +// ProvenanceBuilder idenfifies the entity that executed the build steps. +type ProvenanceBuilder struct { + ID string `json:"id"` +} + +// ProvenanceRecipe describes the actions performed by the builder. +type ProvenanceRecipe struct { + Type string `json:"type"` + // DefinedInMaterial can be sent as the null pointer to indicate that + // the value is not present. + DefinedInMaterial *int `json:"definedInMaterial,omitempty"` + EntryPoint string `json:"entryPoint"` + Arguments interface{} `json:"arguments,omitempty"` + Environment interface{} `json:"environment,omitempty"` +} + +// ProvenanceComplete indicates wheter the claims in build/recipe are complete. +// For in depth information refer to the specifictaion: +// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md +type ProvenanceComplete struct { + Arguments bool `json:"arguments"` + Environment bool `json:"environment"` + Materials bool `json:"materials"` +} + +// ProvenanceMetadata contains metadata for the built artifact. +type ProvenanceMetadata struct { + // Use pointer to make sure that the abscense of a time is not + // encoded as the Epoch time. 
+ BuildStartedOn *time.Time `json:"buildStartedOn,omitempty"` + BuildFinishedOn *time.Time `json:"buildFinishedOn,omitempty"` + Completeness ProvenanceComplete `json:"completeness"` + Reproducible bool `json:"reproducible"` +} + +// ProvenanceMaterial defines the materials used to build an artifact. +type ProvenanceMaterial struct { + URI string `json:"uri"` + Digest DigestSet `json:"digest,omitempty"` +} + +// ProvenancePredicate is the provenance predicate definition. +type ProvenancePredicate struct { + Builder ProvenanceBuilder `json:"builder"` + Recipe ProvenanceRecipe `json:"recipe"` + Metadata *ProvenanceMetadata `json:"metadata,omitempty"` + Materials []ProvenanceMaterial `json:"materials,omitempty"` +} + +// ProvenanceStatement is the definition for an entire provenance statement. +type ProvenanceStatement struct { + StatementHeader + Predicate ProvenancePredicate `json:"predicate"` +} + +// LinkStatement is the definition for an entire link statement. +type LinkStatement struct { + StatementHeader + Predicate Link `json:"predicate"` +} + +/* +SPDXStatement is the definition for an entire SPDX statement. +Currently not implemented. Some tooling exists here: +https://github.com/spdx/tools-golang, but this software is still in +early state. +This struct is the same as the generic Statement struct but is added for +completeness +*/ +type SPDXStatement struct { + StatementHeader + Predicate interface{} `json:"predicate"` +} + +/* +SSLSigner provides signature generation and validation based on the SSL +Signing Spec: https://github.com/secure-systems-lab/signing-spec +as describe by: https://github.com/MarkLodato/ITE/tree/media-type/ITE/5 +It wraps the generic SSL envelope signer and enforces the correct payload +type both during signature generation and validation. +*/ +type SSLSigner struct { + signer *ssl.EnvelopeSigner +} + +func NewSSLSigner(p ...ssl.SignVerifier) (*SSLSigner, error) { + es, err := ssl.NewEnvelopeSigner(p...) 
+ if err != nil { + return nil, err + } + + return &SSLSigner{ + signer: es, + }, nil +} + +func (s *SSLSigner) SignPayload(body []byte) (*ssl.Envelope, error) { + return s.signer.SignPayload(PayloadType, body) +} + +func (s *SSLSigner) Verify(e *ssl.Envelope) error { + if e.PayloadType != PayloadType { + return ErrInvalidPayloadType + } + + return s.signer.Verify(e) +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go new file mode 100644 index 000000000000..1bba77c39e50 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go @@ -0,0 +1,131 @@ +package in_toto + +import ( + "fmt" + "strings" +) + +// An error message issued in UnpackRule if it receives a malformed rule. +var errorMsg = "Wrong rule format, available formats are:\n" + + "\tMATCH [IN ] WITH (MATERIALS|PRODUCTS)" + + " [IN ] FROM ,\n" + + "\tCREATE ,\n" + + "\tDELETE ,\n" + + "\tMODIFY ,\n" + + "\tALLOW ,\n" + + "\tDISALLOW ,\n" + + "\tREQUIRE \n\n" + +/* +UnpackRule parses the passed rule and extracts and returns the information +required for rule processing. It can be used to verify if a rule has a valid +format. Available rule formats are: + + MATCH [IN ] WITH (MATERIALS|PRODUCTS) + [IN ] FROM , + CREATE , + DELETE , + MODIFY , + ALLOW , + DISALLOW + +Rule tokens are normalized to lower case before returning. The returned map +has the following format: + + { + "type": "match" | "create" | "delete" |"modify" | "allow" | "disallow" + "pattern": "", + "srcPrefix": "", // MATCH rule only + "dstPrefix": "", // MATCH rule only + "dstType": "materials" | "products">, // MATCH rule only + "dstName": "", // Match rule only + } + +If the rule does not match any of the available formats the first return value +is nil and the second return value is the error. 
+*/ +func UnpackRule(rule []string) (map[string]string, error) { + // Cache rule len + ruleLen := len(rule) + + // Create all lower rule copy to case-insensitively parse out tokens whose + // position we don't know yet. We keep the original rule to retain the + // non-token elements' case. + ruleLower := make([]string, ruleLen) + for i, val := range rule { + ruleLower[i] = strings.ToLower(val) + } + + switch ruleLower[0] { + case "create", "modify", "delete", "allow", "disallow", "require": + if ruleLen != 2 { + return nil, + fmt.Errorf("%s Got:\n\t %s", errorMsg, rule) + } + + return map[string]string{ + "type": ruleLower[0], + "pattern": rule[1], + }, nil + + case "match": + var srcPrefix string + var dstType string + var dstPrefix string + var dstName string + + // MATCH IN WITH (MATERIALS|PRODUCTS) \ + // IN FROM + if ruleLen == 10 && ruleLower[2] == "in" && + ruleLower[4] == "with" && ruleLower[6] == "in" && + ruleLower[8] == "from" { + srcPrefix = rule[3] + dstType = ruleLower[5] + dstPrefix = rule[7] + dstName = rule[9] + // MATCH IN WITH (MATERIALS|PRODUCTS) \ + // FROM + } else if ruleLen == 8 && ruleLower[2] == "in" && + ruleLower[4] == "with" && ruleLower[6] == "from" { + srcPrefix = rule[3] + dstType = ruleLower[5] + dstPrefix = "" + dstName = rule[7] + + // MATCH WITH (MATERIALS|PRODUCTS) IN + // FROM + } else if ruleLen == 8 && ruleLower[2] == "with" && + ruleLower[4] == "in" && ruleLower[6] == "from" { + srcPrefix = "" + dstType = ruleLower[3] + dstPrefix = rule[5] + dstName = rule[7] + + // MATCH WITH (MATERIALS|PRODUCTS) FROM + } else if ruleLen == 6 && ruleLower[2] == "with" && + ruleLower[4] == "from" { + srcPrefix = "" + dstType = ruleLower[3] + dstPrefix = "" + dstName = rule[5] + + } else { + return nil, + fmt.Errorf("%s Got:\n\t %s", errorMsg, rule) + + } + + return map[string]string{ + "type": ruleLower[0], + "pattern": rule[1], + "srcPrefix": srcPrefix, + "dstPrefix": dstPrefix, + "dstType": dstType, + "dstName": dstName, + }, nil + + 
default: + return nil, + fmt.Errorf("%s Got:\n\t %s", errorMsg, rule) + } +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go new file mode 100644 index 000000000000..80eef3d75da5 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go @@ -0,0 +1,400 @@ +package in_toto + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + "syscall" + + "github.com/shibumi/go-pathspec" +) + +// ErrSymCycle signals a detected symlink cycle in our RecordArtifacts() function. +var ErrSymCycle = errors.New("symlink cycle detected") + +// ErrUnsupportedHashAlgorithm signals a missing hash mapping in getHashMapping +var ErrUnsupportedHashAlgorithm = errors.New("unsupported hash algorithm detected") + +// visitedSymlinks is a hashset that contains all paths that we have visited. +var visitedSymlinks Set + +/* +RecordArtifact reads and hashes the contents of the file at the passed path +using sha256 and returns a map in the following format: + + { + "": { + "sha256": + } + } + +If reading the file fails, the first return value is nil and the second return +value is the error. +NOTE: For cross-platform consistency Windows-style line separators (CRLF) are +normalized to Unix-style line separators (LF) before hashing file contents. +*/ +func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) { + supportedHashMappings := getHashMapping() + // Read file from passed path + contents, err := ioutil.ReadFile(path) + hashedContentsMap := make(map[string]interface{}) + if err != nil { + return nil, err + } + + if lineNormalization { + // "Normalize" file contents. 
We convert all line separators to '\n' + // for keeping operating system independence + contents = bytes.ReplaceAll(contents, []byte("\r\n"), []byte("\n")) + contents = bytes.ReplaceAll(contents, []byte("\r"), []byte("\n")) + } + + // Create a map of all the hashes present in the hash_func list + for _, element := range hashAlgorithms { + if _, ok := supportedHashMappings[element]; !ok { + return nil, fmt.Errorf("%w: %s", ErrUnsupportedHashAlgorithm, element) + } + h := supportedHashMappings[element] + result := fmt.Sprintf("%x", hashToHex(h(), contents)) + hashedContentsMap[element] = result + } + + // Return it in a format that is conformant with link metadata artifacts + return hashedContentsMap, nil +} + +/* +RecordArtifacts is a wrapper around recordArtifacts. +RecordArtifacts initializes a set for storing visited symlinks, +calls recordArtifacts and deletes the set if no longer needed. +recordArtifacts walks through the passed slice of paths, traversing +subdirectories, and calls RecordArtifact for each file. It returns a map in +the following format: + + { + "": { + "sha256": + }, + "": { + "sha256": + }, + ... + } + +If recording an artifact fails the first return value is nil and the second +return value is the error. +*/ +func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (evalArtifacts map[string]interface{}, err error) { + // Make sure to initialize a fresh hashset for every RecordArtifacts call + visitedSymlinks = NewSet() + evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + // pass result and error through + return evalArtifacts, err +} + +/* +recordArtifacts walks through the passed slice of paths, traversing +subdirectories, and calls RecordArtifact for each file. It returns a map in +the following format: + + { + "": { + "sha256": + }, + "": { + "sha256": + }, + ... 
+ } + +If recording an artifact fails the first return value is nil and the second +return value is the error. +*/ +func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (map[string]interface{}, error) { + artifacts := make(map[string]interface{}) + for _, path := range paths { + err := filepath.Walk(path, + func(path string, info os.FileInfo, err error) error { + // Abort if Walk function has a problem, + // e.g. path does not exist + if err != nil { + return err + } + // We need to call pathspec.GitIgnore inside of our filepath.Walk, because otherwise + // we will not catch all paths. Just imagine a path like "." and a pattern like "*.pub". + // If we would call pathspec outside of the filepath.Walk this would not match. + ignore, err := pathspec.GitIgnore(gitignorePatterns, path) + if err != nil { + return err + } + if ignore { + return nil + } + // Don't hash directories + if info.IsDir() { + return nil + } + + // check for symlink and evaluate the last element in a symlink + // chain via filepath.EvalSymlinks. We use EvalSymlinks here, + // because with os.Readlink() we would just read the next + // element in a possible symlink chain. This would mean more + // iterations. infoMode()&os.ModeSymlink uses the file + // type bitmask to check for a symlink. + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + // return with error if we detect a symlink cycle + if ok := visitedSymlinks.Has(path); ok { + // this error will get passed through + // to RecordArtifacts() + return ErrSymCycle + } + evalSym, err := filepath.EvalSymlinks(path) + if err != nil { + return err + } + // add symlink to visitedSymlinks set + // this way, we know which link we have visited already + // if we visit a symlink twice, we have detected a symlink cycle + visitedSymlinks.Add(path) + // We recursively call RecordArtifacts() to follow + // the new path. 
+ evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + if evalErr != nil { + return evalErr + } + for key, value := range evalArtifacts { + artifacts[key] = value + } + return nil + } + artifact, err := RecordArtifact(path, hashAlgorithms, lineNormalization) + // Abort if artifact can't be recorded, e.g. + // due to file permissions + if err != nil { + return err + } + + for _, strip := range lStripPaths { + if strings.HasPrefix(path, strip) { + path = strings.TrimPrefix(path, strip) + break + } + } + // Check if path is unique + _, existingPath := artifacts[path] + if existingPath { + return fmt.Errorf("left stripping has resulted in non unique dictionary key: %s", path) + } + artifacts[path] = artifact + return nil + }) + + if err != nil { + return nil, err + } + } + + return artifacts, nil +} + +/* +waitErrToExitCode converts an error returned by Cmd.wait() to an exit code. It +returns -1 if no exit code can be inferred. +*/ +func waitErrToExitCode(err error) int { + // If there's no exit code, we return -1 + retVal := -1 + + // See https://stackoverflow.com/questions/10385551/get-exit-code-go + if err != nil { + if exiterr, ok := err.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + // This works on both Unix and Windows. Although package + // syscall is generally platform dependent, WaitStatus is + // defined for both Unix and Windows and in both cases has + // an ExitStatus() method with the same signature. + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + retVal = status.ExitStatus() + } + } + } else { + retVal = 0 + } + + return retVal +} + +/* +RunCommand executes the passed command in a subprocess. The first element of +cmdArgs is used as executable and the rest as command arguments. It captures +and returns stdout, stderr and exit code. 
The format of the returned map is: + + { + "return-value": , + "stdout": "", + "stderr": "" + } + +If the command cannot be executed or no pipes for stdout or stderr can be +created the first return value is nil and the second return value is the error. +NOTE: Since stdout and stderr are captured, they cannot be seen during the +command execution. +*/ +func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) { + + cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) + + if runDir != "" { + cmd.Dir = runDir + } + + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + + if err := cmd.Start(); err != nil { + return nil, err + } + + // TODO: duplicate stdout, stderr + stdout, _ := ioutil.ReadAll(stdoutPipe) + stderr, _ := ioutil.ReadAll(stderrPipe) + + retVal := waitErrToExitCode(cmd.Wait()) + + return map[string]interface{}{ + "return-value": float64(retVal), + "stdout": string(stdout), + "stderr": string(stderr), + }, nil +} + +/* +InTotoRun executes commands, e.g. for software supply chain steps or +inspections of an in-toto layout, and creates and returns corresponding link +metadata. Link metadata contains recorded products at the passed productPaths +and materials at the passed materialPaths. The returned link is wrapped in a +Metablock object. If command execution or artifact recording fails the first +return value is an empty Metablock and the second return value is the error. 
+*/
+func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string,
+	cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string,
+	lStripPaths []string, lineNormalization bool) (Metablock, error) {
+	var linkMb Metablock
+
+	materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	byProducts, err := RunCommand(cmdArgs, runDir)
+	if err != nil {
+		return linkMb, err
+	}
+
+	products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	linkMb.Signed = Link{
+		Type:        "link",
+		Name:        name,
+		Materials:   materials,
+		Products:    products,
+		ByProducts:  byProducts,
+		Command:     cmdArgs,
+		Environment: map[string]interface{}{},
+	}
+
+	linkMb.Signatures = []Signature{}
+	// We use a feature from Go 1.13 here, to check the key struct.
+	// IsZero() will return True, if the key hasn't been initialized
+
+	// with other values than the default ones.
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return linkMb, err
+		}
+	}
+
+	return linkMb, nil
+}
+
+/*
+InTotoRecordStart begins the creation of a link metablock file in two steps,
+in order to provide evidence for supply chain steps that cannot be carried out
+by a single command. InTotoRecordStart collects the hashes of the materials
+before any commands are run, signs the unfinished link, and returns the link.
+*/
+func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) {
+	var linkMb Metablock
+	materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	linkMb.Signed = Link{
+		Type:        "link",
+		Name:        name,
+		Materials:   materials,
+		Products:    map[string]interface{}{},
+		ByProducts:  map[string]interface{}{},
+		Command:     []string{},
+		Environment: map[string]interface{}{},
+	}
+
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return linkMb, err
+		}
+	}
+
+	return linkMb, nil
+}
+
+/*
+InTotoRecordStop ends the creation of a metadata link file created by
+InTotoRecordStart. InTotoRecordStop takes in a signed unfinished link metablock
+created by InTotoRecordStart and records the hashes of any products created by
+commands run between InTotoRecordStart and InTotoRecordStop. The resultant
+finished link metablock is then signed by the provided key and returned.
+*/ +func InTotoRecordStop(prelimLinkMb Metablock, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) { + var linkMb Metablock + if err := prelimLinkMb.VerifySignature(key); err != nil { + return linkMb, err + } + + link, ok := prelimLinkMb.Signed.(Link) + if !ok { + return linkMb, errors.New("invalid metadata block") + } + + products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + if err != nil { + return linkMb, err + } + + link.Products = products + linkMb.Signed = link + + if !reflect.ValueOf(key).IsZero() { + if err := linkMb.Sign(key); err != nil { + return linkMb, err + } + } + + return linkMb, nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go new file mode 100644 index 000000000000..59cba86eb52c --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go @@ -0,0 +1,147 @@ +package in_toto + +import ( + "fmt" +) + +/* +Set represents a data structure for set operations. See `NewSet` for how to +create a Set, and available Set receivers for useful set operations. + +Under the hood Set aliases map[string]struct{}, where the map keys are the set +elements and the map values are a memory-efficient way of storing the keys. +*/ +type Set map[string]struct{} + +/* +NewSet creates a new Set, assigns it the optionally passed variadic string +elements, and returns it. +*/ +func NewSet(elems ...string) Set { + var s Set = make(map[string]struct{}) + for _, elem := range elems { + s.Add(elem) + } + return s +} + +/* +Has returns True if the passed string is member of the set on which it was +called and False otherwise. +*/ +func (s Set) Has(elem string) bool { + _, ok := s[elem] + return ok +} + +/* +Add adds the passed string to the set on which it was called, if the string is +not a member of the set. 
+*/
+func (s Set) Add(elem string) {
+	s[elem] = struct{}{}
+}
+
+/*
+Remove removes the passed string from the set on which it was called, if the
+string is a member of the set.
+*/
+func (s Set) Remove(elem string) {
+	delete(s, elem)
+}
+
+/*
+Intersection creates and returns a new Set with the elements of the set on
+which it was called that are also in the passed set.
+*/
+func (s Set) Intersection(s2 Set) Set {
+	res := NewSet()
+	for elem := range s {
+		if !s2.Has(elem) {
+			continue
+		}
+		res.Add(elem)
+	}
+	return res
+}
+
+/*
+Difference creates and returns a new Set with the elements of the set on
+which it was called that are not in the passed set.
+*/
+func (s Set) Difference(s2 Set) Set {
+	res := NewSet()
+	for elem := range s {
+		if s2.Has(elem) {
+			continue
+		}
+		res.Add(elem)
+	}
+	return res
+}
+
+/*
+Filter creates and returns a new Set with the elements of the set on which it
+was called that match the passed pattern. A matching error is treated like a
+non-match plus a warning is printed.
+*/
+func (s Set) Filter(pattern string) Set {
+	res := NewSet()
+	for elem := range s {
+		matched, err := match(pattern, elem)
+		if err != nil {
+			fmt.Printf("WARNING: %s, pattern was '%s'\n", err, pattern)
+			continue
+		}
+		if !matched {
+			continue
+		}
+		res.Add(elem)
+	}
+	return res
+}
+
+/*
+Slice creates and returns an unordered string slice with the elements of the
+set on which it was called.
+*/
+func (s Set) Slice() []string {
+	var res []string
+	res = make([]string, 0, len(s))
+	for elem := range s {
+		res = append(res, elem)
+	}
+	return res
+}
+
+/*
+InterfaceKeyStrings returns string keys of passed interface{} map in an
+unordered string slice.
+*/
+func InterfaceKeyStrings(m map[string]interface{}) []string {
+	res := make([]string, len(m))
+	i := 0
+	for k := range m {
+		res[i] = k
+		i++
+	}
+	return res
+}
+
+/*
+IsSubSet checks if the parameter subset is a
+subset of the superset s.
+*/ +func (s Set) IsSubSet(subset Set) bool { + if len(subset) > len(s) { + return false + } + for key := range subset { + if s.Has(key) { + continue + } else { + return false + } + } + return true +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go new file mode 100644 index 000000000000..f555f79a528d --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go @@ -0,0 +1,14 @@ +//go:build linux || darwin || !windows +// +build linux darwin !windows + +package in_toto + +import "golang.org/x/sys/unix" + +func isWritable(path string) error { + err := unix.Access(path, unix.W_OK) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go new file mode 100644 index 000000000000..8552f0345d04 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go @@ -0,0 +1,25 @@ +package in_toto + +import ( + "errors" + "os" +) + +func isWritable(path string) error { + // get fileInfo + info, err := os.Stat(path) + if err != nil { + return err + } + + // check if path is a directory + if !info.IsDir() { + return errors.New("not a directory") + } + + // Check if the user bit is enabled in file permission + if info.Mode().Perm()&(1<<(uint(7))) == 0 { + return errors.New("not writable") + } + return nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go new file mode 100644 index 000000000000..2302040f4600 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go @@ -0,0 +1,1091 @@ +/* +Package in_toto implements types and routines to verify a software supply chain +according to the in-toto specification. 
+See https://github.com/in-toto/docs/blob/master/in-toto-spec.md +*/ +package in_toto + +import ( + "crypto/x509" + "errors" + "fmt" + "io" + "os" + "path" + osPath "path" + "path/filepath" + "reflect" + "regexp" + "strings" + "time" +) + +// ErrInspectionRunDirIsSymlink gets thrown if the runDir is a symlink +var ErrInspectionRunDirIsSymlink = errors.New("runDir is a symlink. This is a security risk") + +/* +RunInspections iteratively executes the command in the Run field of all +inspections of the passed layout, creating unsigned link metadata that records +all files found in the current working directory as materials (before command +execution) and products (after command execution). A map with inspection names +as keys and Metablocks containing the generated link metadata as values is +returned. The format is: + + { + : Metablock, + : Metablock, + ... + } + +If executing the inspection command fails, or if the executed command has a +non-zero exit code, the first return value is an empty Metablock map and the +second return value is the error. 
+*/ +func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[string]Metablock, error) { + inspectionMetadata := make(map[string]Metablock) + + for _, inspection := range layout.Inspect { + + paths := []string{"."} + if runDir != "" { + paths = []string{runDir} + } + + linkMb, err := InTotoRun(inspection.Name, runDir, paths, paths, + inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization) + + if err != nil { + return nil, err + } + + retVal := linkMb.Signed.(Link).ByProducts["return-value"] + if retVal != float64(0) { + return nil, fmt.Errorf("inspection command '%s' of inspection '%s'"+ + " returned a non-zero value: %d", inspection.Run, inspection.Name, + retVal) + } + + // Dump inspection link to cwd using the short link name format + linkName := fmt.Sprintf(LinkNameFormatShort, inspection.Name) + if err := linkMb.Dump(linkName); err != nil { + fmt.Printf("JSON serialization or writing failed: %s", err) + } + + inspectionMetadata[inspection.Name] = linkMb + } + return inspectionMetadata, nil +} + +// verifyMatchRule is a helper function to process artifact rules of +// type MATCH. See VerifyArtifacts for more details. 
+func verifyMatchRule(ruleData map[string]string, + srcArtifacts map[string]interface{}, srcArtifactQueue Set, + itemsMetadata map[string]Metablock) Set { + consumed := NewSet() + // Get destination link metadata + dstLinkMb, exists := itemsMetadata[ruleData["dstName"]] + if !exists { + // Destination link does not exist, rule can't consume any + // artifacts + return consumed + } + + // Get artifacts from destination link metadata + var dstArtifacts map[string]interface{} + switch ruleData["dstType"] { + case "materials": + dstArtifacts = dstLinkMb.Signed.(Link).Materials + case "products": + dstArtifacts = dstLinkMb.Signed.(Link).Products + } + + // cleanup paths in pattern and artifact maps + if ruleData["pattern"] != "" { + ruleData["pattern"] = path.Clean(ruleData["pattern"]) + } + for k := range srcArtifacts { + if path.Clean(k) != k { + srcArtifacts[path.Clean(k)] = srcArtifacts[k] + delete(srcArtifacts, k) + } + } + for k := range dstArtifacts { + if path.Clean(k) != k { + dstArtifacts[path.Clean(k)] = dstArtifacts[k] + delete(dstArtifacts, k) + } + } + + // Normalize optional source and destination prefixes, i.e. if + // there is a prefix, then add a trailing slash if not there yet + for _, prefix := range []string{"srcPrefix", "dstPrefix"} { + if ruleData[prefix] != "" { + ruleData[prefix] = path.Clean(ruleData[prefix]) + if !strings.HasSuffix(ruleData[prefix], "/") { + ruleData[prefix] += "/" + } + } + } + // Iterate over queue and mark consumed artifacts + for srcPath := range srcArtifactQueue { + // Remove optional source prefix from source artifact path + // Noop if prefix is empty, or artifact does not have it + srcBasePath := strings.TrimPrefix(srcPath, ruleData["srcPrefix"]) + + // Ignore artifacts not matched by rule pattern + matched, err := match(ruleData["pattern"], srcBasePath) + if err != nil || !matched { + continue + } + + // Construct corresponding destination artifact path, i.e. 
+ // an optional destination prefix plus the source base path + dstPath := path.Clean(osPath.Join(ruleData["dstPrefix"], srcBasePath)) + + // Try to find the corresponding destination artifact + dstArtifact, exists := dstArtifacts[dstPath] + // Ignore artifacts without corresponding destination artifact + if !exists { + continue + } + + // Ignore artifact pairs with no matching hashes + if !reflect.DeepEqual(srcArtifacts[srcPath], dstArtifact) { + continue + } + + // Only if a source and destination artifact pair was found and + // their hashes are equal, will we mark the source artifact as + // successfully consumed, i.e. it will be removed from the queue + consumed.Add(srcPath) + } + return consumed +} + +/* +VerifyArtifacts iteratively applies the material and product rules of the +passed items (step or inspection) to enforce and authorize artifacts (materials +or products) reported by the corresponding link and to guarantee that +artifacts are linked together across links. In the beginning all artifacts are +placed in a queue according to their type. If an artifact gets consumed by a +rule it is removed from the queue. An artifact can only be consumed once in +the course of processing the set of rules in ExpectedMaterials or +ExpectedProducts. + +Rules of type MATCH, ALLOW, CREATE, DELETE, MODIFY and DISALLOW are supported. + +All rules except for DISALLOW consume queued artifacts on success, and +leave the queue unchanged on failure. Hence, it is left to a terminal +DISALLOW rule to fail overall verification, if artifacts are left in the queue +that should have been consumed by preceding rules. 
+*/ +func VerifyArtifacts(items []interface{}, + itemsMetadata map[string]Metablock) error { + // Verify artifact rules for each item in the layout + for _, itemI := range items { + // The layout item (interface) must be a Link or an Inspection we are only + // interested in the name and the expected materials and products + var itemName string + var expectedMaterials [][]string + var expectedProducts [][]string + + switch item := itemI.(type) { + case Step: + itemName = item.Name + expectedMaterials = item.ExpectedMaterials + expectedProducts = item.ExpectedProducts + + case Inspection: + itemName = item.Name + expectedMaterials = item.ExpectedMaterials + expectedProducts = item.ExpectedProducts + + default: // Something wrong + return fmt.Errorf("VerifyArtifacts received an item of invalid type,"+ + " elements of passed slice 'items' must be one of 'Step' or"+ + " 'Inspection', got: '%s'", reflect.TypeOf(item)) + } + + // Use the item's name to extract the corresponding link + srcLinkMb, exists := itemsMetadata[itemName] + if !exists { + return fmt.Errorf("VerifyArtifacts could not find metadata"+ + " for item '%s', got: '%s'", itemName, itemsMetadata) + } + + // Create shortcuts to materials and products (including hashes) reported + // by the item's link, required to verify "match" rules + materials := srcLinkMb.Signed.(Link).Materials + products := srcLinkMb.Signed.(Link).Products + + // All other rules only require the material or product paths (without + // hashes). 
We extract them from the corresponding maps and store them as + // sets for convenience in further processing + materialPaths := NewSet() + for _, p := range InterfaceKeyStrings(materials) { + materialPaths.Add(path.Clean(p)) + } + productPaths := NewSet() + for _, p := range InterfaceKeyStrings(products) { + productPaths.Add(path.Clean(p)) + } + + // For `create`, `delete` and `modify` rules we prepare sets of artifacts + // (without hashes) that were created, deleted or modified in the current + // step or inspection + created := productPaths.Difference(materialPaths) + deleted := materialPaths.Difference(productPaths) + remained := materialPaths.Intersection(productPaths) + modified := NewSet() + for name := range remained { + if !reflect.DeepEqual(materials[name], products[name]) { + modified.Add(name) + } + } + + // For each item we have to run rule verification, once per artifact type. + // Here we prepare the corresponding data for each round. + verificationDataList := []map[string]interface{}{ + { + "srcType": "materials", + "rules": expectedMaterials, + "artifacts": materials, + "artifactPaths": materialPaths, + }, + { + "srcType": "products", + "rules": expectedProducts, + "artifacts": products, + "artifactPaths": productPaths, + }, + } + // TODO: Add logging library (see in-toto/in-toto-golang#4) + // fmt.Printf("Verifying %s '%s' ", reflect.TypeOf(itemI), itemName) + + // Process all material rules using the corresponding materials and all + // product rules using the corresponding products + for _, verificationData := range verificationDataList { + // TODO: Add logging library (see in-toto/in-toto-golang#4) + // fmt.Printf("%s...\n", verificationData["srcType"]) + + rules := verificationData["rules"].([][]string) + artifacts := verificationData["artifacts"].(map[string]interface{}) + + // Use artifacts (without hashes) as base queue. Each rule only operates + // on artifacts in that queue. If a rule consumes an artifact (i.e. 
can + // be applied successfully), the artifact is removed from the queue. By + // applying a DISALLOW rule eventually, verification may return an error, + // if the rule matches any artifacts in the queue that should have been + // consumed earlier. + queue := verificationData["artifactPaths"].(Set) + + // TODO: Add logging library (see in-toto/in-toto-golang#4) + // fmt.Printf("Initial state\nMaterials: %s\nProducts: %s\nQueue: %s\n\n", + // materialPaths.Slice(), productPaths.Slice(), queue.Slice()) + + // Verify rules sequentially + for _, rule := range rules { + // Parse rule and error out if it is malformed + // NOTE: the rule format should have been validated before + ruleData, err := UnpackRule(rule) + if err != nil { + return err + } + + // Apply rule pattern to filter queued artifacts that are up for rule + // specific consumption + filtered := queue.Filter(path.Clean(ruleData["pattern"])) + + var consumed Set + switch ruleData["type"] { + case "match": + // Note: here we need to perform more elaborate filtering + consumed = verifyMatchRule(ruleData, artifacts, queue, itemsMetadata) + + case "allow": + // Consumes all filtered artifacts + consumed = filtered + + case "create": + // Consumes filtered artifacts that were created + consumed = filtered.Intersection(created) + + case "delete": + // Consumes filtered artifacts that were deleted + consumed = filtered.Intersection(deleted) + + case "modify": + // Consumes filtered artifacts that were modified + consumed = filtered.Intersection(modified) + + case "disallow": + // Does not consume but errors out if artifacts were filtered + if len(filtered) > 0 { + return fmt.Errorf("artifact verification failed for %s '%s',"+ + " %s %s disallowed by rule %s", + reflect.TypeOf(itemI).Name(), itemName, + verificationData["srcType"], filtered.Slice(), rule) + } + case "require": + // REQUIRE is somewhat of a weird animal that does not use + // patterns bur rather single filenames (for now). 
+ if !queue.Has(ruleData["pattern"]) { + return fmt.Errorf("artifact verification failed for %s in REQUIRE '%s',"+ + " because %s is not in %s", verificationData["srcType"], + ruleData["pattern"], ruleData["pattern"], queue.Slice()) + } + } + // Update queue by removing consumed artifacts + queue = queue.Difference(consumed) + // TODO: Add logging library (see in-toto/in-toto-golang#4) + // fmt.Printf("Rule: %s\nQueue: %s\n\n", rule, queue.Slice()) + } + } + } + return nil +} + +/* +ReduceStepsMetadata merges for each step of the passed Layout all the passed +per-functionary links into a single link, asserting that the reported Materials +and Products are equal across links for a given step. This function may be +used at a time during the overall verification, where link threshold's have +been verified and subsequent verification only needs one exemplary link per +step. The function returns a map with one Metablock (link) per step: + + { + : Metablock, + : Metablock, + ... + } + +If links corresponding to the same step report different Materials or different +Products, the first return value is an empty Metablock map and the second +return value is the error. 
+*/ +func ReduceStepsMetadata(layout Layout, + stepsMetadata map[string]map[string]Metablock) (map[string]Metablock, + error) { + stepsMetadataReduced := make(map[string]Metablock) + + for _, step := range layout.Steps { + linksPerStep, ok := stepsMetadata[step.Name] + // We should never get here, layout verification must fail earlier + if !ok || len(linksPerStep) < 1 { + panic("Could not reduce metadata for step '" + step.Name + + "', no link metadata found.") + } + + // Get the first link (could be any link) for the current step, which will + // serve as reference link for below comparisons + var referenceKeyID string + var referenceLinkMb Metablock + for keyID, linkMb := range linksPerStep { + referenceLinkMb = linkMb + referenceKeyID = keyID + break + } + + // Only one link, nothing to reduce, take the reference link + if len(linksPerStep) == 1 { + stepsMetadataReduced[step.Name] = referenceLinkMb + + // Multiple links, reduce but first check + } else { + // Artifact maps must be equal for each type among all links + // TODO: What should we do if there are more links, than the + // threshold requires, but not all of them are equal? Right now we would + // also error. + for keyID, linkMb := range linksPerStep { + if !reflect.DeepEqual(linkMb.Signed.(Link).Materials, + referenceLinkMb.Signed.(Link).Materials) || + !reflect.DeepEqual(linkMb.Signed.(Link).Products, + referenceLinkMb.Signed.(Link).Products) { + return nil, fmt.Errorf("link '%s' and '%s' have different"+ + " artifacts", + fmt.Sprintf(LinkNameFormat, step.Name, referenceKeyID), + fmt.Sprintf(LinkNameFormat, step.Name, keyID)) + } + } + // We haven't errored out, so we can reduce (i.e take the reference link) + stepsMetadataReduced[step.Name] = referenceLinkMb + } + } + return stepsMetadataReduced, nil +} + +/* +VerifyStepCommandAlignment (soft) verifies that for each step of the passed +layout the command executed, as per the passed link, matches the expected +command, as per the layout. 
Soft verification means that, in case a command +does not align, a warning is issued. +*/ +func VerifyStepCommandAlignment(layout Layout, + stepsMetadata map[string]map[string]Metablock) { + for _, step := range layout.Steps { + linksPerStep, ok := stepsMetadata[step.Name] + // We should never get here, layout verification must fail earlier + if !ok || len(linksPerStep) < 1 { + panic("Could not verify command alignment for step '" + step.Name + + "', no link metadata found.") + } + + for signerKeyID, linkMb := range linksPerStep { + expectedCommandS := strings.Join(step.ExpectedCommand, " ") + executedCommandS := strings.Join(linkMb.Signed.(Link).Command, " ") + + if expectedCommandS != executedCommandS { + linkName := fmt.Sprintf(LinkNameFormat, step.Name, signerKeyID) + fmt.Printf("WARNING: Expected command for step '%s' (%s) and command"+ + " reported by '%s' (%s) differ.\n", + step.Name, expectedCommandS, linkName, executedCommandS) + } + } + } +} + +/* +LoadLayoutCertificates loads the root and intermediate CAs from the layout if in the layout. +This will be used to check signatures that were used to sign links but not configured +in the PubKeys section of the step. No configured CAs means we don't want to allow this. +Returned CertPools will be empty in this case. 
+*/ +func LoadLayoutCertificates(layout Layout, intermediatePems [][]byte) (*x509.CertPool, *x509.CertPool, error) { + rootPool := x509.NewCertPool() + for _, certPem := range layout.RootCas { + ok := rootPool.AppendCertsFromPEM([]byte(certPem.KeyVal.Certificate)) + if !ok { + return nil, nil, fmt.Errorf("failed to load root certificates for layout") + } + } + + intermediatePool := x509.NewCertPool() + for _, intermediatePem := range layout.IntermediateCas { + ok := intermediatePool.AppendCertsFromPEM([]byte(intermediatePem.KeyVal.Certificate)) + if !ok { + return nil, nil, fmt.Errorf("failed to load intermediate certificates for layout") + } + } + + for _, intermediatePem := range intermediatePems { + ok := intermediatePool.AppendCertsFromPEM(intermediatePem) + if !ok { + return nil, nil, fmt.Errorf("failed to load provided intermediate certificates") + } + } + + return rootPool, intermediatePool, nil +} + +/* +VerifyLinkSignatureThesholds verifies that for each step of the passed layout, +there are at least Threshold links, validly signed by different authorized +functionaries. The returned map of link metadata per steps contains only +links with valid signatures from distinct functionaries and has the format: + + { + : { + : Metablock, + : Metablock, + ... + }, + : { + : Metablock, + : Metablock, + ... + } + ... + } + +If for any step of the layout there are not enough links available, the first +return value is an empty map of Metablock maps and the second return value is +the error. 
+*/ +func VerifyLinkSignatureThesholds(layout Layout, + stepsMetadata map[string]map[string]Metablock, rootCertPool, intermediateCertPool *x509.CertPool) ( + map[string]map[string]Metablock, error) { + // This will stores links with valid signature from an authorized functionary + // for all steps + stepsMetadataVerified := make(map[string]map[string]Metablock) + + // Try to find enough (>= threshold) links each with a valid signature from + // distinct authorized functionaries for each step + for _, step := range layout.Steps { + var stepErr error + + // This will store links with valid signature from an authorized + // functionary for the given step + linksPerStepVerified := make(map[string]Metablock) + + // Check if there are any links at all for the given step + linksPerStep, ok := stepsMetadata[step.Name] + if !ok || len(linksPerStep) < 1 { + stepErr = fmt.Errorf("no links found") + } + + // For each link corresponding to a step, check that the signer key was + // authorized, the layout contains a verification key and the signature + // verification passes. Only good links are stored, to verify thresholds + // below. + isAuthorizedSignature := false + for signerKeyID, linkMb := range linksPerStep { + for _, authorizedKeyID := range step.PubKeys { + if signerKeyID == authorizedKeyID { + if verifierKey, ok := layout.Keys[authorizedKeyID]; ok { + if err := linkMb.VerifySignature(verifierKey); err == nil { + linksPerStepVerified[signerKeyID] = linkMb + isAuthorizedSignature = true + break + } + } + } + } + + // If the signer's key wasn't in our step's pubkeys array, check the cert pool to + // see if the key is known to us. 
+ if !isAuthorizedSignature { + sig, err := linkMb.GetSignatureForKeyID(signerKeyID) + if err != nil { + stepErr = err + continue + } + + cert, err := sig.GetCertificate() + if err != nil { + stepErr = err + continue + } + + // test certificate against the step's constraints to make sure it's a valid functionary + err = step.CheckCertConstraints(cert, layout.RootCAIDs(), rootCertPool, intermediateCertPool) + if err != nil { + stepErr = err + continue + } + + err = linkMb.VerifySignature(cert) + if err != nil { + stepErr = err + continue + } + + linksPerStepVerified[signerKeyID] = linkMb + } + } + + // Store all good links for a step + stepsMetadataVerified[step.Name] = linksPerStepVerified + + if len(linksPerStepVerified) < step.Threshold { + linksPerStep := stepsMetadata[step.Name] + return nil, fmt.Errorf("step '%s' requires '%d' link metadata file(s)."+ + " '%d' out of '%d' available link(s) have a valid signature from an"+ + " authorized signer: %v", step.Name, step.Threshold, + len(linksPerStepVerified), len(linksPerStep), stepErr) + } + } + return stepsMetadataVerified, nil +} + +/* +LoadLinksForLayout loads for every Step of the passed Layout a Metablock +containing the corresponding Link. A base path to a directory that contains +the links may be passed using linkDir. Link file names are constructed, +using LinkNameFormat together with the corresponding step name and authorized +functionary key ids. A map of link metadata is returned and has the following +format: + + { + : { + : Metablock, + : Metablock, + ... + }, + : { + : Metablock, + : Metablock, + ... + } + ... + } + +If a link cannot be loaded at a constructed link name or is invalid, it is +ignored. Only a preliminary threshold check is performed, that is, if there +aren't at least Threshold links for any given step, the first return value +is an empty map of Metablock maps and the second return value is the error. 
+*/ +func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metablock, error) { + stepsMetadata := make(map[string]map[string]Metablock) + + for _, step := range layout.Steps { + linksPerStep := make(map[string]Metablock) + // Since we can verify against certificates belonging to a CA, we need to + // load any possible links + linkFiles, err := filepath.Glob(osPath.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name))) + if err != nil { + return nil, err + } + + for _, linkPath := range linkFiles { + var linkMb Metablock + if err := linkMb.Load(linkPath); err != nil { + continue + } + + // To get the full key from the metadata's signatures, we have to check + // for one with the same short id... + signerShortKeyID := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(linkPath), step.Name+"."), ".link") + for _, sig := range linkMb.Signatures { + if strings.HasPrefix(sig.KeyID, signerShortKeyID) { + linksPerStep[sig.KeyID] = linkMb + break + } + } + } + + if len(linksPerStep) < step.Threshold { + return nil, fmt.Errorf("step '%s' requires '%d' link metadata file(s),"+ + " found '%d'", step.Name, step.Threshold, len(linksPerStep)) + } + + stepsMetadata[step.Name] = linksPerStep + } + + return stepsMetadata, nil +} + +/* +VerifyLayoutExpiration verifies that the passed Layout has not expired. It +returns an error if the (zulu) date in the Expires field is in the past. +*/ +func VerifyLayoutExpiration(layout Layout) error { + expires, err := time.Parse(ISO8601DateSchema, layout.Expires) + if err != nil { + return err + } + // Uses timezone of expires, i.e. UTC + if time.Until(expires) < 0 { + return fmt.Errorf("layout has expired on '%s'", expires) + } + return nil +} + +/* +VerifyLayoutSignatures verifies for each key in the passed key map the +corresponding signature of the Layout in the passed Metablock's Signed field. +Signatures and keys are associated by key id. 
If the key map is empty, or the +Metablock's Signature field does not have a signature for one or more of the +passed keys, or a matching signature is invalid, an error is returned. +*/ +func VerifyLayoutSignatures(layoutMb Metablock, + layoutKeys map[string]Key) error { + if len(layoutKeys) < 1 { + return fmt.Errorf("layout verification requires at least one key") + } + + for _, key := range layoutKeys { + if err := layoutMb.VerifySignature(key); err != nil { + return err + } + } + return nil +} + +/* +GetSummaryLink merges the materials of the first step (as mentioned in the +layout) and the products of the last step and returns a new link. This link +reports the materials and products and summarizes the overall software supply +chain. +NOTE: The assumption is that the steps mentioned in the layout are to be +performed sequentially. So, the first step mentioned in the layout denotes what +comes into the supply chain and the last step denotes what goes out. +*/ +func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metablock, + stepName string) (Metablock, error) { + var summaryLink Link + var result Metablock + if len(layout.Steps) > 0 { + firstStepLink := stepsMetadataReduced[layout.Steps[0].Name] + lastStepLink := stepsMetadataReduced[layout.Steps[len(layout.Steps)-1].Name] + + summaryLink.Materials = firstStepLink.Signed.(Link).Materials + summaryLink.Name = stepName + summaryLink.Type = firstStepLink.Signed.(Link).Type + + summaryLink.Products = lastStepLink.Signed.(Link).Products + summaryLink.ByProducts = lastStepLink.Signed.(Link).ByProducts + // Using the last command of the sublayout as the command + // of the summary link can be misleading. Is it necessary to + // include all the commands executed as part of sublayout? 
+ summaryLink.Command = lastStepLink.Signed.(Link).Command + } + + result.Signed = summaryLink + + return result, nil +} + +/* +VerifySublayouts checks if any step in the supply chain is a sublayout, and if +so, recursively resolves it and replaces it with a summary link summarizing the +steps carried out in the sublayout. +*/ +func VerifySublayouts(layout Layout, + stepsMetadataVerified map[string]map[string]Metablock, + superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metablock, error) { + for stepName, linkData := range stepsMetadataVerified { + for keyID, metadata := range linkData { + if _, ok := metadata.Signed.(Layout); ok { + layoutKeys := make(map[string]Key) + layoutKeys[keyID] = layout.Keys[keyID] + + sublayoutLinkDir := fmt.Sprintf(SublayoutLinkDirFormat, + stepName, keyID) + sublayoutLinkPath := filepath.Join(superLayoutLinkPath, + sublayoutLinkDir) + summaryLink, err := InTotoVerify(metadata, layoutKeys, + sublayoutLinkPath, stepName, make(map[string]string), intermediatePems, lineNormalization) + if err != nil { + return nil, err + } + linkData[keyID] = summaryLink + } + + } + } + return stepsMetadataVerified, nil +} + +// TODO: find a better way than two helper functions for the replacer op + +func substituteParamatersInSlice(replacer *strings.Replacer, slice []string) []string { + newSlice := make([]string, 0) + for _, item := range slice { + newSlice = append(newSlice, replacer.Replace(item)) + } + return newSlice +} + +func substituteParametersInSliceOfSlices(replacer *strings.Replacer, + slice [][]string) [][]string { + newSlice := make([][]string, 0) + for _, item := range slice { + newSlice = append(newSlice, substituteParamatersInSlice(replacer, + item)) + } + return newSlice +} + +/* +SubstituteParameters performs parameter substitution in steps and inspections +in the following fields: +- Expected Materials and Expected Products of both +- Run of inspections +- Expected Command of steps 
+The substitution marker is '{}' and the keyword within the braces is replaced +by a value found in the substitution map passed, parameterDictionary. The +layout with parameters substituted is returned to the calling function. +*/ +func SubstituteParameters(layout Layout, + parameterDictionary map[string]string) (Layout, error) { + + if len(parameterDictionary) == 0 { + return layout, nil + } + + parameters := make([]string, 0) + + re := regexp.MustCompile("^[a-zA-Z0-9_-]+$") + + for parameter, value := range parameterDictionary { + parameterFormatCheck := re.MatchString(parameter) + if !parameterFormatCheck { + return layout, fmt.Errorf("invalid format for parameter") + } + + parameters = append(parameters, "{"+parameter+"}") + parameters = append(parameters, value) + } + + replacer := strings.NewReplacer(parameters...) + + for i := range layout.Steps { + layout.Steps[i].ExpectedMaterials = substituteParametersInSliceOfSlices( + replacer, layout.Steps[i].ExpectedMaterials) + layout.Steps[i].ExpectedProducts = substituteParametersInSliceOfSlices( + replacer, layout.Steps[i].ExpectedProducts) + layout.Steps[i].ExpectedCommand = substituteParamatersInSlice(replacer, + layout.Steps[i].ExpectedCommand) + } + + for i := range layout.Inspect { + layout.Inspect[i].ExpectedMaterials = + substituteParametersInSliceOfSlices(replacer, + layout.Inspect[i].ExpectedMaterials) + layout.Inspect[i].ExpectedProducts = + substituteParametersInSliceOfSlices(replacer, + layout.Inspect[i].ExpectedProducts) + layout.Inspect[i].Run = substituteParamatersInSlice(replacer, + layout.Inspect[i].Run) + } + + return layout, nil +} + +/* +InTotoVerify can be used to verify an entire software supply chain according to +the in-toto specification. 
It requires the metadata of the root layout, a map
+that contains public keys to verify the root layout signatures, a path to a
+directory from where it can load link metadata files, which are treated as
+signed evidence for the steps defined in the layout, a step name, and a
+parameter dictionary used for parameter substitution. The step name only
+matters for sublayouts, where it's important to associate the summary of that
+step with a unique name. The verification routine is as follows:
+
+1. Verify layout signature(s) using passed key(s)
+2. Verify layout expiration date
+3. Substitute parameters in layout
+4. Load link metadata files for steps of layout
+5. Verify signatures and signature thresholds for steps of layout
+6. Verify sublayouts recursively
+7. Verify command alignment for steps of layout (only warns)
+8. Verify artifact rules for steps of layout
+9. Execute inspection commands (generates link metadata for each inspection)
+10. Verify artifact rules for inspections of layout
+
+InTotoVerify returns a summary link wrapped in a Metablock object and an error
+value. If any of the verification routines fail, verification is aborted and
+error is returned. In such an instance, the first value remains an empty
+Metablock object.
+
+NOTE: Artifact rules of type "create", "modify"
+and "delete" are currently not supported.
+*/ +func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, + linkDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( + Metablock, error) { + + var summaryLink Metablock + var err error + + // Verify root signatures + if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil { + return summaryLink, err + } + + // Extract the layout from its Metablock container (for further processing) + layout := layoutMb.Signed.(Layout) + + // Verify layout expiration + if err := VerifyLayoutExpiration(layout); err != nil { + return summaryLink, err + } + + // Substitute parameters in layout + layout, err = SubstituteParameters(layout, parameterDictionary) + if err != nil { + return summaryLink, err + } + + rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems) + if err != nil { + return summaryLink, err + } + + // Load links for layout + stepsMetadata, err := LoadLinksForLayout(layout, linkDir) + if err != nil { + return summaryLink, err + } + + // Verify link signatures + stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout, + stepsMetadata, rootCertPool, intermediateCertPool) + if err != nil { + return summaryLink, err + } + + // Verify and resolve sublayouts + stepsSublayoutVerified, err := VerifySublayouts(layout, + stepsMetadataVerified, linkDir, intermediatePems, lineNormalization) + if err != nil { + return summaryLink, err + } + + // Verify command alignment (WARNING only) + VerifyStepCommandAlignment(layout, stepsSublayoutVerified) + + // Given that signature thresholds have been checked above and the rest of + // the relevant link properties, i.e. materials and products, have to be + // exactly equal, we can reduce the map of steps metadata. However, we error + // if the relevant properties are not equal among links of a step. 
+ stepsMetadataReduced, err := ReduceStepsMetadata(layout, + stepsSublayoutVerified) + if err != nil { + return summaryLink, err + } + + // Verify artifact rules + if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), + stepsMetadataReduced); err != nil { + return summaryLink, err + } + + inspectionMetadata, err := RunInspections(layout, "", lineNormalization) + if err != nil { + return summaryLink, err + } + + // Add steps metadata to inspection metadata, because inspection artifact + // rules may also refer to artifacts reported by step links + for k, v := range stepsMetadataReduced { + inspectionMetadata[k] = v + } + + if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), + inspectionMetadata); err != nil { + return summaryLink, err + } + + summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + if err != nil { + return summaryLink, err + } + + return summaryLink, nil +} + +/* +InTotoVerifyWithDirectory provides the same functionality as IntotoVerify, but +adds the possibility to select a local directory from where the inspections are run. +*/ +func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, + linkDir string, runDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( + Metablock, error) { + + var summaryLink Metablock + var err error + + // runDir sanity checks + // check if path exists + info, err := os.Stat(runDir) + if err != nil { + return Metablock{}, err + } + + // check if runDir is a symlink + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + return Metablock{}, ErrInspectionRunDirIsSymlink + } + + // check if runDir is writable and a directory + err = isWritable(runDir) + if err != nil { + return Metablock{}, err + } + + // check if runDir is empty (we do not want to overwrite files) + // We abuse File.Readdirnames for this action. 
+ f, err := os.Open(runDir) + if err != nil { + return Metablock{}, err + } + defer f.Close() + // We use Readdirnames(1) for performance reasons, one child node + // is enough to proof that the directory is not empty + _, err = f.Readdirnames(1) + // if io.EOF gets returned as error the directory is empty + if err == io.EOF { + return Metablock{}, err + } + err = f.Close() + if err != nil { + return Metablock{}, err + } + + // Verify root signatures + if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil { + return summaryLink, err + } + + // Extract the layout from its Metablock container (for further processing) + layout := layoutMb.Signed.(Layout) + + // Verify layout expiration + if err := VerifyLayoutExpiration(layout); err != nil { + return summaryLink, err + } + + // Substitute parameters in layout + layout, err = SubstituteParameters(layout, parameterDictionary) + if err != nil { + return summaryLink, err + } + + rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems) + if err != nil { + return summaryLink, err + } + + // Load links for layout + stepsMetadata, err := LoadLinksForLayout(layout, linkDir) + if err != nil { + return summaryLink, err + } + + // Verify link signatures + stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout, + stepsMetadata, rootCertPool, intermediateCertPool) + if err != nil { + return summaryLink, err + } + + // Verify and resolve sublayouts + stepsSublayoutVerified, err := VerifySublayouts(layout, + stepsMetadataVerified, linkDir, intermediatePems, lineNormalization) + if err != nil { + return summaryLink, err + } + + // Verify command alignment (WARNING only) + VerifyStepCommandAlignment(layout, stepsSublayoutVerified) + + // Given that signature thresholds have been checked above and the rest of + // the relevant link properties, i.e. materials and products, have to be + // exactly equal, we can reduce the map of steps metadata. 
However, we error + // if the relevant properties are not equal among links of a step. + stepsMetadataReduced, err := ReduceStepsMetadata(layout, + stepsSublayoutVerified) + if err != nil { + return summaryLink, err + } + + // Verify artifact rules + if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), + stepsMetadataReduced); err != nil { + return summaryLink, err + } + + inspectionMetadata, err := RunInspections(layout, runDir, lineNormalization) + if err != nil { + return summaryLink, err + } + + // Add steps metadata to inspection metadata, because inspection artifact + // rules may also refer to artifacts reported by step links + for k, v := range stepsMetadataReduced { + inspectionMetadata[k] = v + } + + if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), + inspectionMetadata); err != nil { + return summaryLink, err + } + + summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + if err != nil { + return summaryLink, err + } + + return summaryLink, nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/pkg/ssl/sign.go b/vendor/github.com/in-toto/in-toto-golang/pkg/ssl/sign.go new file mode 100644 index 000000000000..c40e90e8cae0 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/pkg/ssl/sign.go @@ -0,0 +1,168 @@ +/* +Package ssl implements the Secure Systems Lab signing-spec (sometimes +abbreviated SSL Siging spec. +https://github.com/secure-systems-lab/signing-spec +*/ +package ssl + +import ( + "encoding/base64" + "errors" + "fmt" +) + +// ErrUnknownKey indicates that the implementation does not recognize the +// key. +var ErrUnknownKey = errors.New("unknown key") + +// ErrNoSignature indicates that an envelope did not contain any signatures. +var ErrNoSignature = errors.New("no signature found") + +// ErrNoSigners indicates that no signer was provided. +var ErrNoSigners = errors.New("no signers provided") + +/* +Envelope captures an envelope as described by the Secure Systems Lab +Signing Specification. 
See here:
+https://github.com/secure-systems-lab/signing-spec/blob/master/envelope.md
+*/
+type Envelope struct {
+	PayloadType string      `json:"payloadType"`
+	Payload     string      `json:"payload"`
+	Signatures  []Signature `json:"signatures"`
+}
+
+/*
+Signature represents a generic in-toto signature that contains the identifier
+of the key which was used to create the signature.
+The used signature scheme has to be agreed upon by the signer and verifier
+out of band.
+The signature is a base64 encoding of the raw bytes from the signature
+algorithm.
+*/
+type Signature struct {
+	KeyID string `json:"keyid"`
+	Sig   string `json:"sig"`
+}
+
+/*
+PAE implements the DSSE Pre-Authentic Encoding
+https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition
+*/
+func PAE(payloadType, payload string) []byte {
+	return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s",
+		len(payloadType), payloadType,
+		len(payload), payload))
+}
+
+/*
+Signer defines the interface for an abstract signing algorithm.
+The Signer interface is used to inject signature algorithm implementations
+into the EnvelopeSigner. This decoupling allows any signing algorithm
+and key management system to be used.
+The full message is provided as the parameter. If the signature algorithm
+depends on hashing of the message prior to signature calculation, the
+implementor of this interface must perform such hashing.
+The function must return raw bytes representing the calculated signature
+using the current algorithm, and the key used (if applicable).
+For an example see EcdsaSigner in sign_test.go.
+*/
+type Signer interface {
+	Sign(data []byte) ([]byte, string, error)
+}
+
+// SignVerifier provides both the signing and verification interface.
+type SignVerifier interface {
+	Signer
+	Verifier
+}
+
+// EnvelopeSigner creates signed Envelopes.
+type EnvelopeSigner struct { + providers []SignVerifier + ev EnvelopeVerifier +} + +/* +NewEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer +algorithms to sign the data. +*/ +func NewEnvelopeSigner(p ...SignVerifier) (*EnvelopeSigner, error) { + var providers []SignVerifier + + for _, sv := range p { + if sv != nil { + providers = append(providers, sv) + } + } + + if len(providers) == 0 { + return nil, ErrNoSigners + } + + evps := []Verifier{} + for _, p := range providers { + evps = append(evps, p.(Verifier)) + } + + return &EnvelopeSigner{ + providers: providers, + ev: EnvelopeVerifier{ + providers: evps, + }, + }, nil +} + +/* +SignPayload signs a payload and payload type according to the SSL signing spec. +Returned is an envelope as defined here: +https://github.com/secure-systems-lab/signing-spec/blob/master/envelope.md +One signature will be added for each Signer in the EnvelopeSigner. +*/ +func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelope, error) { + var e = Envelope{ + Payload: base64.StdEncoding.EncodeToString(body), + PayloadType: payloadType, + } + + paeEnc := PAE(payloadType, string(body)) + + for _, signer := range es.providers { + sig, keyID, err := signer.Sign(paeEnc) + if err != nil { + return nil, err + } + + e.Signatures = append(e.Signatures, Signature{ + KeyID: keyID, + Sig: base64.StdEncoding.EncodeToString(sig), + }) + } + + return &e, nil +} + +/* +Verify decodes the payload and verifies the signature. +Any domain specific validation such as parsing the decoded body and +validating the payload type is left out to the caller. 
+*/
+func (es *EnvelopeSigner) Verify(e *Envelope) error {
+	return es.ev.Verify(e)
+}
+
+/*
+Both standard and url encoding are allowed:
+https://github.com/secure-systems-lab/signing-spec/blob/master/envelope.md
+*/
+func b64Decode(s string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(s)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return b, nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/pkg/ssl/verify.go b/vendor/github.com/in-toto/in-toto-golang/pkg/ssl/verify.go
new file mode 100644
index 000000000000..426968debe0f
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/pkg/ssl/verify.go
@@ -0,0 +1,71 @@
+/*
+Package ssl implements the Secure Systems Lab signing-spec (sometimes
+abbreviated SSL Signing spec).
+https://github.com/secure-systems-lab/signing-spec
+*/
+package ssl
+
+/*
+Verifier verifies a complete message against a signature and key.
+If the message was hashed prior to signature generation, the verifier
+must perform the same steps.
+If the key is not recognized ErrUnknownKey shall be returned.
+*/
+type Verifier interface {
+	Verify(keyID string, data, sig []byte) error
+}
+
+type EnvelopeVerifier struct {
+	providers []Verifier
+}
+
+func (ev *EnvelopeVerifier) Verify(e *Envelope) error {
+	if len(e.Signatures) == 0 {
+		return ErrNoSignature
+	}
+
+	// Decode payload (i.e. serialized body)
+	body, err := b64Decode(e.Payload)
+	if err != nil {
+		return err
+	}
+	// Generate PAE(payloadtype, serialized body)
+	paeEnc := PAE(e.PayloadType, string(body))
+
+	// If *any* signature is found to be incorrect, the entire verification
+	// step fails even if *some* signatures are correct.
+	verified := false
+	for _, s := range e.Signatures {
+		sig, err := b64Decode(s.Sig)
+		if err != nil {
+			return err
+		}
+
+		// Loop over the providers. If a provider recognizes the key, we exit
+		// the loop and use the result.
+ for _, v := range ev.providers { + err := v.Verify(s.KeyID, paeEnc, sig) + if err != nil { + if err == ErrUnknownKey { + continue + } + return err + } + + verified = true + break + } + } + if !verified { + return ErrUnknownKey + } + + return nil +} + +func NewEnvelopeVerifier(p ...Verifier) EnvelopeVerifier { + ev := EnvelopeVerifier{ + providers: p, + } + return ev +} diff --git a/vendor/github.com/shibumi/go-pathspec/.gitignore b/vendor/github.com/shibumi/go-pathspec/.gitignore new file mode 100644 index 000000000000..836562412fe8 --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/shibumi/go-pathspec/GO-LICENSE b/vendor/github.com/shibumi/go-pathspec/GO-LICENSE new file mode 100644 index 000000000000..74487567632c --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/GO-LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/shibumi/go-pathspec/LICENSE b/vendor/github.com/shibumi/go-pathspec/LICENSE new file mode 100644 index 000000000000..5c304d1a4a7b --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/shibumi/go-pathspec/README.md b/vendor/github.com/shibumi/go-pathspec/README.md new file mode 100644 index 000000000000..c146cf69b012 --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/README.md @@ -0,0 +1,45 @@ +# go-pathspec + +[![build](https://github.com/shibumi/go-pathspec/workflows/build/badge.svg)](https://github.com/shibumi/go-pathspec/actions?query=workflow%3Abuild) [![Coverage Status](https://coveralls.io/repos/github/shibumi/go-pathspec/badge.svg)](https://coveralls.io/github/shibumi/go-pathspec) [![PkgGoDev](https://pkg.go.dev/badge/github.com/shibumi/go-pathspec)](https://pkg.go.dev/github.com/shibumi/go-pathspec) + +go-pathspec implements gitignore-style pattern matching for paths. + +## Alternatives + +There are a few alternatives, that try to be gitignore compatible or even state +gitignore compatibility: + +### https://github.com/go-git/go-git + +go-git states it would be gitignore compatible, but actually they are missing a few +special cases. This issue describes one of the not working patterns: https://github.com/go-git/go-git/issues/108 + +What does not work is global filename pattern matching. Consider the following +`.gitignore` file: + +```gitignore +# gitignore test file +parse.go +``` + +Then `parse.go` should match on all filenames called `parse.go`. 
You can test this via +this shell script: +```shell +mkdir -p /tmp/test/internal/util +touch /tmp/test/internal/util/parse.go +cd /tmp/test/ +git init +echo "parse.go" > .gitignore +``` + +With git `parse.go` will be excluded. The go-git implementation behaves different. + +### https://github.com/monochromegane/go-gitignore + +monochromegane's go-gitignore does not support the use of `**`-operators. +This is not consistent to real gitignore behavior, too. + +## Authors + +Sander van Harmelen () +Christian Rebischke () diff --git a/vendor/github.com/shibumi/go-pathspec/gitignore.go b/vendor/github.com/shibumi/go-pathspec/gitignore.go new file mode 100644 index 000000000000..2b08d4e8a573 --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/gitignore.go @@ -0,0 +1,299 @@ +// +// Copyright 2014, Sander van Harmelen +// Copyright 2020, Christian Rebischke +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package pathspec implements git compatible gitignore pattern matching. +// See the description below, if you are unfamiliar with it: +// +// A blank line matches no files, so it can serve as a separator for readability. +// +// A line starting with # serves as a comment. Put a backslash ("\") in front of +// the first hash for patterns that begin with a hash. +// +// An optional prefix "!" which negates the pattern; any matching file excluded +// by a previous pattern will become included again. 
If a negated pattern matches, +// this will override lower precedence patterns sources. Put a backslash ("\") in +// front of the first "!" for patterns that begin with a literal "!", for example, +// "\!important!.txt". +// +// If the pattern ends with a slash, it is removed for the purpose of the following +// description, but it would only find a match with a directory. In other words, +// foo/ will match a directory foo and paths underneath it, but will not match a +// regular file or a symbolic link foo (this is consistent with the way how pathspec +// works in general in Git). +// +// If the pattern does not contain a slash /, Git treats it as a shell glob pattern +// and checks for a match against the pathname relative to the location of the +// .gitignore file (relative to the toplevel of the work tree if not from a +// .gitignore file). +// +// Otherwise, Git treats the pattern as a shell glob suitable for consumption by +// fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will not match +// a / in the pathname. For example, "Documentation/*.html" matches +// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or/ +// "tools/perf/Documentation/perf.html". +// +// A leading slash matches the beginning of the pathname. For example, "/*.c" +// matches "cat-file.c" but not "mozilla-sha1/sha1.c". +// +// Two consecutive asterisks ("**") in patterns matched against full pathname +// may have special meaning: +// +// A leading "**" followed by a slash means match in all directories. For example, +// "**/foo" matches file or directory "foo" anywhere, the same as pattern "foo". +// "**/foo/bar" matches file or directory "bar" anywhere that is directly under +// directory "foo". +// +// A trailing "/" matches everything inside. For example, "abc/" matches all files +// inside directory "abc", relative to the location of the .gitignore file, with +// infinite depth. 
+// +// A slash followed by two consecutive asterisks then a slash matches zero or more +// directories. For example, "a/**/b" matches "a/b", "a/x/b", "a/x/y/b" and so on. +// +// Other consecutive asterisks are considered invalid. +package pathspec + +import ( + "bufio" + "bytes" + "io" + "path/filepath" + "regexp" + "strings" +) + +type gitIgnorePattern struct { + Regex string + Include bool +} + +// GitIgnore uses a string slice of patterns for matching on a filepath string. +// On match it returns true, otherwise false. On error it passes the error through. +func GitIgnore(patterns []string, name string) (ignore bool, err error) { + for _, pattern := range patterns { + p := parsePattern(pattern) + // Convert Windows paths to Unix paths + name = filepath.ToSlash(name) + match, err := regexp.MatchString(p.Regex, name) + if err != nil { + return ignore, err + } + if match { + if p.Include { + return false, nil + } + ignore = true + } + } + return ignore, nil +} + +// ReadGitIgnore implements the io.Reader interface for reading a gitignore file +// line by line. It behaves exactly like the GitIgnore function. The only difference +// is that GitIgnore works on a string slice. +// +// ReadGitIgnore returns a boolean value if we match or not and an error. +func ReadGitIgnore(content io.Reader, name string) (ignore bool, err error) { + scanner := bufio.NewScanner(content) + + for scanner.Scan() { + pattern := strings.TrimSpace(scanner.Text()) + if len(pattern) == 0 || pattern[0] == '#' { + continue + } + p := parsePattern(pattern) + // Convert Windows paths to Unix paths + name = filepath.ToSlash(name) + match, err := regexp.MatchString(p.Regex, name) + if err != nil { + return ignore, err + } + if match { + if p.Include { + return false, scanner.Err() + } + ignore = true + } + } + return ignore, scanner.Err() +} + +func parsePattern(pattern string) *gitIgnorePattern { + p := &gitIgnorePattern{} + + // An optional prefix "!" 
which negates the pattern; any matching file + // excluded by a previous pattern will become included again. + if strings.HasPrefix(pattern, "!") { + pattern = pattern[1:] + p.Include = true + } else { + p.Include = false + } + + // Remove leading back-slash escape for escaped hash ('#') or + // exclamation mark ('!'). + if strings.HasPrefix(pattern, "\\") { + pattern = pattern[1:] + } + + // Split pattern into segments. + patternSegs := strings.Split(pattern, "/") + + // A pattern beginning with a slash ('/') will only match paths + // directly on the root directory instead of any descendant paths. + // So remove empty first segment to make pattern absoluut to root. + // A pattern without a beginning slash ('/') will match any + // descendant path. This is equivilent to "**/{pattern}". So + // prepend with double-asterisks to make pattern relative to + // root. + if patternSegs[0] == "" { + patternSegs = patternSegs[1:] + } else if patternSegs[0] != "**" { + patternSegs = append([]string{"**"}, patternSegs...) + } + + // A pattern ending with a slash ('/') will match all descendant + // paths of if it is a directory but not if it is a regular file. + // This is equivalent to "{pattern}/**". So, set last segment to + // double asterisks to include all descendants. + if patternSegs[len(patternSegs)-1] == "" { + patternSegs[len(patternSegs)-1] = "**" + } + + // Build regular expression from pattern. + var expr bytes.Buffer + expr.WriteString("^") + needSlash := false + + for i, seg := range patternSegs { + switch seg { + case "**": + switch { + case i == 0 && i == len(patternSegs)-1: + // A pattern consisting solely of double-asterisks ('**') + // will match every path. + expr.WriteString(".+") + case i == 0: + // A normalized pattern beginning with double-asterisks + // ('**') will match any leading path segments. 
+ expr.WriteString("(?:.+/)?") + needSlash = false + case i == len(patternSegs)-1: + // A normalized pattern ending with double-asterisks ('**') + // will match any trailing path segments. + expr.WriteString("/.+") + default: + // A pattern with inner double-asterisks ('**') will match + // multiple (or zero) inner path segments. + expr.WriteString("(?:/.+)?") + needSlash = true + } + case "*": + // Match single path segment. + if needSlash { + expr.WriteString("/") + } + expr.WriteString("[^/]+") + needSlash = true + default: + // Match segment glob pattern. + if needSlash { + expr.WriteString("/") + } + expr.WriteString(translateGlob(seg)) + needSlash = true + } + } + expr.WriteString("$") + p.Regex = expr.String() + return p +} + +// NOTE: This is derived from `fnmatch.translate()` and is similar to +// the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set. +func translateGlob(glob string) string { + var regex bytes.Buffer + escape := false + + for i := 0; i < len(glob); i++ { + char := glob[i] + // Escape the character. + switch { + case escape: + escape = false + regex.WriteString(regexp.QuoteMeta(string(char))) + case char == '\\': + // Escape character, escape next character. + escape = true + case char == '*': + // Multi-character wildcard. Match any string (except slashes), + // including an empty string. + regex.WriteString("[^/]*") + case char == '?': + // Single-character wildcard. Match any single character (except + // a slash). + regex.WriteString("[^/]") + case char == '[': + regex.WriteString(translateBracketExpression(&i, glob)) + default: + // Regular character, escape it for regex. + regex.WriteString(regexp.QuoteMeta(string(char))) + } + } + return regex.String() +} + +// Bracket expression wildcard. Except for the beginning +// exclamation mark, the whole bracket expression can be used +// directly as regex but we have to find where the expression +// ends. +// - "[][!]" matches ']', '[' and '!'. +// - "[]-]" matches ']' and '-'. 
+// - "[!]a-]" matches any character except ']', 'a' and '-'. +func translateBracketExpression(i *int, glob string) string { + regex := string(glob[*i]) + *i++ + j := *i + + // Pass bracket expression negation. + if j < len(glob) && glob[j] == '!' { + j++ + } + // Pass first closing bracket if it is at the beginning of the + // expression. + if j < len(glob) && glob[j] == ']' { + j++ + } + // Find closing bracket. Stop once we reach the end or find it. + for j < len(glob) && glob[j] != ']' { + j++ + } + + if j < len(glob) { + if glob[*i] == '!' { + regex = regex + "^" + *i++ + } + regex = regexp.QuoteMeta(glob[*i:j]) + *i = j + } else { + // Failed to find closing bracket, treat opening bracket as a + // bracket literal instead of as an expression. + regex = regexp.QuoteMeta(string(glob[*i])) + } + return "[" + regex + "]" +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 437be817c92b..80abb1b0937c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -461,6 +461,10 @@ github.com/hashicorp/go-retryablehttp # github.com/hashicorp/golang-lru v0.5.3 ## explicit; go 1.12 github.com/hashicorp/golang-lru/simplelru +# github.com/in-toto/in-toto-golang v0.3.3 +## explicit; go 1.17 +github.com/in-toto/in-toto-golang/in_toto +github.com/in-toto/in-toto-golang/pkg/ssl # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath @@ -552,6 +556,9 @@ github.com/russross/blackfriday/v2 # github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 ## explicit github.com/serialx/hashring +# github.com/shibumi/go-pathspec v1.2.0 +## explicit; go 1.14 +github.com/shibumi/go-pathspec # github.com/shurcooL/sanitized_anchor_name v1.0.0 ## explicit github.com/shurcooL/sanitized_anchor_name