diff --git a/acceptance/detector_test.go b/acceptance/detector_test.go index ad33ef119..e537df308 100644 --- a/acceptance/detector_test.go +++ b/acceptance/detector_test.go @@ -362,11 +362,13 @@ fail: fail_detect_buildpack@some_version "-extensions=/cnb/extensions", "-generated=/layers/generated", "-log-level=debug", + "-run=/layers/run.toml", // /cnb/run.toml is the default location of run.toml ), ) t.Log("runs /bin/detect for buildpacks and extensions") h.AssertStringContains(t, output, "Platform requested experimental feature 'Dockerfiles'") + h.AssertStringContains(t, output, "FOO=val-from-build-config") h.AssertStringContains(t, output, "simple_extension: output from /bin/detect") t.Log("writes group.toml") foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml") diff --git a/acceptance/testdata/analyzer/container/cnb/run.toml b/acceptance/testdata/analyzer/container/cnb/run.toml index 0bdc66f05..2dd8a99bf 100644 --- a/acceptance/testdata/analyzer/container/cnb/run.toml +++ b/acceptance/testdata/analyzer/container/cnb/run.toml @@ -1,5 +1,5 @@ -[[image]] +[[images]] image = "some-run-image-from-run-toml" -[[image]] +[[images]] image = "some-other-run-image" diff --git a/acceptance/testdata/creator/container/cnb/run.toml b/acceptance/testdata/creator/container/cnb/run.toml index 0bdc66f05..2dd8a99bf 100644 --- a/acceptance/testdata/creator/container/cnb/run.toml +++ b/acceptance/testdata/creator/container/cnb/run.toml @@ -1,5 +1,5 @@ -[[image]] +[[images]] image = "some-run-image-from-run-toml" -[[image]] +[[images]] image = "some-other-run-image" diff --git a/acceptance/testdata/detector/container/cnb/build-config/env/FOO b/acceptance/testdata/detector/container/cnb/build-config/env/FOO new file mode 100644 index 000000000..97b76931b --- /dev/null +++ b/acceptance/testdata/detector/container/cnb/build-config/env/FOO @@ -0,0 +1 @@ +val-from-build-config \ No newline at end of file diff --git a/acceptance/testdata/detector/container/cnb/buildpacks/buildpack_for_ext/buildpack_for_ext_version/bin/detect b/acceptance/testdata/detector/container/cnb/buildpacks/buildpack_for_ext/buildpack_for_ext_version/bin/detect index 67b322e4b..abe341f91 100755 --- a/acceptance/testdata/detector/container/cnb/buildpacks/buildpack_for_ext/buildpack_for_ext_version/bin/detect +++ b/acceptance/testdata/detector/container/cnb/buildpacks/buildpack_for_ext/buildpack_for_ext_version/bin/detect @@ -1,5 +1,8 @@ #!/usr/bin/env bash +echo "ENV" +env + plan_path=$2 cat >> "${plan_path}" <= 0.7 - if previousImageIDReference, err = a.getImageIdentifier(a.PreviousImage); err != nil { + if previousImageRef, err = a.getImageIdentifier(a.PreviousImage); err != nil { return platform.AnalyzedMetadata{}, errors.Wrap(err, "identifying previous image") } @@ -214,7 +214,7 @@ func (a *Analyzer) Analyze() (platform.AnalyzedMetadata, error) { } if a.RunImage != nil { - runImageIDReference, err = a.getImageIdentifier(a.RunImage) + runImageRef, err = a.getImageIdentifier(a.RunImage) if err != nil { return platform.AnalyzedMetadata{}, errors.Wrap(err, "identifying run image") } @@ -233,11 +233,9 @@ func (a *Analyzer) Analyze() (platform.AnalyzedMetadata, error) { } return platform.AnalyzedMetadata{ - PreviousImage: &platform.ImageIdentifier{ - Reference: previousImageIDReference, - }, - RunImage: &platform.RunImage{Reference: runImageIDReference}, - Metadata: appMeta, + PreviousImage: &platform.ImageIdentifier{Reference: previousImageRef}, + RunImage: &platform.RunImage{Reference: runImageRef}, + Metadata: appMeta, }, nil } 
diff --git a/api/apis.go b/api/apis.go index 91c22d805..bd15042a2 100644 --- a/api/apis.go +++ b/api/apis.go @@ -9,7 +9,7 @@ import ( var ( Platform = newApisMustParse([]string{"0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", "0.10", "0.11", "0.12"}, []string{"0.3", "0.4", "0.5", "0.6"}) - Buildpack = newApisMustParse([]string{"0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9"}, []string{"0.2", "0.3", "0.4", "0.5", "0.6"}) + Buildpack = newApisMustParse([]string{"0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", "0.10"}, []string{"0.2", "0.3", "0.4", "0.5", "0.6"}) ) type APIs struct { diff --git a/buildpack/dockerfile.go b/buildpack/dockerfile.go index 7e347a800..c9328bc44 100644 --- a/buildpack/dockerfile.go +++ b/buildpack/dockerfile.go @@ -2,6 +2,7 @@ package buildpack import ( "bytes" + "errors" "fmt" "os" "strings" @@ -15,14 +16,30 @@ import ( const ( DockerfileKindBuild = "build" DockerfileKindRun = "run" + + buildDockerfileName = "build.Dockerfile" + runDockerfileName = "run.Dockerfile" + + baseImageArgName = "base_image" + baseImageArgRef = "${base_image}" + + errArgumentsNotPermitted = "run.Dockerfile should not expect arguments" + errBuildMissingRequiredARGCommand = "build.Dockerfile did not start with required ARG command" + errBuildMissingRequiredFROMCommand = "build.Dockerfile did not contain required FROM ${base_image} command" + errMissingRequiredStage = "%s should have at least one stage" + errMissingRequiredInstruction = "%s should have at least one instruction" + errMultiStageNotPermitted = "%s is not permitted to use multistage build" + errRunOtherInstructionsNotPermitted = "run.Dockerfile is not permitted to have instructions other than FROM" + warnCommandNotRecommended = "%s command %s on line %d is not recommended" ) -var permittedCommandsBuild = []string{"FROM", "ADD", "ARG", "COPY", "ENV", "LABEL", "RUN", "SHELL", "USER", "WORKDIR"} +var permittedCommands = []string{"FROM", "ADD", "ARG", "COPY", "ENV", "LABEL", "RUN", "SHELL", "USER", "WORKDIR"} type DockerfileInfo struct { ExtensionID string Kind string Path string + NewBase string } type ExtendConfig struct { @@ -64,80 +81,77 @@ func VerifyBuildDockerfile(dockerfile string, logger log.Logger) error { // validate only 1 FROM if len(stages) > 1 { - return fmt.Errorf("build.Dockerfile is not permitted to use multistage build") + return fmt.Errorf(errMultiStageNotPermitted, buildDockerfileName) } // validate only permitted Commands for _, stage := range stages { for _, command := range stage.Commands { found := false - for _, permittedCommand := range permittedCommandsBuild { + for _, permittedCommand := range permittedCommands { if permittedCommand == strings.ToUpper(command.Name()) { found = true break } } if !found { - logger.Warnf("build.Dockerfile command %s on line %d is not recommended", strings.ToUpper(command.Name()), command.Location()[0].Start.Line) + logger.Warnf(warnCommandNotRecommended, buildDockerfileName, strings.ToUpper(command.Name()), command.Location()[0].Start.Line) } } } // validate build.Dockerfile preamble if len(margs) != 1 { - return fmt.Errorf("build.Dockerfile did not start with required ARG command") + return errors.New(errBuildMissingRequiredARGCommand) } - if margs[0].Args[0].Key != "base_image" { - return fmt.Errorf("build.Dockerfile did not start with required ARG base_image command") + if margs[0].Args[0].Key != baseImageArgName { + return errors.New(errBuildMissingRequiredARGCommand) } // sanity check to prevent panic if len(stages) == 0 { - return 
fmt.Errorf("build.Dockerfile should have at least one stage") + return fmt.Errorf(errMissingRequiredStage, buildDockerfileName) } - if stages[0].BaseName != "${base_image}" { - return fmt.Errorf("build.Dockerfile did not contain required FROM ${base_image} command") + + if stages[0].BaseName != baseImageArgRef { + return errors.New(errBuildMissingRequiredFROMCommand) } return nil } -func VerifyRunDockerfile(dockerfile string) error { - stages, margs, err := parseDockerfile(dockerfile) +func VerifyRunDockerfile(dockerfile string, logger log.Logger) (string, error) { + stages, _, err := parseDockerfile(dockerfile) if err != nil { - return err + return "", err } // validate only 1 FROM if len(stages) > 1 { - return fmt.Errorf("run.Dockerfile is not permitted to use multistage build") - } - - // validate FROM does not expect argument - if len(margs) > 0 { - return fmt.Errorf("run.Dockerfile should not expect arguments") + return "", fmt.Errorf(errMultiStageNotPermitted, runDockerfileName) } - - // sanity check to prevent panic if len(stages) == 0 { - return fmt.Errorf("run.Dockerfile should have at least one stage") + return "", fmt.Errorf(errMissingRequiredStage, runDockerfileName) } - // validate no instructions in stage - if len(stages[0].Commands) != 0 { - return fmt.Errorf("run.Dockerfile is not permitted to have instructions other than FROM") + var newBase string + // validate only permitted Commands + for _, stage := range stages { + if stage.BaseName != baseImageArgRef { + newBase = stage.BaseName + } + for _, command := range stage.Commands { + found := false + for _, permittedCommand := range permittedCommands { + if permittedCommand == strings.ToUpper(command.Name()) { + found = true + break + } + } + if !found { + logger.Warnf(warnCommandNotRecommended, runDockerfileName, strings.ToUpper(command.Name()), command.Location()[0].Start.Line) + } + } } - return nil -} - -func RetrieveFirstFromImageNameFromDockerfile(dockerfile string) (string, error) { - ins, _, err := parseDockerfile(dockerfile) - if err != nil { - return "", err - } - // sanity check to prevent panic - if len(ins) == 0 { - return "", fmt.Errorf("expected at least one instruction") - } - return ins[0].BaseName, nil + return newBase, nil } diff --git a/buildpack/dockerfile_test.go b/buildpack/dockerfile_test.go index f75fcef54..ef22f2902 100644 --- a/buildpack/dockerfile_test.go +++ b/buildpack/dockerfile_test.go @@ -37,15 +37,12 @@ func testDockerfile(t *testing.T, when spec.G, it spec.S) { }) it.After(func() { - os.RemoveAll(tmpDir) + _ = os.RemoveAll(tmpDir) }) when("verifying dockerfiles", func() { - when("build", func() { - when("valid", func() { - it("succeeds", func() { - dockerfileContents := []string{ - ` + validCases := []string{ + ` ARG base_image=0 FROM ${base_image} @@ -79,8 +76,52 @@ RUN echo ${build_id} RUN echo "this statement is never cached" `, - } - for i, content := range dockerfileContents { + } + + type testCase struct { + dockerfileContent string + expectedWarning string + } + + warnCases := []testCase{ + { + dockerfileContent: `CMD ["some-cmd"]`, + expectedWarning: "command CMD on line 4 is not recommended", + }, + { + dockerfileContent: `MAINTAINER "some-maintainer"`, + expectedWarning: "command MAINTAINER on line 4 is not recommended", + }, + { + dockerfileContent: `EXPOSE 80/tcp`, + expectedWarning: "command EXPOSE on line 4 is not recommended", + }, + { + dockerfileContent: `ENTRYPOINT ["some-executable"]`, + expectedWarning: "command ENTRYPOINT on line 4 is not recommended", + }, + { + 
dockerfileContent: `VOLUME ["/some-dir"]`, + expectedWarning: "command VOLUME on line 4 is not recommended", + }, + { + dockerfileContent: `ONBUILD RUN echo "hello" > /world.txt`, + expectedWarning: "command ONBUILD on line 4 is not recommended", + }, + { + dockerfileContent: `STOPSIGNAL SIGKILL`, + expectedWarning: "command STOPSIGNAL on line 4 is not recommended", + }, + { + dockerfileContent: `HEALTHCHECK NONE`, + expectedWarning: "command HEALTHCHECK on line 4 is not recommended", + }, + } + + when("build", func() { + when("valid", func() { + it("succeeds", func() { + for i, content := range validCases { dockerfileName := fmt.Sprintf("Dockerfile%d", i) dockerfilePath := filepath.Join(tmpDir, dockerfileName) h.AssertNil(t, os.WriteFile(dockerfilePath, []byte(content), 0600)) @@ -95,56 +136,18 @@ RUN echo "this statement is never cached" when("valid, but violates SHOULD directives in spec", func() { it("succeeds with warning", func() { - type testCase struct { - dockerfileContent string - expectedWarning string - } preamble := ` ARG base_image=0 FROM ${base_image} ` - testCases := []testCase{ - { - dockerfileContent: preamble + `CMD ["some-cmd"]`, - expectedWarning: "build.Dockerfile command CMD on line 4 is not recommended", - }, - { - dockerfileContent: preamble + `MAINTAINER "some-maintainer"`, - expectedWarning: "build.Dockerfile command MAINTAINER on line 4 is not recommended", - }, - { - dockerfileContent: preamble + `EXPOSE 80/tcp`, - expectedWarning: "build.Dockerfile command EXPOSE on line 4 is not recommended", - }, - { - dockerfileContent: preamble + `ENTRYPOINT ["some-executable"]`, - expectedWarning: "build.Dockerfile command ENTRYPOINT on line 4 is not recommended", - }, - { - dockerfileContent: preamble + `VOLUME ["/some-dir"]`, - expectedWarning: "build.Dockerfile command VOLUME on line 4 is not recommended", - }, - { - dockerfileContent: preamble + `ONBUILD RUN echo "hello" > /world.txt`, - expectedWarning: "build.Dockerfile command ONBUILD on line 4 is not recommended", - }, - { - dockerfileContent: preamble + `STOPSIGNAL SIGKILL`, - expectedWarning: "build.Dockerfile command STOPSIGNAL on line 4 is not recommended", - }, - { - dockerfileContent: preamble + `HEALTHCHECK NONE`, - expectedWarning: "build.Dockerfile command HEALTHCHECK on line 4 is not recommended", - }, - } - for i, tc := range testCases { + for i, tc := range warnCases { dockerfilePath := filepath.Join(tmpDir, fmt.Sprintf("Dockerfile%d", i)) - h.AssertNil(t, os.WriteFile(dockerfilePath, []byte(tc.dockerfileContent), 0600)) + h.AssertNil(t, os.WriteFile(dockerfilePath, []byte(preamble+tc.dockerfileContent), 0600)) logHandler = memory.New() logger = &log.Logger{Handler: logHandler} err := buildpack.VerifyBuildDockerfile(dockerfilePath, logger) h.AssertNil(t, err) - assertLogEntry(t, logHandler, tc.expectedWarning) + assertLogEntry(t, logHandler, "build.Dockerfile "+tc.expectedWarning) } }) }) @@ -193,20 +196,45 @@ COPY --from=0 /some-source.txt ./some-dest.txt when("run", func() { when("valid", func() { it("succeeds", func() { - dockerfileContents := []string{ - `FROM some-run-image`, - } - for i, content := range dockerfileContents { + for i, content := range validCases { dockerfileName := fmt.Sprintf("Dockerfile%d", i) dockerfilePath := filepath.Join(tmpDir, dockerfileName) h.AssertNil(t, os.WriteFile(dockerfilePath, []byte(content), 0600)) - err := buildpack.VerifyRunDockerfile(dockerfilePath) + _, err := buildpack.VerifyRunDockerfile(dockerfilePath, logger) if err != nil { t.Fatalf("Error verifying 
Dockerfile %d: %s", i, err) } h.AssertEq(t, len(logHandler.Entries), 0) } }) + + when("violates SHOULD directives in spec", func() { + it("succeeds with warning", func() { + preamble := ` +ARG base_image=0 +FROM ${base_image} +` + for i, tc := range warnCases { + dockerfilePath := filepath.Join(tmpDir, fmt.Sprintf("Dockerfile%d", i)) + h.AssertNil(t, os.WriteFile(dockerfilePath, []byte(preamble+tc.dockerfileContent), 0600)) + logHandler = memory.New() + logger = &log.Logger{Handler: logHandler} + _, err := buildpack.VerifyRunDockerfile(dockerfilePath, logger) + h.AssertNil(t, err) + assertLogEntry(t, logHandler, "run.Dockerfile "+tc.expectedWarning) + } + }) + }) + + when("switching the runtime base image", func() { + it("returns the new base image", func() { + dockerfilePath := filepath.Join(tmpDir, "run.Dockerfile") + h.AssertNil(t, os.WriteFile(dockerfilePath, []byte(`FROM some-base-image`), 0600)) + newBase, err := buildpack.VerifyRunDockerfile(dockerfilePath, logger) + h.AssertNil(t, err) + h.AssertEq(t, newBase, "some-base-image") + }) + }) }) when("invalid", func() { @@ -224,21 +252,18 @@ COPY --from=0 /some-source.txt ./some-dest.txt dockerfileContent: ` ARG base_image=0 FROM ${base_image} -`, - expectedError: "run.Dockerfile should not expect arguments", - }, - { - dockerfileContent: ` -FROM some-run-image RUN echo "hello" > /world.txt + +FROM some-base-image +COPY --from=0 /some-source.txt ./some-dest.txt `, - expectedError: "run.Dockerfile is not permitted to have instructions other than FROM", + expectedError: "run.Dockerfile is not permitted to use multistage build", }, } for i, tc := range testCases { dockerfilePath := filepath.Join(tmpDir, fmt.Sprintf("Dockerfile%d", i)) h.AssertNil(t, os.WriteFile(dockerfilePath, []byte(tc.dockerfileContent), 0600)) - err := buildpack.VerifyRunDockerfile(dockerfilePath) + _, err := buildpack.VerifyRunDockerfile(dockerfilePath, logger) h.AssertError(t, err, tc.expectedError) } }) diff --git a/buildpack/generate.go b/buildpack/generate.go index 13dfba9d5..9598cc98d 100644 --- a/buildpack/generate.go +++ b/buildpack/generate.go @@ -1,6 +1,7 @@ package buildpack import ( + "fmt" "io" "os" "os/exec" @@ -112,13 +113,13 @@ func readOutputFilesExt(d ExtDescriptor, extOutputDir string, extPlanIn Plan, lo br.MetRequires = names(extPlanIn.Entries) // set Dockerfiles - if dfInfo, found, err = addDockerfileByPathAndType(d, extOutputDir, "run.Dockerfile", DockerfileKindRun, logger); err != nil { + if dfInfo, found, err = findDockerfileFor(d, extOutputDir, DockerfileKindRun, logger); err != nil { return GenerateOutputs{}, err } else if found { br.Dockerfiles = append(br.Dockerfiles, dfInfo) } - if dfInfo, found, err = addDockerfileByPathAndType(d, extOutputDir, "build.Dockerfile", DockerfileKindBuild, logger); err != nil { + if dfInfo, found, err = findDockerfileFor(d, extOutputDir, DockerfileKindBuild, logger); err != nil { return GenerateOutputs{}, err } else if found { br.Dockerfiles = append(br.Dockerfiles, dfInfo) @@ -129,10 +130,10 @@ func readOutputFilesExt(d ExtDescriptor, extOutputDir string, extPlanIn Plan, lo return br, nil } -func addDockerfileByPathAndType(d ExtDescriptor, extOutputDir string, dockerfileName string, dockerfileType string, _ log.Logger) (DockerfileInfo, bool, error) { +func findDockerfileFor(d ExtDescriptor, extOutputDir string, kind string, logger log.Logger) (DockerfileInfo, bool, error) { var err error - dockerfile := filepath.Join(extOutputDir, dockerfileName) - if _, err = os.Stat(dockerfile); err != nil { + 
dockerfilePath := filepath.Join(extOutputDir, fmt.Sprintf("%s.Dockerfile", kind)) + if _, err = os.Stat(dockerfilePath); err != nil { // ignore file not found, no dockerfile to add. if !os.IsNotExist(err) { // any other errors are critical. @@ -140,5 +141,21 @@ func addDockerfileByPathAndType(d ExtDescriptor, extOutputDir string, dockerfile } return DockerfileInfo{}, false, nil } - return DockerfileInfo{ExtensionID: d.Extension.ID, Kind: dockerfileType, Path: dockerfile}, true, nil + + newBase, err := verifyDockerfileFor(d, dockerfilePath, kind, logger) + if err != nil { + return DockerfileInfo{}, true, fmt.Errorf("failed to parse %s.Dockerfile for extension %s: %w", kind, d.Extension.ID, err) + } + return DockerfileInfo{ExtensionID: d.Extension.ID, Kind: kind, Path: dockerfilePath, NewBase: newBase}, true, nil +} + +func verifyDockerfileFor(d ExtDescriptor, path string, kind string, logger log.Logger) (string, error) { + switch kind { + case DockerfileKindBuild: + return "", VerifyBuildDockerfile(path, logger) + case DockerfileKindRun: + return VerifyRunDockerfile(path, logger) + default: + return "", nil + } } diff --git a/buildpack/generate_test.go b/buildpack/generate_test.go index b398745ba..b8ddbf76e 100644 --- a/buildpack/generate_test.go +++ b/buildpack/generate_test.go @@ -263,32 +263,75 @@ func testGenerate(t *testing.T, when spec.G, it spec.S) { when("build result", func() { when("dockerfiles", func() { - it("includes run.Dockerfile", func() { - h.Mkfile(t, - "", - filepath.Join(appDir, "run.Dockerfile-A-v1"), - ) - - br, err := executor.Generate(descriptor, inputs, logger) - h.AssertNil(t, err) - - h.AssertEq(t, br.Dockerfiles[0].ExtensionID, "A") - h.AssertEq(t, br.Dockerfiles[0].Kind, buildpack.DockerfileKindRun) - h.AssertEq(t, br.Dockerfiles[0].Path, filepath.Join(outputDir, "A", "run.Dockerfile")) + when("run.Dockerfile", func() { + it("is included", func() { + h.Mkfile(t, + "ARG base_image\n"+ + "FROM ${base_image}", + filepath.Join(appDir, "run.Dockerfile-A-v1"), + ) + + br, err := executor.Generate(descriptor, inputs, logger) + h.AssertNil(t, err) + + h.AssertEq(t, br.Dockerfiles[0].ExtensionID, "A") + h.AssertEq(t, br.Dockerfiles[0].Kind, buildpack.DockerfileKindRun) + h.AssertEq(t, br.Dockerfiles[0].Path, filepath.Join(outputDir, "A", "run.Dockerfile")) + h.AssertEq(t, br.Dockerfiles[0].NewBase, "") + }) + + it("is validated", func() { + h.Mkfile(t, + "SOME-INVALID-CONTENT", + filepath.Join(appDir, "run.Dockerfile-A-v1"), + ) + _, err := executor.Generate(descriptor, inputs, logger) + h.AssertError(t, err, "failed to parse run.Dockerfile for extension A: dockerfile parse error line 1: unknown instruction: SOME-INVALID-CONTENT") + }) + + when("switching the runtime base image", func() { + it("image reference is included", func() { + h.Mkfile(t, + "FROM some-new-base-image", + filepath.Join(appDir, "run.Dockerfile-A-v1"), + ) + + br, err := executor.Generate(descriptor, inputs, logger) + h.AssertNil(t, err) + + h.AssertEq(t, br.Dockerfiles[0].ExtensionID, "A") + h.AssertEq(t, br.Dockerfiles[0].Kind, buildpack.DockerfileKindRun) + h.AssertEq(t, br.Dockerfiles[0].Path, filepath.Join(outputDir, "A", "run.Dockerfile")) + h.AssertEq(t, br.Dockerfiles[0].NewBase, "some-new-base-image") + }) + }) }) - it("includes build.Dockerfile", func() { - h.Mkfile(t, - "", - filepath.Join(appDir, "build.Dockerfile-A-v1"), - ) - - br, err := executor.Generate(descriptor, inputs, logger) - h.AssertNil(t, err) - - h.AssertEq(t, br.Dockerfiles[0].ExtensionID, "A") - h.AssertEq(t, 
br.Dockerfiles[0].Kind, buildpack.DockerfileKindBuild) - h.AssertEq(t, br.Dockerfiles[0].Path, filepath.Join(outputDir, "A", "build.Dockerfile")) + when("build.Dockerfile", func() { + it("is included", func() { + h.Mkfile(t, + "ARG base_image\n"+ + "FROM ${base_image}", + filepath.Join(appDir, "build.Dockerfile-A-v1"), + ) + + br, err := executor.Generate(descriptor, inputs, logger) + h.AssertNil(t, err) + + h.AssertEq(t, br.Dockerfiles[0].ExtensionID, "A") + h.AssertEq(t, br.Dockerfiles[0].Kind, buildpack.DockerfileKindBuild) + h.AssertEq(t, br.Dockerfiles[0].Path, filepath.Join(outputDir, "A", "build.Dockerfile")) + }) + + it("is validated", func() { + h.Mkfile(t, + "SOME-INVALID-CONTENT", + filepath.Join(appDir, "build.Dockerfile-A-v1"), + ) + + _, err := executor.Generate(descriptor, inputs, logger) + h.AssertError(t, err, "failed to parse build.Dockerfile for extension A: dockerfile parse error line 1: unknown instruction: SOME-INVALID-CONTENT") + }) }) }) diff --git a/cmd/lifecycle/detector.go b/cmd/lifecycle/detector.go index 2ca670ed3..68108610d 100644 --- a/cmd/lifecycle/detector.go +++ b/cmd/lifecycle/detector.go @@ -18,28 +18,24 @@ type detectCmd struct { // DefineFlags defines the flags that are considered valid and reads their values (if provided). func (d *detectCmd) DefineFlags() { - switch { - case d.PlatformAPI.AtLeast("0.10"): + if d.PlatformAPI.AtLeast("0.12") { + cli.FlagRunPath(&d.RunPath) + } + if d.PlatformAPI.AtLeast("0.11") { + cli.FlagBuildConfigDir(&d.BuildConfigDir) + } + if d.PlatformAPI.AtLeast("0.10") { cli.FlagAnalyzedPath(&d.AnalyzedPath) - cli.FlagAppDir(&d.AppDir) - cli.FlagBuildpacksDir(&d.BuildpacksDir) cli.FlagExtensionsDir(&d.ExtensionsDir) cli.FlagGeneratedDir(&d.GeneratedDir) - cli.FlagGroupPath(&d.GroupPath) - cli.FlagLayersDir(&d.LayersDir) - cli.FlagOrderPath(&d.OrderPath) - cli.FlagPlanPath(&d.PlanPath) - cli.FlagPlatformDir(&d.PlatformDir) - default: - cli.FlagAppDir(&d.AppDir) - cli.FlagBuildpacksDir(&d.BuildpacksDir) - cli.FlagGroupPath(&d.GroupPath) - cli.FlagOrderPath(&d.OrderPath) - cli.FlagLayersDir(&d.LayersDir) - cli.FlagPlanPath(&d.PlanPath) - cli.FlagPlatformDir(&d.PlatformDir) - cli.FlagBuildConfigDir(&d.BuildConfigDir) } + cli.FlagAppDir(&d.AppDir) + cli.FlagBuildpacksDir(&d.BuildpacksDir) + cli.FlagGroupPath(&d.GroupPath) + cli.FlagLayersDir(&d.LayersDir) + cli.FlagOrderPath(&d.OrderPath) + cli.FlagPlanPath(&d.PlanPath) + cli.FlagPlatformDir(&d.PlatformDir) } // Args validates arguments and flags, and fills in default values. @@ -96,16 +92,19 @@ func (d *detectCmd) Exec() error { if group.HasExtensions() { generatorFactory := lifecycle.NewGeneratorFactory( &cmd.BuildpackAPIVerifier{}, + lifecycle.Config, dirStore, ) var generator *lifecycle.Generator generator, err = generatorFactory.NewGenerator( + d.AnalyzedPath, d.AppDir, d.BuildConfigDir, group.GroupExtensions, d.GeneratedDir, plan, d.PlatformDir, + d.RunPath, cmd.Stdout, cmd.Stderr, cmd.DefaultLogger, ) @@ -117,14 +116,9 @@ func (d *detectCmd) Exec() error { if err != nil { return d.unwrapGenerateFail(err) } - // was a custom run image configured? 
- if result.RunImage != "" { - cmd.DefaultLogger.Debug("Updating analyzed metadata with new runImage") - detector.AnalyzeMD.RunImage = &platform.RunImage{Reference: result.RunImage} - if err = d.writeGenerateData(detector.AnalyzeMD); err != nil { - return err - } - cmd.DefaultLogger.Debugf("Updated analyzed metadata with new runImage '%s'", result.RunImage) + + if err = d.writeGenerateData(result.AnalyzedMD); err != nil { + return err } // was the build plan updated? if result.UsePlan { diff --git a/exporter_test.go b/exporter_test.go index 1e6ee37d8..293654d87 100644 --- a/exporter_test.go +++ b/exporter_test.go @@ -395,7 +395,7 @@ func testExporter(t *testing.T, when spec.G, it spec.S) { it("saves run image metadata to the resulting image", func() { opts.Stack = platform.StackMetadata{ - RunImage: platform.RunImageMetadata{ + RunImage: platform.RunImageForExport{ Image: "some/run", Mirrors: []string{"registry.example.com/some/run", "other.example.com/some/run"}, }, diff --git a/generator.go b/generator.go index 3bafa322c..012875d32 100644 --- a/generator.go +++ b/generator.go @@ -19,36 +19,43 @@ type Generator struct { BuildConfigDir string GeneratedDir string // e.g., /generated PlatformDir string + AnalyzedMD platform.AnalyzedMetadata DirStore DirStore Executor buildpack.GenerateExecutor Extensions []buildpack.GroupElement Logger log.Logger Out, Err io.Writer Plan platform.BuildPlan + RunMetadata platform.RunMetadata } type GeneratorFactory struct { - apiVerifier BuildpackAPIVerifier - dirStore DirStore + apiVerifier BuildpackAPIVerifier + configHandler ConfigHandler + dirStore DirStore } func NewGeneratorFactory( apiVerifier BuildpackAPIVerifier, + configHandler ConfigHandler, dirStore DirStore, ) *GeneratorFactory { return &GeneratorFactory{ - apiVerifier: apiVerifier, - dirStore: dirStore, + apiVerifier: apiVerifier, + configHandler: configHandler, + dirStore: dirStore, } } func (f *GeneratorFactory) NewGenerator( + analyzedPath string, appDir string, buildConfigDir string, extensions []buildpack.GroupElement, generatedDir string, plan platform.BuildPlan, platformDir string, + runPath string, stdout, stderr io.Writer, logger log.Logger, ) (*Generator, error) { @@ -68,6 +75,12 @@ func (f *GeneratorFactory) NewGenerator( if err := f.setExtensions(generator, extensions, logger); err != nil { return nil, err } + if err := f.setAnalyzedMD(generator, analyzedPath, logger); err != nil { + return nil, err + } + if err := f.setRunMD(generator, runPath, logger); err != nil { + return nil, err + } return generator, nil } @@ -81,10 +94,22 @@ func (f *GeneratorFactory) setExtensions(generator *Generator, extensions []buil return nil } +func (f *GeneratorFactory) setAnalyzedMD(generator *Generator, analyzedPath string, logger log.Logger) error { + var err error + generator.AnalyzedMD, err = f.configHandler.ReadAnalyzed(analyzedPath, logger) + return err +} + +func (f *GeneratorFactory) setRunMD(generator *Generator, runPath string, logger log.Logger) error { + var err error + generator.RunMetadata, err = f.configHandler.ReadRun(runPath, logger) + return err +} + type GenerateResult struct { - RunImage string - Plan platform.BuildPlan - UsePlan bool + AnalyzedMD platform.AnalyzedMetadata + Plan platform.BuildPlan + UsePlan bool } func (g *Generator) Generate() (GenerateResult, error) { @@ -123,24 +148,50 @@ func (g *Generator) Generate() (GenerateResult, error) { g.Logger.Debugf("Finished running generate for extension %s", ext) } - g.Logger.Debug("Validating Dockerfiles") - if err := 
g.validateDockerfiles(dockerfiles); err != nil { + g.Logger.Debug("Checking for new run image") + base, newBaseIdx, extend := g.checkNewRunImage(dockerfiles) + if err != nil { return GenerateResult{}, err } + if !satisfies(g.RunMetadata.Images, base) { + return GenerateResult{}, fmt.Errorf("new runtime base image '%s' not found in run metadata", base) + } g.Logger.Debug("Copying Dockerfiles") - if err := g.copyDockerfiles(dockerfiles); err != nil { + if err = g.copyDockerfiles(dockerfiles, newBaseIdx); err != nil { return GenerateResult{}, err } - g.Logger.Debug("Checking for new run image") - runImage, err := g.checkNewRunImage() - if err != nil { - return GenerateResult{}, err + newAnalyzedMD := g.AnalyzedMD + if newRunImage(base, g.AnalyzedMD) { + g.Logger.Debugf("Updating analyzed metadata with new run image '%s'", base) + newAnalyzedMD.RunImage = &platform.RunImage{ // target data is cleared + Reference: base, + Extend: extend, + } + } else if extend && g.AnalyzedMD.RunImage != nil { + g.Logger.Debug("Updating analyzed metadata with run image extend") + newAnalyzedMD.RunImage.Extend = true } - g.Logger.Debugf("Finished build, selected runImage '%s'", runImage) - return GenerateResult{Plan: filteredPlan, UsePlan: true, RunImage: runImage}, nil + return GenerateResult{ + AnalyzedMD: newAnalyzedMD, + Plan: filteredPlan, + UsePlan: true, + }, nil +} + +func satisfies(images []platform.RunImageForExport, imageName string) bool { + if len(images) == 0 { + // if no run image metadata was provided, consider it a match + return true + } + for _, image := range images { + if image.Image == imageName { + return true + } + } + return false } func (g *Generator) getGenerateInputs() buildpack.GenerateInputs { @@ -154,29 +205,17 @@ func (g *Generator) getGenerateInputs() buildpack.GenerateInputs { } } -func (g *Generator) validateDockerfiles(dockerfiles []buildpack.DockerfileInfo) error { - for _, dockerfile := range dockerfiles { - switch { - case dockerfile.Kind == buildpack.DockerfileKindRun: - if err := buildpack.VerifyRunDockerfile(dockerfile.Path); err != nil { - return fmt.Errorf("error parsing run.Dockerfile for extension %s: %w", dockerfile.ExtensionID, err) - } - case dockerfile.Kind == buildpack.DockerfileKindBuild: - if err := buildpack.VerifyBuildDockerfile(dockerfile.Path, g.Logger); err != nil { - return fmt.Errorf("error parsing build.Dockerfile for extension %s: %w", dockerfile.ExtensionID, err) - } - } - } - return nil -} - -func (g *Generator) copyDockerfiles(dockerfiles []buildpack.DockerfileInfo) error { - for _, dockerfile := range dockerfiles { +func (g *Generator) copyDockerfiles(dockerfiles []buildpack.DockerfileInfo, newBaseIdx int) error { + for currentIdx, dockerfile := range dockerfiles { targetDir := filepath.Join(g.GeneratedDir, dockerfile.Kind, launch.EscapeID(dockerfile.ExtensionID)) - targetPath := filepath.Join(targetDir, "Dockerfile") + var targetPath = filepath.Join(targetDir, "Dockerfile") + if dockerfile.Kind == buildpack.DockerfileKindRun && currentIdx < newBaseIdx { + targetPath += ".ignore" + } if err := os.MkdirAll(targetDir, os.ModePerm); err != nil { return err } + g.Logger.Debugf("Copying %s to %s", dockerfile.Path, targetPath) if err := fsutil.Copy(dockerfile.Path, targetPath); err != nil { return err } @@ -191,23 +230,32 @@ func (g *Generator) copyDockerfiles(dockerfiles []buildpack.DockerfileInfo) erro return nil } -func (g *Generator) checkNewRunImage() (string, error) { +func (g *Generator) checkNewRunImage(dockerfiles []buildpack.DockerfileInfo) 
(newBase string, newBaseIdx int, extend bool) { // There may be extensions that contribute only a build.Dockerfile; work backward through extensions until we find // a run.Dockerfile. - for i := len(g.Extensions) - 1; i >= 0; i-- { - extID := g.Extensions[i].ID - runDockerfile := filepath.Join(g.GeneratedDir, "run", launch.EscapeID(extID), "Dockerfile") - if _, err := os.Stat(runDockerfile); os.IsNotExist(err) { + for i := len(dockerfiles) - 1; i >= 0; i-- { + if dockerfiles[i].Kind != buildpack.DockerfileKindRun { continue } - - imageName, err := buildpack.RetrieveFirstFromImageNameFromDockerfile(runDockerfile) - if err != nil { - return "", err + if dockerfiles[i].NewBase != "" { + newBase = dockerfiles[i].NewBase + newBaseIdx = i + g.Logger.Debugf("Found a run.Dockerfile configuring image '%s' from extension with id '%s'", newBase, dockerfiles[i].ExtensionID) + break } + if dockerfiles[i].NewBase == "" { + extend = true + } + } + return newBase, newBaseIdx, extend +} - g.Logger.Debugf("Found a run.Dockerfile configuring image '%s' from extension with id '%s'", imageName, extID) - return imageName, nil +func newRunImage(base string, analyzedMD platform.AnalyzedMetadata) bool { + if base == "" { + return false + } + if analyzedMD.RunImage == nil { + return true } - return "", nil + return base != analyzedMD.RunImage.Reference } diff --git a/generator_test.go b/generator_test.go index 0e636beb7..8b5e15cd3 100644 --- a/generator_test.go +++ b/generator_test.go @@ -35,22 +35,25 @@ func TestGenerator(t *testing.T) { func testGeneratorFactory(t *testing.T, when spec.G, it spec.S) { when("#NewGenerator", func() { var ( - generatorFactory *lifecycle.GeneratorFactory - fakeAPIVerifier *testmock.MockBuildpackAPIVerifier - fakeDirStore *testmock.MockDirStore - logger *log.Logger - mockController *gomock.Controller - stdout, stderr *bytes.Buffer + generatorFactory *lifecycle.GeneratorFactory + fakeAPIVerifier *testmock.MockBuildpackAPIVerifier + fakeConfigHandler *testmock.MockConfigHandler + fakeDirStore *testmock.MockDirStore + logger *log.Logger + mockController *gomock.Controller + stdout, stderr *bytes.Buffer ) it.Before(func() { mockController = gomock.NewController(t) fakeAPIVerifier = testmock.NewMockBuildpackAPIVerifier(mockController) + fakeConfigHandler = testmock.NewMockConfigHandler(mockController) fakeDirStore = testmock.NewMockDirStore(mockController) logger = &log.Logger{Handler: &discard.Handler{}} generatorFactory = lifecycle.NewGeneratorFactory( fakeAPIVerifier, + fakeConfigHandler, fakeDirStore, ) }) @@ -61,6 +64,8 @@ func testGeneratorFactory(t *testing.T, when spec.G, it spec.S) { it("configures the generator", func() { fakeAPIVerifier.EXPECT().VerifyBuildpackAPI(buildpack.KindExtension, "A@v1", "0.9", logger) + fakeConfigHandler.EXPECT().ReadAnalyzed("some-analyzed-path", logger).Return(platform.AnalyzedMetadata{RunImage: &platform.RunImage{Reference: "some-run-image-ref"}}, nil) + fakeConfigHandler.EXPECT().ReadRun("some-run-path", logger).Return(platform.RunMetadata{Images: []platform.RunImageForExport{{Image: "some-run-image"}}}, nil) providedPlan := platform.BuildPlan{Entries: []platform.BuildPlanEntry{ { @@ -73,6 +78,7 @@ func testGeneratorFactory(t *testing.T, when spec.G, it spec.S) { }, }} generator, err := generatorFactory.NewGenerator( + "some-analyzed-path", "some-app-dir", "some-build-config-dir", []buildpack.GroupElement{ @@ -81,11 +87,13 @@ func testGeneratorFactory(t *testing.T, when spec.G, it spec.S) { "some-output-dir", providedPlan, "some-platform-dir", + 
"some-run-path", stdout, stderr, logger, ) h.AssertNil(t, err) + h.AssertEq(t, generator.AnalyzedMD, platform.AnalyzedMetadata{RunImage: &platform.RunImage{Reference: "some-run-image-ref"}}) h.AssertEq(t, generator.AppDir, "some-app-dir") h.AssertNotNil(t, generator.DirStore) h.AssertEq(t, generator.Extensions, []buildpack.GroupElement{ @@ -95,6 +103,7 @@ func testGeneratorFactory(t *testing.T, when spec.G, it spec.S) { h.AssertEq(t, generator.Logger, logger) h.AssertEq(t, generator.Plan, providedPlan) h.AssertEq(t, generator.PlatformDir, "some-platform-dir") + h.AssertEq(t, generator.RunMetadata, platform.RunMetadata{Images: []platform.RunImageForExport{{Image: "some-run-image"}}}) h.AssertEq(t, generator.Out, stdout) h.AssertEq(t, generator.Err, stderr) }) @@ -130,9 +139,10 @@ func testGenerator(t *testing.T, when spec.G, it spec.S) { stdout, stderr = &bytes.Buffer{}, &bytes.Buffer{} generator = &lifecycle.Generator{ - AppDir: appDir, - DirStore: dirStore, - Executor: executor, + AnalyzedMD: platform.AnalyzedMetadata{}, + AppDir: appDir, + DirStore: dirStore, + Executor: executor, Extensions: []buildpack.GroupElement{ {ID: "A", Version: "v1", API: api.Buildpack.Latest().String(), Homepage: "A Homepage"}, {ID: "ext/B", Version: "v2", API: api.Buildpack.Latest().String()}, @@ -152,6 +162,10 @@ func testGenerator(t *testing.T, when spec.G, it spec.S) { }) when(".Generate", func() { + extA := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "A", Version: "v1"}}} + extB := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "ext/B", Version: "v1"}}} + extC := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "C", Version: "v1"}}} + it("provides a subset of the build plan to each extension", func() { generator.Plan = platform.BuildPlan{ Entries: []platform.BuildPlanEntry{ @@ -197,7 +211,6 @@ func testGenerator(t *testing.T, when spec.G, it spec.S) { // OutputDir is ephemeral directory } - extA := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "A", Version: "v1"}}} dirStore.EXPECT().LookupExt("A", "v1").Return(&extA, nil) expectedAInputs := expectedInputs expectedAInputs.Plan = buildpack.Plan{Entries: []buildpack.Require{ @@ -209,10 +222,10 @@ func testGenerator(t *testing.T, when spec.G, it spec.S) { h.AssertEq(t, inputs.Plan, expectedAInputs.Plan) h.AssertEq(t, inputs.AppDir, expectedAInputs.AppDir) h.AssertEq(t, inputs.PlatformDir, expectedAInputs.PlatformDir) + h.AssertEq(t, inputs.BuildConfigDir, generator.BuildConfigDir) return buildpack.GenerateOutputs{MetRequires: []string{"some-dep"}}, nil }) - extB := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "ext/B", Version: "v1"}}} dirStore.EXPECT().LookupExt("ext/B", "v2").Return(&extB, nil) expectedBInputs := expectedInputs expectedBInputs.Plan = buildpack.Plan{Entries: []buildpack.Require{ @@ -224,6 +237,7 @@ func testGenerator(t *testing.T, when spec.G, it spec.S) { h.AssertEq(t, inputs.Plan, expectedBInputs.Plan) h.AssertEq(t, inputs.AppDir, expectedBInputs.AppDir) h.AssertEq(t, inputs.PlatformDir, expectedBInputs.PlatformDir) + h.AssertEq(t, inputs.BuildConfigDir, generator.BuildConfigDir) return buildpack.GenerateOutputs{}, nil }) @@ -231,146 +245,301 @@ func testGenerator(t *testing.T, when spec.G, it spec.S) { h.AssertNil(t, err) }) - it("aggregates dockerfiles from each extension and returns the correct run image in the build result", func() { - // 
Extension A outputs a run.Dockerfile to the provided output directory when invoked. - extA := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "A", Version: "v1"}}} + it("copies Dockerfiles and extend-config.toml files to the correct locations", func() { + // mock generate for extension A dirStore.EXPECT().LookupExt("A", "v1").Return(&extA, nil) - executor.EXPECT().Generate(extA, gomock.Any(), gomock.Any()).DoAndReturn( - func(_ buildpack.ExtDescriptor, inputs buildpack.GenerateInputs, _ llog.Logger) (buildpack.GenerateOutputs, error) { - // check inputs - h.AssertEq(t, inputs.AppDir, generator.AppDir) - h.AssertEq(t, inputs.BuildConfigDir, generator.BuildConfigDir) - h.AssertEq(t, inputs.PlatformDir, generator.PlatformDir) - - // create fixture - h.Mkdir(t, filepath.Join(inputs.OutputDir, "A")) - dockerfilePath1 := filepath.Join(inputs.OutputDir, "A", "run.Dockerfile") - h.Mkfile(t, `FROM some-run-image`, dockerfilePath1) - - return buildpack.GenerateOutputs{ - Dockerfiles: []buildpack.DockerfileInfo{ - {ExtensionID: "A", Path: dockerfilePath1, Kind: "run"}, - }, - }, nil + // extension A has a build.Dockerfile and an extend-config.toml + h.Mkdir(t, filepath.Join(tmpDir, "A")) + buildDockerfilePathA := filepath.Join(tmpDir, "A", "build.Dockerfile") + h.Mkfile(t, "some-build.Dockerfile-content-A", buildDockerfilePathA) + extendConfigPathA := filepath.Join(tmpDir, "A", "extend-config.toml") + h.Mkfile(t, "some-extend-config.toml-content-A", extendConfigPathA) + executor.EXPECT().Generate(extA, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{ + Dockerfiles: []buildpack.DockerfileInfo{ + { + ExtensionID: "A", + Kind: "build", + Path: buildDockerfilePathA, + }, }, - ) + }, nil) - // Extension B has a pre-populated root directory. - extB := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "ext/B", Version: "v1"}}} - bRootDir := filepath.Join(tmpDir, "some-b-root-dir") - h.Mkdir(t, bRootDir) - bDockerfilePath := filepath.Join(bRootDir, "run.Dockerfile") - h.Mkfile(t, `FROM other-run-image`, bDockerfilePath) + // mock generate for extension B dirStore.EXPECT().LookupExt("ext/B", "v2").Return(&extB, nil) + // extension B has a run.Dockerfile + h.Mkdir(t, filepath.Join(tmpDir, "B")) + runDockerfilePathB := filepath.Join(tmpDir, "B", "build.Dockerfile") + h.Mkfile(t, "some-run.Dockerfile-content-B", runDockerfilePathB) executor.EXPECT().Generate(extB, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{ Dockerfiles: []buildpack.DockerfileInfo{ - {ExtensionID: "ext/B", Path: bDockerfilePath, Kind: "run"}, + { + ExtensionID: "B", + Kind: "run", + Path: runDockerfilePathB, + }, }, }, nil) - // Extension C has a pre-populated root directory. 
- extC := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "C", Version: "v1"}}} - cRootDir := filepath.Join(tmpDir, "some-c-root-dir") - h.Mkdir(t, cRootDir) - cDockerfilePath := filepath.Join(cRootDir, "build.Dockerfile") - h.Mkfile(t, ` -ARG base_image -FROM ${base_image} -RUN echo "hello" > world.txt -`, cDockerfilePath) - h.Mkfile(t, `some-extend-config-content`, filepath.Join(cRootDir, "extend-config.toml")) + // mock generate for extension C dirStore.EXPECT().LookupExt("C", "v1").Return(&extC, nil) + // extension C has a build.Dockerfile, run.Dockerfile, and extend-config.toml + h.Mkdir(t, filepath.Join(tmpDir, "C")) + buildDockerfilePathC := filepath.Join(tmpDir, "C", "build.Dockerfile") + h.Mkfile(t, "some-build.Dockerfile-content-C", buildDockerfilePathC) + runDockerfilePathC := filepath.Join(tmpDir, "C", "run.Dockerfile") + h.Mkfile(t, "some-run.Dockerfile-content-C", runDockerfilePathC) + extendConfigPathC := filepath.Join(tmpDir, "C", "extend-config.toml") + h.Mkfile(t, "some-extend-config.toml-content-C", extendConfigPathC) executor.EXPECT().Generate(extC, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{ Dockerfiles: []buildpack.DockerfileInfo{ - {ExtensionID: "C", Path: cDockerfilePath, Kind: "build"}, + { + ExtensionID: "C", + Kind: "build", + Path: buildDockerfilePathC, + }, + { + ExtensionID: "C", + Kind: "run", + Path: runDockerfilePathC, + }, }, }, nil) + // add extension C to the group generator.Extensions = append(generator.Extensions, buildpack.GroupElement{ID: "C", Version: "v1", API: api.Buildpack.Latest().String()}) - result, err := generator.Generate() + // do generate + _, err := generator.Generate() h.AssertNil(t, err) - t.Log("copies Dockerfiles to the correct locations") - aContents := h.MustReadFile(t, filepath.Join(generatedDir, "run", "A", "Dockerfile")) - h.AssertEq(t, string(aContents), `FROM some-run-image`) - bContents := h.MustReadFile(t, filepath.Join(generatedDir, "run", "ext_B", "Dockerfile")) - h.AssertEq(t, string(bContents), `FROM other-run-image`) - cContents := h.MustReadFile(t, filepath.Join(generatedDir, "build", "C", "Dockerfile")) - h.AssertEq(t, string(cContents), ` -ARG base_image -FROM ${base_image} -RUN echo "hello" > world.txt -`) - - t.Log("copies the extend-config.toml if exists") - configContents := h.MustReadFile(t, filepath.Join(generatedDir, "build", "C", "extend-config.toml")) - h.AssertEq(t, string(configContents), `some-extend-config-content`) + t.Log("copies Dockerfiles") + contents := h.MustReadFile(t, filepath.Join(generatedDir, "build", "A", "Dockerfile")) + h.AssertEq(t, string(contents), "some-build.Dockerfile-content-A") + contents = h.MustReadFile(t, filepath.Join(generatedDir, "run", "B", "Dockerfile")) + h.AssertEq(t, string(contents), "some-run.Dockerfile-content-B") + contents = h.MustReadFile(t, filepath.Join(generatedDir, "build", "C", "Dockerfile")) + h.AssertEq(t, string(contents), "some-build.Dockerfile-content-C") + contents = h.MustReadFile(t, filepath.Join(generatedDir, "run", "C", "Dockerfile")) + h.AssertEq(t, string(contents), "some-run.Dockerfile-content-C") + + t.Log("copies extend-config.toml files if exist") + contents = h.MustReadFile(t, filepath.Join(generatedDir, "build", "A", "extend-config.toml")) + h.AssertEq(t, string(contents), "some-extend-config.toml-content-A") + contents = h.MustReadFile(t, filepath.Join(generatedDir, "build", "C", "extend-config.toml")) + h.AssertEq(t, string(contents), "some-extend-config.toml-content-C") + contents = 
h.MustReadFile(t, filepath.Join(generatedDir, "run", "C", "extend-config.toml")) + h.AssertEq(t, string(contents), "some-extend-config.toml-content-C") t.Log("does not pollute the output directory") h.AssertPathDoesNotExist(t, filepath.Join(generatedDir, "A", "run.Dockerfile")) - - t.Log("returns the correct run image") - h.AssertEq(t, result.RunImage, "other-run-image") + h.AssertPathDoesNotExist(t, filepath.Join(generatedDir, "B", "build.Dockerfile")) + h.AssertPathDoesNotExist(t, filepath.Join(generatedDir, "C", "run.Dockerfile")) + h.AssertPathDoesNotExist(t, filepath.Join(generatedDir, "C", "build.Dockerfile")) }) - it("validates build.Dockerfiles", func() { - extA := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "A", Version: "v1"}}} - dirStore.EXPECT().LookupExt("A", "v1").Return(&extA, nil) - executor.EXPECT().Generate(extA, gomock.Any(), gomock.Any()).DoAndReturn( - func(_ buildpack.ExtDescriptor, inputs buildpack.GenerateInputs, _ llog.Logger) (buildpack.GenerateOutputs, error) { - // check inputs - h.AssertEq(t, inputs.AppDir, generator.AppDir) - h.AssertEq(t, inputs.PlatformDir, generator.PlatformDir) + when("determining the correct run image", func() { + var runDockerfilePathA, runDockerfilePathB string - // create fixture - h.Mkdir(t, filepath.Join(inputs.OutputDir, "A")) - dockerfilePath1 := filepath.Join(inputs.OutputDir, "A", "build.Dockerfile") - h.Mkfile(t, `some-invalid-content`, dockerfilePath1) + it.Before(func() { + runDockerfilePathA = filepath.Join(tmpDir, "run.Dockerfile.A") + h.Mkfile(t, "some-dockerfile-content-A", runDockerfilePathA) + runDockerfilePathB = filepath.Join(tmpDir, "run.Dockerfile.B") + h.Mkfile(t, "some-dockerfile-content-B", runDockerfilePathB) + }) - return buildpack.GenerateOutputs{ + when("all run.Dockerfiles declare `FROM ${base_image}`", func() { + it("returns the original run image in the result", func() { + generator.AnalyzedMD = platform.AnalyzedMetadata{ + RunImage: &platform.RunImage{ + Reference: "some-existing-run-image", + }, + } + + // mock generate for extension A + dirStore.EXPECT().LookupExt("A", "v1").Return(&extA, nil) + executor.EXPECT().Generate(extA, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{ Dockerfiles: []buildpack.DockerfileInfo{ - {ExtensionID: "A", Path: dockerfilePath1, Kind: "build"}, + { + ExtensionID: "A", + Kind: "run", + Path: runDockerfilePathA, + NewBase: "", + }, }, - }, nil - }, - ) + }, nil) - extB := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "ext/B", Version: "v1"}}} - dirStore.EXPECT().LookupExt("ext/B", "v2").Return(&extB, nil) - executor.EXPECT().Generate(extB, gomock.Any(), gomock.Any()).AnyTimes() + // mock generate for extension B + dirStore.EXPECT().LookupExt("ext/B", "v2").Return(&extB, nil) + executor.EXPECT().Generate(extB, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{}, nil) - _, err := generator.Generate() - h.AssertError(t, err, "error parsing build.Dockerfile for extension A: dockerfile parse error line 1: unknown instruction: SOME-INVALID-CONTENT") - }) + // do generate + result, err := generator.Generate() + h.AssertNil(t, err) - it("validates run.Dockerfiles", func() { - extA := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "A", Version: "v1"}}} - dirStore.EXPECT().LookupExt("A", "v1").Return(&extA, nil) - executor.EXPECT().Generate(extA, gomock.Any(), gomock.Any()).DoAndReturn( - func(_ buildpack.ExtDescriptor, inputs 
buildpack.GenerateInputs, _ llog.Logger) (buildpack.GenerateOutputs, error) { - // check inputs - h.AssertEq(t, inputs.AppDir, generator.AppDir) - h.AssertEq(t, inputs.PlatformDir, generator.PlatformDir) + h.AssertEq(t, result.AnalyzedMD.RunImage.Reference, "some-existing-run-image") + t.Log("sets extend to true in the result") + h.AssertEq(t, result.AnalyzedMD.RunImage.Extend, true) + + t.Log("copies Dockerfiles to the correct locations") + aContents := h.MustReadFile(t, filepath.Join(generatedDir, "run", "A", "Dockerfile")) + h.AssertEq(t, string(aContents), `some-dockerfile-content-A`) + }) + }) + + when("run.Dockerfiles use FROM to switch the run image", func() { + it("returns the last image referenced in the `FROM` statement of the last run.Dockerfile not to declare `FROM ${base_image}`", func() { + generator.AnalyzedMD = platform.AnalyzedMetadata{ + RunImage: &platform.RunImage{ + Reference: "some-existing-run-image", + }, + } - // create fixture - h.Mkdir(t, filepath.Join(inputs.OutputDir, "A")) - dockerfilePath1 := filepath.Join(inputs.OutputDir, "A", "run.Dockerfile") - h.Mkfile(t, `some-invalid-content`, dockerfilePath1) + // mock generate for extension A + dirStore.EXPECT().LookupExt("A", "v1").Return(&extA, nil) + executor.EXPECT().Generate(extA, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{ + Dockerfiles: []buildpack.DockerfileInfo{ + { + ExtensionID: "A", + Kind: "run", + Path: runDockerfilePathA, + NewBase: "some-new-base-image", + }, + }, + }, nil) - return buildpack.GenerateOutputs{ + // mock generate for extension B + dirStore.EXPECT().LookupExt("ext/B", "v2").Return(&extB, nil) + executor.EXPECT().Generate(extB, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{ Dockerfiles: []buildpack.DockerfileInfo{ - {ExtensionID: "A", Path: dockerfilePath1, Kind: "run"}, + { + ExtensionID: "B", + Kind: "run", + Path: runDockerfilePathB, + NewBase: "", + }, }, - }, nil - }, - ) + }, nil) - extB := buildpack.ExtDescriptor{Extension: buildpack.ExtInfo{BaseInfo: buildpack.BaseInfo{ID: "ext/B", Version: "v1"}}} - dirStore.EXPECT().LookupExt("ext/B", "v2").Return(&extB, nil) - executor.EXPECT().Generate(extB, gomock.Any(), gomock.Any()).AnyTimes() + // do generate + result, err := generator.Generate() + h.AssertNil(t, err) - _, err := generator.Generate() - h.AssertError(t, err, "error parsing run.Dockerfile for extension A: dockerfile parse error line 1: unknown instruction: SOME-INVALID-CONTENT") + h.AssertEq(t, result.AnalyzedMD.RunImage.Reference, "some-new-base-image") + t.Log("sets extend to true in the result") + h.AssertEq(t, result.AnalyzedMD.RunImage.Extend, true) + + t.Log("copies Dockerfiles to the correct locations") + aContents := h.MustReadFile(t, filepath.Join(generatedDir, "run", "A", "Dockerfile")) + h.AssertEq(t, string(aContents), `some-dockerfile-content-A`) + BContents := h.MustReadFile(t, filepath.Join(generatedDir, "run", "B", "Dockerfile")) + h.AssertEq(t, string(BContents), `some-dockerfile-content-B`) + }) + + when("no more run.Dockerfiles follow", func() { + it("sets extend to false in the result", func() { + // mock generate for extension A + dirStore.EXPECT().LookupExt("A", "v1").Return(&extA, nil) + executor.EXPECT().Generate(extA, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{ + Dockerfiles: []buildpack.DockerfileInfo{ + { + ExtensionID: "A", + Kind: "run", + Path: runDockerfilePathA, + NewBase: "some-new-base-image", + }, + }, + }, nil) + + // mock generate for extension B + dirStore.EXPECT().LookupExt("ext/B", 
"v2").Return(&extB, nil) + executor.EXPECT().Generate(extB, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{ + Dockerfiles: []buildpack.DockerfileInfo{ + { + ExtensionID: "B", + Kind: "run", + Path: runDockerfilePathB, + NewBase: "some-other-base-image", + }, + }, + }, nil) + + // do generate + result, err := generator.Generate() + h.AssertNil(t, err) + + h.AssertEq(t, result.AnalyzedMD.RunImage.Reference, "some-other-base-image") + h.AssertEq(t, result.AnalyzedMD.RunImage.Extend, false) + + t.Log("copies Dockerfiles to the correct locations") + t.Log("renames earlier run.Dockerfiles to Dockerfile.ignore in the output directory") + aContents := h.MustReadFile(t, filepath.Join(generatedDir, "run", "A", "Dockerfile.ignore")) + h.AssertEq(t, string(aContents), `some-dockerfile-content-A`) + BContents := h.MustReadFile(t, filepath.Join(generatedDir, "run", "B", "Dockerfile")) + h.AssertEq(t, string(BContents), `some-dockerfile-content-B`) + }) + }) + + when("run metadata provided", func() { + it.Before(func() { + generator.RunMetadata = platform.RunMetadata{ + Images: []platform.RunImageForExport{ + {Image: "some-run-image"}, + }, + } + }) + + when("containing new run image", func() { + it("succeeds", func() { + // mock generate for extension A + dirStore.EXPECT().LookupExt("A", "v1").Return(&extA, nil) + executor.EXPECT().Generate(extA, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{ + Dockerfiles: []buildpack.DockerfileInfo{ + { + ExtensionID: "A", + Kind: "run", + Path: runDockerfilePathA, + NewBase: "some-run-image", + }, + }, + }, nil) + + // mock generate for extension B + dirStore.EXPECT().LookupExt("ext/B", "v2").Return(&extB, nil) + executor.EXPECT().Generate(extB, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{}, nil) + + // do generate + result, err := generator.Generate() + h.AssertNil(t, err) + + h.AssertEq(t, result.AnalyzedMD.RunImage.Reference, "some-run-image") + }) + }) + + when("not containing new run image", func() { + it("errors", func() { + // mock generate for extension A + dirStore.EXPECT().LookupExt("A", "v1").Return(&extA, nil) + executor.EXPECT().Generate(extA, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{ + Dockerfiles: []buildpack.DockerfileInfo{ + { + ExtensionID: "A", + Kind: "run", + Path: runDockerfilePathA, + NewBase: "some-other-run-image", + }, + }, + }, nil) + + // mock generate for extension B + dirStore.EXPECT().LookupExt("ext/B", "v2").Return(&extB, nil) + executor.EXPECT().Generate(extB, gomock.Any(), gomock.Any()).Return(buildpack.GenerateOutputs{}, nil) + + // do generate + _, err := generator.Generate() + h.AssertError(t, err, "new runtime base image 'some-other-run-image' not found in run metadata") + }) + }) + }) + }) }) when("extension generate failed", func() { diff --git a/handlers.go b/handlers.go index 568ef03ae..15846b50b 100644 --- a/handlers.go +++ b/handlers.go @@ -42,6 +42,7 @@ type ConfigHandler interface { ReadAnalyzed(path string, logr log.Logger) (platform.AnalyzedMetadata, error) ReadGroup(path string) (buildpackGroup []buildpack.GroupElement, extensionsGroup []buildpack.GroupElement, err error) ReadOrder(path string) (buildpack.Order, buildpack.Order, error) + ReadRun(runPath string, logger log.Logger) (platform.RunMetadata, error) } type DefaultConfigHandler struct{} @@ -99,3 +100,7 @@ func ReadOrder(path string) (buildpack.Order, buildpack.Order, error) { } return order.Order, order.OrderExtensions, err } + +func (h *DefaultConfigHandler) ReadRun(runPath string, logger 
log.Logger) (platform.RunMetadata, error) { + return platform.ReadRun(runPath, logger) +} diff --git a/platform/files.go b/platform/files.go index 5c38953d9..920937b11 100644 --- a/platform/files.go +++ b/platform/files.go @@ -49,6 +49,43 @@ type ImageIdentifier struct { Reference string `toml:"reference"` } +type RunImage struct { + Reference string `toml:"reference"` + Extend bool `toml:"extend,omitempty"` + Target *TargetMetadata `json:"target,omitempty" toml:"target,omitempty"` +} + +type TargetMetadata struct { + buildpack.TargetPartial + Distribution *buildpack.DistributionMetadata `json:"distribution,omitempty" toml:"distribution,omitempty"` +} + +// Satisfies treats optional fields (ArchVariant and Distributions) as wildcards if empty, returns true if +func (t *TargetMetadata) IsSatisfiedBy(o *buildpack.TargetMetadata) bool { + if t.Arch != o.Arch || t.OS != o.OS { + return false + } + if t.ArchVariant != "" && o.ArchVariant != "" && t.ArchVariant != o.ArchVariant { + return false + } + + // if either of the lengths of Distributions are zero, treat it as a wildcard. + if t.Distribution != nil && len(o.Distributions) > 0 { + // this could be more efficient but the lists are probably short... + found := false + for _, odist := range o.Distributions { + if t.Distribution.Name == odist.Name && t.Distribution.Version == odist.Version { + found = true + continue + } + } + if !found { + return false + } + } + return true +} + func ReadAnalyzed(analyzedPath string, logger log.Logger) (AnalyzedMetadata, error) { var analyzedMD AnalyzedMetadata if _, err := toml.DecodeFile(analyzedPath, &analyzedMD); err != nil { @@ -68,28 +105,28 @@ func (amd *AnalyzedMetadata) WriteTOML(path string) error { // NOTE: This struct MUST be kept in sync with `LayersMetadataCompat` type LayersMetadata struct { - App []LayerMetadata `json:"app" toml:"app"` - BOM *LayerMetadata `json:"sbom,omitempty" toml:"sbom,omitempty"` - Buildpacks []buildpack.LayersMetadata `json:"buildpacks" toml:"buildpacks"` - Config LayerMetadata `json:"config" toml:"config"` - Launcher LayerMetadata `json:"launcher" toml:"launcher"` - ProcessTypes LayerMetadata `json:"process-types" toml:"process-types"` - RunImage PreviousImageRunImageMetadata `json:"runImage" toml:"run-image"` - Stack StackMetadata `json:"stack" toml:"stack"` + App []LayerMetadata `json:"app" toml:"app"` + BOM *LayerMetadata `json:"sbom,omitempty" toml:"sbom,omitempty"` + Buildpacks []buildpack.LayersMetadata `json:"buildpacks" toml:"buildpacks"` + Config LayerMetadata `json:"config" toml:"config"` + Launcher LayerMetadata `json:"launcher" toml:"launcher"` + ProcessTypes LayerMetadata `json:"process-types" toml:"process-types"` + RunImage RunImageForRebase `json:"runImage" toml:"run-image"` + Stack StackMetadata `json:"stack" toml:"stack"` } // NOTE: This struct MUST be kept in sync with `LayersMetadata`. // It exists for situations where the `App` field type cannot be // guaranteed, yet the original struct data must be maintained. 
type LayersMetadataCompat struct { - App interface{} `json:"app" toml:"app"` - BOM *LayerMetadata `json:"sbom,omitempty" toml:"sbom,omitempty"` - Buildpacks []buildpack.LayersMetadata `json:"buildpacks" toml:"buildpacks"` - Config LayerMetadata `json:"config" toml:"config"` - Launcher LayerMetadata `json:"launcher" toml:"launcher"` - ProcessTypes LayerMetadata `json:"process-types" toml:"process-types"` - RunImage PreviousImageRunImageMetadata `json:"runImage" toml:"run-image"` - Stack StackMetadata `json:"stack" toml:"stack"` + App interface{} `json:"app" toml:"app"` + BOM *LayerMetadata `json:"sbom,omitempty" toml:"sbom,omitempty"` + Buildpacks []buildpack.LayersMetadata `json:"buildpacks" toml:"buildpacks"` + Config LayerMetadata `json:"config" toml:"config"` + Launcher LayerMetadata `json:"launcher" toml:"launcher"` + ProcessTypes LayerMetadata `json:"process-types" toml:"process-types"` + RunImage RunImageForRebase `json:"runImage" toml:"run-image"` + Stack StackMetadata `json:"stack" toml:"stack"` } func (m *LayersMetadata) MetadataForBuildpack(id string) buildpack.LayersMetadata { @@ -105,47 +142,11 @@ type LayerMetadata struct { SHA string `json:"sha" toml:"sha"` } -type PreviousImageRunImageMetadata struct { +type RunImageForRebase struct { TopLayer string `json:"topLayer" toml:"top-layer"` Reference string `json:"reference" toml:"reference"` } -type RunImage struct { - Reference string `toml:"reference"` - Target *TargetMetadata `json:"target,omitempty" toml:"target,omitempty"` -} - -type TargetMetadata struct { - buildpack.TargetPartial - Distribution *buildpack.DistributionMetadata `json:"distribution,omitempty" toml:"distribution,omitempty"` -} - -// Satisfies treats optional fields (ArchVariant and Distributions) as wildcards if empty, returns true if -func (t *TargetMetadata) IsSatisfiedBy(o *buildpack.TargetMetadata) bool { - if t.Arch != o.Arch || t.OS != o.OS { - return false - } - if t.ArchVariant != "" && o.ArchVariant != "" && t.ArchVariant != o.ArchVariant { - return false - } - - // if either of the lengths of Distributions are zero, treat it as a wildcard. - if t.Distribution != nil && len(o.Distributions) > 0 { - // this could be more efficient but the lists are probably short... 
- found := false - for _, odist := range o.Distributions { - if t.Distribution.Name == odist.Name && t.Distribution.Version == odist.Version { - found = true - continue - } - } - if !found { - return false - } - } - return true -} - // metadata.toml type BuildMetadata struct { @@ -312,7 +313,7 @@ type ImageReport struct { // run.toml type RunMetadata struct { - Images []RunImageMetadata `json:"-" toml:"image"` + Images []RunImageForExport `json:"-" toml:"images"` } func ReadRun(runPath string, logger log.Logger) (RunMetadata, error) { @@ -330,15 +331,15 @@ func ReadRun(runPath string, logger log.Logger) (RunMetadata, error) { // stack.toml type StackMetadata struct { - RunImage RunImageMetadata `json:"runImage" toml:"run-image"` + RunImage RunImageForExport `json:"runImage" toml:"run-image"` } -type RunImageMetadata struct { +type RunImageForExport struct { Image string `toml:"image" json:"image"` Mirrors []string `toml:"mirrors" json:"mirrors,omitempty"` } -func (rm *RunImageMetadata) BestRunImageMirror(registry string) (string, error) { +func (rm *RunImageForExport) BestRunImageMirror(registry string) (string, error) { if rm.Image == "" { return "", errors.New("missing run-image metadata") } diff --git a/platform/files_test.go b/platform/files_test.go index cba76be7f..90328de8c 100644 --- a/platform/files_test.go +++ b/platform/files_test.go @@ -100,7 +100,7 @@ func testFiles(t *testing.T, when spec.G, it spec.S) { var stackMD *platform.StackMetadata it.Before(func() { - stackMD = &platform.StackMetadata{RunImage: platform.RunImageMetadata{ + stackMD = &platform.StackMetadata{RunImage: platform.RunImageForExport{ Image: "first.com/org/repo", Mirrors: []string{ "myorg/myrepo", @@ -162,7 +162,7 @@ func testFiles(t *testing.T, when spec.G, it spec.S) { PreviousImage: &platform.ImageIdentifier{Reference: "previous-img"}, Metadata: platform.LayersMetadata{ Stack: platform.StackMetadata{ - RunImage: platform.RunImageMetadata{Image: "imagine that"}, + RunImage: platform.RunImageForExport{Image: "imagine that"}, }, }, RunImage: &platform.RunImage{Reference: "some-ref"}, @@ -180,7 +180,7 @@ func testFiles(t *testing.T, when spec.G, it spec.S) { PreviousImage: &platform.ImageIdentifier{Reference: "previous-img"}, Metadata: platform.LayersMetadata{ Stack: platform.StackMetadata{ - RunImage: platform.RunImageMetadata{Image: "imagine that"}, + RunImage: platform.RunImageForExport{Image: "imagine that"}, }, }, RunImage: &platform.RunImage{Reference: "some-ref"}, diff --git a/platform/testdata/cnb/run.toml b/platform/testdata/cnb/run.toml index f5b4d0e9e..4c776d7a2 100644 --- a/platform/testdata/cnb/run.toml +++ b/platform/testdata/cnb/run.toml @@ -1,3 +1,3 @@ -[[image]] +[[images]] image = "some-run-image" mirrors = ["some-run-image-mirror", "some-other-run-image-mirror"] diff --git a/testmock/config_handler.go b/testmock/config_handler.go index 7a711d79e..406fb40df 100644 --- a/testmock/config_handler.go +++ b/testmock/config_handler.go @@ -83,3 +83,18 @@ func (mr *MockConfigHandlerMockRecorder) ReadOrder(arg0 interface{}) *gomock.Cal mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadOrder", reflect.TypeOf((*MockConfigHandler)(nil).ReadOrder), arg0) } + +// ReadRun mocks base method. 
+func (m *MockConfigHandler) ReadRun(arg0 string, arg1 log.Logger) (platform.RunMetadata, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadRun", arg0, arg1) + ret0, _ := ret[0].(platform.RunMetadata) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadRun indicates an expected call of ReadRun. +func (mr *MockConfigHandlerMockRecorder) ReadRun(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadRun", reflect.TypeOf((*MockConfigHandler)(nil).ReadRun), arg0, arg1) +}
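
A note on the generator behavior exercised by the tests above: when multiple extensions emit a run.Dockerfile, the last one wins — its NewBase becomes the run image reference in analyzed metadata, while earlier run.Dockerfiles are kept in the output directory as Dockerfile.ignore. The sketch below only illustrates that selection; planRunDockerfiles and its return shape are hypothetical stand-ins, not the lifecycle's actual generator code.

// Sketch of the "last run.Dockerfile wins" behavior asserted in generator_test.go:
// the final extension that emits a run.Dockerfile determines the new run image
// reference; earlier ones are preserved as Dockerfile.ignore. DockerfileInfo
// mirrors the buildpack struct; planRunDockerfiles is a hypothetical helper.
package main

import (
	"fmt"
	"path/filepath"
)

type DockerfileInfo struct {
	ExtensionID string
	Kind        string
	Path        string
	NewBase     string
}

func planRunDockerfiles(generatedDir string, dockerfiles []DockerfileInfo) (newRef string, copies map[string]string) {
	copies = map[string]string{} // source path -> destination path
	lastRunIdx := -1
	for i, d := range dockerfiles {
		if d.Kind == "run" {
			lastRunIdx = i
		}
	}
	for i, d := range dockerfiles {
		if d.Kind != "run" {
			continue
		}
		name := "Dockerfile.ignore"
		if i == lastRunIdx {
			name = "Dockerfile"
			newRef = d.NewBase
		}
		copies[d.Path] = filepath.Join(generatedDir, "run", d.ExtensionID, name)
	}
	return newRef, copies
}

func main() {
	newRef, copies := planRunDockerfiles("/layers/generated", []DockerfileInfo{
		{ExtensionID: "A", Kind: "run", Path: "/tmp/A/run.Dockerfile", NewBase: "some-base-image"},
		{ExtensionID: "B", Kind: "run", Path: "/tmp/B/run.Dockerfile", NewBase: "some-other-base-image"},
	})
	fmt.Println(newRef) // some-other-base-image
	for src, dst := range copies {
		fmt.Println(src, "->", dst)
	}
}

Running this prints some-other-base-image and destinations under run/A/Dockerfile.ignore and run/B/Dockerfile, matching the expectations in the test above.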
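
The TargetMetadata.IsSatisfiedBy method relocated in platform/files.go keeps wildcard semantics for its optional fields: OS and Arch must match exactly, while ArchVariant and the distribution match anything when either side leaves them empty. Below is a self-contained sketch of those rules using local stand-in types (distribution, imageTarget, moduleTarget) so it compiles without the lifecycle packages; it mirrors, but is not, the actual method.

// Sketch of the wildcard matching rules used by TargetMetadata.IsSatisfiedBy.
// Types are local stand-ins for the platform/buildpack structs, for illustration only.
package main

import "fmt"

type distribution struct {
	Name    string
	Version string
}

type imageTarget struct { // stands in for platform.TargetMetadata
	OS           string
	Arch         string
	ArchVariant  string
	Distribution *distribution
}

type moduleTarget struct { // stands in for buildpack.TargetMetadata
	OS            string
	Arch          string
	ArchVariant   string
	Distributions []distribution
}

func satisfies(t imageTarget, o moduleTarget) bool {
	if t.OS != o.OS || t.Arch != o.Arch {
		return false // OS and Arch must match exactly
	}
	if t.ArchVariant != "" && o.ArchVariant != "" && t.ArchVariant != o.ArchVariant {
		return false // ArchVariant only checked when both sides set it
	}
	if t.Distribution == nil || len(o.Distributions) == 0 {
		return true // either side omitted the distribution: wildcard
	}
	for _, d := range o.Distributions {
		if d.Name == t.Distribution.Name && d.Version == t.Distribution.Version {
			return true
		}
	}
	return false
}

func main() {
	image := imageTarget{OS: "linux", Arch: "amd64", Distribution: &distribution{Name: "ubuntu", Version: "22.04"}}
	module := moduleTarget{OS: "linux", Arch: "amd64"} // no distributions listed: wildcard
	fmt.Println(satisfies(image, module))              // true
}

Swapping in a module target that lists only, say, alpine distributions would make the check fail, because the image's distribution is populated and unmatched.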
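
Finally, run.toml now holds a list of [[images]] tables (previously [[image]]) decoded into platform.RunMetadata via the new ConfigHandler.ReadRun, and the generator rejects a new runtime base image that is not listed there. A minimal sketch of that decode-and-lookup, assuming the BurntSushi/toml decoder and using local struct names (runMetadata, runImageForExport) that mirror the renamed platform types rather than importing them:

// Sketch: decoding the new [[images]] run.toml schema and checking whether an
// extension-provided run image is listed, mirroring the "not found in run
// metadata" error above. Names are illustrative, not the lifecycle's own code.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type runImageForExport struct {
	Image   string   `toml:"image"`
	Mirrors []string `toml:"mirrors"`
}

type runMetadata struct {
	Images []runImageForExport `toml:"images"`
}

func main() {
	data := `
[[images]]
image = "some-run-image"
mirrors = ["some-run-image-mirror"]

[[images]]
image = "some-other-run-image"
`
	var runMD runMetadata
	if _, err := toml.Decode(data, &runMD); err != nil {
		panic(err)
	}

	newBase := "some-other-run-image" // e.g. DockerfileInfo.NewBase from an extension
	found := false
	for _, img := range runMD.Images {
		if img.Image == newBase {
			found = true
			break
		}
	}
	if !found {
		panic(fmt.Sprintf("new runtime base image '%s' not found in run metadata", newBase))
	}
	fmt.Println("run image", newBase, "is known")
}

In unit tests the same data can be injected through the generated mock, e.g. configHandler.EXPECT().ReadRun(gomock.Any(), gomock.Any()).Return(platform.RunMetadata{Images: []platform.RunImageForExport{{Image: "some-run-image"}}}, nil).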