Refactor the build loop. (#385)
This change refactors the build loop a bit to make cache optimization easier in the future. Some notable changes:

- The special casing around volume snapshots is removed. Every volume is added to the snapshotFiles list for every command that will snapshot anyway.
- Snapshot saving was extracted into a sub-function.
- The decision on whether or not to snapshot was extracted (a condensed sketch of the resulting loop shape follows below).
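
For orientation before reading the diff: the per-command loop now delegates the "should we snapshot?" question to a single predicate and hands layer creation to a single helper. The sketch below is a condensed, self-contained approximation of that shape, not kaniko's real implementation; `fakeCommand`, `stage`, and the trimmed signatures are illustrative stand-ins, and command execution, error handling, caching, and the real snapshotter are elided.

```go
package main

import "fmt"

// Illustrative stand-ins only; the real code uses kaniko's
// commands.DockerCommand, snapshot.Snapshotter, and config.KanikoOptions.
type fakeCommand struct {
	name  string
	files []string // nil means "changed files unknown, snapshot the whole FS"
}

type stage struct {
	final          bool
	singleSnapshot bool
	commands       []fakeCommand
}

// shouldTakeSnapshot mirrors the extracted decision: intermediate stages and
// single-snapshot mode snapshot only after the last command, a nil file list
// means "snapshot everything", and an empty list means "nothing to snapshot".
func (s *stage) shouldTakeSnapshot(index int, files []string) bool {
	isLastCommand := index == len(s.commands)-1
	if !s.final || s.singleSnapshot {
		return isLastCommand
	}
	if files == nil {
		return true
	}
	return len(files) > 0
}

// saveSnapshot stands in for the extracted helper that turns snapshot contents
// into an image layer (and, with caching on, pushes the layer to the cache).
func (s *stage) saveSnapshot(cmd fakeCommand, contents []byte) {
	if contents == nil {
		fmt.Printf("%s: no files changed, no layer added\n", cmd.name)
		return
	}
	fmt.Printf("%s: appending layer of %d bytes\n", cmd.name, len(contents))
}

func main() {
	s := &stage{final: true, commands: []fakeCommand{
		{name: "RUN make", files: nil},
		{name: "VOLUME /data", files: []string{}}, // volumes are no longer special-cased
	}}
	for i, cmd := range s.commands {
		// The real loop executes the command here before deciding whether to snapshot.
		if !s.shouldTakeSnapshot(i, cmd.files) {
			continue
		}
		s.saveSnapshot(cmd, []byte("tar contents"))
	}
}
```

The empty list now returned by VOLUME (see pkg/commands/volume.go below) is what lets the predicate skip a snapshot at the volume declaration itself; the created directories are instead folded into the next snapshot via config.Volumes, as the "Volumes are very weird" comment in pkg/executor/build.go explains.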
dlorenc authored Oct 9, 2018
1 parent 0a13e04 commit 9a0e29c
Showing 4 changed files with 87 additions and 78 deletions.
pkg/commands/volume.go: 6 changes (2 additions, 4 deletions)
@@ -30,8 +30,7 @@ import (

type VolumeCommand struct {
BaseCommand
cmd *instructions.VolumeCommand
snapshotFiles []string
cmd *instructions.VolumeCommand
}

func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
@@ -57,7 +56,6 @@ func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.
// Only create and snapshot the dir if it didn't exist already
if _, err := os.Stat(volume); os.IsNotExist(err) {
logrus.Infof("Creating directory %s", volume)
v.snapshotFiles = append(v.snapshotFiles, volume)
if err := os.MkdirAll(volume, 0755); err != nil {
return fmt.Errorf("Could not create directory for volume %s: %s", volume, err)
}
@@ -69,7 +67,7 @@ func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.
}

func (v *VolumeCommand) FilesToSnapshot() []string {
return v.snapshotFiles
return []string{}
}

func (v *VolumeCommand) String() string {
pkg/commands/volume_test.go: 1 change (0 additions, 1 deletion)
@@ -43,7 +43,6 @@ func TestUpdateVolume(t *testing.T) {
cmd: &instructions.VolumeCommand{
Volumes: volumes,
},
snapshotFiles: []string{},
}

expectedVolumes := map[string]struct{}{
pkg/constants/constants.go: 3 changes (0 additions, 3 deletions)
@@ -62,9 +62,6 @@ const (
// Docker command names
Cmd = "cmd"
Entrypoint = "entrypoint"

// VolumeCmdName is the name of the volume command
VolumeCmdName = "volume"
)

// KanikoBuildFiles is the list of files required to build kaniko
pkg/executor/build.go: 155 changes (85 additions, 70 deletions)
@@ -49,6 +49,7 @@ type stageBuilder struct {
cf *v1.ConfigFile
snapshotter *snapshot.Snapshotter
baseImageDigest string
opts *config.KanikoOptions
}

// newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
Expand Down Expand Up @@ -81,6 +82,7 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*sta
cf: imageConfig,
snapshotter: snapshotter,
baseImageDigest: digest.String(),
opts: opts,
}, nil
}

Expand Down Expand Up @@ -111,7 +113,7 @@ func (s *stageBuilder) extractCachedLayer(layer v1.Image, createdBy string) erro
return err
}

func (s *stageBuilder) build(opts *config.KanikoOptions) error {
func (s *stageBuilder) build() error {
// Unpack file system to root
if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
return err
@@ -120,23 +122,26 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
if err := s.snapshotter.Init(); err != nil {
return err
}
var volumes []string

// Set the initial cache key to be the base image digest, the build args and the SrcContext.
compositeKey := NewCompositeCache(s.baseImageDigest)
contextHash, err := HashDir(opts.SrcContext)
contextHash, err := HashDir(s.opts.SrcContext)
if err != nil {
return err
}
compositeKey.AddKey(opts.BuildArgs...)
compositeKey.AddKey(s.opts.BuildArgs...)

args := dockerfile.NewBuildArgs(opts.BuildArgs)
for index, cmd := range s.stage.Commands {
finalCmd := index == len(s.stage.Commands)-1
command, err := commands.GetCommand(cmd, opts.SrcContext)
cmds := []commands.DockerCommand{}
for _, cmd := range s.stage.Commands {
command, err := commands.GetCommand(cmd, s.opts.SrcContext)
if err != nil {
return err
}
cmds = append(cmds, command)
}

args := dockerfile.NewBuildArgs(s.opts.BuildArgs)
for index, command := range cmds {
if command == nil {
continue
}
@@ -153,8 +158,8 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
return err
}

if command.CacheCommand() && opts.Cache {
image, err := cache.RetrieveLayer(opts, ck)
if command.CacheCommand() && s.opts.Cache {
image, err := cache.RetrieveLayer(s.opts, ck)
if err == nil {
if err := s.extractCachedLayer(image, command.String()); err != nil {
return errors.Wrap(err, "extracting cached layer")
@@ -163,84 +168,94 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
}
logrus.Info("No cached layer found, executing command...")
}

if err := command.ExecuteCommand(&s.cf.Config, args); err != nil {
return err
}
files := command.FilesToSnapshot()
if cmd.Name() == constants.VolumeCmdName {
volumes = append(volumes, files...)
var contents []byte

if !s.shouldTakeSnapshot(index, files) {
continue
}
var contents []byte

// If this is an intermediate stage, we only snapshot for the last command and we
// want to snapshot the entire filesystem since we aren't tracking what was changed
// by previous commands.
if !s.stage.Final {
if finalCmd {
contents, err = s.snapshotter.TakeSnapshotFS()
}
if files == nil || s.opts.SingleSnapshot {
contents, err = s.snapshotter.TakeSnapshotFS()
} else {
// If we are in single snapshot mode, we only take a snapshot once, after all
// commands have completed.
if opts.SingleSnapshot {
if finalCmd {
contents, err = s.snapshotter.TakeSnapshotFS()
}
} else {
// Otherwise, in the final stage we take a snapshot at each command. If we know
// the files that were changed, we'll snapshot those explicitly, otherwise we'll
// check if anything in the filesystem changed.
if files != nil {
if len(files) > 0 {
files = append(files, volumes...)
volumes = []string{}
}
contents, err = s.snapshotter.TakeSnapshot(files)
} else {
contents, err = s.snapshotter.TakeSnapshotFS()
volumes = []string{}
}
// Volumes are very weird. They get created in their command, but snapshotted in the next one.
// Add them to the list of files to snapshot.
for v := range s.cf.Config.Volumes {
files = append(files, v)
}
contents, err = s.snapshotter.TakeSnapshot(files)
}
if err != nil {
return fmt.Errorf("Error taking snapshot of files for command %s: %s", command, err)
}

if contents == nil {
logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
continue
}
// Append the layer to the image
opener := func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(contents)), nil
}
layer, err := tarball.LayerFromOpener(opener)
if err != nil {
return err
}
// Push layer to cache now along with new config file
if command.CacheCommand() && opts.Cache {
if err := pushLayerToCache(opts, ck, layer, command.String()); err != nil {
return err
}
}
s.image, err = mutate.Append(s.image,
mutate.Addendum{
Layer: layer,
History: v1.History{
Author: constants.Author,
CreatedBy: command.String(),
},
},
)
if err != nil {
if err := s.saveSnapshot(command, ck, contents); err != nil {
return err
}
}
return nil
}

func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool {
isLastCommand := index == len(s.stage.Commands)-1

// We only snapshot the very end of intermediate stages.
if !s.stage.Final {
return isLastCommand
}

// We only snapshot the very end with single snapshot mode on.
if s.opts.SingleSnapshot {
return isLastCommand
}

// nil means snapshot everything.
if files == nil {
return true
}

// Don't snapshot an empty list.
if len(files) == 0 {
return false
}
return true
}

func (s *stageBuilder) saveSnapshot(command commands.DockerCommand, ck string, contents []byte) error {
if contents == nil {
logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
return nil
}
// Append the layer to the image
opener := func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(contents)), nil
}
layer, err := tarball.LayerFromOpener(opener)
if err != nil {
return err
}
// Push layer to cache now along with new config file
if command.CacheCommand() && s.opts.Cache {
if err := pushLayerToCache(s.opts, ck, layer, command.String()); err != nil {
return err
}
}
s.image, err = mutate.Append(s.image,
mutate.Addendum{
Layer: layer,
History: v1.History{
Author: constants.Author,
CreatedBy: command.String(),
},
},
)
return err

}

// DoBuild executes building the Dockerfile
func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
// Parse dockerfile and unpack base image to root
@@ -253,7 +268,7 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("getting stage builder for stage %d", index))
}
if err := sb.build(opts); err != nil {
if err := sb.build(); err != nil {
return nil, errors.Wrap(err, "error building stage")
}
reviewConfig(stage, &sb.cf.Config)