Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
104 changes: 54 additions & 50 deletions pkg/cmd/dockerregistry/dockerregistry.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,13 +141,22 @@ func Execute(configFile io.Reader) {
if err != nil {
log.Fatalf("error parsing configuration file: %s", err)
}

err = Start(dockerConfig, extraConfig)
if err != nil {
log.Fatal(err)
}
}

// Start runs the Docker registry. Start always returns a non-nil error.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this comment makes it sounds like Start will never return nil, but i don't think that's what you mean, is it?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It will never return nil. It acts like http.ListenAndServe.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

got it.

func Start(dockerConfig *configuration.Configuration, extraConfig *registryconfig.Configuration) error {
setDefaultMiddleware(dockerConfig)
setDefaultLogParameters(dockerConfig)

ctx := context.Background()
ctx, err = configureLogging(ctx, dockerConfig)
ctx, err := configureLogging(ctx, dockerConfig)
if err != nil {
log.Fatalf("error configuring logger: %v", err)
return fmt.Errorf("error configuring logger: %v", err)
}

log.WithFields(versionFields()).Info("start registry")
Expand All @@ -171,69 +180,64 @@ func Execute(configFile io.Reader) {

if dockerConfig.HTTP.TLS.Certificate == "" {
context.GetLogger(ctx).Infof("listening on %v", dockerConfig.HTTP.Addr)
if err := http.ListenAndServe(dockerConfig.HTTP.Addr, handler); err != nil {
context.GetLogger(ctx).Fatalln(err)
return http.ListenAndServe(dockerConfig.HTTP.Addr, handler)
}

var (
minVersion uint16
cipherSuites []uint16
)
if s := os.Getenv("REGISTRY_HTTP_TLS_MINVERSION"); len(s) > 0 {
minVersion, err = crypto.TLSVersion(s)
if err != nil {
return fmt.Errorf("invalid TLS version %q specified in REGISTRY_HTTP_TLS_MINVERSION: %v (valid values are %q)", s, err, crypto.ValidTLSVersions())
}
} else {
var (
minVersion uint16
cipherSuites []uint16
)
if s := os.Getenv("REGISTRY_HTTP_TLS_MINVERSION"); len(s) > 0 {
minVersion, err = crypto.TLSVersion(s)
}
if s := os.Getenv("REGISTRY_HTTP_TLS_CIPHERSUITES"); len(s) > 0 {
for _, cipher := range strings.Split(s, ",") {
cipherSuite, err := crypto.CipherSuite(cipher)
if err != nil {
context.GetLogger(ctx).Fatalln(fmt.Errorf("invalid TLS version %q specified in REGISTRY_HTTP_TLS_MINVERSION: %v (valid values are %q)", s, err, crypto.ValidTLSVersions()))
}
}
if s := os.Getenv("REGISTRY_HTTP_TLS_CIPHERSUITES"); len(s) > 0 {
for _, cipher := range strings.Split(s, ",") {
cipherSuite, err := crypto.CipherSuite(cipher)
if err != nil {
context.GetLogger(ctx).Fatalln(fmt.Errorf("invalid cipher suite %q specified in REGISTRY_HTTP_TLS_CIPHERSUITES: %v (valid suites are %q)", s, err, crypto.ValidCipherSuites()))
}
cipherSuites = append(cipherSuites, cipherSuite)
return fmt.Errorf("invalid cipher suite %q specified in REGISTRY_HTTP_TLS_CIPHERSUITES: %v (valid suites are %q)", s, err, crypto.ValidCipherSuites())
}
cipherSuites = append(cipherSuites, cipherSuite)
}
}

tlsConf := crypto.SecureTLSConfig(&tls.Config{
ClientAuth: tls.NoClientCert,
MinVersion: minVersion,
CipherSuites: cipherSuites,
})

if len(dockerConfig.HTTP.TLS.ClientCAs) != 0 {
pool := x509.NewCertPool()
tlsConf := crypto.SecureTLSConfig(&tls.Config{
ClientAuth: tls.NoClientCert,
MinVersion: minVersion,
CipherSuites: cipherSuites,
})

for _, ca := range dockerConfig.HTTP.TLS.ClientCAs {
caPem, err := ioutil.ReadFile(ca)
if err != nil {
context.GetLogger(ctx).Fatalln(err)
}
if len(dockerConfig.HTTP.TLS.ClientCAs) != 0 {
pool := x509.NewCertPool()

if ok := pool.AppendCertsFromPEM(caPem); !ok {
context.GetLogger(ctx).Fatalln(fmt.Errorf("Could not add CA to pool"))
}
for _, ca := range dockerConfig.HTTP.TLS.ClientCAs {
caPem, err := ioutil.ReadFile(ca)
if err != nil {
return err
}

for _, subj := range pool.Subjects() {
context.GetLogger(ctx).Debugf("CA Subject: %s", string(subj))
if ok := pool.AppendCertsFromPEM(caPem); !ok {
return fmt.Errorf("could not add CA to pool")
}

tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
tlsConf.ClientCAs = pool
}

context.GetLogger(ctx).Infof("listening on %v, tls", dockerConfig.HTTP.Addr)
server := &http.Server{
Addr: dockerConfig.HTTP.Addr,
Handler: handler,
TLSConfig: tlsConf,
for _, subj := range pool.Subjects() {
context.GetLogger(ctx).Debugf("CA Subject: %s", string(subj))
}

if err := server.ListenAndServeTLS(dockerConfig.HTTP.TLS.Certificate, dockerConfig.HTTP.TLS.Key); err != nil {
context.GetLogger(ctx).Fatalln(err)
}
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
tlsConf.ClientCAs = pool
}

context.GetLogger(ctx).Infof("listening on %v, tls", dockerConfig.HTTP.Addr)
server := &http.Server{
Addr: dockerConfig.HTTP.Addr,
Handler: handler,
TLSConfig: tlsConf,
}
return server.ListenAndServeTLS(dockerConfig.HTTP.TLS.Certificate, dockerConfig.HTTP.TLS.Key)
}

// configureLogging prepares the context with a logger using the
Expand Down
2 changes: 1 addition & 1 deletion pkg/dockerregistry/server/blobdescriptorservice_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ func TestBlobDescriptorServiceIsApplied(t *testing.T) {
t.Fatalf("error parsing server url: %v", err)
}

desc, _, err := registrytest.UploadRandomTestBlob(serverURL, nil, "user/app")
desc, _, err := registrytest.UploadRandomTestBlob(ctx, serverURL, nil, "user/app")
if err != nil {
t.Fatal(err)
}
Expand Down
3 changes: 3 additions & 0 deletions pkg/dockerregistry/server/manifesthandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,9 @@ type ManifestHandler interface {
// Manifest returns a deserialized manifest object.
Manifest() distribution.Manifest

// Layers returns image layers and a value for the dockerLayersOrder annotation.
Layers(ctx context.Context) (order string, layers []imageapiv1.ImageLayer, err error)

// Payload returns manifest's media type, complete payload with signatures and canonical payload without
// signatures or an error if the information could not be fetched.
Payload() (mediaType string, payload []byte, canonical []byte, err error)
Expand Down
49 changes: 45 additions & 4 deletions pkg/dockerregistry/server/manifestschema1handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@ import (
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"

imageapi "github.com/openshift/origin/pkg/image/apis/image"
imageapiv1 "github.com/openshift/origin/pkg/image/apis/image/v1"
)

func unmarshalManifestSchema1(content []byte, signatures [][]byte) (distribution.Manifest, error) {
Expand Down Expand Up @@ -41,8 +44,9 @@ func unmarshalManifestSchema1(content []byte, signatures [][]byte) (distribution
}

type manifestSchema1Handler struct {
repo *repository
manifest *schema1.SignedManifest
repo *repository
manifest *schema1.SignedManifest
blobsCache map[digest.Digest]distribution.Descriptor
}

var _ ManifestHandler = &manifestSchema1Handler{}
Expand All @@ -59,6 +63,44 @@ func (h *manifestSchema1Handler) Manifest() distribution.Manifest {
return h.manifest
}

// statBlob resolves the descriptor for dgst via the repository's blob
// store, memoizing successful results in h.blobsCache so that repeated
// lookups of the same digest (e.g. from both Verify and Layers) reach
// the store only once. Failed lookups are not cached.
func (h *manifestSchema1Handler) statBlob(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	if desc, ok := h.blobsCache[dgst]; ok {
		return desc, nil
	}

	desc, err := h.repo.Blobs(ctx).Stat(ctx, dgst)
	if err != nil {
		return desc, err
	}

	// The cache is allocated lazily; a zero-valued handler has a nil map.
	if h.blobsCache == nil {
		h.blobsCache = make(map[digest.Digest]distribution.Descriptor)
	}
	h.blobsCache[dgst] = desc

	return desc, nil
}

// Layers converts the manifest's fsLayers into image-API layer records,
// stat-ing each blob to learn its size. A schema 1 manifest lists its
// layers youngest-first, so entries are written at mirrored indices to
// produce an ascending (oldest-first) slice, and the matching order
// annotation value is returned alongside it.
func (h *manifestSchema1Handler) Layers(ctx context.Context) (string, []imageapiv1.ImageLayer, error) {
	n := len(h.manifest.FSLayers)
	layers := make([]imageapiv1.ImageLayer, n)
	for i, fslayer := range h.manifest.FSLayers {
		desc, err := h.statBlob(ctx, fslayer.BlobSum)
		if err != nil {
			return "", nil, err
		}

		// Mirror the index so position 0 holds the oldest layer.
		j := n - 1 - i
		layers[j] = imageapiv1.ImageLayer{
			Name:      fslayer.BlobSum.String(),
			LayerSize: desc.Size,
			MediaType: schema1.MediaTypeManifestLayer,
		}
	}
	return imageapi.DockerImageLayersOrderAscending, layers, nil
}

func (h *manifestSchema1Handler) Payload() (mediaType string, payload []byte, canonical []byte, err error) {
mt, payload, err := h.manifest.Payload()
return mt, payload, h.manifest.Canonical, err
Expand All @@ -72,7 +114,6 @@ func (h *manifestSchema1Handler) Verify(ctx context.Context, skipDependencyVerif
// and since we use pullthroughBlobStore all the layer existence checks will be
// successful. This means that the docker client will not attempt to send them
// to us as it will assume that the registry has them.
repo := h.repo

if len(path.Join(h.repo.config.registryAddr, h.manifest.Name)) > reference.NameTotalLengthMax {
errs = append(errs,
Expand Down Expand Up @@ -116,7 +157,7 @@ func (h *manifestSchema1Handler) Verify(ctx context.Context, skipDependencyVerif
}

for _, fsLayer := range h.manifest.References() {
_, err := repo.Blobs(ctx).Stat(ctx, fsLayer.Digest)
_, err := h.statBlob(ctx, fsLayer.Digest)
if err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
Expand Down
71 changes: 44 additions & 27 deletions pkg/dockerregistry/server/manifestschema2handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,9 @@ import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema2"

imageapi "github.com/openshift/origin/pkg/image/apis/image"
imageapiv1 "github.com/openshift/origin/pkg/image/apis/image/v1"
)

var (
Expand Down Expand Up @@ -65,11 +68,51 @@ func (h *manifestSchema2Handler) Manifest() distribution.Manifest {
return h.manifest
}

// Layers converts the manifest's layer descriptors into image-API layer
// records. A schema 2 manifest already lists layers in ascending (base
// image first) order, so they are copied positionally and the ascending
// order annotation value is returned with them.
func (h *manifestSchema2Handler) Layers(ctx context.Context) (string, []imageapiv1.ImageLayer, error) {
	layers := make([]imageapiv1.ImageLayer, len(h.manifest.Layers))
	for i, l := range h.manifest.Layers {
		layers[i] = imageapiv1.ImageLayer{
			Name:      l.Digest.String(),
			LayerSize: l.Size,
			MediaType: l.MediaType,
		}
	}
	return imageapi.DockerImageLayersOrderAscending, layers, nil
}

func (h *manifestSchema2Handler) Payload() (mediaType string, payload []byte, canonical []byte, err error) {
mt, p, err := h.manifest.Payload()
return mt, p, p, err
}

// verifyLayer validates a single manifest reference. Foreign layers
// must carry at least one URL and are not required to be present
// locally; ordinary layers must carry no URLs, must exist in the blob
// store, and must match the size recorded in the manifest.
func (h *manifestSchema2Handler) verifyLayer(ctx context.Context, fsLayer distribution.Descriptor) error {
	foreign := fsLayer.MediaType == schema2.MediaTypeForeignLayer
	hasURLs := len(fsLayer.URLs) != 0

	if foreign {
		// Clients fetch this layer from an external URL, so its local
		// presence is not checked — only that a URL exists.
		if !hasURLs {
			return errMissingURL
		}
		return nil
	}

	if hasURLs {
		return errUnexpectedURL
	}

	desc, err := h.repo.Blobs(ctx).Stat(ctx, fsLayer.Digest)
	if err != nil {
		return err
	}

	if desc.Size != fsLayer.Size {
		return ErrManifestBlobBadSize{
			Digest:         fsLayer.Digest,
			ActualSize:     desc.Size,
			SizeInManifest: fsLayer.Size,
		}
	}

	return nil
}

func (h *manifestSchema2Handler) Verify(ctx context.Context, skipDependencyVerification bool) error {
var errs distribution.ErrManifestVerification

Expand All @@ -82,35 +125,9 @@ func (h *manifestSchema2Handler) Verify(ctx context.Context, skipDependencyVerif
// and since we use pullthroughBlobStore all the layer existence checks will be
// successful. This means that the docker client will not attempt to send them
// to us as it will assume that the registry has them.
repo := h.repo

target := h.manifest.Target()
_, err := repo.Blobs(ctx).Stat(ctx, target.Digest)
if err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}

// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest})
}

for _, fsLayer := range h.manifest.References() {
var err error
if fsLayer.MediaType != schema2.MediaTypeForeignLayer {
if len(fsLayer.URLs) == 0 {
_, err = repo.Blobs(ctx).Stat(ctx, fsLayer.Digest)
} else {
err = errUnexpectedURL
}
} else {
// Clients download this layer from an external URL, so do not check for
// its presence.
if len(fsLayer.URLs) == 0 {
err = errMissingURL
}
}
if err != nil {
if err := h.verifyLayer(ctx, fsLayer); err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
continue
Expand Down
22 changes: 22 additions & 0 deletions pkg/dockerregistry/server/manifestservice.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,20 @@ import (
quotautil "github.com/openshift/origin/pkg/quota/util"
)

// ErrManifestBlobBadSize is returned when the blob size recorded in a
// manifest does not match the blob's actual size. docker/distribution
// performs no such check itself and therefore defines no error for it.
type ErrManifestBlobBadSize struct {
	Digest         digest.Digest
	ActualSize     int64
	SizeInManifest int64
}

// Error implements the error interface.
func (err ErrManifestBlobBadSize) Error() string {
	return fmt.Sprintf(
		"the blob %s has the size (%d) different from the one specified in the manifest (%d)",
		err.Digest, err.ActualSize, err.SizeInManifest,
	)
}

var _ distribution.ManifestService = &manifestService{}

type manifestService struct {
Expand Down Expand Up @@ -104,6 +118,7 @@ func (m *manifestService) Put(ctx context.Context, manifest distribution.Manifes
if err != nil {
return "", regapi.ErrorCodeManifestInvalid.WithDetail(err)
}

mediaType, payload, _, err := mh.Payload()
if err != nil {
return "", regapi.ErrorCodeManifestInvalid.WithDetail(err)
Expand Down Expand Up @@ -134,6 +149,11 @@ func (m *manifestService) Put(ctx context.Context, manifest distribution.Manifes
return "", err
}

layerOrder, layers, err := mh.Layers(ctx)
if err != nil {
return "", err
}

// Upload to openshift
ism := imageapiv1.ImageStreamMapping{
ObjectMeta: metav1.ObjectMeta{
Expand All @@ -146,12 +166,14 @@ func (m *manifestService) Put(ctx context.Context, manifest distribution.Manifes
Annotations: map[string]string{
imageapi.ManagedByOpenShiftAnnotation: "true",
imageapi.ImageManifestBlobStoredAnnotation: "true",
imageapi.DockerImageLayersOrderAnnotation: layerOrder,
},
},
DockerImageReference: fmt.Sprintf("%s/%s/%s@%s", m.repo.config.registryAddr, m.repo.namespace, m.repo.name, dgst.String()),
DockerImageManifest: string(payload),
DockerImageManifestMediaType: mediaType,
DockerImageConfig: string(config),
DockerImageLayers: layers,
},
}

Expand Down
Loading