diff --git a/.generated_docs b/.generated_docs index 59ea8bcd973f6..8d5233ff08afb 100644 --- a/.generated_docs +++ b/.generated_docs @@ -43,6 +43,11 @@ docs/man/man1/kubectl-port-forward.1 docs/man/man1/kubectl-proxy.1 docs/man/man1/kubectl-replace.1 docs/man/man1/kubectl-rolling-update.1 +docs/man/man1/kubectl-rollout-history.1 +docs/man/man1/kubectl-rollout-pause.1 +docs/man/man1/kubectl-rollout-resume.1 +docs/man/man1/kubectl-rollout-undo.1 +docs/man/man1/kubectl-rollout.1 docs/man/man1/kubectl-run.1 docs/man/man1/kubectl-scale.1 docs/man/man1/kubectl-stop.1 @@ -88,6 +93,11 @@ docs/user-guide/kubectl/kubectl_port-forward.md docs/user-guide/kubectl/kubectl_proxy.md docs/user-guide/kubectl/kubectl_replace.md docs/user-guide/kubectl/kubectl_rolling-update.md +docs/user-guide/kubectl/kubectl_rollout.md +docs/user-guide/kubectl/kubectl_rollout_history.md +docs/user-guide/kubectl/kubectl_rollout_pause.md +docs/user-guide/kubectl/kubectl_rollout_resume.md +docs/user-guide/kubectl/kubectl_rollout_undo.md docs/user-guide/kubectl/kubectl_run.md docs/user-guide/kubectl/kubectl_scale.md docs/user-guide/kubectl/kubectl_uncordon.md diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index ec321004a85e7..ef9c747995cd7 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -47,6 +47,10 @@ "Comment": "v0.7.4-6-g5d54e27", "Rev": "5d54e27f1764a0309eafe12c9df7bac03f241646" }, + { + "ImportPath": "github.com/armon/go-metrics", + "Rev": "345426c77237ece5dab0e1605c3e4b35c3f54757" + }, { "ImportPath": "github.com/aws/aws-sdk-go/aws", "Comment": "v1.0.7", @@ -128,12 +132,8 @@ }, { "ImportPath": "github.com/boltdb/bolt", - "Comment": "v1.0-119-g90fef38", - "Rev": "90fef389f98027ca55594edd7dbd6e7f3926fdad" - }, - { - "ImportPath": "github.com/bradfitz/http2", - "Rev": "3e36af6d3af0e56fa3da71099f864933dea3d9fb" + "Comment": "v1.1.0-65-gee4a088", + "Rev": "ee4a0888a9abe7eefe5a0992ca4cb06864839873" }, { "ImportPath": "github.com/camlistore/go4/errorutil", @@ -146,123 +146,123 @@ }, { "ImportPath": "github.com/coreos/etcd/client", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/discovery", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/error", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/etcdserver", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/crc", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/httputil", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": 
"github.com/coreos/etcd/pkg/idutil", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/ioutil", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/netutil", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/pbutil", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/runtime", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/timeutil", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/pkg/wait", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/raft", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/rafthttp", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/snap", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/storage", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/store", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/version", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": "e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/etcd/wal", - "Comment": "v2.2.2-4-ge0c7768", - "Rev": 
"e0c7768f94cdc268b2fce31ada1dea823f11f505" + "Comment": "v2.2.5", + "Rev": "bc9ddf260115d2680191c46977ae72b837785472" }, { "ImportPath": "github.com/coreos/go-etcd/etcd", @@ -336,8 +336,8 @@ }, { "ImportPath": "github.com/coreos/rkt/api/v1alpha", - "Comment": "v0.13.0-98-gddfa976", - "Rev": "ddfa97689c1f8e89aff51368300c34da4c74091b" + "Comment": "v1.0.0", + "Rev": "1ddc36601c8688ff207210bc9ecbf973d09573fa" }, { "ImportPath": "github.com/cpuguy83/go-md2man/md2man", @@ -392,9 +392,14 @@ "Comment": "v1.4.1-4831-g0f5c9d3", "Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d" }, + { + "ImportPath": "github.com/docker/go-units", + "Comment": "v0.1.0-21-g0bbddae", + "Rev": "0bbddae09c5a5419a8c6dcdd7ff90da3d450393b" + }, { "ImportPath": "github.com/docker/spdystream", - "Rev": "c33989bcb56748d2473194d11f8ac3fc563688eb" + "Rev": "106e140db2cb50923efe088bf2906b2ee5a45fec" }, { "ImportPath": "github.com/elazarl/go-bindata-assetfs", @@ -416,8 +421,7 @@ }, { "ImportPath": "github.com/fsouza/go-dockerclient", - "Comment": "0.2.1-728-g1399676", - "Rev": "299d728486342c894e7fafd68e3a4b89623bef1d" + "Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2" }, { "ImportPath": "github.com/garyburd/redigo/internal", @@ -443,118 +447,118 @@ }, { "ImportPath": "github.com/gogo/protobuf/gogoproto", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/description", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/embedcheck", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/enumstringer", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/equal", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/face", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/gostring", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/grpc", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/marshalto", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": 
"82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/populate", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/size", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/stringer", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/testgen", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/union", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/plugin/unmarshal", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/proto", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/sortkeys", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/gogo/protobuf/vanity", - "Comment": "v0.1-108-g9dc5109", - "Rev": "9dc510915846dd5a05607d3b5bf41f5ca5cce972" + "Comment": "v0.1-125-g82d16f7", + "Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" }, { "ImportPath": "github.com/golang/glog", @@ -566,7 +570,7 @@ }, { "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "7f07925444bb51fa4cf9dfe6f7661876f8852275" + "Rev": "b982704f8bb716bb608144408cff30e15fbde841" }, { "ImportPath": "github.com/google/btree", @@ -574,93 +578,93 @@ }, { "ImportPath": "github.com/google/cadvisor/api", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": 
"49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/healthz", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/http", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/pages", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/validate", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "v0.20.1", - "Rev": "634965abc45557ed03c268bb193a00cfcbedbd32" + "Comment": "v0.21.1", + "Rev": 
"49a08d5139ae0111757a110ea3b55fe9a607873d" }, { "ImportPath": "github.com/google/gofuzz", @@ -674,6 +678,18 @@ "ImportPath": "github.com/gorilla/mux", "Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5" }, + { + "ImportPath": "github.com/hashicorp/go-msgpack/codec", + "Rev": "fa3f63826f7c23912c15263591e65d54d080b458" + }, + { + "ImportPath": "github.com/hashicorp/raft", + "Rev": "057b893fd996696719e98b6c44649ea14968c811" + }, + { + "ImportPath": "github.com/hashicorp/raft-boltdb", + "Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee" + }, { "ImportPath": "github.com/imdario/mergo", "Comment": "0.1.3-8-g6633656", @@ -685,8 +701,33 @@ }, { "ImportPath": "github.com/influxdb/influxdb/client", - "Comment": "v0.8.8", - "Rev": "afde71eb1740fd763ab9450e1f700ba0e53c36d0" + "Comment": "v0.9.2.1", + "Rev": "b237c68bab4756507baf6840023be103853e77db" + }, + { + "ImportPath": "github.com/influxdb/influxdb/influxql", + "Comment": "v0.9.2.1", + "Rev": "b237c68bab4756507baf6840023be103853e77db" + }, + { + "ImportPath": "github.com/influxdb/influxdb/meta", + "Comment": "v0.9.2.1", + "Rev": "b237c68bab4756507baf6840023be103853e77db" + }, + { + "ImportPath": "github.com/influxdb/influxdb/snapshot", + "Comment": "v0.9.2.1", + "Rev": "b237c68bab4756507baf6840023be103853e77db" + }, + { + "ImportPath": "github.com/influxdb/influxdb/toml", + "Comment": "v0.9.2.1", + "Rev": "b237c68bab4756507baf6840023be103853e77db" + }, + { + "ImportPath": "github.com/influxdb/influxdb/tsdb", + "Comment": "v0.9.2.1", + "Rev": "b237c68bab4756507baf6840023be103853e77db" }, { "ImportPath": "github.com/jmespath/go-jmespath", @@ -716,48 +757,53 @@ }, { "ImportPath": "github.com/mesos/mesos-go/auth", - "Comment": "v0.0.2-5-ged907b1", - "Rev": "ed907b10717e66325cf2894eb90a0553a89fcb11" + "Comment": "before-0.26-protos-14-g4a7554a", + "Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae" }, { "ImportPath": "github.com/mesos/mesos-go/detector", - "Comment": "v0.0.2-5-ged907b1", - "Rev": "ed907b10717e66325cf2894eb90a0553a89fcb11" + "Comment": "before-0.26-protos-14-g4a7554a", + "Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae" }, { "ImportPath": "github.com/mesos/mesos-go/executor", - "Comment": "v0.0.2-5-ged907b1", - "Rev": "ed907b10717e66325cf2894eb90a0553a89fcb11" + "Comment": "before-0.26-protos-14-g4a7554a", + "Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae" }, { "ImportPath": "github.com/mesos/mesos-go/mesosproto", - "Comment": "v0.0.2-5-ged907b1", - "Rev": "ed907b10717e66325cf2894eb90a0553a89fcb11" + "Comment": "before-0.26-protos-14-g4a7554a", + "Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae" }, { "ImportPath": "github.com/mesos/mesos-go/mesosutil", - "Comment": "v0.0.2-5-ged907b1", - "Rev": "ed907b10717e66325cf2894eb90a0553a89fcb11" + "Comment": "before-0.26-protos-14-g4a7554a", + "Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae" }, { "ImportPath": "github.com/mesos/mesos-go/messenger", - "Comment": "v0.0.2-5-ged907b1", - "Rev": "ed907b10717e66325cf2894eb90a0553a89fcb11" + "Comment": "before-0.26-protos-14-g4a7554a", + "Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae" }, { "ImportPath": "github.com/mesos/mesos-go/scheduler", - "Comment": "v0.0.2-5-ged907b1", - "Rev": "ed907b10717e66325cf2894eb90a0553a89fcb11" + "Comment": "before-0.26-protos-14-g4a7554a", + "Rev": "4a7554aad396c70d19c9fc3469980547c9f117ae" }, { "ImportPath": "github.com/mesos/mesos-go/upid", - "Comment": "v0.0.2-5-ged907b1", - "Rev": "ed907b10717e66325cf2894eb90a0553a89fcb11" + "Comment": "before-0.26-protos-14-g4a7554a", + "Rev": 
"4a7554aad396c70d19c9fc3469980547c9f117ae" }, { "ImportPath": "github.com/miekg/dns", "Rev": "3f504e8dabd5d562e997d19ce0200aa41973e1b2" }, + { + "ImportPath": "github.com/mistifyio/go-zfs", + "Comment": "v2.1.1-5-g1b4ae6f", + "Rev": "1b4ae6fb4e77b095934d4430860ff202060169f8" + }, { "ImportPath": "github.com/mitchellh/mapstructure", "Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf" @@ -773,8 +819,8 @@ }, { "ImportPath": "github.com/onsi/ginkgo", - "Comment": "v1.2.0-6-gd981d36", - "Rev": "d981d36e9884231afa909627b9c275e4ba678f90" + "Comment": "v1.2.0-42-g07d85e6", + "Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b" }, { "ImportPath": "github.com/onsi/gomega", @@ -783,13 +829,17 @@ }, { "ImportPath": "github.com/opencontainers/runc/libcontainer", - "Comment": "v0.0.5", - "Rev": "97bc9a7faf3dd660d9be90a2880b2e37f3cdbf38" + "Comment": "v0.0.7", + "Rev": "7ca2aa4873aea7cb4265b1726acb24b90d8726c6" }, { "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, + { + "ImportPath": "github.com/pmezard/go-difflib/difflib", + "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" + }, { "ImportPath": "github.com/prometheus/client_golang/prometheus", "Comment": "0.7.0-39-g3b78d7a", @@ -851,28 +901,24 @@ "ImportPath": "github.com/spf13/pflag", "Rev": "08b1a584251b5b62f458943640fc8ebd4d50aaa5" }, - { - "ImportPath": "github.com/ssoroka/ttime", - "Rev": "881f221816e0300201ac24f6c31e54e3bb958de7" - }, { "ImportPath": "github.com/stretchr/objx", "Rev": "d40df0cc104c06eae2dfe03d7dddb83802d52f9a" }, { "ImportPath": "github.com/stretchr/testify/assert", - "Comment": "v1.0-17-g089c718", - "Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff" + "Comment": "v1.0-88-ge3a8ff8", + "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059" }, { "ImportPath": "github.com/stretchr/testify/mock", - "Comment": "v1.0-17-g089c718", - "Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff" + "Comment": "v1.0-88-ge3a8ff8", + "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059" }, { "ImportPath": "github.com/stretchr/testify/require", - "Comment": "v1.0-17-g089c718", - "Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff" + "Comment": "v1.0-88-ge3a8ff8", + "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059" }, { "ImportPath": "github.com/syndtr/gocapability/capability", @@ -880,7 +926,7 @@ }, { "ImportPath": "github.com/ugorji/go/codec", - "Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065" + "Rev": "4a79e5b7b21e51ae8d61641bca20399b79735a32" }, { "ImportPath": "github.com/vishvananda/netlink", @@ -919,6 +965,10 @@ "ImportPath": "golang.org/x/net/html", "Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e" }, + { + "ImportPath": "golang.org/x/net/http2", + "Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e" + }, { "ImportPath": "golang.org/x/net/internal/timeseries", "Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e" @@ -977,7 +1027,7 @@ }, { "ImportPath": "google.golang.org/grpc", - "Rev": "4bd040ce23a624ff9a1d07b0e729ee189bddd51c" + "Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb" }, { "ImportPath": "gopkg.in/natefinch/lumberjack.v2", @@ -990,8 +1040,8 @@ }, { "ImportPath": "k8s.io/heapster/api/v1/types", - "Comment": "v0.17.0-75-g0e1b652", - "Rev": "0e1b652781812dee2c51c75180fc590223e0b9c6" + "Comment": "v0.19.1-44-g0991ac5", + "Rev": "0991ac528ea24aae194e45d6dcf01896cb42cbea" }, { "ImportPath": "speter.net/go/exp/math/dec/inf", diff --git a/Godeps/LICENSES.md b/Godeps/LICENSES.md index 6877baf48e4be..448927f8de5c5 100644 --- a/Godeps/LICENSES.md +++ b/Godeps/LICENSES.md @@ -8,11 +8,11 @@ bitbucket.org/ww/goautoneg | spdxBSD3 
github.com/abbot/go-http-auth | Apache-2 github.com/appc/cni | Apache-2 github.com/appc/spec | Apache-2 +github.com/armon/go-metrics | MITname github.com/aws/aws-sdk-go | Apache-2 github.com/beorn7/perks/quantile | MIT? github.com/blang/semver | MITname github.com/boltdb/bolt | MITname -github.com/bradfitz/http2 | BSDlikeRef github.com/camlistore/go4 | Apache-2 github.com/ClusterHQ/flocker-go | UNKNOWN github.com/codegangsta/negroni | MITname @@ -22,13 +22,14 @@ github.com/coreos/go-oidc | Apache-2 github.com/coreos/go-semver | Apache-2 github.com/coreos/go-systemd | Apache-2 github.com/coreos/pkg | Apache-2 -github.com/coreos/rkt | Apache-2 +github.com/coreos/rkt | MITname github.com/cpuguy83/go-md2man | MITname github.com/davecgh/go-spew | MIToldwithoutSellandNoDocumentationRequi github.com/daviddengcn/go-colortext | BSD? github.com/dgrijalva/jwt-go | spdxMIT github.com/docker/docker | Apache-2 github.com/docker/docker/pkg/symlink | spdxBSD3 +github.com/docker/go-units | Apache-2 github.com/docker/spdystream | SeeFile github.com/elazarl/go-bindata-assetfs | spdxBSD2 github.com/elazarl/goproxy | BSDWarr @@ -49,6 +50,9 @@ github.com/google/cadvisor | Apache-2 github.com/google/gofuzz | Apache-2 github.com/gorilla/context | spdxBSD3 github.com/gorilla/mux | spdxBSD3 +github.com/hashicorp/go-msgpack | spdxBSD3 +github.com/hashicorp/raft | IntelPart08 +github.com/hashicorp/raft-boltdb | IntelPart08 github.com/imdario/mergo | spdxBSD3 github.com/inconshreveable/mousetrap | Apache-2 github.com/influxdb/influxdb | MITname @@ -60,6 +64,7 @@ github.com/kr/pty | spdxMIT github.com/matttproud/golang_protobuf_extensions | Apache-2 github.com/mesos/mesos-go | Apache-2 github.com/miekg/dns | spdxBSD3 +github.com/mistifyio/go-zfs | Apache-2 github.com/mitchellh/mapstructure | MITname github.com/mvdan/xurls | spdxBSD3 github.com/mxk/go-flowrate | spdxBSD3 @@ -67,6 +72,7 @@ github.com/onsi/ginkgo | spdxMIT github.com/onsi/gomega | spdxMIT github.com/opencontainers/runc | Apache-2 github.com/pborman/uuid | spdxBSD3 +github.com/pmezard/go-difflib | BSD3 github.com/prometheus/client_golang | Apache-2 github.com/prometheus/client_model | Apache-2 github.com/prometheus/common/expfmt | Apache-2 @@ -82,9 +88,8 @@ github.com/Sirupsen/logrus | MITname github.com/skynetservices/skydns | MITname github.com/spf13/cobra | Apache-2 github.com/spf13/pflag | spdxBSD3 -github.com/ssoroka/ttime | UNKNOWN github.com/stretchr/objx | MIT? -github.com/stretchr/testify | MIT? 
+github.com/stretchr/testify | spdxMIT github.com/syndtr/gocapability | spdxBSD2 github.com/ugorji/go | MITname github.com/vishvananda/netlink | Apache-2 diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/.gitignore b/Godeps/_workspace/src/github.com/armon/go-metrics/.gitignore new file mode 100644 index 0000000000000..00268614f0456 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/LICENSE b/Godeps/_workspace/src/github.com/armon/go-metrics/LICENSE new file mode 100644 index 0000000000000..106569e542b0d --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/README.md b/Godeps/_workspace/src/github.com/armon/go-metrics/README.md new file mode 100644 index 0000000000000..7b6f23e29f839 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/README.md @@ -0,0 +1,71 @@ +go-metrics +========== + +This library provides a `metrics` package which can be used to instrument code, +expose application metrics, and profile runtime performance in a flexible manner. + +Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) + +Sinks +===== + +The `metrics` package makes use of a `MetricSink` interface to support delivery +to any type of backend. Currently the following sinks are provided: + +* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) +* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) +* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) +* InmemSink : Provides in-memory aggregation, can be used to export stats +* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. +* BlackholeSink : Sinks to nowhere + +In addition to the sinks, the `InmemSignal` can be used to catch a signal, +and dump a formatted output of recent metrics. 
For example, when a process gets +a SIGUSR1, it can dump to stderr recent performance metrics for debugging. + +Examples +======== + +Here is an example of using the package: + + func SlowMethod() { + // Profiling the runtime of a method + defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) + } + + // Configure a statsite sink as the global metrics sink + sink, _ := metrics.NewStatsiteSink("statsite:8125") + metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) + + // Emit a Key/Value pair + metrics.EmitKey([]string{"questions", "meaning of life"}, 42) + + +Here is an example of setting up a signal handler: + + // Setup the inmem sink and signal handler + inm := metrics.NewInmemSink(10*time.Second, time.Minute) + sig := metrics.DefaultInmemSignal(inm) + metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) + + // Run some code + inm.SetGauge([]string{"foo"}, 42) + inm.EmitKey([]string{"bar"}, 30) + + inm.IncrCounter([]string{"baz"}, 42) + inm.IncrCounter([]string{"baz"}, 1) + inm.IncrCounter([]string{"baz"}, 80) + + inm.AddSample([]string{"method", "wow"}, 42) + inm.AddSample([]string{"method", "wow"}, 100) + inm.AddSample([]string{"method", "wow"}, 22) + + .... + +When a signal comes in, output like the following will be dumped to stderr: + + [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 + [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 + [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 + [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 + diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/const_unix.go b/Godeps/_workspace/src/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 0000000000000..31098dd57e555 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/const_windows.go b/Godeps/_workspace/src/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 0000000000000..38136af3e4237 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/datadog/dogstatsd.go b/Godeps/_workspace/src/github.com/armon/go-metrics/datadog/dogstatsd.go new file mode 100644 index 0000000000000..aaba9fe0e2244 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/datadog/dogstatsd.go @@ -0,0 +1,125 @@ +package datadog + +import ( + "fmt" + "strings" + + "github.com/DataDog/datadog-go/statsd" +) + +// DogStatsdSink provides a MetricSink that can be used +// with a dogstatsd server.
It utilizes the Dogstatsd client at github.com/DataDog/datadog-go/statsd +type DogStatsdSink struct { + client *statsd.Client + hostName string + propagateHostname bool +} + +// NewDogStatsdSink is used to create a new DogStatsdSink with sane defaults +func NewDogStatsdSink(addr string, hostName string) (*DogStatsdSink, error) { + client, err := statsd.New(addr) + if err != nil { + return nil, err + } + sink := &DogStatsdSink{ + client: client, + hostName: hostName, + propagateHostname: false, + } + return sink, nil +} + +// SetTags sets common tags on the Dogstatsd Client that will be sent +// along with all dogstatsd packets. +// Ref: http://docs.datadoghq.com/guides/dogstatsd/#tags +func (s *DogStatsdSink) SetTags(tags []string) { + s.client.Tags = tags +} + +// EnableHostnamePropagation forces a Dogstatsd `host` tag with the value specified by `s.HostName` +// Since the go-metrics package has its own mechanism for attaching a hostname to metrics, +// setting the `propagateHostname` flag ensures that `s.HostName` overrides the host tag naively set by the DogStatsd server +func (s *DogStatsdSink) EnableHostNamePropagation() { + s.propagateHostname = true +} + +func (s *DogStatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +func (s *DogStatsdSink) parseKey(key []string) ([]string, []string) { + // Since DogStatsd supports dimensionality via tags on metric keys, this sink's approach is to splice the hostname out of the key in favor of a `host` tag + // The `host` tag is either forced here, or set downstream by the DogStatsd server + + var tags []string + hostName := s.hostName + + //Splice the hostname out of the key + for i, el := range key { + if el == hostName { + key = append(key[:i], key[i+1:]...) + } + } + + if s.propagateHostname { + tags = append(tags, fmt.Sprintf("host:%s", hostName)) + } + return key, tags +} + +// Implementation of methods in the MetricSink interface + +func (s *DogStatsdSink) SetGauge(key []string, val float32) { + s.SetGaugeWithTags(key, val, []string{}) +} + +func (s *DogStatsdSink) IncrCounter(key []string, val float32) { + s.IncrCounterWithTags(key, val, []string{}) +} + +// EmitKey is not implemented since DogStatsd does not provide a metric type that holds an +// arbitrary number of values +func (s *DogStatsdSink) EmitKey(key []string, val float32) { +} + +func (s *DogStatsdSink) AddSample(key []string, val float32) { + s.AddSampleWithTags(key, val, []string{}) +} + +// The following ...WithTags methods correspond to Datadog's Tag extension to Statsd. 
+// http://docs.datadoghq.com/guides/dogstatsd/#tags + +func (s *DogStatsdSink) SetGaugeWithTags(key []string, val float32, tags []string) { + flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags) + rate := 1.0 + s.client.Gauge(flatKey, float64(val), tags, rate) +} + +func (s *DogStatsdSink) IncrCounterWithTags(key []string, val float32, tags []string) { + flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags) + rate := 1.0 + s.client.Count(flatKey, int64(val), tags, rate) +} + +func (s *DogStatsdSink) AddSampleWithTags(key []string, val float32, tags []string) { + flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags) + rate := 1.0 + s.client.TimeInMilliseconds(flatKey, float64(val), tags, rate) +} + +func (s *DogStatsdSink) getFlatkeyAndCombinedTags(key []string, tags []string) (flattenedKey string, combinedTags []string) { + key, hostTags := s.parseKey(key) + flatKey := s.flattenKey(key) + tags = append(tags, hostTags...) + return flatKey, tags +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem.go new file mode 100644 index 0000000000000..da503296060ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,241 @@ +package metrics + +import ( + "fmt" + "math" + "strings" + "sync" + "time" +) + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information. +type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how long metric intervals are kept + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval.
+ maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]float32 + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]*AggregateSample + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]*AggregateSample +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]float32), + Points: make(map[string][]float32), + Counters: make(map[string]*AggregateSample), + Samples: make(map[string]*AggregateSample), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Sum float64 // The sum of values + SumSq float64 // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value + LastUpdated time.Time // When value was last updated +} + +// Computes a Stddev of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Computes a mean of the values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } + a.LastUpdated = time.Now() +} + +func (a *AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) + } +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. 
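+// For example (illustrative values taken from the README above), +// NewInmemSink(10*time.Second, time.Minute) aggregates into ten-second +// buckets and retains maxIntervals = retain / interval = 6 of them.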
+func NewInmemSink(interval, retain time.Duration) *InmemSink { + i := &InmemSink{ + interval: interval, + retain: retain, + maxIntervals: int(retain / interval), + } + i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) + return i +} + +func (i *InmemSink) SetGauge(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + intv.Gauges[k] = val +} + +func (i *InmemSink) EmitKey(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + vals := intv.Points[k] + intv.Points[k] = append(vals, val) +} + +func (i *InmemSink) IncrCounter(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg := intv.Counters[k] + if agg == nil { + agg = &AggregateSample{} + intv.Counters[k] = agg + } + agg.Ingest(float64(val)) +} + +func (i *InmemSink) AddSample(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg := intv.Samples[k] + if agg == nil { + agg = &AggregateSample{} + intv.Samples[k] = agg + } + agg.Ingest(float64(val)) +} + +// Data is used to retrieve all the aggregated metrics +// Intervals may be in use, and a read lock should be acquired +func (i *InmemSink) Data() []*IntervalMetrics { + // Get the current interval, forces creation + i.getInterval() + + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + intervals := make([]*IntervalMetrics, len(i.intervals)) + copy(intervals, i.intervals) + return intervals +} + +func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + return nil +} + +func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Check for an existing interval + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + // Add the current interval + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + n++ + + // Truncate the intervals if they are too long + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// getInterval returns the current interval to write to +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + if m := i.getExistingInterval(intv); m != nil { + return m + } + return i.createInterval(intv) +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Replace(joined, " ", "_", -1) +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal.go new file mode 100644 index 0000000000000..95d08ee10f0b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,100 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan 
os.Signal + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr. Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for i := 0; i < len(data)-1; i++ { + intv := data[i] + intv.RLock() + for name, val := range intv.Gauges { + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for name, agg := range intv.Counters { + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg) + } + for name, agg := range intv.Samples { + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/metrics.go b/Godeps/_workspace/src/github.com/armon/go-metrics/metrics.go new file mode 100644 index 0000000000000..b818e4182c0cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,115 @@ +package metrics + +import ( + "runtime" + "time" +) + +func (m *Metrics) SetGauge(key []string, val float32) { + if m.HostName != "" && m.EnableHostname { + key = insert(0, m.HostName, key) + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.SetGauge(key, val) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.IncrCounter(key, val) +} + +func (m *Metrics) AddSample(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.AddSample(key, val) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + now := time.Now() + elapsed := now.Sub(start) + msec := 
float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) + m.sink.AddSample(key, msec) +} + +// Periodically collects runtime stats to publish +func (m *Metrics) collectStats() { + for { + time.Sleep(m.ProfileInterval) + m.emitRuntimeStats() + } +} + +// Emits various runtime statistics +func (m *Metrics) emitRuntimeStats() { + // Export number of Goroutines + numRoutines := runtime.NumGoroutine() + m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) + + // Export memory stats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) + m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) + m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) + m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) + m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num; i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Inserts a string value at an index into the slice +func insert(i int, v string, s []string) []string { + s = append(s, "") + copy(s[i+1:], s[i:]) + s[i] = v + return s +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/prometheus/prometheus.go b/Godeps/_workspace/src/github.com/armon/go-metrics/prometheus/prometheus.go new file mode 100644 index 0000000000000..362dbfb623d29 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/prometheus/prometheus.go @@ -0,0 +1,88 @@ +// +build go1.3 +package prometheus + +import ( + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type PrometheusSink struct { + mu sync.Mutex + gauges map[string]prometheus.Gauge + summaries map[string]prometheus.Summary + counters map[string]prometheus.Counter +} + +func NewPrometheusSink() (*PrometheusSink, error) { + return &PrometheusSink{ + gauges: make(map[string]prometheus.Gauge), + summaries: make(map[string]prometheus.Summary), + counters: make(map[string]prometheus.Counter), + }, nil +} + +func (p *PrometheusSink) flattenKey(parts []string) string { + joined := strings.Join(parts, "_") + joined = strings.Replace(joined, " ", "_", -1) + joined = strings.Replace(joined, ".", "_", -1) + joined = strings.Replace(joined, "-", "_", -1) + return joined +} + +func (p *PrometheusSink) SetGauge(parts []string, val float32) { + p.mu.Lock() + defer p.mu.Unlock() + key := p.flattenKey(parts) + g, ok := p.gauges[key] + if !ok { + g = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: key, + Help: key, + }) + prometheus.MustRegister(g) + p.gauges[key] = g + } + g.Set(float64(val)) +} + +func (p *PrometheusSink) AddSample(parts []string, val float32) { + p.mu.Lock() + defer p.mu.Unlock() + key := p.flattenKey(parts) + g, ok := p.summaries[key] + if !ok { + g = prometheus.NewSummary(prometheus.SummaryOpts{ + Name: key, + Help: key, + MaxAge: 10 * time.Second, + }) + prometheus.MustRegister(g) + p.summaries[key] = g + } + g.Observe(float64(val)) +} + +// EmitKey is not implemented.
Prometheus doesn’t offer a type for which an +// arbitrary number of values is retained, as Prometheus works with a pull +// model, rather than a push model. +func (p *PrometheusSink) EmitKey(key []string, val float32) { +} + +func (p *PrometheusSink) IncrCounter(parts []string, val float32) { + p.mu.Lock() + defer p.mu.Unlock() + key := p.flattenKey(parts) + g, ok := p.counters[key] + if !ok { + g = prometheus.NewCounter(prometheus.CounterOpts{ + Name: key, + Help: key, + }) + prometheus.MustRegister(g) + p.counters[key] = g + } + g.Add(float64(val)) +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/sink.go b/Godeps/_workspace/src/github.com/armon/go-metrics/sink.go new file mode 100644 index 0000000000000..0c240c2c47ee8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/sink.go @@ -0,0 +1,52 @@ +package metrics + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) +} + +// BlackholeSink is used to just blackhole messages +type BlackholeSink struct{} + +func (*BlackholeSink) SetGauge(key []string, val float32) {} +func (*BlackholeSink) EmitKey(key []string, val float32) {} +func (*BlackholeSink) IncrCounter(key []string, val float32) {} +func (*BlackholeSink) AddSample(key []string, val float32) {} + +// FanoutSink is used to fan out values to multiple sinks +type FanoutSink []MetricSink + +func (fh FanoutSink) SetGauge(key []string, val float32) { + for _, s := range fh { + s.SetGauge(key, val) + } +} + +func (fh FanoutSink) EmitKey(key []string, val float32) { + for _, s := range fh { + s.EmitKey(key, val) + } +} + +func (fh FanoutSink) IncrCounter(key []string, val float32) { + for _, s := range fh { + s.IncrCounter(key, val) + } +} + +func (fh FanoutSink) AddSample(key []string, val float32) { + for _, s := range fh { + s.AddSample(key, val) + } +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/start.go b/Godeps/_workspace/src/github.com/armon/go-metrics/start.go new file mode 100644 index 0000000000000..44113f100426b --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/start.go @@ -0,0 +1,95 @@ +package metrics + +import ( + "os" + "time" +) + +// Config is used to configure metrics settings +type Config struct { + ServiceName string // Prefixed with keys to separate services + HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers.
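+ // MeasureSince divides elapsed nanoseconds by this granularity; with + // the time.Millisecond default in DefaultConfig below, timers are + // reported in milliseconds.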
+ ProfileInterval time.Duration // Interval to profile runtime metrics +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit metrics +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink +} + +// Shared global metrics instance +var globalMetrics *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics = &Metrics{sink: &BlackholeSink{}} +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. +func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { + metrics, err := New(conf, sink) + if err == nil { + globalMetrics = metrics + } + return metrics, err +} + +// Proxy all the methods to the globalMetrics instance +func SetGauge(key []string, val float32) { + globalMetrics.SetGauge(key, val) +} + +func EmitKey(key []string, val float32) { + globalMetrics.EmitKey(key, val) +} + +func IncrCounter(key []string, val float32) { + globalMetrics.IncrCounter(key, val) +} + +func AddSample(key []string, val float32) { + globalMetrics.AddSample(key, val) +} + +func MeasureSince(key []string, start time.Time) { + globalMetrics.MeasureSince(key, start) +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsd.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsd.go new file mode 100644 index 0000000000000..65a5021a05743 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/statsd.go @@ -0,0 +1,154 @@ +package metrics + +import ( + "bytes" + "fmt" + "log" + "net" + "strings" + "time" +) + +const ( + // statsdMaxLen is the maximum size of a packet + // to send to statsd + statsdMaxLen = 1400 +) + +// StatsdSink provides a MetricSink that can be used +// with a statsite or statsd metrics server. It uses +// only UDP packets, while StatsiteSink uses TCP.
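+// Each metric is queued as a single statsd text line of the form +// "flattened.key:value|type", where the trailing type is g (gauge), +// kv (key/value), c (counter) or ms (timer sample), per the Sprintf +// calls below.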
+type StatsdSink struct { + addr string + metricQueue chan string +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsite.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsite.go new file mode 100644 index 0000000000000..68730139a73a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,142 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever. 
+ flushInterval = 100 * time.Millisecond +) + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile b/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile index cfbed514bbbc5..e035e63adcd7d 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile @@ -1,54 +1,18 @@ -TEST=. -BENCH=. -COVERPROFILE=/tmp/c.out BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" default: build -bench: - go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) - -# http://cloc.sourceforge.net/ -cloc: - @cloc --not-match-f='Makefile|_test.go' . - -cover: fmt - go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) . 
- go tool cover -html=$(COVERPROFILE) - rm $(COVERPROFILE) - -cpuprofile: fmt - @go test -c - @./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" # go get github.com/kisielk/errcheck errcheck: - @echo "=== errcheck ===" - @errcheck github.com/boltdb/bolt + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt -fmt: - @go fmt ./... - -get: - @go get -d ./... - -build: get - @mkdir -p bin - @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt - -test: fmt - @go get github.com/stretchr/testify/assert - @echo "=== TESTS ===" - @go test -v -cover -test.run=$(TEST) - @echo "" - @echo "" - @echo "=== CLI ===" - @go test -v -test.run=$(TEST) ./cmd/bolt - @echo "" - @echo "" - @echo "=== RACE DETECTOR ===" - @go test -v -race -test.run="TestSimulate_(100op|1000op)" +test: + @go test -v -cover . + @go test -v ./cmd/bolt -.PHONY: bench cloc cover cpuprofile fmt memprofile test +.PHONY: fmt test diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md b/Godeps/_workspace/src/github.com/boltdb/bolt/README.md index 00fad6afb8b57..82e85742c0413 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/README.md @@ -1,8 +1,8 @@ -Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png) +Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg) ==== -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] and -the [LMDB project][lmdb]. The goal of the project is to provide a simple, +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, fast, and reliable database for projects that don't require a full database server such as Postgres or MySQL. @@ -13,7 +13,6 @@ and setting values. That's it. [hyc_symas]: https://twitter.com/hyc_symas [lmdb]: http://symas.com/mdb/ - ## Project Status Bolt is stable and the API is fixed. Full unit test coverage and randomized @@ -22,6 +21,36 @@ Bolt is currently in high-load production environments serving databases as large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed services every day. 
+## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) ## Getting Started @@ -180,8 +209,8 @@ and then safely close your transaction if an error is returned. This is the recommended way to use Bolt transactions. However, sometimes you may want to manually start and end your transactions. -You can use the `Tx.Begin()` function directly but _please_ be sure to close the -transaction. +You can use the `Tx.Begin()` function directly but **please** be sure to close +the transaction. ```go // Start a writable transaction. @@ -256,7 +285,7 @@ db.View(func(tx *bolt.Tx) error { ``` The `Get()` function does not return an error because its operation is -guarenteed to work (unless there is some kind of system failure). If the key +guaranteed to work (unless there is some kind of system failure). If the key exists then it will return its byte slice value. If it doesn't exist then it will return `nil`. It's important to note that you can have a zero-length value set to a key which is different than the key not existing. @@ -268,6 +297,49 @@ transaction is open. If you need to use a value outside of the transaction then you must use `copy()` to copy it to another byte slice. + +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. + b := tx.Bucket([]byte("users")) + + // Generate ID for the user. + // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. + id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v.
+func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + ### Iterating over keys Bolt stores its keys in byte-sorted order within a bucket. This makes sequential @@ -276,7 +348,9 @@ iteration over these keys extremely fast. To iterate over keys we'll use a ```go db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys b := tx.Bucket([]byte("MyBucket")) + c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { @@ -300,10 +374,15 @@ Next() Move to the next key. Prev() Move to the previous key. ``` -When you have iterated to the end of the cursor then `Next()` will return `nil`. -You must seek to a position using `First()`, `Last()`, or `Seek()` before -calling `Next()` or `Prev()`. If you do not seek to a position then these -functions will return `nil`. +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. #### Prefix scans @@ -312,6 +391,7 @@ To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: ```go db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys c := tx.Bucket([]byte("MyBucket")).Cursor() prefix := []byte("1234") @@ -331,7 +411,7 @@ date range like this: ```go db.View(func(tx *bolt.Tx) error { - // Assume our events bucket has RFC3339 encoded time keys. + // Assume our events bucket exists and has RFC3339 encoded time keys. c := tx.Bucket([]byte("Events")).Cursor() // Our time range spans the 90's decade. @@ -355,7 +435,9 @@ all the keys in a bucket: ```go db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys b := tx.Bucket([]byte("MyBucket")) + b.ForEach(func(k, v []byte) error { fmt.Printf("key=%s, value=%s\n", k, v) return nil @@ -382,8 +464,11 @@ func (*Bucket) DeleteBucket(key []byte) error Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()` function to write a consistent view of the database to a writer. If you call this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. It will also use `O_DIRECT` when available -to prevent page cache trashing. +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. One common use case is to back up over HTTP so you can use tools like `cURL` to do database backups: @@ -465,6 +550,84 @@ if err != nil { } ``` +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with an initializing +constructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS requires extra permissions or cleanup from using this method.
+ +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud; these snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` ## Resources @@ -500,7 +663,7 @@ they are libraries bundled into the application, however, their underlying structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes random writes by using a write ahead log and multi-tiered, sorted files called SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade offs. +have trade-offs. If you require a high random write throughput (>10,000 w/sec) or you need to use spinning disks then LevelDB could be a good choice. If your application is @@ -536,9 +699,8 @@ It's important to pick the right tool for the job and Bolt is no exception. Here are a few things to note when evaluating and using Bolt: * Bolt is good for read intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can add a write-ahead log or - [transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt - to mitigate this issue. + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. * Bolt uses a B+tree internally so there can be a lot of random page access. SSDs provide a significant performance boost over spinning disks. @@ -568,11 +730,13 @@ Here are a few things to note when evaluating and using Bolt: can in memory and will release memory as needed to other processes. This means that Bolt can show very high memory usage when working with large databases. However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM. + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bit systems.
* The data structures in the Bolt database are memory mapped so the data file will be endian specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most + little endian machine to a big endian machine and have it work. For most users this is not a concern since most modern CPUs are little endian. * Because of the way pages are laid out on disk, Bolt cannot truncate data files @@ -587,6 +751,56 @@ Here are a few things to note when evaluating and using Bolt: [page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 +## Reading the Source + +Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. + +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where the key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair. During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two-phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request.
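As a companion to the caveat above recommending `DB.Batch()` for slow random writes, here is a minimal, hedged sketch of coalescing concurrent writes with it. The bucket and key names are illustrative placeholders, error handling is abbreviated, and the function must be idempotent because `Batch` may run it more than once:

```go
// Safe to call from many goroutines; Bolt may coalesce concurrent
// Batch calls into a single read-write transaction.
err := db.Batch(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucketIfNotExists([]byte("events"))
	if err != nil {
		return err
	}
	// Idempotent: re-running this function yields the same state.
	return b.Put([]byte("2016-02-01T00:00:00Z"), []byte("signup"))
})
if err != nil {
	log.Fatal(err)
}
```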
+ + ## Other Projects Using Bolt Below is a list of public, open source projects that use Bolt: @@ -597,25 +811,30 @@ Below is a list of public, open source projects that use Bolt: * [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. * [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. * [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. -* [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". * [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. * [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. * [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database. -* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read. -* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics. +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. * [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistant, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. 
It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go deleted file mode 100644 index 84acae6bbf089..0000000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go +++ /dev/null @@ -1,138 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// Batch calls fn as part of a batch. It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. - db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. - if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. 
- c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - if c.err != nil { - c.err <- err - } - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. -var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 0000000000000..6d2309352e063 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,9 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go index e9d1c907b63a5..2b67666140906 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go @@ -4,8 +4,6 @@ import ( "syscall" ) -var odirect = syscall.O_DIRECT - // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { return syscall.Fdatasync(int(db.file.Fd())) diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go index 7c1bef1a4f404..7058c3d734ef7 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go @@ -11,8 +11,6 @@ const ( msInvalidate // invalidate cached data ) -var odirect int - func msync(db *DB) error { _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) if errno != 0 { diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go new file mode 100644 index 0000000000000..8351e129f6a37 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go @@ -0,0 +1,9 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go new file mode 100644 index 0000000000000..f4dd26bbba7c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go @@ -0,0 +1,9 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. 
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go index 6eef6b2203524..4b0723aac2370 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go @@ -46,19 +46,8 @@ func funlock(f *os.File) error { // mmap memory maps a DB's data file. func mmap(db *DB, sz int) error { - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) if err != nil { return err } diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go index f480ee76d1f1e..1c4e48d63a021 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -1,4 +1,3 @@ - package bolt import ( @@ -7,6 +6,7 @@ import ( "syscall" "time" "unsafe" + "golang.org/x/sys/unix" ) @@ -56,19 +56,8 @@ func funlock(f *os.File) error { // mmap memory maps a DB's data file. func mmap(db *DB, sz int) error { - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - // Map the data file to memory. 
- b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) if err != nil { return err } diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go index 8b782be5f9eff..91c4968f6a179 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go @@ -8,7 +8,37 @@ import ( "unsafe" ) -var odirect int +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { @@ -16,13 +46,37 @@ func fdatasync(db *DB) error { } // flock acquires an advisory lock on a file descriptor. -func flock(f *os.File, _ bool, _ time.Duration) error { - return nil +func flock(f *os.File, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(f.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } } // funlock releases an advisory lock on a file descriptor. func funlock(f *os.File) error { - return nil + return unlockFileEx(syscall.Handle(f.Fd()), 0, 1, 0, &syscall.Overlapped{}) } // mmap memory maps a DB's data file. diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go b/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go index 8db89776fe660..f50442523c384 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go @@ -2,8 +2,6 @@ package bolt -var odirect int - // fdatasync flushes written data to a file descriptor. 
func fdatasync(db *DB) error { return db.file.Sync() diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go index 6766992100f18..d2f8c524e42fb 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go @@ -11,7 +11,7 @@ const ( MaxKeySize = 32768 // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = 4294967295 + MaxValueSize = (1 << 31) - 2 ) const ( @@ -99,6 +99,7 @@ func (b *Bucket) Cursor() *Cursor { // Bucket retrieves a nested bucket by name. // Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) Bucket(name []byte) *Bucket { if b.buckets != nil { if child := b.buckets[string(name)]; child != nil { @@ -148,6 +149,7 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // CreateBucket creates a new bucket at the given key and returns the new bucket. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if b.tx.db == nil { return nil, ErrTxClosed @@ -192,6 +194,7 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { child, err := b.CreateBucket(key) if err == ErrBucketExists { @@ -270,6 +273,7 @@ func (b *Bucket) Get(key []byte) []byte { // Put sets the value for a key in the bucket. // If the key exists then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. func (b *Bucket) Put(key []byte, value []byte) error { if b.tx.db == nil { @@ -346,7 +350,8 @@ func (b *Bucket) NextSequence() (uint64, error) { // ForEach executes a function for each key/value pair in a bucket. // If the provided function returns an error then the iteration is stopped and -the error is returned to the caller. +the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior.
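//
// For orientation, a hedged read-only iteration sketch (the bucket name
// "MyBucket" is an illustrative placeholder; per the note above, the
// callback must not mutate the bucket):
//
//	err := db.View(func(tx *Tx) error {
//		return tx.Bucket([]byte("MyBucket")).ForEach(func(k, v []byte) error {
//			fmt.Printf("key=%s, value=%s\n", k, v)
//			return nil
//		})
//	})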
func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { return ErrTxClosed diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go index c41ebe404d959..b96e6f73511f3 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go @@ -825,7 +825,10 @@ func (cmd *StatsCommand) Run(args ...string) error { fmt.Fprintln(cmd.Stdout, "Bucket statistics") fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN) - percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) + percentage = 0 + if s.BucketN != 0 { + percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) + } fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) percentage = 0 if s.LeafInuse != 0 { diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go index 006c54889e8ab..1be9f35e3ef85 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go @@ -34,6 +34,13 @@ func (c *Cursor) First() (key []byte, value []byte) { p, n := c.bucket.pageNode(c.bucket.root) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + k, v, flags := c.keyValue() if (flags & uint32(bucketLeafFlag)) != 0 { return k, nil @@ -209,28 +216,37 @@ func (c *Cursor) last() { // next moves to the next leaf element and returns the key and value. // If the cursor is at the last leaf element then it stays there and returns nil. func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } } - } - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.first() - return c.keyValue() + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } } // search recursively performs a binary search against a given page/node until it finds a given key. 
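The `Cursor.next()` rewrite above wraps the old logic in a loop so that empty pages are skipped instead of surfacing as a phantom position (see boltdb/bolt#450, referenced in the code). To complement the forward loop already shown in the README, here is a hedged sketch of reverse iteration with the same cursor API; the bucket name is an illustrative placeholder:

```go
err := db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("MyBucket")).Cursor()
	// Walk the bucket backwards using Last()/Prev().
	for k, v := c.Last(); k != nil; k, v = c.Prev() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}
	return nil
})
```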
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go b/Godeps/_workspace/src/github.com/boltdb/bolt/db.go index d39c4aa9ccef2..0f1e1bc3d74a2 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/db.go @@ -1,8 +1,10 @@ package bolt import ( + "errors" "fmt" "hash/fnv" + "log" "os" "runtime" "runtime/debug" @@ -24,13 +26,14 @@ const magic uint32 = 0xED0CDAED // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronzied using the msync(2) syscall. +// must be synchronized using the msync(2) syscall. const IgnoreNoSync = runtime.GOOS == "openbsd" // Default values if not set in a DB instance. const ( DefaultMaxBatchSize int = 1000 DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 ) // DB represents a collection of buckets persisted to a file on disk. @@ -63,6 +66,10 @@ type DB struct { // https://github.com/boltdb/bolt/issues/284 NoGrowSync bool + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + // MaxBatchSize is the maximum size of a batch. Default value is // copied from DefaultMaxBatchSize in Open. // @@ -79,11 +86,17 @@ type DB struct { // Do not change concurrently with calls to Batch. MaxBatchDelay time.Duration + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + path string file *os.File dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int + filesz int // current on disk file size meta0 *meta meta1 *meta pageSize int @@ -136,10 +149,12 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { options = DefaultOptions } db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize flag := os.O_RDWR if options.ReadOnly { @@ -172,7 +187,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Initialize the database if it doesn't exist. if info, err := db.file.Stat(); err != nil { - return nil, fmt.Errorf("stat error: %s", err) + return nil, err } else if info.Size() == 0 { // Initialize new files with meta pages. if err := db.init(); err != nil { @@ -184,14 +199,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if _, err := db.file.ReadAt(buf[:], 0); err == nil { m := db.pageInBuffer(buf[:], 0).meta() if err := m.validate(); err != nil { - return nil, fmt.Errorf("meta0 error: %s", err) + return nil, err } db.pageSize = int(m.pageSize) } } // Memory map the data file. - if err := db.mmap(0); err != nil { + if err := db.mmap(options.InitialMmapSize); err != nil { _ = db.close() return nil, err } @@ -248,10 +263,10 @@ func (db *DB) mmap(minsz int) error { // Validate the meta pages. 
if err := db.meta0.validate(); err != nil { - return fmt.Errorf("meta0 error: %s", err) + return err } if err := db.meta1.validate(); err != nil { - return fmt.Errorf("meta1 error: %s", err) + return err } return nil @@ -266,7 +281,7 @@ func (db *DB) munmap() error { } // mmapSize determines the appropriate size for the mmap given the current size -// of the database. The minimum size is 1MB and doubles until it reaches 1GB. +// of the database. The minimum size is 32KB and doubles until it reaches 1GB. // Returns an error if the new mmap size is greater than the max allowed. func (db *DB) mmapSize(size int) (int, error) { // Double the size from 32KB until 1GB. @@ -382,7 +397,9 @@ func (db *DB) close() error { // No need to unlock read-only file. if !db.readOnly { // Unlock the file. - _ = funlock(db.file) + if err := funlock(db.file); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } } // Close the file descriptor. @@ -401,11 +418,15 @@ func (db *DB) close() error { // will cause the calls to block and be serialized until the current write // transaction finishes. // -// Transactions should not be depedent on one another. Opening a read +// Transactions should not be dependent on one another. Opening a read // transaction and a write transaction in the same goroutine can cause the // writer to deadlock because the database periodically needs to re-mmap itself // as it grows and it cannot do that while a read transaction is open. // +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. func (db *DB) Begin(writable bool) (*Tx, error) { @@ -589,6 +610,136 @@ func (db *DB) View(fn func(*Tx) error) error { return nil } +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. 
+func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter to re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + // Sync executes fdatasync() against the database file handle. // // This is not necessary under normal operation, however, if you use NoSync @@ -655,6 +806,36 @@ func (db *DB) allocate(count int) (*page, error) { return p, nil } +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than or equal to the current file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + func (db *DB) IsReadOnly() bool { return db.readOnly } @@ -672,6 +853,19 @@ type Options struct { // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. + MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block the write transaction + // if the InitialMmapSize is large enough to hold the database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If InitialMmapSize is smaller than the previous database size, + // it has no effect.
+ InitialMmapSize int } // DefaultOptions represent the options used if nil options are passed into Open(). diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go b/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go index 6b52b2c896452..e74d2cae76045 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go @@ -29,6 +29,14 @@ type Tx struct { pages map[pgid]*page stats TxStats commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int } // init initializes the transaction. @@ -87,18 +95,21 @@ func (tx *Tx) Stats() TxStats { // Bucket retrieves a bucket by name. // Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) Bucket(name []byte) *Bucket { return tx.root.Bucket(name) } // CreateBucket creates a new bucket. // Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { return tx.root.CreateBucket(name) } // CreateBucketIfNotExists creates a new bucket if it doesn't already exist. // Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { return tx.root.CreateBucketIfNotExists(name) } @@ -157,6 +168,8 @@ func (tx *Tx) Commit() error { // Free the old root bucket. tx.meta.root.root = tx.root.root + opgid := tx.meta.pgid + // Free the freelist and allocate new pages for it. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) @@ -171,6 +184,14 @@ func (tx *Tx) Commit() error { } tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + // Write dirty pages to disk. startTime = time.Now() if err := tx.write(); err != nil { @@ -236,7 +257,8 @@ func (tx *Tx) close() { var freelistPendingN = tx.db.freelist.pending_count() var freelistAlloc = tx.db.freelist.size() - // Remove writer lock. + // Remove transaction ref & writer lock. + tx.db.rwtx = nil tx.db.rwlock.Unlock() // Merge statistics. @@ -250,11 +272,16 @@ func (tx *Tx) close() { } else { tx.db.removeTx(tx) } + + // Clear all references. tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil } // Copy writes the entire database to a writer. -// This function exists for backwards compatibility. Use WriteTo() in +// This function exists for backwards compatibility. Use WriteTo() instead. func (tx *Tx) Copy(w io.Writer) error { _, err := tx.WriteTo(w) return err @@ -263,21 +290,18 @@ func (tx *Tx) Copy(w io.Writer) error { // WriteTo writes the entire database to a writer. // If err == nil then exactly tx.Size() bytes will be written into the writer. 
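//
// For orientation, a hedged backup sketch under a read-only transaction.
// The file name "backup.db" is an illustrative placeholder; per the
// WriteFlag comment above, callers with databases much larger than RAM
// may set tx.WriteFlag = syscall.O_DIRECT before copying:
//
//	err := db.View(func(tx *Tx) error {
//		f, err := os.Create("backup.db")
//		if err != nil {
//			return err
//		}
//		defer f.Close()
//		_, err = tx.WriteTo(f)
//		return err
//	})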
@@ -263,21 +290,18 @@ func (tx *Tx) Copy(w io.Writer) error {
 
 // WriteTo writes the entire database to a writer.
 // If err == nil then exactly tx.Size() bytes will be written into the writer.
 func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
-	// Attempt to open reader directly.
-	var f *os.File
-	if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil {
-		// Fallback to a regular open if that doesn't work.
-		if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil {
-			return 0, err
-		}
+	// Attempt to open reader with WriteFlag
+	f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+	if err != nil {
+		return 0, err
 	}
+	defer func() { _ = f.Close() }()
 
 	// Copy the meta pages.
 	tx.db.metalock.Lock()
 	n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
 	tx.db.metalock.Unlock()
 	if err != nil {
-		_ = f.Close()
 		return n, fmt.Errorf("meta copy: %s", err)
 	}
 
@@ -285,7 +309,6 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
 	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
 	n += wn
 	if err != nil {
-		_ = f.Close()
 		return n, err
 	}
 
@@ -492,7 +515,7 @@ func (tx *Tx) writeMeta() error {
 }
 
 // page returns a reference to the page with a given id.
-// If page has been written to then a temporary bufferred page is returned.
+// If page has been written to then a temporary buffered page is returned.
 func (tx *Tx) page(id pgid) *page {
 	// Check the dirty pages first.
 	if tx.pages != nil {
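Tx.WriteFlag feeds straight into the os.OpenFile call above, so its natural consumer is a backup helper run inside a read transaction. A sketch under stated assumptions: syscall.O_DIRECT is Linux-specific, the file names are illustrative, and this is not code from the patch.

package main

import (
	"log"
	"os"
	"syscall"

	"github.com/boltdb/bolt"
)

// backup streams a consistent snapshot of db to path; on success
// WriteTo writes exactly tx.Size() bytes.
func backup(db *bolt.DB, path string) error {
	return db.View(func(tx *bolt.Tx) error {
		// For databases much larger than RAM, read the source file with
		// O_DIRECT so the copy does not churn the page cache.
		tx.WriteFlag = syscall.O_DIRECT
		f, err := os.Create(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = tx.WriteTo(f)
		return err
	})
}

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := backup(db, "my.db.bak"); err != nil {
		log.Fatal(err)
	}
}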
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore b/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore
deleted file mode 100644
index b25c15b81fae0..0000000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*~
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS b/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS
deleted file mode 100644
index dab9dbe75140a..0000000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file is like Go's AUTHORS file: it lists Copyright holders.
-# The list of humans who have contributd is in the CONTRIBUTORS file.
-#
-# To contribute to this project, because it will eventually be folded
-# back in to Go itself, you need to submit a CLA:
-#
-# http://golang.org/doc/contribute.html#copyright
-#
-# Then you get added to CONTRIBUTORS and you or your company get added
-# to the AUTHORS file.
-
-Blake Mizerany github=bmizerany
-Daniel Morsing github=DanielMorsing
-Gabriel Aszalos github=gbbr
-Google, Inc.
-Keith Rarick github=kr
-Matthew Keenan github=mattkeenan
-Matt Layher github=mdlayher
-Tatsuhiro Tsujikawa github=tatsuhiro-t
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS b/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS
deleted file mode 100644
index 22e8c8c544ffc..0000000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file is like Go's CONTRIBUTORS file: it lists humans.
-# The list of copyright holders (which may be companies) are in the AUTHORS file.
-#
-# To contribute to this project, because it will eventually be folded
-# back in to Go itself, you need to submit a CLA:
-#
-# http://golang.org/doc/contribute.html#copyright
-#
-# Then you get added to CONTRIBUTORS and you or your company get added
-# to the AUTHORS file.
-
-Blake Mizerany github=bmizerany
-Brad Fitzpatrick github=bradfitz
-Daniel Morsing github=DanielMorsing
-Gabriel Aszalos github=gbbr
-Keith Rarick github=kr
-Matthew Keenan github=mattkeenan
-Matt Layher github=mdlayher
-Tatsuhiro Tsujikawa github=tatsuhiro-t
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING b/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING
deleted file mode 100644
index 69aafe4d170ea..0000000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/HACKING
+++ /dev/null
@@ -1,5 +0,0 @@
-We only accept contributions from users who have gone through Go's
-contribution process (signed a CLA).
-
-Please acknowledge whether you have (and use the same email) if
-sending a pull request.
diff --git a/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE b/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE
deleted file mode 100644
index 2dc6853cae2a0..0000000000000
--- a/Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE
+++ /dev/null
@@ -1,7 +0,0 @@
-Copyright 2014 Google & the Go AUTHORS
-
-Go AUTHORS are:
-See https://code.google.com/p/go/source/browse/AUTHORS
-
-Licensed under the terms of Go itself:
-https://code.google.com/p/go/source/browse/LICENSE
diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/client/client.go b/Godeps/_workspace/src/github.com/coreos/etcd/client/client.go
index e484032c1aa57..cf8aea8775b5f 100644
--- a/Godeps/_workspace/src/github.com/coreos/etcd/client/client.go
+++ b/Godeps/_workspace/src/github.com/coreos/etcd/client/client.go
@@ -24,6 +24,7 @@ import (
 	"net/url"
 	"reflect"
 	"sort"
+	"strconv"
 	"sync"
 	"time"
 
@@ -99,6 +100,8 @@ type Config struct {
 	// watch start. But if server is behind some kind of proxy, the response
 	// header may be cached at proxy, and Client cannot rely on this behavior.
 	//
+	// In particular, a wait request will ignore this timeout.
+	//
 	// One API call may send multiple requests to different etcd servers until it
 	// succeeds. Use context of the API to specify the overall timeout.
 	//
@@ -162,6 +165,11 @@ type Client interface {
 	// this may differ from the initial Endpoints provided in the Config.
 	Endpoints() []string
 
+	// SetEndpoints sets the set of API endpoints used by Client to resolve
+	// HTTP requests. If the given endpoints are not valid, an error will be
+	// returned.
+	SetEndpoints(eps []string) error
+
 	httpClient
}
 
@@ -176,7 +184,7 @@ func New(cfg Config) (Client, error) {
 			password: cfg.Password,
 		}
 	}
-	if err := c.reset(cfg.Endpoints); err != nil {
+	if err := c.SetEndpoints(cfg.Endpoints); err != nil {
 		return nil, err
 	}
 	return c, nil
@@ -219,7 +227,7 @@ type httpClusterClient struct {
 	rand *rand.Rand
 }
 
-func (c *httpClusterClient) reset(eps []string) error {
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
 	if len(eps) == 0 {
 		return ErrNoEndpoints
 	}
@@ -341,7 +349,7 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
 		return nil
 	}
 
-	return c.reset(eps)
+	return c.SetEndpoints(eps)
 }
 
 func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
@@ -378,9 +386,21 @@ func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Respon
 		return nil, nil, err
 	}
 
+	isWait := false
+	if req != nil && req.URL != nil {
+		ws := req.URL.Query().Get("wait")
+		if len(ws) != 0 {
+			var err error
+			isWait, err = strconv.ParseBool(ws)
+			if err != nil {
+				return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
+			}
+		}
+	}
+
 	var hctx context.Context
 	var hcancel context.CancelFunc
-	if c.headerTimeout > 0 {
+	if !isWait && c.headerTimeout > 0 {
 		hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
 	} else {
 		hctx, hcancel = context.WithCancel(ctx)
diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/auth/auth.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/auth/auth.go
index 9e7dd4e79fdb4..2a6b17333f95d 100644
--- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/auth/auth.go
+++ b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/auth/auth.go
@@ -95,8 +95,7 @@ type store struct {
 	timeout     time.Duration
 	ensuredOnce bool
 
-	mu      sync.Mutex // protect enabled
-	enabled *bool
+	mu sync.Mutex
 }
 
 type User struct {
@@ -409,8 +408,6 @@ func (s *store) EnableAuth() error {
 	}
 	err = s.enableAuth()
 	if err == nil {
-		b := true
-		s.enabled = &b
 		plog.Noticef("auth: enabled auth")
 	} else {
 		plog.Errorf("error enabling auth (%v)", err)
@@ -428,8 +425,6 @@ func (s *store) DisableAuth() error {
 
 	err := s.disableAuth()
 	if err == nil {
-		b := false
-		s.enabled = &b
 		plog.Noticef("auth: disabled auth")
 	} else {
 		plog.Errorf("error disabling auth (%v)", err)
diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/auth/auth_requests.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
index ca30060ce6e98..0761121d7476a 100644
--- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
+++ b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
@@ -85,15 +85,10 @@ func (s *store) detectAuth() bool {
 	if s.server == nil {
 		return false
 	}
-	if s.enabled != nil {
-		return *s.enabled
-	}
 
 	value, err := s.requestResource("/enabled", false)
 	if err != nil {
 		if e, ok := err.(*etcderr.Error); ok {
 			if e.ErrorCode == etcderr.EcodeKeyNotFound {
-				b := false
-				s.enabled = &b
 				return false
 			}
 		}
@@ -107,7 +102,6 @@ func (s *store) detectAuth() bool {
 		plog.Errorf("internal bookkeeping value for enabled isn't valid JSON (%v)", err)
 		return false
 	}
-	s.enabled = &u
 	return u
 }
diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
index 447fda41cb828..6f46c2fe9cf7a 100644
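The client.go hunk above renames the private reset to the exported SetEndpoints, and exempts ?wait=true long-polls from the per-request header timeout. A sketch of a caller follows, assuming the vendored import path; the endpoint URLs are illustrative and this is not code from the patch.

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://10.0.0.10:2379"},
		// Bounds time-to-first-response-header per request; per the
		// hunk above, wait (watch) requests now ignore it.
		HeaderTimeoutPerRequest: time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Repoint a live client instead of constructing a new one.
	eps := []string{"http://10.0.0.11:2379", "http://10.0.0.12:2379"}
	if err := c.SetEndpoints(eps); err != nil {
		log.Fatal(err)
	}
	log.Println("endpoints:", c.Endpoints())
}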
--- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
+++ b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
@@ -13,38 +13,56 @@
 	It has these top-level messages:
 		Request
 		Metadata
+		InternalRaftRequest
+		ResponseHeader
+		RangeRequest
+		RangeResponse
+		PutRequest
+		PutResponse
+		DeleteRangeRequest
+		DeleteRangeResponse
+		RequestUnion
+		ResponseUnion
+		Compare
+		TxnRequest
+		TxnResponse
+		CompactionRequest
+		CompactionResponse
 */
 package etcdserverpb
 
-import proto "github.com/gogo/protobuf/proto"
-import math "math"
+import (
+	"fmt"
+
+	proto "github.com/gogo/protobuf/proto"
+)
 
-// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto"
+import math "math"
 
 import io "io"
 
-import fmt "fmt"
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
+var _ = fmt.Errorf
 var _ = math.Inf
 
 type Request struct {
-	ID         uint64 `protobuf:"varint,1,opt" json:"ID"`
-	Method     string `protobuf:"bytes,2,opt" json:"Method"`
-	Path       string `protobuf:"bytes,3,opt" json:"Path"`
-	Val        string `protobuf:"bytes,4,opt" json:"Val"`
-	Dir        bool   `protobuf:"varint,5,opt" json:"Dir"`
-	PrevValue  string `protobuf:"bytes,6,opt" json:"PrevValue"`
-	PrevIndex  uint64 `protobuf:"varint,7,opt" json:"PrevIndex"`
-	PrevExist  *bool  `protobuf:"varint,8,opt" json:"PrevExist,omitempty"`
-	Expiration int64  `protobuf:"varint,9,opt" json:"Expiration"`
-	Wait       bool   `protobuf:"varint,10,opt" json:"Wait"`
-	Since      uint64 `protobuf:"varint,11,opt" json:"Since"`
-	Recursive  bool   `protobuf:"varint,12,opt" json:"Recursive"`
-	Sorted     bool   `protobuf:"varint,13,opt" json:"Sorted"`
-	Quorum     bool   `protobuf:"varint,14,opt" json:"Quorum"`
-	Time       int64  `protobuf:"varint,15,opt" json:"Time"`
-	Stream     bool   `protobuf:"varint,16,opt" json:"Stream"`
+	ID         uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
+	Method     string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
+	Path       string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
+	Val        string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
+	Dir        bool   `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
+	PrevValue  string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
+	PrevIndex  uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
+	PrevExist  *bool  `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
+	Expiration int64  `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
+	Wait       bool   `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
+	Since      uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
+	Recursive  bool   `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
+	Sorted     bool   `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
+	Quorum     bool   `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
+	Time       int64  `protobuf:"varint,15,opt,name=Time" json:"Time"`
+	Stream     bool   `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
 	XXX_unrecognized []byte `json:"-"`
 }
 
@@ -53,8 +71,8 @@ func (m *Request) String() string { return proto.CompactTextString(m) }
 func (*Request) ProtoMessage()    {}
 
 type Metadata struct {
-	NodeID    uint64 `protobuf:"varint,1,opt" json:"NodeID"`
-	ClusterID uint64 `protobuf:"varint,2,opt" json:"ClusterID"`
+	NodeID    uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
+	ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
 	XXX_unrecognized []byte `json:"-"`
 }
 
@@ -62,6 +80,10 @@ func (m *Metadata) Reset()         { *m = Metadata{} }
 func (m *Metadata) String() string { return proto.CompactTextString(m) }
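Almost every hunk in the regenerated unmarshalers below adds the same guard: once shift reaches 64, a base-128 varint can no longer fit in a uint64, so decoding fails with ErrIntOverflowEtcdserver instead of wrapping silently or scanning past the field. A stand-alone sketch of the idea, not the generated code itself:

package main

import (
	"errors"
	"fmt"
)

var errIntOverflow = errors.New("proto: integer overflow")

// decodeVarint reads one protobuf varint from data, returning the value
// and the number of bytes consumed. The shift >= 64 check mirrors the
// guard added throughout the regenerated code above.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, n, nil
		}
	}
}

func main() {
	v, n, err := decodeVarint([]byte{0xac, 0x02}) // encodes 300
	fmt.Println(v, n, err)                        // 300 2 <nil>
}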
func (*Metadata) ProtoMessage() {} +func init() { + proto.RegisterType((*Request)(nil), "etcdserverpb.Request") + proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") +} func (m *Request) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -287,8 +309,12 @@ func (m *Request) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -301,6 +327,12 @@ func (m *Request) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -308,6 +340,9 @@ func (m *Request) Unmarshal(data []byte) error { } m.ID = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -324,6 +359,9 @@ func (m *Request) Unmarshal(data []byte) error { } var stringLen uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -350,6 +388,9 @@ func (m *Request) Unmarshal(data []byte) error { } var stringLen uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -376,6 +417,9 @@ func (m *Request) Unmarshal(data []byte) error { } var stringLen uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -402,6 +446,9 @@ func (m *Request) Unmarshal(data []byte) error { } var v int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -419,6 +466,9 @@ func (m *Request) Unmarshal(data []byte) error { } var stringLen uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -445,6 +495,9 @@ func (m *Request) Unmarshal(data []byte) error { } m.PrevIndex = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -461,6 +514,9 @@ func (m *Request) Unmarshal(data []byte) error { } var v int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -479,6 +535,9 @@ func (m *Request) Unmarshal(data []byte) error { } m.Expiration = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -495,6 +554,9 @@ func (m *Request) Unmarshal(data []byte) error { } var v int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -512,6 +574,9 @@ func (m *Request) Unmarshal(data []byte) error { } m.Since = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -528,6 +593,9 @@ func (m *Request) Unmarshal(data []byte) error { } var v int for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -545,6 +613,9 @@ func (m *Request) Unmarshal(data []byte) error { } var v int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -562,6 +633,9 @@ func (m *Request) Unmarshal(data []byte) error { } var v int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -579,6 +653,9 @@ func (m *Request) Unmarshal(data []byte) error { } m.Time = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -595,6 +672,9 @@ func (m *Request) Unmarshal(data []byte) error { } var v int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -607,15 +687,7 @@ func (m *Request) Unmarshal(data []byte) error { } m.Stream = bool(v != 0) default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipEtcdserver(data[iNdEx:]) if err != nil { return err @@ -631,14 +703,21 @@ func (m *Request) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *Metadata) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -651,6 +730,12 @@ func (m *Metadata) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -658,6 +743,9 @@ func (m *Metadata) Unmarshal(data []byte) error { } m.NodeID = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -674,6 +762,9 @@ func (m *Metadata) Unmarshal(data []byte) error { } m.ClusterID = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -685,15 +776,7 @@ func (m *Metadata) Unmarshal(data []byte) error { } } default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipEtcdserver(data[iNdEx:]) if err != nil { return err @@ -709,6 +792,9 @@ func (m *Metadata) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func skipEtcdserver(data []byte) (n int, err error) { @@ -717,6 +803,9 @@ func skipEtcdserver(data []byte) (n int, err error) { for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -730,7 +819,10 @@ func skipEtcdserver(data []byte) (n int, err error) { wireType := int(wire & 0x7) switch wireType { case 0: - for { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -746,6 +838,9 @@ func 
skipEtcdserver(data []byte) (n int, err error) { case 2: var length int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -766,6 +861,9 @@ func skipEtcdserver(data []byte) (n int, err error) { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -801,4 +899,5 @@ func skipEtcdserver(data []byte) (n int, err error) { var ( ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") ) diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go index 8d6fb22cd318d..47402aebf7281 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -4,15 +4,20 @@ package etcdserverpb -import proto "github.com/gogo/protobuf/proto" +import ( + "fmt" -// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto" + proto "github.com/gogo/protobuf/proto" +) + +import math "math" import io "io" -import fmt "fmt" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf // An InternalRaftRequest is the union of all requests which can be // sent via raft. @@ -28,6 +33,9 @@ func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } func (*InternalRaftRequest) ProtoMessage() {} +func init() { + proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") +} func (m *InternalRaftRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -162,48 +170,16 @@ func sovRaftInternal(x uint64) (n int) { func sozRaftInternal(x uint64) (n int) { return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *InternalRaftRequest) GetValue() interface{} { - if this.V2 != nil { - return this.V2 - } - if this.Range != nil { - return this.Range - } - if this.Put != nil { - return this.Put - } - if this.DeleteRange != nil { - return this.DeleteRange - } - if this.Txn != nil { - return this.Txn - } - return nil -} - -func (this *InternalRaftRequest) SetValue(value interface{}) bool { - switch vt := value.(type) { - case *Request: - this.V2 = vt - case *RangeRequest: - this.Range = vt - case *PutRequest: - this.Put = vt - case *DeleteRangeRequest: - this.DeleteRange = vt - case *TxnRequest: - this.Txn = vt - default: - return false - } - return true -} func (m *InternalRaftRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -216,6 +192,12 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type 
%d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -223,6 +205,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -253,6 +238,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -283,6 +271,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -313,6 +304,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -343,6 +337,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -368,15 +365,7 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error { } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRaftInternal(data[iNdEx:]) if err != nil { return err @@ -391,6 +380,9 @@ func (m *InternalRaftRequest) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func skipRaftInternal(data []byte) (n int, err error) { @@ -399,6 +391,9 @@ func skipRaftInternal(data []byte) (n int, err error) { for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -412,7 +407,10 @@ func skipRaftInternal(data []byte) (n int, err error) { wireType := int(wire & 0x7) switch wireType { case 0: - for { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -428,6 +426,9 @@ func skipRaftInternal(data []byte) (n int, err error) { case 2: var length int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -448,6 +449,9 @@ func skipRaftInternal(data []byte) (n int, err error) { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -483,4 +487,5 @@ func skipRaftInternal(data []byte) (n int, err error) { var ( ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") ) diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto index 7cf7b67d03ad4..32efcdafc9dac 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto +++ b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto @@ -13,12 +13,9 @@ option 
(gogoproto.goproto_getters_all) = false; // An InternalRaftRequest is the union of all requests which can be // sent via raft. message InternalRaftRequest { - option (gogoproto.onlyone) = true; - oneof value { Request v2 = 1; RangeRequest range = 2; PutRequest put = 3; DeleteRangeRequest delete_range = 4; TxnRequest txn = 5; - } } diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go index 462b0610b9206..9dd30fb02f7d1 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -4,9 +4,14 @@ package etcdserverpb -import proto "github.com/gogo/protobuf/proto" +import ( + "fmt" + + proto "github.com/gogo/protobuf/proto" +) + +import math "math" -// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto" import storagepb "github.com/coreos/etcd/storage/storagepb" import ( @@ -15,10 +20,11 @@ import ( ) import io "io" -import fmt "fmt" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf type Compare_CompareResult int32 @@ -178,86 +184,404 @@ func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { } type RequestUnion struct { - RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range" json:"request_range,omitempty"` - RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put" json:"request_put,omitempty"` - RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range" json:"request_delete_range,omitempty"` + // Types that are valid to be assigned to Request: + // *RequestUnion_RequestRange + // *RequestUnion_RequestPut + // *RequestUnion_RequestDeleteRange + Request isRequestUnion_Request `protobuf_oneof:"request"` } func (m *RequestUnion) Reset() { *m = RequestUnion{} } func (m *RequestUnion) String() string { return proto.CompactTextString(m) } func (*RequestUnion) ProtoMessage() {} -func (m *RequestUnion) GetRequestRange() *RangeRequest { +type isRequestUnion_Request interface { + isRequestUnion_Request() + MarshalTo([]byte) (int, error) + Size() int +} + +type RequestUnion_RequestRange struct { + RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,oneof"` +} +type RequestUnion_RequestPut struct { + RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,oneof"` +} +type RequestUnion_RequestDeleteRange struct { + RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,oneof"` +} + +func (*RequestUnion_RequestRange) isRequestUnion_Request() {} +func (*RequestUnion_RequestPut) isRequestUnion_Request() {} +func (*RequestUnion_RequestDeleteRange) isRequestUnion_Request() {} + +func (m *RequestUnion) GetRequest() isRequestUnion_Request { if m != nil { - return m.RequestRange + return m.Request + } + return nil +} + +func (m *RequestUnion) GetRequestRange() *RangeRequest { + if x, ok := m.GetRequest().(*RequestUnion_RequestRange); ok { + return x.RequestRange } return nil } func (m *RequestUnion) GetRequestPut() *PutRequest { - if m != nil { - return m.RequestPut + if x, ok := m.GetRequest().(*RequestUnion_RequestPut); ok { + return x.RequestPut } return nil } func (m *RequestUnion) GetRequestDeleteRange() *DeleteRangeRequest { - if m != nil { - return m.RequestDeleteRange + if x, ok := m.GetRequest().(*RequestUnion_RequestDeleteRange); ok { + 
return x.RequestDeleteRange } return nil } +// XXX_OneofFuncs is for the internal use of the proto package. +func (*RequestUnion) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _RequestUnion_OneofMarshaler, _RequestUnion_OneofUnmarshaler, []interface{}{ + (*RequestUnion_RequestRange)(nil), + (*RequestUnion_RequestPut)(nil), + (*RequestUnion_RequestDeleteRange)(nil), + } +} + +func _RequestUnion_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RequestUnion) + // request + switch x := m.Request.(type) { + case *RequestUnion_RequestRange: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestRange); err != nil { + return err + } + case *RequestUnion_RequestPut: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestPut); err != nil { + return err + } + case *RequestUnion_RequestDeleteRange: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestDeleteRange); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RequestUnion.Request has unexpected type %T", x) + } + return nil +} + +func _RequestUnion_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RequestUnion) + switch tag { + case 1: // request.request_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RangeRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestUnion_RequestRange{msg} + return true, err + case 2: // request.request_put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PutRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestUnion_RequestPut{msg} + return true, err + case 3: // request.request_delete_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteRangeRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestUnion_RequestDeleteRange{msg} + return true, err + default: + return false, nil + } +} + type ResponseUnion struct { - ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range" json:"response_range,omitempty"` - ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put" json:"response_put,omitempty"` - ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range" json:"response_delete_range,omitempty"` + // Types that are valid to be assigned to Response: + // *ResponseUnion_ResponseRange + // *ResponseUnion_ResponsePut + // *ResponseUnion_ResponseDeleteRange + Response isResponseUnion_Response `protobuf_oneof:"response"` } func (m *ResponseUnion) Reset() { *m = ResponseUnion{} } func (m *ResponseUnion) String() string { return proto.CompactTextString(m) } func (*ResponseUnion) ProtoMessage() {} -func (m *ResponseUnion) GetResponseRange() *RangeResponse { +type isResponseUnion_Response interface { + isResponseUnion_Response() + MarshalTo([]byte) (int, error) + Size() int +} + +type ResponseUnion_ResponseRange struct { + ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,oneof"` +} +type ResponseUnion_ResponsePut struct { + ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,oneof"` +} +type ResponseUnion_ResponseDeleteRange struct { + ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,oneof"` +} + +func (*ResponseUnion_ResponseRange) 
isResponseUnion_Response() {} +func (*ResponseUnion_ResponsePut) isResponseUnion_Response() {} +func (*ResponseUnion_ResponseDeleteRange) isResponseUnion_Response() {} + +func (m *ResponseUnion) GetResponse() isResponseUnion_Response { if m != nil { - return m.ResponseRange + return m.Response + } + return nil +} + +func (m *ResponseUnion) GetResponseRange() *RangeResponse { + if x, ok := m.GetResponse().(*ResponseUnion_ResponseRange); ok { + return x.ResponseRange } return nil } func (m *ResponseUnion) GetResponsePut() *PutResponse { - if m != nil { - return m.ResponsePut + if x, ok := m.GetResponse().(*ResponseUnion_ResponsePut); ok { + return x.ResponsePut } return nil } func (m *ResponseUnion) GetResponseDeleteRange() *DeleteRangeResponse { - if m != nil { - return m.ResponseDeleteRange + if x, ok := m.GetResponse().(*ResponseUnion_ResponseDeleteRange); ok { + return x.ResponseDeleteRange } return nil } +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ResponseUnion) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _ResponseUnion_OneofMarshaler, _ResponseUnion_OneofUnmarshaler, []interface{}{ + (*ResponseUnion_ResponseRange)(nil), + (*ResponseUnion_ResponsePut)(nil), + (*ResponseUnion_ResponseDeleteRange)(nil), + } +} + +func _ResponseUnion_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseUnion) + // response + switch x := m.Response.(type) { + case *ResponseUnion_ResponseRange: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseRange); err != nil { + return err + } + case *ResponseUnion_ResponsePut: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponsePut); err != nil { + return err + } + case *ResponseUnion_ResponseDeleteRange: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseUnion.Response has unexpected type %T", x) + } + return nil +} + +func _ResponseUnion_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseUnion) + switch tag { + case 1: // response.response_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RangeResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseUnion_ResponseRange{msg} + return true, err + case 2: // response.response_put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PutResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseUnion_ResponsePut{msg} + return true, err + case 3: // response.response_delete_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteRangeResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseUnion_ResponseDeleteRange{msg} + return true, err + default: + return false, nil + } +} + type Compare struct { Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` // key path Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - // version of the given key - Version int64 `protobuf:"varint,4,opt,name=version,proto3" 
json:"version,omitempty"` - // create revision of the given key - CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,proto3" json:"create_revision,omitempty"` - // last modified revision of the given key - ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,proto3" json:"mod_revision,omitempty"` - // value of the given key - Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + // Types that are valid to be assigned to TargetUnion: + // *Compare_Version + // *Compare_CreateRevision + // *Compare_ModRevision + // *Compare_Value + TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` } func (m *Compare) Reset() { *m = Compare{} } func (m *Compare) String() string { return proto.CompactTextString(m) } func (*Compare) ProtoMessage() {} +type isCompare_TargetUnion interface { + isCompare_TargetUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type Compare_Version struct { + Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"` +} +type Compare_CreateRevision struct { + CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,proto3,oneof"` +} +type Compare_ModRevision struct { + ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,proto3,oneof"` +} +type Compare_Value struct { + Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"` +} + +func (*Compare_Version) isCompare_TargetUnion() {} +func (*Compare_CreateRevision) isCompare_TargetUnion() {} +func (*Compare_ModRevision) isCompare_TargetUnion() {} +func (*Compare_Value) isCompare_TargetUnion() {} + +func (m *Compare) GetTargetUnion() isCompare_TargetUnion { + if m != nil { + return m.TargetUnion + } + return nil +} + +func (m *Compare) GetVersion() int64 { + if x, ok := m.GetTargetUnion().(*Compare_Version); ok { + return x.Version + } + return 0 +} + +func (m *Compare) GetCreateRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok { + return x.CreateRevision + } + return 0 +} + +func (m *Compare) GetModRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok { + return x.ModRevision + } + return 0 +} + +func (m *Compare) GetValue() []byte { + if x, ok := m.GetTargetUnion().(*Compare_Value); ok { + return x.Value + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { + return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, []interface{}{ + (*Compare_Version)(nil), + (*Compare_CreateRevision)(nil), + (*Compare_ModRevision)(nil), + (*Compare_Value)(nil), + } +} + +func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Compare) + // target_union + switch x := m.TargetUnion.(type) { + case *Compare_Version: + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Version)) + case *Compare_CreateRevision: + _ = b.EncodeVarint(5<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.CreateRevision)) + case *Compare_ModRevision: + _ = b.EncodeVarint(6<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.ModRevision)) + case *Compare_Value: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + _ = b.EncodeRawBytes(x.Value) + case nil: + default: + return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x) + } + return nil +} + +func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Compare) + switch tag { + case 4: // target_union.version + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_Version{int64(x)} + return true, err + case 5: // target_union.create_revision + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_CreateRevision{int64(x)} + return true, err + case 6: // target_union.mod_revision + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_ModRevision{int64(x)} + return true, err + case 7: // target_union.value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.TargetUnion = &Compare_Value{x} + return true, err + default: + return false, nil + } +} + // From google paxosdb paper: // Our implementation hinges around a powerful primitive which we call MultiOp. All other database // operations except for iteration are implemented as a single call to MultiOp. 
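For callers, the visible effect of the oneof rewrite above is that Compare's flat Version/CreateRevision/ModRevision/Value fields become wrapper types assigned to TargetUnion, read back through a type switch or the nil-safe getters. A sketch of calling code, assuming the vendored import path; the key and values are illustrative and this is not code from the patch.

package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	// The comparison target now lives behind the TargetUnion interface;
	// assign a generated wrapper instead of setting a flat field.
	c := &pb.Compare{
		Key:         []byte("foo"),
		TargetUnion: &pb.Compare_Version{Version: 2},
	}

	// Read back with a type switch over the union...
	switch t := c.TargetUnion.(type) {
	case *pb.Compare_Version:
		fmt.Println("compare by version:", t.Version)
	case *pb.Compare_Value:
		fmt.Println("compare by value:", string(t.Value))
	}

	// ...or through the zero-safe getters generated above.
	fmt.Println(c.GetVersion()) // 2
}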
A MultiOp is applied atomically @@ -356,6 +680,20 @@ func (m *CompactionResponse) GetHeader() *ResponseHeader { } func init() { + proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") + proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") + proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") + proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest") + proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse") + proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest") + proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse") + proto.RegisterType((*RequestUnion)(nil), "etcdserverpb.RequestUnion") + proto.RegisterType((*ResponseUnion)(nil), "etcdserverpb.ResponseUnion") + proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare") + proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest") + proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse") + proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest") + proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse") proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) } @@ -465,9 +803,9 @@ func RegisterEtcdServer(s *grpc.Server, srv EtcdServer) { s.RegisterService(&_Etcd_serviceDesc, srv) } -func _Etcd_Range_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _Etcd_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(RangeRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(EtcdServer).Range(ctx, in) @@ -477,9 +815,9 @@ func _Etcd_Range_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, return out, nil } -func _Etcd_Put_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _Etcd_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(PutRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(EtcdServer).Put(ctx, in) @@ -489,9 +827,9 @@ func _Etcd_Put_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, b return out, nil } -func _Etcd_DeleteRange_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _Etcd_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(DeleteRangeRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(EtcdServer).DeleteRange(ctx, in) @@ -501,9 +839,9 @@ func _Etcd_DeleteRange_Handler(srv interface{}, ctx context.Context, codec grpc. 
return out, nil } -func _Etcd_Txn_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _Etcd_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(TxnRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(EtcdServer).Txn(ctx, in) @@ -513,9 +851,9 @@ func _Etcd_Txn_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, b return out, nil } -func _Etcd_Compact_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _Etcd_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(CompactionRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(EtcdServer).Compact(ctx, in) @@ -830,39 +1168,58 @@ func (m *RequestUnion) MarshalTo(data []byte) (int, error) { _ = i var l int _ = l + if m.Request != nil { + nn4, err := m.Request.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += nn4 + } + return i, nil +} + +func (m *RequestUnion_RequestRange) MarshalTo(data []byte) (int, error) { + i := 0 if m.RequestRange != nil { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.RequestRange.Size())) - n4, err := m.RequestRange.MarshalTo(data[i:]) + n5, err := m.RequestRange.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n4 + i += n5 } + return i, nil +} +func (m *RequestUnion_RequestPut) MarshalTo(data []byte) (int, error) { + i := 0 if m.RequestPut != nil { data[i] = 0x12 i++ i = encodeVarintRpc(data, i, uint64(m.RequestPut.Size())) - n5, err := m.RequestPut.MarshalTo(data[i:]) + n6, err := m.RequestPut.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n5 + i += n6 } + return i, nil +} +func (m *RequestUnion_RequestDeleteRange) MarshalTo(data []byte) (int, error) { + i := 0 if m.RequestDeleteRange != nil { data[i] = 0x1a i++ i = encodeVarintRpc(data, i, uint64(m.RequestDeleteRange.Size())) - n6, err := m.RequestDeleteRange.MarshalTo(data[i:]) + n7, err := m.RequestDeleteRange.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n6 + i += n7 } return i, nil } - func (m *ResponseUnion) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -878,39 +1235,58 @@ func (m *ResponseUnion) MarshalTo(data []byte) (int, error) { _ = i var l int _ = l + if m.Response != nil { + nn8, err := m.Response.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += nn8 + } + return i, nil +} + +func (m *ResponseUnion_ResponseRange) MarshalTo(data []byte) (int, error) { + i := 0 if m.ResponseRange != nil { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.ResponseRange.Size())) - n7, err := m.ResponseRange.MarshalTo(data[i:]) + n9, err := m.ResponseRange.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n7 + i += n9 } + return i, nil +} +func (m *ResponseUnion_ResponsePut) MarshalTo(data []byte) (int, error) { + i := 0 if m.ResponsePut != nil { data[i] = 0x12 i++ i = encodeVarintRpc(data, i, uint64(m.ResponsePut.Size())) - n8, err := m.ResponsePut.MarshalTo(data[i:]) + n10, err := m.ResponsePut.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n8 + i += n10 } + return i, nil +} +func (m *ResponseUnion_ResponseDeleteRange) MarshalTo(data []byte) (int, error) { + i := 0 if m.ResponseDeleteRange != nil { data[i] = 0x1a i++ i = encodeVarintRpc(data, i, 
uint64(m.ResponseDeleteRange.Size())) - n9, err := m.ResponseDeleteRange.MarshalTo(data[i:]) + n11, err := m.ResponseDeleteRange.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n9 + i += n11 } return i, nil } - func (m *Compare) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -944,32 +1320,47 @@ func (m *Compare) MarshalTo(data []byte) (int, error) { i += copy(data[i:], m.Key) } } - if m.Version != 0 { - data[i] = 0x20 - i++ - i = encodeVarintRpc(data, i, uint64(m.Version)) - } - if m.CreateRevision != 0 { - data[i] = 0x28 - i++ - i = encodeVarintRpc(data, i, uint64(m.CreateRevision)) - } - if m.ModRevision != 0 { - data[i] = 0x30 - i++ - i = encodeVarintRpc(data, i, uint64(m.ModRevision)) - } - if m.Value != nil { - if len(m.Value) > 0 { - data[i] = 0x3a - i++ - i = encodeVarintRpc(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) + if m.TargetUnion != nil { + nn12, err := m.TargetUnion.MarshalTo(data[i:]) + if err != nil { + return 0, err } + i += nn12 } return i, nil } +func (m *Compare_Version) MarshalTo(data []byte) (int, error) { + i := 0 + data[i] = 0x20 + i++ + i = encodeVarintRpc(data, i, uint64(m.Version)) + return i, nil +} +func (m *Compare_CreateRevision) MarshalTo(data []byte) (int, error) { + i := 0 + data[i] = 0x28 + i++ + i = encodeVarintRpc(data, i, uint64(m.CreateRevision)) + return i, nil +} +func (m *Compare_ModRevision) MarshalTo(data []byte) (int, error) { + i := 0 + data[i] = 0x30 + i++ + i = encodeVarintRpc(data, i, uint64(m.ModRevision)) + return i, nil +} +func (m *Compare_Value) MarshalTo(data []byte) (int, error) { + i := 0 + if m.Value != nil { + data[i] = 0x3a + i++ + i = encodeVarintRpc(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + } + return i, nil +} func (m *TxnRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -1043,11 +1434,11 @@ func (m *TxnResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n10, err := m.Header.MarshalTo(data[i:]) + n13, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n10 + i += n13 } if m.Succeeded { data[i] = 0x10 @@ -1116,11 +1507,11 @@ func (m *CompactionResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n11, err := m.Header.MarshalTo(data[i:]) + n14, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n11 + i += n14 } return i, nil } @@ -1274,41 +1665,77 @@ func (m *DeleteRangeResponse) Size() (n int) { } func (m *RequestUnion) Size() (n int) { + var l int + _ = l + if m.Request != nil { + n += m.Request.Size() + } + return n +} + +func (m *RequestUnion_RequestRange) Size() (n int) { var l int _ = l if m.RequestRange != nil { l = m.RequestRange.Size() n += 1 + l + sovRpc(uint64(l)) } + return n +} +func (m *RequestUnion_RequestPut) Size() (n int) { + var l int + _ = l if m.RequestPut != nil { l = m.RequestPut.Size() n += 1 + l + sovRpc(uint64(l)) } + return n +} +func (m *RequestUnion_RequestDeleteRange) Size() (n int) { + var l int + _ = l if m.RequestDeleteRange != nil { l = m.RequestDeleteRange.Size() n += 1 + l + sovRpc(uint64(l)) } return n } - func (m *ResponseUnion) Size() (n int) { + var l int + _ = l + if m.Response != nil { + n += m.Response.Size() + } + return n +} + +func (m *ResponseUnion_ResponseRange) Size() (n int) { var l int _ = l if m.ResponseRange != nil { l = m.ResponseRange.Size() n += 1 + l + 
sovRpc(uint64(l)) } + return n +} +func (m *ResponseUnion_ResponsePut) Size() (n int) { + var l int + _ = l if m.ResponsePut != nil { l = m.ResponsePut.Size() n += 1 + l + sovRpc(uint64(l)) } + return n +} +func (m *ResponseUnion_ResponseDeleteRange) Size() (n int) { + var l int + _ = l if m.ResponseDeleteRange != nil { l = m.ResponseDeleteRange.Size() n += 1 + l + sovRpc(uint64(l)) } return n } - func (m *Compare) Size() (n int) { var l int _ = l @@ -1324,24 +1751,39 @@ func (m *Compare) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.Version != 0 { - n += 1 + sovRpc(uint64(m.Version)) - } - if m.CreateRevision != 0 { - n += 1 + sovRpc(uint64(m.CreateRevision)) - } - if m.ModRevision != 0 { - n += 1 + sovRpc(uint64(m.ModRevision)) + if m.TargetUnion != nil { + n += m.TargetUnion.Size() } + return n +} + +func (m *Compare_Version) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.Version)) + return n +} +func (m *Compare_CreateRevision) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.CreateRevision)) + return n +} +func (m *Compare_ModRevision) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.ModRevision)) + return n +} +func (m *Compare_Value) Size() (n int) { + var l int + _ = l if m.Value != nil { l = len(m.Value) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } + n += 1 + l + sovRpc(uint64(l)) } return n } - func (m *TxnRequest) Size() (n int) { var l int _ = l @@ -1421,8 +1863,12 @@ func (m *ResponseHeader) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1435,6 +1881,12 @@ func (m *ResponseHeader) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -1442,6 +1894,9 @@ func (m *ResponseHeader) Unmarshal(data []byte) error { } var stringLen uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1468,6 +1923,9 @@ func (m *ResponseHeader) Unmarshal(data []byte) error { } m.ClusterId = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1484,6 +1942,9 @@ func (m *ResponseHeader) Unmarshal(data []byte) error { } m.MemberId = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1500,6 +1961,9 @@ func (m *ResponseHeader) Unmarshal(data []byte) error { } m.Revision = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1516,6 +1980,9 @@ func (m *ResponseHeader) Unmarshal(data []byte) error { } m.RaftTerm = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1527,15 +1994,7 @@ func (m *ResponseHeader) Unmarshal(data []byte) error { } } default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil 
{ return err @@ -1550,14 +2009,21 @@ func (m *ResponseHeader) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *RangeRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1570,6 +2036,12 @@ func (m *RangeRequest) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -1577,6 +2049,9 @@ func (m *RangeRequest) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1594,7 +2069,10 @@ func (m *RangeRequest) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = append([]byte{}, data[iNdEx:postIndex]...) + m.Key = append(m.Key[:0], data[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } iNdEx = postIndex case 2: if wireType != 2 { @@ -1602,6 +2080,9 @@ func (m *RangeRequest) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1619,7 +2100,10 @@ func (m *RangeRequest) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RangeEnd = append([]byte{}, data[iNdEx:postIndex]...) + m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } iNdEx = postIndex case 3: if wireType != 0 { @@ -1627,6 +2111,9 @@ func (m *RangeRequest) Unmarshal(data []byte) error { } m.Limit = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1643,6 +2130,9 @@ func (m *RangeRequest) Unmarshal(data []byte) error { } m.Revision = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1654,15 +2144,7 @@ func (m *RangeRequest) Unmarshal(data []byte) error { } } default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -1677,14 +2159,21 @@ func (m *RangeRequest) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *RangeResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1697,6 +2186,12 @@ func (m *RangeResponse) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -1704,6 +2199,9 @@ func (m *RangeResponse) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1734,6 +2232,9 @@ func (m *RangeResponse) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1762,6 +2263,9 @@ func (m *RangeResponse) Unmarshal(data []byte) error { } var v int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1774,15 +2278,7 @@ func (m *RangeResponse) Unmarshal(data []byte) error { } m.More = bool(v != 0) default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -1797,14 +2293,21 @@ func (m *RangeResponse) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *PutRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1817,6 +2320,12 @@ func (m *PutRequest) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -1824,6 +2333,9 @@ func (m *PutRequest) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1841,7 +2353,10 @@ func (m *PutRequest) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = append([]byte{}, data[iNdEx:postIndex]...) + m.Key = append(m.Key[:0], data[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } iNdEx = postIndex case 2: if wireType != 2 { @@ -1849,6 +2364,9 @@ func (m *PutRequest) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1866,18 +2384,13 @@ func (m *PutRequest) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = append([]byte{}, data[iNdEx:postIndex]...) + m.Value = append(m.Value[:0], data[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -1892,14 +2405,21 @@ func (m *PutRequest) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *PutResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1912,6 +2432,12 @@ func (m *PutResponse) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -1919,6 +2445,9 @@ func (m *PutResponse) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1944,15 +2473,7 @@ func (m *PutResponse) Unmarshal(data []byte) error { } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -1967,14 +2488,21 @@ func (m *PutResponse) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *DeleteRangeRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1987,6 +2515,12 @@ func (m *DeleteRangeRequest) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -1994,6 +2528,9 @@ func (m *DeleteRangeRequest) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2011,7 +2548,10 @@ func 
(m *DeleteRangeRequest) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = append([]byte{}, data[iNdEx:postIndex]...) + m.Key = append(m.Key[:0], data[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } iNdEx = postIndex case 2: if wireType != 2 { @@ -2019,6 +2559,9 @@ func (m *DeleteRangeRequest) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2036,18 +2579,13 @@ func (m *DeleteRangeRequest) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RangeEnd = append([]byte{}, data[iNdEx:postIndex]...) + m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -2062,14 +2600,21 @@ func (m *DeleteRangeRequest) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *DeleteRangeResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2082,6 +2627,12 @@ func (m *DeleteRangeResponse) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -2089,6 +2640,9 @@ func (m *DeleteRangeResponse) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2114,15 +2668,7 @@ func (m *DeleteRangeResponse) Unmarshal(data []byte) error { } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -2137,14 +2683,21 @@ func (m *DeleteRangeResponse) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *RequestUnion) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2157,6 +2710,12 @@ func (m *RequestUnion) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestUnion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestUnion: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -2164,6 +2723,9 @@ func (m *RequestUnion) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2181,12 +2743,11 @@ func (m *RequestUnion) Unmarshal(data []byte) error 
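Another pattern repeated across these hunks: `append(m.Key[:0], ...)` in place of `append([]byte{}, ...)` reuses the field's existing backing array when a message struct is recycled between Unmarshal calls, and the follow-up nil check keeps "present but empty" distinguishable from "absent". A small demonstration under that reading, with a hypothetical `fill` helper:

```go
package main

import "fmt"

// fill mimics the generated pattern: reuse dst's backing array when it is
// large enough, allocating only on growth.
func fill(dst, src []byte) []byte {
	dst = append(dst[:0], src...)
	if dst == nil { // src was empty and dst was nil: normalize to non-nil
		dst = []byte{}
	}
	return dst
}

func main() {
	buf := make([]byte, 0, 64) // pretend this survived a previous Unmarshal
	a := fill(buf, []byte("alpha"))
	b := fill(a, []byte("beta")) // reuses a's backing array, no allocation
	fmt.Printf("%s cap=%d\n", b, cap(b))

	var zero []byte
	fmt.Println(fill(zero, nil) != nil) // true: empty, but present
}
```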
{ if postIndex > l { return io.ErrUnexpectedEOF } - if m.RequestRange == nil { - m.RequestRange = &RangeRequest{} - } - if err := m.RequestRange.Unmarshal(data[iNdEx:postIndex]); err != nil { + v := &RangeRequest{} + if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } + m.Request = &RequestUnion_RequestRange{v} iNdEx = postIndex case 2: if wireType != 2 { @@ -2194,6 +2755,9 @@ func (m *RequestUnion) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2211,12 +2775,11 @@ func (m *RequestUnion) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RequestPut == nil { - m.RequestPut = &PutRequest{} - } - if err := m.RequestPut.Unmarshal(data[iNdEx:postIndex]); err != nil { + v := &PutRequest{} + if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } + m.Request = &RequestUnion_RequestPut{v} iNdEx = postIndex case 3: if wireType != 2 { @@ -2224,6 +2787,9 @@ func (m *RequestUnion) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2241,23 +2807,14 @@ func (m *RequestUnion) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RequestDeleteRange == nil { - m.RequestDeleteRange = &DeleteRangeRequest{} - } - if err := m.RequestDeleteRange.Unmarshal(data[iNdEx:postIndex]); err != nil { + v := &DeleteRangeRequest{} + if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } + m.Request = &RequestUnion_RequestDeleteRange{v} iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -2272,14 +2829,21 @@ func (m *RequestUnion) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *ResponseUnion) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2292,6 +2856,12 @@ func (m *ResponseUnion) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseUnion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseUnion: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -2299,6 +2869,9 @@ func (m *ResponseUnion) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2316,12 +2889,11 @@ func (m *ResponseUnion) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResponseRange == nil { - m.ResponseRange = &RangeResponse{} - } - if err := m.ResponseRange.Unmarshal(data[iNdEx:postIndex]); err != nil { + v := &RangeResponse{} + if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } + m.Response = &ResponseUnion_ResponseRange{v} iNdEx = postIndex case 2: if wireType != 2 { @@ -2329,6 +2901,9 @@ func (m *ResponseUnion) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2346,12 +2921,11 @@ func (m *ResponseUnion) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResponsePut == nil { - m.ResponsePut = &PutResponse{} - } - if err := m.ResponsePut.Unmarshal(data[iNdEx:postIndex]); err != nil { + v := &PutResponse{} + if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } + m.Response = &ResponseUnion_ResponsePut{v} iNdEx = postIndex case 3: if wireType != 2 { @@ -2359,6 +2933,9 @@ func (m *ResponseUnion) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2376,23 +2953,14 @@ func (m *ResponseUnion) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResponseDeleteRange == nil { - m.ResponseDeleteRange = &DeleteRangeResponse{} - } - if err := m.ResponseDeleteRange.Unmarshal(data[iNdEx:postIndex]); err != nil { + v := &DeleteRangeResponse{} + if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } + m.Response = &ResponseUnion_ResponseDeleteRange{v} iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -2407,14 +2975,21 @@ func (m *ResponseUnion) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *Compare) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2427,6 +3002,12 @@ func (m *Compare) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Compare: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -2434,6 +3015,9 @@ func (m *Compare) Unmarshal(data []byte) error { } m.Result = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2450,6 +3034,9 @@ func (m *Compare) Unmarshal(data []byte) error { } m.Target = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2466,6 +3053,9 @@ func (m *Compare) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2483,62 +3073,80 @@ func (m *Compare) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = append([]byte{}, data[iNdEx:postIndex]...) + m.Key = append(m.Key[:0], data[iNdEx:postIndex]...) 
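The most visible semantic change in the regenerated file: `RequestUnion` and `ResponseUnion` now model the union as a protobuf oneof, a single interface-typed field holding exactly one wrapper struct, so assigning a variant structurally clears the others, and callers type-switch instead of nil-checking three pointers. A sketch of the shape with trimmed stand-in types (the real ones, with their generated marker methods, live in etcdserverpb):

```go
package main

import "fmt"

// Stand-ins for the generated types; the real definitions carry many
// more fields and the isRequestUnion_Request marker method set.
type RangeRequest struct{ Key []byte }
type PutRequest struct{ Key, Value []byte }

type isRequest interface{ isRequest() }

type RequestUnion struct{ Request isRequest }

type RequestUnion_RequestRange struct{ RequestRange *RangeRequest }
type RequestUnion_RequestPut struct{ RequestPut *PutRequest }

func (*RequestUnion_RequestRange) isRequest() {}
func (*RequestUnion_RequestPut) isRequest()   {}

func describe(u *RequestUnion) string {
	switch tv := u.Request.(type) { // mirrors the new doUnion in v3demo_server.go
	case *RequestUnion_RequestRange:
		return fmt.Sprintf("range %q", tv.RequestRange.Key)
	case *RequestUnion_RequestPut:
		return fmt.Sprintf("put %q=%q", tv.RequestPut.Key, tv.RequestPut.Value)
	default:
		return "empty union"
	}
}

func main() {
	u := &RequestUnion{Request: &RequestUnion_RequestPut{&PutRequest{Key: []byte("k"), Value: []byte("v")}}}
	fmt.Println(describe(u))
	fmt.Println(describe(&RequestUnion{})) // nil interface hits the default arm
}
```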
+ if m.Key == nil { + m.Key = []byte{} + } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - m.Version = 0 + var v int64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ - m.Version |= (int64(b) & 0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } + m.TargetUnion = &Compare_Version{v} case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) } - m.CreateRevision = 0 + var v int64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ - m.CreateRevision |= (int64(b) & 0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } + m.TargetUnion = &Compare_CreateRevision{v} case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) } - m.ModRevision = 0 + var v int64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ - m.ModRevision |= (int64(b) & 0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } + m.TargetUnion = &Compare_ModRevision{v} case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2556,18 +3164,12 @@ func (m *Compare) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = append([]byte{}, data[iNdEx:postIndex]...) 
+ v := make([]byte, postIndex-iNdEx) + copy(v, data[iNdEx:postIndex]) + m.TargetUnion = &Compare_Value{v} iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -2582,14 +3184,21 @@ func (m *Compare) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *TxnRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2602,6 +3211,12 @@ func (m *TxnRequest) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -2609,6 +3224,9 @@ func (m *TxnRequest) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2637,6 +3255,9 @@ func (m *TxnRequest) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2665,6 +3286,9 @@ func (m *TxnRequest) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2688,15 +3312,7 @@ func (m *TxnRequest) Unmarshal(data []byte) error { } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -2711,14 +3327,21 @@ func (m *TxnRequest) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *TxnResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2731,6 +3354,12 @@ func (m *TxnResponse) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -2738,6 +3367,9 @@ func (m *TxnResponse) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2768,6 +3400,9 @@ func (m *TxnResponse) Unmarshal(data []byte) error { } var v int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2785,6 +3420,9 @@ func (m *TxnResponse) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return 
io.ErrUnexpectedEOF } @@ -2808,15 +3446,7 @@ func (m *TxnResponse) Unmarshal(data []byte) error { } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -2831,14 +3461,21 @@ func (m *TxnResponse) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *CompactionRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2851,6 +3488,12 @@ func (m *CompactionRequest) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -2858,6 +3501,9 @@ func (m *CompactionRequest) Unmarshal(data []byte) error { } m.Revision = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2869,15 +3515,7 @@ func (m *CompactionRequest) Unmarshal(data []byte) error { } } default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -2892,14 +3530,21 @@ func (m *CompactionRequest) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *CompactionResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2912,6 +3557,12 @@ func (m *CompactionResponse) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -2919,6 +3570,9 @@ func (m *CompactionResponse) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -2944,15 +3598,7 @@ func (m *CompactionResponse) Unmarshal(data []byte) error { } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) if err != nil { return err @@ -2967,6 +3613,9 @@ func (m *CompactionResponse) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func skipRpc(data []byte) (n int, err error) { @@ -2975,6 +3624,9 @@ func skipRpc(data []byte) (n int, err error) { for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -2988,7 +3640,10 @@ func skipRpc(data []byte) (n int, err error) { 
wireType := int(wire & 0x7) switch wireType { case 0: - for { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -3004,6 +3659,9 @@ func skipRpc(data []byte) (n int, err error) { case 2: var length int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -3024,6 +3682,9 @@ func skipRpc(data []byte) (n int, err error) { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -3059,4 +3720,5 @@ func skipRpc(data []byte) (n int, err error) { var ( ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") ) diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/server.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/server.go index 42282bea3d684..170c61cd4bf22 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/server.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/server.go @@ -174,12 +174,17 @@ type EtcdServer struct { // configuration is considered static for the lifetime of the EtcdServer. func NewServer(cfg *ServerConfig) (*EtcdServer, error) { st := store.New(StoreClusterPrefix, StoreKeysPrefix) + var w *wal.WAL var n raft.Node var s *raft.MemoryStorage var id types.ID var cl *cluster + if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { + return nil, fmt.Errorf("cannot access data directory: %v", terr) + } + // Run the migrations. dataVer, err := version.DetectDataDir(cfg.DataDir) if err != nil { @@ -189,11 +194,6 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) { return nil, err } - err = os.MkdirAll(cfg.MemberDir(), privateDirMode) - if err != nil && err != os.ErrExist { - return nil, err - } - haveWAL := wal.Exist(cfg.WALDir()) ss := snap.New(cfg.SnapDir()) @@ -255,10 +255,6 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) { cfg.PrintWithInitial() id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) case haveWAL: - if err := fileutil.IsDirWriteable(cfg.DataDir); err != nil { - return nil, fmt.Errorf("cannot write to data directory: %v", err) - } - if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { return nil, fmt.Errorf("cannot write to member directory: %v", err) } @@ -295,6 +291,10 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) { return nil, fmt.Errorf("unsupported bootstrap config") } + if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil { + return nil, fmt.Errorf("cannot access member directory: %v", terr) + } + sstats := &stats.ServerStats{ Name: cfg.Name, ID: id.String(), diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/v3demo_server.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/v3demo_server.go index 06cb68140a566..9d2972518bb97 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/v3demo_server.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/v3demo_server.go @@ -19,6 +19,7 @@ import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" dstorage "github.com/coreos/etcd/storage" + "github.com/coreos/etcd/storage/storagepb" "github.com/gogo/protobuf/proto" "golang.org/x/net/context" ) @@ -106,17 +107,24 @@ func doTxn(kv dstorage.KV, rt *pb.TxnRequest) *pb.TxnResponse { } func doUnion(kv dstorage.KV, union 
*pb.RequestUnion) *pb.ResponseUnion { - switch { - case union.RequestRange != nil: - return &pb.ResponseUnion{ResponseRange: doRange(kv, union.RequestRange)} - case union.RequestPut != nil: - return &pb.ResponseUnion{ResponsePut: doPut(kv, union.RequestPut)} - case union.RequestDeleteRange != nil: - return &pb.ResponseUnion{ResponseDeleteRange: doDeleteRange(kv, union.RequestDeleteRange)} + switch tv := union.Request.(type) { + case *pb.RequestUnion_RequestRange: + if tv.RequestRange != nil { + return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseRange{ResponseRange: doRange(kv, tv.RequestRange)}} + } + case *pb.RequestUnion_RequestPut: + if tv.RequestPut != nil { + return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponsePut{ResponsePut: doPut(kv, tv.RequestPut)}} + } + case *pb.RequestUnion_RequestDeleteRange: + if tv.RequestDeleteRange != nil { + return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseDeleteRange{ResponseDeleteRange: doDeleteRange(kv, tv.RequestDeleteRange)}} + } default: // empty union return nil } + return nil } func doCompare(kv dstorage.KV, c *pb.Compare) (int64, bool) { @@ -124,20 +132,35 @@ func doCompare(kv dstorage.KV, c *pb.Compare) (int64, bool) { if err != nil { return rev, false } - - ckv := ckvs[0] + var ckv storagepb.KeyValue + if len(ckvs) != 0 { + ckv = ckvs[0] + } // -1 is less, 0 is equal, 1 is greater var result int switch c.Target { case pb.Compare_VALUE: - result = bytes.Compare(ckv.Value, c.Value) + tv, _ := c.TargetUnion.(*pb.Compare_Value) + if tv != nil { + result = bytes.Compare(ckv.Value, tv.Value) + } case pb.Compare_CREATE: - result = compareInt64(ckv.CreateRevision, c.CreateRevision) + tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision) + if tv != nil { + result = compareInt64(ckv.CreateRevision, tv.CreateRevision) + } + case pb.Compare_MOD: - result = compareInt64(ckv.ModRevision, c.ModRevision) + tv, _ := c.TargetUnion.(*pb.Compare_ModRevision) + if tv != nil { + result = compareInt64(ckv.ModRevision, tv.ModRevision) + } case pb.Compare_VERSION: - result = compareInt64(ckv.Version, c.Version) + tv, _ := c.TargetUnion.(*pb.Compare_Version) + if tv != nil { + result = compareInt64(ckv.Version, tv.Version) + } } switch c.Result { diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go index b052553c11dd5..60552a77038ed 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go @@ -25,6 +25,8 @@ import ( const ( privateFileMode = 0600 + // owner can make/remove files inside the directory + privateDirMode = 0700 ) var ( @@ -55,3 +57,13 @@ func ReadDir(dirpath string) ([]string, error) { sort.Strings(names) return names, nil } + +// TouchDirAll is simliar to os.MkdirAll. It creates directories with 0700 permission if any directory +// does not exists. TouchDirAll also ensures the given directory is writable. +func TouchDirAll(dir string) error { + err := os.MkdirAll(dir, privateDirMode) + if err != nil && err != os.ErrExist { + return err + } + return IsDirWriteable(dir) +} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/ioutil/util.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/ioutil/util.go new file mode 100644 index 0000000000000..d7ed18b454086 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/ioutil/util.go @@ -0,0 +1,41 @@ +// Copyright 2015 CoreOS, Inc. 
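The `doCompare` rewrite just above folds in two fixes: it no longer indexes `ckvs[0]` unconditionally, so a compare against an absent key reads a zero-valued `storagepb.KeyValue` instead of indexing an empty slice, and each target fetches its operand through a `TargetUnion` type assertion. A compressed sketch of that control flow, restricted to the VERSION target and using stand-in types:

```go
package main

import "fmt"

type KeyValue struct{ Version int64 } // stand-in for storagepb.KeyValue

type isTarget interface{ isTarget() }
type Compare_Version struct{ Version int64 }

func (*Compare_Version) isTarget() {}

type Compare struct{ TargetUnion isTarget }

func compareInt64(a, b int64) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	}
	return 0
}

// doCompare mirrors the fixed logic: tolerate zero matches and a missing
// target operand instead of failing on either.
func doCompare(ckvs []KeyValue, c *Compare) int {
	var ckv KeyValue // zero value stands in when the key does not exist
	if len(ckvs) != 0 {
		ckv = ckvs[0]
	}
	var result int
	if tv, _ := c.TargetUnion.(*Compare_Version); tv != nil {
		result = compareInt64(ckv.Version, tv.Version)
	}
	return result
}

func main() {
	c := &Compare{TargetUnion: &Compare_Version{Version: 3}}
	fmt.Println(doCompare(nil, c))                      // -1: missing key has version 0
	fmt.Println(doCompare([]KeyValue{{Version: 3}}, c)) // 0: equal
}
```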
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ioutil + +import ( + "io" + "os" +) + +// WriteAndSyncFile behaviors just like ioutil.WriteFile in standard library +// but calls Sync before closing the file. WriteAndSyncFile guarantees the data +// is synced if there is no error returned. +func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err == nil { + err = f.Sync() + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/transport/keepalive_listener.go index 6f580619ab071..1fe1ba80dd4ea 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/transport/keepalive_listener.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/transport/keepalive_listener.go @@ -21,17 +21,19 @@ import ( "time" ) +type keepAliveConn interface { + SetKeepAlive(bool) error + SetKeepAlivePeriod(d time.Duration) error +} + // NewKeepAliveListener returns a listener that listens on the given address. +// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. +// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. // http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html -func NewKeepAliveListener(addr string, scheme string, info TLSInfo) (net.Listener, error) { - l, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - +func NewKeepAliveListener(l net.Listener, scheme string, info TLSInfo) (net.Listener, error) { if scheme == "https" { if info.Empty() { - return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr) + return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") } cfg, err := info.ServerConfig() if err != nil { @@ -53,13 +55,13 @@ func (kln *keepaliveListener) Accept() (net.Conn, error) { if err != nil { return nil, err } - tcpc := c.(*net.TCPConn) + kac := c.(keepAliveConn) // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl // default on linux: 30 + 8 * 30 // default on osx: 30 + 8 * 75 - tcpc.SetKeepAlive(true) - tcpc.SetKeepAlivePeriod(30 * time.Second) - return tcpc, nil + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + return c, nil } // A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. 
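`WriteAndSyncFile`, added above, closes a durability gap in `ioutil.WriteFile`: a successful write only hands bytes to the page cache, so a crash can lose a file the caller believes is written; syncing before close (and still surfacing the Close error) makes a nil return meaningful. A hypothetical call site, assuming the vendored import path:

```go
package main

import (
	"log"

	"github.com/coreos/etcd/pkg/ioutil"
)

func main() {
	// Same shape as the standard library's ioutil.WriteFile, but the data
	// is on stable storage, not just in the page cache, if err is nil.
	if err := ioutil.WriteAndSyncFile("/tmp/wal.tmp", []byte("record"), 0600); err != nil {
		log.Fatalf("durable write failed: %v", err)
	}
}
```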
@@ -75,12 +77,12 @@ func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { if err != nil { return } - tcpc := c.(*net.TCPConn) + kac := c.(keepAliveConn) // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl // default on linux: 30 + 8 * 30 // default on osx: 30 + 8 * 75 - tcpc.SetKeepAlive(true) - tcpc.SetKeepAlivePeriod(30 * time.Second) + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) c = tls.Server(c, l.config) return } diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/transport/limit_listen.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/transport/limit_listen.go new file mode 100644 index 0000000000000..8a81a6b93f764 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/transport/limit_listen.go @@ -0,0 +1,70 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package netutil provides network utility functions, complementing the more +// common ones in the net package. +package transport + +import ( + "errors" + "net" + "sync" + "time" +) + +var ( + ErrNotTCP = errors.New("only tcp connections have keepalive") +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. +func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} + +func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlive(doKeepAlive) +} + +func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlivePeriod(d) +} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/Godeps/_workspace/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go index 60cb517ad8d65..6c3bd480172bf 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -19,16 +19,19 @@ */ package raftpb -import proto "github.com/gogo/protobuf/proto" -import math "math" +import ( + "fmt" + + proto "github.com/gogo/protobuf/proto" +) -// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto" +import math "math" import io "io" -import fmt "fmt" // Reference imports to suppress errors if they are not otherwise used. 
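The listener changes are linked: `NewKeepAliveListener` now accepts an existing `net.Listener` instead of creating its own precisely so it can be layered over the new `LimitListener`, and since the limiter's conns are not `*net.TCPConn`, the keepalive code is retargeted at the small `keepAliveConn` interface that `limitListenerConn` implements. A plausible composition of the two, under the signatures shown above:

```go
package main

import (
	"log"
	"net"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	base, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	// At most 100 concurrent conns; each accepted conn still gets TCP
	// keepalive because limitListenerConn forwards SetKeepAlive and
	// SetKeepAlivePeriod to the underlying *net.TCPConn.
	limited := transport.LimitListener(base, 100)
	l, err := transport.NewKeepAliveListener(limited, "http", transport.TLSInfo{})
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	log.Println("serving on", l.Addr())
}
```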
var _ = proto.Marshal +var _ = fmt.Errorf var _ = math.Inf type EntryType int32 @@ -164,10 +167,10 @@ func (x *ConfChangeType) UnmarshalJSON(data []byte) error { } type Entry struct { - Type EntryType `protobuf:"varint,1,opt,enum=raftpb.EntryType" json:"Type"` - Term uint64 `protobuf:"varint,2,opt" json:"Term"` - Index uint64 `protobuf:"varint,3,opt" json:"Index"` - Data []byte `protobuf:"bytes,4,opt" json:"Data,omitempty"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -236,10 +239,10 @@ func (m *ConfState) String() string { return proto.CompactTextString(m) } func (*ConfState) ProtoMessage() {} type ConfChange struct { - ID uint64 `protobuf:"varint,1,opt" json:"ID"` - Type ConfChangeType `protobuf:"varint,2,opt,enum=raftpb.ConfChangeType" json:"Type"` - NodeID uint64 `protobuf:"varint,3,opt" json:"NodeID"` - Context []byte `protobuf:"bytes,4,opt" json:"Context,omitempty"` + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` + NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` + Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -248,6 +251,13 @@ func (m *ConfChange) String() string { return proto.CompactTextString(m) } func (*ConfChange) ProtoMessage() {} func init() { + proto.RegisterType((*Entry)(nil), "raftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") + proto.RegisterType((*Message)(nil), "raftpb.Message") + proto.RegisterType((*HardState)(nil), "raftpb.HardState") + proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") + proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) @@ -681,8 +691,12 @@ func (m *Entry) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -695,6 +709,12 @@ func (m *Entry) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -702,6 +722,9 @@ func (m *Entry) Unmarshal(data []byte) error { } m.Type = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -718,6 +741,9 @@ func (m *Entry) Unmarshal(data []byte) error { } m.Term = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -734,6 +760,9 @@ func (m *Entry) Unmarshal(data []byte) error { } m.Index = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft 
+ } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -750,6 +779,9 @@ func (m *Entry) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -767,18 +799,13 @@ func (m *Entry) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append([]byte{}, data[iNdEx:postIndex]...) + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRaft(data[iNdEx:]) if err != nil { return err @@ -794,14 +821,21 @@ func (m *Entry) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *SnapshotMetadata) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -814,6 +848,12 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -821,6 +861,9 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -848,6 +891,9 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error { } m.Index = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -864,6 +910,9 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error { } m.Term = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -875,15 +924,7 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error { } } default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRaft(data[iNdEx:]) if err != nil { return err @@ -899,14 +940,21 @@ func (m *SnapshotMetadata) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *Snapshot) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -919,6 +967,12 @@ func (m *Snapshot) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -926,6 +980,9 @@ func (m *Snapshot) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } 
@@ -943,7 +1000,10 @@ func (m *Snapshot) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append([]byte{}, data[iNdEx:postIndex]...) + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } iNdEx = postIndex case 2: if wireType != 2 { @@ -951,6 +1011,9 @@ func (m *Snapshot) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -973,15 +1036,7 @@ func (m *Snapshot) Unmarshal(data []byte) error { } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRaft(data[iNdEx:]) if err != nil { return err @@ -997,14 +1052,21 @@ func (m *Snapshot) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *Message) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1017,6 +1079,12 @@ func (m *Message) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -1024,6 +1092,9 @@ func (m *Message) Unmarshal(data []byte) error { } m.Type = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1040,6 +1111,9 @@ func (m *Message) Unmarshal(data []byte) error { } m.To = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1056,6 +1130,9 @@ func (m *Message) Unmarshal(data []byte) error { } m.From = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1072,6 +1149,9 @@ func (m *Message) Unmarshal(data []byte) error { } m.Term = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1088,6 +1168,9 @@ func (m *Message) Unmarshal(data []byte) error { } m.LogTerm = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1104,6 +1187,9 @@ func (m *Message) Unmarshal(data []byte) error { } m.Index = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1120,6 +1206,9 @@ func (m *Message) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1148,6 +1237,9 @@ func (m *Message) Unmarshal(data []byte) error { } m.Commit = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1164,6 +1256,9 @@ func (m *Message) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l 
{ return io.ErrUnexpectedEOF } @@ -1191,6 +1286,9 @@ func (m *Message) Unmarshal(data []byte) error { } var v int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1208,6 +1306,9 @@ func (m *Message) Unmarshal(data []byte) error { } m.RejectHint = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1219,15 +1320,7 @@ func (m *Message) Unmarshal(data []byte) error { } } default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRaft(data[iNdEx:]) if err != nil { return err @@ -1243,14 +1336,21 @@ func (m *Message) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *HardState) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1263,6 +1363,12 @@ func (m *HardState) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -1270,6 +1376,9 @@ func (m *HardState) Unmarshal(data []byte) error { } m.Term = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1286,6 +1395,9 @@ func (m *HardState) Unmarshal(data []byte) error { } m.Vote = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1302,6 +1414,9 @@ func (m *HardState) Unmarshal(data []byte) error { } m.Commit = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1313,15 +1428,7 @@ func (m *HardState) Unmarshal(data []byte) error { } } default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRaft(data[iNdEx:]) if err != nil { return err @@ -1337,14 +1444,21 @@ func (m *HardState) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *ConfState) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1357,6 +1471,12 @@ func (m *ConfState) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -1364,6 +1484,9 @@ func (m *ConfState) Unmarshal(data []byte) error { } var v uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1376,15 +1499,7 @@ func (m *ConfState) 
Unmarshal(data []byte) error { } m.Nodes = append(m.Nodes, v) default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRaft(data[iNdEx:]) if err != nil { return err @@ -1400,14 +1515,21 @@ func (m *ConfState) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *ConfChange) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1420,6 +1542,12 @@ func (m *ConfChange) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -1427,6 +1555,9 @@ func (m *ConfChange) Unmarshal(data []byte) error { } m.ID = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1443,6 +1574,9 @@ func (m *ConfChange) Unmarshal(data []byte) error { } m.Type = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1459,6 +1593,9 @@ func (m *ConfChange) Unmarshal(data []byte) error { } m.NodeID = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1475,6 +1612,9 @@ func (m *ConfChange) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -1492,18 +1632,13 @@ func (m *ConfChange) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Context = append([]byte{}, data[iNdEx:postIndex]...) + m.Context = append(m.Context[:0], data[iNdEx:postIndex]...) 
+ if m.Context == nil { + m.Context = []byte{} + } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRaft(data[iNdEx:]) if err != nil { return err @@ -1519,6 +1654,9 @@ func (m *ConfChange) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func skipRaft(data []byte) (n int, err error) { @@ -1527,6 +1665,9 @@ func skipRaft(data []byte) (n int, err error) { for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -1540,7 +1681,10 @@ func skipRaft(data []byte) (n int, err error) { wireType := int(wire & 0x7) switch wireType { case 0: - for { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -1556,6 +1700,9 @@ func skipRaft(data []byte) (n int, err error) { case 2: var length int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -1576,6 +1723,9 @@ func skipRaft(data []byte) (n int, err error) { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -1611,4 +1761,5 @@ func skipRaft(data []byte) (n int, err error) { var ( ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") ) diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/snap/snappb/snap.pb.go b/Godeps/_workspace/src/github.com/coreos/etcd/snap/snappb/snap.pb.go index 1013dbbfc8716..5d1d21ab31db2 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/snap/snappb/snap.pb.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/snap/snappb/snap.pb.go @@ -13,16 +13,19 @@ */ package snappb -import proto "github.com/gogo/protobuf/proto" -import math "math" +import ( + "fmt" + + proto "github.com/gogo/protobuf/proto" +) -// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto" +import math "math" import io "io" -import fmt "fmt" // Reference imports to suppress errors if they are not otherwise used. 
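The new `init()` registrations (here and in the raftpb and storagepb hunks) record each generated message under its fully qualified proto name; with the proto runtime of this vintage, that is what enables name-based reverse lookups. A small sketch against the gogo API, with a toy message of my own:

```go
package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
)

// Ping implements proto.Message just enough to be registrable.
type Ping struct{}

func (*Ping) Reset()         {}
func (*Ping) String() string { return "ping" }
func (*Ping) ProtoMessage()  {}

func init() {
	// Same pattern the regenerated files now emit in their init().
	proto.RegisterType((*Ping)(nil), "example.Ping")
}

func main() {
	// Name-based reverse lookup, useful for Any-style dispatch and debugging.
	fmt.Println(proto.MessageType("example.Ping")) // *main.Ping
}
```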
var _ = proto.Marshal +var _ = fmt.Errorf var _ = math.Inf type Snapshot struct { @@ -35,6 +38,9 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} +func init() { + proto.RegisterType((*Snapshot)(nil), "snappb.snapshot") +} func (m *Snapshot) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -123,8 +129,12 @@ func (m *Snapshot) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnap + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -137,6 +147,12 @@ func (m *Snapshot) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -144,6 +160,9 @@ func (m *Snapshot) Unmarshal(data []byte) error { } m.Crc = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnap + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -160,6 +179,9 @@ func (m *Snapshot) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnap + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -177,18 +199,13 @@ func (m *Snapshot) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append([]byte{}, data[iNdEx:postIndex]...) + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipSnap(data[iNdEx:]) if err != nil { return err @@ -204,6 +221,9 @@ func (m *Snapshot) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func skipSnap(data []byte) (n int, err error) { @@ -212,6 +232,9 @@ func skipSnap(data []byte) (n int, err error) { for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -225,7 +248,10 @@ func skipSnap(data []byte) (n int, err error) { wireType := int(wire & 0x7) switch wireType { case 0: - for { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -241,6 +267,9 @@ func skipSnap(data []byte) (n int, err error) { case 2: var length int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -261,6 +290,9 @@ func skipSnap(data []byte) (n int, err error) { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -296,4 +328,5 @@ func skipSnap(data []byte) (n int, err error) { var ( ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow") ) diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/storage/storagepb/kv.pb.go 
b/Godeps/_workspace/src/github.com/coreos/etcd/storage/storagepb/kv.pb.go index 03f6fe98ed55f..2ac835a1ae25c 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/storage/storagepb/kv.pb.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/storage/storagepb/kv.pb.go @@ -14,15 +14,20 @@ */ package storagepb -import proto "github.com/gogo/protobuf/proto" +import ( + "fmt" -// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto" + proto "github.com/gogo/protobuf/proto" +) + +import math "math" import io "io" -import fmt "fmt" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf type Event_EventType int32 @@ -76,6 +81,8 @@ func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func init() { + proto.RegisterType((*KeyValue)(nil), "storagepb.KeyValue") + proto.RegisterType((*Event)(nil), "storagepb.Event") proto.RegisterEnum("storagepb.Event_EventType", Event_EventType_name, Event_EventType_value) } func (m *KeyValue) Marshal() (data []byte, err error) { @@ -244,8 +251,12 @@ func (m *KeyValue) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -258,6 +269,12 @@ func (m *KeyValue) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 2 { @@ -265,6 +282,9 @@ func (m *KeyValue) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -282,7 +302,10 @@ func (m *KeyValue) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = append([]byte{}, data[iNdEx:postIndex]...) + m.Key = append(m.Key[:0], data[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } iNdEx = postIndex case 2: if wireType != 0 { @@ -290,6 +313,9 @@ func (m *KeyValue) Unmarshal(data []byte) error { } m.CreateRevision = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -306,6 +332,9 @@ func (m *KeyValue) Unmarshal(data []byte) error { } m.ModRevision = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -322,6 +351,9 @@ func (m *KeyValue) Unmarshal(data []byte) error { } m.Version = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -338,6 +370,9 @@ func (m *KeyValue) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -355,18 +390,13 @@ func (m *KeyValue) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = append([]byte{}, data[iNdEx:postIndex]...) + m.Value = append(m.Value[:0], data[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipKv(data[iNdEx:]) if err != nil { return err @@ -381,14 +411,21 @@ func (m *KeyValue) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *Event) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -401,6 +438,12 @@ func (m *Event) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -408,6 +451,9 @@ func (m *Event) Unmarshal(data []byte) error { } m.Type = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -424,6 +470,9 @@ func (m *Event) Unmarshal(data []byte) error { } var msglen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -449,15 +498,7 @@ func (m *Event) Unmarshal(data []byte) error { } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipKv(data[iNdEx:]) if err != nil { return err @@ -472,6 +513,9 @@ func (m *Event) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func skipKv(data []byte) (n int, err error) { @@ -480,6 +524,9 @@ func skipKv(data []byte) (n int, err error) { for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -493,7 +540,10 @@ func skipKv(data []byte) (n int, err error) { wireType := int(wire & 0x7) switch wireType { case 0: - for { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -509,6 +559,9 @@ func skipKv(data []byte) (n int, err error) { case 2: var length int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -529,6 +582,9 @@ func skipKv(data []byte) (n int, err error) { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -564,4 +620,5 @@ func skipKv(data []byte) (n int, err error) { var ( ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") ) diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/version/version.go b/Godeps/_workspace/src/github.com/coreos/etcd/version/version.go index a2436553e6bcb..5e2b778547aa8 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/version/version.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/version/version.go @@ -27,7 +27,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is 
compatible with. MinClusterVersion = "2.1.0" - Version = "2.2.2+git" + Version = "2.2.5" // Git SHA Value will be set during build GitSHA = "Not provided (use ./build instead of go build)" diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/wal/walpb/record.pb.go b/Godeps/_workspace/src/github.com/coreos/etcd/wal/walpb/record.pb.go index a9b38a47ef9be..638bdc3b69f39 100644 --- a/Godeps/_workspace/src/github.com/coreos/etcd/wal/walpb/record.pb.go +++ b/Godeps/_workspace/src/github.com/coreos/etcd/wal/walpb/record.pb.go @@ -14,16 +14,19 @@ */ package walpb -import proto "github.com/gogo/protobuf/proto" -import math "math" +import ( + "fmt" -// discarding unused import gogoproto "github.com/coreos/etcd/Godeps/_workspace/src/gogoproto" + proto "github.com/gogo/protobuf/proto" +) + +import math "math" import io "io" -import fmt "fmt" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal +var _ = fmt.Errorf var _ = math.Inf type Record struct { @@ -47,6 +50,10 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} +func init() { + proto.RegisterType((*Record)(nil), "walpb.Record") + proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot") +} func (m *Record) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -177,8 +184,12 @@ func (m *Record) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -191,6 +202,12 @@ func (m *Record) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Record: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -198,6 +215,9 @@ func (m *Record) Unmarshal(data []byte) error { } m.Type = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -214,6 +234,9 @@ func (m *Record) Unmarshal(data []byte) error { } m.Crc = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -230,6 +253,9 @@ func (m *Record) Unmarshal(data []byte) error { } var byteLen int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -247,18 +273,13 @@ func (m *Record) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append([]byte{}, data[iNdEx:postIndex]...) + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } iNdEx = postIndex default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRecord(data[iNdEx:]) if err != nil { return err @@ -274,14 +295,21 @@ func (m *Record) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func (m *Snapshot) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { + preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -294,6 +322,12 @@ func (m *Snapshot) Unmarshal(data []byte) error { } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } switch fieldNum { case 1: if wireType != 0 { @@ -301,6 +335,9 @@ func (m *Snapshot) Unmarshal(data []byte) error { } m.Index = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -317,6 +354,9 @@ func (m *Snapshot) Unmarshal(data []byte) error { } m.Term = 0 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -328,15 +368,7 @@ func (m *Snapshot) Unmarshal(data []byte) error { } } default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire + iNdEx = preIndex skippy, err := skipRecord(data[iNdEx:]) if err != nil { return err @@ -352,6 +384,9 @@ func (m *Snapshot) Unmarshal(data []byte) error { } } + if iNdEx > l { + return io.ErrUnexpectedEOF + } return nil } func skipRecord(data []byte) (n int, err error) { @@ -360,6 +395,9 @@ func skipRecord(data []byte) (n int, err error) { for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -373,7 +411,10 @@ func skipRecord(data []byte) (n int, err error) { wireType := int(wire & 0x7) switch wireType { case 0: - for { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -389,6 +430,9 @@ func skipRecord(data []byte) (n int, err error) { case 2: var length int for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -409,6 +453,9 @@ func skipRecord(data []byte) (n int, err error) { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } @@ -444,4 +491,5 @@ func skipRecord(data []byte) (n int, err error) { var ( ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow") ) diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/LICENSE b/Godeps/_workspace/src/github.com/coreos/rkt/LICENSE index e06d2081865a7..f152ee101ed7c 100644 --- a/Godeps/_workspace/src/github.com/coreos/rkt/LICENSE +++ b/Godeps/_workspace/src/github.com/coreos/rkt/LICENSE @@ -200,3 +200,28 @@ Apache License See the License for the specific language 
governing permissions and limitations under the License. + +Third Party Sources Bundled + +This project includes code derived from the MIT licensed naegelejd/go-acl +project. Here's a copy of its license: + + Copyright (c) 2015 Joseph Naegele + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/README.md b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/README.md index 7d4eada04a27b..738ae2e7617d0 100644 --- a/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/README.md +++ b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/README.md @@ -2,11 +2,10 @@ The API defined here is proposed, experimental, and (for now) subject to change at any time. -**Do not use it.** - If you think you want to use it, or for any other queries, contact or file an [issue](https://github.com/coreos/rkt/issues/new) For more information, see: - #1208 - #1359 - #1468 +- [API Service Subcommand](../../Documentation/subcommands/api-service.md) diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.pb.go b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.pb.go index 40aa856f006de..1b64bb03a3304 100644 --- a/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.pb.go +++ b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.pb.go @@ -51,6 +51,10 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.ProtoPackageIsVersion1 + // ImageType defines the supported image type. type ImageType int32 @@ -77,6 +81,7 @@ var ImageType_value = map[string]int32{ func (x ImageType) String() string { return proto.EnumName(ImageType_name, int32(x)) } +func (ImageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // AppState defines the possible states of the app. type AppState int32 @@ -101,6 +106,7 @@ var AppState_value = map[string]int32{ func (x AppState) String() string { return proto.EnumName(AppState_name, int32(x)) } +func (AppState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } // PodState defines the possible states of the pod. 
// See https://github.com/coreos/rkt/blob/master/Documentation/devel/pod-lifecycle.md for a detailed @@ -148,6 +154,7 @@ var PodState_value = map[string]int32{ func (x PodState) String() string { return proto.EnumName(PodState_name, int32(x)) } +func (PodState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } // EventType defines the type of the events that will be received via ListenEvents(). type EventType int32 @@ -196,6 +203,7 @@ var EventType_value = map[string]int32{ func (x EventType) String() string { return proto.EnumName(EventType_name, int32(x)) } +func (EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } // ImageFormat defines the format of the image. type ImageFormat struct { @@ -205,9 +213,10 @@ type ImageFormat struct { Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` } -func (m *ImageFormat) Reset() { *m = ImageFormat{} } -func (m *ImageFormat) String() string { return proto.CompactTextString(m) } -func (*ImageFormat) ProtoMessage() {} +func (m *ImageFormat) Reset() { *m = ImageFormat{} } +func (m *ImageFormat) String() string { return proto.CompactTextString(m) } +func (*ImageFormat) ProtoMessage() {} +func (*ImageFormat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // Image describes the image's information. type Image struct { @@ -226,11 +235,16 @@ type Image struct { ImportTimestamp int64 `protobuf:"varint,5,opt,name=import_timestamp" json:"import_timestamp,omitempty"` // JSON-encoded byte array that represents the image manifest, optional. Manifest []byte `protobuf:"bytes,6,opt,name=manifest,proto3" json:"manifest,omitempty"` + // Size is the size in bytes of this image in the store. + Size int64 `protobuf:"varint,7,opt,name=size" json:"size,omitempty"` + // Annotations on this image. + Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"` } -func (m *Image) Reset() { *m = Image{} } -func (m *Image) String() string { return proto.CompactTextString(m) } -func (*Image) ProtoMessage() {} +func (m *Image) Reset() { *m = Image{} } +func (m *Image) String() string { return proto.CompactTextString(m) } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *Image) GetBaseFormat() *ImageFormat { if m != nil { @@ -239,6 +253,13 @@ func (m *Image) GetBaseFormat() *ImageFormat { return nil } +func (m *Image) GetAnnotations() []*KeyValue { + if m != nil { + return m.Annotations + } + return nil +} + // Network describes the network information of a pod. type Network struct { // Name of the network that a pod belongs to, required. @@ -249,9 +270,10 @@ type Network struct { Ipv6 string `protobuf:"bytes,3,opt,name=ipv6" json:"ipv6,omitempty"` } -func (m *Network) Reset() { *m = Network{} } -func (m *Network) String() string { return proto.CompactTextString(m) } -func (*Network) ProtoMessage() {} +func (m *Network) Reset() { *m = Network{} } +func (m *Network) String() string { return proto.CompactTextString(m) } +func (*Network) ProtoMessage() {} +func (*Network) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } // App describes the information of an app that's running in a pod. type App struct { @@ -265,11 +287,14 @@ type App struct { // Exit code of the app. optional, only valid if it's returned by InspectPod() and // the app has already exited. ExitCode int32 `protobuf:"zigzag32,4,opt,name=exit_code" json:"exit_code,omitempty"` + // Annotations for this app. 
+ Annotations []*KeyValue `protobuf:"bytes,5,rep,name=annotations" json:"annotations,omitempty"` } -func (m *App) Reset() { *m = App{} } -func (m *App) String() string { return proto.CompactTextString(m) } -func (*App) ProtoMessage() {} +func (m *App) Reset() { *m = App{} } +func (m *App) String() string { return proto.CompactTextString(m) } +func (*App) ProtoMessage() {} +func (*App) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *App) GetImage() *Image { if m != nil { @@ -278,26 +303,46 @@ func (m *App) GetImage() *Image { return nil } +func (m *App) GetAnnotations() []*KeyValue { + if m != nil { + return m.Annotations + } + return nil +} + // Pod describes a pod's information. +// If a pod is in Embryo, Preparing, AbortedPrepare state, +// only id and state will be returned. +// +// If a pod is in other states, the pod manifest and +// apps will be returned when 'detailed' is true in the request. +// +// A valid pid of the stage1 process of the pod will be returned +// if the pod is Running has run once. +// +// Networks are only returned when a pod is in Running. type Pod struct { - // ID of the pod, in the form of a UUID, required. + // ID of the pod, in the form of a UUID. Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - // PID of the pod, optional, only valid if it's returned by InspectPod(). A negative value means the pod has exited. + // PID of the stage1 process of the pod. Pid int32 `protobuf:"zigzag32,2,opt,name=pid" json:"pid,omitempty"` - // State of the pod, required. + // State of the pod. State PodState `protobuf:"varint,3,opt,name=state,enum=v1alpha.PodState" json:"state,omitempty"` - // List of apps in the pod, required. + // List of apps in the pod. Apps []*App `protobuf:"bytes,4,rep,name=apps" json:"apps,omitempty"` - // Network information of the pod, optional, non-empty if the pod is running in private net. + // Network information of the pod. // Note that a pod can be in multiple networks. Networks []*Network `protobuf:"bytes,5,rep,name=networks" json:"networks,omitempty"` - // JSON-encoded byte array that represents the pod manifest of the pod, required. + // JSON-encoded byte array that represents the pod manifest of the pod. Manifest []byte `protobuf:"bytes,6,opt,name=manifest,proto3" json:"manifest,omitempty"` + // Annotations on this pod. + Annotations []*KeyValue `protobuf:"bytes,7,rep,name=annotations" json:"annotations,omitempty"` } -func (m *Pod) Reset() { *m = Pod{} } -func (m *Pod) String() string { return proto.CompactTextString(m) } -func (*Pod) ProtoMessage() {} +func (m *Pod) Reset() { *m = Pod{} } +func (m *Pod) String() string { return proto.CompactTextString(m) } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *Pod) GetApps() []*App { if m != nil { @@ -313,16 +358,24 @@ func (m *Pod) GetNetworks() []*Network { return nil } +func (m *Pod) GetAnnotations() []*KeyValue { + if m != nil { + return m.Annotations + } + return nil +} + type KeyValue struct { // Key part of the key-value pair. - Key string `protobuf:"bytes,1,opt" json:"Key,omitempty"` + Key string `protobuf:"bytes,1,opt,name=Key" json:"Key,omitempty"` // Value part of the key-value pair. 
Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } // PodFilter defines the condition that the returned pods need to satisfy in ListPods(). // The conditions are combined by 'AND', and different filters are combined by 'OR'. @@ -341,9 +394,10 @@ type PodFilter struct { Annotations []*KeyValue `protobuf:"bytes,6,rep,name=annotations" json:"annotations,omitempty"` } -func (m *PodFilter) Reset() { *m = PodFilter{} } -func (m *PodFilter) String() string { return proto.CompactTextString(m) } -func (*PodFilter) ProtoMessage() {} +func (m *PodFilter) Reset() { *m = PodFilter{} } +func (m *PodFilter) String() string { return proto.CompactTextString(m) } +func (*PodFilter) ProtoMessage() {} +func (*PodFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *PodFilter) GetAnnotations() []*KeyValue { if m != nil { @@ -374,11 +428,14 @@ type ImageFilter struct { ImportedBefore int64 `protobuf:"varint,7,opt,name=imported_before" json:"imported_before,omitempty"` // If not empty, the images that have all of the annotations will be returned. Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"` + // If not empty, the images that have any of the exact full names will be returned. + FullNames []string `protobuf:"bytes,9,rep,name=full_names" json:"full_names,omitempty"` } -func (m *ImageFilter) Reset() { *m = ImageFilter{} } -func (m *ImageFilter) String() string { return proto.CompactTextString(m) } -func (*ImageFilter) ProtoMessage() {} +func (m *ImageFilter) Reset() { *m = ImageFilter{} } +func (m *ImageFilter) String() string { return proto.CompactTextString(m) } +func (*ImageFilter) ProtoMessage() {} +func (*ImageFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *ImageFilter) GetLabels() []*KeyValue { if m != nil { @@ -404,9 +461,10 @@ type Info struct { ApiVersion string `protobuf:"bytes,3,opt,name=api_version" json:"api_version,omitempty"` } -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } // Event describes the events that will be received via ListenEvents(). 
type Event struct { @@ -427,9 +485,10 @@ type Event struct { Data []*KeyValue `protobuf:"bytes,5,rep,name=data" json:"data,omitempty"` } -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *Event) GetData() []*KeyValue { if m != nil { @@ -456,26 +515,29 @@ type EventFilter struct { UntilTime int64 `protobuf:"varint,5,opt,name=until_time" json:"until_time,omitempty"` } -func (m *EventFilter) Reset() { *m = EventFilter{} } -func (m *EventFilter) String() string { return proto.CompactTextString(m) } -func (*EventFilter) ProtoMessage() {} +func (m *EventFilter) Reset() { *m = EventFilter{} } +func (m *EventFilter) String() string { return proto.CompactTextString(m) } +func (*EventFilter) ProtoMessage() {} +func (*EventFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } // Request for GetInfo(). type GetInfoRequest struct { } -func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} } -func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) } -func (*GetInfoRequest) ProtoMessage() {} +func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} } +func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetInfoRequest) ProtoMessage() {} +func (*GetInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } // Response for GetInfo(). type GetInfoResponse struct { Info *Info `protobuf:"bytes,1,opt,name=info" json:"info,omitempty"` } -func (m *GetInfoResponse) Reset() { *m = GetInfoResponse{} } -func (m *GetInfoResponse) String() string { return proto.CompactTextString(m) } -func (*GetInfoResponse) ProtoMessage() {} +func (m *GetInfoResponse) Reset() { *m = GetInfoResponse{} } +func (m *GetInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetInfoResponse) ProtoMessage() {} +func (*GetInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *GetInfoResponse) GetInfo() *Info { if m != nil { @@ -490,9 +552,10 @@ type ListPodsRequest struct { Detail bool `protobuf:"varint,2,opt,name=detail" json:"detail,omitempty"` } -func (m *ListPodsRequest) Reset() { *m = ListPodsRequest{} } -func (m *ListPodsRequest) String() string { return proto.CompactTextString(m) } -func (*ListPodsRequest) ProtoMessage() {} +func (m *ListPodsRequest) Reset() { *m = ListPodsRequest{} } +func (m *ListPodsRequest) String() string { return proto.CompactTextString(m) } +func (*ListPodsRequest) ProtoMessage() {} +func (*ListPodsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *ListPodsRequest) GetFilters() []*PodFilter { if m != nil { @@ -506,9 +569,10 @@ type ListPodsResponse struct { Pods []*Pod `protobuf:"bytes,1,rep,name=pods" json:"pods,omitempty"` } -func (m *ListPodsResponse) Reset() { *m = ListPodsResponse{} } -func (m *ListPodsResponse) String() string { return proto.CompactTextString(m) } -func (*ListPodsResponse) ProtoMessage() {} +func (m *ListPodsResponse) Reset() { *m = ListPodsResponse{} } +func (m *ListPodsResponse) String() string { return proto.CompactTextString(m) } +func (*ListPodsResponse) ProtoMessage() {} +func (*ListPodsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m 
*ListPodsResponse) GetPods() []*Pod { if m != nil { @@ -523,18 +587,20 @@ type InspectPodRequest struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` } -func (m *InspectPodRequest) Reset() { *m = InspectPodRequest{} } -func (m *InspectPodRequest) String() string { return proto.CompactTextString(m) } -func (*InspectPodRequest) ProtoMessage() {} +func (m *InspectPodRequest) Reset() { *m = InspectPodRequest{} } +func (m *InspectPodRequest) String() string { return proto.CompactTextString(m) } +func (*InspectPodRequest) ProtoMessage() {} +func (*InspectPodRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } // Response for InspectPod(). type InspectPodResponse struct { Pod *Pod `protobuf:"bytes,1,opt,name=pod" json:"pod,omitempty"` } -func (m *InspectPodResponse) Reset() { *m = InspectPodResponse{} } -func (m *InspectPodResponse) String() string { return proto.CompactTextString(m) } -func (*InspectPodResponse) ProtoMessage() {} +func (m *InspectPodResponse) Reset() { *m = InspectPodResponse{} } +func (m *InspectPodResponse) String() string { return proto.CompactTextString(m) } +func (*InspectPodResponse) ProtoMessage() {} +func (*InspectPodResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *InspectPodResponse) GetPod() *Pod { if m != nil { @@ -549,9 +615,10 @@ type ListImagesRequest struct { Detail bool `protobuf:"varint,2,opt,name=detail" json:"detail,omitempty"` } -func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} } -func (m *ListImagesRequest) String() string { return proto.CompactTextString(m) } -func (*ListImagesRequest) ProtoMessage() {} +func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} } +func (m *ListImagesRequest) String() string { return proto.CompactTextString(m) } +func (*ListImagesRequest) ProtoMessage() {} +func (*ListImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *ListImagesRequest) GetFilters() []*ImageFilter { if m != nil { @@ -565,9 +632,10 @@ type ListImagesResponse struct { Images []*Image `protobuf:"bytes,1,rep,name=images" json:"images,omitempty"` } -func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} } -func (m *ListImagesResponse) String() string { return proto.CompactTextString(m) } -func (*ListImagesResponse) ProtoMessage() {} +func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} } +func (m *ListImagesResponse) String() string { return proto.CompactTextString(m) } +func (*ListImagesResponse) ProtoMessage() {} +func (*ListImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *ListImagesResponse) GetImages() []*Image { if m != nil { @@ -581,18 +649,20 @@ type InspectImageRequest struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` } -func (m *InspectImageRequest) Reset() { *m = InspectImageRequest{} } -func (m *InspectImageRequest) String() string { return proto.CompactTextString(m) } -func (*InspectImageRequest) ProtoMessage() {} +func (m *InspectImageRequest) Reset() { *m = InspectImageRequest{} } +func (m *InspectImageRequest) String() string { return proto.CompactTextString(m) } +func (*InspectImageRequest) ProtoMessage() {} +func (*InspectImageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } // Response for InspectImage(). 
type InspectImageResponse struct { Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"` } -func (m *InspectImageResponse) Reset() { *m = InspectImageResponse{} } -func (m *InspectImageResponse) String() string { return proto.CompactTextString(m) } -func (*InspectImageResponse) ProtoMessage() {} +func (m *InspectImageResponse) Reset() { *m = InspectImageResponse{} } +func (m *InspectImageResponse) String() string { return proto.CompactTextString(m) } +func (*InspectImageResponse) ProtoMessage() {} +func (*InspectImageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *InspectImageResponse) GetImage() *Image { if m != nil { @@ -606,9 +676,10 @@ type ListenEventsRequest struct { Filter *EventFilter `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` } -func (m *ListenEventsRequest) Reset() { *m = ListenEventsRequest{} } -func (m *ListenEventsRequest) String() string { return proto.CompactTextString(m) } -func (*ListenEventsRequest) ProtoMessage() {} +func (m *ListenEventsRequest) Reset() { *m = ListenEventsRequest{} } +func (m *ListenEventsRequest) String() string { return proto.CompactTextString(m) } +func (*ListenEventsRequest) ProtoMessage() {} +func (*ListenEventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } func (m *ListenEventsRequest) GetFilter() *EventFilter { if m != nil { @@ -623,9 +694,10 @@ type ListenEventsResponse struct { Events []*Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` } -func (m *ListenEventsResponse) Reset() { *m = ListenEventsResponse{} } -func (m *ListenEventsResponse) String() string { return proto.CompactTextString(m) } -func (*ListenEventsResponse) ProtoMessage() {} +func (m *ListenEventsResponse) Reset() { *m = ListenEventsResponse{} } +func (m *ListenEventsResponse) String() string { return proto.CompactTextString(m) } +func (*ListenEventsResponse) ProtoMessage() {} +func (*ListenEventsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } func (m *ListenEventsResponse) GetEvents() []*Event { if m != nil { @@ -655,9 +727,10 @@ type GetLogsRequest struct { UntilTime int64 `protobuf:"varint,6,opt,name=until_time" json:"until_time,omitempty"` } -func (m *GetLogsRequest) Reset() { *m = GetLogsRequest{} } -func (m *GetLogsRequest) String() string { return proto.CompactTextString(m) } -func (*GetLogsRequest) ProtoMessage() {} +func (m *GetLogsRequest) Reset() { *m = GetLogsRequest{} } +func (m *GetLogsRequest) String() string { return proto.CompactTextString(m) } +func (*GetLogsRequest) ProtoMessage() {} +func (*GetLogsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } // Response for GetLogs(). 
type GetLogsResponse struct { @@ -665,11 +738,37 @@ type GetLogsResponse struct { Lines []string `protobuf:"bytes,1,rep,name=lines" json:"lines,omitempty"` } -func (m *GetLogsResponse) Reset() { *m = GetLogsResponse{} } -func (m *GetLogsResponse) String() string { return proto.CompactTextString(m) } -func (*GetLogsResponse) ProtoMessage() {} +func (m *GetLogsResponse) Reset() { *m = GetLogsResponse{} } +func (m *GetLogsResponse) String() string { return proto.CompactTextString(m) } +func (*GetLogsResponse) ProtoMessage() {} +func (*GetLogsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func init() { + proto.RegisterType((*ImageFormat)(nil), "v1alpha.ImageFormat") + proto.RegisterType((*Image)(nil), "v1alpha.Image") + proto.RegisterType((*Network)(nil), "v1alpha.Network") + proto.RegisterType((*App)(nil), "v1alpha.App") + proto.RegisterType((*Pod)(nil), "v1alpha.Pod") + proto.RegisterType((*KeyValue)(nil), "v1alpha.KeyValue") + proto.RegisterType((*PodFilter)(nil), "v1alpha.PodFilter") + proto.RegisterType((*ImageFilter)(nil), "v1alpha.ImageFilter") + proto.RegisterType((*Info)(nil), "v1alpha.Info") + proto.RegisterType((*Event)(nil), "v1alpha.Event") + proto.RegisterType((*EventFilter)(nil), "v1alpha.EventFilter") + proto.RegisterType((*GetInfoRequest)(nil), "v1alpha.GetInfoRequest") + proto.RegisterType((*GetInfoResponse)(nil), "v1alpha.GetInfoResponse") + proto.RegisterType((*ListPodsRequest)(nil), "v1alpha.ListPodsRequest") + proto.RegisterType((*ListPodsResponse)(nil), "v1alpha.ListPodsResponse") + proto.RegisterType((*InspectPodRequest)(nil), "v1alpha.InspectPodRequest") + proto.RegisterType((*InspectPodResponse)(nil), "v1alpha.InspectPodResponse") + proto.RegisterType((*ListImagesRequest)(nil), "v1alpha.ListImagesRequest") + proto.RegisterType((*ListImagesResponse)(nil), "v1alpha.ListImagesResponse") + proto.RegisterType((*InspectImageRequest)(nil), "v1alpha.InspectImageRequest") + proto.RegisterType((*InspectImageResponse)(nil), "v1alpha.InspectImageResponse") + proto.RegisterType((*ListenEventsRequest)(nil), "v1alpha.ListenEventsRequest") + proto.RegisterType((*ListenEventsResponse)(nil), "v1alpha.ListenEventsResponse") + proto.RegisterType((*GetLogsRequest)(nil), "v1alpha.GetLogsRequest") + proto.RegisterType((*GetLogsResponse)(nil), "v1alpha.GetLogsResponse") proto.RegisterEnum("v1alpha.ImageType", ImageType_name, ImageType_value) proto.RegisterEnum("v1alpha.AppState", AppState_name, AppState_value) proto.RegisterEnum("v1alpha.PodState", PodState_name, PodState_value) @@ -851,9 +950,9 @@ func RegisterPublicAPIServer(s *grpc.Server, srv PublicAPIServer) { s.RegisterService(&_PublicAPI_serviceDesc, srv) } -func _PublicAPI_GetInfo_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _PublicAPI_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(GetInfoRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(PublicAPIServer).GetInfo(ctx, in) @@ -863,9 +962,9 @@ func _PublicAPI_GetInfo_Handler(srv interface{}, ctx context.Context, codec grpc return out, nil } -func _PublicAPI_ListPods_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _PublicAPI_ListPods_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(ListPodsRequest) - if err := codec.Unmarshal(buf, in); err 
!= nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(PublicAPIServer).ListPods(ctx, in) @@ -875,9 +974,9 @@ func _PublicAPI_ListPods_Handler(srv interface{}, ctx context.Context, codec grp return out, nil } -func _PublicAPI_InspectPod_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _PublicAPI_InspectPod_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(InspectPodRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(PublicAPIServer).InspectPod(ctx, in) @@ -887,9 +986,9 @@ func _PublicAPI_InspectPod_Handler(srv interface{}, ctx context.Context, codec g return out, nil } -func _PublicAPI_ListImages_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _PublicAPI_ListImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(ListImagesRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(PublicAPIServer).ListImages(ctx, in) @@ -899,9 +998,9 @@ func _PublicAPI_ListImages_Handler(srv interface{}, ctx context.Context, codec g return out, nil } -func _PublicAPI_InspectImage_Handler(srv interface{}, ctx context.Context, codec grpc.Codec, buf []byte) (interface{}, error) { +func _PublicAPI_InspectImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(InspectImageRequest) - if err := codec.Unmarshal(buf, in); err != nil { + if err := dec(in); err != nil { return nil, err } out, err := srv.(PublicAPIServer).InspectImage(ctx, in) @@ -991,3 +1090,90 @@ var _PublicAPI_serviceDesc = grpc.ServiceDesc{ }, }, } + +var fileDescriptor0 = []byte{ + // 1318 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x57, 0x4d, 0x72, 0xdb, 0x46, + 0x13, 0x35, 0x08, 0x82, 0x3f, 0x4d, 0x9a, 0x02, 0x47, 0xb2, 0x45, 0xd3, 0x7f, 0x32, 0xbe, 0x2f, + 0x2e, 0x47, 0x0b, 0x25, 0x91, 0x1d, 0x6f, 0x52, 0x95, 0x32, 0x25, 0x41, 0x2a, 0xc6, 0x12, 0xc9, + 0xa2, 0x69, 0x55, 0xbc, 0x42, 0x41, 0xe2, 0xd0, 0x41, 0x89, 0x04, 0x10, 0x02, 0x92, 0xad, 0x2c, + 0x73, 0x81, 0x5c, 0x21, 0xb7, 0x48, 0x55, 0x36, 0xb9, 0x41, 0x6e, 0x91, 0x7b, 0xa4, 0x67, 0x30, + 0x00, 0x06, 0x20, 0xb8, 0xc8, 0x8e, 0xe8, 0xee, 0x79, 0xfd, 0xba, 0x7b, 0xfa, 0x01, 0x84, 0xba, + 0xed, 0x3b, 0x7b, 0xfe, 0xd2, 0x0b, 0x3d, 0x52, 0xbd, 0xf9, 0xc6, 0x9e, 0xfb, 0x3f, 0xd9, 0xc6, + 0x1b, 0x68, 0xf4, 0x17, 0xf6, 0x47, 0x7a, 0xec, 0x2d, 0x17, 0x76, 0x48, 0x76, 0xa0, 0x1c, 0xde, + 0xfa, 0xb4, 0xa3, 0xec, 0x28, 0x2f, 0x5a, 0xfb, 0x64, 0x4f, 0x84, 0xed, 0xf1, 0x98, 0x09, 0x7a, + 0xc8, 0x06, 0x54, 0x6f, 0xe8, 0x32, 0x70, 0x3c, 0xb7, 0x53, 0xc2, 0xa0, 0xba, 0xf1, 0x97, 0x02, + 0x1a, 0x77, 0x93, 0x2f, 0xa1, 0x71, 0x61, 0x07, 0xd4, 0x9a, 0x71, 0x2c, 0x8e, 0xd1, 0xd8, 0xdf, + 0xca, 0x62, 0x88, 0x3c, 0x00, 0x25, 0x67, 0x1a, 0x01, 0x90, 0x26, 0x94, 0x5d, 0x7b, 0x41, 0x3b, + 0x2a, 0x7f, 0x92, 0xf0, 0xcb, 0xdc, 0xd0, 0x01, 0xdd, 0x59, 0xf8, 0xde, 0x32, 0xb4, 0x42, 0x67, + 0x41, 0x83, 0xd0, 0x5e, 0xf8, 0x1d, 0x0d, 0x3d, 0x2a, 0xd1, 0xa1, 0xb6, 0xb0, 0x5d, 0x67, 0x86, + 0xc6, 0x4e, 0x05, 0x2d, 0x4d, 0x06, 0x15, 0x38, 0xbf, 0xd0, 0x4e, 0x95, 0xfb, 0x9f, 0x43, 0xc3, + 0x76, 0x5d, 0x2f, 0xb4, 0x43, 0x44, 0x0b, 0x3a, 0xb5, 0x1d, 0x15, 0xf9, 0xb4, 0x13, 0x3e, 0x6f, + 0xe9, 0xed, 0xb9, 0x3d, 0xbf, 
0xa6, 0xc6, 0x4b, 0xa8, 0x0e, 0x68, 0xf8, 0xc9, 0x5b, 0x5e, 0x25, + 0x5c, 0x94, 0x98, 0x99, 0xe3, 0xdf, 0xbc, 0x4a, 0x79, 0xe2, 0xd3, 0xeb, 0x88, 0xa7, 0xf1, 0x9b, + 0x02, 0x6a, 0xcf, 0xf7, 0x73, 0x27, 0x1e, 0x83, 0xe6, 0xb0, 0x32, 0xf9, 0x91, 0xc6, 0x7e, 0x2b, + 0x5b, 0x3c, 0xb6, 0x57, 0xc3, 0x02, 0xc2, 0xa8, 0xd6, 0x96, 0xc4, 0x05, 0x91, 0xde, 0x31, 0x07, + 0x69, 0x43, 0x9d, 0x7e, 0x76, 0x42, 0xeb, 0xd2, 0x9b, 0x52, 0xde, 0x80, 0x76, 0xbe, 0x0c, 0x6d, + 0x5d, 0x19, 0x7f, 0x22, 0xa3, 0x91, 0x37, 0x15, 0xbd, 0x8d, 0xf8, 0x34, 0x40, 0xf5, 0x45, 0xa3, + 0xdb, 0xeb, 0xb3, 0xe3, 0xa9, 0x28, 0x7b, 0x17, 0xca, 0xb6, 0xef, 0x07, 0x98, 0x98, 0xe5, 0x68, + 0xca, 0xf4, 0x88, 0x01, 0x35, 0x37, 0xea, 0x52, 0xcc, 0x41, 0x4f, 0xfc, 0x71, 0xfb, 0x56, 0x27, + 0x92, 0x23, 0x5f, 0x5d, 0x47, 0xfe, 0x39, 0xd4, 0xe2, 0xdf, 0x8c, 0x34, 0xfe, 0x16, 0x15, 0xdc, + 0x05, 0xed, 0x86, 0x59, 0xc5, 0x6d, 0xfb, 0x5d, 0x81, 0x3a, 0xd2, 0x3d, 0x76, 0xe6, 0x21, 0x5d, + 0xb2, 0x48, 0x67, 0x1a, 0x60, 0xa4, 0x8a, 0x91, 0xcf, 0xa0, 0xc2, 0xcb, 0x0b, 0x30, 0x54, 0x2d, + 0xae, 0xaf, 0xcd, 0x76, 0xc0, 0xb7, 0xd8, 0xc0, 0x02, 0xec, 0x02, 0x3b, 0x85, 0x26, 0x3e, 0x31, + 0x8b, 0x01, 0x95, 0xb9, 0xe9, 0x1e, 0xdc, 0x15, 0x95, 0x8a, 0x48, 0x8d, 0x9b, 0x73, 0xa5, 0x54, + 0xd6, 0x95, 0xf2, 0xb7, 0x12, 0xef, 0x54, 0x01, 0x49, 0xec, 0x90, 0xbf, 0xa4, 0x33, 0xe7, 0xb3, + 0xa0, 0x59, 0x27, 0x38, 0x2f, 0xbe, 0x35, 0x32, 0x29, 0x8c, 0xba, 0xa2, 0xb7, 0xc8, 0x20, 0xe1, + 0x84, 0xc5, 0xcd, 0xed, 0x0b, 0x3a, 0x5f, 0x3f, 0x7f, 0x72, 0x1f, 0x5a, 0xd1, 0xa2, 0xd0, 0xa9, + 0x65, 0xcf, 0x30, 0x33, 0x1f, 0x81, 0x4a, 0xb6, 0x61, 0x23, 0xb1, 0x5f, 0x50, 0x5c, 0xce, 0xff, + 0xba, 0x1f, 0xc7, 0x50, 0xee, 0xbb, 0x33, 0x8f, 0x6c, 0x42, 0x63, 0x79, 0x15, 0x5a, 0xf1, 0x7a, + 0x46, 0xf3, 0xd9, 0x82, 0x26, 0xb6, 0xf4, 0xd2, 0xca, 0x88, 0x02, 0x0b, 0x45, 0xb1, 0x49, 0x8c, + 0xd1, 0xca, 0x2c, 0x41, 0x33, 0x6f, 0xa8, 0xbb, 0x5e, 0x65, 0xb8, 0x97, 0xab, 0x4c, 0x4e, 0x1f, + 0x66, 0x4b, 0x6f, 0x21, 0xf4, 0x01, 0x9f, 0x98, 0x0e, 0xf0, 0xdd, 0x50, 0xc9, 0x53, 0x28, 0x4f, + 0xed, 0xd0, 0x5e, 0xbf, 0x14, 0x21, 0x34, 0x38, 0xaa, 0x98, 0xc5, 0x33, 0xd0, 0x58, 0xe6, 0x68, + 0x1a, 0xc5, 0xa9, 0xc5, 0xb8, 0xa2, 0xe1, 0xe0, 0xed, 0x93, 0xe7, 0x82, 0xbc, 0x02, 0xc7, 0xbd, + 0xa4, 0x96, 0x44, 0x01, 0x6d, 0xd7, 0x6e, 0xe8, 0xcc, 0x23, 0x1b, 0x57, 0x26, 0x43, 0x87, 0xd6, + 0x09, 0x0d, 0x59, 0xd3, 0xc6, 0xf4, 0xe7, 0x6b, 0xdc, 0x06, 0x63, 0x0f, 0x36, 0x12, 0x4b, 0xe0, + 0x63, 0xbb, 0x29, 0x79, 0x88, 0x7a, 0x82, 0xcf, 0x42, 0x27, 0xef, 0xa6, 0x52, 0x81, 0x46, 0xec, + 0xf9, 0xc6, 0xa9, 0x13, 0x84, 0x78, 0x73, 0x03, 0x01, 0x41, 0xfe, 0x07, 0xd5, 0x19, 0xaf, 0x22, + 0x62, 0xdf, 0x90, 0xd8, 0xa7, 0x1b, 0xd1, 0x82, 0xca, 0x94, 0x86, 0xb6, 0x33, 0xe7, 0xcd, 0xab, + 0x61, 0x5e, 0x3d, 0xc5, 0x11, 0x89, 0x71, 0xcb, 0x7d, 0x6f, 0x1a, 0xa3, 0x34, 0x65, 0x14, 0xe3, + 0x29, 0xb4, 0xfb, 0x6e, 0xe0, 0xd3, 0x4b, 0x76, 0x24, 0xce, 0x2c, 0x29, 0x8a, 0xf1, 0x15, 0x10, + 0x39, 0x40, 0x40, 0x3e, 0x40, 0x9d, 0xf1, 0xa6, 0xa2, 0x94, 0x2c, 0xe2, 0x0f, 0xd0, 0x66, 0x0c, + 0xf8, 0x46, 0x24, 0xb5, 0x7c, 0x91, 0xaf, 0x25, 0xff, 0x9a, 0x28, 0xae, 0xe6, 0x15, 0x10, 0x19, + 0x4b, 0x24, 0x7f, 0x02, 0x15, 0xbe, 0xc2, 0x31, 0x56, 0x4e, 0x75, 0x8d, 0x67, 0xb0, 0x29, 0x28, + 0xf3, 0xe7, 0xa2, 0xaa, 0xbe, 0x85, 0xad, 0x6c, 0x88, 0x80, 0x4e, 0xf4, 0x5c, 0x29, 0xd2, 0x73, + 0xe3, 0x3b, 0xd8, 0x64, 0x7c, 0xa8, 0xcb, 0xaf, 0x4f, 0x52, 0xdd, 0xff, 0xa1, 0x12, 0x55, 0xb7, + 0xf2, 0x0e, 0x94, 0xee, 0xa2, 0xf1, 0x1a, 0xb6, 0xb2, 0x87, 0xd3, 0x72, 0x28, 0xb7, 0xac, 0x94, + 0xc3, 0x03, 0x8d, 0x5b, 0x7e, 0xb9, 0x4e, 0xbd, 0x8f, 
0x49, 0x3e, 0x6c, 0x13, 0x76, 0xdf, 0x4a, + 0x54, 0x1f, 0xe5, 0x23, 0x96, 0x39, 0xb1, 0x43, 0x78, 0x8f, 0xe7, 0x8e, 0xcb, 0xef, 0xb1, 0xf2, + 0x42, 0x63, 0x07, 0x66, 0xde, 0x7c, 0xee, 0x7d, 0xe2, 0x77, 0xb8, 0x96, 0xbb, 0xd7, 0x5a, 0xc1, + 0xbd, 0xe6, 0x52, 0x62, 0xec, 0xf0, 0x5b, 0x1c, 0xa5, 0x16, 0x6c, 0x13, 0x64, 0xae, 0x6f, 0xbb, + 0x14, 0xea, 0xe9, 0xb7, 0x42, 0x07, 0xbb, 0x7a, 0xd6, 0x3b, 0x31, 0xad, 0xc9, 0x87, 0x91, 0x69, + 0xbd, 0x1f, 0x1c, 0x99, 0xc7, 0xfd, 0x81, 0x79, 0xa4, 0xdf, 0x41, 0x7d, 0xd8, 0x90, 0x3c, 0xbd, + 0xd1, 0xe8, 0x50, 0x57, 0x50, 0x77, 0xdb, 0x92, 0xf1, 0x68, 0x78, 0xf8, 0xd6, 0x1c, 0xeb, 0x25, + 0x24, 0xd2, 0x92, 0xcc, 0xc3, 0xc3, 0xbe, 0xae, 0xee, 0x8e, 0xa0, 0x96, 0xbc, 0x32, 0xb7, 0x61, + 0x13, 0x01, 0xac, 0x77, 0x93, 0xde, 0x24, 0x9b, 0x04, 0xf1, 0x52, 0xc7, 0xf8, 0xfd, 0x60, 0xd0, + 0x1f, 0x9c, 0x60, 0x9a, 0x2d, 0xd0, 0x53, 0xb3, 0xf9, 0x63, 0x7f, 0x82, 0xc1, 0xa5, 0xdd, 0x7f, + 0x14, 0xa8, 0x25, 0xef, 0x09, 0x84, 0x1c, 0x0d, 0x8f, 0x0a, 0x20, 0xf1, 0x6c, 0xea, 0x30, 0xcf, + 0x0e, 0xc6, 0x1f, 0x86, 0x88, 0x98, 0x09, 0x1f, 0x8d, 0xcd, 0x51, 0x6f, 0xcc, 0x52, 0x95, 0x50, + 0x92, 0x49, 0xde, 0x81, 0x30, 0x2a, 0x63, 0x96, 0xda, 0x63, 0x66, 0x65, 0xbc, 0x6d, 0x0f, 0x52, + 0x73, 0xef, 0x60, 0x38, 0x46, 0x6a, 0xf1, 0x31, 0x5d, 0xcb, 0x25, 0x8f, 0x88, 0x57, 0xb2, 0x39, + 0x8e, 0xcc, 0x53, 0x73, 0xc2, 0xc0, 0xaa, 0xd9, 0x1c, 0x27, 0xbd, 0xf1, 0x01, 0xb6, 0x50, 0xaf, + 0xed, 0xfe, 0x51, 0x82, 0x7a, 0x2a, 0x76, 0x38, 0x21, 0xf3, 0xdc, 0x1c, 0x4c, 0x56, 0x27, 0xf4, + 0x10, 0xb6, 0x25, 0x0f, 0x43, 0x4a, 0xf8, 0x2b, 0xf8, 0x2d, 0xf0, 0xa4, 0xd8, 0x19, 0xb3, 0xc6, + 0xda, 0xbb, 0x70, 0x3f, 0x17, 0x83, 0x54, 0xb8, 0x4f, 0x45, 0xb9, 0xb8, 0x97, 0xf3, 0x89, 0x72, + 0xca, 0xb8, 0x3b, 0x3b, 0x39, 0x97, 0xe0, 0x6e, 0x1d, 0x0e, 0x4f, 0x4f, 0xcd, 0x43, 0x16, 0xa5, + 0xe5, 0xc0, 0xc5, 0x38, 0xc7, 0x51, 0x43, 0xb2, 0xe0, 0xcc, 0x27, 0xc0, 0xab, 0xac, 0xc1, 0x92, + 0x2b, 0xba, 0x55, 0xfd, 0xb3, 0x51, 0x44, 0xb9, 0x46, 0x1e, 0x41, 0x67, 0xc5, 0x3d, 0x36, 0xcf, + 0x86, 0xe7, 0xe8, 0xad, 0xef, 0xff, 0x5a, 0xc6, 0x4f, 0x8f, 0xeb, 0x8b, 0xb9, 0x73, 0xd9, 0x1b, + 0xf5, 0xc9, 0xf7, 0x50, 0x15, 0x82, 0x4e, 0xb6, 0x93, 0x05, 0xcd, 0x8a, 0x7e, 0xb7, 0xb3, 0xea, + 0x88, 0xb6, 0xc6, 0xb8, 0x43, 0x7a, 0x50, 0x8b, 0x85, 0x99, 0xa4, 0x71, 0x39, 0xcd, 0xef, 0x3e, + 0x28, 0xf0, 0x24, 0x10, 0x27, 0x00, 0xa9, 0x14, 0x93, 0xae, 0xf4, 0x02, 0xc9, 0x09, 0x78, 0xf7, + 0x61, 0xa1, 0x4f, 0x06, 0x4a, 0x65, 0x55, 0x02, 0x5a, 0xd1, 0x6d, 0x09, 0x68, 0x55, 0x87, 0x11, + 0xe8, 0x0c, 0x9a, 0xb2, 0x8c, 0x92, 0x47, 0xf9, 0xbc, 0xb2, 0x00, 0x77, 0x1f, 0xaf, 0xf1, 0x26, + 0x70, 0x43, 0x68, 0xca, 0x0a, 0x29, 0xc1, 0x15, 0xa8, 0xae, 0x04, 0x57, 0x24, 0xab, 0xc6, 0x9d, + 0xaf, 0x15, 0xf2, 0x86, 0x0f, 0x8d, 0xe9, 0x57, 0x76, 0x68, 0x92, 0x98, 0x66, 0x87, 0x26, 0x4b, + 0x1d, 0x43, 0xb8, 0xa8, 0xf0, 0xff, 0x4f, 0x2f, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x84, 0xa0, + 0x2b, 0xe3, 0x4c, 0x0d, 0x00, 0x00, +} diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.proto b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.proto index 2b68cdbee8529..73b453d47aa16 100644 --- a/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.proto +++ b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/api.proto @@ -21,8 +21,6 @@ // The API defined here is proposed, experimental, // // and (for now) subject to change at any time. // // // -// Do not use it. 
// -// // // If you think you want to use it, or for any other // // queries, contact // // or file an issue on github.com/coreos/rkt // @@ -73,6 +71,12 @@ message Image { // JSON-encoded byte array that represents the image manifest, optional. bytes manifest = 6; + + // Size is the size in bytes of this image in the store. + int64 size = 7; + + // Annotations on this image. + repeated KeyValue annotations = 8; } // Network describes the network information of a pod. @@ -109,6 +113,9 @@ message App { // Exit code of the app. optional, only valid if it's returned by InspectPod() and // the app has already exited. sint32 exit_code = 4; + + // Annotations for this app. + repeated KeyValue annotations = 5; } // PodState defines the possible states of the pod. @@ -133,25 +140,38 @@ enum PodState { } // Pod describes a pod's information. +// If a pod is in Embryo, Preparing, AbortedPrepare state, +// only id and state will be returned. +// +// If a pod is in other states, the pod manifest and +// apps will be returned when 'detailed' is true in the request. +// +// A valid pid of the stage1 process of the pod will be returned +// if the pod is Running has run once. +// +// Networks are only returned when a pod is in Running. message Pod { - // ID of the pod, in the form of a UUID, required. + // ID of the pod, in the form of a UUID. string id = 1; - // PID of the pod, optional, only valid if it's returned by InspectPod(). A negative value means the pod has exited. + // PID of the stage1 process of the pod. sint32 pid = 2; - // State of the pod, required. + // State of the pod. PodState state = 3; - // List of apps in the pod, required. + // List of apps in the pod. repeated App apps = 4; - // Network information of the pod, optional, non-empty if the pod is running in private net. + // Network information of the pod. // Note that a pod can be in multiple networks. repeated Network networks = 5; - // JSON-encoded byte array that represents the pod manifest of the pod, required. + // JSON-encoded byte array that represents the pod manifest of the pod. bytes manifest = 6; + + // Annotations on this pod. + repeated KeyValue annotations = 7; } message KeyValue { @@ -212,6 +232,9 @@ message ImageFilter { // If not empty, the images that have all of the annotations will be returned. repeated KeyValue annotations = 8; + + // If not empty, the images that have any of the exact full names will be returned. + repeated string full_names = 9; } // Info describes the information of rkt on the machine. diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/client_example.go b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/client_example.go new file mode 100644 index 0000000000000..ec807d9ee319d --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/rkt/api/v1alpha/client_example.go @@ -0,0 +1,74 @@ +// Copyright 2015 The rkt Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build ignore + +package main + +import ( + "fmt" + "os" + + "github.com/coreos/rkt/api/v1alpha" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +func main() { + conn, err := grpc.Dial("localhost:15441", grpc.WithInsecure()) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + c := v1alpha.NewPublicAPIClient(conn) + defer conn.Close() + + // List pods. + podResp, err := c.ListPods(context.Background(), &v1alpha.ListPodsRequest{ + // Specify the request: Fetch and print only running pods and their details. + Detail: true, + Filters: []*v1alpha.PodFilter{ + { + States: []v1alpha.PodState{v1alpha.PodState_POD_STATE_RUNNING}, + }, + }, + }) + if err != nil { + fmt.Println(err) + os.Exit(2) + } + + for _, p := range podResp.Pods { + fmt.Printf("Pod %q is running\n", p.Id) + } + + // List images. + imgResp, err := c.ListImages(context.Background(), &v1alpha.ListImagesRequest{ + // In this request, we fetch the details of images whose names are prefixed with "coreos.com". + Detail: true, + Filters: []*v1alpha.ImageFilter{ + { + Prefixes: []string{"coreos.com"}, + }, + }, + }) + if err != nil { + fmt.Println(err) + os.Exit(3) + } + + for _, im := range imgResp.Images { + fmt.Printf("Found image %q\n", im.Name) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 0000000000000..9ea86d784ecab --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith <joe.smith@email.com> + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code new file mode 100644 index 0000000000000..b55b37bc31620 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs new file mode 100644 index 0000000000000..e26cd4fc8ed91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 0000000000000..477be8b214bfb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,27 @@ +# go-units maintainers file +# +# This file describes who runs the docker/go-units project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/Godeps/_workspace/src/github.com/docker/go-units/README.md b/Godeps/_workspace/src/github.com/docker/go-units/README.md new file mode 100644 index 0000000000000..3ce4d79daca6d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/README.md @@ -0,0 +1,18 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human-friendly measurements into machine-friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code +is released under the Apache 2.0 license. The README.md file, and files in the +"docs" folder are licensed under the Creative Commons Attribution-ShareAlike 4.0 +International License under the terms and conditions set forth in the file +"LICENSE.docs". You may obtain a duplicate copy of the same license, titled +CC-BY-SA-4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/Godeps/_workspace/src/github.com/docker/go-units/circle.yml b/Godeps/_workspace/src/github.com/docker/go-units/circle.yml new file mode 100644 index 0000000000000..9043b35478c91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l .
| tee /dev/stderr)" diff --git a/Godeps/_workspace/src/github.com/docker/go-units/duration.go b/Godeps/_workspace/src/github.com/docker/go-units/duration.go new file mode 100644 index 0000000000000..c219a8a968c40 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/duration.go @@ -0,0 +1,33 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). +func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/Godeps/_workspace/src/github.com/docker/go-units/size.go b/Godeps/_workspace/src/github.com/docker/go-units/size.go new file mode 100644 index 0000000000000..3b59daff31b73 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/size.go @@ -0,0 +1,95 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + i := 0 + for size >= base { + size = size / base + i++ + } + return fmt.Sprintf(format, size, _map[i]) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). 
+func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 3 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseInt(matches[1], 10, 0) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[2]) + if mul, ok := uMap[unitPrefix]; ok { + size *= mul + } + + return size, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go b/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go new file mode 100644 index 0000000000000..5ac7fd825fcee --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go @@ -0,0 +1,118 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. 
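+//
+// The accepted form is "name=soft[:hard]"; when ":hard" is omitted the hard
+// limit defaults to the soft value. A brief sketch (values are illustrative):
+//
+//	u, _ := units.ParseUlimit("nofile=1024:2048") // &Ulimit{Name: "nofile", Soft: 1024, Hard: 2048}
+//	u, _ = units.ParseUlimit("nofile=1024")       // soft and hard both 1024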
+func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. +func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/connection.go b/Godeps/_workspace/src/github.com/docker/spdystream/connection.go index aa6c75202e330..6031a0db1ab49 100644 --- a/Godeps/_workspace/src/github.com/docker/spdystream/connection.go +++ b/Godeps/_workspace/src/github.com/docker/spdystream/connection.go @@ -320,6 +320,7 @@ func (s *Connection) Serve(newHandler StreamHandler) { partitionRoundRobin int goAwayFrame *spdy.GoAwayFrame ) +Loop: for { readFrame, err := s.framer.ReadFrame() if err != nil { @@ -362,7 +363,7 @@ func (s *Connection) Serve(newHandler StreamHandler) { case *spdy.GoAwayFrame: // hold on to the go away frame and exit the loop goAwayFrame = frame - break + break Loop default: priority = 7 partition = partitionRoundRobin diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml index 597b849d4c37f..4a6a01fb7bf67 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml @@ -3,8 +3,8 @@ sudo: required go: - 1.3.3 - 1.4.2 - - 1.5.2 - - 1.6beta1 + - 1.5.3 + - 1.6rc2 - tip env: - GOARCH=amd64 DOCKER_VERSION=1.7.1 @@ -13,6 +13,8 @@ env: - GOARCH=386 DOCKER_VERSION=1.8.3 - GOARCH=amd64 DOCKER_VERSION=1.9.1 - GOARCH=386 DOCKER_VERSION=1.9.1 + - GOARCH=amd64 DOCKER_VERSION=1.10.0 + - GOARCH=386 DOCKER_VERSION=1.10.0 install: - make prepare_docker script: diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS index 3359e7288c3a4..0c42ae3444a9e 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS @@ -12,6 +12,7 @@ Antonio Murdaca Artem Sidorenko Ben Marini Ben McCann +Ben Parees Benno van den Berg Brendan Fosberry Brian Lalor @@ -37,6 +38,7 @@ Dave Choi David Huie Dawn Chen Dinesh Subhraveti +Drew Wells Ed Elias G. 
Schneevoigt Erez Horev @@ -48,6 +50,7 @@ Flavia Missi Francisco Souza Grégoire Delattre Guillermo Álvarez Fernández +Harry Zhang He Simei Ivan Mikushin James Bardin @@ -109,6 +112,7 @@ Summer Mousa Sunjin Lee Tarsis Azevedo Tim Schindler +Timothy St. Clair Tobi Knaup Tom Wilkie Tonic diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile index 205d8f3c22db3..7a94eaa1e0b06 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile @@ -41,7 +41,7 @@ prepare_docker: sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list sudo apt-get update - sudo apt-get install docker-engine=$(DOCKER_VERSION)-0~$(shell lsb_release -cs) -y --force-yes + sudo apt-get install docker-engine=$(DOCKER_VERSION)-0~$(shell lsb_release -cs) -y --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" pretest: lint vet fmtcheck diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go index 3a7d755f5adcc..114fb87b704bb 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go @@ -60,7 +60,8 @@ func NewAPIVersion(input string) (APIVersion, error) { if !strings.Contains(input, ".") { return nil, fmt.Errorf("Unable to parse version %q", input) } - arr := strings.Split(input, ".") + raw := strings.Split(input, "-") + arr := strings.Split(raw[0], ".") ret := make(APIVersion, len(arr)) var err error for i, val := range arr { @@ -586,7 +587,7 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close if err != nil { return nil, err } - req.Header.Set("Content-Type", "plain/text") + req.Header.Set("Content-Type", "application/json") req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", "tcp") protocol := c.endpointURL.Scheme diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go index 0baf01f9703d3..317814b90bfb4 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go @@ -510,6 +510,7 @@ type HostConfig struct { BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"` Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"` VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` + OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty"` } // StartContainer starts a container, returning an error in case of failure. @@ -638,6 +639,7 @@ func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { // See https://goo.gl/GNmLHb for more details. 
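+// Note: the singular Network field below matches the flat "network" object
+// reported by older daemons, while the Networks map matches the per-interface
+// "networks" object of newer Remote API versions; keeping both fields
+// presumably lets either response shape decode into the same struct.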
type Stats struct { Read time.Time `json:"read,omitempty" yaml:"read,omitempty"` + Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty"` Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty"` MemoryStats struct { Stats struct { @@ -670,6 +672,8 @@ type Stats struct { Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"` InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"` TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"` + HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty"` + Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty"` } `json:"stats,omitempty" yaml:"stats,omitempty"` MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"` Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"` diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go index fd13bc23f22c2..47da77dbeb355 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go @@ -410,6 +410,8 @@ type BuildImageOptions struct { Memory int64 `qs:"memory"` Memswap int64 `qs:"memswap"` CPUShares int64 `qs:"cpushares"` + CPUQuota int64 `qs:"cpuquota"` + CPUPeriod int64 `qs:"cpuperiod"` CPUSetCPUs string `qs:"cpusetcpus"` InputStream io.Reader `qs:"-"` OutputStream io.Writer `qs:"-"` diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go index 38b3432461c05..30d54230a432a 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go @@ -17,19 +17,20 @@ var ErrNetworkAlreadyExists = errors.New("network already exists") // Network represents a network. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. type Network struct { Name string ID string `json:"Id"` Scope string Driver string + IPAM IPAMOptions Containers map[string]Endpoint Options map[string]string } // Endpoint contains network resources allocated and used for a container in a network // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. type Endpoint struct { Name string ID string `json:"EndpointID"` @@ -40,7 +41,7 @@ type Endpoint struct { // ListNetworks returns all networks. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. func (c *Client) ListNetworks() ([]Network, error) { resp, err := c.do("GET", "/networks", doOptions{}) if err != nil { @@ -56,7 +57,7 @@ func (c *Client) ListNetworks() ([]Network, error) { // NetworkInfo returns information about a network by its ID. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. func (c *Client) NetworkInfo(id string) (*Network, error) { path := "/networks/" + id resp, err := c.do("GET", path, doOptions{}) @@ -77,7 +78,7 @@ func (c *Client) NetworkInfo(id string) (*Network, error) { // CreateNetworkOptions specify parameters to the CreateNetwork function and // (for now) is the expected body of the "create network" http request message // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. 
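+//
+// A hypothetical call, for illustration (the network name and field values
+// are made up):
+//
+//	network, err := client.CreateNetwork(docker.CreateNetworkOptions{
+//		Name:           "mynet",
+//		CheckDuplicate: true,
+//	})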
type CreateNetworkOptions struct { Name string `json:"Name"` CheckDuplicate bool `json:"CheckDuplicate"` @@ -107,7 +108,7 @@ type IPAMConfig struct { // CreateNetwork creates a new network, returning the network instance, // or an error in case of failure. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { resp, err := c.do( "POST", @@ -144,7 +145,7 @@ func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { // RemoveNetwork removes a network or returns an error in case of failure. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. func (c *Client) RemoveNetwork(id string) error { resp, err := c.do("DELETE", "/networks/"+id, doOptions{}) if err != nil { @@ -159,14 +160,14 @@ func (c *Client) RemoveNetwork(id string) error { // NetworkConnectionOptions specify parameters to the ConnectNetwork and DisconnectNetwork function. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. type NetworkConnectionOptions struct { Container string } // ConnectNetwork adds a container to a network or returns an error in case of failure. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error { resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{data: opts}) if err != nil { @@ -181,7 +182,7 @@ func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error // DisconnectNetwork removes a container from a network or returns an error in case of failure. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error { resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts}) if err != nil { diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go index 2c09f21c2924e..b16e713676ebd 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go @@ -38,6 +38,7 @@ var nameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) // For more details on the remote API, check http://goo.gl/G3plxW. 
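+//
+// A typical test setup is only a few lines (a sketch; NewServer and URL are
+// from this package, docker.NewClient from the parent package):
+//
+//	server, _ := NewServer("127.0.0.1:0", nil, nil)
+//	defer server.Stop()
+//	client, _ := docker.NewClient(server.URL())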
type DockerServer struct { containers []*docker.Container + uploadedFiles map[string]string execs []*docker.ExecInspect execMut sync.RWMutex cMut sync.RWMutex @@ -89,6 +90,7 @@ func NewServer(bind string, containerChan chan<- *docker.Container, hook func(*h execCallbacks: make(map[string]func()), statsCallbacks: make(map[string]func(string) docker.Stats), customHandlers: make(map[string]http.Handler), + uploadedFiles: make(map[string]string), cChan: containerChan, } server.buildMuxer() @@ -120,6 +122,7 @@ func (s *DockerServer) buildMuxer() { s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer)) s.mux.Path("/containers/{id:.*}/exec").Methods("POST").HandlerFunc(s.handlerWrapper(s.createExecContainer)) s.mux.Path("/containers/{id:.*}/stats").Methods("GET").HandlerFunc(s.handlerWrapper(s.statsContainer)) + s.mux.Path("/containers/{id:.*}/archive").Methods("PUT").HandlerFunc(s.handlerWrapper(s.uploadToContainer)) s.mux.Path("/exec/{id:.*}/resize").Methods("POST").HandlerFunc(s.handlerWrapper(s.resizeExecContainer)) s.mux.Path("/exec/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startExecContainer)) s.mux.Path("/exec/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectExecContainer)) @@ -440,8 +443,8 @@ func (s *DockerServer) createContainer(w http.ResponseWriter, r *http.Request) { s.cMut.Unlock() w.WriteHeader(http.StatusCreated) s.notify(&container) - var c = struct{ ID string }{ID: container.ID} - json.NewEncoder(w).Encode(c) + + json.NewEncoder(w).Encode(container) } func (s *DockerServer) generateID() string { @@ -503,6 +506,23 @@ func (s *DockerServer) statsContainer(w http.ResponseWriter, r *http.Request) { } } +func (s *DockerServer) uploadToContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + if !container.State.Running { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Container %s is not running", id) + return + } + path := r.URL.Query().Get("path") + s.uploadedFiles[id] = path + w.WriteHeader(http.StatusOK) +} + func (s *DockerServer) topContainer(w http.ResponseWriter, r *http.Request) { id := mux.Vars(r)["id"] container, _, err := s.findContainer(id) diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go index 9f1f5312cca1f..760e4e61d86fe 100644 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -230,6 +230,14 @@ var E_GogoprotoImport = &proto.ExtensionDesc{ Tag: "varint,63027,opt,name=gogoproto_import", } +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", +} + var E_GoprotoGetters = &proto.ExtensionDesc{ ExtendedType: (*google_protobuf.MessageOptions)(nil), ExtensionType: (*bool)(nil), @@ -382,6 +390,14 @@ var E_GoprotoUnrecognized = &proto.ExtensionDesc{ Tag: "varint,64026,opt,name=goproto_unrecognized", } +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", +} + var E_Nullable = &proto.ExtensionDesc{ 
 	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
 	ExtensionType: (*bool)(nil),
@@ -481,6 +497,7 @@ func init() {
 	proto.RegisterExtension(E_GoprotoExtensionsMapAll)
 	proto.RegisterExtension(E_GoprotoUnrecognizedAll)
 	proto.RegisterExtension(E_GogoprotoImport)
+	proto.RegisterExtension(E_ProtosizerAll)
 	proto.RegisterExtension(E_GoprotoGetters)
 	proto.RegisterExtension(E_GoprotoStringer)
 	proto.RegisterExtension(E_VerboseEqual)
@@ -500,6 +517,7 @@ func init() {
 	proto.RegisterExtension(E_UnsafeUnmarshaler)
 	proto.RegisterExtension(E_GoprotoExtensionsMap)
 	proto.RegisterExtension(E_GoprotoUnrecognized)
+	proto.RegisterExtension(E_Protosizer)
 	proto.RegisterExtension(E_Nullable)
 	proto.RegisterExtension(E_Embed)
 	proto.RegisterExtension(E_Customtype)
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.proto b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.proto
index 84310e8fdbb86..3373faf319767 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.proto
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.proto
@@ -67,6 +67,8 @@ extend google.protobuf.FileOptions {
 	optional bool goproto_extensions_map_all = 63025;
 	optional bool goproto_unrecognized_all = 63026;
 	optional bool gogoproto_import = 63027;
+
+	optional bool protosizer_all = 63028;
 }
 
 extend google.protobuf.MessageOptions {
@@ -93,6 +95,8 @@ extend google.protobuf.MessageOptions {
 
 	optional bool goproto_extensions_map = 64025;
 	optional bool goproto_unrecognized = 64026;
+
+	optional bool protosizer = 64028;
 }
 
 extend google.protobuf.FieldOptions {
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/helper.go b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/helper.go
index b1889f258ca28..b5a18538447a5 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/helper.go
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/helper.go
@@ -213,6 +213,10 @@ func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf
 	return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
 }
 
+func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
+}
+
 func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
 	return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
 }
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go
index 460dc77523e7c..af8fd96811b2a 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go
@@ -80,7 +80,8 @@ var overwriters []map[string]gogoproto.EnableFunc = []map[string]gogoproto.Enabl
 		"verboseequal": gogoproto.HasVerboseEqual,
 	},
 	{
-		"size": gogoproto.IsSizer,
+		"size":       gogoproto.IsSizer,
+		"protosizer": gogoproto.IsProtoSizer,
 	},
 	{
 		"unmarshaler": gogoproto.IsUnmarshaler,
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/marshalto/marshalto.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/marshalto/marshalto.go
index 60362c5eeb660..52d36098fe02b 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/marshalto/marshalto.go
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/marshalto/marshalto.go
@@ -303,12 +303,12 @@ func wireToType(wire string) int {
 	panic("unreachable")
 }
 
-func (p *marshalto) mapField(numGen NumGen, fieldTyp descriptor.FieldDescriptorProto_Type, varName string) {
+func (p *marshalto) mapField(numGen NumGen, fieldTyp descriptor.FieldDescriptorProto_Type, varName string, protoSizer bool) {
 	switch fieldTyp {
 	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
-		p.callFixed64(p.mathPkg.Use(), `.Float64bits(`, varName, `)`)
+		p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(`, varName, `))`)
 	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
-		p.callFixed32(p.mathPkg.Use(), `.Float32bits(`, varName, `)`)
+		p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(`, varName, `))`)
 	case descriptor.FieldDescriptorProto_TYPE_INT64,
 		descriptor.FieldDescriptorProto_TYPE_UINT64,
 		descriptor.FieldDescriptorProto_TYPE_INT32,
@@ -341,7 +341,11 @@ func (p *marshalto) mapField(numGen NumGen, fieldTyp descriptor.FieldDescriptorP
 	case descriptor.FieldDescriptorProto_TYPE_SINT64:
 		p.callVarint(`(uint64(`, varName, `) << 1) ^ uint64((`, varName, ` >> 63))`)
 	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
-		p.callVarint(varName, `.Size()`)
+		if protoSizer {
+			p.callVarint(varName, `.ProtoSize()`)
+		} else {
+			p.callVarint(varName, `.Size()`)
+		}
 		p.P(`n`, numGen.Next(), `, err := `, varName, `.MarshalTo(data[i:])`)
 		p.P(`if err != nil {`)
 		p.In()
@@ -371,6 +375,8 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 	nullable := gogoproto.IsNullable(field)
 	repeated := field.IsRepeated()
 	required := field.IsRequired()
+
+	protoSizer := gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto)
 	if required && nullable {
 		p.P(`if m.`, fieldname, `== nil {`)
 		p.In()
@@ -397,13 +403,13 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 	}
 	switch *field.Type {
 	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
-		if !p.unsafe {
+		if !p.unsafe || gogoproto.IsCastType(field) {
 			if packed {
 				p.encodeKey(fieldNumber, wireType)
 				p.callVarint(`len(m.`, fieldname, `) * 8`)
 				p.P(`for _, num := range m.`, fieldname, ` {`)
 				p.In()
-				p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(num)`)
+				p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(num))`)
 				p.encodeFixed64("f" + numGen.Current())
 				p.Out()
 				p.P(`}`)
@@ -411,7 +417,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 				p.P(`for _, num := range m.`, fieldname, ` {`)
 				p.In()
 				p.encodeKey(fieldNumber, wireType)
-				p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(num)`)
+				p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(num))`)
 				p.encodeFixed64("f" + numGen.Current())
 				p.Out()
 				p.P(`}`)
@@ -419,15 +425,15 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 				p.P(`if m.`, fieldname, ` != 0 {`)
 				p.In()
 				p.encodeKey(fieldNumber, wireType)
-				p.callFixed64(p.mathPkg.Use(), `.Float64bits(m.`+fieldname, `)`)
+				p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`)
 				p.Out()
 				p.P(`}`)
 			} else if !nullable {
 				p.encodeKey(fieldNumber, wireType)
-				p.callFixed64(p.mathPkg.Use(), `.Float64bits(m.`+fieldname, `)`)
+				p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`)
 			} else {
 				p.encodeKey(fieldNumber, wireType)
-				p.callFixed64(p.mathPkg.Use(), `.Float64bits(*m.`+fieldname, `)`)
+				p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(*m.`+fieldname, `))`)
 			}
 		} else {
 			if packed {
@@ -461,13 +467,13 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 			}
 		}
 	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
-		if !p.unsafe {
+		if !p.unsafe || gogoproto.IsCastType(field) {
 			if packed {
 				p.encodeKey(fieldNumber, wireType)
 				p.callVarint(`len(m.`, fieldname, `) * 4`)
 				p.P(`for _, num := range m.`, fieldname, ` {`)
 				p.In()
-				p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(num)`)
+				p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(num))`)
 				p.encodeFixed32("f" + numGen.Current())
 				p.Out()
 				p.P(`}`)
@@ -475,7 +481,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 				p.P(`for _, num := range m.`, fieldname, ` {`)
 				p.In()
 				p.encodeKey(fieldNumber, wireType)
-				p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(num)`)
+				p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(num))`)
 				p.encodeFixed32("f" + numGen.Current())
 				p.Out()
 				p.P(`}`)
@@ -483,15 +489,15 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 				p.P(`if m.`, fieldname, ` != 0 {`)
 				p.In()
 				p.encodeKey(fieldNumber, wireType)
-				p.callFixed32(p.mathPkg.Use(), `.Float32bits(m.`+fieldname, `)`)
+				p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`)
 				p.Out()
 				p.P(`}`)
 			} else if !nullable {
 				p.encodeKey(fieldNumber, wireType)
-				p.callFixed32(p.mathPkg.Use(), `.Float32bits(m.`+fieldname, `)`)
+				p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`)
 			} else {
 				p.encodeKey(fieldNumber, wireType)
-				p.callFixed32(p.mathPkg.Use(), `.Float32bits(*m.`+fieldname, `)`)
+				p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(*m.`+fieldname, `))`)
			}
 		} else {
 			if packed {
@@ -896,22 +902,30 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 			} else if !nullable {
 				accessor = `(&v)`
 			}
-			p.P(`msgSize := `, accessor, `.Size()`)
+			if protoSizer {
+				p.P(`msgSize := `, accessor, `.ProtoSize()`)
+			} else {
+				p.P(`msgSize := `, accessor, `.Size()`)
+			}
 			sum = append(sum, `msgSize + sov`+p.localName+`(uint64(msgSize))`)
 		}
 		p.P(`mapSize := `, strings.Join(sum, " + "))
 		p.callVarint("mapSize")
 		p.encodeKey(1, wireToType(keywire))
-		p.mapField(numGen, m.KeyField.GetType(), "k")
+		p.mapField(numGen, m.KeyField.GetType(), "k", protoSizer)
 		p.encodeKey(2, wireToType(valuewire))
-		p.mapField(numGen, m.ValueField.GetType(), accessor)
+		p.mapField(numGen, m.ValueField.GetType(), accessor, protoSizer)
 		p.Out()
 		p.P(`}`)
 	} else if repeated {
 		p.P(`for _, msg := range m.`, fieldname, ` {`)
 		p.In()
 		p.encodeKey(fieldNumber, wireType)
-		p.callVarint("msg.Size()")
+		if protoSizer {
+			p.callVarint("msg.ProtoSize()")
+		} else {
+			p.callVarint("msg.Size()")
+		}
 		p.P(`n, err := msg.MarshalTo(data[i:])`)
 		p.P(`if err != nil {`)
 		p.In()
@@ -923,7 +937,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 		p.P(`}`)
 	} else {
 		p.encodeKey(fieldNumber, wireType)
-		p.callVarint(`m.`, fieldname, `.Size()`)
+		if protoSizer {
+			p.callVarint(`m.`, fieldname, `.ProtoSize()`)
+		} else {
+			p.callVarint(`m.`, fieldname, `.Size()`)
+		}
 		p.P(`n`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`)
 		p.P(`if err != nil {`)
 		p.In()
@@ -960,7 +978,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 		p.P(`for _, msg := range m.`, fieldname, ` {`)
 		p.In()
 		p.encodeKey(fieldNumber, wireType)
-		p.callVarint(`msg.Size()`)
+		if protoSizer {
+			p.callVarint(`msg.ProtoSize()`)
+		} else {
+			p.callVarint(`msg.Size()`)
+		}
 		p.P(`n, err := msg.MarshalTo(data[i:])`)
 		p.P(`if err != nil {`)
 		p.In()
@@ -972,7 +994,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 		p.P(`}`)
 	} else {
 		p.encodeKey(fieldNumber, wireType)
-		p.callVarint(`m.`, fieldname, `.Size()`)
+		if protoSizer {
+			p.callVarint(`m.`, fieldname, `.ProtoSize()`)
+		} else {
+			p.callVarint(`m.`, fieldname, `.Size()`)
+		}
 		p.P(`n`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`)
 		p.P(`if err != nil {`)
 		p.In()
@@ -1126,7 +1152,11 @@ func (p *marshalto) Generate(file *generator.FileDescriptor) {
 		p.P(`func (m *`, ccTypeName, `) Marshal() (data []byte, err error) {`)
 		p.In()
-		p.P(`size := m.Size()`)
+		if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) {
+			p.P(`size := m.ProtoSize()`)
+		} else {
+			p.P(`size := m.Size()`)
+		}
 		p.P(`data = make([]byte, size)`)
 		p.P(`n, err := m.MarshalTo(data)`)
 		p.P(`if err != nil {`)
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/populate/populate.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/populate/populate.go
index 70597db3aa823..6594096f7f732 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/populate/populate.go
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/populate/populate.go
@@ -290,7 +290,7 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato
 	} else if field.IsMessage() || p.IsGroup(field) {
 		funcCall := getFuncCall(goTypName)
 		if field.IsRepeated() {
-			p.P(p.varGen.Next(), ` := r.Intn(10)`)
+			p.P(p.varGen.Next(), ` := r.Intn(5)`)
 			p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`)
 			p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`)
 			p.In()
@@ -346,7 +346,7 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato
 		}
 	} else if field.IsBytes() {
 		if field.IsRepeated() {
-			p.P(p.varGen.Next(), ` := r.Intn(100)`)
+			p.P(p.varGen.Next(), ` := r.Intn(10)`)
 			p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`)
 			p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`)
 			p.In()
@@ -387,7 +387,7 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato
 	} else {
 		typName := generator.GoTypeToName(goTyp)
 		if field.IsRepeated() {
-			p.P(p.varGen.Next(), ` := r.Intn(100)`)
+			p.P(p.varGen.Next(), ` := r.Intn(10)`)
 			p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`)
 			p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`)
 			p.In()
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/size.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/size.go
index b657611b3ddda..49b1f962cd389 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/size.go
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/size.go
@@ -25,7 +25,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 /*
-The size plugin generates a Size method for each message.
+The size plugin generates a Size or ProtoSize method for each message.
 This is useful with the MarshalTo method generated by the marshalto plugin and
 the gogoproto.marshaler and gogoproto.marshaler_all extensions.
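
As an aside, the calling pattern that this Size/MarshalTo pairing produces looks roughly as follows. This is an illustrative sketch only: sizerMarshaler is hypothetical glue for the example, not a type from this repository; generated types satisfy it when the marshaler plugin is combined with either sizer or (with Size renamed to ProtoSize) the protosizer extension introduced in this diff.

// sizerMarshaler captures the two generated methods the pattern relies on.
type sizerMarshaler interface {
	Size() int // ProtoSize() instead, when protosizer is enabled
	MarshalTo(data []byte) (int, error)
}

// marshal sizes the message first so the buffer is allocated exactly once.
func marshal(m sizerMarshaler) ([]byte, error) {
	data := make([]byte, m.Size())
	n, err := m.MarshalTo(data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}
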
@@ -33,6 +33,8 @@ It is enabled by the following extensions:
 
   - sizer
   - sizer_all
+  - protosizer
+  - protosizer_all
 
 The size plugin also generates a test given it is enabled using one of the
 following extensions:
@@ -195,7 +197,7 @@ func (p *size) sizeZigZag() {
 	}`)
 }
 
-func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto) {
+func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto, sizeName string) {
 	fieldname := p.GetOneOfFieldName(message, field)
 	nullable := gogoproto.IsNullable(field)
 	repeated := field.IsRepeated()
@@ -393,17 +395,17 @@ func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, messag
 				p.P(`if v != nil {`)
 				p.In()
 				if valuegoTyp != valuegoAliasTyp {
-					p.P(`l = ((`, valuegoTyp, `)(v)).Size()`)
+					p.P(`l = ((`, valuegoTyp, `)(v)).`, sizeName, `()`)
 				} else {
-					p.P(`l = v.Size()`)
+					p.P(`l = v.`, sizeName, `()`)
 				}
 				p.Out()
 				p.P(`}`)
 			} else {
 				if valuegoTyp != valuegoAliasTyp {
-					p.P(`l = ((*`, valuegoTyp, `)(&v)).Size()`)
+					p.P(`l = ((*`, valuegoTyp, `)(&v)).`, sizeName, `()`)
 				} else {
-					p.P(`l = v.Size()`)
+					p.P(`l = v.`, sizeName, `()`)
 				}
 			}
 			sum = append(sum, `l+sov`+p.localName+`(uint64(l))`)
@@ -415,12 +417,12 @@ func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, messag
 	} else if repeated {
 		p.P(`for _, e := range m.`, fieldname, ` { `)
 		p.In()
-		p.P(`l=e.Size()`)
+		p.P(`l=e.`, sizeName, `()`)
 		p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`)
 		p.Out()
 		p.P(`}`)
 	} else {
-		p.P(`l=m.`, fieldname, `.Size()`)
+		p.P(`l=m.`, fieldname, `.`, sizeName, `()`)
 		p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`)
 	}
 case descriptor.FieldDescriptorProto_TYPE_BYTES:
@@ -447,12 +449,12 @@ func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, messag
 	if repeated {
 		p.P(`for _, e := range m.`, fieldname, ` { `)
 		p.In()
-		p.P(`l=e.Size()`)
+		p.P(`l=e.`, sizeName, `()`)
 		p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`)
 		p.Out()
 		p.P(`}`)
 	} else {
-		p.P(`l=m.`, fieldname, `.Size()`)
+		p.P(`l=m.`, fieldname, `.`, sizeName, `()`)
 		p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`)
 	}
 }
@@ -501,7 +503,12 @@ func (p *size) Generate(file *generator.FileDescriptor) {
 		protoPkg = p.NewImport("github.com/golang/protobuf/proto")
 	}
 	for _, message := range file.Messages() {
-		if !gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) {
+		sizeName := ""
+		if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) {
+			sizeName = "Size"
+		} else if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) {
+			sizeName = "ProtoSize"
+		} else {
 			continue
 		}
 		if message.DescriptorProto.GetOptions().GetMapEntry() {
@@ -509,7 +516,7 @@ func (p *size) Generate(file *generator.FileDescriptor) {
 		}
 		p.atleastOne = true
 		ccTypeName := generator.CamelCaseSlice(message.TypeName())
-		p.P(`func (m *`, ccTypeName, `) Size() (n int) {`)
+		p.P(`func (m *`, ccTypeName, `) `, sizeName, `() (n int) {`)
 		p.In()
 		p.P(`var l int`)
 		p.P(`_ = l`)
@@ -518,7 +525,7 @@ func (p *size) Generate(file *generator.FileDescriptor) {
 			oneof := field.OneofIndex != nil
 			if !oneof {
 				proto3 := gogoproto.IsProto3(file.FileDescriptorProto)
-				p.generateField(proto3, file, message, field)
+				p.generateField(proto3, file, message, field, sizeName)
 			} else {
 				fieldname := p.GetFieldName(message, field)
 				if _, ok := oneofs[fieldname]; ok {
@@ -528,7 +535,7 @@ func (p *size) Generate(file *generator.FileDescriptor) {
 				}
 				p.P(`if m.`, fieldname, ` != nil {`)
 				p.In()
-				p.P(`n+=m.`, fieldname, `.Size()`)
+				p.P(`n+=m.`, fieldname, `.`, sizeName, `()`)
 				p.Out()
 				p.P(`}`)
 			}
@@ -564,12 +571,12 @@ func (p *size) Generate(file *generator.FileDescriptor) {
 				continue
 			}
 			ccTypeName := p.OneOfTypeName(message, f)
-			p.P(`func (m *`, ccTypeName, `) Size() (n int) {`)
+			p.P(`func (m *`, ccTypeName, `) `, sizeName, `() (n int) {`)
 			p.In()
 			p.P(`var l int`)
 			p.P(`_ = l`)
 			vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(f)
-			p.generateField(false, file, message, f)
+			p.generateField(false, file, message, f, sizeName)
 			p.P(`return n`)
 			p.Out()
 			p.P(`}`)
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/sizetest.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/sizetest.go
index 0851a29a475f5..4fa946e57e890 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/sizetest.go
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/sizetest.go
@@ -51,7 +51,12 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes
 	}
 	for _, message := range file.Messages() {
 		ccTypeName := generator.CamelCaseSlice(message.TypeName())
-		if !gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) {
+		sizeName := ""
+		if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) {
+			sizeName = "Size"
+		} else if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) {
+			sizeName = "ProtoSize"
+		} else {
 			continue
 		}
 		if message.DescriptorProto.GetOptions().GetMapEntry() {
@@ -60,7 +65,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes
 
 		if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) {
 			used = true
-			p.P(`func Test`, ccTypeName, `Size(t *`, testingPkg.Use(), `.T) {`)
+			p.P(`func Test`, ccTypeName, sizeName, `(t *`, testingPkg.Use(), `.T) {`)
 			p.In()
 			p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`)
 			p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`)
@@ -72,7 +77,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes
 			p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`)
 			p.Out()
 			p.P(`}`)
-			p.P(`size := p.Size()`)
+			p.P(`size := p.`, sizeName, `()`)
 			p.P(`if len(data) != size {`)
 			p.In()
 			p.P(`t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(data))`)
@@ -96,7 +101,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes
 
 		if gogoproto.HasBenchGen(file.FileDescriptorProto, message.DescriptorProto) {
 			used = true
-			p.P(`func Benchmark`, ccTypeName, `Size(b *`, testingPkg.Use(), `.B) {`)
+			p.P(`func Benchmark`, ccTypeName, sizeName, `(b *`, testingPkg.Use(), `.B) {`)
 			p.In()
 			p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(616))`)
 			p.P(`total := 0`)
@@ -109,7 +114,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes
 			p.P(`b.ResetTimer()`)
 			p.P(`for i := 0; i < b.N; i++ {`)
 			p.In()
-			p.P(`total += pops[i%1000].Size()`)
+			p.P(`total += pops[i%1000].`, sizeName, `()`)
 			p.Out()
 			p.P(`}`)
 			p.P(`b.SetBytes(int64(total / b.N))`)
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/testgen/testgen.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/testgen/testgen.go
index de6c7217db0d2..a48a1c2ccbc8b 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/testgen/testgen.go
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/testgen/testgen.go
@@ -341,7 +341,11 @@ func (p *testProto) Generate(imports generator.PluginImports, file *generator.Fi
 			p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`)
 			p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`)
 			p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`)
-			p.P(`size := p.Size()`)
+			if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) {
+				p.P(`size := p.ProtoSize()`)
+			} else {
+				p.P(`size := p.Size()`)
+			}
 			p.P(`data := make([]byte, size)`)
 			p.P(`for i := range data {`)
 			p.In()
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go
index acb32d9db07ab..13285960988ec 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go
@@ -419,7 +419,7 @@ func (p *unmarshal) noStarOrSliceType(msg *generator.Descriptor, field *descript
 	return typ
 }
 
-func (p *unmarshal) field(file *descriptor.FileDescriptorProto, msg *generator.Descriptor, field *descriptor.FieldDescriptorProto, fieldname string, proto3 bool) {
+func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descriptor, field *descriptor.FieldDescriptorProto, fieldname string, proto3 bool) {
 	repeated := field.IsRepeated()
 	nullable := gogoproto.IsNullable(field)
 	typ := p.noStarOrSliceType(msg, field)
@@ -676,7 +676,7 @@ func (p *unmarshal) field(file *descriptor.FileDescriptorProto, msg *generator.D
 			p.Out()
 			p.P(`}`)
 			p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`)
-		} else if generator.IsMap(file, field) {
+		} else if generator.IsMap(file.FileDescriptorProto, field) {
 			m := p.GoMapType(nil, field)
 
 			keygoTyp, _ := p.GoType(nil, m.KeyField)
@@ -773,7 +773,12 @@ func (p *unmarshal) field(file *descriptor.FileDescriptorProto, msg *generator.D
 				p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, make([]byte, postIndex-iNdEx))`)
 				p.P(`copy(m.`, fieldname, `[len(m.`, fieldname, `)-1], data[iNdEx:postIndex])`)
 			} else {
-				p.P(`m.`, fieldname, ` = append([]byte{}`, `, data[iNdEx:postIndex]...)`)
+				p.P(`m.`, fieldname, ` = append(m.`, fieldname, `[:0] , data[iNdEx:postIndex]...)`)
+				p.P(`if m.`, fieldname, ` == nil {`)
+				p.In()
+				p.P(`m.`, fieldname, ` = []byte{}`)
+				p.Out()
+				p.P(`}`)
 			}
 		} else {
 			_, ctyp, err := generator.GetCustomType(field)
@@ -1061,13 +1066,13 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) {
 					p.P(`}`)
 					p.P(`for iNdEx < postIndex {`)
 					p.In()
-					p.field(file.FileDescriptorProto, message, field, fieldname, false)
+					p.field(file, message, field, fieldname, false)
 					p.Out()
 					p.P(`}`)
 					p.Out()
 					p.P(`} else if wireType == `, strconv.Itoa(wireType), `{`)
 					p.In()
-					p.field(file.FileDescriptorProto, message, field, fieldname, false)
+					p.field(file, message, field, fieldname, false)
 					p.Out()
 					p.P(`} else {`)
 					p.In()
@@ -1080,7 +1085,7 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) {
 					p.P(`return ` + fmtPkg.Use() + `.Errorf("proto: wrong wireType = %d for field ` + errFieldname + `", wireType)`)
 					p.Out()
 					p.P(`}`)
-					p.field(file.FileDescriptorProto, message, field, fieldname, proto3)
+					p.field(file, message, field, fieldname, proto3)
 				}
 
 				if field.IsRequired() {
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go
index f3a690a6b66d3..33c501b3e4204 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go
@@ -601,7 +601,7 @@ func (g *Generator) CommandLineParameters(parameter string) {
 		if pluginList == "none" {
 			pluginList = ""
 		}
-		gogoPluginNames := []string{"unmarshal", "unsafeunmarshaler", "union", "stringer", "size", "populate", "marshalto", "unsafemarshaler", "gostring", "face", "equal", "enumstringer", "embedcheck", "description", "defaultcheck", "oneofcheck"}
+		gogoPluginNames := []string{"unmarshal", "unsafeunmarshaler", "union", "stringer", "size", "protosizer", "populate", "marshalto", "unsafemarshaler", "gostring", "face", "equal", "enumstringer", "embedcheck", "description", "defaultcheck", "oneofcheck"}
 		pluginList = strings.Join(append(gogoPluginNames, pluginList), "+")
 	}
 	if pluginList != "" {
 		// Amend the set of plugins.
@@ -1857,11 +1857,11 @@ var methodNames = [...]string{
 	"ExtensionRangeArray",
 	"ExtensionMap",
 	"Descriptor",
-	"Size",
 	"MarshalTo",
 	"Equal",
 	"VerboseEqual",
 	"GoString",
+	"ProtoSize",
 }
 
 // Generate the type and default constant definitions for this Descriptor.
@@ -1875,6 +1875,9 @@ func (g *Generator) generateMessage(message *Descriptor) {
 	for _, n := range methodNames {
 		usedNames[n] = true
 	}
+	if !gogoproto.IsProtoSizer(message.file, message.DescriptorProto) {
+		usedNames["Size"] = true
+	}
 	fieldNames := make(map[*descriptor.FieldDescriptorProto]string)
 	fieldGetterNames := make(map[*descriptor.FieldDescriptorProto]string)
 	fieldTypes := make(map[*descriptor.FieldDescriptorProto]string)
@@ -2210,6 +2213,9 @@ func (g *Generator) generateMessage(message *Descriptor) {
 		if gogoproto.IsSizer(g.file.FileDescriptorProto, message.DescriptorProto) {
 			g.P(`Size() int`)
 		}
+		if gogoproto.IsProtoSizer(g.file.FileDescriptorProto, message.DescriptorProto) {
+			g.P(`ProtoSize() int`)
+		}
 		g.Out()
 		g.P("}")
 	}
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go
index 048cd9b36944c..258d6a9b6af7a 100644
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go
@@ -181,6 +181,11 @@ func (g *Generator) GetFieldName(message *Descriptor, field *descriptor.FieldDes
 			return fieldname + "_"
 		}
 	}
+	if !gogoproto.IsProtoSizer(message.file, message.DescriptorProto) {
+		if fieldname == "Size" {
+			return fieldname + "_"
+		}
+	}
 	return fieldname
 }
 
@@ -198,6 +203,11 @@ func (g *Generator) GetOneOfFieldName(message *Descriptor, field *descriptor.Fie
 			return fieldname + "_"
 		}
 	}
+	if !gogoproto.IsProtoSizer(message.file, message.DescriptorProto) {
+		if fieldname == "Size" {
+			return fieldname + "_"
+		}
+	}
 	return fieldname
 }
 
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
index fb838ed2d756a..f1f06564a157d 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
@@ -39,5 +39,5 @@ test: install generate-test-pbs
 generate-test-pbs:
 	make install
 	make -C testdata
-	make -C proto3_proto
+	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
 	make
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
index ae276fd77c0d1..e98ddec9815b6 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
@@ -29,8 +29,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Protocol buffer deep copy.
-// TODO: MessageSet and RawMessage.
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
 
 package proto
 
@@ -75,12 +75,13 @@ func Merge(dst, src Message) {
 }
 
 func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
 	for i := 0; i < in.NumField(); i++ {
 		f := in.Type().Field(i)
 		if strings.HasPrefix(f.Name, "XXX_") {
 			continue
 		}
-		mergeAny(out.Field(i), in.Field(i))
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
 	}
 
 	if emIn, ok := in.Addr().Interface().(extendableProto); ok {
@@ -98,7 +99,10 @@ func mergeStruct(out, in reflect.Value) {
 	}
 }
 
-func mergeAny(out, in reflect.Value) {
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
 	if in.Type() == protoMessageType {
 		if !in.IsNil() {
 			if out.IsNil() {
@@ -112,7 +116,21 @@ func mergeAny(out, in reflect.Value) {
 	switch in.Kind() {
 	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
 		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
 		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
 	case reflect.Map:
 		if in.Len() == 0 {
 			return
@@ -127,7 +145,7 @@ func mergeAny(out, in reflect.Value) {
 			switch elemKind {
 			case reflect.Ptr:
 				val = reflect.New(in.Type().Elem().Elem())
-				mergeAny(val, in.MapIndex(key))
+				mergeAny(val, in.MapIndex(key), false, nil)
 			case reflect.Slice:
 				val = in.MapIndex(key)
 				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
@@ -143,13 +161,21 @@ func mergeAny(out, in reflect.Value) {
 		if out.IsNil() {
 			out.Set(reflect.New(in.Elem().Type()))
 		}
-		mergeAny(out.Elem(), in.Elem())
+		mergeAny(out.Elem(), in.Elem(), true, nil)
 	case reflect.Slice:
 		if in.IsNil() {
 			return
 		}
 		if in.Type().Elem().Kind() == reflect.Uint8 {
 			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
 			// Append to []byte{} instead of []byte(nil) so that we never end up
 			// with a nil result.
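
For orientation, the two proto3 rules just introduced play out roughly as below. A hedged sketch only: pb.Example is a hypothetical proto3-generated message with a Name string field and a Payload bytes field, not a type from this repository.

// mergeDemo shows the two proto3 cases handled by mergeAny above.
func mergeDemo() {
	src := &pb.Example{Name: "", Payload: []byte{}}
	dst := &pb.Example{Name: "keep", Payload: []byte("data")}
	proto.Merge(dst, src)
	// dst.Name is still "keep": "" is the proto3 zero value, so the scalar
	// branch returns early (viaPtr is false and isProto3Zero reports true).
	// dst.Payload is still "data": a zero-length bytes field in a proto3
	// message is treated as unset by the prop.proto3 check above.
}
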
@@ -167,7 +193,7 @@ func mergeAny(out, in reflect.Value) {
 		default:
 			for i := 0; i < n; i++ {
 				x := reflect.Indirect(reflect.New(in.Type().Elem()))
-				mergeAny(x, in.Index(i))
+				mergeAny(x, in.Index(i), false, nil)
 				out.Set(reflect.Append(out, x))
 			}
 		}
@@ -184,7 +210,7 @@ func mergeExtension(out, in map[int32]Extension) {
 		eOut := Extension{desc: eIn.desc}
 		if eIn.value != nil {
 			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
-			mergeAny(v, reflect.ValueOf(eIn.value))
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
 			eOut.value = v.Interface()
 		}
 		if eIn.enc != nil {
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
index 88622c305a3b0..5810782fd84d5 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
@@ -46,6 +46,10 @@ import (
 // errOverflow is returned when an integer is too large to be represented.
 var errOverflow = errors.New("proto: integer overflow")
 
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
 // The fundamental decoders that interpret bytes on the wire.
 // Those that take integer types all return uint64 and are
 // therefore of type valueDecoder.
@@ -314,6 +318,24 @@ func UnmarshalMerge(buf []byte, pb Message) error {
 	return NewBuffer(buf).Unmarshal(pb)
 }
 
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	typ, base, err := getbase(pb)
+	if err != nil {
+		return err
+	}
+	return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
 // Unmarshal parses the protocol buffer representation in the
 // Buffer and places the decoded result in pb. If the struct
 // underlying pb does not match the data in the buffer, the results can be
@@ -377,6 +399,20 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
 				continue
 			}
 		}
+		// Maybe it's a oneof?
+		if prop.oneofUnmarshaler != nil {
+			m := structPointer_Interface(base, st).(Message)
+			// First return value indicates whether tag is a oneof field.
+			ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+			if err == ErrInternalBadWireType {
+				// Map the error to something more descriptive.
+				// Do the formatting here to save generated code space.
+				err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+			}
+			if ok {
+				continue
+			}
+		}
 		err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
 		continue
 	}
@@ -518,9 +554,7 @@ func (o *Buffer) dec_string(p *Properties, base structPointer) error {
 	if err != nil {
 		return err
 	}
-	sp := new(string)
-	*sp = s
-	*structPointer_String(base, p.field) = sp
+	*structPointer_String(base, p.field) = &s
 	return nil
 }
 
@@ -563,9 +597,13 @@ func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error
 		return err
 	}
 	nb := int(nn) // number of bytes of encoded bools
+	fin := o.index + nb
+	if fin < o.index {
+		return errOverflow
+	}
 
 	y := *v
-	for i := 0; i < nb; i++ {
+	for o.index < fin {
 		u, err := p.valDec(o)
 		if err != nil {
 			return err
@@ -677,7 +715,7 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
 	oi := o.index       // index at the end of this map entry
 	o.index -= len(raw) // move buffer back to start of map entry
 
-	mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V
+	mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
 	if mptr.Elem().IsNil() {
 		mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
 	}
@@ -729,8 +767,14 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
 			return fmt.Errorf("proto: bad map data tag %d", raw[0])
 		}
 	}
+	keyelem, valelem := keyptr.Elem(), valptr.Elem()
+	if !keyelem.IsValid() || !valelem.IsValid() {
+		// We did not decode the key or the value in the map entry.
+		// Either way, it's an invalid map entry.
+		return fmt.Errorf("proto: bad map data: missing key/val")
+	}
 
-	v.SetMapIndex(keyptr.Elem(), valptr.Elem())
+	v.SetMapIndex(keyelem, valelem)
 	return nil
 }
 
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
index 1512d605b2dd0..231b07401a383 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
@@ -60,9 +60,9 @@ func (e *RequiredNotSetError) Error() string {
 }
 
 var (
-	// ErrRepeatedHasNil is the error returned if Marshal is called with
+	// errRepeatedHasNil is the error returned if Marshal is called with
 	// a struct with a repeated field containing a nil element.
-	ErrRepeatedHasNil = errors.New("proto: repeated field has nil element")
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
 
 	// ErrNil is the error returned if Marshal is called with nil.
 	ErrNil = errors.New("proto: Marshal called with nil")
@@ -105,6 +105,11 @@ func (p *Buffer) EncodeVarint(x uint64) error {
 	return nil
 }
 
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+	return sizeVarint(x)
+}
+
 func sizeVarint(x uint64) (n int) {
 	for {
 		n++
@@ -228,6 +233,20 @@ func Marshal(pb Message) ([]byte, error) {
 	return p.buf, err
 }
 
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	t, base, err := getbase(pb)
+	if structPointer_IsNil(base) {
+		return ErrNil
+	}
+	if err == nil {
+		var state errorState
+		err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+	}
+	return err
+}
+
 // Marshal takes the protocol buffer
 // and encodes it into the wire format, writing the result to the
 // Buffer.
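
Taken together with the DecodeMessage counterpart added in decode.go, the new helpers give a length-delimited round trip. A hedged sketch of the calling pattern, using only the exported API added or already present here; msg and out stand in for values of any generated message type:

// roundTrip writes msg with a varint length prefix, then reads it back.
func roundTrip(msg, out proto.Message) error {
	var buf proto.Buffer
	if err := buf.EncodeMessage(msg); err != nil {
		return err
	}
	// The prefix costs proto.SizeVarint(uint64(proto.Size(msg))) bytes
	// on top of the message body itself.
	return proto.NewBuffer(buf.Bytes()).DecodeMessage(out)
}
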
@@ -318,7 +337,7 @@ func size_bool(p *Properties, base structPointer) int {
 
 func size_proto3_bool(p *Properties, base structPointer) int {
 	v := *structPointer_BoolVal(base, p.field)
-	if !v {
+	if !v && !p.oneof {
 		return 0
 	}
 	return len(p.tagcode) + 1 // each bool takes exactly one byte
@@ -361,7 +380,7 @@ func size_int32(p *Properties, base structPointer) (n int) {
 func size_proto3_int32(p *Properties, base structPointer) (n int) {
 	v := structPointer_Word32Val(base, p.field)
 	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
-	if x == 0 {
+	if x == 0 && !p.oneof {
 		return 0
 	}
 	n += len(p.tagcode)
@@ -407,7 +426,7 @@ func size_uint32(p *Properties, base structPointer) (n int) {
 func size_proto3_uint32(p *Properties, base structPointer) (n int) {
 	v := structPointer_Word32Val(base, p.field)
 	x := word32Val_Get(v)
-	if x == 0 {
+	if x == 0 && !p.oneof {
 		return 0
 	}
 	n += len(p.tagcode)
@@ -452,7 +471,7 @@ func size_int64(p *Properties, base structPointer) (n int) {
 func size_proto3_int64(p *Properties, base structPointer) (n int) {
 	v := structPointer_Word64Val(base, p.field)
 	x := word64Val_Get(v)
-	if x == 0 {
+	if x == 0 && !p.oneof {
 		return 0
 	}
 	n += len(p.tagcode)
@@ -495,7 +514,7 @@ func size_string(p *Properties, base structPointer) (n int) {
 
 func size_proto3_string(p *Properties, base structPointer) (n int) {
 	v := *structPointer_StringVal(base, p.field)
-	if v == "" {
+	if v == "" && !p.oneof {
 		return 0
 	}
 	n += len(p.tagcode)
@@ -529,7 +548,7 @@ func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
 		}
 		o.buf = append(o.buf, p.tagcode...)
 		o.EncodeRawBytes(data)
-		return nil
+		return state.err
 	}
 
 	o.buf = append(o.buf, p.tagcode...)
@@ -667,7 +686,7 @@ func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error
 
 func size_slice_byte(p *Properties, base structPointer) (n int) {
 	s := *structPointer_Bytes(base, p.field)
-	if s == nil {
+	if s == nil && !p.oneof {
 		return 0
 	}
 	n += len(p.tagcode)
@@ -677,7 +696,7 @@ func size_slice_byte(p *Properties, base structPointer) (n int) {
 
 func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
 	s := *structPointer_Bytes(base, p.field)
-	if len(s) == 0 {
+	if len(s) == 0 && !p.oneof {
 		return 0
 	}
 	n += len(p.tagcode)
@@ -939,7 +958,7 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
 	for i := 0; i < l; i++ {
 		structp := s.Index(i)
 		if structPointer_IsNil(structp) {
-			return ErrRepeatedHasNil
+			return errRepeatedHasNil
 		}
 
 		// Can the object marshal itself?
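
The repeated `!p.oneof` guards deserve a note: a plain proto3 scalar at its zero value is skipped entirely, but once a field lives inside a oneof its tag must still be emitted so that "set to zero" stays distinguishable from "unset". A hedged sketch with a hypothetical generated type pb.M carrying a plain Count field and a oneof member Num (neither type is from this repository):

func oneofSizing() {
	plain := &pb.M{Count: 0}               // plain proto3 field at its zero value
	inOneof := &pb.M{U: &pb.M_Num{Num: 0}} // the same zero value, set via a oneof
	_ = proto.Size(plain)   // 0: zero-valued plain fields contribute nothing
	_ = proto.Size(inOneof) // > 0: the oneof member's tag is still encoded
}
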
@@ -958,7 +977,7 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
 		err := o.enc_len_struct(p.sprop, structp, &state)
 		if err != nil && !state.shouldContinue(err, nil) {
 			if err == ErrNil {
-				return ErrRepeatedHasNil
+				return errRepeatedHasNil
 			}
 			return err
 		}
@@ -1001,7 +1020,7 @@ func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error
 	for i := 0; i < l; i++ {
 		b := s.Index(i)
 		if structPointer_IsNil(b) {
-			return ErrRepeatedHasNil
+			return errRepeatedHasNil
 		}
 
 		o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
@@ -1010,7 +1029,7 @@ func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error
 
 		if err != nil && !state.shouldContinue(err, nil) {
 			if err == ErrNil {
-				return ErrRepeatedHasNil
+				return errRepeatedHasNil
 			}
 			return err
 		}
@@ -1084,7 +1103,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
 		repeated MapFieldEntry map_field = N;
 	*/
 
-	v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
+	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
 	if v.Len() == 0 {
 		return nil
 	}
@@ -1101,11 +1120,15 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
 		return nil
 	}
 
-	keys := v.MapKeys()
-	sort.Sort(mapKeys(keys))
-	for _, key := range keys {
+	// Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+	for _, key := range v.MapKeys() {
 		val := v.MapIndex(key)
 
+		// The only illegal map entry values are nil message pointers.
+		if val.Kind() == reflect.Ptr && val.IsNil() {
+			return errors.New("proto: map has nil element")
+		}
+
 		keycopy.Set(key)
 		valcopy.Set(val)
 
@@ -1118,7 +1141,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
 }
 
 func size_new_map(p *Properties, base structPointer) int {
-	v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
+	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
 
 	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
 
@@ -1128,10 +1151,12 @@ func size_new_map(p *Properties, base structPointer) int {
 		keycopy.Set(key)
 		valcopy.Set(val)
 
-		// Tag codes are two bytes per map entry.
-		n += 2
-		n += p.mkeyprop.size(p.mkeyprop, keybase)
-		n += p.mvalprop.size(p.mvalprop, valbase)
+		// Tag codes for key and val are the responsibility of the sub-sizer.
+		keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+		valsize := p.mvalprop.size(p.mvalprop, valbase)
+		entry := keysize + valsize
+		// Add on tag code and length of map entry itself.
+		n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
 	}
 	return n
 }
@@ -1184,6 +1209,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
 				if p.Required && state.err == nil {
 					state.err = &RequiredNotSetError{p.Name}
 				}
+			} else if err == errRepeatedHasNil {
+				// Give more context to nil values in repeated fields.
+				return errors.New("repeated field " + p.OrigName + " has nil element")
 			} else if !state.shouldContinue(err, p) {
 				return err
 			}
@@ -1191,6 +1219,14 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
 		}
 	}
 
+	// Do oneof fields.
+	if prop.oneofMarshaler != nil {
+		m := structPointer_Interface(base, prop.stype).(Message)
+		if err := prop.oneofMarshaler(m, o); err != nil {
+			return err
+		}
+	}
+
 	// Add unrecognized fields at the end.
 	if prop.unrecField.IsValid() {
 		v := *structPointer_Bytes(base, prop.unrecField)
@@ -1216,6 +1252,12 @@ func size_struct(prop *StructProperties, base structPointer) (n int) {
 		n += len(v)
 	}
 
+	// Factor in any oneof fields.
+	if prop.oneofSizer != nil {
+		m := structPointer_Interface(base, prop.stype).(Message)
+		n += prop.oneofSizer(m)
+	}
+
 	return
 }
 
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
index d8673a3e97aef..f5db1def3c248 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
@@ -30,7 +30,6 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Protocol buffer comparison.
-// TODO: MessageSet.
 
 package proto
 
@@ -51,7 +50,9 @@ Equality is defined in this way:
     are equal, and extensions sets are equal.
   - Two set scalar fields are equal iff their values are equal.
    If the fields are of a floating-point type, remember that
-    NaN != x for all x, including NaN.
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
   - Two repeated fields are equal iff their lengths are the same,
    and their corresponding elements are equal (a "bytes" field,
    although represented by []byte, is not a repeated field)
@@ -89,6 +90,7 @@ func Equal(a, b Message) bool {
 
 // v1 and v2 are known to have the same type.
 func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
 	for i := 0; i < v1.NumField(); i++ {
 		f := v1.Type().Field(i)
 		if strings.HasPrefix(f.Name, "XXX_") {
@@ -114,7 +116,7 @@ func equalStruct(v1, v2 reflect.Value) bool {
 			}
 			f1, f2 = f1.Elem(), f2.Elem()
 		}
-		if !equalAny(f1, f2) {
+		if !equalAny(f1, f2, sprop.Prop[i]) {
 			return false
 		}
 	}
@@ -141,7 +143,8 @@ func equalStruct(v1, v2 reflect.Value) bool {
 }
 
 // v1 and v2 are known to have the same type.
-func equalAny(v1, v2 reflect.Value) bool {
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
 	if v1.Type() == protoMessageType {
 		m1, _ := v1.Interface().(Message)
 		m2, _ := v2.Interface().(Message)
@@ -154,6 +157,17 @@ func equalAny(v1, v2 reflect.Value) bool {
 		return v1.Float() == v2.Float()
 	case reflect.Int32, reflect.Int64:
 		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
 	case reflect.Map:
 		if v1.Len() != v2.Len() {
 			return false
@@ -164,16 +178,22 @@ func equalAny(v1, v2 reflect.Value) bool {
 				// This key was not found in the second map.
 				return false
 			}
-			if !equalAny(v1.MapIndex(key), val2) {
+			if !equalAny(v1.MapIndex(key), val2, nil) {
 				return false
 			}
 		}
 		return true
 	case reflect.Ptr:
-		return equalAny(v1.Elem(), v2.Elem())
+		return equalAny(v1.Elem(), v2.Elem(), prop)
 	case reflect.Slice:
 		if v1.Type().Elem().Kind() == reflect.Uint8 {
 			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
 			if v1.IsNil() != v2.IsNil() {
 				return false
 			}
@@ -184,7 +204,7 @@ func equalAny(v1, v2 reflect.Value) bool {
 			return false
 		}
 		for i := 0; i < v1.Len(); i++ {
-			if !equalAny(v1.Index(i), v2.Index(i)) {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
 				return false
 			}
 		}
@@ -219,7 +239,7 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
 
 		if m1 != nil && m2 != nil {
 			// Both are unencoded.
-			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
 				return false
 			}
 			continue
@@ -247,7 +267,7 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
 			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
 			return false
 		}
-		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
 			return false
 		}
 	}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
index f7667fab48c8d..054f4f1df7887 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
@@ -37,6 +37,7 @@ package proto
 
 import (
 	"errors"
+	"fmt"
 	"reflect"
 	"strconv"
 	"sync"
@@ -221,7 +222,7 @@ func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
 }
 
 // GetExtension parses and returns the given extension of pb.
-// If the extension is not present it returns ErrMissingExtension.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
 func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
 	if err := checkExtensionTypes(pb, extension); err != nil {
 		return nil, err
@@ -230,8 +231,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
 	emap := pb.ExtensionMap()
 	e, ok := emap[extension.Field]
 	if !ok {
-		return nil, ErrMissingExtension
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
 	}
+
 	if e.value != nil {
 		// Already decoded. Check the descriptor, though.
 		if e.desc != extension {
@@ -257,12 +261,46 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
 	return e.value, nil
 }
 
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a Ptr, we can directly return sf.value.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non int32 reflect.value directly
+		// set it as a int32.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
 // decodeExtension decodes an extension encoded in b.
 func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
 	o := NewBuffer(b)
 	t := reflect.TypeOf(extension.ExtensionType)
-	rep := extension.repeated()
 
 	props := extensionProperties(extension)
 
@@ -284,7 +322,7 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
 			return nil, err
 		}
 
-		if !rep || o.index >= len(o.buf) {
+		if o.index >= len(o.buf) {
 			break
 		}
 	}
@@ -321,6 +359,14 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
 	if typ != reflect.TypeOf(value) {
 		return errors.New("proto: bad extension value type")
 	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
 
 	pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
 	return nil
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
index 87c6b9d1acf82..0de8f8dffd088 100644
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
@@ -30,171 +30,237 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 /*
-	Package proto converts data structures to and from the wire format of
-	protocol buffers. It works in concert with the Go source code generated
-	for .proto files by the protocol compiler.
-
-	A summary of the properties of the protocol buffer interface
-	for a protocol buffer variable v:
-
-	  - Names are turned from camel_case to CamelCase for export.
-	  - There are no methods on v to set fields; just treat
-	    them as structure fields.
-	  - There are getters that return a field's value if set,
-	    and return the field's default value if unset.
-	    The getters work even if the receiver is a nil message.
-	  - The zero value for a struct is its correct initialization state.
-	    All desired fields must be set before marshaling.
-	  - A Reset() method will restore a protobuf struct to its zero state.
-	  - Non-repeated fields are pointers to the values; nil means unset.
-	    That is, optional or required field int32 f becomes F *int32.
-	  - Repeated fields are slices.
-	  - Helper functions are available to aid the setting of fields.
-	    Helpers for getting values are superseded by the
-	    GetFoo methods and their use is deprecated.
-	    msg.Foo = proto.String("hello") // set field
-	  - Constants are defined to hold the default values of all fields that
-	    have them. They have the form Default_StructName_FieldName.
-	    Because the getter methods handle defaulted values,
-	    direct use of these constants should be rare.
-	  - Enums are given type names and maps from names to values.
-	    Enum values are prefixed with the enum's type name. Enum types have
-	    a String method, and a Enum method to assist in message construction.
-	  - Nested groups and enums have type names prefixed with the name of
-	    the surrounding message type.
-	  - Extensions are given descriptor names that start with E_,
-	    followed by an underscore-delimited list of the nested messages
-	    that contain it (if any) followed by the CamelCased name of the
-	    extension field itself. HasExtension, ClearExtension, GetExtension
-	    and SetExtension are functions for manipulating extensions.
-	  - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-	The simplest way to describe this is to see an example.
-	Given file test.proto, containing
-
-		package example;
-
-		enum FOO { X = 17; };
-
-		message Test {
-		  required string label = 1;
-		  optional int32 type = 2 [default=77];
-		  repeated int64 reps = 3;
-		  optional group OptionalGroup = 4 {
-		    required string RequiredField = 5;
-		  }
-		}
-
-	The resulting file, test.pb.go, is:
-
-		package example
-
-		import "github.com/golang/protobuf/proto"
-
-		type FOO int32
-		const (
-			FOO_X FOO = 17
-		)
-		var FOO_name = map[int32]string{
-			17: "X",
-		}
-		var FOO_value = map[string]int32{
-			"X": 17,
-		}
-
-		func (x FOO) Enum() *FOO {
-			p := new(FOO)
-			*p = x
-			return p
-		}
-		func (x FOO) String() string {
-			return proto.EnumName(FOO_name, int32(x))
-		}
-
-		type Test struct {
-			Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
-			Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
-			Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
-			Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
-			XXX_unrecognized []byte `json:"-"`
-		}
-		func (this *Test) Reset() { *this = Test{} }
-		func (this *Test) String() string { return proto.CompactTextString(this) }
-		const Default_Test_Type int32 = 77
-
-		func (this *Test) GetLabel() string {
-			if this != nil && this.Label != nil {
-				return *this.Label
-			}
-			return ""
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+    them as structure fields.
+  - There are getters that return a field's value if set,
+    and return the field's default value if unset.
+    The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+    All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+    That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+    msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+    have them. They have the form Default_StructName_FieldName.
+    Because the getter methods handle defaulted values,
+    direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+    Enum values are prefixed by the enclosing message's name, or by the
+    enum's type name if it is a top-level enum. Enum types have a String
+    method, and a Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+    the surrounding message type.
+ - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err } + *x = FOO(value) + return nil + } - func (this *Test) GetType() int32 { - if this != nil && this.Type != nil { - return *this.Type - } - return Default_Test_Type + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union } + return nil + } + const Default_Test_Type int32 = 77 - func (this *Test) GetOptionalgroup() *Test_OptionalGroup { - if this != nil { - return this.Optionalgroup - } - return nil + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label } + return "" + } - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type } - func (this *Test_OptionalGroup) Reset() 
{ *this = Test_OptionalGroup{} } - func (this *Test_OptionalGroup) String() string { return proto.CompactTextString(this) } + return Default_Test_Type + } - func (this *Test_OptionalGroup) GetRequiredField() string { - if this != nil && this.RequiredField != nil { - return *this.RequiredField - } - return "" + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup } + return nil + } - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField } + return "" + } - To create and play with a Test object: + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } - package main + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } - import ( - "log" + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } - "github.com/golang/protobuf/proto" - "./example.pb" - ) +To create and play with a Test object: - func main() { - test := &example.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Optionalgroup: &example.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := new(example.Test) - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // etc. + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } */ package proto @@ -203,6 +269,7 @@ import ( "fmt" "log" "reflect" + "sort" "strconv" "sync" ) @@ -377,13 +444,13 @@ func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, // DebugPrint dumps the encoded data in b in a debugging format with a header // including the string s. Used in testing but made available for general debugging. 
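Reviewer note on the receiver rename that follows: DebugPrint is exported on proto.Buffer, so it can be pointed at any marshaled bytes. A minimal sketch, reusing the pb.Test message from the package example above:

	// Dump the wire format of a message for inspection.
	data, err := proto.Marshal(&pb.Test{Label: proto.String("hello")})
	if err != nil {
		log.Fatal("marshaling error: ", err)
	}
	new(proto.Buffer).DebugPrint("test message", data)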
-func (o *Buffer) DebugPrint(s string, b []byte) { +func (p *Buffer) DebugPrint(s string, b []byte) { var u uint64 - obuf := o.buf - index := o.index - o.buf = b - o.index = 0 + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 depth := 0 fmt.Printf("\n--- %s ---\n", s) @@ -394,12 +461,12 @@ out: fmt.Print(" ") } - index := o.index - if index == len(o.buf) { + index := p.index + if index == len(p.buf) { break } - op, err := o.DecodeVarint() + op, err := p.DecodeVarint() if err != nil { fmt.Printf("%3d: fetching op err %v\n", index, err) break out @@ -416,7 +483,7 @@ out: case WireBytes: var r []byte - r, err = o.DecodeRawBytes(false) + r, err = p.DecodeRawBytes(false) if err != nil { break out } @@ -437,7 +504,7 @@ out: fmt.Printf("\n") case WireFixed32: - u, err = o.DecodeFixed32() + u, err = p.DecodeFixed32() if err != nil { fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) break out @@ -445,16 +512,15 @@ out: fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) case WireFixed64: - u, err = o.DecodeFixed64() + u, err = p.DecodeFixed64() if err != nil { fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) break out } fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - break case WireVarint: - u, err = o.DecodeVarint() + u, err = p.DecodeVarint() if err != nil { fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) break out @@ -462,30 +528,22 @@ out: fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) case WireStartGroup: - if err != nil { - fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) - break out - } fmt.Printf("%3d: t=%3d start\n", index, tag) depth++ case WireEndGroup: depth-- - if err != nil { - fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) - break out - } fmt.Printf("%3d: t=%3d end\n", index, tag) } } if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth) + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) } fmt.Printf("\n") - o.buf = obuf - o.index = index + p.buf = obuf + p.index = index } // SetDefaults sets unset protocol buffer fields to their default values. @@ -599,13 +657,15 @@ func setDefaults(v reflect.Value, recur, zeros bool) { for _, ni := range dm.nested { f := v.Field(ni) - if f.IsNil() { - continue - } - // f is *T or []*T - if f.Kind() == reflect.Ptr { + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } setDefaults(f, recur, zeros) - } else { + + case reflect.Slice: for i := 0; i < f.Len(); i++ { e := f.Index(i) if e.IsNil() { @@ -613,6 +673,15 @@ func setDefaults(v reflect.Value, recur, zeros bool) { } setDefaults(e, recur, zeros) } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } } } } @@ -638,10 +707,6 @@ type scalarField struct { value interface{} // the proto-declared default value, or nil } -func ptrToStruct(t reflect.Type) bool { - return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct -} - // t is a struct type. 
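On the setDefaults change above: message values reached through a map field are now defaulted too, matching the existing pointer and slice cases. A sketch of the observable behavior, using hypothetical messages Outer (with a map<string, Inner> field named items) and Inner (with a defaulted proto2 scalar):

	outer := &pb.Outer{Items: map[string]*pb.Inner{"k": {}}}
	proto.SetDefaults(outer)
	// outer.Items["k"] now has its defaults filled in; e.g. a field declared
	// `optional int32 n = 1 [default=7]` becomes proto.Int32(7).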
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { sprop := GetProperties(t) @@ -653,88 +718,118 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { } ft := t.Field(fi).Type - // nested messages - if ptrToStruct(ft) || (ft.Kind() == reflect.Slice && ptrToStruct(ft.Elem())) { + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: dm.nested = append(dm.nested, fi) - continue + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) } + } - sf := scalarField{ - index: fi, - kind: ft.Elem().Kind(), - } + return dm +} - // scalar fields without defaults - if !prop.HasDefault { - dm.scalars = append(dm.scalars, sf) - continue +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field } - // a scalar field: either *T or []byte + case reflect.Slice: switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - log.Printf("proto: bad default bool %q: %v", prop.Default, err) - continue - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - log.Printf("proto: bad default float32 %q: %v", prop.Default, err) - continue - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - log.Printf("proto: bad default float64 %q: %v", prop.Default, err) - continue - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - log.Printf("proto: bad default int32 %q: %v", prop.Default, err) - continue - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - log.Printf("proto: bad default int64 %q: %v", prop.Default, err) - continue - } - sf.value = x - case reflect.String: - sf.value = prop.Default + case reflect.Ptr: + nestedMessage = true // repeated message case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - log.Printf("proto: bad default uint32 %q: %v", prop.Default, err) - continue - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - log.Printf("proto: bad default uint64 %q: %v", prop.Default, err) - continue - } - sf.value = x - default: - log.Printf("proto: unhandled def kind %v", ft.Elem().Kind()) - continue + canHaveDefault = true // bytes field } - dm.scalars = append(dm.scalars, sf) + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } } - return dm + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. 
+ sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil } // Map fields may have key types of non-float scalars, strings and enums. @@ -742,10 +837,58 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { // If this turns out to be inefficient we can always consider other options, // such as doing a Schwartzian transform. -type mapKeys []reflect.Value +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} -func (s mapKeys) Len() int { return len(s) } -func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s mapKeys) Less(i, j int) bool { - return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) } + +// isProto3Zero reports whether v is a zero proto3 value. 
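The type specialization in mapKeys above is what keeps map output deterministic and numerically ordered: under the old textual fallback, uint32 keys compared as strings, so 10 sorted before 2. Package-internal sketch (mapKeys is unexported):

	keys := []reflect.Value{reflect.ValueOf(uint32(10)), reflect.ValueOf(uint32(2))}
	sort.Sort(mapKeys(keys))
	// numeric less: keys are now ordered 2, 10; the old fmt.Sprint-based
	// comparison ordered them "10", "2".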
+func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go index 9d912bce19bb6..e25e01e637483 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go @@ -44,11 +44,11 @@ import ( "sort" ) -// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. // A message type ID is required for storing a protocol buffer in a message set. -var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") +var errNoMessageTypeID = errors.New("proto does not have a message type ID") -// The first two types (_MessageSet_Item and MessageSet) +// The first two types (_MessageSet_Item and messageSet) // model what the protocol compiler produces for the following protocol message: // message MessageSet { // repeated group Item = 1 { @@ -58,27 +58,20 @@ var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") // } // That is the MessageSet wire format. We can't use a proto to generate these // because that would introduce a circular dependency between it and this package. -// -// When a proto1 proto has a field that looks like: -// optional message info = 3; -// the protocol compiler produces a field in the generated struct that looks like: -// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` -// The package is automatically inserted so there is no need for that proto file to -// import this package. type _MessageSet_Item struct { TypeId *int32 `protobuf:"varint,2,req,name=type_id"` Message []byte `protobuf:"bytes,3,req,name=message"` } -type MessageSet struct { +type messageSet struct { Item []*_MessageSet_Item `protobuf:"group,1,rep"` XXX_unrecognized []byte // TODO: caching? } -// Make sure MessageSet is a Message. -var _ Message = (*MessageSet)(nil) +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) // messageTypeIder is an interface satisfied by a protocol buffer type // that may be stored in a MessageSet. 
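isProto3Zero above centralizes the zero test used, for example, by the writeStruct change further down to skip zero-valued proto3 scalars in text output. Package-internal sketch (the helper is unexported):

	isProto3Zero(reflect.ValueOf(int32(0))) // true: the field is omitted
	isProto3Zero(reflect.ValueOf("x"))      // false: the field is written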
@@ -86,7 +79,7 @@ type messageTypeIder interface { MessageTypeId() int32 } -func (ms *MessageSet) find(pb Message) *_MessageSet_Item { +func (ms *messageSet) find(pb Message) *_MessageSet_Item { mti, ok := pb.(messageTypeIder) if !ok { return nil @@ -100,24 +93,24 @@ func (ms *MessageSet) find(pb Message) *_MessageSet_Item { return nil } -func (ms *MessageSet) Has(pb Message) bool { +func (ms *messageSet) Has(pb Message) bool { if ms.find(pb) != nil { return true } return false } -func (ms *MessageSet) Unmarshal(pb Message) error { +func (ms *messageSet) Unmarshal(pb Message) error { if item := ms.find(pb); item != nil { return Unmarshal(item.Message, pb) } if _, ok := pb.(messageTypeIder); !ok { - return ErrNoMessageTypeId + return errNoMessageTypeID } return nil // TODO: return error instead? } -func (ms *MessageSet) Marshal(pb Message) error { +func (ms *messageSet) Marshal(pb Message) error { msg, err := Marshal(pb) if err != nil { return err @@ -130,7 +123,7 @@ func (ms *MessageSet) Marshal(pb Message) error { mti, ok := pb.(messageTypeIder) if !ok { - return ErrNoMessageTypeId + return errNoMessageTypeID } mtid := mti.MessageTypeId() @@ -141,9 +134,9 @@ func (ms *MessageSet) Marshal(pb Message) error { return nil } -func (ms *MessageSet) Reset() { *ms = MessageSet{} } -func (ms *MessageSet) String() string { return CompactTextString(ms) } -func (*MessageSet) ProtoMessage() {} +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} // Support for the message_set_wire_format message option. @@ -169,7 +162,7 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { } sort.Ints(ids) - ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} for _, id := range ids { e := m[int32(id)] // Remove the wire type and field number varint, as well as the length varint. @@ -186,7 +179,7 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { - ms := new(MessageSet) + ms := new(messageSet) if err := Unmarshal(buf, ms); err != nil { return err } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go index 93259a3d65717..749919d250a1a 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine,!appenginevm +// +build appengine // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -144,8 +144,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return structPointer_ifield(p, f).(*map[int32]Extension) } -// Map returns the reflect.Value for the address of a map field in the struct. 
-func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { return structPointer_field(p, f).Addr() } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go index c52db1ca5ecde..e9be0fe92ee70 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine appenginevm +// +build !appengine // This file contains the implementation of the proto field accesses using package unsafe. @@ -130,8 +130,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) } -// Map returns the reflect.Value for the address of a map field in the struct. -func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go index 730a59579735a..d4531c0563847 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go @@ -37,6 +37,7 @@ package proto import ( "fmt" + "log" "os" "reflect" "sort" @@ -84,6 +85,15 @@ type decoder func(p *Buffer, prop *Properties, base structPointer) error // A valueDecoder decodes a single integer in a particular encoding. type valueDecoder func(o *Buffer) (x uint64, err error) +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -132,6 +142,22 @@ type StructProperties struct { order []int // list of struct field numbers in tag order unrecField field // field id of the XXX_unrecognized []byte field extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. 
+type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties } // Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. @@ -156,6 +182,7 @@ type Properties struct { Packed bool // relevant for repeated primitives only Enum string // set for enum types only proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field Default string // default value HasDefault bool // whether an explicit default was provided @@ -208,6 +235,9 @@ func (p *Properties) String() string { if p.proto3 { s += ",proto3" } + if p.oneof { + s += ",oneof" + } if len(p.Enum) > 0 { s += ",enum=" + p.Enum } @@ -284,6 +314,8 @@ func (p *Properties) Parse(s string) { p.Enum = f[5:] case f == "proto3": p.proto3 = true + case f == "oneof": + p.oneof = true case strings.HasPrefix(f, "def="): p.HasDefault = true p.Default = f[4:] // rest of string @@ -440,7 +472,12 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.enc = (*Buffer).enc_slice_byte p.dec = (*Buffer).dec_slice_byte p.size = size_slice_byte - if p.proto3 { + // This is a []byte, which is either a bytes field, + // or the value of a map field. In the latter case, + // we always encode an empty []byte, so we should not + // use the proto3 enc/size funcs. + // f == nil iff this is the key/value of a map field. + if p.proto3 && f != nil { p.enc = (*Buffer).enc_proto3_slice_byte p.size = size_proto3_slice_byte } @@ -595,7 +632,7 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF } var ( - mutex sync.Mutex + propertiesMu sync.RWMutex propertiesMap = make(map[reflect.Type]*StructProperties) ) @@ -605,13 +642,26 @@ func GetProperties(t reflect.Type) *StructProperties { if t.Kind() != reflect.Struct { panic("proto: type must have kind struct") } - mutex.Lock() - sprop := getPropertiesLocked(t) - mutex.Unlock() + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() return sprop } -// getPropertiesLocked requires that mutex is held. +// getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { if collectStats { @@ -647,6 +697,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { if f.Name == "XXX_unrecognized" { // special case prop.unrecField = toField(&f) } + oneof := f.Tag.Get("protobuf_oneof") != "" // special case prop.Prop[i] = p prop.order[i] = i if debug { @@ -656,7 +707,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") } } @@ -664,6 +715,41 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. 
sort.Sort(prop) + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + // build required counts // build tags reqCount := 0 @@ -722,3 +808,35 @@ func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[ } enumValueMaps[typeName] = valueMap } + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile deleted file mode 100644 index 75144b582e6d5..0000000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2014 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -include ../../Make.protobuf - -all: regenerate - -regenerate: - rm -f proto3.pb.go - make proto3.pb.go - -# The following rules are just aids to development. Not needed for typical testing. - -diff: regenerate - git diff proto3.pb.go diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go new file mode 100644 index 0000000000000..37c77820921d3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-go. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! + +/* +Package proto3_proto is a generated protocol buffer package. + +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap +*/ +package proto3_proto + +import proto "github.com/golang/protobuf/proto" +import testdata "github.com/golang/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. 
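Note on the generated file being added here: it shows the proto3 conventions from the package doc in practice, i.e. non-message scalars are plain values rather than pointers, so construction needs no proto.String or proto.Int32 wrappers. Sketch against the Message type defined just below:

	m := &proto3_proto.Message{
		Name:       "Rob", // string, not *string
		Hilarity:   proto3_proto.Message_PUNS,
		HeightInCm: 178,
		Data:       []byte("raw"),
	}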
+var _ = proto.Marshal + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} + +func (m *Message) GetNested() *Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() *testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if m != nil { + return m.Proto2Value + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func init() { + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto index 
3e327ded1d2cc..e2311d9294df6 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto @@ -31,6 +31,8 @@ syntax = "proto3"; +import "testdata/test.proto"; + package proto3_proto; message Message { @@ -51,8 +53,16 @@ message Message { repeated uint64 key = 5; Nested nested = 6; + + map terrain = 10; + testdata.SubDefaults proto2_field = 11; + map proto2_value = 13; } message Nested { string bunny = 1; } + +message MessageWithMap { + map byte_mapping = 1; +} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go index 720eac4705050..2336b144c12f4 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go @@ -37,11 +37,11 @@ import ( "bufio" "bytes" "encoding" + "errors" "fmt" "io" "log" "math" - "os" "reflect" "sort" "strings" @@ -170,20 +170,12 @@ func writeName(w *textWriter, props *Properties) error { return nil } -var ( - messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() -) - // raw is the interface satisfied by RawMessage. type raw interface { Bytes() []byte } func writeStruct(w *textWriter, sv reflect.Value) error { - if sv.Type() == messageSetType { - return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) - } - st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { @@ -246,7 +238,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } if fv.Kind() == reflect.Map { // Map fields are rendered as a repeated struct with key/value fields. - keys := fv.MapKeys() // TODO: should we sort these for deterministic output? + keys := fv.MapKeys() sort.Sort(mapKeys(keys)) for _, key := range keys { val := fv.MapIndex(key) @@ -283,20 +275,23 @@ func writeStruct(w *textWriter, sv reflect.Value) error { if err := w.WriteByte('\n'); err != nil { return err } - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { return err } - } - if err := writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err } // close struct w.unindent() @@ -315,26 +310,34 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { // proto3 non-repeated scalar field; skip if zero value - switch fv.Kind() { - case reflect.Bool: - if !fv.Bool() { - continue - } - case reflect.Int32, reflect.Int64: - if fv.Int() == 0 { - continue - } - case reflect.Uint32, reflect.Uint64: - if fv.Uint() == 0 { + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. 
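The oneof branch that continues below unwraps the interface so text output shows the inner field rather than its wrapper type. With the Test message from the package doc, the intended behavior is:

	t := &pb.Test{Label: proto.String("x"), Union: &pb.Test_Name{"fred"}}
	s := proto.MarshalTextString(t) // s contains `name: "fred"`, not a Union wrapper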
+ if fv.IsNil() { continue } - case reflect.Float32, reflect.Float64: - if fv.Float() == 0 { - continue - } - case reflect.String: - if fv.String() == "" { - continue + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() } } } @@ -514,44 +517,6 @@ func writeString(w *textWriter, s string) error { return w.WriteByte('"') } -func writeMessageSet(w *textWriter, ms *MessageSet) error { - for _, item := range ms.Item { - id := *item.TypeId - if msd, ok := messageSetMap[id]; ok { - // Known message set type. - if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { - return err - } - w.indent() - - pb := reflect.New(msd.t.Elem()) - if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { - if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { - return err - } - } else { - if err := writeStruct(w, pb.Elem()); err != nil { - return err - } - } - } else { - // Unknown type. - if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { - return err - } - w.indent() - if err := writeUnknownStruct(w, item.Message); err != nil { - return err - } - } - w.unindent() - if _, err := w.Write(gtNewline); err != nil { - return err - } - } - return nil -} - func writeUnknownStruct(w *textWriter, data []byte) (err error) { if !w.compact { if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { @@ -666,10 +631,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { pb, err := GetExtension(ep, desc) if err != nil { - if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil { - return err - } - continue + return fmt.Errorf("failed getting extension: %v", err) } // Repeated extensions will appear as a slice. diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go index ddd9579cdf47c..451323262cf18 100644 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go @@ -119,6 +119,14 @@ func isWhitespace(c byte) bool { return false } +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + func (p *textParser) skipWhitespace() { i := 0 for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { @@ -174,7 +182,7 @@ func (p *textParser) advance() { } unq, err := unquoteC(p.s[1:i], rune(p.s[0])) if err != nil { - p.errorf("invalid quoted string %v", p.s[0:i+1]) + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) return } p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] @@ -333,13 +341,13 @@ func (p *textParser) next() *token { p.advance() if p.done { p.cur.value = "" - } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { // Look for multiple quoted strings separated by whitespace, // and concatenate them. 
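Relatedly, the new isQuote helper means the scanner now treats single- and double-quoted strings alike, including when concatenating adjacent quoted chunks. Sketch, again with the doc's Test message:

	t := &pb.Test{}
	// Both forms now parse to the same label:
	_ = proto.UnmarshalText(`label: "hello"`, t)
	_ = proto.UnmarshalText(`label: 'hello'`, t)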
cat := p.cur for { p.skipWhitespace() - if p.done || p.s[0] != '"' { + if p.done || !isQuote(p.s[0]) { break } p.advance() @@ -385,8 +393,7 @@ func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSet } // Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { - sprops := GetProperties(st) +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { i, ok := sprops.decoderOrigNames[name] if ok { return i, sprops.Prop[i], true @@ -438,7 +445,8 @@ func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseEr func (p *textParser) readStruct(sv reflect.Value, terminator string) error { st := sv.Type() - reqCount := GetProperties(st).reqCount + sprops := GetProperties(st) + reqCount := sprops.reqCount var reqFieldErr error fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of @@ -520,99 +528,113 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { sl = reflect.Append(sl, ext) SetExtension(ep, desc, sl.Interface()) } - } else { - // This is a normal, non-extension field. - name := tok.value - fi, props, ok := structFieldByName(st, name) - if !ok { - return p.errorf("unknown field name %q in %v", name, st) + if err := p.consumeOptionalSeparator(); err != nil { + return err } + continue + } - dst := sv.Field(fi) - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // Technically the "key" and "value" could come in any order, - // but in practice they won't. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - if err := p.consumeToken("key"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeToken("value"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeToken(terminator); err != nil { - return err - } + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + sv.Field(oop.Field).Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } - dst.SetMapIndex(key, val) - continue + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err } - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) } - - if err := p.checkForColon(props, st.Field(fi).Type); err != nil { + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { return err } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } else if props.Required { - reqCount-- + if err := p.consumeToken(":"); err != nil { + return err } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue } - // For backward compatibility, permit a semicolon or comma after a field. - tok = p.next() - if tok.err != nil { - return tok.err + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { + reqCount-- } - if tok.value != ";" && tok.value != "," { - p.back() + + if err := p.consumeOptionalSeparator(); err != nil { + return err } + } if reqCount > 0 { @@ -621,6 +643,19 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return reqFieldErr } +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + func (p *textParser) readAny(v reflect.Value, props *Properties) error { tok := p.next() if tok.err != nil { @@ -645,18 +680,32 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { fv.Set(reflect.ValueOf(bytes)) return nil } - // Repeated field. May already exist. - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(at, flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. 
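For review context, the branch starting here makes the two text spellings of a repeated field equivalent. With Test.reps from the package doc:

	t := &pb.Test{}
	_ = proto.UnmarshalText(`label: "x" reps: [1, 2, 3]`, t)
	_ = proto.UnmarshalText(`label: "x" reps: 1 reps: 2 reps: 3`, t)
	// both leave t.Reps holding 1, 2, 3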
+ for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil } - fv.SetLen(flen + 1) - - // Read one. + // One value of the repeated field. p.back() - return p.readAny(fv.Index(flen), props) + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) case reflect.Bool: // Either "true", "false", 1 or 0. switch tok.value { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go b/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go index a9324bf731bec..a4b39cdadabe6 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go @@ -19,7 +19,6 @@ import ( "net/http" "path" "strconv" - "time" info "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/info/v2" @@ -32,6 +31,7 @@ const ( containersApi = "containers" subcontainersApi = "subcontainers" machineApi = "machine" + machineStatsApi = "machinestats" dockerApi = "docker" summaryApi = "summary" statsApi = "stats" @@ -63,8 +63,9 @@ func getApiVersions() []ApiVersion { v1_2 := newVersion1_2(v1_1) v1_3 := newVersion1_3(v1_2) v2_0 := newVersion2_0() + v2_1 := newVersion2_1(v2_0) - return []ApiVersion{v1_0, v1_1, v1_2, v1_3, v2_0} + return []ApiVersion{v1_0, v1_1, v1_2, v1_3, v2_0, v2_1} } @@ -358,47 +359,47 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma case statsApi: name := getContainerName(request) glog.V(4).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt) - conts, err := m.GetRequestedContainersInfo(name, opt) + infos, err := m.GetRequestedContainersInfo(name, opt) if err != nil { return err } - contStats := make(map[string][]v2.ContainerStats, 0) - for name, cont := range conts { - contStats[name] = convertStats(cont) + contStats := make(map[string][]v2.DeprecatedContainerStats, 0) + for name, cinfo := range infos { + contStats[name] = v2.DeprecatedStatsFromV1(cinfo) } return writeResult(contStats, w) case customMetricsApi: containerName := getContainerName(request) glog.V(4).Infof("Api - Custom Metrics: Looking for metrics for container %q, options %+v", containerName, opt) - conts, err := m.GetRequestedContainersInfo(containerName, opt) + infos, err := m.GetContainerInfoV2(containerName, opt) if err != nil { return err } contMetrics := make(map[string]map[string]map[string][]info.MetricValBasic, 0) - for _, cont := range conts { + for _, cinfo := range infos { metrics := make(map[string]map[string][]info.MetricValBasic, 0) - contStats := convertStats(cont) - for _, contStat := range contStats { - if contStat.HasCustomMetrics { - for name, allLabels := range contStat.CustomMetrics { - metricLabels := make(map[string][]info.MetricValBasic, 0) - for _, metric := range allLabels { - if !metric.Timestamp.IsZero() { - metVal := info.MetricValBasic{ - Timestamp: metric.Timestamp, - IntValue: metric.IntValue, - FloatValue: metric.FloatValue, - } - labels := metrics[name] - if labels != nil { - values := labels[metric.Label] - values = append(values, metVal) - labels[metric.Label] = values - metrics[name] = labels - } else { - metricLabels[metric.Label] = []info.MetricValBasic{metVal} - metrics[name] = metricLabels 
- } + for _, contStat := range cinfo.Stats { + if len(contStat.CustomMetrics) == 0 { + continue + } + for name, allLabels := range contStat.CustomMetrics { + metricLabels := make(map[string][]info.MetricValBasic, 0) + for _, metric := range allLabels { + if !metric.Timestamp.IsZero() { + metVal := info.MetricValBasic{ + Timestamp: metric.Timestamp, + IntValue: metric.IntValue, + FloatValue: metric.FloatValue, + } + labels := metrics[name] + if labels != nil { + values := labels[metric.Label] + values = append(values, metVal) + labels[metric.Label] = values + metrics[name] = labels + } else { + metricLabels[metric.Label] = []info.MetricValBasic{metVal} + metrics[name] = metricLabels } } } @@ -451,102 +452,61 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma } } -func instCpuStats(last, cur *info.ContainerStats) (*v2.CpuInstStats, error) { - if last == nil { - return nil, nil - } - if !cur.Timestamp.After(last.Timestamp) { - return nil, fmt.Errorf("container stats move backwards in time") - } - if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) { - return nil, fmt.Errorf("different number of cpus") - } - timeDelta := cur.Timestamp.Sub(last.Timestamp) - if timeDelta <= 100*time.Millisecond { - return nil, fmt.Errorf("time delta unexpectedly small") - } - // Nanoseconds to gain precision and avoid having zero seconds if the - // difference between the timestamps is just under a second - timeDeltaNs := uint64(timeDelta.Nanoseconds()) - convertToRate := func(lastValue, curValue uint64) (uint64, error) { - if curValue < lastValue { - return 0, fmt.Errorf("cumulative stats decrease") - } - valueDelta := curValue - lastValue - return (valueDelta * 1e9) / timeDeltaNs, nil +type version2_1 struct { + baseVersion *version2_0 +} + +func newVersion2_1(v *version2_0) *version2_1 { + return &version2_1{ + baseVersion: v, } - total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total) +} + +func (self *version2_1) Version() string { + return "v2.1" +} + +func (self *version2_1) SupportedRequestTypes() []string { + return append([]string{machineStatsApi}, self.baseVersion.SupportedRequestTypes()...) +} + +func (self *version2_1) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error { + // Get the query request. 
+ opt, err := getRequestOptions(r) if err != nil { - return nil, err + return err } - percpu := make([]uint64, len(last.Cpu.Usage.PerCpu)) - for i := range percpu { - var err error - percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i]) + + switch requestType { + case machineStatsApi: + glog.V(4).Infof("Api - MachineStats(%v)", request) + cont, err := m.GetRequestedContainersInfo("/", opt) if err != nil { - return nil, err + return err } - } - user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User) - if err != nil { - return nil, err - } - system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System) - if err != nil { - return nil, err - } - return &v2.CpuInstStats{ - Usage: v2.CpuInstUsage{ - Total: total, - PerCpu: percpu, - User: user, - System: system, - }, - }, nil -} - -func convertStats(cont *info.ContainerInfo) []v2.ContainerStats { - stats := make([]v2.ContainerStats, 0, len(cont.Stats)) - var last *info.ContainerStats - for _, val := range cont.Stats { - stat := v2.ContainerStats{ - Timestamp: val.Timestamp, - HasCpu: cont.Spec.HasCpu, - HasMemory: cont.Spec.HasMemory, - HasNetwork: cont.Spec.HasNetwork, - HasFilesystem: cont.Spec.HasFilesystem, - HasDiskIo: cont.Spec.HasDiskIo, - HasCustomMetrics: cont.Spec.HasCustomMetrics, + return writeResult(v2.MachineStatsFromV1(cont["/"]), w) + case statsApi: + name := getContainerName(request) + glog.V(4).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt) + conts, err := m.GetRequestedContainersInfo(name, opt) + if err != nil { + return err } - if stat.HasCpu { - stat.Cpu = val.Cpu - cpuInst, err := instCpuStats(last, val) - if err != nil { - glog.Warningf("Could not get instant cpu stats: %v", err) - } else { - stat.CpuInst = cpuInst + contStats := make(map[string]v2.ContainerInfo, len(conts)) + for name, cont := range conts { + if name == "/" { + // Root cgroup stats should be exposed as machine stats + continue + } + contStats[name] = v2.ContainerInfo{ + Spec: v2.ContainerSpecFromV1(&cont.Spec, cont.Aliases, cont.Namespace), + Stats: v2.ContainerStatsFromV1(&cont.Spec, cont.Stats), } - last = val - } - if stat.HasMemory { - stat.Memory = val.Memory - } - if stat.HasNetwork { - stat.Network.Interfaces = val.Network.Interfaces - } - if stat.HasFilesystem { - stat.Filesystem = val.Filesystem - } - if stat.HasDiskIo { - stat.DiskIo = val.DiskIo - } - if stat.HasCustomMetrics { - stat.CustomMetrics = val.CustomMetrics } - // TODO(rjnagal): Handle load stats. - stats = append(stats, stat) + return writeResult(contStats, w) + default: + return self.baseVersion.HandleRequest(requestType, request, m, w, r) } - return stats } func getRequestOptions(r *http.Request) (v2.RequestOptions, error) { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go index f41c006acf432..f925a5e5d0819 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go @@ -81,4 +81,8 @@ type ContainerHandler interface { // Cleanup frees up any resources being held like fds or go routines, etc. Cleanup() + + // Start starts any necessary background goroutines - must be cleaned up in Cleanup(). + // It is expected that most implementations will be a no-op. 
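Since the comment above says most implementations need no background work, the typical way to satisfy the new Start hook is an empty method; e.g. for a hypothetical handler type:

	// Start is a no-op for handlers without background goroutines.
	func (h *myContainerHandler) Start() {}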
+ Start() } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go index 69c981fa81a7a..1518d0e3eacd3 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go @@ -40,6 +40,7 @@ var ArgDockerEndpoint = flag.String("docker", "unix:///var/run/docker.sock", "do var DockerNamespace = "docker" // Basepath to all container specific information that libcontainer stores. +// TODO: Deprecate this flag var dockerRootDir = flag.String("docker_root", "/var/lib/docker", "Absolute path to the Docker state root directory (default: /var/lib/docker)") var dockerRunDir = flag.String("docker_run", "/var/run/docker", "Absolute path to the Docker run directory (default: /var/run/docker)") @@ -49,6 +50,8 @@ var dockerCgroupRegexp = regexp.MustCompile(`.+-([a-z0-9]{64})\.scope$`) var noSystemd = flag.Bool("nosystemd", false, "Explicitly disable systemd support for Docker containers") +var dockerEnvWhitelist = flag.String("docker_env_metadata_whitelist", "", "a comma-separated list of environment variable keys that needs to be collected for docker containers") + // TODO(vmarmol): Export run dir too for newer Dockers. // Directory holding Docker container state information. func DockerStateDir() string { @@ -59,6 +62,10 @@ func DockerStateDir() string { var useSystemd = false var check = sync.Once{} +const ( + dockerRootDirKey = "Root Dir" +) + func UseSystemd() bool { check.Do(func() { if *noSystemd { @@ -92,12 +99,14 @@ const ( devicemapperStorageDriver storageDriver = "devicemapper" aufsStorageDriver storageDriver = "aufs" overlayStorageDriver storageDriver = "overlay" + zfsStorageDriver storageDriver = "zfs" ) type dockerFactory struct { machineInfoFactory info.MachineInfoFactory storageDriver storageDriver + storageDir string client *docker.Client @@ -106,6 +115,8 @@ type dockerFactory struct { // Information about mounted filesystems. 
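The new --docker_env_metadata_whitelist flag introduced above is a comma-separated list of environment variable keys to surface as container metadata. A sketch of applying such a whitelist to a container's KEY=VALUE environment list; the helper name and return shape are mine, though the SplitN(env, "=", 2) matching mirrors the handler code later in this diff:

```go
package main

import (
	"fmt"
	"strings"
)

// filterEnvs keeps only whitelisted keys from a list of KEY=VALUE pairs.
func filterEnvs(whitelist string, containerEnvs []string) map[string]string {
	out := make(map[string]string)
	for _, key := range strings.Split(whitelist, ",") {
		if key == "" {
			continue // guards the empty-flag case, where Split yields [""]
		}
		for _, env := range containerEnvs {
			// SplitN keeps '=' characters inside the value intact.
			parts := strings.SplitN(env, "=", 2)
			if len(parts) == 2 && parts[0] == key {
				out[strings.ToLower(key)] = parts[1]
			}
		}
	}
	return out
}

func main() {
	envs := []string{"JAVA_OPTS=-Xmx256m", "PATH=/usr/bin", "SECRET=x"}
	fmt.Println(filterEnvs("JAVA_OPTS,MISSING", envs)) // map[java_opts:-Xmx256m]
}
```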
fsInfo fs.FsInfo + + dockerVersion []int } func (self *dockerFactory) String() string { @@ -117,14 +128,20 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool if err != nil { return } + + metadataEnvs := strings.Split(*dockerEnvWhitelist, ",") + handler, err = newDockerContainerHandler( client, name, self.machineInfoFactory, self.fsInfo, self.storageDriver, + self.storageDir, &self.cgroupSubsystems, inHostNamespace, + metadataEnvs, + self.dockerVersion, ) return } @@ -207,36 +224,45 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error { if err != nil { return fmt.Errorf("unable to communicate with docker daemon: %v", err) } + var dockerVersion []int if version, err := client.Version(); err != nil { return fmt.Errorf("unable to communicate with docker daemon: %v", err) } else { expected_version := []int{1, 0, 0} version_string := version.Get("Version") - version, err := parseDockerVersion(version_string) + dockerVersion, err = parseDockerVersion(version_string) if err != nil { return fmt.Errorf("couldn't parse docker version: %v", err) } - for index, number := range version { + for index, number := range dockerVersion { if number > expected_version[index] { break } else if number < expected_version[index] { - return fmt.Errorf("cAdvisor requires docker version %v or above but we have found version %v reported as \"%v\"", expected_version, version, version_string) + return fmt.Errorf("cAdvisor requires docker version %v or above but we have found version %v reported as \"%v\"", expected_version, dockerVersion, version_string) } } } - // Check that the libcontainer execdriver is used. - information, err := DockerInfo() + information, err := client.Info() if err != nil { return fmt.Errorf("failed to detect Docker info: %v", err) } - execDriver, ok := information["ExecutionDriver"] - if !ok || !strings.HasPrefix(execDriver, "native") { + + // Check that the libcontainer execdriver is used. 
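Register gates on a minimum daemon version by comparing the parsed version element-wise against expected_version, using the same break/return structure as the loop above. Distilled into a standalone helper (assuming both slices have the same length, as the original effectively does):

```go
package main

import "fmt"

// atLeast reports whether version >= minimum, comparing elements left to right.
func atLeast(version, minimum []int) bool {
	for i := range minimum {
		if version[i] > minimum[i] {
			return true // strictly newer at this position; later elements don't matter
		}
		if version[i] < minimum[i] {
			return false // strictly older
		}
	}
	return true // all elements equal
}

func main() {
	fmt.Println(atLeast([]int{1, 9, 1}, []int{1, 0, 0}))  // true
	fmt.Println(atLeast([]int{0, 11, 0}, []int{1, 0, 0})) // false
}
```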
+ execDriver := information.Get("ExecutionDriver") + if !strings.HasPrefix(execDriver, "native") { return fmt.Errorf("docker found, but not using native exec driver") } - sd, _ := information["Driver"] + sd := information.Get("Driver") + if sd == "" { + return fmt.Errorf("failed to find docker storage driver") + } + storageDir := information.Get("DockerRootDir") + if storageDir == "" { + storageDir = *dockerRootDir + } cgroupSubsystems, err := libcontainer.GetCgroupSubsystems() if err != nil { return fmt.Errorf("failed to get cgroup subsystems: %v", err) @@ -244,11 +270,13 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error { glog.Infof("Registering Docker factory") f := &dockerFactory{ - machineInfoFactory: factory, - client: client, - storageDriver: storageDriver(sd), cgroupSubsystems: cgroupSubsystems, + client: client, + dockerVersion: dockerVersion, fsInfo: fsInfo, + machineInfoFactory: factory, + storageDriver: storageDriver(sd), + storageDir: storageDir, } container.RegisterContainerHandlerFactory(f) return nil diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/fsHandler.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/fsHandler.go index ada5e76ce9cad..215b54561a485 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/fsHandler.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/fsHandler.go @@ -26,17 +26,19 @@ import ( type fsHandler interface { start() - usage() uint64 + usage() (uint64, uint64) stop() } type realFsHandler struct { sync.RWMutex - lastUpdate time.Time - usageBytes uint64 - period time.Duration - storageDirs []string - fsInfo fs.FsInfo + lastUpdate time.Time + usageBytes uint64 + baseUsageBytes uint64 + period time.Duration + rootfs string + extraDir string + fsInfo fs.FsInfo // Tells the container to stop. stopChan chan struct{} } @@ -45,14 +47,16 @@ const longDu = time.Second var _ fsHandler = &realFsHandler{} -func newFsHandler(period time.Duration, storageDirs []string, fsInfo fs.FsInfo) fsHandler { +func newFsHandler(period time.Duration, rootfs, extraDir string, fsInfo fs.FsInfo) fsHandler { return &realFsHandler{ - lastUpdate: time.Time{}, - usageBytes: 0, - period: period, - storageDirs: storageDirs, - fsInfo: fsInfo, - stopChan: make(chan struct{}, 1), + lastUpdate: time.Time{}, + usageBytes: 0, + baseUsageBytes: 0, + period: period, + rootfs: rootfs, + extraDir: extraDir, + fsInfo: fsInfo, + stopChan: make(chan struct{}, 1), } } @@ -61,23 +65,27 @@ func (fh *realFsHandler) needsUpdate() bool { } func (fh *realFsHandler) update() error { - var usage uint64 - for _, dir := range fh.storageDirs { - // TODO(Vishh): Add support for external mounts. - dirUsage, err := fh.fsInfo.GetDirUsage(dir) - if err != nil { - return err - } - usage += dirUsage + // TODO(vishh): Add support for external mounts. 
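The reworked fsHandler polls directory usage on a period, serves reads under an RWMutex, and now reports both the writable-layer usage and the combined total. A stripped-down analogue under assumed names (the measure function stands in for fs.FsInfo.GetDirUsage):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// usageTracker refreshes two usage numbers periodically, like realFsHandler.
type usageTracker struct {
	sync.RWMutex
	base, total      uint64
	measure          func(dir string) uint64
	rootfs, extraDir string
	stop             chan struct{}
}

func (u *usageTracker) update() {
	// Measure outside the lock; `du` can be slow.
	baseUsage := u.measure(u.rootfs)
	extra := u.measure(u.extraDir)
	u.Lock()
	defer u.Unlock()
	u.base = baseUsage
	u.total = baseUsage + extra
}

func (u *usageTracker) track(period time.Duration) {
	u.update() // prime the numbers before the first tick, as trackUsage now does
	for {
		select {
		case <-u.stop:
			return
		case <-time.After(period):
			u.update()
		}
	}
}

func (u *usageTracker) usage() (base, total uint64) {
	u.RLock()
	defer u.RUnlock()
	return u.base, u.total
}

func main() {
	u := &usageTracker{
		measure: func(string) uint64 { return 1024 },
		rootfs:  "/rootfs", extraDir: "/logs",
		stop: make(chan struct{}),
	}
	go u.track(50 * time.Millisecond)
	time.Sleep(100 * time.Millisecond)
	close(u.stop)
	fmt.Println(u.usage()) // 1024 2048
}
```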
+ baseUsage, err := fh.fsInfo.GetDirUsage(fh.rootfs) + if err != nil { + return err + } + + extraDirUsage, err := fh.fsInfo.GetDirUsage(fh.extraDir) + if err != nil { + return err } + fh.Lock() defer fh.Unlock() fh.lastUpdate = time.Now() - fh.usageBytes = usage + fh.usageBytes = baseUsage + extraDirUsage + fh.baseUsageBytes = baseUsage return nil } func (fh *realFsHandler) trackUsage() { + fh.update() for { select { case <-fh.stopChan: @@ -89,7 +97,7 @@ func (fh *realFsHandler) trackUsage() { } duration := time.Since(start) if duration > longDu { - glog.V(3).Infof("`du` on following dirs took %v: %v", duration, fh.storageDirs) + glog.V(3).Infof("`du` on following dirs took %v: %v", duration, []string{fh.rootfs, fh.extraDir}) } } } @@ -103,8 +111,8 @@ func (fh *realFsHandler) stop() { close(fh.stopChan) } -func (fh *realFsHandler) usage() uint64 { +func (fh *realFsHandler) usage() (baseUsageBytes, totalUsageBytes uint64) { fh.RLock() defer fh.RUnlock() - return fh.usageBytes + return fh.baseUsageBytes, fh.usageBytes } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go index 2a1b3661ce629..36c4907897a4d 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go @@ -17,6 +17,7 @@ package docker import ( "fmt" + "io/ioutil" "math" "path" "strings" @@ -35,14 +36,10 @@ import ( ) const ( - // Path to aufs dir where all the files exist. - // aufs/layers is ignored here since it does not hold a lot of data. - // aufs/mnt contains the mount points used to compose the rootfs. Hence it is also ignored. - pathToAufsDir = "aufs/diff" + // The read write layers exist here. + aufsRWLayer = "diff" // Path to the directory where docker stores log files if the json logging driver is enabled. pathToContainersDir = "containers" - // Path to the overlayfs storage driver directory. - pathToOverlayDir = "overlay" ) type dockerContainerHandler struct { @@ -59,15 +56,16 @@ type dockerContainerHandler struct { // Manager of this container's cgroups. cgroupManager cgroups.Manager - storageDriver storageDriver - fsInfo fs.FsInfo - storageDirs []string + storageDriver storageDriver + fsInfo fs.FsInfo + rootfsStorageDir string // Time at which this container was created. creationTime time.Time - // Metadata labels associated with the container. + // Metadata associated with the container. labels map[string]string + envs map[string]string // The container PID used to switch namespaces as required pid int @@ -85,14 +83,34 @@ type dockerContainerHandler struct { fsHandler fsHandler } +func getRwLayerID(containerID, storageDir string, sd storageDriver, dockerVersion []int) (string, error) { + const ( + // Docker version >=1.10.0 have a randomized ID for the root fs of a container. + randomizedRWLayerMinorVersion = 10 + rwLayerIDFile = "mount-id" + ) + if (dockerVersion[0] <= 1) && (dockerVersion[1] < randomizedRWLayerMinorVersion) { + return containerID, nil + } + + bytes, err := ioutil.ReadFile(path.Join(storageDir, "image", string(sd), "layerdb", "mounts", containerID, rwLayerIDFile)) + if err != nil { + return "", fmt.Errorf("failed to identify the read-write layer ID for container %q. 
- %v", containerID, err) + } + return string(bytes), err +} + func newDockerContainerHandler( client *docker.Client, name string, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, storageDriver storageDriver, + storageDir string, cgroupSubsystems *containerlibcontainer.CgroupSubsystems, inHostNamespace bool, + metadataEnvs []string, + dockerVersion []int, ) (container.ContainerHandler, error) { // Create the cgroup paths. cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) @@ -116,14 +134,18 @@ func newDockerContainerHandler( id := ContainerNameToDockerId(name) // Add the Containers dir where the log files are stored. - storageDirs := []string{path.Join(*dockerRootDir, pathToContainersDir, id)} + otherStorageDir := path.Join(storageDir, pathToContainersDir, id) + rwLayerID, err := getRwLayerID(id, storageDir, storageDriver, dockerVersion) + if err != nil { + return nil, err + } + var rootfsStorageDir string switch storageDriver { case aufsStorageDriver: - // Add writable layer for aufs. - storageDirs = append(storageDirs, path.Join(*dockerRootDir, pathToAufsDir, id)) + rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID) case overlayStorageDriver: - storageDirs = append(storageDirs, path.Join(*dockerRootDir, pathToOverlayDir, id)) + rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID) } handler := &dockerContainerHandler{ @@ -136,13 +158,11 @@ func newDockerContainerHandler( storageDriver: storageDriver, fsInfo: fsInfo, rootFs: rootFs, - storageDirs: storageDirs, - fsHandler: newFsHandler(time.Minute, storageDirs, fsInfo), + rootfsStorageDir: rootfsStorageDir, + fsHandler: newFsHandler(time.Minute, rootfsStorageDir, otherStorageDir, fsInfo), + envs: make(map[string]string), } - // Start the filesystem handler. - handler.fsHandler.start() - // We assume that if Inspect fails then the container is not known to docker. ctnr, err := client.InspectContainer(id) if err != nil { @@ -157,18 +177,35 @@ func newDockerContainerHandler( handler.image = ctnr.Config.Image handler.networkMode = ctnr.HostConfig.NetworkMode + // split env vars to get metadata map. + for _, exposedEnv := range metadataEnvs { + for _, envVar := range ctnr.Config.Env { + splits := strings.SplitN(envVar, "=", 2) + if splits[0] == exposedEnv { + handler.envs[strings.ToLower(exposedEnv)] = splits[1] + } + } + } + return handler, nil } +func (self *dockerContainerHandler) Start() { + // Start the filesystem handler. 
+ self.fsHandler.start() +} + func (self *dockerContainerHandler) Cleanup() { self.fsHandler.stop() } func (self *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) { return info.ContainerReference{ + Id: self.id, Name: self.name, Aliases: self.aliases, Namespace: DockerNamespace, + Labels: self.labels, }, nil } @@ -193,36 +230,31 @@ func libcontainerConfigToContainerSpec(config *libcontainerconfigs.Config, mi *i spec.HasMemory = true spec.Memory.Limit = math.MaxUint64 spec.Memory.SwapLimit = math.MaxUint64 - if config.Cgroups.Memory > 0 { - spec.Memory.Limit = uint64(config.Cgroups.Memory) - } - if config.Cgroups.MemorySwap > 0 { - spec.Memory.SwapLimit = uint64(config.Cgroups.MemorySwap) - } - // Get CPU info - spec.HasCpu = true - spec.Cpu.Limit = 1024 - if config.Cgroups.CpuShares != 0 { - spec.Cpu.Limit = uint64(config.Cgroups.CpuShares) + if config.Cgroups.Resources != nil { + if config.Cgroups.Resources.Memory > 0 { + spec.Memory.Limit = uint64(config.Cgroups.Resources.Memory) + } + if config.Cgroups.Resources.MemorySwap > 0 { + spec.Memory.SwapLimit = uint64(config.Cgroups.Resources.MemorySwap) + } + + // Get CPU info + spec.HasCpu = true + spec.Cpu.Limit = 1024 + if config.Cgroups.Resources.CpuShares != 0 { + spec.Cpu.Limit = uint64(config.Cgroups.Resources.CpuShares) + } + spec.Cpu.Mask = utils.FixCpuMask(config.Cgroups.Resources.CpusetCpus, mi.NumCores) } - spec.Cpu.Mask = utils.FixCpuMask(config.Cgroups.CpusetCpus, mi.NumCores) spec.HasDiskIo = true return spec } -var ( - hasNetworkModes = map[string]bool{ - "host": true, - "bridge": true, - "default": true, - } -) - func hasNet(networkMode string) bool { - return hasNetworkModes[networkMode] + return !strings.HasPrefix(networkMode, "container:") } func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) { @@ -239,13 +271,14 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) { spec.CreationTime = self.creationTime switch self.storageDriver { - case aufsStorageDriver, overlayStorageDriver: + case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver: spec.HasFilesystem = true default: spec.HasFilesystem = false } spec.Labels = self.labels + spec.Envs = self.envs spec.Image = self.image spec.HasNetwork = hasNet(self.networkMode) @@ -254,14 +287,12 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) { func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error { switch self.storageDriver { - case aufsStorageDriver, overlayStorageDriver: + case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver: default: return nil } - // As of now we assume that all the storage dirs are on the same device. - // The first storage dir will be that of the image layers. 
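hasNet is simplified above from a whitelist of known modes to a single negative check: a container is considered to own its network stats unless it joins another container's namespace. A tiny demonstration:

```go
package main

import (
	"fmt"
	"strings"
)

// hasNet matches the rewritten check: only "container:<id>" shares deny stats.
func hasNet(networkMode string) bool {
	return !strings.HasPrefix(networkMode, "container:")
}

func main() {
	for _, mode := range []string{"bridge", "host", "none", "container:abc123"} {
		fmt.Printf("%-16s -> %v\n", mode, hasNet(mode))
	}
}
```

The practical effect is that previously unlisted modes such as "none" or user-defined networks now count as having network stats, instead of only host/bridge/default.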
- deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0]) + deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir) if err != nil { return err } @@ -281,7 +312,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit} - fsStat.Usage = self.fsHandler.usage() + fsStat.BaseUsage, fsStat.Usage = self.fsHandler.usage() stats.Filesystem = append(stats.Filesystem, fsStat) return nil diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/compatibility.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/compatibility.go index b14d3b36b593d..7d484170e4bb7 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/compatibility.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/compatibility.go @@ -292,31 +292,32 @@ func convertOldConfigToNew(config v1Config) *configs.Config { result.Routes = config.Config.Routes var newCgroup = &configs.Cgroup{ - Name: old.Name, - Parent: old.Parent, - AllowAllDevices: old.AllowAllDevices, - AllowedDevices: old.AllowedDevices, - DeniedDevices: old.DeniedDevices, - Memory: old.Memory, - MemoryReservation: old.MemoryReservation, - MemorySwap: old.MemorySwap, - KernelMemory: old.KernelMemory, - CpuShares: old.CpuShares, - CpuQuota: old.CpuQuota, - CpuPeriod: old.CpuPeriod, - CpuRtRuntime: old.CpuRtRuntime, - CpuRtPeriod: old.CpuRtPeriod, - CpusetCpus: old.CpusetCpus, - CpusetMems: old.CpusetMems, - BlkioWeight: old.BlkioWeight, - BlkioLeafWeight: old.BlkioLeafWeight, - Freezer: old.Freezer, - HugetlbLimit: old.HugetlbLimit, - Slice: old.Slice, - OomKillDisable: old.OomKillDisable, - MemorySwappiness: old.MemorySwappiness, - NetPrioIfpriomap: old.NetPrioIfpriomap, - NetClsClassid: old.NetClsClassid, + Name: old.Name, + Parent: old.Parent, + Resources: &configs.Resources{ + AllowAllDevices: old.Resources.AllowAllDevices, + AllowedDevices: old.Resources.AllowedDevices, + DeniedDevices: old.Resources.DeniedDevices, + Memory: old.Resources.Memory, + MemoryReservation: old.Resources.MemoryReservation, + MemorySwap: old.Resources.MemorySwap, + KernelMemory: old.Resources.KernelMemory, + CpuShares: old.Resources.CpuShares, + CpuQuota: old.Resources.CpuQuota, + CpuPeriod: old.Resources.CpuPeriod, + CpuRtRuntime: old.Resources.CpuRtRuntime, + CpuRtPeriod: old.Resources.CpuRtPeriod, + CpusetCpus: old.Resources.CpusetCpus, + CpusetMems: old.Resources.CpusetMems, + BlkioWeight: old.Resources.BlkioWeight, + BlkioLeafWeight: old.Resources.BlkioLeafWeight, + Freezer: old.Resources.Freezer, + HugetlbLimit: old.Resources.HugetlbLimit, + OomKillDisable: old.Resources.OomKillDisable, + MemorySwappiness: old.Resources.MemorySwappiness, + NetPrioIfpriomap: old.Resources.NetPrioIfpriomap, + NetClsClassid: old.Resources.NetClsClassid, + }, } result.Cgroups = newCgroup diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go index 19d3adb939b3a..e35cb8a532e67 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go @@ -378,6 +378,8 @@ func toContainerStats1(s *cgroups.Stats, ret *info.ContainerStats) { func toContainerStats2(s *cgroups.Stats, ret *info.ContainerStats) { ret.Memory.Usage = s.MemoryStats.Usage.Usage 
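The compatibility shim above tracks a libcontainer API change: cgroup resource limits moved from flat fields on Cgroup into a nested Resources pointer, which is why the docker handler now nil-checks config.Cgroups.Resources before reading limits. The conversion shape, trimmed to two fields with illustrative local types:

```go
package main

import "fmt"

// Old flat cgroup config vs. the new shape with limits nested under Resources.
type oldCgroup struct {
	Name      string
	Memory    int64
	CpuShares int64
}

type resources struct {
	Memory    int64
	CpuShares int64
}

type newCgroup struct {
	Name      string
	Resources *resources
}

func convert(old oldCgroup) newCgroup {
	return newCgroup{
		Name: old.Name,
		Resources: &resources{
			Memory:    old.Memory,
			CpuShares: old.CpuShares,
		},
	}
}

func main() {
	c := convert(oldCgroup{Name: "docker", Memory: 1 << 30, CpuShares: 1024})
	// Consumers must nil-check Resources, as libcontainerConfigToContainerSpec now does.
	if c.Resources != nil {
		fmt.Println(c.Name, c.Resources.Memory, c.Resources.CpuShares)
	}
}
```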
ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt + ret.Memory.Cache = s.MemoryStats.Stats["cache"] + ret.Memory.RSS = s.MemoryStats.Stats["rss"] if v, ok := s.MemoryStats.Stats["pgfault"]; ok { ret.Memory.ContainerData.Pgfault = v ret.Memory.HierarchicalData.Pgfault = v diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go index 40a80419084b7..0306b4047a328 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go @@ -51,6 +51,8 @@ func (self *MockContainerHandler) ContainerReference() (info.ContainerReference, return args.Get(0).(info.ContainerReference), args.Error(1) } +func (self *MockContainerHandler) Start() {} + func (self *MockContainerHandler) Cleanup() {} func (self *MockContainerHandler) GetSpec() (info.ContainerSpec, error) { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go index 4e0a22d363144..b7dd500590e25 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go @@ -166,6 +166,9 @@ func (self *rawContainerHandler) GetRootNetworkDevices() ([]info.NetInfo, error) return nd, nil } +// Nothing to start up. +func (self *rawContainerHandler) Start() {} + // Nothing to clean up. func (self *rawContainerHandler) Cleanup() {} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/fs/fs.go b/Godeps/_workspace/src/github.com/google/cadvisor/fs/fs.go index af21a6e12b46a..c08fe44593061 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/fs/fs.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/fs/fs.go @@ -32,6 +32,7 @@ import ( "github.com/docker/docker/pkg/mount" "github.com/golang/glog" + zfs "github.com/mistifyio/go-zfs" ) const ( @@ -53,6 +54,8 @@ type RealFsInfo struct { // Map from label to block device path. // Labels are intent-specific tags that are auto-detected. labels map[string]string + + dmsetup dmsetupClient } type Context struct { @@ -66,58 +69,104 @@ func NewFsInfo(context Context) (FsInfo, error) { if err != nil { return nil, err } - partitions := make(map[string]partition, 0) - fsInfo := &RealFsInfo{} - fsInfo.labels = make(map[string]string, 0) + fsInfo := &RealFsInfo{ + partitions: make(map[string]partition, 0), + labels: make(map[string]string, 0), + dmsetup: &defaultDmsetupClient{}, + } supportedFsType := map[string]bool{ // all ext systems are checked through prefix. "btrfs": true, "xfs": true, + "zfs": true, } for _, mount := range mounts { + var Fstype string if !strings.HasPrefix(mount.Fstype, "ext") && !supportedFsType[mount.Fstype] { continue } // Avoid bind mounts. 
- if _, ok := partitions[mount.Source]; ok { + if _, ok := fsInfo.partitions[mount.Source]; ok { continue } - partitions[mount.Source] = partition{ + if mount.Fstype == "zfs" { + Fstype = mount.Fstype + } + fsInfo.partitions[mount.Source] = partition{ + fsType: Fstype, mountpoint: mount.Mountpoint, major: uint(mount.Major), minor: uint(mount.Minor), } } - if storageDriver, ok := context.DockerInfo["Driver"]; ok && storageDriver == "devicemapper" { - dev, major, minor, blockSize, err := dockerDMDevice(context.DockerInfo["DriverStatus"]) - if err != nil { - glog.Warningf("Could not get Docker devicemapper device: %v", err) - } else { - partitions[dev] = partition{ - fsType: "devicemapper", - major: major, - minor: minor, - blockSize: blockSize, - } - fsInfo.labels[LabelDockerImages] = dev - } - } - glog.Infof("Filesystem partitions: %+v", partitions) - fsInfo.partitions = partitions - fsInfo.addLabels(context) + + // need to call this before the log line below printing out the partitions, as this function may + // add a "partition" for devicemapper to fsInfo.partitions + fsInfo.addDockerImagesLabel(context) + + glog.Infof("Filesystem partitions: %+v", fsInfo.partitions) + fsInfo.addSystemRootLabel() return fsInfo, nil } -func (self *RealFsInfo) addLabels(context Context) { - dockerPaths := getDockerImagePaths(context) +// getDockerDeviceMapperInfo returns information about the devicemapper device and "partition" if +// docker is using devicemapper for its storage driver. If a loopback device is being used, don't +// return any information or error, as we want to report based on the actual partition where the +// loopback file resides, inside of the loopback file itself. +func (self *RealFsInfo) getDockerDeviceMapperInfo(dockerInfo map[string]string) (string, *partition, error) { + if storageDriver, ok := dockerInfo["Driver"]; ok && storageDriver != "devicemapper" { + return "", nil, nil + } + + var driverStatus [][]string + if err := json.Unmarshal([]byte(dockerInfo["DriverStatus"]), &driverStatus); err != nil { + return "", nil, err + } + + dataLoopFile := dockerStatusValue(driverStatus, "Data loop file") + if len(dataLoopFile) > 0 { + return "", nil, nil + } + + dev, major, minor, blockSize, err := dockerDMDevice(driverStatus, self.dmsetup) + if err != nil { + return "", nil, err + } + + return dev, &partition{ + fsType: "devicemapper", + major: major, + minor: minor, + blockSize: blockSize, + }, nil +} + +// addSystemRootLabel attempts to determine which device contains the mount for /. +func (self *RealFsInfo) addSystemRootLabel() { for src, p := range self.partitions { if p.mountpoint == "/" { if _, ok := self.labels[LabelSystemRoot]; !ok { self.labels[LabelSystemRoot] = src } } - self.updateDockerImagesPath(src, p.mountpoint, dockerPaths) - // TODO(rjnagal): Add label for docker devicemapper pool. + } +} + +// addDockerImagesLabel attempts to determine which device contains the mount for docker images. 
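getDockerDeviceMapperInfo decodes Docker's DriverStatus, a JSON-encoded [][]string of key/value pairs, and deliberately returns nothing when a "Data loop file" entry is present, so loopback-backed devicemapper is accounted against the partition holding the loop file. A sketch of that decode-and-skip logic:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// driverStatusValue scans the decoded pairs for one key, as dockerStatusValue does.
func driverStatusValue(status [][]string, key string) string {
	for _, pair := range status {
		if len(pair) == 2 && pair[0] == key {
			return pair[1]
		}
	}
	return ""
}

func main() {
	raw := `[["Pool Name","docker-pool"],["Data loop file","/var/lib/docker/devicemapper/data"]]`
	var status [][]string
	if err := json.Unmarshal([]byte(raw), &status); err != nil {
		panic(err)
	}
	// A non-empty "Data loop file" means loopback-backed devicemapper; the new
	// code bails out here rather than reporting the dm device.
	if driverStatusValue(status, "Data loop file") != "" {
		fmt.Println("loopback devicemapper: skip the dm device")
		return
	}
	fmt.Println("pool:", driverStatusValue(status, "Pool Name"))
}
```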
+func (self *RealFsInfo) addDockerImagesLabel(context Context) { + dockerDev, dockerPartition, err := self.getDockerDeviceMapperInfo(context.DockerInfo) + if err != nil { + glog.Warningf("Could not get Docker devicemapper device: %v", err) + } + if len(dockerDev) > 0 && dockerPartition != nil { + self.partitions[dockerDev] = *dockerPartition + self.labels[LabelDockerImages] = dockerDev + } else { + dockerPaths := getDockerImagePaths(context) + + for src, p := range self.partitions { + self.updateDockerImagesPath(src, p.mountpoint, dockerPaths) + } + } } @@ -128,7 +177,7 @@ func getDockerImagePaths(context Context) []string { // TODO(rjnagal): Detect docker root and graphdriver directories from docker info. dockerRoot := context.DockerRoot dockerImagePaths := []string{} - for _, dir := range []string{"devicemapper", "btrfs", "aufs", "overlay"} { + for _, dir := range []string{"devicemapper", "btrfs", "aufs", "overlay", "zfs"} { dockerImagePaths = append(dockerImagePaths, path.Join(dockerRoot, dir)) } for dockerRoot != "/" && dockerRoot != "." { @@ -201,6 +250,8 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er switch partition.fsType { case "devicemapper": total, free, avail, err = getDMStats(device, partition.blockSize) + case "zfs": + total, free, avail, err = getZfstats(device) default: total, free, avail, err = getVfsStats(partition.mountpoint) } @@ -336,20 +387,30 @@ func dockerStatusValue(status [][]string, target string) string { return "" } +// dmsetupClient knows how to interact with dmsetup to retrieve information about devicemapper. +type dmsetupClient interface { + table(poolName string) ([]byte, error) + //TODO add status(poolName string) ([]byte, error) and use it in getDMStats so we can unit test +} + +// defaultDmsetupClient implements the standard behavior for interacting with dmsetup.
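Hiding the dmsetup binary behind a one-method interface, as above, is what makes dockerDMDevice unit-testable: production code shells out, tests inject canned table output. Roughly, with illustrative type names:

```go
package main

import (
	"fmt"
	"os/exec"
)

// One-method seam around the dmsetup binary, mirroring the interface above.
type dmsetupClient interface {
	table(poolName string) ([]byte, error)
}

// Production implementation: shell out, exactly like defaultDmsetupClient.
type execDmsetup struct{}

func (execDmsetup) table(poolName string) ([]byte, error) {
	return exec.Command("dmsetup", "table", poolName).Output()
}

// Test implementation: canned output, no dmsetup binary required.
type fakeDmsetup struct{ out string }

func (f fakeDmsetup) table(string) ([]byte, error) { return []byte(f.out), nil }

// poolTable is any consumer written against the interface.
func poolTable(c dmsetupClient, pool string) (string, error) {
	out, err := c.table(pool)
	return string(out), err
}

func main() {
	line, _ := poolTable(fakeDmsetup{out: "0 2097152 thin-pool 253:0 253:1 128 32768 1"}, "docker-pool")
	fmt.Println(line)
}
```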
+type defaultDmsetupClient struct{} + +var _ dmsetupClient = &defaultDmsetupClient{} + +func (*defaultDmsetupClient) table(poolName string) ([]byte, error) { + return exec.Command("dmsetup", "table", poolName).Output() +} + // Devicemapper thin provisioning is detailed at // https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt -func dockerDMDevice(driverStatus string) (string, uint, uint, uint, error) { - var config [][]string - err := json.Unmarshal([]byte(driverStatus), &config) - if err != nil { - return "", 0, 0, 0, err - } - poolName := dockerStatusValue(config, "Pool Name") +func dockerDMDevice(driverStatus [][]string, dmsetup dmsetupClient) (string, uint, uint, uint, error) { + poolName := dockerStatusValue(driverStatus, "Pool Name") if len(poolName) == 0 { return "", 0, 0, 0, fmt.Errorf("Could not get dm pool name") } - out, err := exec.Command("dmsetup", "table", poolName).Output() + out, err := dmsetup.table(poolName) if err != nil { return "", 0, 0, 0, err } @@ -423,3 +484,15 @@ func parseDMStatus(dmStatus string) (uint64, uint64, error) { return used, total, nil } + +// getZfstats returns ZFS mount stats using zfsutils +func getZfstats(poolName string) (uint64, uint64, uint64, error) { + dataset, err := zfs.GetDataset(poolName) + if err != nil { + return 0, 0, 0, err + } + + total := dataset.Used + dataset.Avail + dataset.Usedbydataset + + return total, dataset.Avail, dataset.Avail, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go index 53b8b5d21fa1e..bb4fc322e2302 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go @@ -45,6 +45,8 @@ type ContainerSpec struct { // Metadata labels associated with this container. Labels map[string]string `json:"labels,omitempty"` + // Metadata envs associated with this container. Only whitelisted envs are added. + Envs map[string]string `json:"envs,omitempty"` HasCpu bool `json:"has_cpu"` Cpu CpuSpec `json:"cpu,omitempty"` @@ -68,6 +70,9 @@ type ContainerSpec struct { // Container reference contains enough information to uniquely identify a container type ContainerReference struct { + // The container id + Id string `json:"id,omitempty"` + // The absolute name of the container. This is unique on the machine. Name string `json:"name"` @@ -78,6 +83,8 @@ type ContainerReference struct { // Namespace under which the aliases of a container are unique. // An example of a namespace is "docker" for Docker containers. Namespace string `json:"namespace,omitempty"` + + Labels map[string]string `json:"labels,omitempty"` } // Sorts by container name. @@ -306,6 +313,15 @@ type MemoryStats struct { // Units: Bytes. Usage uint64 `json:"usage"` + // Number of bytes of page cache memory. + // Units: Bytes. + Cache uint64 `json:"cache"` + + // The amount of anonymous and swap cache memory (includes transparent + // hugepages). + // Units: Bytes. + RSS uint64 `json:"rss"` + // The amount of working set memory, this includes recently accessed memory, // dirty memory, and kernel memory. Working set is <= "usage". // Units: Bytes. @@ -387,6 +403,10 @@ type FsStats struct { // Number of bytes that is consumed by the container on this filesystem. Usage uint64 `json:"usage"` + // Base Usage that is consumed by the container's writable layer. + // This field is only applicable for docker container's as of now. 
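getZfstats, added at the end of fs.go in this hunk, derives capacity numbers from a ZFS dataset's space accounting via the newly vendored go-zfs package. Isolated below; this only does real work against a live ZFS dataset, and the dataset name is a placeholder:

```go
package main

import (
	"fmt"

	zfs "github.com/mistifyio/go-zfs"
)

// zfsStats follows the arithmetic of getZfstats: total capacity is the sum of
// space already used, space still available, and space used by the dataset
// itself; both "free" and "avail" report the dataset's Avail.
func zfsStats(poolName string) (total, free, avail uint64, err error) {
	dataset, err := zfs.GetDataset(poolName)
	if err != nil {
		return 0, 0, 0, err
	}
	total = dataset.Used + dataset.Avail + dataset.Usedbydataset
	return total, dataset.Avail, dataset.Avail, nil
}

func main() {
	if _, _, _, err := zfsStats("zroot/docker"); err != nil {
		fmt.Println("no zfs dataset here:", err)
	}
}
```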
+ BaseUsage uint64 `json:"base_usage"` + // Number of bytes available for non-root user. Available uint64 `json:"available"` diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/machine.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/machine.go index 7f50ce3b244fb..f26291c11254e 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/machine.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/machine.go @@ -136,7 +136,7 @@ type MachineInfo struct { CpuFrequency uint64 `json:"cpu_frequency_khz"` // The amount of memory (in bytes) in this machine - MemoryCapacity int64 `json:"memory_capacity"` + MemoryCapacity uint64 `json:"memory_capacity"` // The machine id MachineID string `json:"machine_id"` diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/test/datagen.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/test/datagen.go index 24431692f002a..42a0526d418e8 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/test/datagen.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/test/datagen.go @@ -44,6 +44,8 @@ func GenerateRandomStats(numStats, numCores int, duration time.Duration) []*info stats.Cpu.Usage.User = stats.Cpu.Usage.Total stats.Cpu.Usage.System = 0 stats.Memory.Usage = uint64(rand.Int63n(4096)) + stats.Memory.Cache = uint64(rand.Int63n(4096)) + stats.Memory.RSS = uint64(rand.Int63n(4096)) ret[i] = stats } return ret diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go index 3c854260bd2c2..805f73d30b019 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go @@ -52,6 +52,14 @@ type MemorySpec struct { SwapLimit uint64 `json:"swap_limit,omitempty"` } +type ContainerInfo struct { + // Describes the container. + Spec ContainerSpec `json:"spec,omitempty"` + + // Historical statistics gathered from the container. + Stats []*ContainerStats `json:"stats,omitempty"` +} + type ContainerSpec struct { // Time at which the container was created. CreationTime time.Time `json:"creation_time,omitempty"` @@ -66,6 +74,8 @@ type ContainerSpec struct { // Metadata labels associated with this container. Labels map[string]string `json:"labels,omitempty"` + // Metadata envs associated with this container. Only whitelisted envs are added. + Envs map[string]string `json:"envs,omitempty"` HasCpu bool `json:"has_cpu"` Cpu CpuSpec `json:"cpu,omitempty"` @@ -85,7 +95,7 @@ type ContainerSpec struct { Image string `json:"image,omitempty"` } -type ContainerStats struct { +type DeprecatedContainerStats struct { // The time of this stat point. Timestamp time.Time `json:"timestamp"` // CPU statistics @@ -114,6 +124,28 @@ type ContainerStats struct { CustomMetrics map[string][]v1.MetricVal `json:"custom_metrics,omitempty"` } +type ContainerStats struct { + // The time of this stat point. 
+ Timestamp time.Time `json:"timestamp"` + // CPU statistics + // In nanoseconds (aggregated) + Cpu *v1.CpuStats `json:"cpu,omitempty"` + // In nanocores per second (instantaneous) + CpuInst *CpuInstStats `json:"cpu_inst,omitempty"` + // Disk IO statistics + DiskIo *v1.DiskIoStats `json:"diskio,omitempty"` + // Memory statistics + Memory *v1.MemoryStats `json:"memory,omitempty"` + // Network statistics + Network *NetworkStats `json:"network,omitempty"` + // Filesystem statistics + Filesystem *FilesystemStats `json:"filesystem,omitempty"` + // Task load statistics + Load *v1.LoadStats `json:"load_stats,omitempty"` + // Custom Metrics + CustomMetrics map[string][]v1.MetricVal `json:"custom_metrics,omitempty"` +} + type Percentiles struct { // Indicates whether the stats are present or not. // If true, values below do not have any data. @@ -252,3 +284,11 @@ type CpuInstUsage struct { // Unit: nanocores per second System uint64 `json:"system"` } + +// Filesystem usage statistics. +type FilesystemStats struct { + // Total Number of bytes consumed by container. + TotalUsageBytes *uint64 `json:"totalUsageBytes,omitempty"` + // Number of bytes consumed by a container through its root filesystem. + BaseUsageBytes *uint64 `json:"baseUsageBytes,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/conversion.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/conversion.go new file mode 100644 index 0000000000000..56aef777caff4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/conversion.go @@ -0,0 +1,322 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
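Note that the new v2 ContainerStats and FilesystemStats above use pointer fields with omitempty where the deprecated struct used plain values plus Has* booleans. With plain integers, omitempty would also drop legitimate zeros; a nil pointer cleanly encodes "not collected". For example:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type fsStats struct {
	TotalUsageBytes *uint64 `json:"totalUsageBytes,omitempty"`
	BaseUsageBytes  *uint64 `json:"baseUsageBytes,omitempty"`
}

func main() {
	zero := uint64(0)
	withZero, _ := json.Marshal(fsStats{TotalUsageBytes: &zero}) // zero survives
	absent, _ := json.Marshal(fsStats{})                         // field omitted
	fmt.Println(string(withZero)) // {"totalUsageBytes":0}
	fmt.Println(string(absent))   // {}
}
```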
+ +package v2 + +import ( + "fmt" + "time" + + "github.com/golang/glog" + "github.com/google/cadvisor/info/v1" +) + +func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats { + var result []MachineFsStats + for _, stat := range fsStats { + readDuration := time.Millisecond * time.Duration(stat.ReadTime) + writeDuration := time.Millisecond * time.Duration(stat.WriteTime) + ioDuration := time.Millisecond * time.Duration(stat.IoTime) + weightedDuration := time.Millisecond * time.Duration(stat.WeightedIoTime) + result = append(result, MachineFsStats{ + Device: stat.Device, + Capacity: &stat.Limit, + Usage: &stat.Usage, + Available: &stat.Available, + DiskStats: DiskStats{ + ReadsCompleted: &stat.ReadsCompleted, + ReadsMerged: &stat.ReadsMerged, + SectorsRead: &stat.SectorsRead, + ReadDuration: &readDuration, + WritesCompleted: &stat.WritesCompleted, + WritesMerged: &stat.WritesMerged, + SectorsWritten: &stat.SectorsWritten, + WriteDuration: &writeDuration, + IoInProgress: &stat.IoInProgress, + IoDuration: &ioDuration, + WeightedIoDuration: &weightedDuration, + }, + }) + } + return result +} + +func MachineStatsFromV1(cont *v1.ContainerInfo) []MachineStats { + var stats []MachineStats + var last *v1.ContainerStats + for _, val := range cont.Stats { + stat := MachineStats{ + Timestamp: val.Timestamp, + } + if cont.Spec.HasCpu { + stat.Cpu = &val.Cpu + cpuInst, err := InstCpuStats(last, val) + if err != nil { + glog.Warningf("Could not get instant cpu stats: %v", err) + } else { + stat.CpuInst = cpuInst + } + last = val + } + if cont.Spec.HasMemory { + stat.Memory = &val.Memory + } + if cont.Spec.HasNetwork { + stat.Network = &NetworkStats{ + // FIXME: Use reflection instead. + Tcp: TcpStat(val.Network.Tcp), + Tcp6: TcpStat(val.Network.Tcp6), + Interfaces: val.Network.Interfaces, + } + } + if cont.Spec.HasFilesystem { + stat.Filesystem = machineFsStatsFromV1(val.Filesystem) + } + // TODO(rjnagal): Handle load stats. + stats = append(stats, stat) + } + return stats +} + +func ContainerStatsFromV1(spec *v1.ContainerSpec, stats []*v1.ContainerStats) []*ContainerStats { + newStats := make([]*ContainerStats, 0, len(stats)) + var last *v1.ContainerStats + for _, val := range stats { + stat := &ContainerStats{ + Timestamp: val.Timestamp, + } + if spec.HasCpu { + stat.Cpu = &val.Cpu + cpuInst, err := InstCpuStats(last, val) + if err != nil { + glog.Warningf("Could not get instant cpu stats: %v", err) + } else { + stat.CpuInst = cpuInst + } + last = val + } + if spec.HasMemory { + stat.Memory = &val.Memory + } + if spec.HasNetwork { + // TODO: Handle TcpStats + stat.Network = &NetworkStats{ + Interfaces: val.Network.Interfaces, + } + } + if spec.HasFilesystem { + if len(val.Filesystem) == 1 { + stat.Filesystem = &FilesystemStats{ + TotalUsageBytes: &val.Filesystem[0].Usage, + BaseUsageBytes: &val.Filesystem[0].BaseUsage, + } + } else if len(val.Filesystem) > 1 { + // Cannot handle multiple devices per container. + glog.V(2).Infof("failed to handle multiple devices for container. Skipping Filesystem stats") + } + } + if spec.HasDiskIo { + stat.DiskIo = &val.DiskIo + } + if spec.HasCustomMetrics { + stat.CustomMetrics = val.CustomMetrics + } + // TODO(rjnagal): Handle load stats. 
+ newStats = append(newStats, stat) + } + return newStats +} + +func DeprecatedStatsFromV1(cont *v1.ContainerInfo) []DeprecatedContainerStats { + stats := make([]DeprecatedContainerStats, 0, len(cont.Stats)) + var last *v1.ContainerStats + for _, val := range cont.Stats { + stat := DeprecatedContainerStats{ + Timestamp: val.Timestamp, + HasCpu: cont.Spec.HasCpu, + HasMemory: cont.Spec.HasMemory, + HasNetwork: cont.Spec.HasNetwork, + HasFilesystem: cont.Spec.HasFilesystem, + HasDiskIo: cont.Spec.HasDiskIo, + HasCustomMetrics: cont.Spec.HasCustomMetrics, + } + if stat.HasCpu { + stat.Cpu = val.Cpu + cpuInst, err := InstCpuStats(last, val) + if err != nil { + glog.Warningf("Could not get instant cpu stats: %v", err) + } else { + stat.CpuInst = cpuInst + } + last = val + } + if stat.HasMemory { + stat.Memory = val.Memory + } + if stat.HasNetwork { + stat.Network.Interfaces = val.Network.Interfaces + } + if stat.HasFilesystem { + stat.Filesystem = val.Filesystem + } + if stat.HasDiskIo { + stat.DiskIo = val.DiskIo + } + if stat.HasCustomMetrics { + stat.CustomMetrics = val.CustomMetrics + } + // TODO(rjnagal): Handle load stats. + stats = append(stats, stat) + } + return stats +} + +func InstCpuStats(last, cur *v1.ContainerStats) (*CpuInstStats, error) { + if last == nil { + return nil, nil + } + if !cur.Timestamp.After(last.Timestamp) { + return nil, fmt.Errorf("container stats move backwards in time") + } + if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) { + return nil, fmt.Errorf("different number of cpus") + } + timeDelta := cur.Timestamp.Sub(last.Timestamp) + if timeDelta <= 100*time.Millisecond { + return nil, fmt.Errorf("time delta unexpectedly small") + } + // Nanoseconds to gain precision and avoid having zero seconds if the + // difference between the timestamps is just under a second + timeDeltaNs := uint64(timeDelta.Nanoseconds()) + convertToRate := func(lastValue, curValue uint64) (uint64, error) { + if curValue < lastValue { + return 0, fmt.Errorf("cumulative stats decrease") + } + valueDelta := curValue - lastValue + return (valueDelta * 1e9) / timeDeltaNs, nil + } + total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total) + if err != nil { + return nil, err + } + percpu := make([]uint64, len(last.Cpu.Usage.PerCpu)) + for i := range percpu { + var err error + percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i]) + if err != nil { + return nil, err + } + } + user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User) + if err != nil { + return nil, err + } + system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System) + if err != nil { + return nil, err + } + return &CpuInstStats{ + Usage: CpuInstUsage{ + Total: total, + PerCpu: percpu, + User: user, + System: system, + }, + }, nil +} + +// Get V2 container spec from v1 container info. 
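The heart of InstCpuStats is converting cumulative CPU counters into an instantaneous rate: (cur - last) * 1e9 / elapsed-nanoseconds, i.e. nanoseconds of CPU consumed per second of wall time (nanocores). The guards reject counter resets and windows too short (at most 100ms) to divide meaningfully. The core conversion, isolated:

```go
package main

import (
	"fmt"
	"time"
)

// toRate turns a cumulative counter delta into a per-second rate in nanocores.
func toRate(last, cur uint64, delta time.Duration) (uint64, error) {
	if delta <= 100*time.Millisecond {
		return 0, fmt.Errorf("time delta unexpectedly small")
	}
	if cur < last {
		return 0, fmt.Errorf("cumulative stats decrease")
	}
	// Nanoseconds gain precision and avoid zero seconds for sub-second windows.
	return (cur - last) * 1e9 / uint64(delta.Nanoseconds()), nil
}

func main() {
	// 500ms of CPU time consumed over a 1s window -> 0.5 cores = 5e8 nanocores.
	rate, err := toRate(1e9, 1.5e9, time.Second)
	if err != nil {
		panic(err)
	}
	fmt.Println(rate) // 500000000
}
```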
+func ContainerSpecFromV1(specV1 *v1.ContainerSpec, aliases []string, namespace string) ContainerSpec { + specV2 := ContainerSpec{ + CreationTime: specV1.CreationTime, + HasCpu: specV1.HasCpu, + HasMemory: specV1.HasMemory, + HasFilesystem: specV1.HasFilesystem, + HasNetwork: specV1.HasNetwork, + HasDiskIo: specV1.HasDiskIo, + HasCustomMetrics: specV1.HasCustomMetrics, + Image: specV1.Image, + Labels: specV1.Labels, + } + if specV1.HasCpu { + specV2.Cpu.Limit = specV1.Cpu.Limit + specV2.Cpu.MaxLimit = specV1.Cpu.MaxLimit + specV2.Cpu.Mask = specV1.Cpu.Mask + } + if specV1.HasMemory { + specV2.Memory.Limit = specV1.Memory.Limit + specV2.Memory.Reservation = specV1.Memory.Reservation + specV2.Memory.SwapLimit = specV1.Memory.SwapLimit + } + if specV1.HasCustomMetrics { + specV2.CustomMetrics = specV1.CustomMetrics + } + specV2.Aliases = aliases + specV2.Namespace = namespace + return specV2 +} + +func instCpuStats(last, cur *v1.ContainerStats) (*CpuInstStats, error) { + if last == nil { + return nil, nil + } + if !cur.Timestamp.After(last.Timestamp) { + return nil, fmt.Errorf("container stats move backwards in time") + } + if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) { + return nil, fmt.Errorf("different number of cpus") + } + timeDelta := cur.Timestamp.Sub(last.Timestamp) + if timeDelta <= 100*time.Millisecond { + return nil, fmt.Errorf("time delta unexpectedly small") + } + // Nanoseconds to gain precision and avoid having zero seconds if the + // difference between the timestamps is just under a second + timeDeltaNs := uint64(timeDelta.Nanoseconds()) + convertToRate := func(lastValue, curValue uint64) (uint64, error) { + if curValue < lastValue { + return 0, fmt.Errorf("cumulative stats decrease") + } + valueDelta := curValue - lastValue + return (valueDelta * 1e9) / timeDeltaNs, nil + } + total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total) + if err != nil { + return nil, err + } + percpu := make([]uint64, len(last.Cpu.Usage.PerCpu)) + for i := range percpu { + var err error + percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i]) + if err != nil { + return nil, err + } + } + user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User) + if err != nil { + return nil, err + } + system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System) + if err != nil { + return nil, err + } + return &CpuInstStats{ + Usage: CpuInstUsage{ + Total: total, + PerCpu: percpu, + User: user, + System: system, + }, + }, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go index 4aef3d835fb0d..b6c3f24f3598b 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go @@ -16,6 +16,8 @@ package v2 import ( // TODO(rjnagal): Move structs from v1. + "time" + "github.com/google/cadvisor/info/v1" ) @@ -39,7 +41,7 @@ type Attributes struct { CpuFrequency uint64 `json:"cpu_frequency_khz"` // The amount of memory (in bytes) in this machine - MemoryCapacity int64 `json:"memory_capacity"` + MemoryCapacity uint64 `json:"memory_capacity"` // The machine id MachineID string `json:"machine_id"` @@ -86,3 +88,97 @@ func GetAttributes(mi *v1.MachineInfo, vi *v1.VersionInfo) Attributes { InstanceType: mi.InstanceType, } } + +// MachineStats contains usage statistics for the entire machine. +type MachineStats struct { + // The time of this stat point. 
+ Timestamp time.Time `json:"timestamp"` + // In nanoseconds (aggregated) + Cpu *v1.CpuStats `json:"cpu,omitempty"` + // In nanocores per second (instantaneous) + CpuInst *CpuInstStats `json:"cpu_inst,omitempty"` + // Memory statistics + Memory *v1.MemoryStats `json:"memory,omitempty"` + // Network statistics + Network *NetworkStats `json:"network,omitempty"` + // Filesystem statistics + Filesystem []MachineFsStats `json:"filesystem,omitempty"` + // Task load statistics + Load *v1.LoadStats `json:"load_stats,omitempty"` +} + +// MachineFsStats contains per filesystem capacity and usage information. +type MachineFsStats struct { + // The block device name associated with the filesystem. + Device string `json:"device"` + + // Number of bytes that can be consumed on this filesystem. + Capacity *uint64 `json:"capacity,omitempty"` + + // Number of bytes that is currently consumed on this filesystem. + Usage *uint64 `json:"usage,omitempty"` + + // Number of bytes available for non-root user on this filesystem. + Available *uint64 `json:"available,omitempty"` + + // DiskStats for this device. + DiskStats `json:"inline"` +} + +// DiskStats contains per partition usage information. +// This information is only available at the machine level. +type DiskStats struct { + // Number of reads completed + // This is the total number of reads completed successfully. + ReadsCompleted *uint64 `json:"reads_completed,omitempty"` + + // Number of reads merged + // Reads and writes which are adjacent to each other may be merged for + // efficiency. Thus two 4K reads may become one 8K read before it is + // ultimately handed to the disk, and so it will be counted (and queued) + // as only one I/O. This field lets you know how often this was done. + ReadsMerged *uint64 `json:"reads_merged,omitempty"` + + // Number of sectors read + // This is the total number of sectors read successfully. + SectorsRead *uint64 `json:"sectors_read,omitempty"` + + // Time spent reading + // This is the total number of milliseconds spent by all reads (as + // measured from __make_request() to end_that_request_last()). + ReadDuration *time.Duration `json:"read_duration,omitempty"` + + // Number of writes completed + // This is the total number of writes completed successfully. + WritesCompleted *uint64 `json:"writes_completed,omitempty"` + + // Number of writes merged + // See the description of reads merged. + WritesMerged *uint64 `json:"writes_merged,omitempty"` + + // Number of sectors written + // This is the total number of sectors written successfully. + SectorsWritten *uint64 `json:"sectors_written,omitempty"` + + // Time spent writing + // This is the total number of milliseconds spent by all writes (as + // measured from __make_request() to end_that_request_last()). + WriteDuration *time.Duration `json:"write_duration,omitempty"` + + // Number of I/Os currently in progress + // The only field that should go to zero. Incremented as requests are + // given to appropriate struct request_queue and decremented as they finish. + IoInProgress *uint64 `json:"io_in_progress,omitempty"` + + // Time spent doing I/Os + // This field increases so long as field 9 is nonzero. + IoDuration *time.Duration `json:"io_duration,omitempty"` + + // weighted time spent doing I/Os + // This field is incremented at each I/O start, I/O completion, I/O + // merge, or read of these stats by the number of I/Os in progress + // (field 9) times the number of milliseconds spent doing I/O since the + // last update of this field. 
This can provide an easy measure of both + // I/O completion time and the backlog that may be accumulating. + WeightedIoDuration *time.Duration `json:"weighted_io_duration,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go index 2ebae11da1445..b1d8d3309be53 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go @@ -19,6 +19,7 @@ import ( "fmt" "io/ioutil" "math" + "math/rand" "os/exec" "path" "regexp" @@ -36,14 +37,14 @@ import ( "github.com/google/cadvisor/summary" "github.com/google/cadvisor/utils/cpuload" - "github.com/docker/docker/pkg/units" + units "github.com/docker/go-units" "github.com/golang/glog" ) // Housekeeping interval. var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings") -var cgroupPathRegExp = regexp.MustCompile(`.*devices.*:(.*?)[,;$].*`) +var cgroupPathRegExp = regexp.MustCompile(`devices[^:]*:(.*?)[,;$]`) type containerInfo struct { info.ContainerReference @@ -78,6 +79,17 @@ type containerData struct { collectorManager collector.CollectorManager } +// jitter returns a time.Duration between duration and duration + maxFactor * duration, +// to allow clients to avoid converging on periodic behavior. If maxFactor is 0.0, a +// suggested default value will be chosen. +func jitter(duration time.Duration, maxFactor float64) time.Duration { + if maxFactor <= 0.0 { + maxFactor = 1.0 + } + wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) + return wait +} + func (c *containerData) Start() error { go c.housekeeping() return nil @@ -356,11 +368,14 @@ func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Tim } } - return lastHousekeeping.Add(self.housekeepingInterval) + return lastHousekeeping.Add(jitter(self.housekeepingInterval, 1.0)) } // TODO(vmarmol): Implement stats collecting as a custom collector. func (c *containerData) housekeeping() { + // Start any background goroutines - must be cleaned up in c.handler.Cleanup(). + c.handler.Start() + // Long housekeeping is either 100ms or half of the housekeeping interval. longHousekeeping := 100 * time.Millisecond if *HousekeepingInterval/2 < longHousekeeping { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go index 121580d62450c..5e4d9daf79400 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go @@ -62,6 +62,9 @@ type Manager interface { // Get information about a container. GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) + // Get V2 information about a container. + GetContainerInfoV2(containerName string, options v2.RequestOptions) (map[string]v2.ContainerInfo, error) + // Get information about all subcontainers of the specified container (includes self). SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) @@ -375,33 +378,8 @@ func (self *manager) GetContainerSpec(containerName string, options v2.RequestOp // Get V2 container spec from v1 container info. 
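The jitter helper added to manager/container.go above spreads each container's next housekeeping over [d, d + maxFactor*d) so that many containers polled on the same interval do not converge into lockstep. Reproduced as a standalone program:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitter returns a duration in [d, d + maxFactor*d), defaulting maxFactor to 1.
func jitter(d time.Duration, maxFactor float64) time.Duration {
	if maxFactor <= 0.0 {
		maxFactor = 1.0
	}
	return d + time.Duration(rand.Float64()*maxFactor*float64(d))
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(jitter(time.Second, 1.0)) // somewhere in [1s, 2s)
	}
}
```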
func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec { - specV1 := self.getAdjustedSpec(cinfo) - specV2 := v2.ContainerSpec{ - CreationTime: specV1.CreationTime, - HasCpu: specV1.HasCpu, - HasMemory: specV1.HasMemory, - HasFilesystem: specV1.HasFilesystem, - HasNetwork: specV1.HasNetwork, - HasDiskIo: specV1.HasDiskIo, - HasCustomMetrics: specV1.HasCustomMetrics, - Image: specV1.Image, - } - if specV1.HasCpu { - specV2.Cpu.Limit = specV1.Cpu.Limit - specV2.Cpu.MaxLimit = specV1.Cpu.MaxLimit - specV2.Cpu.Mask = specV1.Cpu.Mask - } - if specV1.HasMemory { - specV2.Memory.Limit = specV1.Memory.Limit - specV2.Memory.Reservation = specV1.Memory.Reservation - specV2.Memory.SwapLimit = specV1.Memory.SwapLimit - } - if specV1.HasCustomMetrics { - specV2.CustomMetrics = specV1.CustomMetrics - } - specV2.Aliases = cinfo.Aliases - specV2.Namespace = cinfo.Namespace - return specV2 + spec := self.getAdjustedSpec(cinfo) + return v2.ContainerSpecFromV1(&spec, cinfo.Aliases, cinfo.Namespace) } func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec { @@ -417,7 +395,6 @@ func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec { return spec } -// Get a container by name. func (self *manager) GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) { cont, err := self.getContainerData(containerName) if err != nil { @@ -426,6 +403,34 @@ func (self *manager) GetContainerInfo(containerName string, query *info.Containe return self.containerDataToContainerInfo(cont, query) } +func (self *manager) GetContainerInfoV2(containerName string, options v2.RequestOptions) (map[string]v2.ContainerInfo, error) { + containers, err := self.getRequestedContainers(containerName, options) + if err != nil { + return nil, err + } + + infos := make(map[string]v2.ContainerInfo, len(containers)) + for name, container := range containers { + cinfo, err := container.GetInfo() + if err != nil { + return nil, err + } + + var nilTime time.Time // Ignored. + stats, err := self.memoryCache.RecentStats(name, nilTime, nilTime, options.Count) + if err != nil { + return nil, err + } + + infos[name] = v2.ContainerInfo{ + Spec: self.getV2Spec(cinfo), + Stats: v2.ContainerStatsFromV1(&cinfo.Spec, stats), + } + } + + return infos, nil +} + func (self *manager) containerDataToContainerInfo(cont *containerData, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) { // Get the info from the container. cinfo, err := cont.GetInfo() @@ -741,6 +746,18 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c // Create a container. func (m *manager) createContainer(containerName string) error { + m.containersLock.Lock() + defer m.containersLock.Unlock() + + namespacedName := namespacedContainerName{ + Name: containerName, + } + + // Check that the container didn't already exist. + if _, ok := m.containers[namespacedName]; ok { + return nil + } + handler, accept, err := container.NewContainerHandler(containerName, m.inHostNamespace) if err != nil { return err @@ -770,35 +787,15 @@ func (m *manager) createContainer(containerName string) error { return err } - // Add to the containers map. - alreadyExists := func() bool { - m.containersLock.Lock() - defer m.containersLock.Unlock() - - namespacedName := namespacedContainerName{ - Name: containerName, - } - - // Check that the container didn't already exist. 
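createContainer is restructured in this hunk so the containers lock is taken once at the top: the existence check and the later map insertion become one critical section, instead of a check performed after the handler was already built. A reduced model of that idiom (the real function also constructs the handler and aliases inside the section):

```go
package main

import (
	"fmt"
	"sync"
)

// registry models the manager's containers map plus its lock.
type registry struct {
	mu         sync.Mutex
	containers map[string]struct{}
}

// add registers a name exactly once; check and insert share one lock hold,
// so two concurrent callers cannot both observe "absent" and register twice.
func (r *registry) add(name string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if _, ok := r.containers[name]; ok {
		return false // already tracked
	}
	r.containers[name] = struct{}{}
	return true
}

func main() {
	r := &registry{containers: map[string]struct{}{}}
	fmt.Println(r.add("/docker/abc")) // true
	fmt.Println(r.add("/docker/abc")) // false
}
```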
- _, ok := m.containers[namespacedName] - if ok { - return true - } - - // Add the container name and all its aliases. The aliases must be within the namespace of the factory. - m.containers[namespacedName] = cont - for _, alias := range cont.info.Aliases { - m.containers[namespacedContainerName{ - Namespace: cont.info.Namespace, - Name: alias, - }] = cont - } - - return false - }() - if alreadyExists { - return nil + // Add the container name and all its aliases. The aliases must be within the namespace of the factory. + m.containers[namespacedName] = cont + for _, alias := range cont.info.Aliases { + m.containers[namespacedContainerName{ + Namespace: cont.info.Namespace, + Name: alias, + }] = cont } + glog.V(3).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace) contSpec, err := cont.handler.GetSpec() @@ -822,9 +819,7 @@ func (m *manager) createContainer(containerName string) error { } // Start the container's housekeeping. - cont.Start() - - return nil + return cont.Start() } func (m *manager) destroyContainer(containerName string) error { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go b/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go index 79e09d73a6e1d..adce51010e01a 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go @@ -16,6 +16,7 @@ package metrics import ( "fmt" + "regexp" "time" info "github.com/google/cadvisor/info/v1" @@ -126,6 +127,20 @@ func NewPrometheusCollector(infoProvider infoProvider, f ContainerNameToLabelsFu } return values }, + }, { + name: "container_memory_cache", + help: "Number of bytes of page cache memory.", + valueType: prometheus.GaugeValue, + getValues: func(s *info.ContainerStats) metricValues { + return metricValues{{value: float64(s.Memory.Cache)}} + }, + }, { + name: "container_memory_rss", + help: "Size of RSS in bytes.", + valueType: prometheus.GaugeValue, + getValues: func(s *info.ContainerStats) metricValues { + return metricValues{{value: float64(s.Memory.RSS)}} + }, }, { name: "container_memory_failcnt", help: "Number of memory usage hits limits", @@ -508,11 +523,20 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) if c.containerNameToLabels != nil { newLabels := c.containerNameToLabels(name) for k, v := range newLabels { - baseLabels = append(baseLabels, k) + baseLabels = append(baseLabels, sanitizeLabelName(k)) baseLabelValues = append(baseLabelValues, v) } } + for k, v := range container.Spec.Labels { + baseLabels = append(baseLabels, sanitizeLabelName(k)) + baseLabelValues = append(baseLabelValues, v) + } + for k, v := range container.Spec.Envs { + baseLabels = append(baseLabels, sanitizeLabelName(k)) + baseLabelValues = append(baseLabelValues, v) + } + // Container spec desc := prometheus.NewDesc("container_start_time_seconds", "Start time of the container since unix epoch in seconds.", baseLabels, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.CreationTime.Unix()), baseLabelValues...) @@ -571,3 +595,11 @@ func specMemoryValue(v uint64) float64 { } return float64(v) } + +var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) + +// sanitizeLabelName replaces anything that doesn't match +// client_label.LabelNameRE with an underscore. 
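sanitizeLabelName exists because Prometheus label names must match [a-zA-Z_][a-zA-Z0-9_]*, while docker labels and whitelisted env keys (e.g. io.kubernetes.pod.name) usually do not. The regexp replacement, runnable on its own:

```go
package main

import (
	"fmt"
	"regexp"
)

var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)

// sanitizeLabelName maps every character outside [a-zA-Z0-9_] to '_'.
func sanitizeLabelName(name string) string {
	return invalidLabelCharRE.ReplaceAllString(name, "_")
}

func main() {
	fmt.Println(sanitizeLabelName("io.kubernetes.pod.name")) // io_kubernetes_pod_name
}
```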
+func sanitizeLabelName(name string) string { + return invalidLabelCharRE.ReplaceAllString(name, "_") +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/elasticsearch/elasticsearch.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/elasticsearch/elasticsearch.go index 6a3f0c40fb211..b43beed2546c0 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/storage/elasticsearch/elasticsearch.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/elasticsearch/elasticsearch.go @@ -103,7 +103,8 @@ func (self *elasticStorage) AddStats(ref info.ContainerReference, stats *info.Co Do() if err != nil { // Handle error - panic(fmt.Errorf("failed to write stats to ElasticSearch- %s", err)) + fmt.Printf("failed to write stats to ElasticSearch - %s", err) + return } }() return nil @@ -135,14 +136,15 @@ func newStorage( ) if err != nil { // Handle error - panic(err) + return nil, fmt.Errorf("failed to create the elasticsearch client - %s", err) } // Ping the Elasticsearch server to get e.g. the version number info, code, err := client.Ping().URL(elasticHost).Do() if err != nil { // Handle error - panic(err) + return nil, fmt.Errorf("failed to ping the elasticsearch - %s", err) + } fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number) diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb.go index 58d97c621c24a..82739b58c2bd6 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb.go @@ -16,12 +16,14 @@ package influxdb import ( "fmt" + "net/url" "os" "sync" "time" info "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/storage" + "github.com/google/cadvisor/version" influxdb "github.com/influxdb/influxdb/client" ) @@ -31,39 +33,44 @@ func init() { } type influxdbStorage struct { - client *influxdb.Client - machineName string - tableName string - bufferDuration time.Duration - lastWrite time.Time - series []*influxdb.Series - lock sync.Mutex - readyToFlush func() bool + client *influxdb.Client + machineName string + database string + retentionPolicy string + bufferDuration time.Duration + lastWrite time.Time + points []*influxdb.Point + lock sync.Mutex + readyToFlush func() bool } +// Series names const ( - colTimestamp string = "time" - colMachineName string = "machine" - colContainerName string = "container_name" - colCpuCumulativeUsage string = "cpu_cumulative_usage" + // Cumulative CPU usage + serCpuUsageTotal string = "cpu_usage_total" + serCpuUsageSystem string = "cpu_usage_system" + serCpuUsageUser string = "cpu_usage_user" + serCpuUsagePerCpu string = "cpu_usage_per_cpu" + // Smoothed average of number of runnable threads x 1000. + serLoadAverage string = "load_average" // Memory Usage - colMemoryUsage string = "memory_usage" + serMemoryUsage string = "memory_usage" // Working set size - colMemoryWorkingSet string = "memory_working_set" + serMemoryWorkingSet string = "memory_working_set" // Cumulative count of bytes received. - colRxBytes string = "rx_bytes" + serRxBytes string = "rx_bytes" // Cumulative count of receive errors encountered. - colRxErrors string = "rx_errors" + serRxErrors string = "rx_errors" // Cumulative count of bytes transmitted. - colTxBytes string = "tx_bytes" + serTxBytes string = "tx_bytes" // Cumulative count of transmit errors encountered. 
- colTxErrors string = "tx_errors" + serTxErrors string = "tx_errors" // Filesystem device. - colFsDevice = "fs_device" + serFsDevice string = "fs_device" // Filesystem limit. - colFsLimit = "fs_limit" + serFsLimit string = "fs_limit" // Filesystem usage. - colFsUsage = "fs_usage" + serFsUsage string = "fs_usage" ) func new() (storage.StorageDriver, error) { @@ -83,84 +90,122 @@ func new() (storage.StorageDriver, error) { ) } -func (self *influxdbStorage) getSeriesDefaultValues( - ref info.ContainerReference, - stats *info.ContainerStats, - columns *[]string, - values *[]interface{}) { - // Timestamp - *columns = append(*columns, colTimestamp) - *values = append(*values, stats.Timestamp.UnixNano()/1E3) - - // Machine name - *columns = append(*columns, colMachineName) - *values = append(*values, self.machineName) - - // Container name - *columns = append(*columns, colContainerName) - if len(ref.Aliases) > 0 { - *values = append(*values, ref.Aliases[0]) - } else { - *values = append(*values, ref.Name) - } -} +// Field names +const ( + fieldValue string = "value" + fieldType string = "type" + fieldDevice string = "device" +) -// In order to maintain a fixed column format, we add a new series for each filesystem partition. -func (self *influxdbStorage) containerFilesystemStatsToSeries( +// Tag names +const ( + tagMachineName string = "machine" + tagContainerName string = "container_name" +) + +func (self *influxdbStorage) containerFilesystemStatsToPoints( ref info.ContainerReference, - stats *info.ContainerStats) (series []*influxdb.Series) { + stats *info.ContainerStats) (points []*influxdb.Point) { if len(stats.Filesystem) == 0 { - return series + return points } for _, fsStat := range stats.Filesystem { - columns := make([]string, 0) - values := make([]interface{}, 0) - self.getSeriesDefaultValues(ref, stats, &columns, &values) + tagsFsUsage := map[string]string{ + fieldDevice: fsStat.Device, + fieldType: "usage", + } + fieldsFsUsage := map[string]interface{}{ + fieldValue: int64(fsStat.Usage), + } + pointFsUsage := &influxdb.Point{ + Measurement: serFsUsage, + Tags: tagsFsUsage, + Fields: fieldsFsUsage, + } + + tagsFsLimit := map[string]string{ + fieldDevice: fsStat.Device, + fieldType: "limit", + } + fieldsFsLimit := map[string]interface{}{ + fieldValue: int64(fsStat.Limit), + } + pointFsLimit := &influxdb.Point{ + Measurement: serFsLimit, + Tags: tagsFsLimit, + Fields: fieldsFsLimit, + } + + points = append(points, pointFsUsage, pointFsLimit) + } - columns = append(columns, colFsDevice) - values = append(values, fsStat.Device) + self.tagPoints(ref, stats, points) - columns = append(columns, colFsLimit) - values = append(values, fsStat.Limit) + return points +} + +// Set tags and timestamp for all points of the batch. +// Points should inherit the tags that are set for BatchPoints, but that does not seem to work. 
+func (self *influxdbStorage) tagPoints(ref info.ContainerReference, stats *info.ContainerStats, points []*influxdb.Point) { + // Use container alias if possible + var containerName string + if len(ref.Aliases) > 0 { + containerName = ref.Aliases[0] + } else { + containerName = ref.Name + } - columns = append(columns, colFsUsage) - values = append(values, fsStat.Usage) - series = append(series, self.newSeries(columns, values)) + commonTags := map[string]string{ + tagMachineName: self.machineName, + tagContainerName: containerName, + } + for i := 0; i < len(points); i++ { + // merge with existing tags if any + addTagsToPoint(points[i], commonTags) + points[i].Time = stats.Timestamp + } - return series } -func (self *influxdbStorage) containerStatsToValues( +func (self *influxdbStorage) containerStatsToPoints( ref info.ContainerReference, stats *info.ContainerStats, -) (columns []string, values []interface{}) { - self.getSeriesDefaultValues(ref, stats, &columns, &values) - // Cumulative Cpu Usage - columns = append(columns, colCpuCumulativeUsage) - values = append(values, stats.Cpu.Usage.Total) +) (points []*influxdb.Point) { + // CPU usage: Total usage in nanoseconds + points = append(points, makePoint(serCpuUsageTotal, stats.Cpu.Usage.Total)) - // Memory Usage - columns = append(columns, colMemoryUsage) - values = append(values, stats.Memory.Usage) + // CPU usage: Time spent in system space (in nanoseconds) + points = append(points, makePoint(serCpuUsageSystem, stats.Cpu.Usage.System)) - // Working set size - columns = append(columns, colMemoryWorkingSet) - values = append(values, stats.Memory.WorkingSet) + // CPU usage: Time spent in user space (in nanoseconds) + points = append(points, makePoint(serCpuUsageUser, stats.Cpu.Usage.User)) - // Network stats. - columns = append(columns, colRxBytes) - values = append(values, stats.Network.RxBytes) + // CPU usage per CPU + for i := 0; i < len(stats.Cpu.Usage.PerCpu); i++ { + point := makePoint(serCpuUsagePerCpu, stats.Cpu.Usage.PerCpu[i]) + tags := map[string]string{"instance": fmt.Sprintf("%v", i)} + addTagsToPoint(point, tags) - columns = append(columns, colRxErrors) - values = append(values, stats.Network.RxErrors) + points = append(points, point) + } + + // Load Average + points = append(points, makePoint(serLoadAverage, stats.Cpu.LoadAverage)) + + // Memory Usage + points = append(points, makePoint(serMemoryUsage, stats.Memory.Usage)) - columns = append(columns, colTxBytes) - values = append(values, stats.Network.TxBytes) + // Working Set Size + points = append(points, makePoint(serMemoryWorkingSet, stats.Memory.WorkingSet)) - columns = append(columns, colTxErrors) - values = append(values, stats.Network.TxErrors) + // Network Stats + points = append(points, makePoint(serRxBytes, stats.Network.RxBytes)) + points = append(points, makePoint(serRxErrors, stats.Network.RxErrors)) + points = append(points, makePoint(serTxBytes, stats.Network.TxBytes)) + points = append(points, makePoint(serTxErrors, stats.Network.TxErrors)) - return columns, values + self.tagPoints(ref, stats, points) + + return points } func (self *influxdbStorage) OverrideReadyToFlush(readyToFlush func() bool) { @@ -175,27 +220,38 @@ func (self *influxdbStorage) AddStats(ref info.ContainerReference, stats *info.C if stats == nil { return nil } - var seriesToFlush []*influxdb.Series + var pointsToFlush []*influxdb.Point func() { // AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write.
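 // Points are buffered under the lock; once readyToFlush() reports the
 // buffer is due, they are handed off to pointsToFlush so the actual
 // network write below happens outside the lock.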
self.lock.Lock() defer self.lock.Unlock() - self.series = append(self.series, self.newSeries(self.containerStatsToValues(ref, stats))) - self.series = append(self.series, self.containerFilesystemStatsToSeries(ref, stats)...) + self.points = append(self.points, self.containerStatsToPoints(ref, stats)...) + self.points = append(self.points, self.containerFilesystemStatsToPoints(ref, stats)...) if self.readyToFlush() { - seriesToFlush = self.series - self.series = make([]*influxdb.Series, 0) + pointsToFlush = self.points + self.points = make([]*influxdb.Point, 0) self.lastWrite = time.Now() } }() - if len(seriesToFlush) > 0 { - err := self.client.WriteSeriesWithTimePrecision(seriesToFlush, influxdb.Microsecond) - if err != nil { + if len(pointsToFlush) > 0 { + points := make([]influxdb.Point, len(pointsToFlush)) + for i, p := range pointsToFlush { + points[i] = *p + } + + batchTags := map[string]string{tagMachineName: self.machineName} + bp := influxdb.BatchPoints{ + Points: points, + Database: self.database, + Tags: batchTags, + Time: stats.Timestamp, + } + response, err := self.client.Write(bp) + if err != nil || checkResponseForErrors(response) != nil { return fmt.Errorf("failed to write stats to influxDb - %s", err) } } - return nil } @@ -204,21 +260,9 @@ func (self *influxdbStorage) Close() error { return nil } -// Returns a new influxdb series. -func (self *influxdbStorage) newSeries(columns []string, points []interface{}) *influxdb.Series { - out := &influxdb.Series{ - Name: self.tableName, - Columns: columns, - // There's only one point for each stats - Points: make([][]interface{}, 1), - } - out.Points[0] = points - return out -} - // machineName: A unique identifier to identify the host that current cAdvisor // instance is running on. -// influxdbHost: The host which runs influxdb. +// influxdbHost: The host which runs influxdb (host:port) func newStorage( machineName, tablename, @@ -229,28 +273,97 @@ func newStorage( isSecure bool, bufferDuration time.Duration, ) (*influxdbStorage, error) { - config := &influxdb.ClientConfig{ - Host: influxdbHost, - Username: username, - Password: password, - Database: database, - IsSecure: isSecure, + url := &url.URL{ + Scheme: "http", + Host: influxdbHost, } - client, err := influxdb.NewClient(config) + if isSecure { + url.Scheme = "https" + } + + config := &influxdb.Config{ + URL: *url, + Username: username, + Password: password, + UserAgent: fmt.Sprintf("%v/%v", "cAdvisor", version.Info["version"]), + } + client, err := influxdb.NewClient(*config) if err != nil { return nil, err } - // TODO(monnand): With go 1.3, we cannot compress data now. 
- client.DisableCompression() ret := &influxdbStorage{ client: client, machineName: machineName, - tableName: tablename, + database: database, bufferDuration: bufferDuration, lastWrite: time.Now(), - series: make([]*influxdb.Series, 0), + points: make([]*influxdb.Point, 0), } ret.readyToFlush = ret.defaultReadyToFlush return ret, nil } + +// Creates a measurement point with a single value field +func makePoint(name string, value interface{}) *influxdb.Point { + fields := map[string]interface{}{ + fieldValue: toSignedIfUnsigned(value), + } + + return &influxdb.Point{ + Measurement: name, + Fields: fields, + } +} + +// Adds additional tags to the existing tags of a point +func addTagsToPoint(point *influxdb.Point, tags map[string]string) { + if point.Tags == nil { + point.Tags = tags + } else { + for k, v := range tags { + point.Tags[k] = v + } + } +} + +// Checks response for possible errors +func checkResponseForErrors(response *influxdb.Response) error { + const msg = "failed to write stats to influxDb - %s" + + if response != nil && response.Err != nil { + return fmt.Errorf(msg, response.Err) + } + if response != nil && response.Results != nil { + for _, result := range response.Results { + if result.Err != nil { + return fmt.Errorf(msg, result.Err) + } + if result.Series != nil { + for _, row := range result.Series { + if row.Err != nil { + return fmt.Errorf(msg, row.Err) + } + } + } + } + } + return nil +} + +// Some stats have type unsigned integer, but the InfluxDB client accepts only signed integers. +func toSignedIfUnsigned(value interface{}) interface{} { + switch v := value.(type) { + case uint64: + return int64(v) + case uint32: + return int32(v) + case uint16: + return int16(v) + case uint8: + return int8(v) + case uint: + return int(v) + } + return value +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/kafka/kafka.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/kafka/kafka.go new file mode 100644 index 0000000000000..e43a374c81dc2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/kafka/kafka.go @@ -0,0 +1,114 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
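For reference, a sketch of how the new influxdb point helpers above fit together: makePoint builds a single-field point, the value is converted to a signed type as in toSignedIfUnsigned, and the common machine/container tags plus the sample timestamp are applied as in tagPoints. This is illustrative only and not part of the vendored files; the host and container names are hypothetical:

    package main

    import (
        "fmt"
        "time"

        influxdb "github.com/influxdb/influxdb/client"
    )

    func main() {
        var memUsage uint64 = 1 << 20 // cadvisor counters are unsigned

        // Single "value" field, converted to int64 because the client
        // only accepts signed integers.
        point := &influxdb.Point{
            Measurement: "memory_usage",
            Fields:      map[string]interface{}{"value": int64(memUsage)},
        }

        // Common tags and the sample timestamp, applied to every point
        // of the batch.
        point.Tags = map[string]string{
            "machine":        "node-1",       // hypothetical host
            "container_name": "/docker/abcd", // hypothetical container
        }
        point.Time = time.Now()

        fmt.Printf("%s tags=%v fields=%v\n", point.Measurement, point.Tags, point.Fields)
    }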
+ +package kafka + +import ( + "encoding/json" + "flag" + "os" + "strings" + "time" + + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/storage" + "github.com/google/cadvisor/utils/container" + + kafka "github.com/Shopify/sarama" + "github.com/golang/glog" +) + +func init() { + storage.RegisterStorageDriver("kafka", new) +} + +var ( + brokers = flag.String("storage_driver_kafka_broker_list", "localhost:9092", "kafka broker(s) csv") + topic = flag.String("storage_driver_kafka_topic", "stats", "kafka topic") +) + +type kafkaStorage struct { + producer kafka.AsyncProducer + topic string + machineName string +} + +type detailSpec struct { + Timestamp time.Time `json:"timestamp"` + MachineName string `json:"machine_name,omitempty"` + ContainerName string `json:"container_Name,omitempty"` + ContainerID string `json:"container_Id,omitempty"` + ContainerLabels map[string]string `json:"container_labels,omitempty"` + ContainerStats *info.ContainerStats `json:"container_stats,omitempty"` +} + +func (driver *kafkaStorage) infoToDetailSpec(ref info.ContainerReference, stats *info.ContainerStats) *detailSpec { + timestamp := time.Now() + containerID := ref.Id + containerLabels := ref.Labels + containerName := container.GetPreferredName(ref) + + detail := &detailSpec{ + Timestamp: timestamp, + MachineName: driver.machineName, + ContainerName: containerName, + ContainerID: containerID, + ContainerLabels: containerLabels, + ContainerStats: stats, + } + return detail +} + +func (driver *kafkaStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error { + detail := driver.infoToDetailSpec(ref, stats) + b, err := json.Marshal(detail) + if err != nil { + return err + } + + driver.producer.Input() <- &kafka.ProducerMessage{ + Topic: driver.topic, + Value: kafka.StringEncoder(b), + } + + return nil +} + +func (self *kafkaStorage) Close() error { + return self.producer.Close() +} + +func new() (storage.StorageDriver, error) { + machineName, err := os.Hostname() + if err != nil { + return nil, err + } + return newStorage(machineName) +} + +func newStorage(machineName string) (storage.StorageDriver, error) { + config := kafka.NewConfig() + config.Producer.RequiredAcks = kafka.WaitForAll + + brokerList := strings.Split(*brokers, ",") + glog.V(4).Infof("Kafka brokers: %q", *brokers) + + producer, err := kafka.NewAsyncProducer(brokerList, config) + if err != nil { + return nil, err + } + ret := &kafkaStorage{ + producer: producer, + topic: *topic, + machineName: machineName, + } + return ret, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/container/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/container/container.go new file mode 100644 index 0000000000000..ea956a5c6e9f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/container/container.go @@ -0,0 +1,31 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
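The kafka driver above emits one JSON-serialized detailSpec per sample. As a reference, here is a minimal sketch of the envelope it produces (struct tags copied from the driver, stats payload omitted; the machine and container names are hypothetical). Note that the mixed-case container_Name and container_Id keys come straight from the driver's struct tags:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    // Mirrors the driver's detailSpec, minus the stats payload.
    type detailSpec struct {
        Timestamp       time.Time         `json:"timestamp"`
        MachineName     string            `json:"machine_name,omitempty"`
        ContainerName   string            `json:"container_Name,omitempty"`
        ContainerID     string            `json:"container_Id,omitempty"`
        ContainerLabels map[string]string `json:"container_labels,omitempty"`
    }

    func main() {
        b, err := json.Marshal(detailSpec{
            Timestamp:       time.Date(2016, 2, 1, 0, 0, 0, 0, time.UTC),
            MachineName:     "node-1",
            ContainerName:   "/docker/abcd",
            ContainerLabels: map[string]string{"app": "web"},
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b))
        // {"timestamp":"2016-02-01T00:00:00Z","machine_name":"node-1",
        //  "container_Name":"/docker/abcd","container_labels":{"app":"web"}}
    }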
+ +package container + +import ( + info "github.com/google/cadvisor/info/v1" +) + +// Returns the alias a container is known by within a certain namespace, +// if available. Otherwise returns the absolute name of the container. +func GetPreferredName(ref info.ContainerReference) string { + var containerName string + if len(ref.Aliases) > 0 { + containerName = ref.Aliases[0] + } else { + containerName = ref.Name + } + return containerName +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/machine.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/machine.go index 547f1077dbd9f..fb12581693403 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/machine.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/machine.go @@ -82,8 +82,8 @@ func GetClockSpeed(procInfo []byte) (uint64, error) { } // GetMachineMemoryCapacity returns the machine's total memory from /proc/meminfo. -// Returns the total memory capacity as an int64 (number of bytes). -func GetMachineMemoryCapacity() (int64, error) { +// Returns the total memory capacity as an uint64 (number of bytes). +func GetMachineMemoryCapacity() (uint64, error) { out, err := ioutil.ReadFile("/proc/meminfo") if err != nil { return 0, err @@ -97,8 +97,8 @@ func GetMachineMemoryCapacity() (int64, error) { } // GetMachineSwapCapacity returns the machine's total swap from /proc/meminfo. -// Returns the total swap capacity as an int64 (number of bytes). -func GetMachineSwapCapacity() (int64, error) { +// Returns the total swap capacity as an uint64 (number of bytes). +func GetMachineSwapCapacity() (uint64, error) { out, err := ioutil.ReadFile("/proc/meminfo") if err != nil { return 0, err @@ -113,14 +113,14 @@ func GetMachineSwapCapacity() (int64, error) { // parseCapacity matches a Regexp in a []byte, returning the resulting value in bytes. // Assumes that the value matched by the Regexp is in KB. -func parseCapacity(b []byte, r *regexp.Regexp) (int64, error) { +func parseCapacity(b []byte, r *regexp.Regexp) (uint64, error) { matches := r.FindSubmatch(b) if len(matches) != 2 { - return -1, fmt.Errorf("failed to match regexp in output: %q", string(b)) + return 0, fmt.Errorf("failed to match regexp in output: %q", string(b)) } - m, err := strconv.ParseInt(string(matches[1]), 10, 64) + m, err := strconv.ParseUint(string(matches[1]), 10, 64) if err != nil { - return -1, err + return 0, err } // Convert to bytes. diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/version/VERSION b/Godeps/_workspace/src/github.com/google/cadvisor/version/VERSION index 9d2632160c453..b9f8e558df4d7 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/version/VERSION +++ b/Godeps/_workspace/src/github.com/google/cadvisor/version/VERSION @@ -1 +1 @@ -0.20.1 \ No newline at end of file +0.21.1 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/LICENSE b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/LICENSE new file mode 100644 index 0000000000000..ccae99f6a9a30 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2012, 2013 Ugorji Nwoke. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the author nor the names of its contributors may be used + to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/0doc.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/0doc.go new file mode 100644 index 0000000000000..c14d810a73e81 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/0doc.go @@ -0,0 +1,143 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . + +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. + Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. + - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. + - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. 
+ - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +Representative Benchmark Results + +Run the benchmark suite using: + go test -bi -bench=. -benchmem + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext_dep_test.go + +*/ +package codec diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/README.md b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/README.md new file mode 100644 index 0000000000000..6c95d1bfd2081 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/README.md @@ -0,0 +1,174 @@ +# Codec + +High Performance and Feature-Rich Idiomatic Go Library providing +encode/decode support for different serialization formats. 
+ +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +Online documentation: [http://godoc.org/github.com/ugorji/go/codec] + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. + Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. + - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. + - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +## Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +## RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +## Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. 
for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +## Representative Benchmark Results + +A sample run of benchmark using "go test -bi -bench=. -benchmem": + + /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT) + + .............................................. + BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT + To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=." + Benchmark: + Struct recursive Depth: 1 + ApproxDeepSize Of benchmark Struct: 4694 bytes + Benchmark One-Pass Run: + v-msgpack: len: 1600 bytes + bson: len: 3025 bytes + msgpack: len: 1560 bytes + binc: len: 1187 bytes + gob: len: 1972 bytes + json: len: 2538 bytes + .............................................. + PASS + Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op + Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op + Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op + Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op + Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op + Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op + Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op + Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op + Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op + Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op + Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op + Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op + Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op + Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op + ok ugorji.net/codec 30.827s + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext\_dep\_test.go + diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/binc.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/binc.go new file mode 100644 index 0000000000000..2bb5e8fee8548 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/binc.go @@ -0,0 +1,786 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "math" + // "reflect" + // "sync/atomic" + "time" + //"fmt" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
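+// For reference, the trailing-zero pruning applied by encodeFloat64 below,
+// shown as a standalone sketch (illustrative only, not part of the vendored
+// file; bigen is the package's big-endian binary helper):
+//
+//	func prunedFloat64(f float64) []byte {
+//		var b [8]byte
+//		bigen.PutUint64(b[:], math.Float64bits(f))
+//		i := 7
+//		for i >= 0 && b[i] == 0 {
+//			i--
+//		}
+//		// e.g. 2.0 (bits 0x4000000000000000) prunes to the single byte
+//		// 0x40; the decoder (decFloatPre) zero-fills the dropped bytes.
+//		return b[:i+1]
+//	}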
+ +//var _ = fmt.Printf + +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +type bincEncDriver struct { + w encWriter + m map[string]uint16 // symbols + s uint32 // symbols sequencer + b [8]byte +} + +func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + bs := encodeTime(v.(time.Time)) + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) encodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) encodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) encodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + bigen.PutUint64(e.b[:], math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) encodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + switch { + case v >= 0: + e.encUint(bincVdPosInt<<4, true, uint64(v)) + case v == -1: + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + default: + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) encodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + switch { + case v == 0: + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + case pos && v >= 1 && v <= 16: + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + case v <= math.MaxUint8: + e.w.writen2(bd|0x0, byte(v)) + case v <= math.MaxUint16: + e.w.writen1(bd | 0x01) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.encIntegerPrune(bd, pos, v, 4) + default: + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) encodeArrayPreamble(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriver) encodeMapPreamble(length int) { + 
e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriver) encodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(c_UTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). + + l := len(v) + switch l { + case 0: + e.encBytesLen(c_UTF8, 0) + return + case 1: + e.encBytesLen(c_UTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + e.w.writeUint16(ui) + } + } else { + e.s++ + ui = uint16(e.s) + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + switch { + case l <= math.MaxUint8: + // lenprec = 0 + case l <= math.MaxUint16: + lenprec = 1 + case int64(l) <= math.MaxUint32: + lenprec = 2 + default: + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + e.w.writeUint16(ui) + } + switch lenprec { + case 0: + e.w.writen1(byte(l)) + case 1: + e.w.writeUint16(uint16(l)) + case 2: + e.w.writeUint32(uint32(l)) + default: + e.w.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == c_RAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + switch { + case v <= math.MaxUint8: + e.w.writen2(bd, byte(v)) + case v <= math.MaxUint16: + e.w.writen1(bd | 0x01) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.w.writen1(bd | 0x02) + e.w.writeUint32(uint32(v)) + default: + e.w.writen1(bd | 0x03) + e.w.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecDriver struct { + r decReader + bdRead bool + bdType valueType + bd byte + vd byte + vs byte + b [8]byte + m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) +} + +func (d *bincDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *bincDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + d.bdType = valueTypeNil + case bincSpFalse, bincSpTrue: + d.bdType = valueTypeBool + case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: + d.bdType = valueTypeFloat + case bincSpZero: + d.bdType = valueTypeUint + case bincSpNegOne: + d.bdType = valueTypeInt + default: + decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + d.bdType = valueTypeUint + case bincVdPosInt: + d.bdType = valueTypeUint + case bincVdNegInt: + d.bdType = 
valueTypeInt + case bincVdFloat: + d.bdType = valueTypeFloat + case bincVdString: + d.bdType = valueTypeString + case bincVdSymbol: + d.bdType = valueTypeSymbol + case bincVdByteArray: + d.bdType = valueTypeBytes + case bincVdTimestamp: + d.bdType = valueTypeTimestamp + case bincVdCustomExt: + d.bdType = valueTypeExt + case bincVdArray: + d.bdType = valueTypeArray + case bincVdMap: + d.bdType = valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) + } + } + return d.bdType +} + +func (d *bincDecDriver) tryDecodeAsNil() bool { + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + if d.vd != bincVdTimestamp { + decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) + } + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt + d.bdRead = false + } +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + decErr("At most 8 bytes used to represent float. Received: %v bytes", l) + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(d.r.readUint64()); break; } + switch vs := d.vs; vs & 0x7 { + case bincFlBin32: + d.decFloatPre(vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + case bincFlBin64: + d.decFloatPre(vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + default: + decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:]) + v = uint64(bigen.Uint16(d.b[6:])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:]) + v = uint64(bigen.Uint32(d.b[4:])) + case 3: + d.r.readb(d.b[4:]) + v = uint64(bigen.Uint32(d.b[4:])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:])) + case 7: + d.r.readb(d.b[:]) + v = uint64(bigen.Uint64(d.b[:])) + default: + decErr("unsigned integers with greater than 64 bits of precision not supported") + } + return +} + +func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.vd { + case bincVdPosInt: + ui = d.decUint() + i = int64(ui) + case bincVdNegInt: + ui = d.decUint() + i = -(int64(ui)) + neg = true + case bincVdSmallInt: + i = int64(d.vs) + 1 + ui = uint64(d.vs) + 1 + case bincVdSpecial: + switch d.vs { + case bincSpZero: + //i = 0 + case bincSpNegOne: + neg = true + ui = 1 + i = -1 + default: + decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) + } + default: + decErr("number can only be decoded from uint or int values. 
d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + } + return +} + +func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() + if neg { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + checkOverflow(ui, 0, bitsize) + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.vd { + case bincVdSpecial: + d.bdRead = false + switch d.vs { + case bincSpNan: + return math.NaN() + case bincSpPosInf: + return math.Inf(1) + case bincSpZeroFloat, bincSpZero: + return + case bincSpNegInf: + return math.Inf(-1) + default: + decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + } + case bincVdFloat: + f = d.decFloat() + default: + _, i, _ := d.decIntAny() + f = float64(i) + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) decodeBool() (b bool) { + switch d.bd { + case (bincVdSpecial | bincSpFalse): + // b = false + case (bincVdSpecial | bincSpTrue): + b = true + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) readMapLen() (length int) { + if d.vd != bincVdMap { + decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) readArrayLen() (length int) { + if d.vd != bincVdArray { + decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs <= 3 { + return int(d.decUint()) + } + return int(d.vs - 4) +} + +func (d *bincDecDriver) decodeString() (s string) { + switch d.vd { + case bincVdString, bincVdByteArray: + if length := d.decLen(); length > 0 { + s = string(d.r.readn(length)) + } + case bincVdSymbol: + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint32 + vs := d.vs + //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) + if vs&0x8 == 0 { + symbol = uint32(d.r.readn1()) + } else { + symbol = uint32(d.r.readUint16()) + } + if d.m == nil { + d.m = make(map[uint32]string, 16) + } + + if vs&0x4 == 0 { + s = d.m[symbol] + } else { + var slen int + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(d.r.readUint16()) + case 2: + slen = int(d.r.readUint32()) + case 3: + slen = int(d.r.readUint64()) + } + s = string(d.r.readn(slen)) + d.m[symbol] = s + } + default: + decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + var clen int + switch d.vd { + case bincVdString, bincVdByteArray: + clen = d.decLen() + default: + decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. 
Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.vd { + case bincVdCustomExt: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(l) + case bincVdByteArray: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + vt = valueTypeNil + case bincSpFalse: + vt = valueTypeBool + v = false + case bincSpTrue: + vt = valueTypeBool + v = true + case bincSpNan: + vt = valueTypeFloat + v = math.NaN() + case bincSpPosInf: + vt = valueTypeFloat + v = math.Inf(1) + case bincSpNegInf: + vt = valueTypeFloat + v = math.Inf(-1) + case bincSpZeroFloat: + vt = valueTypeFloat + v = float64(0) + case bincSpZero: + vt = valueTypeUint + v = int64(0) // int8(0) + case bincSpNegOne: + vt = valueTypeInt + v = int64(-1) // int8(-1) + default: + decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + vt = valueTypeUint + v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + vt = valueTypeUint + v = d.decUint() + case bincVdNegInt: + vt = valueTypeInt + v = -(int64(d.decUint())) + case bincVdFloat: + vt = valueTypeFloat + v = d.decFloat() + case bincVdSymbol: + vt = valueTypeSymbol + v = d.decodeString() + case bincVdString: + vt = valueTypeString + v = d.decodeString() + case bincVdByteArray: + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) + case bincVdTimestamp: + vt = valueTypeTimestamp + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + v = tt + case bincVdCustomExt: + vt = valueTypeExt + l := d.decLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt + case bincVdArray: + vt = valueTypeArray + decodeFurther = true + case bincVdMap: + vt = valueTypeMap + decodeFurther = true + default: + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. +// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
+type BincHandle struct { + BasicHandle +} + +func (h *BincHandle) newEncDriver(w encWriter) encDriver { + return &bincEncDriver{w: w} +} + +func (h *BincHandle) newDecDriver(r decReader) decDriver { + return &bincDecDriver{r: r} +} + +func (_ *BincHandle) writeExt() bool { + return true +} + +func (h *BincHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/decode.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/decode.go new file mode 100644 index 0000000000000..87bef2b93586c --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/decode.go @@ -0,0 +1,1048 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "io" + "reflect" + // "runtime/debug" +) + +// Some tagging information for error messages. +const ( + msgTagDec = "codec.decoder" + msgBadDesc = "Unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +type decReader interface { + readn(n int) []byte + readb([]byte) + readn1() uint8 + readUint16() uint16 + readUint32() uint32 + readUint64() uint64 +} + +type decDriver interface { + initReadNext() + tryDecodeAsNil() bool + currentEncodedType() valueType + isBuiltinType(rt uintptr) bool + decodeBuiltin(rt uintptr, v interface{}) + //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + decodeNaked() (v interface{}, vt valueType, decodeFurther bool) + decodeInt(bitsize uint8) (i int64) + decodeUint(bitsize uint8) (ui uint64) + decodeFloat(chkOverflow32 bool) (f float64) + decodeBool() (b bool) + // decodeString can also decode symbols + decodeString() (s string) + decodeBytes(bs []byte) (bsOut []byte, changed bool) + decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + readMapLen() int + readArrayLen() int +} + +type DecodeOptions struct { + // An instance of MapType is used during schema-less decoding of a map in the stream. + // If nil, we use map[interface{}]interface{} + MapType reflect.Type + // An instance of SliceType is used during schema-less decoding of an array in the stream. + // If nil, we use []interface{} + SliceType reflect.Type + // ErrorIfNoField controls whether an error is returned when decoding a map + // from a codec stream into a struct, and no matching struct field is found. 
+ ErrorIfNoField bool +} + +// ------------------------------------ + +// ioDecReader is a decReader that reads off an io.Reader +type ioDecReader struct { + r io.Reader + br io.ByteReader + x [8]byte //temp byte array re-used internally for efficiency +} + +func (z *ioDecReader) readn(n int) (bs []byte) { + if n <= 0 { + return + } + bs = make([]byte, n) + if _, err := io.ReadAtLeast(z.r, bs, n); err != nil { + panic(err) + } + return +} + +func (z *ioDecReader) readb(bs []byte) { + if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil { + panic(err) + } +} + +func (z *ioDecReader) readn1() uint8 { + if z.br != nil { + b, err := z.br.ReadByte() + if err != nil { + panic(err) + } + return b + } + z.readb(z.x[:1]) + return z.x[0] +} + +func (z *ioDecReader) readUint16() uint16 { + z.readb(z.x[:2]) + return bigen.Uint16(z.x[:2]) +} + +func (z *ioDecReader) readUint32() uint32 { + z.readb(z.x[:4]) + return bigen.Uint32(z.x[:4]) +} + +func (z *ioDecReader) readUint64() uint64 { + z.readb(z.x[:8]) + return bigen.Uint64(z.x[:8]) +} + +// ------------------------------------ + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c int // cursor + a int // available +} + +func (z *bytesDecReader) consume(n int) (oldcursor int) { + if z.a == 0 { + panic(io.EOF) + } + if n > z.a { + decErr("Trying to read %v bytes. Only %v available", n, z.a) + } + // z.checkAvailable(n) + oldcursor = z.c + z.c = oldcursor + n + z.a = z.a - n + return +} + +func (z *bytesDecReader) readn(n int) (bs []byte) { + if n <= 0 { + return + } + c0 := z.consume(n) + bs = z.b[c0:z.c] + return +} + +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readn(len(bs))) +} + +func (z *bytesDecReader) readn1() uint8 { + c0 := z.consume(1) + return z.b[c0] +} + +// Use binaryEncoding helper for 4 and 8 bytes, but inline it for 2 bytes + // creating temp slice variable and copying it to helper function is expensive + // for just 2 bytes.
+ +func (z *bytesDecReader) readUint16() uint16 { + c0 := z.consume(2) + return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 +} + +func (z *bytesDecReader) readUint32() uint32 { + c0 := z.consume(4) + return bigen.Uint32(z.b[c0:z.c]) +} + +func (z *bytesDecReader) readUint64() uint64 { + c0 := z.consume(8) + return bigen.Uint64(z.b[c0:z.c]) +} + +// ------------------------------------ + +// decFnInfo has methods for registering handling decoding of a specific type +// based on some characteristics (builtin, extension, reflect Kind, etc) +type decFnInfo struct { + ti *typeInfo + d *Decoder + dd decDriver + xfFn func(reflect.Value, []byte) error + xfTag byte + array bool +} + +func (f *decFnInfo) builtin(rv reflect.Value) { + f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +} + +func (f *decFnInfo) rawExt(rv reflect.Value) { + xtag, xbs := f.dd.decodeExt(false, 0) + rv.Field(0).SetUint(uint64(xtag)) + rv.Field(1).SetBytes(xbs) +} + +func (f *decFnInfo) ext(rv reflect.Value) { + _, xbs := f.dd.decodeExt(true, f.xfTag) + if fnerr := f.xfFn(rv, xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryUnmarshaler + if f.ti.unmIndir == -1 { + bm = rv.Addr().Interface().(binaryUnmarshaler) + } else if f.ti.unmIndir == 0 { + bm = rv.Interface().(binaryUnmarshaler) + } else { + for j, k := int8(0), f.ti.unmIndir; j < k; j++ { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryUnmarshaler) + } + xbs, _ := f.dd.decodeBytes(nil) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) kErr(rv reflect.Value) { + decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) +} + +func (f *decFnInfo) kString(rv reflect.Value) { + rv.SetString(f.dd.decodeString()) +} + +func (f *decFnInfo) kBool(rv reflect.Value) { + rv.SetBool(f.dd.decodeBool()) +} + +func (f *decFnInfo) kInt(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(intBitsize)) +} + +func (f *decFnInfo) kInt64(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(64)) +} + +func (f *decFnInfo) kInt32(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(32)) +} + +func (f *decFnInfo) kInt8(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(8)) +} + +func (f *decFnInfo) kInt16(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(16)) +} + +func (f *decFnInfo) kFloat32(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(true)) +} + +func (f *decFnInfo) kFloat64(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(false)) +} + +func (f *decFnInfo) kUint8(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(8)) +} + +func (f *decFnInfo) kUint64(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(64)) +} + +func (f *decFnInfo) kUint(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(uintBitsize)) +} + +func (f *decFnInfo) kUint32(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(32)) +} + +func (f *decFnInfo) kUint16(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(16)) +} + +// func (f *decFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
decode kPtr called - shouldn't get called") +// if rv.IsNil() { +// rv.Set(reflect.New(rv.Type().Elem())) +// } +// f.d.decodeValue(rv.Elem()) +// } + +func (f *decFnInfo) kInterface(rv reflect.Value) { + // debugf("\t===> kInterface") + if !rv.IsNil() { + f.d.decodeValue(rv.Elem()) + return + } + // nil interface: + // use some heuristics to set the nil interface to an + // appropriate value based on the first byte read (byte descriptor bd) + v, vt, decodeFurther := f.dd.decodeNaked() + if vt == valueTypeNil { + return + } + // Cannot decode into nil interface with methods (e.g. error, io.Reader, etc) + // if a non-nil value is in the stream. + if num := f.ti.rt.NumMethod(); num > 0 { + decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)", + f.ti.rt, num) + } + var rvn reflect.Value + var useRvn bool + switch vt { + case valueTypeMap: + if f.d.h.MapType == nil { + var m2 map[interface{}]interface{} + v = &m2 + } else { + rvn = reflect.New(f.d.h.MapType).Elem() + useRvn = true + } + case valueTypeArray: + if f.d.h.SliceType == nil { + var m2 []interface{} + v = &m2 + } else { + rvn = reflect.New(f.d.h.SliceType).Elem() + useRvn = true + } + case valueTypeExt: + re := v.(*RawExt) + var bfn func(reflect.Value, []byte) error + rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag) + if bfn == nil { + rvn = reflect.ValueOf(*re) + } else if fnerr := bfn(rvn, re.Data); fnerr != nil { + panic(fnerr) + } + rv.Set(rvn) + return + } + if decodeFurther { + if useRvn { + f.d.decodeValue(rvn) + } else if v != nil { + // this v is a pointer, so we need to dereference it when done + f.d.decode(v) + rvn = reflect.ValueOf(v).Elem() + useRvn = true + } + } + if useRvn { + rv.Set(rvn) + } else if v != nil { + rv.Set(reflect.ValueOf(v)) + } +} + +func (f *decFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap { + containerLen := f.dd.readMapLen() + if containerLen == 0 { + return + } + tisfi := fti.sfi + for j := 0; j < containerLen; j++ { + // var rvkencname string + // ddecode(&rvkencname) + f.dd.initReadNext() + rvkencname := f.dd.decodeString() + // rvksi := ti.getForEncName(rvkencname) + if k := fti.indexForEncName(rvkencname); k > -1 { + sfik := tisfi[k] + if sfik.i != -1 { + f.d.decodeValue(rv.Field(int(sfik.i))) + } else { + f.d.decEmbeddedField(rv, sfik.is) + } + // f.d.decodeValue(ti.field(k, rv)) + } else { + if f.d.h.ErrorIfNoField { + decErr("No matching struct field found when decoding stream map with key: %v", + rvkencname) + } else { + var nilintf0 interface{} + f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + } + } + } + } else if currEncodedType == valueTypeArray { + containerLen := f.dd.readArrayLen() + if containerLen == 0 { + return + } + for j, si := range fti.sfip { + if j == containerLen { + break + } + if si.i != -1 { + f.d.decodeValue(rv.Field(int(si.i))) + } else { + f.d.decEmbeddedField(rv, si.is) + } + } + if containerLen > len(fti.sfip) { + // read remaining values and throw away + for j := len(fti.sfip); j < containerLen; j++ { + var nilintf0 interface{} + f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + } + } + } else { + decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)", + currEncodedType) + } +} + +func (f *decFnInfo) kSlice(rv reflect.Value) { + // A slice can be set from a map or array in stream. 
+ currEncodedType := f.dd.currentEncodedType() + + switch currEncodedType { + case valueTypeBytes, valueTypeString: + if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { + if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 { + rv.SetBytes(bs2) + } + return + } + } + + if shortCircuitReflectToFastPath && rv.CanAddr() { + switch f.ti.rtid { + case intfSliceTypId: + f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array) + return + case uint64SliceTypId: + f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array) + return + case int64SliceTypId: + f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array) + return + case strSliceTypId: + f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array) + return + } + } + + containerLen, containerLenS := decContLens(f.dd, currEncodedType) + + // an array can never be a nil slice, so no need to check f.array here. + + if rv.IsNil() { + rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)) + } + + if containerLen == 0 { + return + } + + if rvcap, rvlen := rv.Cap(), rv.Len(); containerLenS > rvcap { + if f.array { // !rv.CanSet() + decErr(msgDecCannotExpandArr, rvcap, containerLenS) + } + rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS) + if rvlen > 0 { + reflect.Copy(rvn, rv) + } + rv.Set(rvn) + } else if containerLenS > rvlen { + rv.SetLen(containerLenS) + } + + for j := 0; j < containerLenS; j++ { + f.d.decodeValue(rv.Index(j)) + } +} + +func (f *decFnInfo) kArray(rv reflect.Value) { + // f.d.decodeValue(rv.Slice(0, rv.Len())) + f.kSlice(rv.Slice(0, rv.Len())) +} + +func (f *decFnInfo) kMap(rv reflect.Value) { + if shortCircuitReflectToFastPath && rv.CanAddr() { + switch f.ti.rtid { + case mapStrIntfTypId: + f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{})) + return + case mapIntfIntfTypId: + f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{})) + return + case mapInt64IntfTypId: + f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{})) + return + case mapUint64IntfTypId: + f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{})) + return + } + } + + containerLen := f.dd.readMapLen() + + if rv.IsNil() { + rv.Set(reflect.MakeMap(f.ti.rt)) + } + + if containerLen == 0 { + return + } + + ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem() + ktypeId := reflect.ValueOf(ktype).Pointer() + for j := 0; j < containerLen; j++ { + rvk := reflect.New(ktype).Elem() + f.d.decodeValue(rvk) + + // special case if a byte array. + // if ktype == intfTyp { + if ktypeId == intfTypId { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(string(rvk.Bytes())) + } + } + rvv := rv.MapIndex(rvk) + if !rvv.IsValid() { + rvv = reflect.New(vtype).Elem() + } + + f.d.decodeValue(rvv) + rv.SetMapIndex(rvk, rvv) + } +} + +// ---------------------------------------- + +type decFn struct { + i *decFnInfo + f func(*decFnInfo, reflect.Value) +} + +// A Decoder reads and decodes an object from an input stream in the codec format. +type Decoder struct { + r decReader + d decDriver + h *BasicHandle + f map[uintptr]decFn + x []uintptr + s []decFn +} + +// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. +// +// For efficiency, users are encouraged to pass in a memory buffered reader +// (eg bufio.Reader, bytes.Buffer). 
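A minimal usage sketch for NewDecoder, defined next (an editor's illustration, not part of the vendored file): wrapping the source in a bufio.Reader both buffers reads and satisfies io.ByteReader, so readn1 above takes its single-byte fast path. It assumes this package's BincHandle (shown earlier) and the NewEncoder constructor that appears later in this diff.

package main

import (
	"bufio"
	"bytes"
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var h codec.BincHandle

	// Encode something first so there is a stream to decode.
	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, &h).Encode([]string{"a", "b"}); err != nil {
		panic(err)
	}

	// bufio.Reader implements io.ByteReader, enabling the readn1 fast path.
	dec := codec.NewDecoder(bufio.NewReader(&buf), &h)
	var out []string
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // [a b]
}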
+func NewDecoder(r io.Reader, h Handle) *Decoder { + z := ioDecReader{ + r: r, + } + z.br, _ = r.(io.ByteReader) + return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. +func NewDecoderBytes(in []byte, h Handle) *Decoder { + z := bytesDecReader{ + b: in, + a: len(in), + } + return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} +} + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. v cannot be a nil pointer. v can also be +// a reflect.Value of a pointer. +// +// Note that a pointer to a nil interface is not a nil pointer. +// If you do not know what type of stream it is, pass in a pointer to a nil interface. +// We will decode and store a value in that nil interface. +// +// Sample usages: +// // Decoding into a non-nil typed value +// var f float32 +// err = codec.NewDecoder(r, handle).Decode(&f) +// +// // Decoding into nil interface +// var v interface{} +// dec := codec.NewDecoder(r, handle) +// err = dec.Decode(&v) +// +// When decoding into a nil interface{}, we will decode into an appropriate value based +// on the contents of the stream: +// - Numbers are decoded as float64, int64 or uint64. +// - Other values are decoded appropriately depending on the type: +// bool, string, []byte, time.Time, etc +// - Extensions are decoded as RawExt (if no ext function registered for the tag) +// Configurations exist on the Handle to override defaults +// (e.g. for MapType, SliceType and how to decode raw bytes). +// +// When decoding into a non-nil interface{} value, the mode of decoding is based on the +// type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error +// - Else decode it based on its reflect.Kind +// +// There are some special rules when decoding into containers (slice/array/map/struct). +// Decode will typically use the stream contents to UPDATE the container. +// - A map can be decoded from a stream map, by updating matching keys. +// - A slice can be decoded from a stream array, +// by updating the first n elements, where n is the length of the stream. +// - A slice can be decoded from a stream map, by decoding as if +// it contains a sequence of key-value pairs. +// - A struct can be decoded from a stream map, by updating matching fields. +// - A struct can be decoded from a stream array, +// by updating fields as they occur in the struct (by index). +// +// When decoding a stream map or array with length of 0 into a nil map or slice, +// we reset the destination map or slice to a zero-length value. +// +// However, when decoding a stream nil, we reset the destination container +// to its "zero" value (e.g. nil for slice/map, etc). 
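The doc comment above describes decoding into a nil interface{}. A short sketch of that behavior (an editor's illustration, not part of the vendored file; assumes this package's BincHandle and the NewEncoderBytes constructor that appears later in this diff):

package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var h codec.BincHandle

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(map[string]int{"n": 1}); err != nil {
		panic(err)
	}

	// Decoding into a pointer to a nil interface lets the decoder pick the
	// concrete type: a stream map becomes map[interface{}]interface{}
	// unless DecodeOptions.MapType overrides it.
	var v interface{}
	if err := codec.NewDecoderBytes(buf, &h).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", v) // map[interface {}]interface {}
}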
+// +func (d *Decoder) Decode(v interface{}) (err error) { + defer panicToErr(&err) + d.decode(v) + return +} + +func (d *Decoder) decode(iv interface{}) { + d.d.initReadNext() + + switch v := iv.(type) { + case nil: + decErr("Cannot decode into nil.") + + case reflect.Value: + d.chkPtrValue(v) + d.decodeValue(v.Elem()) + + case *string: + *v = d.d.decodeString() + case *bool: + *v = d.d.decodeBool() + case *int: + *v = int(d.d.decodeInt(intBitsize)) + case *int8: + *v = int8(d.d.decodeInt(8)) + case *int16: + *v = int16(d.d.decodeInt(16)) + case *int32: + *v = int32(d.d.decodeInt(32)) + case *int64: + *v = d.d.decodeInt(64) + case *uint: + *v = uint(d.d.decodeUint(uintBitsize)) + case *uint8: + *v = uint8(d.d.decodeUint(8)) + case *uint16: + *v = uint16(d.d.decodeUint(16)) + case *uint32: + *v = uint32(d.d.decodeUint(32)) + case *uint64: + *v = d.d.decodeUint(64) + case *float32: + *v = float32(d.d.decodeFloat(true)) + case *float64: + *v = d.d.decodeFloat(false) + case *[]byte: + *v, _ = d.d.decodeBytes(*v) + + case *[]interface{}: + d.decSliceIntf(v, valueTypeInvalid, false) + case *[]uint64: + d.decSliceUint64(v, valueTypeInvalid, false) + case *[]int64: + d.decSliceInt64(v, valueTypeInvalid, false) + case *[]string: + d.decSliceStr(v, valueTypeInvalid, false) + case *map[string]interface{}: + d.decMapStrIntf(v) + case *map[interface{}]interface{}: + d.decMapIntfIntf(v) + case *map[uint64]interface{}: + d.decMapUint64Intf(v) + case *map[int64]interface{}: + d.decMapInt64Intf(v) + + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem()) + + default: + rv := reflect.ValueOf(iv) + d.chkPtrValue(rv) + d.decodeValue(rv.Elem()) + } +} + +func (d *Decoder) decodeValue(rv reflect.Value) { + d.d.initReadNext() + + if d.d.tryDecodeAsNil() { + // If value in stream is nil, set the dereferenced value to its "zero" value (if settable) + if rv.Kind() == reflect.Ptr { + if !rv.IsNil() { + rv.Set(reflect.Zero(rv.Type())) + } + return + } + // for rv.Kind() == reflect.Ptr { + // rv = rv.Elem() + // } + if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid + rv.Set(reflect.Zero(rv.Type())) + } + return + } + + // If the stream does not contain a nil value, then we can deref to the base + // non-pointer value, and decode into that. + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + + // retrieve or register a specialized function for this type + // to eliminate the need to do the retrieval multiple times + + // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } + var fn decFn + var ok bool + if useMapForCodecCache { + fn, ok = d.f[rtid] + } else { + for i, v := range d.x { + if v == rtid { + fn, ok = d.s[i], true + break + } + } + } + if !ok { + // debugf("\tCreating new dec fn for type: %v\n", rt) + fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d} + fn.i = &fi + // An extension can be registered for any type, regardless of the Kind + // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc.) + // + // We can't check if it's an extension byte here first, because the user may have + // registered a pointer or non-pointer type, meaning we may have to recurse first + // before matching a mapped type, even though the extension byte is already detected. + // + // NOTE: if decoding into a nil interface{}, we return a non-nil + // value even if the container registers a length of 0. 
+ if rtid == rawExtTypId { + fn.f = (*decFnInfo).rawExt + } else if d.d.isBuiltinType(rtid) { + fn.f = (*decFnInfo).builtin + } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*decFnInfo).ext + } else if supportBinaryMarshal && fi.ti.unm { + fn.f = (*decFnInfo).binaryMarshal + } else { + switch rk := rt.Kind(); rk { + case reflect.String: + fn.f = (*decFnInfo).kString + case reflect.Bool: + fn.f = (*decFnInfo).kBool + case reflect.Int: + fn.f = (*decFnInfo).kInt + case reflect.Int64: + fn.f = (*decFnInfo).kInt64 + case reflect.Int32: + fn.f = (*decFnInfo).kInt32 + case reflect.Int8: + fn.f = (*decFnInfo).kInt8 + case reflect.Int16: + fn.f = (*decFnInfo).kInt16 + case reflect.Float32: + fn.f = (*decFnInfo).kFloat32 + case reflect.Float64: + fn.f = (*decFnInfo).kFloat64 + case reflect.Uint8: + fn.f = (*decFnInfo).kUint8 + case reflect.Uint64: + fn.f = (*decFnInfo).kUint64 + case reflect.Uint: + fn.f = (*decFnInfo).kUint + case reflect.Uint32: + fn.f = (*decFnInfo).kUint32 + case reflect.Uint16: + fn.f = (*decFnInfo).kUint16 + // case reflect.Ptr: + // fn.f = (*decFnInfo).kPtr + case reflect.Interface: + fn.f = (*decFnInfo).kInterface + case reflect.Struct: + fn.f = (*decFnInfo).kStruct + case reflect.Slice: + fn.f = (*decFnInfo).kSlice + case reflect.Array: + fi.array = true + fn.f = (*decFnInfo).kArray + case reflect.Map: + fn.f = (*decFnInfo).kMap + default: + fn.f = (*decFnInfo).kErr + } + } + if useMapForCodecCache { + if d.f == nil { + d.f = make(map[uintptr]decFn, 16) + } + d.f[rtid] = fn + } else { + d.s = append(d.s, fn) + d.x = append(d.x, rtid) + } + } + + fn.f(fn.i, rv) + + return +} + +func (d *Decoder) chkPtrValue(rv reflect.Value) { + // We can only decode into a non-nil pointer + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + return + } + if !rv.IsValid() { + decErr("Cannot decode into a zero (ie invalid) reflect.Value") + } + if !rv.CanInterface() { + decErr("Cannot decode into a value without an interface: %v", rv) + } + rvi := rv.Interface() + decErr("Cannot decode into non-pointer or nil pointer. 
Got: %v, %T, %v", + rv.Kind(), rvi, rvi) +} + +func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) { + // d.decodeValue(rv.FieldByIndex(index)) + // nil pointers may be here; so reproduce FieldByIndex logic + enhancements + for _, j := range index { + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + // If a pointer, it must be a pointer to struct (based on typeInfo contract) + rv = rv.Elem() + } + rv = rv.Field(j) + } + d.decodeValue(rv) +} + +// -------------------------------------------------- + +// short circuit functions for common maps and slices + +func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]interface{}, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]interface{}, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + d.decode(&s[j]) + } + *v = s +} + +func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]int64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]int64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeInt(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]uint64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]uint64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeUint(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]string, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]string, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeString() + } + *v = s +} + +func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[interface{}]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + var mk interface{} + d.decode(&mk) + // special case if a byte array. 
+ if bv, bok := mk.([]byte); bok { + mk = string(bv) + } + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[int64]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeInt(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[uint64]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeUint(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[string]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeString() + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +// ---------------------------------------- + +func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { + if currEncodedType == valueTypeInvalid { + currEncodedType = dd.currentEncodedType() + } + switch currEncodedType { + case valueTypeArray: + containerLen = dd.readArrayLen() + containerLenS = containerLen + case valueTypeMap: + containerLen = dd.readMapLen() + containerLenS = containerLen * 2 + default: + decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", + currEncodedType) + } + return +} + +func decErr(format string, params ...interface{}) { + doPanic(msgTagDec, format, params...) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/encode.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/encode.go new file mode 100644 index 0000000000000..4914be0c748bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/encode.go @@ -0,0 +1,1001 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "io" + "reflect" +) + +const ( + // Some tagging information for error messages. + msgTagEnc = "codec.encoder" + defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 + // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366 +) + +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 + +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota + + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracting writing to a byte array or to an io.Writer. 
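The AsSymbol flags above form a bitmask consumed by EncodeOptions.AsSymbols (defined just below). A hedged configuration sketch (an editor's illustration, not part of the vendored file; assumes this package's BincHandle, whose embedded BasicHandle carries EncodeOptions, the NewEncoderBytes constructor from later in this diff, and that the binc format supports symbols):

package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var h codec.BincHandle

	// Encode struct field names and map[string]... keys as symbols, so a
	// repeated string is written once and referenced by a tag thereafter.
	h.AsSymbols = codec.AsSymbolMapStringKeysFlag | codec.AsSymbolStructFieldNameFlag

	var buf []byte
	err := codec.NewEncoderBytes(&buf, &h).Encode([]map[string]int{
		{"key": 1}, {"key": 2}, // "key" should be emitted in full only once
	})
	fmt.Println(err, len(buf))
}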
+type encWriter interface { + writeUint16(uint16) + writeUint32(uint32) + writeUint64(uint64) + writeb([]byte) + writestr(string) + writen1(byte) + writen2(byte, byte) + atEndOfEncode() +} + +// encDriver abstracts the actual codec (binc vs msgpack, etc) +type encDriver interface { + isBuiltinType(rt uintptr) bool + encodeBuiltin(rt uintptr, v interface{}) + encodeNil() + encodeInt(i int64) + encodeUint(i uint64) + encodeBool(b bool) + encodeFloat32(f float32) + encodeFloat64(f float64) + encodeExtPreamble(xtag byte, length int) + encodeArrayPreamble(length int) + encodeMapPreamble(length int) + encodeString(c charEncoding, v string) + encodeSymbol(v string) + encodeStringBytes(c charEncoding, v []byte) + //TODO + //encBignum(f *big.Int) + //encStringRunes(c charEncoding, v []rune) +} + +type ioEncWriterWriter interface { + WriteByte(c byte) error + WriteString(s string) (n int, err error) + Write(p []byte) (n int, err error) +} + +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} + +type EncodeOptions struct { + // Encode a struct as an array, and not as a map. + StructToArray bool + + // AsSymbols defines what should be encoded as symbols. + // + // Encoding as symbols can reduce the encoded size significantly. + // + // However, during encoding, each string to be encoded as a symbol must + // be checked to see if it has been seen before. Consequently, encoding time + // will increase when using symbols, because string comparisons have a clear cost. + // + // Sample values: + // AsSymbolNone + // AsSymbolAll + // AsSymbolMapStringKeysFlag + // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + AsSymbols AsSymbolFlag +} + +// --------------------------------------------- + +type simpleIoEncWriterWriter struct { + w io.Writer + bw io.ByteWriter + sw ioEncStringWriter +} + +func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { + if o.bw != nil { + return o.bw.WriteByte(c) + } + _, err = o.w.Write([]byte{c}) + return +} + +func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { + if o.sw != nil { + return o.sw.WriteString(s) + } + return o.w.Write([]byte(s)) +} + +func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { + return o.w.Write(p) +} + +// ---------------------------------------- + +// ioEncWriter implements encWriter and can write to an io.Writer implementation +type ioEncWriter struct { + w ioEncWriterWriter + x [8]byte // temp byte array re-used internally for efficiency +} + +func (z *ioEncWriter) writeUint16(v uint16) { + bigen.PutUint16(z.x[:2], v) + z.writeb(z.x[:2]) +} + +func (z *ioEncWriter) writeUint32(v uint32) { + bigen.PutUint32(z.x[:4], v) + z.writeb(z.x[:4]) +} + +func (z *ioEncWriter) writeUint64(v uint64) { + bigen.PutUint64(z.x[:8], v) + z.writeb(z.x[:8]) +} + +func (z *ioEncWriter) writeb(bs []byte) { + if len(bs) == 0 { + return + } + n, err := z.w.Write(bs) + if err != nil { + panic(err) + } + if n != len(bs) { + encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n) + } +} + +func (z *ioEncWriter) writestr(s string) { + n, err := z.w.WriteString(s) + if err != nil { + panic(err) + } + if n != len(s) { + encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.w.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1 byte, b2 byte) { + z.writen1(b1) + z.writen1(b2) +} + +func (z *ioEncWriter) atEndOfEncode() {} + +// ---------------------------------------- + +// bytesEncWriter implements encWriter and can write to a byte slice. +// It is used by the Marshal function. +type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode +} + +func (z *bytesEncWriter) writeUint16(v uint16) { + c := z.grow(2) + z.b[c] = byte(v >> 8) + z.b[c+1] = byte(v) +} + +func (z *bytesEncWriter) writeUint32(v uint32) { + c := z.grow(4) + z.b[c] = byte(v >> 24) + z.b[c+1] = byte(v >> 16) + z.b[c+2] = byte(v >> 8) + z.b[c+3] = byte(v) +} + +func (z *bytesEncWriter) writeUint64(v uint64) { + c := z.grow(8) + z.b[c] = byte(v >> 56) + z.b[c+1] = byte(v >> 48) + z.b[c+2] = byte(v >> 40) + z.b[c+3] = byte(v >> 32) + z.b[c+4] = byte(v >> 24) + z.b[c+5] = byte(v >> 16) + z.b[c+6] = byte(v >> 8) + z.b[c+7] = byte(v) +} + +func (z *bytesEncWriter) writeb(s []byte) { + if len(s) == 0 { + return + } + c := z.grow(len(s)) + copy(z.b[c:], s) +} + +func (z *bytesEncWriter) writestr(s string) { + c := z.grow(len(s)) + copy(z.b[c:], s) +} + +func (z *bytesEncWriter) writen1(b1 byte) { + c := z.grow(1) + z.b[c] = b1 +} + +func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { + c := z.grow(2) + z.b[c] = b1 + z.b[c+1] = b2 +} + +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] +} + +func (z *bytesEncWriter) grow(n int) (oldcursor int) { + oldcursor = z.c + z.c = oldcursor + n + if z.c > cap(z.b) { + // Tried using appendslice logic: (if cap < 1024, *2, else *1.25). + // However, it was too expensive, causing too many iterations of copy. 
+ // Using bytes.Buffer model was much better (2*cap + n) + bs := make([]byte, 2*cap(z.b)+n) + copy(bs, z.b[:oldcursor]) + z.b = bs + } else if z.c > len(z.b) { + z.b = z.b[:cap(z.b)] + } + return +} + +// --------------------------------------------- + +type encFnInfo struct { + ti *typeInfo + e *Encoder + ee encDriver + xfFn func(reflect.Value) ([]byte, error) + xfTag byte +} + +func (f *encFnInfo) builtin(rv reflect.Value) { + f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) +} + +func (f *encFnInfo) rawExt(rv reflect.Value) { + f.e.encRawExt(rv.Interface().(RawExt)) +} + +func (f *encFnInfo) ext(rv reflect.Value) { + bs, fnerr := f.xfFn(rv) + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + return + } + if f.e.hh.writeExt() { + f.ee.encodeExtPreamble(f.xfTag, len(bs)) + f.e.w.writeb(bs) + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } + +} + +func (f *encFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryMarshaler + if f.ti.mIndir == 0 { + bm = rv.Interface().(binaryMarshaler) + } else if f.ti.mIndir == -1 { + bm = rv.Addr().Interface().(binaryMarshaler) + } else { + for j, k := int8(0), f.ti.mIndir; j < k; j++ { + if rv.IsNil() { + f.ee.encodeNil() + return + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryMarshaler) + } + // debugf(">>>> binaryMarshaler: %T", rv.Interface()) + bs, fnerr := bm.MarshalBinary() + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } +} + +func (f *encFnInfo) kBool(rv reflect.Value) { + f.ee.encodeBool(rv.Bool()) +} + +func (f *encFnInfo) kString(rv reflect.Value) { + f.ee.encodeString(c_UTF8, rv.String()) +} + +func (f *encFnInfo) kFloat64(rv reflect.Value) { + f.ee.encodeFloat64(rv.Float()) +} + +func (f *encFnInfo) kFloat32(rv reflect.Value) { + f.ee.encodeFloat32(float32(rv.Float())) +} + +func (f *encFnInfo) kInt(rv reflect.Value) { + f.ee.encodeInt(rv.Int()) +} + +func (f *encFnInfo) kUint(rv reflect.Value) { + f.ee.encodeUint(rv.Uint()) +} + +func (f *encFnInfo) kInvalid(rv reflect.Value) { + f.ee.encodeNil() +} + +func (f *encFnInfo) kErr(rv reflect.Value) { + encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) +} + +func (f *encFnInfo) kSlice(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case intfSliceTypId: + f.e.encSliceIntf(rv.Interface().([]interface{})) + return + case strSliceTypId: + f.e.encSliceStr(rv.Interface().([]string)) + return + case uint64SliceTypId: + f.e.encSliceUint64(rv.Interface().([]uint64)) + return + case int64SliceTypId: + f.e.encSliceInt64(rv.Interface().([]int64)) + return + } + } + + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { + f.ee.encodeStringBytes(c_RAW, rv.Bytes()) + return + } + + l := rv.Len() + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } +} + +func (f *encFnInfo) kArray(rv reflect.Value) { + // We cannot share kSlice method, because the array may be non-addressable. + // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". 
+ // So we have to duplicate the functionality here. + // f.e.encodeValue(rv.Slice(0, rv.Len())) + // f.kSlice(rv.Slice(0, rv.Len())) + + l := rv.Len() + // Handle an array of bytes specially (in line with what is done for slices) + if f.ti.rt.Elem().Kind() == reflect.Uint8 { + if l == 0 { + f.ee.encodeStringBytes(c_RAW, nil) + return + } + var bs []byte + if rv.CanAddr() { + bs = rv.Slice(0, l).Bytes() + } else { + bs = make([]byte, l) + for i := 0; i < l; i++ { + bs[i] = byte(rv.Index(i).Uint()) + } + } + f.ee.encodeStringBytes(c_RAW, bs) + return + } + + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } +} + +func (f *encFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + newlen := len(fti.sfi) + rvals := make([]reflect.Value, newlen) + var encnames []string + e := f.e + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfi + encnames = make([]string, newlen) + } + newlen = 0 + for _, si := range tisfi { + if si.i != -1 { + rvals[newlen] = rv.Field(int(si.i)) + } else { + rvals[newlen] = rv.FieldByIndex(si.is) + } + if toMap { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + continue + } + encnames[newlen] = si.encName + } else { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + rvals[newlen] = reflect.Value{} //encode as nil + } + } + newlen++ + } + + // debugf(">>>> kStruct: newlen: %v", newlen) + if toMap { + ee := f.ee //don't dereference everytime + ee.encodeMapPreamble(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + for j := 0; j < newlen; j++ { + if asSymbols { + ee.encodeSymbol(encnames[j]) + } else { + ee.encodeString(c_UTF8, encnames[j]) + } + e.encodeValue(rvals[j]) + } + } else { + f.ee.encodeArrayPreamble(newlen) + for j := 0; j < newlen; j++ { + e.encodeValue(rvals[j]) + } + } +} + +// func (f *encFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
encode kPtr called - shouldn't get called") +// if rv.IsNil() { +// f.ee.encodeNil() +// return +// } +// f.e.encodeValue(rv.Elem()) +// } + +func (f *encFnInfo) kInterface(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + f.e.encodeValue(rv.Elem()) +} + +func (f *encFnInfo) kMap(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case mapIntfIntfTypId: + f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{})) + return + case mapStrIntfTypId: + f.e.encMapStrIntf(rv.Interface().(map[string]interface{})) + return + case mapStrStrTypId: + f.e.encMapStrStr(rv.Interface().(map[string]string)) + return + case mapInt64IntfTypId: + f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{})) + return + case mapUint64IntfTypId: + f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{})) + return + } + } + + l := rv.Len() + f.ee.encodeMapPreamble(l) + if l == 0 { + return + } + // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String + keyTypeIsString := f.ti.rt.Key() == stringTyp + var asSymbols bool + if keyTypeIsString { + asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + } + mks := rv.MapKeys() + // for j, lmks := 0, len(mks); j < lmks; j++ { + for j := range mks { + if keyTypeIsString { + if asSymbols { + f.ee.encodeSymbol(mks[j].String()) + } else { + f.ee.encodeString(c_UTF8, mks[j].String()) + } + } else { + f.e.encodeValue(mks[j]) + } + f.e.encodeValue(rv.MapIndex(mks[j])) + } + +} + +// -------------------------------------------------- + +// encFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations once, and then dispatch to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type encFn struct { + i *encFnInfo + f func(*encFnInfo, reflect.Value) +} + +// -------------------------------------------------- + +// An Encoder writes an object to an output stream in the codec format. +type Encoder struct { + w encWriter + e encDriver + h *BasicHandle + hh Handle + f map[uintptr]encFn + x []uintptr + s []encFn +} + +// NewEncoder returns an Encoder for encoding into an io.Writer. +// +// For efficiency, users are encouraged to pass in a memory buffered writer +// (eg bufio.Writer, bytes.Buffer). +func NewEncoder(w io.Writer, h Handle) *Encoder { + ww, ok := w.(ioEncWriterWriter) + if !ok { + sww := simpleIoEncWriterWriter{w: w} + sww.bw, _ = w.(io.ByteWriter) + sww.sw, _ = w.(ioEncStringWriter) + ww = &sww + //ww = bufio.NewWriterSize(w, defEncByteBufSize) + } + z := ioEncWriter{ + w: ww, + } + return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} +} + +// NewEncoderBytes returns an encoder for encoding directly and efficiently +// into a byte slice, using zero-copying to temporary slices. +// +// It will potentially replace the output byte slice pointed to. +// After encoding, the out parameter contains the encoded contents. +func NewEncoderBytes(out *[]byte, h Handle) *Encoder { + in := *out + if in == nil { + in = make([]byte, defEncByteBufSize) + } + z := bytesEncWriter{ + b: in, + out: out, + } + return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} +} + +// Encode writes an object into a stream in the codec format. +// +// Encoding can be configured via the "codec" struct tag for the fields. +// +// The "codec" key in the struct field's tag value is the key name, +// followed by an optional comma and options. 
+// +// To set an option on all fields (e.g. omitempty on all fields), you +// can create a field called _struct, and set flags on it. +// +// Struct values "usually" encode as maps. Each exported struct field is encoded unless: +// - the field's codec tag is "-", OR +// - the field is empty and its codec tag specifies the "omitempty" option. +// +// When encoding as a map, the first string in the tag (before the comma) +// is the map key string to use when encoding. +// +// However, struct values may encode as arrays. This happens when: +// - StructToArray Encode option is set, OR +// - the codec tag on the _struct field sets the "toarray" option +// +// Values with types that implement MapBySlice are encoded as stream maps. +// +// The empty values (for omitempty option) are false, 0, any nil pointer +// or interface value, and any array, slice, map, or string of length zero. +// +// Anonymous fields are encoded inline if no struct tag is present. +// Else they are encoded as regular fields. +// +// Examples: +// +// type MyStruct struct { +// _struct bool `codec:",omitempty"` //set omitempty for every field +// Field1 string `codec:"-"` //skip this field +// Field2 int `codec:"myName"` //Use key "myName" in encode stream +// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. +// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. +// ... +// } +// +// type MyStruct struct { +// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field +// //and encode struct as an array +// } +// +// The mode of encoding is based on the type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error) +// - Else encode it based on its reflect.Kind +// +// Note that struct field names and keys in map[string]XXX will be treated as symbols. +// Some formats support symbols (e.g. binc) and will properly encode the string +// only once in the stream, and use a tag to refer to it thereafter. 
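A compact sketch of the struct tag options described above (an editor's illustration, not part of the vendored file; assumes this package's BincHandle):

package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

type MyStruct struct {
	_struct bool   `codec:",omitempty"` // set omitempty for every field
	Field1  string `codec:"-"`          // skip this field
	Field2  int    `codec:"myName"`     // use key "myName" in the encoded stream
	Field3  int32  `codec:",omitempty"` // use key "Field3"; omit if empty
}

func main() {
	var h codec.BincHandle
	var buf []byte
	// Field1 is skipped, Field3 is zero and omitted, so only "myName" is written.
	err := codec.NewEncoderBytes(&buf, &h).Encode(MyStruct{Field1: "x", Field2: 7})
	fmt.Println(err, len(buf))
}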
+func (e *Encoder) Encode(v interface{}) (err error) { + defer panicToErr(&err) + e.encode(v) + e.w.atEndOfEncode() + return +} + +func (e *Encoder) encode(iv interface{}) { + switch v := iv.(type) { + case nil: + e.e.encodeNil() + + case reflect.Value: + e.encodeValue(v) + + case string: + e.e.encodeString(c_UTF8, v) + case bool: + e.e.encodeBool(v) + case int: + e.e.encodeInt(int64(v)) + case int8: + e.e.encodeInt(int64(v)) + case int16: + e.e.encodeInt(int64(v)) + case int32: + e.e.encodeInt(int64(v)) + case int64: + e.e.encodeInt(v) + case uint: + e.e.encodeUint(uint64(v)) + case uint8: + e.e.encodeUint(uint64(v)) + case uint16: + e.e.encodeUint(uint64(v)) + case uint32: + e.e.encodeUint(uint64(v)) + case uint64: + e.e.encodeUint(v) + case float32: + e.e.encodeFloat32(v) + case float64: + e.e.encodeFloat64(v) + + case []interface{}: + e.encSliceIntf(v) + case []string: + e.encSliceStr(v) + case []int64: + e.encSliceInt64(v) + case []uint64: + e.encSliceUint64(v) + case []uint8: + e.e.encodeStringBytes(c_RAW, v) + + case map[interface{}]interface{}: + e.encMapIntfIntf(v) + case map[string]interface{}: + e.encMapStrIntf(v) + case map[string]string: + e.encMapStrStr(v) + case map[int64]interface{}: + e.encMapInt64Intf(v) + case map[uint64]interface{}: + e.encMapUint64Intf(v) + + case *string: + e.e.encodeString(c_UTF8, *v) + case *bool: + e.e.encodeBool(*v) + case *int: + e.e.encodeInt(int64(*v)) + case *int8: + e.e.encodeInt(int64(*v)) + case *int16: + e.e.encodeInt(int64(*v)) + case *int32: + e.e.encodeInt(int64(*v)) + case *int64: + e.e.encodeInt(*v) + case *uint: + e.e.encodeUint(uint64(*v)) + case *uint8: + e.e.encodeUint(uint64(*v)) + case *uint16: + e.e.encodeUint(uint64(*v)) + case *uint32: + e.e.encodeUint(uint64(*v)) + case *uint64: + e.e.encodeUint(*v) + case *float32: + e.e.encodeFloat32(*v) + case *float64: + e.e.encodeFloat64(*v) + + case *[]interface{}: + e.encSliceIntf(*v) + case *[]string: + e.encSliceStr(*v) + case *[]int64: + e.encSliceInt64(*v) + case *[]uint64: + e.encSliceUint64(*v) + case *[]uint8: + e.e.encodeStringBytes(c_RAW, *v) + + case *map[interface{}]interface{}: + e.encMapIntfIntf(*v) + case *map[string]interface{}: + e.encMapStrIntf(*v) + case *map[string]string: + e.encMapStrStr(*v) + case *map[int64]interface{}: + e.encMapInt64Intf(*v) + case *map[uint64]interface{}: + e.encMapUint64Intf(*v) + + default: + e.encodeValue(reflect.ValueOf(iv)) + } +} + +func (e *Encoder) encodeValue(rv reflect.Value) { + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + e.e.encodeNil() + return + } + rv = rv.Elem() + } + + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + + // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } + var fn encFn + var ok bool + if useMapForCodecCache { + fn, ok = e.f[rtid] + } else { + for i, v := range e.x { + if v == rtid { + fn, ok = e.s[i], true + break + } + } + } + if !ok { + // debugf("\tCreating new enc fn for type: %v\n", rt) + fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} + fn.i = &fi + if rtid == rawExtTypId { + fn.f = (*encFnInfo).rawExt + } else if e.e.isBuiltinType(rtid) { + fn.f = (*encFnInfo).builtin + } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*encFnInfo).ext + } else if supportBinaryMarshal && fi.ti.m { + fn.f = (*encFnInfo).binaryMarshal + } else { + switch rk := rt.Kind(); rk { + case reflect.Bool: + fn.f = (*encFnInfo).kBool + case reflect.String: + fn.f = (*encFnInfo).kString + case reflect.Float64: + 
fn.f = (*encFnInfo).kFloat64 + case reflect.Float32: + fn.f = (*encFnInfo).kFloat32 + case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: + fn.f = (*encFnInfo).kInt + case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: + fn.f = (*encFnInfo).kUint + case reflect.Invalid: + fn.f = (*encFnInfo).kInvalid + case reflect.Slice: + fn.f = (*encFnInfo).kSlice + case reflect.Array: + fn.f = (*encFnInfo).kArray + case reflect.Struct: + fn.f = (*encFnInfo).kStruct + // case reflect.Ptr: + // fn.f = (*encFnInfo).kPtr + case reflect.Interface: + fn.f = (*encFnInfo).kInterface + case reflect.Map: + fn.f = (*encFnInfo).kMap + default: + fn.f = (*encFnInfo).kErr + } + } + if useMapForCodecCache { + if e.f == nil { + e.f = make(map[uintptr]encFn, 16) + } + e.f[rtid] = fn + } else { + e.s = append(e.s, fn) + e.x = append(e.x, rtid) + } + } + + fn.f(fn.i, rv) + +} + +func (e *Encoder) encRawExt(re RawExt) { + if re.Data == nil { + e.e.encodeNil() + return + } + if e.hh.writeExt() { + e.e.encodeExtPreamble(re.Tag, len(re.Data)) + e.w.writeb(re.Data) + } else { + e.e.encodeStringBytes(c_RAW, re.Data) + } +} + +// --------------------------------------------- +// short circuit functions for common maps and slices + +func (e *Encoder) encSliceIntf(v []interface{}) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.encode(v2) + } +} + +func (e *Encoder) encSliceStr(v []string) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeString(c_UTF8, v2) + } +} + +func (e *Encoder) encSliceInt64(v []int64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeInt(v2) + } +} + +func (e *Encoder) encSliceUint64(v []uint64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeUint(v2) + } +} + +func (e *Encoder) encMapStrStr(v map[string]string) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.e.encodeString(c_UTF8, v2) + } +} + +func (e *Encoder) encMapStrIntf(v map[string]interface{}) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.encode(v2) + } +} + +func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeInt(k2) + e.encode(v2) + } +} + +func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeUint(uint64(k2)) + e.encode(v2) + } +} + +func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.encode(k2) + e.encode(v2) + } +} + +// ---------------------------------------- + +func encErr(format string, params ...interface{}) { + doPanic(msgTagEnc, format, params...) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper.go new file mode 100644 index 0000000000000..e6dc0563f090c --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper.go @@ -0,0 +1,589 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. 
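Both encodeValue above and its decode counterpart cache one dispatch function per concrete type, keyed by the type's rtid; the useMapForCodecCache constant defined below in this file selects between a map and a pair of parallel slices. A stripped-down sketch of the slice variant (an editor's illustration of the pattern only; the names are invented):

package main

import "fmt"

// fnCache mirrors the x/s fields on Encoder and Decoder: parallel slices
// holding type ids and the dispatch functions cached for them.
type fnCache struct {
	ids []uintptr
	fns []func() string
}

// get returns the cached function for id, building and caching it on a miss.
func (c *fnCache) get(id uintptr, build func() func() string) func() string {
	// Linear scan: cheap for the small, stable set of types a codec sees.
	for i, v := range c.ids {
		if v == id {
			return c.fns[i]
		}
	}
	fn := build()
	c.ids = append(c.ids, id)
	c.fns = append(c.fns, fn)
	return fn
}

func main() {
	var c fnCache
	f := c.get(42, func() func() string {
		fmt.Println("built once")
		return func() string { return "dispatch for type 42" }
	})
	fmt.Println(f())
	f = c.get(42, func() func() string { panic("cache miss: not rebuilt") })
	fmt.Println(f())
}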
+ +package codec + +// Contains code shared by both encode and decode. + +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" + "unicode" + "unicode/utf8" +) + +const ( + structTagName = "codec" + + // Support + // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) + // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error + // This constant flag will enable or disable it. + supportBinaryMarshal = true + + // Each Encoder or Decoder uses a cache of functions based on conditionals, + // so that the conditionals are not run every time. + // + // Either a map or a slice is used to keep track of the functions. + // The map is more natural, but has a higher cost than a slice/array. + // This flag (useMapForCodecCache) controls which is used. + useMapForCodecCache = false + + // For some common container types, we can short-circuit an elaborate + // reflection dance and call encode/decode directly. + // The currently supported types are: + // - slices of strings, or id's (int64,uint64) or interfaces. + // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf + shortCircuitReflectToFastPath = true + + // for debugging, set this to false, to catch panic traces. + // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. + recoverPanicToErr = true +) + +type charEncoding uint8 + +const ( + c_RAW charEncoding = iota + c_UTF8 + c_UTF16LE + c_UTF16BE + c_UTF32LE + c_UTF32BE +) + +// valueType is the stream type +type valueType uint8 + +const ( + valueTypeUnset valueType = iota + valueTypeNil + valueTypeInt + valueTypeUint + valueTypeFloat + valueTypeBool + valueTypeString + valueTypeSymbol + valueTypeBytes + valueTypeMap + valueTypeArray + valueTypeTimestamp + valueTypeExt + + valueTypeInvalid = 0xff +) + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + cachedTypeInfo = make(map[uintptr]*typeInfo, 4) + cachedTypeInfoMutex sync.RWMutex + + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + strSliceTyp = reflect.TypeOf([]string(nil)) + boolSliceTyp = reflect.TypeOf([]bool(nil)) + uintSliceTyp = reflect.TypeOf([]uint(nil)) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + uint16SliceTyp = reflect.TypeOf([]uint16(nil)) + uint32SliceTyp = reflect.TypeOf([]uint32(nil)) + uint64SliceTyp = reflect.TypeOf([]uint64(nil)) + intSliceTyp = reflect.TypeOf([]int(nil)) + int8SliceTyp = reflect.TypeOf([]int8(nil)) + int16SliceTyp = reflect.TypeOf([]int16(nil)) + int32SliceTyp = reflect.TypeOf([]int32(nil)) + int64SliceTyp = reflect.TypeOf([]int64(nil)) + float32SliceTyp = reflect.TypeOf([]float32(nil)) + float64SliceTyp = reflect.TypeOf([]float64(nil)) + + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) + + mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) + mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) + mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) + mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() + + rawExtTypId = 
reflect.ValueOf(rawExtTyp).Pointer() + intfTypId = reflect.ValueOf(intfTyp).Pointer() + timeTypId = reflect.ValueOf(timeTyp).Pointer() + + intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() + strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() + + boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() + uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() + uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() + uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer() + uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() + uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() + intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() + int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() + int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() + int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() + int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() + float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() + float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer() + + mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() + mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() + mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() + mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() + mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() + mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() + mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() + // Id = reflect.ValueOf().Pointer() + // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() + + binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() + binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() + + intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +) + +type binaryUnmarshaler interface { + UnmarshalBinary(data []byte) error +} + +type binaryMarshaler interface { + MarshalBinary() (data []byte, err error) +} + +// MapBySlice represents a slice which should be encoded as a map in the stream. +// The slice contains a sequence of key-value pairs. +type MapBySlice interface { + MapBySlice() +} + +// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// +// BasicHandle encapsulates the common options and extension functions. +type BasicHandle struct { + extHandle + EncodeOptions + DecodeOptions +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + writeExt() bool + getBasicHandle() *BasicHandle + newEncDriver(w encWriter) encDriver + newDecDriver(r decReader) decDriver +} + +// RawExt represents raw unprocessed extension data. +type RawExt struct { + Tag byte + Data []byte +} + +type extTypeTagFn struct { + rtid uintptr + rt reflect.Type + tag byte + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +type extHandle []*extTypeTagFn + +// AddExt registers an encode and decode function for a reflect.Type. +// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn. 
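A hedged sketch of registering an extension via AddExt, defined next (an editor's illustration, not part of the vendored file; the Seconds type, the tag value 1, and the byte layout are all invented for the example; assumes this package's BincHandle):

package main

import (
	"encoding/binary"
	"fmt"
	"reflect"

	"github.com/hashicorp/go-msgpack/codec"
)

// Seconds is a named, non-pointer, non-interface type, as AddExt requires.
type Seconds int64

func main() {
	var h codec.BincHandle
	err := h.AddExt(reflect.TypeOf(Seconds(0)), 1, // tag 1 is arbitrary here
		func(rv reflect.Value) ([]byte, error) { // encode as 8 big-endian bytes
			b := make([]byte, 8)
			binary.BigEndian.PutUint64(b, uint64(rv.Int()))
			return b, nil
		},
		func(rv reflect.Value, b []byte) error { // decode: reverse of the above
			rv.SetInt(int64(binary.BigEndian.Uint64(b)))
			return nil
		})
	fmt.Println(err)
}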
+func (o *extHandle) AddExt( + rt reflect.Type, + tag byte, + encfn func(reflect.Value) ([]byte, error), + decfn func(reflect.Value, []byte) error, +) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + // o cannot be nil, since it is always embedded in a Handle. + // if nil, let it panic. + // if o == nil { + // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") + // return + // } + + rtid := reflect.ValueOf(rt).Pointer() + for _, v := range *o { + if v.rtid == rtid { + v.tag, v.encFn, v.decFn = tag, encfn, decfn + return + } + } + + *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) + return +} + +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + for _, v := range o { + if v.rtid == rtid { + return v + } + } + return nil +} + +func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { + for _, v := range o { + if v.tag == tag { + return v + } + } + return nil +} + +func (o extHandle) getDecodeExtForTag(tag byte) ( + rv reflect.Value, fn func(reflect.Value, []byte) error) { + if x := o.getExtForTag(tag); x != nil { + // ext is only registered for base + rv = reflect.New(x.rt).Elem() + fn = x.decFn + } + return +} + +func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.decFn + } + return +} + +func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.encFn + } + return +} + +type structFieldInfo struct { + encName string // encode name + + // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. + + is []int // (recursive/embedded) field index in struct + i int16 // field index in struct + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? + + // tag string // tag + // name string // field name + // encNameBs []byte // encoded name as byte stream + // ikind int // kind of the field as an int i.e. int(reflect.Kind) +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + if fname == "" { + panic("parseStructFieldInfo: No Field Name") + } + si := structFieldInfo{ + // name: fname, + encName: fname, + // tag: stag, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + switch s { + case "omitempty": + si.omitEmpty = true + case "toarray": + si.toArray = true + } + } + } + } + // si.encNameBs = []byte(si.encName) + return &si +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +// typeInfo keeps information about each type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. 
Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + rt reflect.Type + rtid uintptr + + // baseId gives pointer to the base reflect.Type, after deferencing + // the pointers. E.g. base type of ***time.Time is time.Time. + base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + mbs bool // base type (T or *T) is a MapBySlice + + m bool // base type (T or *T) is a binaryMarshaler + unm bool // base type (T or *T) is a binaryUnmarshaler + mIndir int8 // number of indirections to get to binaryMarshaler type + unmIndir int8 // number of indirections to get to binaryUnmarshaler type + toArray bool // whether this (struct) type should be encoded as an array +} + +func (ti *typeInfo) indexForEncName(name string) int { + //tisfi := ti.sfi + const binarySearchThreshold = 16 + if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { + // linear search. faster than binary search in my testing up to 16-field structs. + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + } else { + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + } + return -1 +} + +func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + var ok bool + cachedTypeInfoMutex.RLock() + pti, ok = cachedTypeInfo[rtid] + cachedTypeInfoMutex.RUnlock() + if ok { + return + } + + cachedTypeInfoMutex.Lock() + defer cachedTypeInfoMutex.Unlock() + if pti, ok = cachedTypeInfo[rtid]; ok { + return + } + + ti := typeInfo{rt: rt, rtid: rtid} + pti = &ti + + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.m, ti.mIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.unm, ti.unmIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } + + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + ti.base = pt + ti.baseId = reflect.ValueOf(pt).Pointer() + ti.baseIndir = ptIndir + } + + if rt.Kind() == reflect.Struct { + var siInfo *structFieldInfo + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) + ti.toArray = siInfo.toArray + } + sfip := make([]*structFieldInfo, 0, rt.NumField()) + rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) + + // // try to put all si close together + // const tryToPutAllStructFieldInfoTogether = true + // if tryToPutAllStructFieldInfoTogether { + // sfip2 := make([]structFieldInfo, len(sfip)) + // for i, si := range sfip { + // sfip2[i] = *si + // } + // for i := range sfip { + // sfip[i] = &sfip2[i] + // } + // } + + ti.sfip = make([]*structFieldInfo, len(sfip)) + ti.sfi = make([]*structFieldInfo, len(sfip)) + copy(ti.sfip, sfip) + sort.Sort(sfiSortedByEncName(sfip)) + copy(ti.sfi, sfip) + } + // sfi = sfip + cachedTypeInfo[rtid] = pti + return +} + +func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, + sfi *[]*structFieldInfo, siInfo *structFieldInfo, +) { + // for rt.Kind() == reflect.Ptr { + // // indexstack = append(indexstack, 0) + // rt = rt.Elem() + // } + for j := 0; j < rt.NumField(); j++ { + 
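+		// Each field is considered in turn below: "-" tags and unexported
+		// names are skipped, anonymous embedded structs are inlined
+		// recursively, and nested fields record their full index path in si.is.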
f := rt.Field(j) + stag := f.Tag.Get(structTagName) + if stag == "-" { + continue + } + if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { + continue + } + // if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it. + if f.Anonymous && stag == "" { + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) + continue + } + } + // do not let fields with same name in embedded structs override field at higher level. + // this must be done after anonymous check, to allow anonymous field + // still include their child fields + if _, ok := fnameToHastag[f.Name]; ok { + continue + } + si := parseStructFieldInfo(f.Name, stag) + // si.ikind = int(f.Type.Kind()) + if len(indexstack) == 0 { + si.i = int16(j) + } else { + si.i = -1 + si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + } + + if siInfo != nil { + if siInfo.omitEmpty { + si.omitEmpty = true + } + } + *sfi = append(*sfi, si) + fnameToHastag[f.Name] = stag != "" + } +} + +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + //debug.PrintStack() + panicValToErr(x, err) + } + } +} + +func doPanic(tag string, format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = tag + copy(params2[1:], params) + panic(fmt.Errorf("%s: "+format, params2...)) +} + +func checkOverflowFloat32(f float64, doCheck bool) { + if !doCheck { + return + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() + f2 := f + if f2 < 0 { + f2 = -f + } + if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { + decErr("Overflow float32 value: %v", f2) + } +} + +func checkOverflow(ui uint64, i int64, bitsize uint8) { + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize == 0 { + return + } + if i != 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) + } + } + if ui != 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) + } + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go new file mode 100644 index 0000000000000..58417da958ffd --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go @@ -0,0 +1,127 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). 
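+//
+// A minimal sketch (not upstream code) of how these helpers are used:
+// panicValToErr below pairs with panicToErr in helper.go. Internal codec
+// routines signal failure by panicking (via decErr/doPanic), and exported
+// entry points recover that panic into a returned error, roughly:
+//
+//	func decodeSomething() (err error) {
+//		defer panicToErr(&err)
+//		decErr("bad descriptor: %x", 0xc1) // panics; recovered into err
+//		return
+//	}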
+ +import ( + "errors" + "fmt" + "math" + "reflect" +) + +var ( + raisePanicAfterRecover = false + debugging = true +) + +func panicValToErr(panicVal interface{}, err *error) { + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + if raisePanicAfterRecover { + panic(panicVal) + } + return +} + +func isEmptyValueDeref(v reflect.Value, deref bool) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return isEmptyValueDeref(v.Elem(), deref) + } else { + return v.IsNil() + } + case reflect.Struct: + // return true if all fields are empty. else return false. + + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. + // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !isEmptyValueDeref(v.Field(i), deref) { + return false + } + } + return true + } + return false +} + +func isEmptyValue(v reflect.Value) bool { + return isEmptyValueDeref(v, true) +} + +func debugf(format string, args ...interface{}) { + if debugging { + if len(format) == 0 || format[len(format)-1] != '\n' { + format = format + "\n" + } + fmt.Printf(format, args...) + } +} + +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir + } + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? + if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack.go new file mode 100644 index 0000000000000..da0500d19223b --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack.go @@ -0,0 +1,816 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. 
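+
+For illustration (bytes derived by hand from encodeInt/encodeUint below;
+treat the exact values as a sketch):
+ - 7 encodes as the single byte 0x07 (positive fixnum)
+ - -3 encodes as the single byte 0xfd (negative fixnum)
+ - 300 encodes as 0xcd 0x01 0x2c (uint16)
+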
+ +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed + +*/ +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. +type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + w encWriter + h *MsgpackHandle +} + +func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. 
+ return false +} + +func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} + +func (e *msgpackEncDriver) encodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) encodeInt(i int64) { + + switch { + case i >= 0: + e.encodeUint(uint64(i)) + case i >= -32: + e.w.writen1(byte(i)) + case i >= math.MinInt8: + e.w.writen2(mpInt8, byte(i)) + case i >= math.MinInt16: + e.w.writen1(mpInt16) + e.w.writeUint16(uint16(i)) + case i >= math.MinInt32: + e.w.writen1(mpInt32) + e.w.writeUint32(uint32(i)) + default: + e.w.writen1(mpInt64) + e.w.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) encodeUint(i uint64) { + switch { + case i <= math.MaxInt8: + e.w.writen1(byte(i)) + case i <= math.MaxUint8: + e.w.writen2(mpUint8, byte(i)) + case i <= math.MaxUint16: + e.w.writen1(mpUint16) + e.w.writeUint16(uint16(i)) + case i <= math.MaxUint32: + e.w.writen1(mpUint32) + e.w.writeUint32(uint32(i)) + default: + e.w.writen1(mpUint64) + e.w.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) encodeFloat32(f float32) { + e.w.writen1(mpFloat) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) encodeFloat64(f float64) { + e.w.writen1(mpDouble) + e.w.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + switch { + case l == 1: + e.w.writen2(mpFixExt1, xtag) + case l == 2: + e.w.writen2(mpFixExt2, xtag) + case l == 4: + e.w.writen2(mpFixExt4, xtag) + case l == 8: + e.w.writen2(mpFixExt8, xtag) + case l == 16: + e.w.writen2(mpFixExt16, xtag) + case l < 256: + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + case l < 65536: + e.w.writen1(mpExt16) + e.w.writeUint16(uint16(l)) + e.w.writen1(xtag) + default: + e.w.writen1(mpExt32) + e.w.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) encodeArrayPreamble(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) encodeMapPreamble(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(s)) + } else { + e.writeContainerLen(msgpackContainerStr, len(s)) + } + if len(s) > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) +} + +func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) + } else { + e.writeContainerLen(msgpackContainerStr, len(bs)) + } + if len(bs) > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + switch { + case ct.hasFixMin && l < ct.fixCutoff: + e.w.writen1(ct.bFixMin | byte(l)) + case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): + e.w.writen2(ct.b8, uint8(l)) + case l < 65536: + e.w.writen1(ct.b16) + e.w.writeUint16(uint16(l)) + default: + e.w.writen1(ct.b32) + e.w.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + r decReader + h *MsgpackHandle + bd byte + bdRead bool + bdType valueType +} + +func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. 
+ return false +} + +func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. +func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + bd := d.bd + + switch bd { + case mpNil: + vt = valueTypeNil + d.bdRead = false + case mpFalse: + vt = valueTypeBool + v = false + case mpTrue: + vt = valueTypeBool + v = true + + case mpFloat: + vt = valueTypeFloat + v = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + vt = valueTypeFloat + v = math.Float64frombits(d.r.readUint64()) + + case mpUint8: + vt = valueTypeUint + v = uint64(d.r.readn1()) + case mpUint16: + vt = valueTypeUint + v = uint64(d.r.readUint16()) + case mpUint32: + vt = valueTypeUint + v = uint64(d.r.readUint32()) + case mpUint64: + vt = valueTypeUint + v = uint64(d.r.readUint64()) + + case mpInt8: + vt = valueTypeInt + v = int64(int8(d.r.readn1())) + case mpInt16: + vt = valueTypeInt + v = int64(int16(d.r.readUint16())) + case mpInt32: + vt = valueTypeInt + v = int64(int32(d.r.readUint32())) + case mpInt64: + vt = valueTypeInt + v = int64(int64(d.r.readUint64())) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + vt = valueTypeInt + v = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + vt = valueTypeInt + v = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + var rvm string + vt = valueTypeString + v = &rvm + } else { + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + } + decodeFurther = true + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + decodeFurther = true + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + vt = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + vt = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + clen := d.readExtLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(clen) + v = &re + vt = valueTypeExt + default: + decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + if !decodeFurther { + d.bdRead = false + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(d.r.readUint16())) + case mpUint32: + i = int64(uint64(d.r.readUint32())) + case mpUint64: + i = int64(d.r.readUint64()) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(d.r.readUint16())) + case mpInt32: + i = int64(int32(d.r.readUint32())) + case mpInt64: + i = int64(d.r.readUint64()) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + decErr("Unhandled single-byte unsigned integer value: %s: %x", 
msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(d.r.readUint16()) + case mpUint32: + ui = uint64(d.r.readUint32()) + case mpUint64: + ui = d.r.readUint64() + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt16: + if i := int64(int16(d.r.readUint16())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt32: + if i := int64(int32(d.r.readUint32())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt64: + if i := int64(d.r.readUint64()); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + default: + decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case mpFloat: + f = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + f = math.Float64frombits(d.r.readUint64()) + default: + f = float64(d.decodeInt(0)) + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. +func (d *msgpackDecDriver) decodeBool() (b bool) { + switch d.bd { + case mpFalse, 0: + // b = false + case mpTrue, 1: + b = true + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) decodeString() (s string) { + clen := d.readContainerLen(msgpackContainerStr) + if clen > 0 { + s = string(d.r.readn(clen)) + } + d.bdRead = false + return +} + +// Callers must check if changed=true (to decide whether to replace the one they have) +func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + // bytes can be decoded from msgpackContainerStr or msgpackContainerBin + var clen int + switch d.bd { + case mpBin8, mpBin16, mpBin32: + clen = d.readContainerLen(msgpackContainerBin) + default: + clen = d.readContainerLen(msgpackContainerStr) + } + // if clen < 0 { + // changed = true + // panic("length cannot be zero. 
this cannot be nil.") + // } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + // Return changed=true if length of passed slice diff from length of bytes in stream + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. +func (d *msgpackDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *msgpackDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + bd := d.bd + switch bd { + case mpNil: + d.bdType = valueTypeNil + case mpFalse, mpTrue: + d.bdType = valueTypeBool + case mpFloat, mpDouble: + d.bdType = valueTypeFloat + case mpUint8, mpUint16, mpUint32, mpUint64: + d.bdType = valueTypeUint + case mpInt8, mpInt16, mpInt32, mpInt64: + d.bdType = valueTypeInt + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + d.bdType = valueTypeInt + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + d.bdType = valueTypeInt + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + d.bdType = valueTypeString + } else { + d.bdType = valueTypeBytes + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + d.bdType = valueTypeBytes + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + d.bdType = valueTypeArray + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + d.bdType = valueTypeMap + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + d.bdType = valueTypeExt + default: + decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + } + return d.bdType +} + +func (d *msgpackDecDriver) tryDecodeAsNil() bool { + if d.bd == mpNil { + d.bdRead = false + return true + } + return false +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + switch { + case bd == mpNil: + clen = -1 // to represent nil + case bd == ct.b8: + clen = int(d.r.readn1()) + case bd == ct.b16: + clen = int(d.r.readUint16()) + case bd == ct.b32: + clen = int(d.r.readUint32()) + case (ct.bFixMin & bd) == ct.bFixMin: + clen = int(ct.bFixMin ^ bd) + default: + decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) readMapLen() int { + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) readArrayLen() int { + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(d.r.readUint16()) + case mpExt32: + clen = int(d.r.readUint32()) + default: + decErr("decoding ext bytes: found unexpected byte: %x", d.bd) + } + return +} + +func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + xbd := d.bd + switch { + case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: + xbs, _ = d.decodeBytes(nil) + case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, + xbd >= mpFixStrMin && xbd <= mpFixStrMax: + 
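+		// str-family descriptors carry the payload like a regular string;
+		// the bytes are returned as-is and no tag byte is read in this branch.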
xbs = []byte(d.decodeString()) + default: + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. + // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool +} + +func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { + return &msgpackEncDriver{w: w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { + return &msgpackDecDriver{r: r, h: h} +} + +func (h *MsgpackHandle) writeExt() bool { + return h.WriteExt +} + +func (h *MsgpackHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. + // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + + if c.cls { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, bs1) + // return + // } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return + } + if b != fia { + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py new file mode 100644 index 0000000000000..e933838c56a89 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python + +# This will create golden files in a directory passed to it. +# A Test calls this internally to create the golden files +# So it can process them (so we don't have to checkin the files). 
+ +import msgpack, msgpackrpc, sys, os, threading + +def get_test_data_list(): + # get list with all primitive types, and a combo type + l0 = [ + -8, + -1616, + -32323232, + -6464646464646464, + 192, + 1616, + 32323232, + 6464646464646464, + 192, + -3232.0, + -6464646464.0, + 3232.0, + 6464646464.0, + False, + True, + None, + "someday", + "", + "bytestring", + 1328176922000002000, + -2206187877999998000, + 0, + -6795364578871345152 + ] + l1 = [ + { "true": True, + "false": False }, + { "true": "True", + "false": False, + "uint16(1616)": 1616 }, + { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], + "int32":32323232, "bool": True, + "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", + "SHORT STRING": "1234567890" }, + { True: "true", 8: False, "false": 0 } + ] + + l = [] + l.extend(l0) + l.append(l0) + l.extend(l1) + return l + +def build_test_data(destdir): + l = get_test_data_list() + for i in range(len(l)): + packer = msgpack.Packer() + serialized = packer.pack(l[i]) + f = open(os.path.join(destdir, str(i) + '.golden'), 'wb') + f.write(serialized) + f.close() + +def doRpcServer(port, stopTimeSec): + class EchoHandler(object): + def Echo123(self, msg1, msg2, msg3): + return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) + def EchoStruct(self, msg): + return ("%s" % msg) + + addr = msgpackrpc.Address('localhost', port) + server = msgpackrpc.Server(EchoHandler()) + server.listen(addr) + # run thread to stop it after stopTimeSec seconds if > 0 + if stopTimeSec > 0: + def myStopRpcServer(): + server.stop() + t = threading.Timer(stopTimeSec, myStopRpcServer) + t.start() + server.start() + +def doRpcClientToPythonSvc(port): + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("Echo123", "A1", "B2", "C3") + print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doRpcClientToGoSvc(port): + # print ">>>> port: ", port, " <<<<<" + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) + print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doMain(args): + if len(args) == 2 and args[0] == "testdata": + build_test_data(args[1]) + elif len(args) == 3 and args[0] == "rpc-server": + doRpcServer(int(args[1]), int(args[2])) + elif len(args) == 2 and args[0] == "rpc-client-python-service": + doRpcClientToPythonSvc(int(args[1])) + elif len(args) == 2 and args[0] == "rpc-client-go-service": + doRpcClientToGoSvc(int(args[1])) + else: + print("Usage: msgpack_test.py " + + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") + +if __name__ == "__main__": + doMain(sys.argv[1:]) + diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/rpc.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/rpc.go new file mode 100644 index 0000000000000..d014dbdcc7d0a --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/rpc.go @@ -0,0 +1,152 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "bufio" + "io" + "net/rpc" + "sync" +) + +// Rpc provides a rpc Server or Client Codec for rpc communication. 
+type Rpc interface {
+	ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
+	ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
+}
+
+// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
+// used by the rpc connection. It accommodates use-cases where the connection
+// should be used by rpc and non-rpc functions, e.g. streaming a file after
+// sending an rpc response.
+type RpcCodecBuffered interface {
+	BufferedReader() *bufio.Reader
+	BufferedWriter() *bufio.Writer
+}
+
+// -------------------------------------
+
+// rpcCodec defines the struct members and common methods.
+type rpcCodec struct {
+	rwc io.ReadWriteCloser
+	dec *Decoder
+	enc *Encoder
+	bw  *bufio.Writer
+	br  *bufio.Reader
+	mu  sync.Mutex
+	cls bool
+}
+
+func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
+	bw := bufio.NewWriter(conn)
+	br := bufio.NewReader(conn)
+	return rpcCodec{
+		rwc: conn,
+		bw:  bw,
+		br:  br,
+		enc: NewEncoder(bw, h),
+		dec: NewDecoder(br, h),
+	}
+}
+
+func (c *rpcCodec) BufferedReader() *bufio.Reader {
+	return c.br
+}
+
+func (c *rpcCodec) BufferedWriter() *bufio.Writer {
+	return c.bw
+}
+
+func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
+	if c.cls {
+		return io.EOF
+	}
+	if err = c.enc.Encode(obj1); err != nil {
+		return
+	}
+	if writeObj2 {
+		if err = c.enc.Encode(obj2); err != nil {
+			return
+		}
+	}
+	if doFlush && c.bw != nil {
+		return c.bw.Flush()
+	}
+	return
+}
+
+func (c *rpcCodec) read(obj interface{}) (err error) {
+	if c.cls {
+		return io.EOF
+	}
+	// If nil is passed in, we should still attempt to read content to nowhere.
+	if obj == nil {
+		var obj2 interface{}
+		return c.dec.Decode(&obj2)
+	}
+	return c.dec.Decode(obj)
+}
+
+func (c *rpcCodec) Close() error {
+	if c.cls {
+		return io.EOF
+	}
+	c.cls = true
+	return c.rwc.Close()
+}
+
+func (c *rpcCodec) ReadResponseBody(body interface{}) error {
+	return c.read(body)
+}
+
+// -------------------------------------
+
+type goRpcCodec struct {
+	rpcCodec
+}
+
+func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+	// Must protect for concurrent access as per API
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+	return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+	return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
+	return c.read(body)
+}
+
+// -------------------------------------
+
+// goRpc is the implementation of Rpc that uses the communication protocol
+// as defined in net/rpc package.
+type goRpc struct{}
+
+// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
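+//
+// Illustrative usage (a minimal sketch; conn is any io.ReadWriteCloser such
+// as a net.Conn, mh is a pre-configured Handle, e.g. a *MsgpackHandle, and
+// args/reply are placeholders):
+//
+//	// server side
+//	go rpc.ServeCodec(GoRpc.ServerCodec(conn, mh))
+//
+//	// client side
+//	client := rpc.NewClientWithCodec(GoRpc.ClientCodec(conn, mh))
+//	defer client.Close()
+//	err := client.Call("Service.Method", args, &reply)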
+var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/simple.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/simple.go new file mode 100644 index 0000000000000..9e4d148a2a179 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/simple.go @@ -0,0 +1,461 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import "math" + +const ( + _ uint8 = iota + simpleVdNil = 1 + simpleVdFalse = 2 + simpleVdTrue = 3 + simpleVdFloat32 = 4 + simpleVdFloat64 = 5 + + // each lasts for 4 (ie n, n+1, n+2, n+3) + simpleVdPosInt = 8 + simpleVdNegInt = 12 + + // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) + simpleVdString = 216 + simpleVdByteArray = 224 + simpleVdArray = 232 + simpleVdMap = 240 + simpleVdExt = 248 +) + +type simpleEncDriver struct { + h *SimpleHandle + w encWriter + //b [8]byte +} + +func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { +} + +func (e *simpleEncDriver) encodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriver) encodeFloat32(f float32) { + e.w.writen1(simpleVdFloat32) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *simpleEncDriver) encodeFloat64(f float64) { + e.w.writen1(simpleVdFloat64) + e.w.writeUint64(math.Float64bits(f)) +} + +func (e *simpleEncDriver) encodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriver) encodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriver) encUint(v uint64, bd uint8) { + switch { + case v <= math.MaxUint8: + e.w.writen2(bd, uint8(v)) + case v <= math.MaxUint16: + e.w.writen1(bd + 1) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.w.writen1(bd + 2) + e.w.writeUint32(uint32(v)) + case v <= math.MaxUint64: + e.w.writen1(bd + 3) + e.w.writeUint64(v) + } +} + +func (e *simpleEncDriver) encLen(bd byte, length int) { + switch { + case length == 0: + e.w.writen1(bd) + case length <= math.MaxUint8: + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) + case length <= math.MaxUint16: + e.w.writen1(bd + 2) + e.w.writeUint16(uint16(length)) + case int64(length) <= math.MaxUint32: + e.w.writen1(bd + 3) + e.w.writeUint32(uint32(length)) + default: + e.w.writen1(bd + 4) + e.w.writeUint64(uint64(length)) + } +} + +func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + e.w.writen1(xtag) +} + +func (e *simpleEncDriver) encodeArrayPreamble(length int) { + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriver) encodeMapPreamble(length int) { + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriver) encodeString(c charEncoding, v string) { + e.encLen(simpleVdString, len(v)) + e.w.writestr(v) +} + +func (e *simpleEncDriver) encodeSymbol(v string) { + 
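+	// the simple format has no distinct symbol type; symbols are written as
+	// plain UTF-8 strings.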
e.encodeString(c_UTF8, v) +} + +func (e *simpleEncDriver) encodeStringBytes(c charEncoding, v []byte) { + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +//------------------------------------ + +type simpleDecDriver struct { + h *SimpleHandle + r decReader + bdRead bool + bdType valueType + bd byte + //b [8]byte +} + +func (d *simpleDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *simpleDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.bd { + case simpleVdNil: + d.bdType = valueTypeNil + case simpleVdTrue, simpleVdFalse: + d.bdType = valueTypeBool + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + d.bdType = valueTypeUint + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + d.bdType = valueTypeInt + case simpleVdFloat32, simpleVdFloat64: + d.bdType = valueTypeFloat + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + d.bdType = valueTypeString + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + d.bdType = valueTypeBytes + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + d.bdType = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + d.bdType = valueTypeArray + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + d.bdType = valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) + } + } + return d.bdType +} + +func (d *simpleDecDriver) tryDecodeAsNil() bool { + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false +} + +func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { +} + +func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + i = int64(ui) + case simpleVdPosInt + 1: + ui = uint64(d.r.readUint16()) + i = int64(ui) + case simpleVdPosInt + 2: + ui = uint64(d.r.readUint32()) + i = int64(ui) + case simpleVdPosInt + 3: + ui = uint64(d.r.readUint64()) + i = int64(ui) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 1: + ui = uint64(d.r.readUint16()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 2: + ui = uint64(d.r.readUint32()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 3: + ui = uint64(d.r.readUint64()) + i = -(int64(ui)) + neg = true + default: + decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + } + // don't do this check, because callers may only want the unsigned value. 
+ // if ui > math.MaxInt64 { + // decErr("decIntAny: Integer out of range for signed int64: %v", ui) + // } + return +} + +func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() + if neg { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + checkOverflow(ui, 0, bitsize) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case simpleVdFloat32: + f = float64(math.Float32frombits(d.r.readUint32())) + case simpleVdFloat64: + f = math.Float64frombits(d.r.readUint64()) + default: + if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { + _, i, _ := d.decIntAny() + f = float64(i) + } else { + decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + } + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *simpleDecDriver) decodeBool() (b bool) { + switch d.bd { + case simpleVdTrue: + b = true + case simpleVdFalse: + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) readMapLen() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) readArrayLen() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) decLen() int { + switch d.bd % 8 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(d.r.readUint16()) + case 3: + ui := uint64(d.r.readUint32()) + checkOverflow(ui, 0, intBitsize) + return int(ui) + case 4: + ui := d.r.readUint64() + checkOverflow(ui, 0, intBitsize) + return int(ui) + } + decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) decodeString() (s string) { + s = string(d.r.readn(d.decLen())) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + if clen := d.decLen(); clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + + switch d.bd { + case simpleVdNil: + vt = valueTypeNil + case simpleVdFalse: + vt = valueTypeBool + v = false + case simpleVdTrue: + vt = valueTypeBool + v = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + vt = valueTypeUint + ui, _, _ := d.decIntAny() + v = ui + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + vt = valueTypeInt + _, i, _ := d.decIntAny() + v = i + case simpleVdFloat32: + vt = valueTypeFloat + v = d.decodeFloat(true) + case simpleVdFloat64: + vt = valueTypeFloat + v = d.decodeFloat(false) + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + vt = valueTypeString + v = d.decodeString() + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + vt = valueTypeExt + l := d.decLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + vt = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + vt = valueTypeMap + decodeFurther = true + default: + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle for a very simple encoding format. +// +// simple is a simplistic codec similar to binc, but not as compact. +// - Encoding of a value is always preceeded by the descriptor byte (bd) +// - True, false, nil are encoded fully in 1 byte (the descriptor) +// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). +// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. +// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) +// - Lenght of containers (strings, bytes, array, map, extensions) +// are encoded in 0, 1, 2, 4 or 8 bytes. +// Zero-length containers have no length encoded. +// For others, the number of bytes is given by pow(2, bd%3) +// - maps are encoded as [bd] [length] [[key][value]]... +// - arrays are encoded as [bd] [length] [value]... +// - extensions are encoded as [bd] [length] [tag] [byte]... +// - strings/bytearrays are encoded as [bd] [length] [byte]... +// +// The full spec will be published soon. 
+type SimpleHandle struct {
+	BasicHandle
+}
+
+func (h *SimpleHandle) newEncDriver(w encWriter) encDriver {
+	return &simpleEncDriver{w: w, h: h}
+}
+
+func (h *SimpleHandle) newDecDriver(r decReader) decDriver {
+	return &simpleDecDriver{r: r, h: h}
+}
+
+func (_ *SimpleHandle) writeExt() bool {
+	return true
+}
+
+func (h *SimpleHandle) getBasicHandle() *BasicHandle {
+	return &h.BasicHandle
+}
+
+var _ decDriver = (*simpleDecDriver)(nil)
+var _ encDriver = (*simpleEncDriver)(nil)
diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/time.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/time.go
new file mode 100644
index 0000000000000..c86d65328d76a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/time.go
@@ -0,0 +1,193 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+	"time"
+)
+
+var (
+	timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+)
+
+// EncodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+// A timestamp is composed of 3 components:
+//
+// - secs: signed integer representing seconds since unix epoch
+// - nsecs: unsigned integer representing fractional seconds as a
+//   nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+// - tz: signed integer representing timezone offset in minutes east of UTC,
+//   and a dst (daylight savings time) flag
+//
+// When encoding a timestamp, the first byte is the descriptor, which
+// defines which components are encoded and how many bytes are used to
+// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+// is not encoded in the byte array explicitly*.
+//
+// Descriptor 8 bits are of the form `A B C DDD EE`:
+//   A:   Is secs component encoded? 1 = true
+//   B:   Is nsecs component encoded? 1 = true
+//   C:   Is tz component encoded? 1 = true
+//   DDD: Number of extra bytes for secs (range 0-7).
+//        If A = 1, secs encoded in DDD+1 bytes.
+//        If A = 0, secs is not encoded, and is assumed to be 0.
+//        If A = 1, then we need at least 1 byte to encode secs.
+//        DDD says the number of extra bytes beyond that 1.
+//        E.g. if DDD=0, then secs is represented in 1 byte.
+//             if DDD=2, then secs is represented in 3 bytes.
+//   EE:  Number of extra bytes for nsecs (range 0-3).
+//        If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+// Following the descriptor bytes, subsequent bytes are:
+//
+//   secs component encoded in `DDD + 1` bytes (if A == 1)
+//   nsecs component encoded in `EE + 1` bytes (if B == 1)
+//   tz component encoded in 2 bytes (if C == 1)
+//
+// secs and nsecs components are integers encoded in a BigEndian
+// 2's-complement encoding format.
+//
+// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+// least significant bit 0 are described below:
+//
+//   Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+//   Bit 15 = have_dst: set to 1 if we set the dst flag.
+//   Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
+//   Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
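+//
+// Worked example (bytes derived by hand from the rules above; treat the
+// exact values as a sketch): time.Unix(1, 500).UTC() has secs=1 (1 byte
+// after sign-extension pruning, so DDD=0), nsecs=500 (2 bytes, so EE=1),
+// and no tz component:
+//
+//	bd  = 0b11000001 = 0xc1            (A=1, B=1, C=0, DDD=000, EE=01)
+//	out = [0xc1, 0x01, 0x01, 0xf4]     (0x01f4 == 500)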
+// +func encodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// DecodeTime decodes a []byte into a time.Time. +func decodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printed. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + //tzname[3] = '-' (TODO: verify. this works here) + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +func timeLocUTCName(tzint int16) string { + if tzint == 0 { + return "UTC" + } + var tzname = []byte("UTC+00:00") + //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. + //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first + var tzhr, tzmin int16 + if tzint < 0 { + tzname[3] = '-' // (TODO: verify. 
this works here) + tzhr, tzmin = -tzint/60, (-tzint)%60 + } else { + tzhr, tzmin = tzint/60, tzint%60 + } + tzname[4] = timeDigits[tzhr/10] + tzname[5] = timeDigits[tzhr%10] + tzname[7] = timeDigits[tzmin/10] + tzname[8] = timeDigits[tzmin%10] + return string(tzname) + //return time.FixedZone(string(tzname), int(tzint)*60) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/LICENSE b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/LICENSE new file mode 100644 index 0000000000000..f0e5c79e18115 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/README.md b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/README.md new file mode 100644 index 0000000000000..5d7180ab9ec57 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/README.md @@ -0,0 +1,11 @@ +raft-boltdb +=========== + +This repository provides the `raftboltdb` package. The package exports the +`BoltStore` which is an implementation of both a `LogStore` and `StableStore`. + +It is meant to be used as a backend for the `raft` [package +here](https://github.com/hashicorp/raft). + +This implementation uses [BoltDB](https://github.com/boltdb/bolt). BoltDB is +a simple key/value store implemented in pure Go, and inspired by LMDB. 
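+
+A minimal usage sketch (the database path below is hypothetical, and error
+handling is elided):
+
+```
+store, err := raftboltdb.NewBoltStore("/tmp/raft.db")
+if err != nil {
+	panic(err)
+}
+defer store.Close()
+
+// The same store can be passed wherever raft expects a LogStore
+// or a StableStore.
+```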
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store.go b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store.go new file mode 100644 index 0000000000000..ab6dd4803e610 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store.go @@ -0,0 +1,231 @@ +package raftboltdb + +import ( + "errors" + + "github.com/boltdb/bolt" + "github.com/hashicorp/raft" +) + +const ( + // Permissions to use on the db file. This is only used if the + // database file does not exist and needs to be created. + dbFileMode = 0600 +) + +var ( + // Bucket names we perform transactions in + dbLogs = []byte("logs") + dbConf = []byte("conf") + + // An error indicating a given key does not exist + ErrKeyNotFound = errors.New("not found") +) + +// BoltStore provides access to BoltDB for Raft to store and retrieve +// log entries. It also provides key/value storage, and can be used as +// a LogStore and StableStore. +type BoltStore struct { + // conn is the underlying handle to the db. + conn *bolt.DB + + // The path to the Bolt database file + path string +} + +// NewBoltStore takes a file path and returns a connected Raft backend. +func NewBoltStore(path string) (*BoltStore, error) { + // Try to connect + handle, err := bolt.Open(path, dbFileMode, nil) + if err != nil { + return nil, err + } + + // Create the new store + store := &BoltStore{ + conn: handle, + path: path, + } + + // Set up our buckets + if err := store.initialize(); err != nil { + store.Close() + return nil, err + } + + return store, nil +} + +// initialize is used to set up all of the buckets. +func (b *BoltStore) initialize() error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Create all the buckets + if _, err := tx.CreateBucketIfNotExists(dbLogs); err != nil { + return err + } + if _, err := tx.CreateBucketIfNotExists(dbConf); err != nil { + return err + } + + return tx.Commit() +} + +// Close is used to gracefully close the DB connection. +func (b *BoltStore) Close() error { + return b.conn.Close() +} + +// FirstIndex returns the first known index from the Raft log. +func (b *BoltStore) FirstIndex() (uint64, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return 0, err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + if first, _ := curs.First(); first == nil { + return 0, nil + } else { + return bytesToUint64(first), nil + } +} + +// LastIndex returns the last known index from the Raft log. +func (b *BoltStore) LastIndex() (uint64, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return 0, err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + if last, _ := curs.Last(); last == nil { + return 0, nil + } else { + return bytesToUint64(last), nil + } +} + +// GetLog is used to retrieve a log from BoltDB at a given index. 
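+//
+// A read sketch (assuming a store returned by NewBoltStore):
+//
+//	var entry raft.Log
+//	if err := store.GetLog(1, &entry); err == raft.ErrLogNotFound {
+//		// no log stored at that index
+//	}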
+func (b *BoltStore) GetLog(idx uint64, log *raft.Log) error { + tx, err := b.conn.Begin(false) + if err != nil { + return err + } + defer tx.Rollback() + + bucket := tx.Bucket(dbLogs) + val := bucket.Get(uint64ToBytes(idx)) + + if val == nil { + return raft.ErrLogNotFound + } + return decodeMsgPack(val, log) +} + +// StoreLog is used to store a single raft log +func (b *BoltStore) StoreLog(log *raft.Log) error { + return b.StoreLogs([]*raft.Log{log}) +} + +// StoreLogs is used to store a set of raft logs +func (b *BoltStore) StoreLogs(logs []*raft.Log) error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + for _, log := range logs { + key := uint64ToBytes(log.Index) + val, err := encodeMsgPack(log) + if err != nil { + return err + } + bucket := tx.Bucket(dbLogs) + if err := bucket.Put(key, val.Bytes()); err != nil { + return err + } + } + + return tx.Commit() +} + +// DeleteRange is used to delete logs within a given range inclusively. +func (b *BoltStore) DeleteRange(min, max uint64) error { + minKey := uint64ToBytes(min) + + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + curs := tx.Bucket(dbLogs).Cursor() + for k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() { + // Handle out-of-range log index + if bytesToUint64(k) > max { + break + } + + // Delete in-range log index + if err := curs.Delete(); err != nil { + return err + } + } + + return tx.Commit() +} + +// Set is used to set a key/value set outside of the raft log +func (b *BoltStore) Set(k, v []byte) error { + tx, err := b.conn.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + bucket := tx.Bucket(dbConf) + if err := bucket.Put(k, v); err != nil { + return err + } + + return tx.Commit() +} + +// Get is used to retrieve a value from the k/v store by key +func (b *BoltStore) Get(k []byte) ([]byte, error) { + tx, err := b.conn.Begin(false) + if err != nil { + return nil, err + } + defer tx.Rollback() + + bucket := tx.Bucket(dbConf) + val := bucket.Get(k) + + if val == nil { + return nil, ErrKeyNotFound + } + return append([]byte{}, val...), nil +} + +// SetUint64 is like Set, but handles uint64 values +func (b *BoltStore) SetUint64(key []byte, val uint64) error { + return b.Set(key, uint64ToBytes(val)) +} + +// GetUint64 is like Get, but handles uint64 values +func (b *BoltStore) GetUint64(key []byte) (uint64, error) { + val, err := b.Get(key) + if err != nil { + return 0, err + } + return bytesToUint64(val), nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/util.go b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/util.go new file mode 100644 index 0000000000000..68dd786b7adeb --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/util.go @@ -0,0 +1,37 @@ +package raftboltdb + +import ( + "bytes" + "encoding/binary" + + "github.com/hashicorp/go-msgpack/codec" +) + +// Decode reverses the encode operation on a byte slice input +func decodeMsgPack(buf []byte, out interface{}) error { + r := bytes.NewBuffer(buf) + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(r, &hd) + return dec.Decode(out) +} + +// Encode writes an encoded object to a new bytes buffer +func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(buf, &hd) + err := enc.Encode(in) + return buf, err +} + +// Converts bytes to an integer +func bytesToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) 
+} + +// Converts a uint to a byte slice +func uint64ToBytes(u uint64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, u) + return buf +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/.gitignore b/Godeps/_workspace/src/github.com/hashicorp/raft/.gitignore new file mode 100644 index 0000000000000..836562412fe8a --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/.travis.yml b/Godeps/_workspace/src/github.com/hashicorp/raft/.travis.yml new file mode 100644 index 0000000000000..5cf041d263a4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.2 + - tip + +install: make deps +script: + - make integ + +notifications: + flowdock: + secure: fZrcf9rlh2IrQrlch1sHkn3YI7SKvjGnAl/zyV5D6NROe1Bbr6d3QRMuCXWWdhJHzjKmXk5rIzbqJhUc0PNF7YjxGNKSzqWMQ56KcvN1k8DzlqxpqkcA3Jbs6fXCWo2fssRtZ7hj/wOP1f5n6cc7kzHDt9dgaYJ6nO2fqNPJiTc= + diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/LICENSE b/Godeps/_workspace/src/github.com/hashicorp/raft/LICENSE new file mode 100644 index 0000000000000..c33dcc7c928c6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/Makefile b/Godeps/_workspace/src/github.com/hashicorp/raft/Makefile
new file mode 100644
index 0000000000000..c61b34a8f6c7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/Makefile
@@ -0,0 +1,17 @@
+DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...)
+
+test:
+	go test -timeout=5s ./...
+
+integ: test
+	INTEG_TESTS=yes go test -timeout=3s -run=Integ ./...
+
+deps:
+	go get -d -v ./...
+	echo $(DEPS) | xargs -n1 go get -d
+
+cov:
+	INTEG_TESTS=yes gocov test github.com/hashicorp/raft | gocov-html > /tmp/coverage.html
+	open /tmp/coverage.html
+
+.PHONY: test cov integ deps
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/README.md b/Godeps/_workspace/src/github.com/hashicorp/raft/README.md
new file mode 100644
index 0000000000000..ecb6c977eea13
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/README.md
@@ -0,0 +1,89 @@
+raft [![Build Status](https://travis-ci.org/hashicorp/raft.png)](https://travis-ci.org/hashicorp/raft)
+====
+
+raft is a [Go](http://www.golang.org) library that manages a replicated
+log and can be used with an FSM to manage replicated state machines. It
+is a library for providing [consensus](http://en.wikipedia.org/wiki/Consensus_(computer_science)).
+
+The use cases for such a library are far-reaching, as replicated state
+machines are a key component of many distributed systems. They enable
+building Consistent, Partition Tolerant (CP) systems, with limited
+fault tolerance as well.
+
+## Building
+
+If you wish to build raft you'll need Go version 1.2+ installed.
+
+Please check your installation with:
+
+```
+go version
+```
+
+## Documentation
+
+For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/raft).
+
+To prevent complications with cgo, the primary backend `MDBStore` is in a separate repository,
+called [raft-mdb](http://github.com/hashicorp/raft-mdb). That is the recommended implementation
+for the `LogStore` and `StableStore`.
+
+A pure Go backend using [BoltDB](https://github.com/boltdb/bolt) is also available, called
+[raft-boltdb](https://github.com/hashicorp/raft-boltdb). It can also be used as a `LogStore`
+and `StableStore`.
+
+## Protocol
+
+raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf).
+
+A high-level overview of the Raft protocol is described below, but for details please read the full
+[Raft paper](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf),
+followed by the raft source. Any questions about the raft protocol should be sent to the
+[raft-dev mailing list](https://groups.google.com/forum/#!forum/raft-dev).
+
+### Protocol Description
+
+Raft nodes are always in one of three states: follower, candidate, or leader. All
+nodes initially start out as a follower. In this state, nodes can accept log entries
+from a leader and cast votes. If no entries are received for some time, nodes
+self-promote to the candidate state. In the candidate state, nodes request votes from
+their peers. If a candidate receives a quorum of votes, then it is promoted to a leader.
+The leader must accept new log entries and replicate them to all the other followers.
+In addition, if stale reads are not acceptable, all queries must also be performed on
+the leader.
+
+Once a cluster has a leader, it is able to accept new log entries. A client can
+request that a leader append a new log entry, which is an opaque binary blob to
+Raft. The leader then writes the entry to durable storage and attempts to replicate it
+to a quorum of followers. Once the log entry is considered *committed*, it can be
+*applied* to a finite state machine. The finite state machine is application-specific,
+and is implemented using an interface.
+
+An obvious question relates to the unbounded nature of a replicated log. Raft provides
+a mechanism by which the current state is snapshotted and the log is compacted. Because
+of the FSM abstraction, restoring the state of the FSM must result in the same state
+as a replay of old logs. This allows Raft to capture the FSM state at a point in time,
+and then remove all the logs that were used to reach that state. This is performed
+automatically without user intervention, prevents unbounded disk usage, and minimizes
+time spent replaying logs.
+
+Lastly, there is the issue of updating the peer set when new servers are joining
+or existing servers are leaving. As long as a quorum of nodes is available, this
+is not an issue, as Raft provides mechanisms to dynamically update the peer set.
+If a quorum of nodes is unavailable, then this becomes a very challenging issue.
+For example, suppose there are only 2 peers, A and B. The quorum size is also
+2, meaning both nodes must agree to commit a log entry. If either A or B fails,
+it is now impossible to reach quorum. This means the cluster is unable to add
+or remove a node, or to commit any additional log entries. This results in
+*unavailability*. At this point, manual intervention would be required to remove
+either A or B and to restart the remaining node in bootstrap mode.
+
+A Raft cluster of 3 nodes can tolerate a single node failure, while a cluster
+of 5 can tolerate 2 node failures. The recommended configuration is to either
+run 3 or 5 raft servers. This maximizes availability without
+greatly sacrificing performance.
+
+In terms of performance, Raft is comparable to Paxos. Assuming stable leadership,
+committing a log entry requires a single round trip to half of the cluster.
+Thus performance is bound by disk I/O and network latency.
+
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/bench/bench.go b/Godeps/_workspace/src/github.com/hashicorp/raft/bench/bench.go
new file mode 100644
index 0000000000000..d7a58f45f4486
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/bench/bench.go
@@ -0,0 +1,171 @@
+package raftbench
+
+// raftbench provides common benchmarking functions which can be used by
+// anything that implements the raft.LogStore and raft.StableStore interfaces.
+// All functions accept these interfaces and perform benchmarking. This
+// makes comparing backend performance easier by sharing the tests.
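+//
+// As a sketch (the newTestStore helper here is hypothetical), a backend's
+// own benchmark can simply delegate to these helpers:
+//
+//	func BenchmarkFirstIndex(b *testing.B) {
+//		store := newTestStore(b) // backend-specific setup
+//		raftbench.FirstIndex(b, store)
+//	}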
+
+import (
+	"github.com/hashicorp/raft"
+	"testing"
+)
+
+func FirstIndex(b *testing.B, store raft.LogStore) {
+	// Create some fake data
+	var logs []*raft.Log
+	for i := 1; i < 10; i++ {
+		logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")})
+	}
+	if err := store.StoreLogs(logs); err != nil {
+		b.Fatalf("err: %s", err)
+	}
+	b.ResetTimer()
+
+	// Run FirstIndex a number of times
+	for n := 0; n < b.N; n++ {
+		store.FirstIndex()
+	}
+}
+
+func LastIndex(b *testing.B, store raft.LogStore) {
+	// Create some fake data
+	var logs []*raft.Log
+	for i := 1; i < 10; i++ {
+		logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")})
+	}
+	if err := store.StoreLogs(logs); err != nil {
+		b.Fatalf("err: %s", err)
+	}
+	b.ResetTimer()
+
+	// Run LastIndex a number of times
+	for n := 0; n < b.N; n++ {
+		store.LastIndex()
+	}
+}
+
+func GetLog(b *testing.B, store raft.LogStore) {
+	// Create some fake data
+	var logs []*raft.Log
+	for i := 1; i < 10; i++ {
+		logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")})
+	}
+	if err := store.StoreLogs(logs); err != nil {
+		b.Fatalf("err: %s", err)
+	}
+	b.ResetTimer()
+
+	// Run GetLog a number of times
+	for n := 0; n < b.N; n++ {
+		if err := store.GetLog(5, new(raft.Log)); err != nil {
+			b.Fatalf("err: %s", err)
+		}
+	}
+}
+
+func StoreLog(b *testing.B, store raft.LogStore) {
+	// Run StoreLog a number of times
+	for n := 0; n < b.N; n++ {
+		log := &raft.Log{Index: uint64(n), Data: []byte("data")}
+		if err := store.StoreLog(log); err != nil {
+			b.Fatalf("err: %s", err)
+		}
+	}
+}
+
+func StoreLogs(b *testing.B, store raft.LogStore) {
+	// Run StoreLogs a number of times. We want to set multiple logs each
+	// run, so we create 3 logs with incrementing indexes for each iteration.
+	for n := 0; n < b.N; n++ {
+		b.StopTimer()
+		offset := 3 * (n + 1)
+		logs := []*raft.Log{
+			&raft.Log{Index: uint64(offset - 2), Data: []byte("data")},
+			&raft.Log{Index: uint64(offset - 1), Data: []byte("data")},
+			&raft.Log{Index: uint64(offset), Data: []byte("data")},
+		}
+		b.StartTimer()
+
+		if err := store.StoreLogs(logs); err != nil {
+			b.Fatalf("err: %s", err)
+		}
+	}
+}
+
+func DeleteRange(b *testing.B, store raft.LogStore) {
+	// Create some fake data. In this case, we create 3 new log entries for each
+	// test case, and separate them by index in multiples of 10. This allows
+	// some room so that we can test deleting ranges with "extra" logs to
+	// ensure we stop going to the database once our max index is hit.
+	var logs []*raft.Log
+	for n := 0; n < b.N; n++ {
+		offset := 10 * n
+		for i := offset; i < offset+3; i++ {
+			logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")})
+		}
+	}
+	if err := store.StoreLogs(logs); err != nil {
+		b.Fatalf("err: %s", err)
+	}
+	b.ResetTimer()
+
+	// Delete a range of the data
+	for n := 0; n < b.N; n++ {
+		offset := 10 * n
+		if err := store.DeleteRange(uint64(offset), uint64(offset+9)); err != nil {
+			b.Fatalf("err: %s", err)
+		}
+	}
+}
+
+func Set(b *testing.B, store raft.StableStore) {
+	// Run Set a number of times
+	for n := 0; n < b.N; n++ {
+		if err := store.Set([]byte{byte(n)}, []byte("val")); err != nil {
+			b.Fatalf("err: %s", err)
+		}
+	}
+}
+
+func Get(b *testing.B, store raft.StableStore) {
+	// Create some fake data
+	for i := 1; i < 10; i++ {
+		if err := store.Set([]byte{byte(i)}, []byte("val")); err != nil {
+			b.Fatalf("err: %s", err)
+		}
+	}
+	b.ResetTimer()
+
+	// Run Get a number of times
+	for n := 0; n < b.N; n++ {
+		if _, err := store.Get([]byte{0x05}); err != nil {
+			b.Fatalf("err: %s", err)
+		}
+	}
+}
+
+func SetUint64(b *testing.B, store raft.StableStore) {
+	// Run SetUint64 a number of times
+	for n := 0; n < b.N; n++ {
+		if err := store.SetUint64([]byte{byte(n)}, uint64(n)); err != nil {
+			b.Fatalf("err: %s", err)
+		}
+	}
+}
+
+func GetUint64(b *testing.B, store raft.StableStore) {
+	// Create some fake data
+	for i := 0; i < 10; i++ {
+		if err := store.SetUint64([]byte{byte(i)}, uint64(i)); err != nil {
+			b.Fatalf("err: %s", err)
+		}
+	}
+	b.ResetTimer()
+
+	// Run GetUint64 a number of times
+	for n := 0; n < b.N; n++ {
+		if _, err := store.GetUint64([]byte{0x05}); err != nil {
+			b.Fatalf("err: %s", err)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/commands.go b/Godeps/_workspace/src/github.com/hashicorp/raft/commands.go
new file mode 100644
index 0000000000000..739775b3541cc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/commands.go
@@ -0,0 +1,84 @@
+package raft
+
+// AppendEntriesRequest is the command used to append entries to the
+// replicated log.
+type AppendEntriesRequest struct {
+	// Provide the current term and leader
+	Term   uint64
+	Leader []byte
+
+	// Provide the previous entries for integrity checking
+	PrevLogEntry uint64
+	PrevLogTerm  uint64
+
+	// New entries to commit
+	Entries []*Log
+
+	// Commit index on the leader
+	LeaderCommitIndex uint64
+}
+
+// AppendEntriesResponse is the response returned from an
+// AppendEntriesRequest.
+type AppendEntriesResponse struct {
+	// Newer term if leader is out of date
+	Term uint64
+
+	// LastLog is a hint to help accelerate rebuilding slow nodes
+	LastLog uint64
+
+	// We may not succeed if we have a conflicting entry
+	Success bool
+
+	// There are scenarios where this request didn't succeed
+	// but there's no need to wait or back off before the next attempt.
+	NoRetryBackoff bool
+}
+
+// RequestVoteRequest is the command used by a candidate to ask a Raft peer
+// for a vote in an election.
+type RequestVoteRequest struct {
+	// Provide the term and our id
+	Term      uint64
+	Candidate []byte
+
+	// Used to ensure safety
+	LastLogIndex uint64
+	LastLogTerm  uint64
+}
+
+// RequestVoteResponse is the response returned from a RequestVoteRequest.
+type RequestVoteResponse struct {
+	// Newer term if leader is out of date
+	Term uint64
+
+	// Return the peers, so that a node can shut down on removal
+	Peers []byte
+
+	// Is the vote granted?
+	Granted bool
+}
+
+// InstallSnapshotRequest is the command sent to a Raft peer to bootstrap its
+// log (and state machine) from a snapshot on another peer.
+type InstallSnapshotRequest struct {
+	Term   uint64
+	Leader []byte
+
+	// These are the last index/term included in the snapshot
+	LastLogIndex uint64
+	LastLogTerm  uint64
+
+	// Peer set in the snapshot
+	Peers []byte
+
+	// Size of the snapshot
+	Size int64
+}
+
+// InstallSnapshotResponse is the response returned from an
+// InstallSnapshotRequest.
+type InstallSnapshotResponse struct {
+	Term    uint64
+	Success bool
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/config.go b/Godeps/_workspace/src/github.com/hashicorp/raft/config.go
new file mode 100644
index 0000000000000..6b3c0b59f0c4a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/config.go
@@ -0,0 +1,134 @@
+package raft
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"time"
+)
+
+// Config provides any necessary configuration for
+// the Raft server.
+type Config struct {
+	// Time in follower state without a leader before we attempt an election.
+	HeartbeatTimeout time.Duration
+
+	// Time in candidate state without a leader before we attempt an election.
+	ElectionTimeout time.Duration
+
+	// Time without an Apply() operation before we heartbeat to ensure
+	// a timely commit. Due to random staggering, may be delayed as much as
+	// 2x this value.
+	CommitTimeout time.Duration
+
+	// MaxAppendEntries controls the maximum number of append entries
+	// to send at once. We want to strike a balance between efficiency
+	// and avoiding waste if the follower is going to reject because of
+	// an inconsistent log.
+	MaxAppendEntries int
+
+	// If we are a member of a cluster, and RemovePeer is invoked for the
+	// local node, then we forget all peers and transition into the follower state.
+	// If ShutdownOnRemove is set, we additionally shut down Raft. Otherwise,
+	// we can become a leader of a cluster containing only this node.
+	ShutdownOnRemove bool
+
+	// DisableBootstrapAfterElect is used to turn off EnableSingleNode
+	// after the node is elected. This is used to prevent self-election
+	// if the node is removed from the Raft cluster via RemovePeer. Setting
+	// it to false will keep the bootstrap mode, allowing the node to self-elect
+	// and potentially bootstrap a separate cluster.
+	DisableBootstrapAfterElect bool
+
+	// TrailingLogs controls how many logs we leave after a snapshot. This is
+	// used so that we can quickly replay logs on a follower instead of being
+	// forced to send an entire snapshot.
+	TrailingLogs uint64
+
+	// SnapshotInterval controls how often we check if we should perform a snapshot.
+	// We randomly stagger between this value and 2x this value to keep the entire
+	// cluster from performing a snapshot at once.
+	SnapshotInterval time.Duration
+
+	// SnapshotThreshold controls how many outstanding logs there must be before
+	// we perform a snapshot. This is to prevent excessive snapshots when we can
+	// just replay a small set of logs.
+	SnapshotThreshold uint64
+
+	// EnableSingleNode allows for a single-node mode of operation. This
+	// is false by default, which prevents a lone node from electing itself
+	// leader.
+	EnableSingleNode bool
+
+	// LeaderLeaseTimeout is used to control how long the "lease" lasts
+	// for being the leader without being able to contact a quorum
+	// of nodes. If we reach this interval without contact, we will
+	// step down as leader.
+	LeaderLeaseTimeout time.Duration
+
+	// StartAsLeader forces Raft to start in the leader state. This should
+	// never be used except for testing purposes, as it can cause a split-brain.
+	StartAsLeader bool
+
+	// NotifyCh is used to provide a channel that will be notified of leadership
+	// changes. Raft will block writing to this channel, so it should either be
+	// buffered or aggressively consumed.
+	NotifyCh chan<- bool
+
+	// LogOutput is used as a sink for logs, unless Logger is specified.
+	// Defaults to os.Stderr.
+	LogOutput io.Writer
+
+	// Logger is a user-provided logger. If nil, a logger writing to LogOutput
+	// is used.
+	Logger *log.Logger
+}
+
+// DefaultConfig returns a Config with usable defaults.
+func DefaultConfig() *Config {
+	return &Config{
+		HeartbeatTimeout:           1000 * time.Millisecond,
+		ElectionTimeout:            1000 * time.Millisecond,
+		CommitTimeout:              50 * time.Millisecond,
+		MaxAppendEntries:           64,
+		ShutdownOnRemove:           true,
+		DisableBootstrapAfterElect: true,
+		TrailingLogs:               10240,
+		SnapshotInterval:           120 * time.Second,
+		SnapshotThreshold:          8192,
+		EnableSingleNode:           false,
+		LeaderLeaseTimeout:         500 * time.Millisecond,
+	}
+}
+
+// ValidateConfig is used to validate that a configuration is sane.
+func ValidateConfig(config *Config) error {
+	if config.HeartbeatTimeout < 5*time.Millisecond {
+		return fmt.Errorf("Heartbeat timeout is too low")
+	}
+	if config.ElectionTimeout < 5*time.Millisecond {
+		return fmt.Errorf("Election timeout is too low")
+	}
+	if config.CommitTimeout < time.Millisecond {
+		return fmt.Errorf("Commit timeout is too low")
+	}
+	if config.MaxAppendEntries <= 0 {
+		return fmt.Errorf("MaxAppendEntries must be positive")
+	}
+	if config.MaxAppendEntries > 1024 {
+		return fmt.Errorf("MaxAppendEntries is too large")
+	}
+	if config.SnapshotInterval < 5*time.Millisecond {
+		return fmt.Errorf("Snapshot interval is too low")
+	}
+	if config.LeaderLeaseTimeout < 5*time.Millisecond {
+		return fmt.Errorf("Leader lease timeout is too low")
+	}
+	if config.LeaderLeaseTimeout > config.HeartbeatTimeout {
+		return fmt.Errorf("Leader lease timeout cannot be larger than heartbeat timeout")
+	}
+	if config.ElectionTimeout < config.HeartbeatTimeout {
+		return fmt.Errorf("Election timeout must be equal to or greater than the heartbeat timeout")
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot.go b/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot.go
new file mode 100644
index 0000000000000..1b4611d559f7c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot.go
@@ -0,0 +1,48 @@
+package raft
+
+import (
+	"fmt"
+	"io"
+)
+
+// DiscardSnapshotStore is used to successfully snapshot while
+// always discarding the snapshot. This is useful when the
+// log should be truncated but no snapshot should be retained.
+// It should never be used in production, and is only
+// suitable for testing.
+type DiscardSnapshotStore struct{}
+
+type DiscardSnapshotSink struct{}
+
+// NewDiscardSnapshotStore is used to create a new DiscardSnapshotStore.
+func NewDiscardSnapshotStore() *DiscardSnapshotStore { + return &DiscardSnapshotStore{} +} + +func (d *DiscardSnapshotStore) Create(index, term uint64, peers []byte) (SnapshotSink, error) { + return &DiscardSnapshotSink{}, nil +} + +func (d *DiscardSnapshotStore) List() ([]*SnapshotMeta, error) { + return nil, nil +} + +func (d *DiscardSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + return nil, nil, fmt.Errorf("open is not supported") +} + +func (d *DiscardSnapshotSink) Write(b []byte) (int, error) { + return len(b), nil +} + +func (d *DiscardSnapshotSink) Close() error { + return nil +} + +func (d *DiscardSnapshotSink) ID() string { + return "discard" +} + +func (d *DiscardSnapshotSink) Cancel() error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot.go b/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot.go new file mode 100644 index 0000000000000..a895537342241 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot.go @@ -0,0 +1,470 @@ +package raft + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +const ( + testPath = "permTest" + snapPath = "snapshots" + metaFilePath = "meta.json" + stateFilePath = "state.bin" + tmpSuffix = ".tmp" +) + +// FileSnapshotStore implements the SnapshotStore interface and allows +// snapshots to be made on the local disk. +type FileSnapshotStore struct { + path string + retain int + logger *log.Logger +} + +type snapMetaSlice []*fileSnapshotMeta + +// FileSnapshotSink implements SnapshotSink with a file. +type FileSnapshotSink struct { + store *FileSnapshotStore + logger *log.Logger + dir string + meta fileSnapshotMeta + + stateFile *os.File + stateHash hash.Hash64 + buffered *bufio.Writer + + closed bool +} + +// fileSnapshotMeta is stored on disk. We also put a CRC +// on disk so that we can verify the snapshot. +type fileSnapshotMeta struct { + SnapshotMeta + CRC []byte +} + +// bufferedFile is returned when we open a snapshot. This way +// reads are buffered and the file still gets closed. +type bufferedFile struct { + bh *bufio.Reader + fh *os.File +} + +func (b *bufferedFile) Read(p []byte) (n int, err error) { + return b.bh.Read(p) +} + +func (b *bufferedFile) Close() error { + return b.fh.Close() +} + +// NewFileSnapshotStoreWithLogger creates a new FileSnapshotStore based +// on a base directory. The `retain` parameter controls how many +// snapshots are retained. Must be at least 1. +func NewFileSnapshotStoreWithLogger(base string, retain int, logger *log.Logger) (*FileSnapshotStore, error) { + if retain < 1 { + return nil, fmt.Errorf("must retain at least one snapshot") + } + if logger == nil { + logger = log.New(os.Stderr, "", log.LstdFlags) + } + + // Ensure our path exists + path := filepath.Join(base, snapPath) + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("snapshot path not accessible: %v", err) + } + + // Setup the store + store := &FileSnapshotStore{ + path: path, + retain: retain, + logger: logger, + } + + // Do a permissions test + if err := store.testPermissions(); err != nil { + return nil, fmt.Errorf("permissions test failed: %v", err) + } + return store, nil +} + +// NewFileSnapshotStore creates a new FileSnapshotStore based +// on a base directory. The `retain` parameter controls how many +// snapshots are retained. Must be at least 1. 
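+// +// A minimal call sketch (editor's addition; the base path is illustrative): +// +// snaps, err := NewFileSnapshotStore("/var/lib/raft", 3, os.Stderr)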
+func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) { + if logOutput == nil { + logOutput = os.Stderr + } + return NewFileSnapshotStoreWithLogger(base, retain, log.New(logOutput, "", log.LstdFlags)) +} + +// testPermissions tries to touch a file in our path to see if it works. +func (f *FileSnapshotStore) testPermissions() error { + path := filepath.Join(f.path, testPath) + fh, err := os.Create(path) + if err != nil { + return err + } + fh.Close() + os.Remove(path) + return nil +} + +// snapshotName generates a name for the snapshot. +func snapshotName(term, index uint64) string { + now := time.Now() + msec := now.UnixNano() / int64(time.Millisecond) + return fmt.Sprintf("%d-%d-%d", term, index, msec) +} + +// Create is used to start a new snapshot +func (f *FileSnapshotStore) Create(index, term uint64, peers []byte) (SnapshotSink, error) { + // Create a new path + name := snapshotName(term, index) + path := filepath.Join(f.path, name+tmpSuffix) + f.logger.Printf("[INFO] snapshot: Creating new snapshot at %s", path) + + // Make the directory + if err := os.MkdirAll(path, 0755); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to make snapshot directory: %v", err) + return nil, err + } + + // Create the sink + sink := &FileSnapshotSink{ + store: f, + logger: f.logger, + dir: path, + meta: fileSnapshotMeta{ + SnapshotMeta: SnapshotMeta{ + ID: name, + Index: index, + Term: term, + Peers: peers, + }, + CRC: nil, + }, + } + + // Write out the meta data + if err := sink.writeMeta(); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) + return nil, err + } + + // Open the state file + statePath := filepath.Join(path, stateFilePath) + fh, err := os.Create(statePath) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to create state file: %v", err) + return nil, err + } + sink.stateFile = fh + + // Create a CRC64 hash + sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Wrap both the hash and file in a MultiWriter with buffering + multi := io.MultiWriter(sink.stateFile, sink.stateHash) + sink.buffered = bufio.NewWriter(multi) + + // Done + return sink, nil +} + +// List returns available snapshots in the store. +func (f *FileSnapshotStore) List() ([]*SnapshotMeta, error) { + // Get the eligible snapshots + snapshots, err := f.getSnapshots() + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) + return nil, err + } + + var snapMeta []*SnapshotMeta + for _, meta := range snapshots { + snapMeta = append(snapMeta, &meta.SnapshotMeta) + if len(snapMeta) == f.retain { + break + } + } + return snapMeta, nil +} + +// getSnapshots returns all the known snapshots. 
+func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) { + // Get the eligible snapshots + snapshots, err := ioutil.ReadDir(f.path) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to scan snapshot dir: %v", err) + return nil, err + } + + // Populate the metadata + var snapMeta []*fileSnapshotMeta + for _, snap := range snapshots { + // Ignore any files + if !snap.IsDir() { + continue + } + + // Ignore any temporary snapshots + dirName := snap.Name() + if strings.HasSuffix(dirName, tmpSuffix) { + f.logger.Printf("[WARN] snapshot: Found temporary snapshot: %v", dirName) + continue + } + + // Try to read the meta data + meta, err := f.readMeta(dirName) + if err != nil { + f.logger.Printf("[WARN] snapshot: Failed to read metadata for %v: %v", dirName, err) + continue + } + + // Append, but only return up to the retain count + snapMeta = append(snapMeta, meta) + } + + // Sort the snapshot, reverse so we get new -> old + sort.Sort(sort.Reverse(snapMetaSlice(snapMeta))) + + return snapMeta, nil +} + +// readMeta is used to read the meta data for a given named backup +func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) { + // Open the meta file + metaPath := filepath.Join(f.path, name, metaFilePath) + fh, err := os.Open(metaPath) + if err != nil { + return nil, err + } + defer fh.Close() + + // Buffer the file IO + buffered := bufio.NewReader(fh) + + // Read in the JSON + meta := &fileSnapshotMeta{} + dec := json.NewDecoder(buffered) + if err := dec.Decode(meta); err != nil { + return nil, err + } + return meta, nil +} + +// Open takes a snapshot ID and returns a ReadCloser for that snapshot. +func (f *FileSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + // Get the metadata + meta, err := f.readMeta(id) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get meta data to open snapshot: %v", err) + return nil, nil, err + } + + // Open the state file + statePath := filepath.Join(f.path, id, stateFilePath) + fh, err := os.Open(statePath) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to open state file: %v", err) + return nil, nil, err + } + + // Create a CRC64 hash + stateHash := crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Compute the hash + _, err = io.Copy(stateHash, fh) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to read state file: %v", err) + fh.Close() + return nil, nil, err + } + + // Verify the hash + computed := stateHash.Sum(nil) + if bytes.Compare(meta.CRC, computed) != 0 { + f.logger.Printf("[ERR] snapshot: CRC checksum failed (stored: %v computed: %v)", + meta.CRC, computed) + fh.Close() + return nil, nil, fmt.Errorf("CRC mismatch") + } + + // Seek to the start + if _, err := fh.Seek(0, 0); err != nil { + f.logger.Printf("[ERR] snapshot: State file seek failed: %v", err) + fh.Close() + return nil, nil, err + } + + // Return a buffered file + buffered := &bufferedFile{ + bh: bufio.NewReader(fh), + fh: fh, + } + + return &meta.SnapshotMeta, buffered, nil +} + +// ReapSnapshots reaps any snapshots beyond the retain count. 
+func (f *FileSnapshotStore) ReapSnapshots() error { + snapshots, err := f.getSnapshots() + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) + return err + } + + for i := f.retain; i < len(snapshots); i++ { + path := filepath.Join(f.path, snapshots[i].ID) + f.logger.Printf("[INFO] snapshot: reaping snapshot %v", path) + if err := os.RemoveAll(path); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to reap snapshot %v: %v", path, err) + return err + } + } + return nil +} + +// ID returns the ID of the snapshot; it can be used with Open() +// after the snapshot is finalized. +func (s *FileSnapshotSink) ID() string { + return s.meta.ID +} + +// Write is used to append to the state file. We write to the +// buffered IO object to reduce the number of context switches. +func (s *FileSnapshotSink) Write(b []byte) (int, error) { + return s.buffered.Write(b) +} + +// Close is used to indicate a successful end. +func (s *FileSnapshotSink) Close() error { + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + // Close the open handles + if err := s.finalize(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) + return err + } + + // Write out the meta data + if err := s.writeMeta(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) + return err + } + + // Move the directory into place + newPath := strings.TrimSuffix(s.dir, tmpSuffix) + if err := os.Rename(s.dir, newPath); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to move snapshot into place: %v", err) + return err + } + + // Reap any old snapshots + s.store.ReapSnapshots() + return nil +} + +// Cancel is used to indicate an unsuccessful end. +func (s *FileSnapshotSink) Cancel() error { + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + // Close the open handles + if err := s.finalize(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) + return err + } + + // Attempt to remove all artifacts + return os.RemoveAll(s.dir) +} + +// finalize is used to close all of our resources. +func (s *FileSnapshotSink) finalize() error { + // Flush any remaining data + if err := s.buffered.Flush(); err != nil { + return err + } + + // Get the file size + stat, statErr := s.stateFile.Stat() + + // Close the file + if err := s.stateFile.Close(); err != nil { + return err + } + + // Set the file size, check after we close + if statErr != nil { + return statErr + } + s.meta.Size = stat.Size() + + // Set the CRC + s.meta.CRC = s.stateHash.Sum(nil) + return nil +} + +// writeMeta is used to write out the metadata we have. +func (s *FileSnapshotSink) writeMeta() error { + // Open the meta file + metaPath := filepath.Join(s.dir, metaFilePath) + fh, err := os.Create(metaPath) + if err != nil { + return err + } + defer fh.Close() + + // Buffer the file IO + buffered := bufio.NewWriter(fh) + defer buffered.Flush() + + // Write out as JSON + enc := json.NewEncoder(buffered) + if err := enc.Encode(&s.meta); err != nil { + return err + } + return nil +}
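+ +// exampleSnapshotWrite is an editor's sketch, not part of the vendored file: it walks the sink lifecycle defined above. Create opens a .tmp directory, Write streams state through the buffered CRC64 writer, and Close finalizes the CRC, writes meta.json, renames the directory into place, and reaps old snapshots. The index and term values are illustrative. +func exampleSnapshotWrite(store *FileSnapshotStore, state []byte) error { + sink, err := store.Create(10, 3, nil) // index 10, term 3, no peer set + if err != nil { + return err + } + if _, err := sink.Write(state); err != nil { + sink.Cancel() // removes the partial .tmp directory + return err + } + return sink.Close() +} + +// Implement the sort interface for []*fileSnapshotMeta.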
+func (s snapMetaSlice) Len() int { + return len(s) +} + +func (s snapMetaSlice) Less(i, j int) bool { + if s[i].Term != s[j].Term { + return s[i].Term < s[j].Term + } + if s[i].Index != s[j].Index { + return s[i].Index < s[j].Index + } + return s[i].ID < s[j].ID +} + +func (s snapMetaSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/fsm.go b/Godeps/_workspace/src/github.com/hashicorp/raft/fsm.go new file mode 100644 index 0000000000000..ea8ab548dbcb0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/fsm.go @@ -0,0 +1,37 @@ +package raft + +import ( + "io" +) + +// FSM provides an interface that can be implemented by +// clients to make use of the replicated log. +type FSM interface { + // Apply is invoked once a log entry is committed. + Apply(*Log) interface{} + + // Snapshot is used to support log compaction. This call should + // return an FSMSnapshot which can be used to save a point-in-time + // snapshot of the FSM. Apply and Snapshot are not called in multiple + // threads, but Apply will be called concurrently with Persist. This means + // the FSM should be implemented in a fashion that allows for concurrent + // updates while a snapshot is happening. + Snapshot() (FSMSnapshot, error) + + // Restore is used to restore an FSM from a snapshot. It is not called + // concurrently with any other command. The FSM must discard all previous + // state. + Restore(io.ReadCloser) error +} + +// FSMSnapshot is returned by an FSM in response to a Snapshot. +// It must be safe to invoke FSMSnapshot methods with concurrent +// calls to Apply. +type FSMSnapshot interface { + // Persist should dump all necessary state to the WriteCloser 'sink', + // and call sink.Close() when finished or call sink.Cancel() on error. + Persist(sink SnapshotSink) error + + // Release is invoked when we are finished with the snapshot. + Release() +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/future.go b/Godeps/_workspace/src/github.com/hashicorp/raft/future.go new file mode 100644 index 0000000000000..854e1ac927be5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/future.go @@ -0,0 +1,182 @@ +package raft + +import ( + "sync" + "time" +) + +// Future is used to represent an action that may occur in the future. +type Future interface { + Error() error +} + +// ApplyFuture is used for Apply() and can return the FSM response. +type ApplyFuture interface { + Future + Response() interface{} + Index() uint64 +} + +// errorFuture is used to return a static error. +type errorFuture struct { + err error +} + +func (e errorFuture) Error() error { + return e.err +} + +func (e errorFuture) Response() interface{} { + return nil +} + +func (e errorFuture) Index() uint64 { + return 0 +} + +// deferError can be embedded to allow a future +// to provide an error in the future. +type deferError struct { + err error + errCh chan error + responded bool +} + +func (d *deferError) init() { + d.errCh = make(chan error, 1) +} + +func (d *deferError) Error() error { + if d.err != nil { + return d.err + } + if d.errCh == nil { + panic("waiting for response on nil channel") + } + d.err = <-d.errCh + return d.err +} + +func (d *deferError) respond(err error) { + if d.errCh == nil { + return + } + if d.responded { + return + } + d.errCh <- err + close(d.errCh) + d.responded = true +}
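+ +// exampleApply is an editor's sketch, not part of the vendored file: it shows how a caller typically consumes an ApplyFuture from the package's Apply method (documented later in this file). Error() blocks until the log is committed and applied (or fails), and Response() then carries the FSM's return value. The timeout is illustrative. +func exampleApply(r *Raft, cmd []byte) (interface{}, error) { + future := r.Apply(cmd, 5*time.Second) + if err := future.Error(); err != nil { + return nil, err // e.g. ErrNotLeader or ErrEnqueueTimeout + } + return future.Response(), nil +} + +// logFuture is used to apply a log entry and wait until +// the log is considered committed.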
+type logFuture struct { + deferError + log Log + policy quorumPolicy + response interface{} + dispatch time.Time +} + +func (l *logFuture) Response() interface{} { + return l.response +} + +func (l *logFuture) Index() uint64 { + return l.log.Index +} + +type peerFuture struct { + deferError + peers []string +} + +type shutdownFuture struct { + raft *Raft +} + +func (s *shutdownFuture) Error() error { + for s.raft.getRoutines() > 0 { + time.Sleep(5 * time.Millisecond) + } + return nil +} + +// snapshotFuture is used for waiting on a snapshot to complete. +type snapshotFuture struct { + deferError +} + +// reqSnapshotFuture is used for requesting a snapshot start. +// It is only used internally. +type reqSnapshotFuture struct { + deferError + + // snapshot details provided by the FSM runner before responding + index uint64 + term uint64 + peers []string + snapshot FSMSnapshot +} + +// restoreFuture is used for requesting an FSM to perform a +// snapshot restore. Used internally only. +type restoreFuture struct { + deferError + ID string +} + +// verifyFuture is used to verify the current node is still +// the leader. This is to prevent a stale read. +type verifyFuture struct { + deferError + notifyCh chan *verifyFuture + quorumSize int + votes int + voteLock sync.Mutex +} + +// vote is used to respond to a verifyFuture. +// This may block when responding on the notifyCh. +func (v *verifyFuture) vote(leader bool) { + v.voteLock.Lock() + defer v.voteLock.Unlock() + + // Guard against having notified already + if v.notifyCh == nil { + return + } + + if leader { + v.votes++ + if v.votes >= v.quorumSize { + v.notifyCh <- v + v.notifyCh = nil + } + } else { + v.notifyCh <- v + v.notifyCh = nil + } +} + +// appendFuture is used for waiting on a pipelined append +// entries RPC. +type appendFuture struct { + deferError + start time.Time + args *AppendEntriesRequest + resp *AppendEntriesResponse +} + +func (a *appendFuture) Start() time.Time { + return a.start +} + +func (a *appendFuture) Request() *AppendEntriesRequest { + return a.args +} + +func (a *appendFuture) Response() *AppendEntriesResponse { + return a.resp +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inflight.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inflight.go new file mode 100644 index 0000000000000..7014ff50394dd --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/inflight.go @@ -0,0 +1,213 @@ +package raft + +import ( + "container/list" + "sync" +) + +// QuorumPolicy allows individual logFutures to have different +// commitment rules while still using the inflight mechanism. +type quorumPolicy interface { + // Checks if a commit from a given peer is enough to + // satisfy the commitment rules + Commit() bool + + // Checks if a commit is committed + IsCommitted() bool +} + +// MajorityQuorum is used by Apply transactions and requires +// a simple majority of nodes. +type majorityQuorum struct { + count int + votesNeeded int +} + +func newMajorityQuorum(clusterSize int) *majorityQuorum { + votesNeeded := (clusterSize / 2) + 1 + return &majorityQuorum{count: 0, votesNeeded: votesNeeded} +} + +func (m *majorityQuorum) Commit() bool { + m.count++ + return m.count >= m.votesNeeded +} + +func (m *majorityQuorum) IsCommitted() bool { + return m.count >= m.votesNeeded +} + +// Inflight is used to track operations that are still in-flight. 
+type inflight struct { + sync.Mutex + committed *list.List + commitCh chan struct{} + minCommit uint64 + maxCommit uint64 + operations map[uint64]*logFuture + stopCh chan struct{} +} + +// newInflight returns an inflight struct that notifies +// the provided channel when logs are finished committing. +func newInflight(commitCh chan struct{}) *inflight { + return &inflight{ + committed: list.New(), + commitCh: commitCh, + minCommit: 0, + maxCommit: 0, + operations: make(map[uint64]*logFuture), + stopCh: make(chan struct{}), + } +} + +// Start is used to mark a logFuture as being inflight. It +// also commits the entry, as it is assumed the leader is +// starting. +func (i *inflight) Start(l *logFuture) { + i.Lock() + defer i.Unlock() + i.start(l) +} + +// StartAll is used to mark a list of logFutures as being +// inflight. It also commits each entry as the leader is +// assumed to be starting. +func (i *inflight) StartAll(logs []*logFuture) { + i.Lock() + defer i.Unlock() + for _, l := range logs { + i.start(l) + } +} + +// start is used to mark a single entry as inflight, +// must be invoked with the lock held. +func (i *inflight) start(l *logFuture) { + idx := l.log.Index + i.operations[idx] = l + + if idx > i.maxCommit { + i.maxCommit = idx + } + if i.minCommit == 0 { + i.minCommit = idx + } + i.commit(idx) +} + +// Cancel is used to cancel all in-flight operations. +// This is done when the leader steps down, and all futures +// are sent the given error. +func (i *inflight) Cancel(err error) { + // Close the channel first to unblock any pending commits + close(i.stopCh) + + // Lock after close to avoid deadlock + i.Lock() + defer i.Unlock() + + // Respond to all inflight operations + for _, op := range i.operations { + op.respond(err) + } + + // Clear all the committed but not processed + for e := i.committed.Front(); e != nil; e = e.Next() { + e.Value.(*logFuture).respond(err) + } + + // Clear the map + i.operations = make(map[uint64]*logFuture) + + // Clear the list of committed + i.committed = list.New() + + // Close the commitCh + close(i.commitCh) + + // Reset indexes + i.minCommit = 0 + i.maxCommit = 0 +} + +// Committed returns all the committed operations in order. +func (i *inflight) Committed() (l *list.List) { + i.Lock() + l, i.committed = i.committed, list.New() + i.Unlock() + return l +} + +// Commit is used by leader replication routines to indicate that +// a follower was finished committing a log to disk. +func (i *inflight) Commit(index uint64) { + i.Lock() + defer i.Unlock() + i.commit(index) +} + +// CommitRange is used to commit a range of indexes inclusively. +// It is optimized to avoid commits for indexes that are not tracked. +func (i *inflight) CommitRange(minIndex, maxIndex uint64) { + i.Lock() + defer i.Unlock() + + // Update the minimum index + minIndex = max(i.minCommit, minIndex) + + // Commit each index + for idx := minIndex; idx <= maxIndex; idx++ { + i.commit(idx) + } +} + +// commit is used to commit a single index. Must be called with the lock held. +func (i *inflight) commit(index uint64) { + op, ok := i.operations[index] + if !ok { + // Ignore if not in the map, as it may be committed already + return + } + + // Check if we've satisfied the commit + if !op.policy.Commit() { + return + } + + // Cannot commit if this is not the minimum inflight. This can happen + // if the quorum size changes, meaning a previous commit requires a larger + // quorum than this commit.
We MUST block until the previous log is committed, + // otherwise logs will be applied out of order. + if index != i.minCommit { + return + } + +NOTIFY: + // Add the operation to the committed list + i.committed.PushBack(op) + + // Stop tracking since it is committed + delete(i.operations, index) + + // Update the indexes + if index == i.maxCommit { + i.minCommit = 0 + i.maxCommit = 0 + + } else { + i.minCommit++ + } + + // Check if the next in-flight operation is ready + if i.minCommit != 0 { + op = i.operations[i.minCommit] + if op.policy.IsCommitted() { + index = i.minCommit + goto NOTIFY + } + } + + // Async notify of ready operations + asyncNotifyCh(i.commitCh) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_store.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_store.go new file mode 100644 index 0000000000000..6e4dfd020f7b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_store.go @@ -0,0 +1,116 @@ +package raft + +import ( + "sync" +) + +// InmemStore implements the LogStore and StableStore interfaces. +// It should NOT EVER be used for production. It is used only for +// unit tests. Use the MDBStore implementation instead. +type InmemStore struct { + l sync.RWMutex + lowIndex uint64 + highIndex uint64 + logs map[uint64]*Log + kv map[string][]byte + kvInt map[string]uint64 +} + +// NewInmemStore returns a new in-memory backend. Do not ever +// use for production. Only for testing. +func NewInmemStore() *InmemStore { + i := &InmemStore{ + logs: make(map[uint64]*Log), + kv: make(map[string][]byte), + kvInt: make(map[string]uint64), + } + return i +} + +// FirstIndex implements the LogStore interface. +func (i *InmemStore) FirstIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.lowIndex, nil +} + +// LastIndex implements the LogStore interface. +func (i *InmemStore) LastIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.highIndex, nil +} + +// GetLog implements the LogStore interface. +func (i *InmemStore) GetLog(index uint64, log *Log) error { + i.l.RLock() + defer i.l.RUnlock() + l, ok := i.logs[index] + if !ok { + return ErrLogNotFound + } + *log = *l + return nil +} + +// StoreLog implements the LogStore interface. +func (i *InmemStore) StoreLog(log *Log) error { + return i.StoreLogs([]*Log{log}) +} + +// StoreLogs implements the LogStore interface. +func (i *InmemStore) StoreLogs(logs []*Log) error { + i.l.Lock() + defer i.l.Unlock() + for _, l := range logs { + i.logs[l.Index] = l + if i.lowIndex == 0 { + i.lowIndex = l.Index + } + if l.Index > i.highIndex { + i.highIndex = l.Index + } + } + return nil +} + +// DeleteRange implements the LogStore interface. +func (i *InmemStore) DeleteRange(min, max uint64) error { + i.l.Lock() + defer i.l.Unlock() + for j := min; j <= max; j++ { + delete(i.logs, j) + } + i.lowIndex = max + 1 + return nil +} + +// Set implements the StableStore interface. +func (i *InmemStore) Set(key []byte, val []byte) error { + i.l.Lock() + defer i.l.Unlock() + i.kv[string(key)] = val + return nil +} + +// Get implements the StableStore interface. +func (i *InmemStore) Get(key []byte) ([]byte, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.kv[string(key)], nil +} + +// SetUint64 implements the StableStore interface. +func (i *InmemStore) SetUint64(key []byte, val uint64) error { + i.l.Lock() + defer i.l.Unlock() + i.kvInt[string(key)] = val + return nil +}
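+ +// exampleStores is an editor's sketch, not part of the vendored file: it shows InmemStore serving as both the LogStore and the StableStore in a test. The key and values are illustrative. +func exampleStores() error { + store := NewInmemStore() + if err := store.StoreLog(&Log{Index: 1, Term: 1, Type: LogCommand, Data: []byte("x")}); err != nil { + return err + } + return store.SetUint64([]byte("CurrentTerm"), 1) +} + +// GetUint64 implements the StableStore interface.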
+func (i *InmemStore) GetUint64(key []byte) (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.kvInt[string(key)], nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport.go new file mode 100644 index 0000000000000..994d06d8fad65 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport.go @@ -0,0 +1,315 @@ +package raft + +import ( + "fmt" + "io" + "sync" + "time" +) + +// NewInmemAddr returns a new in-memory addr with +// a randomly generated UUID as the ID. +func NewInmemAddr() string { + return generateUUID() +} + +// inmemPipeline is used to pipeline requests for the in-mem transport. +type inmemPipeline struct { + trans *InmemTransport + peer *InmemTransport + peerAddr string + + doneCh chan AppendFuture + inprogressCh chan *inmemPipelineInflight + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +type inmemPipelineInflight struct { + future *appendFuture + respCh <-chan RPCResponse +} + +// InmemTransport implements the Transport interface, to allow Raft to be +// tested in-memory without going over a network. +type InmemTransport struct { + sync.RWMutex + consumerCh chan RPC + localAddr string + peers map[string]*InmemTransport + pipelines []*inmemPipeline + timeout time.Duration +} + +// NewInmemTransport is used to initialize a new transport +// and generate a random local address. +func NewInmemTransport() (string, *InmemTransport) { + addr := NewInmemAddr() + trans := &InmemTransport{ + consumerCh: make(chan RPC, 16), + localAddr: addr, + peers: make(map[string]*InmemTransport), + timeout: 50 * time.Millisecond, + } + return addr, trans +} + +// SetHeartbeatHandler is used to set an optional fast-path for +// heartbeats, not supported for this transport. +func (i *InmemTransport) SetHeartbeatHandler(cb func(RPC)) { +} + +// Consumer implements the Transport interface. +func (i *InmemTransport) Consumer() <-chan RPC { + return i.consumerCh +} + +// LocalAddr implements the Transport interface. +func (i *InmemTransport) LocalAddr() string { + return i.localAddr +} + +// AppendEntriesPipeline returns an interface that can be used to pipeline +// AppendEntries requests. +func (i *InmemTransport) AppendEntriesPipeline(target string) (AppendPipeline, error) { + i.RLock() + peer, ok := i.peers[target] + i.RUnlock() + if !ok { + return nil, fmt.Errorf("failed to connect to peer: %v", target) + } + pipeline := newInmemPipeline(i, peer, target) + i.Lock() + i.pipelines = append(i.pipelines, pipeline) + i.Unlock() + return pipeline, nil +} + +// AppendEntries implements the Transport interface. +func (i *InmemTransport) AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*AppendEntriesResponse) + *resp = *out + return nil +} + +// RequestVote implements the Transport interface. +func (i *InmemTransport) RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*RequestVoteResponse) + *resp = *out + return nil +}
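+ +// exampleConnectPair is an editor's sketch, not part of the vendored file: two in-memory transports must be explicitly connected in both directions before RPCs can flow between them. +func exampleConnectPair() (*InmemTransport, *InmemTransport) { + addr1, t1 := NewInmemTransport() + addr2, t2 := NewInmemTransport() + t1.Connect(addr2, t2) // t1 can now route RPCs to t2 + t2.Connect(addr1, t1) + return t1, t2 +} + +// InstallSnapshot implements the Transport interface.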
+func (i *InmemTransport) InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { + rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*InstallSnapshotResponse) + *resp = *out + return nil +} + +func (i *InmemTransport) makeRPC(target string, args interface{}, r io.Reader, timeout time.Duration) (rpcResp RPCResponse, err error) { + i.RLock() + peer, ok := i.peers[target] + i.RUnlock() + + if !ok { + err = fmt.Errorf("failed to connect to peer: %v", target) + return + } + + // Send the RPC over + respCh := make(chan RPCResponse) + peer.consumerCh <- RPC{ + Command: args, + Reader: r, + RespChan: respCh, + } + + // Wait for a response + select { + case rpcResp = <-respCh: + if rpcResp.Error != nil { + err = rpcResp.Error + } + case <-time.After(timeout): + err = fmt.Errorf("command timed out") + } + return +} + +// EncodePeer implements the Transport interface. It uses the UUID as the +// address directly. +func (i *InmemTransport) EncodePeer(p string) []byte { + return []byte(p) +} + +// DecodePeer implements the Transport interface. It wraps the UUID in an +// InmemAddr. +func (i *InmemTransport) DecodePeer(buf []byte) string { + return string(buf) +} + +// Connect is used to connect this transport to another transport for +// a given peer name. This allows for local routing. +func (i *InmemTransport) Connect(peer string, trans *InmemTransport) { + i.Lock() + defer i.Unlock() + i.peers[peer] = trans +} + +// Disconnect is used to remove the ability to route to a given peer. +func (i *InmemTransport) Disconnect(peer string) { + i.Lock() + defer i.Unlock() + delete(i.peers, peer) + + // Disconnect any pipelines + n := len(i.pipelines) + for idx := 0; idx < n; idx++ { + if i.pipelines[idx].peerAddr == peer { + i.pipelines[idx].Close() + i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil + idx-- + n-- + } + } + i.pipelines = i.pipelines[:n] +} + +// DisconnectAll is used to remove all routes to peers. 
+func (i *InmemTransport) DisconnectAll() { + i.Lock() + defer i.Unlock() + i.peers = make(map[string]*InmemTransport) + + // Handle pipelines + for _, pipeline := range i.pipelines { + pipeline.Close() + } + i.pipelines = nil +} + +func newInmemPipeline(trans *InmemTransport, peer *InmemTransport, addr string) *inmemPipeline { + i := &inmemPipeline{ + trans: trans, + peer: peer, + peerAddr: addr, + doneCh: make(chan AppendFuture, 16), + inprogressCh: make(chan *inmemPipelineInflight, 16), + shutdownCh: make(chan struct{}), + } + go i.decodeResponses() + return i +} + +func (i *inmemPipeline) decodeResponses() { + timeout := i.trans.timeout + for { + select { + case inp := <-i.inprogressCh: + var timeoutCh <-chan time.Time + if timeout > 0 { + timeoutCh = time.After(timeout) + } + + select { + case rpcResp := <-inp.respCh: + // Copy the result back + *inp.future.resp = *rpcResp.Response.(*AppendEntriesResponse) + inp.future.respond(rpcResp.Error) + + select { + case i.doneCh <- inp.future: + case <-i.shutdownCh: + return + } + + case <-timeoutCh: + inp.future.respond(fmt.Errorf("command timed out")) + select { + case i.doneCh <- inp.future: + case <-i.shutdownCh: + return + } + + case <-i.shutdownCh: + return + } + case <-i.shutdownCh: + return + } + } +} + +func (i *inmemPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { + // Create a new future + future := &appendFuture{ + start: time.Now(), + args: args, + resp: resp, + } + future.init() + + // Handle a timeout + var timeout <-chan time.Time + if i.trans.timeout > 0 { + timeout = time.After(i.trans.timeout) + } + + // Send the RPC over + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + Command: args, + RespChan: respCh, + } + select { + case i.peer.consumerCh <- rpc: + case <-timeout: + return nil, fmt.Errorf("command enqueue timeout") + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } + + // Send to be decoded + select { + case i.inprogressCh <- &inmemPipelineInflight{future, respCh}: + return future, nil + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } +} + +func (i *inmemPipeline) Consumer() <-chan AppendFuture { + return i.doneCh +} + +func (i *inmemPipeline) Close() error { + i.shutdownLock.Lock() + defer i.shutdownLock.Unlock() + if i.shutdown { + return nil + } + + i.shutdown = true + close(i.shutdownCh) + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/log.go b/Godeps/_workspace/src/github.com/hashicorp/raft/log.go new file mode 100644 index 0000000000000..a8c5a40eabf05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/log.go @@ -0,0 +1,60 @@ +package raft + +// LogType describes various types of log entries. +type LogType uint8 + +const ( + // LogCommand is applied to a user FSM. + LogCommand LogType = iota + + // LogNoop is used to assert leadership. + LogNoop + + // LogAddPeer is used to add a new peer. + LogAddPeer + + // LogRemovePeer is used to remove an existing peer. + LogRemovePeer + + // LogBarrier is used to ensure all preceding operations have been + // applied to the FSM. It is similar to LogNoop, but instead of returning + // once committed, it only returns once the FSM manager acks it. Otherwise + // it is possible there are operations committed but not yet applied to + // the FSM. + LogBarrier +) + +// Log entries are replicated to all members of the Raft cluster +// and form the heart of the replicated state machine. 
+type Log struct { + Index uint64 + Term uint64 + Type LogType + Data []byte + + // peer is not exported since it is not transmitted, only used + // internally to construct the Data field. + peer string +} + +// LogStore is used to provide an interface for storing +// and retrieving logs in a durable fashion. +type LogStore interface { + // Returns the first index written. 0 for no entries. + FirstIndex() (uint64, error) + + // Returns the last index written. 0 for no entries. + LastIndex() (uint64, error) + + // Gets a log entry at a given index. + GetLog(index uint64, log *Log) error + + // Stores a log entry. + StoreLog(log *Log) error + + // Stores multiple log entries. + StoreLogs(logs []*Log) error + + // Deletes a range of log entries. The range is inclusive. + DeleteRange(min, max uint64) error +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache.go b/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache.go new file mode 100644 index 0000000000000..952e98c22826f --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache.go @@ -0,0 +1,79 @@ +package raft + +import ( + "fmt" + "sync" +) + +// LogCache wraps any LogStore implementation to provide an +// in-memory ring buffer. This is used to cache access to +// the recently written entries. For implementations that do not +// cache themselves, this can provide a substantial boost by +// avoiding disk I/O on recent entries. +type LogCache struct { + store LogStore + + cache []*Log + l sync.RWMutex +} + +// NewLogCache is used to create a new LogCache with the +// given capacity and backend store. +func NewLogCache(capacity int, store LogStore) (*LogCache, error) { + if capacity <= 0 { + return nil, fmt.Errorf("capacity must be positive") + } + c := &LogCache{ + store: store, + cache: make([]*Log, capacity), + } + return c, nil +} + +func (c *LogCache) GetLog(idx uint64, log *Log) error { + // Check the buffer for an entry + c.l.RLock() + cached := c.cache[idx%uint64(len(c.cache))] + c.l.RUnlock() + + // Check if entry is valid + if cached != nil && cached.Index == idx { + *log = *cached + return nil + } + + // Forward request on cache miss + return c.store.GetLog(idx, log) +} + +func (c *LogCache) StoreLog(log *Log) error { + return c.StoreLogs([]*Log{log}) +} + +func (c *LogCache) StoreLogs(logs []*Log) error { + // Insert the logs into the ring buffer + c.l.Lock() + for _, l := range logs { + c.cache[l.Index%uint64(len(c.cache))] = l + } + c.l.Unlock() + + return c.store.StoreLogs(logs) +} + +func (c *LogCache) FirstIndex() (uint64, error) { + return c.store.FirstIndex() +} + +func (c *LogCache) LastIndex() (uint64, error) { + return c.store.LastIndex() +} + +func (c *LogCache) DeleteRange(min, max uint64) error { + // Invalidate the cache on deletes + c.l.Lock() + c.cache = make([]*Log, len(c.cache)) + c.l.Unlock() + + return c.store.DeleteRange(min, max) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport.go new file mode 100644 index 0000000000000..9eb4fe054e87a --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport.go @@ -0,0 +1,622 @@ +package raft + +import ( + "bufio" + "errors" + "fmt" + "io" + "log" + "net" + "os" + "sync" + "time" + + "github.com/hashicorp/go-msgpack/codec" +) + +const ( + rpcAppendEntries uint8 = iota + rpcRequestVote + rpcInstallSnapshot + + // DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport. 
+ DefaultTimeoutScale = 256 * 1024 // 256KB + + // rpcMaxPipeline controls the maximum number of outstanding + // AppendEntries RPC calls. + rpcMaxPipeline = 128 +) + +var ( + // ErrTransportShutdown is returned when operations on a transport are + // invoked after it's been terminated. + ErrTransportShutdown = errors.New("transport shutdown") + + // ErrPipelineShutdown is returned when the pipeline is closed. + ErrPipelineShutdown = errors.New("append pipeline closed") +) + +/* + +NetworkTransport provides a network based transport that can be +used to communicate with Raft on remote machines. It requires +an underlying stream layer to provide a stream abstraction, which can +be simple TCP, TLS, etc. + +This transport is very simple and lightweight. Each RPC request is +framed by sending a byte that indicates the message type, followed +by the MsgPack encoded request. + +The response is an error string followed by the response object, +both are encoded using MsgPack. + +InstallSnapshot is special, in that after the RPC request we stream +the entire state. That socket is not re-used as the connection state +is not known if there is an error. + +*/ +type NetworkTransport struct { + connPool map[string][]*netConn + connPoolLock sync.Mutex + + consumeCh chan RPC + + heartbeatFn func(RPC) + heartbeatFnLock sync.Mutex + + logger *log.Logger + + maxPool int + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + stream StreamLayer + + timeout time.Duration + TimeoutScale int +} + +// StreamLayer is used with the NetworkTransport to provide +// the low level stream abstraction. +type StreamLayer interface { + net.Listener + + // Dial is used to create a new outgoing connection + Dial(address string, timeout time.Duration) (net.Conn, error) +} + +type netConn struct { + target string + conn net.Conn + r *bufio.Reader + w *bufio.Writer + dec *codec.Decoder + enc *codec.Encoder +} + +func (n *netConn) Release() error { + return n.conn.Close() +} + +type netPipeline struct { + conn *netConn + trans *NetworkTransport + + doneCh chan AppendFuture + inprogressCh chan *appendFuture + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// NewNetworkTransport creates a new network transport with the given dialer +// and listener. The maxPool controls how many connections we will pool. The +// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply +// the timeout by (SnapshotSize / TimeoutScale). +func NewNetworkTransport( + stream StreamLayer, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) *NetworkTransport { + if logOutput == nil { + logOutput = os.Stderr + } + return NewNetworkTransportWithLogger(stream, maxPool, timeout, log.New(logOutput, "", log.LstdFlags)) +} + +// NewNetworkTransportWithLogger creates a new network transport with the given dialer +// and listener. The maxPool controls how many connections we will pool. The +// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply +// the timeout by (SnapshotSize / TimeoutScale). 
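+// +// Call sketch (editor's addition; stream is any StreamLayer implementation, and the pool size and timeout shown are illustrative): +// +// trans := NewNetworkTransportWithLogger(stream, 3, 10*time.Second, nil)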
+func NewNetworkTransportWithLogger( + stream StreamLayer, + maxPool int, + timeout time.Duration, + logger *log.Logger, +) *NetworkTransport { + if logger == nil { + logger = log.New(os.Stderr, "", log.LstdFlags) + } + trans := &NetworkTransport{ + connPool: make(map[string][]*netConn), + consumeCh: make(chan RPC), + logger: logger, + maxPool: maxPool, + shutdownCh: make(chan struct{}), + stream: stream, + timeout: timeout, + TimeoutScale: DefaultTimeoutScale, + } + go trans.listen() + return trans +} + +// SetHeartbeatHandler is used to set up a heartbeat handler +// as a fast-path. This is to avoid head-of-line blocking from +// disk IO. +func (n *NetworkTransport) SetHeartbeatHandler(cb func(rpc RPC)) { + n.heartbeatFnLock.Lock() + defer n.heartbeatFnLock.Unlock() + n.heartbeatFn = cb +} + +// Close is used to stop the network transport. +func (n *NetworkTransport) Close() error { + n.shutdownLock.Lock() + defer n.shutdownLock.Unlock() + + if !n.shutdown { + close(n.shutdownCh) + n.stream.Close() + n.shutdown = true + } + return nil +} + +// Consumer implements the Transport interface. +func (n *NetworkTransport) Consumer() <-chan RPC { + return n.consumeCh +} + +// LocalAddr implements the Transport interface. +func (n *NetworkTransport) LocalAddr() string { + return n.stream.Addr().String() +} + +// IsShutdown is used to check if the transport is shutdown. +func (n *NetworkTransport) IsShutdown() bool { + select { + case <-n.shutdownCh: + return true + default: + return false + } +} + +// getPooledConn is used to grab a pooled connection. +func (n *NetworkTransport) getPooledConn(target string) *netConn { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + conns, ok := n.connPool[target] + if !ok || len(conns) == 0 { + return nil + } + + var conn *netConn + num := len(conns) + conn, conns[num-1] = conns[num-1], nil + n.connPool[target] = conns[:num-1] + return conn +} + +// getConn is used to get a connection from the pool. +func (n *NetworkTransport) getConn(target string) (*netConn, error) { + // Check for a pooled conn + if conn := n.getPooledConn(target); conn != nil { + return conn, nil + } + + // Dial a new connection + conn, err := n.stream.Dial(target, n.timeout) + if err != nil { + return nil, err + } + + // Wrap the conn + netConn := &netConn{ + target: target, + conn: conn, + r: bufio.NewReader(conn), + w: bufio.NewWriter(conn), + } + + // Setup encoder/decoders + netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{}) + netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{}) + + // Done + return netConn, nil +} + +// returnConn returns a connection back to the pool. +func (n *NetworkTransport) returnConn(conn *netConn) { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + key := conn.target + conns, _ := n.connPool[key] + + if !n.IsShutdown() && len(conns) < n.maxPool { + n.connPool[key] = append(conns, conn) + } else { + conn.Release() + } +} + +// AppendEntriesPipeline returns an interface that can be used to pipeline +// AppendEntries requests. +func (n *NetworkTransport) AppendEntriesPipeline(target string) (AppendPipeline, error) { + // Get a connection + conn, err := n.getConn(target) + if err != nil { + return nil, err + } + + // Create the pipeline + return newNetPipeline(n, conn), nil +} + +// AppendEntries implements the Transport interface.
+func (n *NetworkTransport) AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + return n.genericRPC(target, rpcAppendEntries, args, resp) +} + +// RequestVote implements the Transport interface. +func (n *NetworkTransport) RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error { + return n.genericRPC(target, rpcRequestVote, args, resp) +} + +// genericRPC handles a simple request/response RPC. +func (n *NetworkTransport) genericRPC(target string, rpcType uint8, args interface{}, resp interface{}) error { + // Get a conn + conn, err := n.getConn(target) + if err != nil { + return err + } + + // Set a deadline + if n.timeout > 0 { + conn.conn.SetDeadline(time.Now().Add(n.timeout)) + } + + // Send the RPC + if err := sendRPC(conn, rpcType, args); err != nil { + return err + } + + // Decode the response + canReturn, err := decodeResponse(conn, resp) + if canReturn { + n.returnConn(conn) + } + return err +} + +// InstallSnapshot implements the Transport interface. +func (n *NetworkTransport) InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { + // Get a conn, always close for InstallSnapshot + conn, err := n.getConn(target) + if err != nil { + return err + } + defer conn.Release() + + // Set a deadline, scaled by request size + if n.timeout > 0 { + timeout := n.timeout * time.Duration(args.Size/int64(n.TimeoutScale)) + if timeout < n.timeout { + timeout = n.timeout + } + conn.conn.SetDeadline(time.Now().Add(timeout)) + } + + // Send the RPC + if err := sendRPC(conn, rpcInstallSnapshot, args); err != nil { + return err + } + + // Stream the state + if _, err := io.Copy(conn.w, data); err != nil { + return err + } + + // Flush + if err := conn.w.Flush(); err != nil { + return err + } + + // Decode the response, do not return conn + _, err = decodeResponse(conn, resp) + return err +} + +// EncodePeer implements the Transport interface. +func (n *NetworkTransport) EncodePeer(p string) []byte { + return []byte(p) +} + +// DecodePeer implements the Transport interface. +func (n *NetworkTransport) DecodePeer(buf []byte) string { + return string(buf) +} + +// listen is used to handle incoming connections. +func (n *NetworkTransport) listen() { + for { + // Accept incoming connections + conn, err := n.stream.Accept() + if err != nil { + if n.IsShutdown() { + return + } + n.logger.Printf("[ERR] raft-net: Failed to accept connection: %v", err) + continue + } + n.logger.Printf("[DEBUG] raft-net: %v accepted connection from: %v", n.LocalAddr(), conn.RemoteAddr()) + + // Handle the connection in dedicated routine + go n.handleConn(conn) + } +} + +// handleConn is used to handle an inbound connection for its lifespan. +func (n *NetworkTransport) handleConn(conn net.Conn) { + defer conn.Close() + r := bufio.NewReader(conn) + w := bufio.NewWriter(conn) + dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) + enc := codec.NewEncoder(w, &codec.MsgpackHandle{}) + + for { + if err := n.handleCommand(r, dec, enc); err != nil { + if err != io.EOF { + n.logger.Printf("[ERR] raft-net: Failed to decode incoming command: %v", err) + } + return + } + if err := w.Flush(); err != nil { + n.logger.Printf("[ERR] raft-net: Failed to flush response: %v", err) + return + } + } +} + +// handleCommand is used to decode and dispatch a single command.
+func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, enc *codec.Encoder) error { + // Get the rpc type + rpcType, err := r.ReadByte() + if err != nil { + return err + } + + // Create the RPC object + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + RespChan: respCh, + } + + // Decode the command + isHeartbeat := false + switch rpcType { + case rpcAppendEntries: + var req AppendEntriesRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + // Check if this is a heartbeat + if req.Term != 0 && req.Leader != nil && + req.PrevLogEntry == 0 && req.PrevLogTerm == 0 && + len(req.Entries) == 0 && req.LeaderCommitIndex == 0 { + isHeartbeat = true + } + + case rpcRequestVote: + var req RequestVoteRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + case rpcInstallSnapshot: + var req InstallSnapshotRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + rpc.Reader = io.LimitReader(r, req.Size) + + default: + return fmt.Errorf("unknown rpc type %d", rpcType) + } + + // Check for heartbeat fast-path + if isHeartbeat { + n.heartbeatFnLock.Lock() + fn := n.heartbeatFn + n.heartbeatFnLock.Unlock() + if fn != nil { + fn(rpc) + goto RESP + } + } + + // Dispatch the RPC + select { + case n.consumeCh <- rpc: + case <-n.shutdownCh: + return ErrTransportShutdown + } + + // Wait for response +RESP: + select { + case resp := <-respCh: + // Send the error first + respErr := "" + if resp.Error != nil { + respErr = resp.Error.Error() + } + if err := enc.Encode(respErr); err != nil { + return err + } + + // Send the response + if err := enc.Encode(resp.Response); err != nil { + return err + } + case <-n.shutdownCh: + return ErrTransportShutdown + } + return nil +} + +// decodeResponse is used to decode an RPC response and reports whether +// the connection can be reused. +func decodeResponse(conn *netConn, resp interface{}) (bool, error) { + // Decode the error if any + var rpcError string + if err := conn.dec.Decode(&rpcError); err != nil { + conn.Release() + return false, err + } + + // Decode the response + if err := conn.dec.Decode(resp); err != nil { + conn.Release() + return false, err + } + + // Format an error if any + if rpcError != "" { + return true, fmt.Errorf(rpcError) + } + return true, nil +} + +// sendRPC is used to encode and send the RPC. +func sendRPC(conn *netConn, rpcType uint8, args interface{}) error { + // Write the request type + if err := conn.w.WriteByte(rpcType); err != nil { + conn.Release() + return err + } + + // Send the request + if err := conn.enc.Encode(args); err != nil { + conn.Release() + return err + } + + // Flush + if err := conn.w.Flush(); err != nil { + conn.Release() + return err + } + return nil +} + +// newNetPipeline is used to construct a netPipeline from a given +// transport and connection. +func newNetPipeline(trans *NetworkTransport, conn *netConn) *netPipeline { + n := &netPipeline{ + conn: conn, + trans: trans, + doneCh: make(chan AppendFuture, rpcMaxPipeline), + inprogressCh: make(chan *appendFuture, rpcMaxPipeline), + shutdownCh: make(chan struct{}), + } + go n.decodeResponses() + return n +} + +// decodeResponses is a long running routine that decodes the responses +// sent on the connection. 
+func (n *netPipeline) decodeResponses() { + timeout := n.trans.timeout + for { + select { + case future := <-n.inprogressCh: + if timeout > 0 { + n.conn.conn.SetReadDeadline(time.Now().Add(timeout)) + } + + _, err := decodeResponse(n.conn, future.resp) + future.respond(err) + select { + case n.doneCh <- future: + case <-n.shutdownCh: + return + } + case <-n.shutdownCh: + return + } + } +} + +// AppendEntries is used to pipeline a new append entries request. +func (n *netPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { + // Create a new future + future := &appendFuture{ + start: time.Now(), + args: args, + resp: resp, + } + future.init() + + // Add a send timeout + if timeout := n.trans.timeout; timeout > 0 { + n.conn.conn.SetWriteDeadline(time.Now().Add(timeout)) + } + + // Send the RPC + if err := sendRPC(n.conn, rpcAppendEntries, future.args); err != nil { + return nil, err + } + + // Hand off for decoding; this can also cause back-pressure + // to prevent too many inflight requests + select { + case n.inprogressCh <- future: + return future, nil + case <-n.shutdownCh: + return nil, ErrPipelineShutdown + } +} + +// Consumer returns a channel that can be used to consume complete futures. +func (n *netPipeline) Consumer() <-chan AppendFuture { + return n.doneCh +} + +// Close is used to shut down the pipeline connection. +func (n *netPipeline) Close() error { + n.shutdownLock.Lock() + defer n.shutdownLock.Unlock() + if n.shutdown { + return nil + } + + // Release the connection + n.conn.Release() + + n.shutdown = true + close(n.shutdownCh) + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/peer.go b/Godeps/_workspace/src/github.com/hashicorp/raft/peer.go new file mode 100644 index 0000000000000..6f3bcf8564507 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/peer.go @@ -0,0 +1,122 @@ +package raft + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "sync" +) + +const ( + jsonPeerPath = "peers.json" +) + +// PeerStore provides an interface for persistent storage and +// retrieval of peers. We use a separate interface from StableStore +// since the peers may need to be edited by a human operator. For example, +// in a two-node cluster, the failure of either node requires human intervention +// since consensus is impossible. +type PeerStore interface { + // Peers returns the list of known peers. + Peers() ([]string, error) + + // SetPeers sets the list of known peers. This is invoked when a peer is + // added or removed. + SetPeers([]string) error +} + +// StaticPeers is used to provide a static list of peers. +type StaticPeers struct { + StaticPeers []string + l sync.Mutex +} + +// Peers implements the PeerStore interface. +func (s *StaticPeers) Peers() ([]string, error) { + s.l.Lock() + peers := s.StaticPeers + s.l.Unlock() + return peers, nil +} + +// SetPeers implements the PeerStore interface. +func (s *StaticPeers) SetPeers(p []string) error { + s.l.Lock() + s.StaticPeers = p + s.l.Unlock() + return nil +} + +// JSONPeers is used to provide peer persistence on disk in the form +// of a JSON file. This allows human operators to manipulate the file. +type JSONPeers struct { + l sync.Mutex + path string + trans Transport +}
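+ +// examplePeers is an editor's sketch, not part of the vendored file: it persists a peer set through the JSON-backed store and reads it back from peers.json. The addresses are illustrative. +func examplePeers(base string, trans Transport) ([]string, error) { + store := NewJSONPeers(base, trans) + if err := store.SetPeers([]string{"10.0.0.1:7000", "10.0.0.2:7000"}); err != nil { + return nil, err + } + return store.Peers() +} + +// NewJSONPeers creates a new JSONPeers store. Requires a transport +// to handle the serialization of network addresses.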
+func NewJSONPeers(base string, trans Transport) *JSONPeers { + path := filepath.Join(base, jsonPeerPath) + store := &JSONPeers{ + path: path, + trans: trans, + } + return store +} + +// Peers implements the PeerStore interface. +func (j *JSONPeers) Peers() ([]string, error) { + j.l.Lock() + defer j.l.Unlock() + + // Read the file + buf, err := ioutil.ReadFile(j.path) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + + // Check for no peers + if len(buf) == 0 { + return nil, nil + } + + // Decode the peers + var peerSet []string + dec := json.NewDecoder(bytes.NewReader(buf)) + if err := dec.Decode(&peerSet); err != nil { + return nil, err + } + + // Deserialize each peer + var peers []string + for _, p := range peerSet { + peers = append(peers, j.trans.DecodePeer([]byte(p))) + } + return peers, nil +} + +// SetPeers implements the PeerStore interface. +func (j *JSONPeers) SetPeers(peers []string) error { + j.l.Lock() + defer j.l.Unlock() + + // Encode each peer + var peerSet []string + for _, p := range peers { + peerSet = append(peerSet, string(j.trans.EncodePeer(p))) + } + + // Convert to JSON + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(peerSet); err != nil { + return err + } + + // Write out as JSON + return ioutil.WriteFile(j.path, buf.Bytes(), 0755) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/raft.go b/Godeps/_workspace/src/github.com/hashicorp/raft/raft.go new file mode 100644 index 0000000000000..f7880ba9c974d --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/raft.go @@ -0,0 +1,1887 @@ +package raft + +import ( + "bytes" + "errors" + "fmt" + "io" + "log" + "os" + "strconv" + "sync" + "time" + + "github.com/armon/go-metrics" +) + +const ( + minCheckInterval = 10 * time.Millisecond +) + +var ( + keyCurrentTerm = []byte("CurrentTerm") + keyLastVoteTerm = []byte("LastVoteTerm") + keyLastVoteCand = []byte("LastVoteCand") + + // ErrLeader is returned when an operation can't be completed on a + // leader node. + ErrLeader = errors.New("node is the leader") + + // ErrNotLeader is returned when an operation can't be completed on a + // follower or candidate node. + ErrNotLeader = errors.New("node is not the leader") + + // ErrLeadershipLost is returned when a leader fails to commit a log entry + // because it's been deposed in the process. + ErrLeadershipLost = errors.New("leadership lost while committing log") + + // ErrRaftShutdown is returned when operations are requested against an + // inactive Raft. + ErrRaftShutdown = errors.New("raft is already shutdown") + + // ErrEnqueueTimeout is returned when a command fails due to a timeout. + ErrEnqueueTimeout = errors.New("timed out enqueuing operation") + + // ErrKnownPeer is returned when trying to add a peer to the configuration + // that already exists. + ErrKnownPeer = errors.New("peer already known") + + // ErrUnknownPeer is returned when trying to remove a peer from the + // configuration that doesn't exist. + ErrUnknownPeer = errors.New("peer is unknown") + + // ErrNothingNewToSnapshot is returned when trying to create a snapshot + // but there's nothing new committed to the FSM since we started. + ErrNothingNewToSnapshot = errors.New("Nothing new to snapshot") +) + +// commitTuple is used to send an index that was committed, +// with an optional associated future that should be invoked. +type commitTuple struct { + log *Log + future *logFuture +} + +// leaderState is state that is used while we are a leader.
+type leaderState struct { + commitCh chan struct{} + inflight *inflight + replState map[string]*followerReplication + notify map[*verifyFuture]struct{} + stepDown chan struct{} +} + +// Raft implements a Raft node. +type Raft struct { + raftState + + // applyCh is used to async send logs to the main thread to + // be committed and applied to the FSM. + applyCh chan *logFuture + + // Configuration provided at Raft initialization + conf *Config + + // FSM is the client state machine to apply commands to + fsm FSM + + // fsmCommitCh is used to trigger async application of logs to the fsm + fsmCommitCh chan commitTuple + + // fsmRestoreCh is used to trigger a restore from snapshot + fsmRestoreCh chan *restoreFuture + + // fsmSnapshotCh is used to trigger a new snapshot being taken + fsmSnapshotCh chan *reqSnapshotFuture + + // lastContact is the last time we had contact from the + // leader node. This can be used to gauge staleness. + lastContact time.Time + lastContactLock sync.RWMutex + + // Leader is the current cluster leader + leader string + leaderLock sync.RWMutex + + // leaderCh is used to notify of leadership changes + leaderCh chan bool + + // leaderState used only while state is leader + leaderState leaderState + + // Stores our local addr + localAddr string + + // Used for our logging + logger *log.Logger + + // LogStore provides durable storage for logs + logs LogStore + + // Track our known peers + peerCh chan *peerFuture + peers []string + peerStore PeerStore + + // RPC chan comes from the transport layer + rpcCh <-chan RPC + + // Shutdown channel to exit, protected to prevent concurrent exits + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + // snapshots is used to store and retrieve snapshots + snapshots SnapshotStore + + // snapshotCh is used for user triggered snapshots + snapshotCh chan *snapshotFuture + + // stable is a StableStore implementation for durable state + // It provides stable storage for many fields in raftState + stable StableStore + + // The transport layer we use + trans Transport + + // verifyCh is used to async send verify futures to the main thread + // to verify we are still the leader + verifyCh chan *verifyFuture +} + +// NewRaft is used to construct a new Raft node. It takes a configuration, as well +// as implementations of various interfaces that are required. If we have any old state, +// such as snapshots, logs, peers, etc, all those will be restored when creating the +// Raft node. 
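The constructor below wires all of these dependencies together. As a rough orientation, a single-node test harness might look like the following sketch; DefaultConfig, NewInmemStore, NewInmemTransport, and NewDiscardSnapshotStore are assumed to come from the same package version and are not part of this diff:

package main

import (
	"io"
	"log"

	"github.com/hashicorp/raft"
)

// nopFSM satisfies the FSM interface with no-ops.
type nopFSM struct{}

func (nopFSM) Apply(l *raft.Log) interface{}       { return nil }
func (nopFSM) Snapshot() (raft.FSMSnapshot, error) { return nil, raft.ErrNothingNewToSnapshot }
func (nopFSM) Restore(rc io.ReadCloser) error      { return rc.Close() }

func main() {
	conf := raft.DefaultConfig()
	conf.EnableSingleNode = true // allow self-election in a one-node cluster

	store := raft.NewInmemStore() // serves as both LogStore and StableStore
	_, trans := raft.NewInmemTransport()
	snaps := raft.NewDiscardSnapshotStore()
	peers := &raft.StaticPeers{}

	r, err := raft.NewRaft(conf, nopFSM{}, store, store, snaps, peers, trans)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Shutdown()
}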
+func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps SnapshotStore, + peerStore PeerStore, trans Transport) (*Raft, error) { + // Validate the configuration + if err := ValidateConfig(conf); err != nil { + return nil, err + } + + // Ensure we have a LogOutput + var logger *log.Logger + if conf.Logger != nil { + logger = conf.Logger + } else { + if conf.LogOutput == nil { + conf.LogOutput = os.Stderr + } + logger = log.New(conf.LogOutput, "", log.LstdFlags) + } + + // Try to restore the current term + currentTerm, err := stable.GetUint64(keyCurrentTerm) + if err != nil && err.Error() != "not found" { + return nil, fmt.Errorf("failed to load current term: %v", err) + } + + // Read the last log value + lastIdx, err := logs.LastIndex() + if err != nil { + return nil, fmt.Errorf("failed to find last log: %v", err) + } + + // Get the log + var lastLog Log + if lastIdx > 0 { + if err := logs.GetLog(lastIdx, &lastLog); err != nil { + return nil, fmt.Errorf("failed to get last log: %v", err) + } + } + + // Construct the list of peers that excludes us + localAddr := trans.LocalAddr() + peers, err := peerStore.Peers() + if err != nil { + return nil, fmt.Errorf("failed to get list of peers: %v", err) + } + peers = ExcludePeer(peers, localAddr) + + // Create Raft struct + r := &Raft{ + applyCh: make(chan *logFuture), + conf: conf, + fsm: fsm, + fsmCommitCh: make(chan commitTuple, 128), + fsmRestoreCh: make(chan *restoreFuture), + fsmSnapshotCh: make(chan *reqSnapshotFuture), + leaderCh: make(chan bool), + localAddr: localAddr, + logger: logger, + logs: logs, + peerCh: make(chan *peerFuture), + peers: peers, + peerStore: peerStore, + rpcCh: trans.Consumer(), + snapshots: snaps, + snapshotCh: make(chan *snapshotFuture), + shutdownCh: make(chan struct{}), + stable: stable, + trans: trans, + verifyCh: make(chan *verifyFuture, 64), + } + + // Initialize as a follower + r.setState(Follower) + + // Start as leader if specified. This should only be used + // for testing purposes. + if conf.StartAsLeader { + r.setState(Leader) + r.setLeader(r.localAddr) + } + + // Restore the current term and the last log + r.setCurrentTerm(currentTerm) + r.setLastLogIndex(lastLog.Index) + r.setLastLogTerm(lastLog.Term) + + // Attempt to restore a snapshot if there are any + if err := r.restoreSnapshot(); err != nil { + return nil, err + } + + // Setup a heartbeat fast-path to avoid head-of-line + // blocking where possible. It MUST be safe for this + // to be called concurrently with a blocking RPC. + trans.SetHeartbeatHandler(r.processHeartbeat) + + // Start the background work + r.goFunc(r.run) + r.goFunc(r.runFSM) + r.goFunc(r.runSnapshots) + return r, nil +} + +// Leader is used to return the current leader of the cluster. +// It may return empty string if there is no current leader +// or the leader is unknown. +func (r *Raft) Leader() string { + r.leaderLock.RLock() + leader := r.leader + r.leaderLock.RUnlock() + return leader +} + +// setLeader is used to modify the current leader of the cluster +func (r *Raft) setLeader(leader string) { + r.leaderLock.Lock() + r.leader = leader + r.leaderLock.Unlock() +} + +// Apply is used to apply a command to the FSM in a highly consistent +// manner. This returns a future that can be used to wait on the application. +// An optional timeout can be provided to limit the amount of time we wait +// for the command to be started. This must be run on the leader or it +// will fail. 
+func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture {
+	metrics.IncrCounter([]string{"raft", "apply"}, 1)
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Create a log future, no index or term yet
+	logFuture := &logFuture{
+		log: Log{
+			Type: LogCommand,
+			Data: cmd,
+		},
+	}
+	logFuture.init()
+
+	select {
+	case <-timer:
+		return errorFuture{ErrEnqueueTimeout}
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.applyCh <- logFuture:
+		return logFuture
+	}
+}
+
+// Barrier is used to issue a command that blocks until all preceding
+// operations have been applied to the FSM. It can be used to ensure the
+// FSM reflects all queued writes. An optional timeout can be provided to
+// limit the amount of time we wait for the command to be started. This
+// must be run on the leader or it will fail.
+func (r *Raft) Barrier(timeout time.Duration) Future {
+	metrics.IncrCounter([]string{"raft", "barrier"}, 1)
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Create a log future, no index or term yet
+	logFuture := &logFuture{
+		log: Log{
+			Type: LogBarrier,
+		},
+	}
+	logFuture.init()
+
+	select {
+	case <-timer:
+		return errorFuture{ErrEnqueueTimeout}
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.applyCh <- logFuture:
+		return logFuture
+	}
+}
+
+// VerifyLeader is used to ensure the current node is still
+// the leader. This can be done to prevent stale reads when a
+// new leader has potentially been elected.
+func (r *Raft) VerifyLeader() Future {
+	metrics.IncrCounter([]string{"raft", "verify_leader"}, 1)
+	verifyFuture := &verifyFuture{}
+	verifyFuture.init()
+	select {
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.verifyCh <- verifyFuture:
+		return verifyFuture
+	}
+}
+
+// AddPeer is used to add a new peer into the cluster. This must be
+// run on the leader or it will fail.
+func (r *Raft) AddPeer(peer string) Future {
+	logFuture := &logFuture{
+		log: Log{
+			Type: LogAddPeer,
+			peer: peer,
+		},
+	}
+	logFuture.init()
+	select {
+	case r.applyCh <- logFuture:
+		return logFuture
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	}
+}
+
+// RemovePeer is used to remove a peer from the cluster. If the
+// current leader is being removed, it will cause a new election
+// to occur. This must be run on the leader or it will fail.
+func (r *Raft) RemovePeer(peer string) Future {
+	logFuture := &logFuture{
+		log: Log{
+			Type: LogRemovePeer,
+			peer: peer,
+		},
+	}
+	logFuture.init()
+	select {
+	case r.applyCh <- logFuture:
+		return logFuture
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	}
+}
+
+// SetPeers is used to forcibly replace the set of internal peers and
+// the peerstore with the ones specified. This can be considered unsafe.
+func (r *Raft) SetPeers(p []string) Future {
+	peerFuture := &peerFuture{
+		peers: p,
+	}
+	peerFuture.init()
+
+	select {
+	case r.peerCh <- peerFuture:
+		return peerFuture
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	}
+}
+
+// Shutdown is used to stop the Raft background routines.
+// This is not a graceful operation. Provides a future that
+// can be used to block until all background routines have exited.
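Taken together, Apply and Barrier support a write-then-read-your-writes pattern. A hypothetical usage sketch, assuming a running *Raft named r:

// Apply enqueues a command; Error() blocks until it is applied or fails.
f := r.Apply([]byte("set x=1"), 5*time.Second)
if err := f.Error(); err == raft.ErrNotLeader {
	// Not the leader: forward the request to r.Leader() instead.
}

// Barrier flushes everything enqueued so far into the FSM before returning.
if err := r.Barrier(5 * time.Second).Error(); err == nil {
	// The FSM now reflects the write above; safe to read from it.
}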
+func (r *Raft) Shutdown() Future { + r.shutdownLock.Lock() + defer r.shutdownLock.Unlock() + + if !r.shutdown { + close(r.shutdownCh) + r.shutdown = true + r.setState(Shutdown) + } + + return &shutdownFuture{r} +} + +// Snapshot is used to manually force Raft to take a snapshot. +// Returns a future that can be used to block until complete. +func (r *Raft) Snapshot() Future { + snapFuture := &snapshotFuture{} + snapFuture.init() + select { + case r.snapshotCh <- snapFuture: + return snapFuture + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + } + +} + +// State is used to return the current raft state. +func (r *Raft) State() RaftState { + return r.getState() +} + +// LeaderCh is used to get a channel which delivers signals on +// acquiring or losing leadership. It sends true if we become +// the leader, and false if we lose it. The channel is not buffered, +// and does not block on writes. +func (r *Raft) LeaderCh() <-chan bool { + return r.leaderCh +} + +func (r *Raft) String() string { + return fmt.Sprintf("Node at %s [%v]", r.localAddr, r.getState()) +} + +// LastContact returns the time of last contact by a leader. +// This only makes sense if we are currently a follower. +func (r *Raft) LastContact() time.Time { + r.lastContactLock.RLock() + last := r.lastContact + r.lastContactLock.RUnlock() + return last +} + +// Stats is used to return a map of various internal stats. This should only +// be used for informative purposes or debugging. +func (r *Raft) Stats() map[string]string { + toString := func(v uint64) string { + return strconv.FormatUint(v, 10) + } + s := map[string]string{ + "state": r.getState().String(), + "term": toString(r.getCurrentTerm()), + "last_log_index": toString(r.getLastLogIndex()), + "last_log_term": toString(r.getLastLogTerm()), + "commit_index": toString(r.getCommitIndex()), + "applied_index": toString(r.getLastApplied()), + "fsm_pending": toString(uint64(len(r.fsmCommitCh))), + "last_snapshot_index": toString(r.getLastSnapshotIndex()), + "last_snapshot_term": toString(r.getLastSnapshotTerm()), + "num_peers": toString(uint64(len(r.peers))), + } + last := r.LastContact() + if last.IsZero() { + s["last_contact"] = "never" + } else if r.getState() == Leader { + s["last_contact"] = "0" + } else { + s["last_contact"] = fmt.Sprintf("%v", time.Now().Sub(last)) + } + return s +} + +// LastIndex returns the last index in stable storage, +// either from the last log or from the last snapshot. +func (r *Raft) LastIndex() uint64 { + return r.getLastIndex() +} + +// AppliedIndex returns the last index applied to the FSM. +// This is generally lagging behind the last index, especially +// for indexes that are persisted but have not yet been considered +// committed by the leader. +func (r *Raft) AppliedIndex() uint64 { + return r.getLastApplied() +} + +// runFSM is a long running goroutine responsible for applying logs +// to the FSM. This is done async of other logs since we don't want +// the FSM to block our internal operations. 
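LeaderCh and Stats combine naturally into a small monitoring loop; a sketch, again assuming a running *Raft named r:

go func() {
	for isLeader := range r.LeaderCh() {
		// Stats() is informational only; all values are formatted strings.
		s := r.Stats()
		log.Printf("leadership=%v state=%s term=%s", isLeader, s["state"], s["term"])
	}
}()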
+func (r *Raft) runFSM() { + var lastIndex, lastTerm uint64 + for { + select { + case req := <-r.fsmRestoreCh: + // Open the snapshot + meta, source, err := r.snapshots.Open(req.ID) + if err != nil { + req.respond(fmt.Errorf("failed to open snapshot %v: %v", req.ID, err)) + continue + } + + // Attempt to restore + start := time.Now() + if err := r.fsm.Restore(source); err != nil { + req.respond(fmt.Errorf("failed to restore snapshot %v: %v", req.ID, err)) + source.Close() + continue + } + source.Close() + metrics.MeasureSince([]string{"raft", "fsm", "restore"}, start) + + // Update the last index and term + lastIndex = meta.Index + lastTerm = meta.Term + req.respond(nil) + + case req := <-r.fsmSnapshotCh: + // Is there something to snapshot? + if lastIndex == 0 { + req.respond(ErrNothingNewToSnapshot) + continue + } + + // Get our peers + peers, err := r.peerStore.Peers() + if err != nil { + req.respond(err) + continue + } + + // Start a snapshot + start := time.Now() + snap, err := r.fsm.Snapshot() + metrics.MeasureSince([]string{"raft", "fsm", "snapshot"}, start) + + // Respond to the request + req.index = lastIndex + req.term = lastTerm + req.peers = peers + req.snapshot = snap + req.respond(err) + + case commitTuple := <-r.fsmCommitCh: + // Apply the log if a command + var resp interface{} + if commitTuple.log.Type == LogCommand { + start := time.Now() + resp = r.fsm.Apply(commitTuple.log) + metrics.MeasureSince([]string{"raft", "fsm", "apply"}, start) + } + + // Update the indexes + lastIndex = commitTuple.log.Index + lastTerm = commitTuple.log.Term + + // Invoke the future if given + if commitTuple.future != nil { + commitTuple.future.response = resp + commitTuple.future.respond(nil) + } + case <-r.shutdownCh: + return + } + } +} + +// run is a long running goroutine that runs the Raft FSM. +func (r *Raft) run() { + for { + // Check if we are doing a shutdown + select { + case <-r.shutdownCh: + // Clear the leader to prevent forwarding + r.setLeader("") + return + default: + } + + // Enter into a sub-FSM + switch r.getState() { + case Follower: + r.runFollower() + case Candidate: + r.runCandidate() + case Leader: + r.runLeader() + } + } +} + +// runFollower runs the FSM for a follower. +func (r *Raft) runFollower() { + didWarn := false + r.logger.Printf("[INFO] raft: %v entering Follower state", r) + metrics.IncrCounter([]string{"raft", "state", "follower"}, 1) + heartbeatTimer := randomTimeout(r.conf.HeartbeatTimeout) + for { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case a := <-r.applyCh: + // Reject any operations since we are not the leader + a.respond(ErrNotLeader) + + case v := <-r.verifyCh: + // Reject any operations since we are not the leader + v.respond(ErrNotLeader) + + case p := <-r.peerCh: + // Set the peers + r.peers = ExcludePeer(p.peers, r.localAddr) + p.respond(r.peerStore.SetPeers(p.peers)) + + case <-heartbeatTimer: + // Restart the heartbeat timer + heartbeatTimer = randomTimeout(r.conf.HeartbeatTimeout) + + // Check if we have had a successful contact + lastContact := r.LastContact() + if time.Now().Sub(lastContact) < r.conf.HeartbeatTimeout { + continue + } + + // Heartbeat failed! Transition to the candidate state + r.setLeader("") + if len(r.peers) == 0 && !r.conf.EnableSingleNode { + if !didWarn { + r.logger.Printf("[WARN] raft: EnableSingleNode disabled, and no known peers. 
Aborting election.")
+					didWarn = true
+				}
+			} else {
+				r.logger.Printf("[WARN] raft: Heartbeat timeout reached, starting election")
+
+				metrics.IncrCounter([]string{"raft", "transition", "heartbeat_timeout"}, 1)
+				r.setState(Candidate)
+				return
+			}
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// runCandidate runs the FSM for a candidate.
+func (r *Raft) runCandidate() {
+	r.logger.Printf("[INFO] raft: %v entering Candidate state", r)
+	metrics.IncrCounter([]string{"raft", "state", "candidate"}, 1)
+
+	// Start a vote for ourselves, and set a timeout
+	voteCh := r.electSelf()
+	electionTimer := randomTimeout(r.conf.ElectionTimeout)
+
+	// Tally the votes, need a simple majority
+	grantedVotes := 0
+	votesNeeded := r.quorumSize()
+	r.logger.Printf("[DEBUG] raft: Votes needed: %d", votesNeeded)
+
+	for r.getState() == Candidate {
+		select {
+		case rpc := <-r.rpcCh:
+			r.processRPC(rpc)
+
+		case vote := <-voteCh:
+			// Check if the term is greater than ours, bail
+			if vote.Term > r.getCurrentTerm() {
+				r.logger.Printf("[DEBUG] raft: Newer term discovered, fallback to follower")
+				r.setState(Follower)
+				r.setCurrentTerm(vote.Term)
+				return
+			}
+
+			// Check if the vote is granted
+			if vote.Granted {
+				grantedVotes++
+				r.logger.Printf("[DEBUG] raft: Vote granted from %s. Tally: %d", vote.voter, grantedVotes)
+			}
+
+			// Check if we've become the leader
+			if grantedVotes >= votesNeeded {
+				r.logger.Printf("[INFO] raft: Election won. Tally: %d", grantedVotes)
+				r.setState(Leader)
+				r.setLeader(r.localAddr)
+				return
+			}
+
+		case a := <-r.applyCh:
+			// Reject any operations since we are not the leader
+			a.respond(ErrNotLeader)
+
+		case v := <-r.verifyCh:
+			// Reject any operations since we are not the leader
+			v.respond(ErrNotLeader)
+
+		case p := <-r.peerCh:
+			// Set the peers
+			r.peers = ExcludePeer(p.peers, r.localAddr)
+			p.respond(r.peerStore.SetPeers(p.peers))
+			// Become a follower again
+			r.setState(Follower)
+			return
+
+		case <-electionTimer:
+			// Election failed! Restart the election. We simply return,
+			// which will kick us back into runCandidate
+			r.logger.Printf("[WARN] raft: Election timeout reached, restarting election")
+			return
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// runLeader runs the FSM for a leader. Do the setup here and drop into
+// the leaderLoop for the hot loop.
+func (r *Raft) runLeader() {
+	r.logger.Printf("[INFO] raft: %v entering Leader state", r)
+	metrics.IncrCounter([]string{"raft", "state", "leader"}, 1)
+
+	// Notify that we are the leader
+	asyncNotifyBool(r.leaderCh, true)
+
+	// Push to the notify channel if given
+	if notify := r.conf.NotifyCh; notify != nil {
+		select {
+		case notify <- true:
+		case <-r.shutdownCh:
+		}
+	}
+
+	// Setup leader state
+	r.leaderState.commitCh = make(chan struct{}, 1)
+	r.leaderState.inflight = newInflight(r.leaderState.commitCh)
+	r.leaderState.replState = make(map[string]*followerReplication)
+	r.leaderState.notify = make(map[*verifyFuture]struct{})
+	r.leaderState.stepDown = make(chan struct{}, 1)
+
+	// Cleanup state on step down
+	defer func() {
+		// Since we were the leader previously, we update our
+		// last contact time when we step down, so that we are not
+		// reporting a last contact time from before we were the
+		// leader. Otherwise, to a client it would seem our data
+		// is extremely stale.
+ r.setLastContact() + + // Stop replication + for _, p := range r.leaderState.replState { + close(p.stopCh) + } + + // Cancel inflight requests + r.leaderState.inflight.Cancel(ErrLeadershipLost) + + // Respond to any pending verify requests + for future := range r.leaderState.notify { + future.respond(ErrLeadershipLost) + } + + // Clear all the state + r.leaderState.commitCh = nil + r.leaderState.inflight = nil + r.leaderState.replState = nil + r.leaderState.notify = nil + r.leaderState.stepDown = nil + + // If we are stepping down for some reason, no known leader. + // We may have stepped down due to an RPC call, which would + // provide the leader, so we cannot always blank this out. + r.leaderLock.Lock() + if r.leader == r.localAddr { + r.leader = "" + } + r.leaderLock.Unlock() + + // Notify that we are not the leader + asyncNotifyBool(r.leaderCh, false) + + // Push to the notify channel if given + if notify := r.conf.NotifyCh; notify != nil { + select { + case notify <- false: + case <-r.shutdownCh: + // On shutdown, make a best effort but do not block + select { + case notify <- false: + default: + } + } + } + }() + + // Start a replication routine for each peer + for _, peer := range r.peers { + r.startReplication(peer) + } + + // Dispatch a no-op log first. Instead of LogNoop, + // we use a LogAddPeer with our peerset. This acts like + // a no-op as well, but when doing an initial bootstrap, ensures + // that all nodes share a common peerset. + peerSet := append([]string{r.localAddr}, r.peers...) + noop := &logFuture{ + log: Log{ + Type: LogAddPeer, + Data: encodePeers(peerSet, r.trans), + }, + } + r.dispatchLogs([]*logFuture{noop}) + + // Disable EnableSingleNode after we've been elected leader. + // This is to prevent a split brain in the future, if we are removed + // from the cluster and then elect ourself as leader. + if r.conf.DisableBootstrapAfterElect && r.conf.EnableSingleNode { + r.logger.Printf("[INFO] raft: Disabling EnableSingleNode (bootstrap)") + r.conf.EnableSingleNode = false + } + + // Sit in the leader loop until we step down + r.leaderLoop() +} + +// startReplication is a helper to setup state and start async replication to a peer. +func (r *Raft) startReplication(peer string) { + lastIdx := r.getLastIndex() + s := &followerReplication{ + peer: peer, + inflight: r.leaderState.inflight, + stopCh: make(chan uint64, 1), + triggerCh: make(chan struct{}, 1), + currentTerm: r.getCurrentTerm(), + matchIndex: 0, + nextIndex: lastIdx + 1, + lastContact: time.Now(), + notifyCh: make(chan struct{}, 1), + stepDown: r.leaderState.stepDown, + } + r.leaderState.replState[peer] = s + r.goFunc(func() { r.replicate(s) }) + asyncNotifyCh(s.triggerCh) +} + +// leaderLoop is the hot loop for a leader. It is invoked +// after all the various leader setup is done. +func (r *Raft) leaderLoop() { + // stepDown is used to track if there is an inflight log that + // would cause us to lose leadership (specifically a RemovePeer of + // ourselves). If this is the case, we must not allow any logs to + // be processed in parallel, otherwise we are basing commit on + // only a single peer (ourself) and replicating to an undefined set + // of peers. 
+	stepDown := false
+
+	lease := time.After(r.conf.LeaderLeaseTimeout)
+	for r.getState() == Leader {
+		select {
+		case rpc := <-r.rpcCh:
+			r.processRPC(rpc)
+
+		case <-r.leaderState.stepDown:
+			r.setState(Follower)
+
+		case <-r.leaderState.commitCh:
+			// Get the committed messages
+			committed := r.leaderState.inflight.Committed()
+			for e := committed.Front(); e != nil; e = e.Next() {
+				// Measure the commit time
+				commitLog := e.Value.(*logFuture)
+				metrics.MeasureSince([]string{"raft", "commitTime"}, commitLog.dispatch)
+
+				// Increment the commit index
+				idx := commitLog.log.Index
+				r.setCommitIndex(idx)
+				r.processLogs(idx, commitLog)
+			}
+
+		case v := <-r.verifyCh:
+			if v.quorumSize == 0 {
+				// Just dispatched, start the verification
+				r.verifyLeader(v)
+
+			} else if v.votes < v.quorumSize {
+				// Early return, means there must be a new leader
+				r.logger.Printf("[WARN] raft: New leader elected, stepping down")
+				r.setState(Follower)
+				delete(r.leaderState.notify, v)
+				v.respond(ErrNotLeader)
+
+			} else {
+				// Quorum of members agree, we are still leader
+				delete(r.leaderState.notify, v)
+				v.respond(nil)
+			}
+
+		case p := <-r.peerCh:
+			p.respond(ErrLeader)
+
+		case newLog := <-r.applyCh:
+			// Group commit, gather all the ready commits
+			ready := []*logFuture{newLog}
+		GROUP:
+			for i := 0; i < r.conf.MaxAppendEntries; i++ {
+				select {
+				case newLog := <-r.applyCh:
+					ready = append(ready, newLog)
+				default:
+					// A bare break would only exit the select,
+					// so break out of the gather loop by label.
+					break GROUP
+				}
+			}
+
+			// Handle any peer set changes
+			n := len(ready)
+			for i := 0; i < n; i++ {
+				// Fail all future transactions once stepDown is on
+				if stepDown {
+					ready[i].respond(ErrNotLeader)
+					ready[i], ready[n-1] = ready[n-1], nil
+					n--
+					i--
+					continue
+				}
+
+				// Special case AddPeer and RemovePeer
+				log := ready[i]
+				if log.log.Type != LogAddPeer && log.log.Type != LogRemovePeer {
+					continue
+				}
+
+				// Check if this log should be ignored. The logs can be
+				// reordered here since we have not yet assigned an index
+				// and are not violating any promises.
+				if !r.preparePeerChange(log) {
+					ready[i], ready[n-1] = ready[n-1], nil
+					n--
+					i--
+					continue
+				}
+
+				// Apply peer set changes early and check if we will step
+				// down after the commit of this log. If so, we must not
+				// allow any future entries to make progress to avoid undefined
+				// behavior.
+				if ok := r.processLog(&log.log, nil, true); ok {
+					stepDown = true
+				}
+			}
+
+			// Nothing to do if all logs are invalid
+			if n == 0 {
+				continue
+			}
+
+			// Dispatch the logs
+			ready = ready[:n]
+			r.dispatchLogs(ready)
+
+		case <-lease:
+			// Check if we've exceeded the lease, potentially stepping down
+			maxDiff := r.checkLeaderLease()
+
+			// Next check interval should adjust for the last node we've
+			// contacted, without going negative
+			checkInterval := r.conf.LeaderLeaseTimeout - maxDiff
+			if checkInterval < minCheckInterval {
+				checkInterval = minCheckInterval
+			}
+
+			// Renew the lease timer
+			lease = time.After(checkInterval)
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// verifyLeader must be called from the main thread for safety.
+// Causes the followers to attempt an immediate heartbeat.
+func (r *Raft) verifyLeader(v *verifyFuture) {
+	// Current leader always votes for self
+	v.votes = 1
+
+	// Set the quorum size, hot-path for single node
+	v.quorumSize = r.quorumSize()
+	if v.quorumSize == 1 {
+		v.respond(nil)
+		return
+	}
+
+	// Track this request
+	v.notifyCh = r.verifyCh
+	r.leaderState.notify[v] = struct{}{}
+
+	// Trigger immediate heartbeats
+	for _, repl := range r.leaderState.replState {
+		repl.notifyLock.Lock()
+		repl.notify = append(repl.notify, v)
+		repl.notifyLock.Unlock()
+		asyncNotifyCh(repl.notifyCh)
+	}
+}
+
+// checkLeaderLease is used to check if we can contact a quorum of nodes
+// within the last leader lease interval. If not, we need to step down,
+// as we may have lost connectivity. Returns the maximum duration without
+// contact.
+func (r *Raft) checkLeaderLease() time.Duration {
+	// Track contacted nodes, we can always contact ourself
+	contacted := 1
+
+	// Check each follower
+	var maxDiff time.Duration
+	now := time.Now()
+	for peer, f := range r.leaderState.replState {
+		diff := now.Sub(f.LastContact())
+		if diff <= r.conf.LeaderLeaseTimeout {
+			contacted++
+			if diff > maxDiff {
+				maxDiff = diff
+			}
+		} else {
+			// Log at least once at high value, then debug. Otherwise it gets very verbose.
+			if diff <= 3*r.conf.LeaderLeaseTimeout {
+				r.logger.Printf("[WARN] raft: Failed to contact %v in %v", peer, diff)
+			} else {
+				r.logger.Printf("[DEBUG] raft: Failed to contact %v in %v", peer, diff)
+			}
+		}
+		metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond))
+	}
+
+	// Verify we can contact a quorum
+	quorum := r.quorumSize()
+	if contacted < quorum {
+		r.logger.Printf("[WARN] raft: Failed to contact quorum of nodes, stepping down")
+		r.setState(Follower)
+		metrics.IncrCounter([]string{"raft", "transition", "leader_lease_timeout"}, 1)
+	}
+	return maxDiff
+}
+
+// quorumSize returns the number of votes required for a quorum,
+// counting ourself in addition to the known peers.
+func (r *Raft) quorumSize() int {
+	return ((len(r.peers) + 1) / 2) + 1
+}
+
+// preparePeerChange checks if a LogAddPeer or LogRemovePeer should be performed,
+// and properly formats the data field on the log before dispatching it.
+func (r *Raft) preparePeerChange(l *logFuture) bool {
+	// Check if this is a known peer
+	p := l.log.peer
+	knownPeer := PeerContained(r.peers, p) || r.localAddr == p
+
+	// Ignore known peers on add
+	if l.log.Type == LogAddPeer && knownPeer {
+		l.respond(ErrKnownPeer)
+		return false
+	}
+
+	// Ignore unknown peers on remove
+	if l.log.Type == LogRemovePeer && !knownPeer {
+		l.respond(ErrUnknownPeer)
+		return false
+	}
+
+	// Construct the peer set
+	var peerSet []string
+	if l.log.Type == LogAddPeer {
+		peerSet = append([]string{p, r.localAddr}, r.peers...)
+	} else {
+		peerSet = ExcludePeer(append([]string{r.localAddr}, r.peers...), p)
+	}
+
+	// Setup the log
+	l.log.Data = encodePeers(peerSet, r.trans)
+	return true
+}
+
+// dispatchLogs is used to push a set of logs to disk, mark them
+// as inflight, and begin replication of them.
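quorumSize counts this node plus r.peers, so for a cluster of N voters the requirement is floor(N/2)+1. A standalone restatement of the arithmetic:

// quorum mirrors quorumSize above for a cluster of n voters (self included).
func quorum(n int) int { return n/2 + 1 }

// quorum(1) == 1, quorum(3) == 2, quorum(5) == 3:
// a five-node cluster keeps committing with up to two nodes down.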
+func (r *Raft) dispatchLogs(applyLogs []*logFuture) { + now := time.Now() + defer metrics.MeasureSince([]string{"raft", "leader", "dispatchLog"}, now) + + term := r.getCurrentTerm() + lastIndex := r.getLastIndex() + logs := make([]*Log, len(applyLogs)) + + for idx, applyLog := range applyLogs { + applyLog.dispatch = now + applyLog.log.Index = lastIndex + uint64(idx) + 1 + applyLog.log.Term = term + applyLog.policy = newMajorityQuorum(len(r.peers) + 1) + logs[idx] = &applyLog.log + } + + // Write the log entry locally + if err := r.logs.StoreLogs(logs); err != nil { + r.logger.Printf("[ERR] raft: Failed to commit logs: %v", err) + for _, applyLog := range applyLogs { + applyLog.respond(err) + } + r.setState(Follower) + return + } + + // Add this to the inflight logs, commit + r.leaderState.inflight.StartAll(applyLogs) + + // Update the last log since it's on disk now + r.setLastLogIndex(lastIndex + uint64(len(applyLogs))) + r.setLastLogTerm(term) + + // Notify the replicators of the new log + for _, f := range r.leaderState.replState { + asyncNotifyCh(f.triggerCh) + } +} + +// processLogs is used to process all the logs from the lastApplied +// up to the given index. +func (r *Raft) processLogs(index uint64, future *logFuture) { + // Reject logs we've applied already + lastApplied := r.getLastApplied() + if index <= lastApplied { + r.logger.Printf("[WARN] raft: Skipping application of old log: %d", index) + return + } + + // Apply all the preceding logs + for idx := r.getLastApplied() + 1; idx <= index; idx++ { + // Get the log, either from the future or from our log store + if future != nil && future.log.Index == idx { + r.processLog(&future.log, future, false) + + } else { + l := new(Log) + if err := r.logs.GetLog(idx, l); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at %d: %v", idx, err) + panic(err) + } + r.processLog(l, nil, false) + } + + // Update the lastApplied index and term + r.setLastApplied(idx) + } +} + +// processLog is invoked to process the application of a single committed log. +// Returns if this log entry would cause us to stepDown after it commits. +func (r *Raft) processLog(l *Log, future *logFuture, precommit bool) (stepDown bool) { + switch l.Type { + case LogBarrier: + // Barrier is handled by the FSM + fallthrough + + case LogCommand: + // Forward to the fsm handler + select { + case r.fsmCommitCh <- commitTuple{l, future}: + case <-r.shutdownCh: + if future != nil { + future.respond(ErrRaftShutdown) + } + } + + // Return so that the future is only responded to + // by the FSM handler when the application is done + return + + case LogAddPeer: + fallthrough + case LogRemovePeer: + peers := decodePeers(l.Data, r.trans) + r.logger.Printf("[DEBUG] raft: Node %v updated peer set (%v): %v", r.localAddr, l.Type, peers) + + // If the peer set does not include us, remove all other peers + removeSelf := !PeerContained(peers, r.localAddr) && l.Type == LogRemovePeer + if removeSelf { + // Mark that this operation will cause us to step down as + // leader. This prevents the future logs from being Applied + // from this leader. + stepDown = true + + // We only modify the peers after the commit, otherwise we + // would be using a quorum size of 1 for the RemovePeer operation. + // This is used with the stepDown guard to prevent any other logs. 
+ if !precommit { + r.peers = nil + r.peerStore.SetPeers([]string{r.localAddr}) + } + } else { + r.peers = ExcludePeer(peers, r.localAddr) + r.peerStore.SetPeers(peers) + } + + // Handle replication if we are the leader + if r.getState() == Leader { + for _, p := range r.peers { + if _, ok := r.leaderState.replState[p]; !ok { + r.logger.Printf("[INFO] raft: Added peer %v, starting replication", p) + r.startReplication(p) + } + } + } + + // Stop replication for old nodes + if r.getState() == Leader && !precommit { + var toDelete []string + for _, repl := range r.leaderState.replState { + if !PeerContained(r.peers, repl.peer) { + r.logger.Printf("[INFO] raft: Removed peer %v, stopping replication (Index: %d)", repl.peer, l.Index) + + // Replicate up to this index and stop + repl.stopCh <- l.Index + close(repl.stopCh) + toDelete = append(toDelete, repl.peer) + } + } + for _, name := range toDelete { + delete(r.leaderState.replState, name) + } + } + + // Handle removing ourself + if removeSelf && !precommit { + if r.conf.ShutdownOnRemove { + r.logger.Printf("[INFO] raft: Removed ourself, shutting down") + r.Shutdown() + } else { + r.logger.Printf("[INFO] raft: Removed ourself, transitioning to follower") + r.setState(Follower) + } + } + + case LogNoop: + // Ignore the no-op + default: + r.logger.Printf("[ERR] raft: Got unrecognized log type: %#v", l) + } + + // Invoke the future if given + if future != nil && !precommit { + future.respond(nil) + } + return +} + +// processRPC is called to handle an incoming RPC request. +func (r *Raft) processRPC(rpc RPC) { + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + case *RequestVoteRequest: + r.requestVote(rpc, cmd) + case *InstallSnapshotRequest: + r.installSnapshot(rpc, cmd) + default: + r.logger.Printf("[ERR] raft: Got unexpected command: %#v", rpc.Command) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// processHeartbeat is a special handler used just for heartbeat requests +// so that they can be fast-pathed if a transport supports it. +func (r *Raft) processHeartbeat(rpc RPC) { + defer metrics.MeasureSince([]string{"raft", "rpc", "processHeartbeat"}, time.Now()) + + // Check if we are shutdown, just ignore the RPC + select { + case <-r.shutdownCh: + return + default: + } + + // Ensure we are only handling a heartbeat + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + default: + r.logger.Printf("[ERR] raft: Expected heartbeat, got command: %#v", rpc.Command) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// appendEntries is invoked when we get an append entries RPC call. 
+func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) {
+	defer metrics.MeasureSince([]string{"raft", "rpc", "appendEntries"}, time.Now())
+	// Setup a response
+	resp := &AppendEntriesResponse{
+		Term:           r.getCurrentTerm(),
+		LastLog:        r.getLastIndex(),
+		Success:        false,
+		NoRetryBackoff: false,
+	}
+	var rpcErr error
+	defer func() {
+		rpc.Respond(resp, rpcErr)
+	}()
+
+	// Ignore an older term
+	if a.Term < r.getCurrentTerm() {
+		return
+	}
+
+	// Increase the term if we see a newer one, also transition to follower
+	// if we ever get an appendEntries call
+	if a.Term > r.getCurrentTerm() || r.getState() != Follower {
+		// Ensure transition to follower
+		r.setState(Follower)
+		r.setCurrentTerm(a.Term)
+		resp.Term = a.Term
+	}
+
+	// Save the current leader
+	r.setLeader(r.trans.DecodePeer(a.Leader))
+
+	// Verify the last log entry
+	if a.PrevLogEntry > 0 {
+		lastIdx, lastTerm := r.getLastEntry()
+
+		var prevLogTerm uint64
+		if a.PrevLogEntry == lastIdx {
+			prevLogTerm = lastTerm
+
+		} else {
+			var prevLog Log
+			if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil {
+				r.logger.Printf("[WARN] raft: Failed to get previous log: %d %v (last: %d)",
+					a.PrevLogEntry, err, lastIdx)
+				resp.NoRetryBackoff = true
+				return
+			}
+			prevLogTerm = prevLog.Term
+		}
+
+		if a.PrevLogTerm != prevLogTerm {
+			r.logger.Printf("[WARN] raft: Previous log term mismatch: ours: %d remote: %d",
+				prevLogTerm, a.PrevLogTerm)
+			resp.NoRetryBackoff = true
+			return
+		}
+	}
+
+	// Process any new entries
+	if n := len(a.Entries); n > 0 {
+		start := time.Now()
+		first := a.Entries[0]
+		last := a.Entries[n-1]
+
+		// Delete any conflicting entries
+		lastLogIdx := r.getLastLogIndex()
+		if first.Index <= lastLogIdx {
+			r.logger.Printf("[WARN] raft: Clearing log suffix from %d to %d", first.Index, lastLogIdx)
+			if err := r.logs.DeleteRange(first.Index, lastLogIdx); err != nil {
+				r.logger.Printf("[ERR] raft: Failed to clear log suffix: %v", err)
+				return
+			}
+		}
+
+		// Append the entries
+		if err := r.logs.StoreLogs(a.Entries); err != nil {
+			r.logger.Printf("[ERR] raft: Failed to append to logs: %v", err)
+			return
+		}
+
+		// Update the lastLog
+		r.setLastLogIndex(last.Index)
+		r.setLastLogTerm(last.Term)
+		metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "storeLogs"}, start)
+	}
+
+	// Update the commit index
+	if a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() {
+		start := time.Now()
+		idx := min(a.LeaderCommitIndex, r.getLastIndex())
+		r.setCommitIndex(idx)
+		r.processLogs(idx, nil)
+		metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "processLogs"}, start)
+	}
+
+	// Everything went well, set success
+	resp.Success = true
+	r.setLastContact()
+	return
+}
+
+// requestVote is invoked when we get a RequestVote RPC call.
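Note that heartbeats flow through this same handler: the heartbeat loop later in this diff sends an AppendEntriesRequest carrying only Term and Leader, so the entry-processing and commit-index blocks above are simply skipped. For illustration (term, trans, and localAddr are assumed variables):

// A heartbeat is an AppendEntries with no entries and no commit update.
hb := raft.AppendEntriesRequest{
	Term:   term,                        // leader's current term
	Leader: trans.EncodePeer(localAddr), // encoded leader address
}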
+func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) {
+	defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now())
+	// Setup a response
+	resp := &RequestVoteResponse{
+		Term:    r.getCurrentTerm(),
+		Peers:   encodePeers(r.peers, r.trans),
+		Granted: false,
+	}
+	var rpcErr error
+	defer func() {
+		rpc.Respond(resp, rpcErr)
+	}()
+
+	// Check if we have an existing leader [who's not the candidate]
+	candidate := r.trans.DecodePeer(req.Candidate)
+	if leader := r.Leader(); leader != "" && leader != candidate {
+		r.logger.Printf("[WARN] raft: Rejecting vote request from %v since we have a leader: %v",
+			candidate, leader)
+		return
+	}
+
+	// Ignore an older term
+	if req.Term < r.getCurrentTerm() {
+		return
+	}
+
+	// Increase the term if we see a newer one
+	if req.Term > r.getCurrentTerm() {
+		// Ensure transition to follower
+		r.setState(Follower)
+		r.setCurrentTerm(req.Term)
+		resp.Term = req.Term
+	}
+
+	// Check if we have voted yet
+	lastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm)
+	if err != nil && err.Error() != "not found" {
+		r.logger.Printf("[ERR] raft: Failed to get last vote term: %v", err)
+		return
+	}
+	lastVoteCandBytes, err := r.stable.Get(keyLastVoteCand)
+	if err != nil && err.Error() != "not found" {
+		r.logger.Printf("[ERR] raft: Failed to get last vote candidate: %v", err)
+		return
+	}
+
+	// Check if we've voted in this election before
+	if lastVoteTerm == req.Term && lastVoteCandBytes != nil {
+		r.logger.Printf("[INFO] raft: Duplicate RequestVote for same term: %d", req.Term)
+		if bytes.Equal(lastVoteCandBytes, req.Candidate) {
+			r.logger.Printf("[WARN] raft: Duplicate RequestVote from candidate: %s", req.Candidate)
+			resp.Granted = true
+		}
+		return
+	}
+
+	// Reject if our last log term is newer than the candidate's
+	lastIdx, lastTerm := r.getLastEntry()
+	if lastTerm > req.LastLogTerm {
+		r.logger.Printf("[WARN] raft: Rejecting vote request from %v since our last term is greater (%d, %d)",
+			candidate, lastTerm, req.LastLogTerm)
+		return
+	}
+
+	// Reject if our last log index is ahead of the candidate's
+	if lastIdx > req.LastLogIndex {
+		r.logger.Printf("[WARN] raft: Rejecting vote request from %v since our last index is greater (%d, %d)",
+			candidate, lastIdx, req.LastLogIndex)
+		return
+	}
+
+	// Persist a vote for safety
+	if err := r.persistVote(req.Term, req.Candidate); err != nil {
+		r.logger.Printf("[ERR] raft: Failed to persist vote: %v", err)
+		return
+	}
+
+	resp.Granted = true
+	return
+}
+
+// installSnapshot is invoked when we get an InstallSnapshot RPC call.
+// We must be in the follower state for this, since it means we are
+// too far behind a leader for log replay.
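The two log-recency checks in requestVote above reduce to a single predicate over the candidate's last entry; restated as a hypothetical helper:

// grantable mirrors requestVote's log checks: the vote can proceed only if
// the candidate's last term and last index are both at least as large as ours.
func grantable(ourLastIdx, ourLastTerm, candLastIdx, candLastTerm uint64) bool {
	return candLastTerm >= ourLastTerm && candLastIdx >= ourLastIdx
}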
+func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) {
+	defer metrics.MeasureSince([]string{"raft", "rpc", "installSnapshot"}, time.Now())
+	// Setup a response
+	resp := &InstallSnapshotResponse{
+		Term:    r.getCurrentTerm(),
+		Success: false,
+	}
+	var rpcErr error
+	defer func() {
+		rpc.Respond(resp, rpcErr)
+	}()
+
+	// Ignore an older term
+	if req.Term < r.getCurrentTerm() {
+		return
+	}
+
+	// Increase the term if we see a newer one
+	if req.Term > r.getCurrentTerm() {
+		// Ensure transition to follower
+		r.setState(Follower)
+		r.setCurrentTerm(req.Term)
+		resp.Term = req.Term
+	}
+
+	// Save the current leader
+	r.setLeader(r.trans.DecodePeer(req.Leader))
+
+	// Create a new snapshot
+	sink, err := r.snapshots.Create(req.LastLogIndex, req.LastLogTerm, req.Peers)
+	if err != nil {
+		r.logger.Printf("[ERR] raft: Failed to create snapshot to install: %v", err)
+		rpcErr = fmt.Errorf("failed to create snapshot: %v", err)
+		return
+	}
+
+	// Spill the remote snapshot to disk
+	n, err := io.Copy(sink, rpc.Reader)
+	if err != nil {
+		sink.Cancel()
+		r.logger.Printf("[ERR] raft: Failed to copy snapshot: %v", err)
+		rpcErr = err
+		return
+	}
+
+	// Check that we received it all
+	if n != req.Size {
+		sink.Cancel()
+		r.logger.Printf("[ERR] raft: Failed to receive whole snapshot: %d / %d", n, req.Size)
+		rpcErr = fmt.Errorf("short read")
+		return
+	}
+
+	// Finalize the snapshot
+	if err := sink.Close(); err != nil {
+		r.logger.Printf("[ERR] raft: Failed to finalize snapshot: %v", err)
+		rpcErr = err
+		return
+	}
+	r.logger.Printf("[INFO] raft: Copied %d bytes to local snapshot", n)
+
+	// Restore snapshot
+	future := &restoreFuture{ID: sink.ID()}
+	future.init()
+	select {
+	case r.fsmRestoreCh <- future:
+	case <-r.shutdownCh:
+		future.respond(ErrRaftShutdown)
+		return
+	}
+
+	// Wait for the restore to happen
+	if err := future.Error(); err != nil {
+		r.logger.Printf("[ERR] raft: Failed to restore snapshot: %v", err)
+		rpcErr = err
+		return
+	}
+
+	// Update the lastApplied so we don't replay old logs
+	r.setLastApplied(req.LastLogIndex)
+
+	// Update the last stable snapshot info
+	r.setLastSnapshotIndex(req.LastLogIndex)
+	r.setLastSnapshotTerm(req.LastLogTerm)
+
+	// Restore the peer set
+	peers := decodePeers(req.Peers, r.trans)
+	r.peers = ExcludePeer(peers, r.localAddr)
+	r.peerStore.SetPeers(peers)
+
+	// Compact logs, continue even if this fails
+	if err := r.compactLogs(req.LastLogIndex); err != nil {
+		r.logger.Printf("[ERR] raft: Failed to compact logs: %v", err)
+	}
+
+	r.logger.Printf("[INFO] raft: Installed remote snapshot")
+	resp.Success = true
+	r.setLastContact()
+	return
+}
+
+// setLastContact is used to set the last contact time to now
+func (r *Raft) setLastContact() {
+	r.lastContactLock.Lock()
+	r.lastContact = time.Now()
+	r.lastContactLock.Unlock()
+}
+
+type voteResult struct {
+	RequestVoteResponse
+	voter string
+}
+
+// electSelf is used to send a RequestVote RPC to all peers,
+// and vote for ourself. This has the side effect of incrementing
+// the current term. The response channel returned is used to wait
+// for all the responses (including a vote for ourself).
+func (r *Raft) electSelf() <-chan *voteResult { + // Create a response channel + respCh := make(chan *voteResult, len(r.peers)+1) + + // Increment the term + r.setCurrentTerm(r.getCurrentTerm() + 1) + + // Construct the request + lastIdx, lastTerm := r.getLastEntry() + req := &RequestVoteRequest{ + Term: r.getCurrentTerm(), + Candidate: r.trans.EncodePeer(r.localAddr), + LastLogIndex: lastIdx, + LastLogTerm: lastTerm, + } + + // Construct a function to ask for a vote + askPeer := func(peer string) { + r.goFunc(func() { + defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now()) + resp := &voteResult{voter: peer} + err := r.trans.RequestVote(peer, req, &resp.RequestVoteResponse) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to make RequestVote RPC to %v: %v", peer, err) + resp.Term = req.Term + resp.Granted = false + } + + // If we are not a peer, we could have been removed but failed + // to receive the log message. OR it could mean an improperly configured + // cluster. Either way, we should warn + if err == nil { + peerSet := decodePeers(resp.Peers, r.trans) + if !PeerContained(peerSet, r.localAddr) { + r.logger.Printf("[WARN] raft: Remote peer %v does not have local node %v as a peer", + peer, r.localAddr) + } + } + + respCh <- resp + }) + } + + // For each peer, request a vote + for _, peer := range r.peers { + askPeer(peer) + } + + // Persist a vote for ourselves + if err := r.persistVote(req.Term, req.Candidate); err != nil { + r.logger.Printf("[ERR] raft: Failed to persist vote : %v", err) + return nil + } + + // Include our own vote + respCh <- &voteResult{ + RequestVoteResponse: RequestVoteResponse{ + Term: req.Term, + Granted: true, + }, + voter: r.localAddr, + } + return respCh +} + +// persistVote is used to persist our vote for safety. +func (r *Raft) persistVote(term uint64, candidate []byte) error { + if err := r.stable.SetUint64(keyLastVoteTerm, term); err != nil { + return err + } + if err := r.stable.Set(keyLastVoteCand, candidate); err != nil { + return err + } + return nil +} + +// setCurrentTerm is used to set the current term in a durable manner. +func (r *Raft) setCurrentTerm(t uint64) { + // Persist to disk first + if err := r.stable.SetUint64(keyCurrentTerm, t); err != nil { + panic(fmt.Errorf("failed to save current term: %v", err)) + } + r.raftState.setCurrentTerm(t) +} + +// setState is used to update the current state. Any state +// transition causes the known leader to be cleared. This means +// that leader should be set only after updating the state. +func (r *Raft) setState(state RaftState) { + r.setLeader("") + r.raftState.setState(state) +} + +// runSnapshots is a long running goroutine used to manage taking +// new snapshots of the FSM. It runs in parallel to the FSM and +// main goroutines, so that snapshots do not block normal operation. +func (r *Raft) runSnapshots() { + for { + select { + case <-randomTimeout(r.conf.SnapshotInterval): + // Check if we should snapshot + if !r.shouldSnapshot() { + continue + } + + // Trigger a snapshot + if err := r.takeSnapshot(); err != nil { + r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) + } + + case future := <-r.snapshotCh: + // User-triggered, run immediately + err := r.takeSnapshot() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) + } + future.respond(err) + + case <-r.shutdownCh: + return + } + } +} + +// shouldSnapshot checks if we meet the conditions to take +// a new snapshot. 
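The snapshot loop above is driven by three Config fields used throughout this file. A hypothetical tuning sketch (values are illustrative; DefaultConfig is assumed from the same package):

conf := raft.DefaultConfig()
conf.SnapshotInterval = 2 * time.Minute // how often shouldSnapshot is re-checked (randomized)
conf.SnapshotThreshold = 8192           // snapshot once this many logs accumulate past the last one
conf.TrailingLogs = 10240               // logs to retain after compaction for slow followers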
+func (r *Raft) shouldSnapshot() bool {
+	// Check the last snapshot index
+	lastSnap := r.getLastSnapshotIndex()
+
+	// Check the last log index
+	lastIdx, err := r.logs.LastIndex()
+	if err != nil {
+		r.logger.Printf("[ERR] raft: Failed to get last log index: %v", err)
+		return false
+	}
+
+	// Compare the delta to the threshold
+	delta := lastIdx - lastSnap
+	return delta >= r.conf.SnapshotThreshold
+}
+
+// takeSnapshot is used to take a new snapshot.
+func (r *Raft) takeSnapshot() error {
+	defer metrics.MeasureSince([]string{"raft", "snapshot", "takeSnapshot"}, time.Now())
+	// Create a snapshot request
+	req := &reqSnapshotFuture{}
+	req.init()
+
+	// Wait for dispatch or shutdown
+	select {
+	case r.fsmSnapshotCh <- req:
+	case <-r.shutdownCh:
+		return ErrRaftShutdown
+	}
+
+	// Wait until we get a response
+	if err := req.Error(); err != nil {
+		if err != ErrNothingNewToSnapshot {
+			err = fmt.Errorf("failed to start snapshot: %v", err)
+		}
+		return err
+	}
+	defer req.snapshot.Release()
+
+	// Log that we are starting the snapshot
+	r.logger.Printf("[INFO] raft: Starting snapshot up to %d", req.index)
+
+	// Encode the peerset
+	peerSet := encodePeers(req.peers, r.trans)
+
+	// Create a new snapshot
+	start := time.Now()
+	sink, err := r.snapshots.Create(req.index, req.term, peerSet)
+	if err != nil {
+		return fmt.Errorf("failed to create snapshot: %v", err)
+	}
+	metrics.MeasureSince([]string{"raft", "snapshot", "create"}, start)
+
+	// Try to persist the snapshot
+	start = time.Now()
+	if err := req.snapshot.Persist(sink); err != nil {
+		sink.Cancel()
+		return fmt.Errorf("failed to persist snapshot: %v", err)
+	}
+	metrics.MeasureSince([]string{"raft", "snapshot", "persist"}, start)
+
+	// Close and check for error
+	if err := sink.Close(); err != nil {
+		return fmt.Errorf("failed to close snapshot: %v", err)
+	}
+
+	// Update the last stable snapshot info
+	r.setLastSnapshotIndex(req.index)
+	r.setLastSnapshotTerm(req.term)
+
+	// Compact the logs
+	if err := r.compactLogs(req.index); err != nil {
+		return err
+	}
+
+	// Log completion
+	r.logger.Printf("[INFO] raft: Snapshot to %d complete", req.index)
+	return nil
+}
+
+// compactLogs takes the last inclusive index of a snapshot
+// and trims the logs that are no longer needed.
+func (r *Raft) compactLogs(snapIdx uint64) error {
+	defer metrics.MeasureSince([]string{"raft", "compactLogs"}, time.Now())
+	// Determine log ranges to compact
+	minLog, err := r.logs.FirstIndex()
+	if err != nil {
+		return fmt.Errorf("failed to get first log index: %v", err)
+	}
+
+	// Check if we have enough logs to truncate
+	if r.getLastLogIndex() <= r.conf.TrailingLogs {
+		return nil
+	}
+
+	// Truncate up to the end of the snapshot, or `TrailingLogs`
+	// behind the newest log, whichever is further back. This ensures
+	// at least `TrailingLogs` entries, but does not allow logs
+	// after the snapshot to be removed.
+	maxLog := min(snapIdx, r.getLastLogIndex()-r.conf.TrailingLogs)
+
+	// Log this
+	r.logger.Printf("[INFO] raft: Compacting logs from %d to %d", minLog, maxLog)
+
+	// Compact the logs
+	if err := r.logs.DeleteRange(minLog, maxLog); err != nil {
+		return fmt.Errorf("log compaction failed: %v", err)
+	}
+	return nil
+}
+
+// restoreSnapshot attempts to restore the latest snapshots, and fails
+// if none of them can be restored. This is called at initialization time,
+// and is completely unsafe to call at any other time.
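To make the compaction window concrete, a worked example using the same min helper as the code above (values invented):

snapIdx, lastLog, trailing := uint64(950), uint64(1000), uint64(200)
maxLog := min(snapIdx, lastLog-trailing)
// maxLog == 800: entries up to index 800 are deleted, keeping 200 trailing
// entries even though the snapshot already covers up to index 950.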
+func (r *Raft) restoreSnapshot() error {
+	snapshots, err := r.snapshots.List()
+	if err != nil {
+		r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err)
+		return err
+	}
+
+	// Try to load in order of newest to oldest
+	for _, snapshot := range snapshots {
+		_, source, err := r.snapshots.Open(snapshot.ID)
+		if err != nil {
+			r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapshot.ID, err)
+			continue
+		}
+		defer source.Close()
+
+		if err := r.fsm.Restore(source); err != nil {
+			r.logger.Printf("[ERR] raft: Failed to restore snapshot %v: %v", snapshot.ID, err)
+			continue
+		}
+
+		// Log success
+		r.logger.Printf("[INFO] raft: Restored from snapshot %v", snapshot.ID)
+
+		// Update the lastApplied so we don't replay old logs
+		r.setLastApplied(snapshot.Index)
+
+		// Update the last stable snapshot info
+		r.setLastSnapshotIndex(snapshot.Index)
+		r.setLastSnapshotTerm(snapshot.Term)
+
+		// Success!
+		return nil
+	}
+
+	// If we had snapshots and failed to load them, it's an error
+	if len(snapshots) > 0 {
+		return fmt.Errorf("failed to load any existing snapshots")
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/replication.go b/Godeps/_workspace/src/github.com/hashicorp/raft/replication.go
new file mode 100644
index 0000000000000..6a01631d237eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/replication.go
@@ -0,0 +1,517 @@
+package raft
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/armon/go-metrics"
+)
+
+const (
+	maxFailureScale = 12
+	failureWait     = 10 * time.Millisecond
+)
+
+var (
+	// ErrLogNotFound indicates a given log entry is not available.
+	ErrLogNotFound = errors.New("log not found")
+
+	// ErrPipelineReplicationNotSupported can be returned by the transport to
+	// signal that pipeline replication is not supported in general, and that
+	// no error message should be produced.
+	ErrPipelineReplicationNotSupported = errors.New("pipeline replication not supported")
+)
+
+type followerReplication struct {
+	peer     string
+	inflight *inflight
+
+	stopCh    chan uint64
+	triggerCh chan struct{}
+
+	currentTerm uint64
+	matchIndex  uint64
+	nextIndex   uint64
+
+	lastContact     time.Time
+	lastContactLock sync.RWMutex
+
+	failures uint64
+
+	notifyCh   chan struct{}
+	notify     []*verifyFuture
+	notifyLock sync.Mutex
+
+	// stepDown is used to indicate to the leader that we
+	// should step down based on information from a follower.
+	stepDown chan struct{}
+
+	// allowPipeline is used to control whether pipeline
+	// replication should be enabled.
+	allowPipeline bool
+}
+
+// notifyAll is used to notify all the waiting verify futures
+// if the follower believes we are still the leader.
+func (s *followerReplication) notifyAll(leader bool) {
+	// Clear the waiting notifies minimizing lock time
+	s.notifyLock.Lock()
+	n := s.notify
+	s.notify = nil
+	s.notifyLock.Unlock()
+
+	// Submit our votes
+	for _, v := range n {
+		v.vote(leader)
+	}
+}
+
+// LastContact returns the time of last contact.
+func (s *followerReplication) LastContact() time.Time {
+	s.lastContactLock.RLock()
+	last := s.lastContact
+	s.lastContactLock.RUnlock()
+	return last
+}
+
+// setLastContact sets the last contact to the current time.
+func (s *followerReplication) setLastContact() {
+	s.lastContactLock.Lock()
+	s.lastContact = time.Now()
+	s.lastContactLock.Unlock()
+}
+
+// replicate is a long running routine that is used to manage
+// the process of replicating logs to our followers.
+func (r *Raft) replicate(s *followerReplication) {
+	// Start an async heartbeating routine
+	stopHeartbeat := make(chan struct{})
+	defer close(stopHeartbeat)
+	r.goFunc(func() { r.heartbeat(s, stopHeartbeat) })
+
+RPC:
+	shouldStop := false
+	for !shouldStop {
+		select {
+		case maxIndex := <-s.stopCh:
+			// Make a best effort to replicate up to this index
+			if maxIndex > 0 {
+				r.replicateTo(s, maxIndex)
+			}
+			return
+		case <-s.triggerCh:
+			shouldStop = r.replicateTo(s, r.getLastLogIndex())
+		case <-randomTimeout(r.conf.CommitTimeout):
+			shouldStop = r.replicateTo(s, r.getLastLogIndex())
+		}
+
+		// If things look healthy, switch to pipeline mode
+		if !shouldStop && s.allowPipeline {
+			goto PIPELINE
+		}
+	}
+	return
+
+PIPELINE:
+	// Disable until re-enabled
+	s.allowPipeline = false
+
+	// Replicates using a pipeline for high performance. This method
+	// is not able to gracefully recover from errors, and so we fall back
+	// to standard mode on failure.
+	if err := r.pipelineReplicate(s); err != nil {
+		if err != ErrPipelineReplicationNotSupported {
+			r.logger.Printf("[ERR] raft: Failed to start pipeline replication to %s: %s", s.peer, err)
+		}
+	}
+	goto RPC
+}
+
+// replicateTo is used to replicate the logs up to a given last index.
+// If the follower log is behind, we take care to bring them up to date.
+func (r *Raft) replicateTo(s *followerReplication, lastIndex uint64) (shouldStop bool) {
+	// Create the base request
+	var req AppendEntriesRequest
+	var resp AppendEntriesResponse
+	var start time.Time
+START:
+	// Prevent an excessive retry rate on errors
+	if s.failures > 0 {
+		select {
+		case <-time.After(backoff(failureWait, s.failures, maxFailureScale)):
+		case <-r.shutdownCh:
+		}
+	}
+
+	// Setup the request
+	if err := r.setupAppendEntries(s, &req, s.nextIndex, lastIndex); err == ErrLogNotFound {
+		goto SEND_SNAP
+	} else if err != nil {
+		return
+	}
+
+	// Make the RPC call
+	start = time.Now()
+	if err := r.trans.AppendEntries(s.peer, &req, &resp); err != nil {
+		r.logger.Printf("[ERR] raft: Failed to AppendEntries to %v: %v", s.peer, err)
+		s.failures++
+		return
+	}
+	appendStats(s.peer, start, float32(len(req.Entries)))
+
+	// Check for a newer term, stop running
+	if resp.Term > req.Term {
+		r.handleStaleTerm(s)
+		return true
+	}
+
+	// Update the last contact
+	s.setLastContact()
+
+	// Update s based on success
+	if resp.Success {
+		// Update our replication state
+		updateLastAppended(s, &req)
+
+		// Clear any failures, allow pipelining
+		s.failures = 0
+		s.allowPipeline = true
+	} else {
+		s.nextIndex = max(min(s.nextIndex-1, resp.LastLog+1), 1)
+		s.matchIndex = s.nextIndex - 1
+		if resp.NoRetryBackoff {
+			s.failures = 0
+		} else {
+			s.failures++
+		}
+		r.logger.Printf("[WARN] raft: AppendEntries to %v rejected, sending older logs (next: %d)", s.peer, s.nextIndex)
+	}
+
+CHECK_MORE:
+	// Check if there are more logs to replicate
+	if s.nextIndex <= lastIndex {
+		goto START
+	}
+	return
+
+	// SEND_SNAP is used when we fail to get a log, usually because the follower
+	// is too far behind, and we must ship a snapshot down instead
+SEND_SNAP:
+	if stop, err := r.sendLatestSnapshot(s); stop {
+		return true
+	} else if err != nil {
+		r.logger.Printf("[ERR] raft: Failed to send snapshot to %v: %v", s.peer, err)
+		return
+	}
+
+	// Check if there is more to replicate
+	goto CHECK_MORE
+}
+
+// sendLatestSnapshot is used to send the latest snapshot we have
+// down to our follower.
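Note how replicateTo's rejection branch above backtracks nextIndex using the follower's reported LastLog rather than probing one entry at a time; a worked example with invented values:

nextIndex, followerLastLog := uint64(120), uint64(90)
nextIndex = max(min(nextIndex-1, followerLastLog+1), 1)
// nextIndex == 91: the next attempt starts just past the follower's log
// instead of retrying 119, 118, ... individually.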
+func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { + // Get the snapshots + snapshots, err := r.snapshots.List() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) + return false, err + } + + // Check we have at least a single snapshot + if len(snapshots) == 0 { + return false, fmt.Errorf("no snapshots found") + } + + // Open the most recent snapshot + snapID := snapshots[0].ID + meta, snapshot, err := r.snapshots.Open(snapID) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapID, err) + return false, err + } + defer snapshot.Close() + + // Setup the request + req := InstallSnapshotRequest{ + Term: s.currentTerm, + Leader: r.trans.EncodePeer(r.localAddr), + LastLogIndex: meta.Index, + LastLogTerm: meta.Term, + Peers: meta.Peers, + Size: meta.Size, + } + + // Make the call + start := time.Now() + var resp InstallSnapshotResponse + if err := r.trans.InstallSnapshot(s.peer, &req, &resp, snapshot); err != nil { + r.logger.Printf("[ERR] raft: Failed to install snapshot %v: %v", snapID, err) + s.failures++ + return false, err + } + metrics.MeasureSince([]string{"raft", "replication", "installSnapshot", s.peer}, start) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return true, nil + } + + // Update the last contact + s.setLastContact() + + // Check for success + if resp.Success { + // Mark any inflight logs as committed + s.inflight.CommitRange(s.matchIndex+1, meta.Index) + + // Update the indexes + s.matchIndex = meta.Index + s.nextIndex = s.matchIndex + 1 + + // Clear any failures + s.failures = 0 + + // Notify we are still leader + s.notifyAll(true) + } else { + s.failures++ + r.logger.Printf("[WARN] raft: InstallSnapshot to %v rejected", s.peer) + } + return false, nil +} + +// heartbeat is used to periodically invoke AppendEntries on a peer +// to ensure they don't time out. This is done async of replicate(), +// since that routine could potentially be blocked on disk IO. +func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) { + var failures uint64 + req := AppendEntriesRequest{ + Term: s.currentTerm, + Leader: r.trans.EncodePeer(r.localAddr), + } + var resp AppendEntriesResponse + for { + // Wait for the next heartbeat interval or forced notify + select { + case <-s.notifyCh: + case <-randomTimeout(r.conf.HeartbeatTimeout / 10): + case <-stopCh: + return + } + + start := time.Now() + if err := r.trans.AppendEntries(s.peer, &req, &resp); err != nil { + r.logger.Printf("[ERR] raft: Failed to heartbeat to %v: %v", s.peer, err) + failures++ + select { + case <-time.After(backoff(failureWait, failures, maxFailureScale)): + case <-stopCh: + } + } else { + s.setLastContact() + failures = 0 + metrics.MeasureSince([]string{"raft", "replication", "heartbeat", s.peer}, start) + s.notifyAll(resp.Success) + } + } +} + +// pipelineReplicate is used when we have synchronized our state with the follower, +// and want to switch to a higher performance pipeline mode of replication. +// We only pipeline AppendEntries commands, and if we ever hit an error, we fall +// back to the standard replication which can handle more complex situations. 
+func (r *Raft) pipelineReplicate(s *followerReplication) error { + // Create a new pipeline + pipeline, err := r.trans.AppendEntriesPipeline(s.peer) + if err != nil { + return err + } + defer pipeline.Close() + + // Log start and stop of pipeline + r.logger.Printf("[INFO] raft: pipelining replication to peer %v", s.peer) + defer r.logger.Printf("[INFO] raft: aborting pipeline replication to peer %v", s.peer) + + // Create a shutdown and finish channel + stopCh := make(chan struct{}) + finishCh := make(chan struct{}) + + // Start a dedicated decoder + r.goFunc(func() { r.pipelineDecode(s, pipeline, stopCh, finishCh) }) + + // Start pipeline sends at the last good nextIndex + nextIndex := s.nextIndex + + shouldStop := false +SEND: + for !shouldStop { + select { + case <-finishCh: + break SEND + case maxIndex := <-s.stopCh: + if maxIndex > 0 { + r.pipelineSend(s, pipeline, &nextIndex, maxIndex) + } + break SEND + case <-s.triggerCh: + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, r.getLastLogIndex()) + case <-randomTimeout(r.conf.CommitTimeout): + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, r.getLastLogIndex()) + } + } + + // Stop our decoder, and wait for it to finish + close(stopCh) + select { + case <-finishCh: + case <-r.shutdownCh: + } + return nil +} + +// pipelineSend is used to send data over a pipeline. +func (r *Raft) pipelineSend(s *followerReplication, p AppendPipeline, nextIdx *uint64, lastIndex uint64) (shouldStop bool) { + // Create a new append request + req := new(AppendEntriesRequest) + if err := r.setupAppendEntries(s, req, *nextIdx, lastIndex); err != nil { + return true + } + + // Pipeline the append entries + if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil { + r.logger.Printf("[ERR] raft: Failed to pipeline AppendEntries to %v: %v", s.peer, err) + return true + } + + // Increase the next send log to avoid re-sending old logs + if n := len(req.Entries); n > 0 { + last := req.Entries[n-1] + *nextIdx = last.Index + 1 + } + return false +} + +// pipelineDecode is used to decode the responses of pipelined requests. +func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh, finishCh chan struct{}) { + defer close(finishCh) + respCh := p.Consumer() + for { + select { + case ready := <-respCh: + req, resp := ready.Request(), ready.Response() + appendStats(s.peer, ready.Start(), float32(len(req.Entries))) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return + } + + // Update the last contact + s.setLastContact() + + // Abort pipeline if not successful + if !resp.Success { + return + } + + // Update our replication state + updateLastAppended(s, req) + case <-stopCh: + return + } + } +} + +// setupAppendEntries is used to setup an append entries request. +func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { + req.Term = s.currentTerm + req.Leader = r.trans.EncodePeer(r.localAddr) + req.LeaderCommitIndex = r.getCommitIndex() + if err := r.setPreviousLog(req, nextIndex); err != nil { + return err + } + if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil { + return err + } + return nil +} + +// setPreviousLog is used to setup the PrevLogEntry and PrevLogTerm for an +// AppendEntriesRequest given the next index to replicate. 
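+// The follower compares these values against its own log to enforce the
+// Raft log matching property before accepting new entries.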
+func (r *Raft) setPreviousLog(req *AppendEntriesRequest, nextIndex uint64) error {
+	// Guard for the first index, since there is no 0 log entry
+	// Guard against the previous index being a snapshot as well
+	if nextIndex == 1 {
+		req.PrevLogEntry = 0
+		req.PrevLogTerm = 0
+
+	} else if (nextIndex - 1) == r.getLastSnapshotIndex() {
+		req.PrevLogEntry = r.getLastSnapshotIndex()
+		req.PrevLogTerm = r.getLastSnapshotTerm()
+
+	} else {
+		var l Log
+		if err := r.logs.GetLog(nextIndex-1, &l); err != nil {
+			r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v",
+				nextIndex-1, err)
+			return err
+		}
+
+		// Set the previous index and term
+		req.PrevLogEntry = l.Index
+		req.PrevLogTerm = l.Term
+	}
+	return nil
+}
+
+// setNewLogs is used to setup the logs which should be appended for a request.
+func (r *Raft) setNewLogs(req *AppendEntriesRequest, nextIndex, lastIndex uint64) error {
+	// Append up to MaxAppendEntries or up to the lastIndex
+	req.Entries = make([]*Log, 0, r.conf.MaxAppendEntries)
+	maxIndex := min(nextIndex+uint64(r.conf.MaxAppendEntries)-1, lastIndex)
+	for i := nextIndex; i <= maxIndex; i++ {
+		oldLog := new(Log)
+		if err := r.logs.GetLog(i, oldLog); err != nil {
+			r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", i, err)
+			return err
+		}
+		req.Entries = append(req.Entries, oldLog)
+	}
+	return nil
+}
+
+// appendStats is used to emit stats about an AppendEntries invocation.
+func appendStats(peer string, start time.Time, logs float32) {
+	metrics.MeasureSince([]string{"raft", "replication", "appendEntries", "rpc", peer}, start)
+	metrics.IncrCounter([]string{"raft", "replication", "appendEntries", "logs", peer}, logs)
+}
+
+// handleStaleTerm is used when a follower indicates that we have a stale term.
+func (r *Raft) handleStaleTerm(s *followerReplication) {
+	r.logger.Printf("[ERR] raft: peer %v has newer term, stopping replication", s.peer)
+	s.notifyAll(false) // No longer leader
+	asyncNotifyCh(s.stepDown)
+}
+
+// updateLastAppended is used to update follower replication state after a successful
+// AppendEntries RPC.
+func updateLastAppended(s *followerReplication, req *AppendEntriesRequest) {
+	// Mark any inflight logs as committed
+	if logs := req.Entries; len(logs) > 0 {
+		first := logs[0]
+		last := logs[len(logs)-1]
+		s.inflight.CommitRange(first.Index, last.Index)
+
+		// Update the indexes
+		s.matchIndex = last.Index
+		s.nextIndex = last.Index + 1
+	}
+
+	// Notify still leader
+	s.notifyAll(true)
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/snapshot.go b/Godeps/_workspace/src/github.com/hashicorp/raft/snapshot.go
new file mode 100644
index 0000000000000..7151f43ce2656
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/snapshot.go
@@ -0,0 +1,40 @@
+package raft
+
+import (
+	"io"
+)
+
+// SnapshotMeta is for metadata of a snapshot.
+type SnapshotMeta struct {
+	ID    string // ID is opaque to the store, and is used for opening
+	Index uint64
+	Term  uint64
+	Peers []byte
+	Size  int64
+}
+
+// SnapshotStore interface is used to allow for flexible implementations
+// of snapshot storage and retrieval. For example, a client could implement
+// a shared state store such as S3, allowing new nodes to restore snapshots
+// without streaming from the leader.
+type SnapshotStore interface {
+	// Create is used to begin a snapshot at a given index and term,
+	// with the current peer set already encoded.
+	Create(index, term uint64, peers []byte) (SnapshotSink, error)
+
+	// List is used to list the available snapshots in the store.
+	// It should return them in descending order, with the highest index first.
+	List() ([]*SnapshotMeta, error)
+
+	// Open takes a snapshot ID and provides a ReadCloser. Once close is
+	// called it is assumed the snapshot is no longer needed.
+	Open(id string) (*SnapshotMeta, io.ReadCloser, error)
+}
+
+// SnapshotSink is returned by SnapshotStore.Create. The FSM will Write state
+// to the sink and call Close on completion. On error, Cancel will be invoked.
+type SnapshotSink interface {
+	io.WriteCloser
+	ID() string
+	Cancel() error
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/stable.go b/Godeps/_workspace/src/github.com/hashicorp/raft/stable.go
new file mode 100644
index 0000000000000..ff59a8c570a29
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/stable.go
@@ -0,0 +1,15 @@
+package raft
+
+// StableStore is used to provide stable storage
+// of key configurations to ensure safety.
+type StableStore interface {
+	Set(key []byte, val []byte) error
+
+	// Get returns the value for key, or an empty byte slice if key was not found.
+	Get(key []byte) ([]byte, error)
+
+	SetUint64(key []byte, val uint64) error
+
+	// GetUint64 returns the uint64 value for key, or 0 if key was not found.
+	GetUint64(key []byte) (uint64, error)
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/state.go b/Godeps/_workspace/src/github.com/hashicorp/raft/state.go
new file mode 100644
index 0000000000000..41e80a1b51093
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/state.go
@@ -0,0 +1,169 @@
+package raft
+
+import (
+	"sync/atomic"
+)
+
+// RaftState captures the state of a Raft node: Follower, Candidate, Leader,
+// or Shutdown.
+type RaftState uint32
+
+const (
+	// Follower is the initial state of a Raft node.
+	Follower RaftState = iota
+
+	// Candidate is one of the valid states of a Raft node.
+	Candidate
+
+	// Leader is one of the valid states of a Raft node.
+	Leader
+
+	// Shutdown is the terminal state of a Raft node.
+	Shutdown
+)
+
+func (s RaftState) String() string {
+	switch s {
+	case Follower:
+		return "Follower"
+	case Candidate:
+		return "Candidate"
+	case Leader:
+		return "Leader"
+	case Shutdown:
+		return "Shutdown"
+	default:
+		return "Unknown"
+	}
+}
+
+// raftState is used to maintain various state variables
+// and provides an interface to set/get the variables in a
+// thread safe manner.
+type raftState struct {
+	// The current term, cache of StableStore
+	currentTerm uint64
+
+	// Cache the latest log from LogStore
+	LastLogIndex uint64
+	LastLogTerm  uint64
+
+	// Highest committed log entry
+	commitIndex uint64
+
+	// Last applied log to the FSM
+	lastApplied uint64
+
+	// Cache the latest snapshot index/term
+	lastSnapshotIndex uint64
+	lastSnapshotTerm  uint64
+
+	// Tracks the number of live routines
+	runningRoutines int32
+
+	// The current state
+	state RaftState
+}
+
+func (r *raftState) getState() RaftState {
+	stateAddr := (*uint32)(&r.state)
+	return RaftState(atomic.LoadUint32(stateAddr))
+}
+
+func (r *raftState) setState(s RaftState) {
+	stateAddr := (*uint32)(&r.state)
+	atomic.StoreUint32(stateAddr, uint32(s))
+}
+
+func (r *raftState) getCurrentTerm() uint64 {
+	return atomic.LoadUint64(&r.currentTerm)
+}
+
+func (r *raftState) setCurrentTerm(term uint64) {
+	atomic.StoreUint64(&r.currentTerm, term)
+}
+
+func (r *raftState) getLastLogIndex() uint64 {
+	return atomic.LoadUint64(&r.LastLogIndex)
+}
+
+func (r *raftState) setLastLogIndex(index uint64) {
+	atomic.StoreUint64(&r.LastLogIndex, index)
+}
+
+func (r *raftState) getLastLogTerm() uint64 {
+	return atomic.LoadUint64(&r.LastLogTerm)
+}
+
+func (r *raftState) setLastLogTerm(term uint64) {
+	atomic.StoreUint64(&r.LastLogTerm, term)
+}
+
+func (r *raftState) getCommitIndex() uint64 {
+	return atomic.LoadUint64(&r.commitIndex)
+}
+
+func (r *raftState) setCommitIndex(index uint64) {
+	atomic.StoreUint64(&r.commitIndex, index)
+}
+
+func (r *raftState) getLastApplied() uint64 {
+	return atomic.LoadUint64(&r.lastApplied)
+}
+
+func (r *raftState) setLastApplied(index uint64) {
+	atomic.StoreUint64(&r.lastApplied, index)
+}
+
+func (r *raftState) getLastSnapshotIndex() uint64 {
+	return atomic.LoadUint64(&r.lastSnapshotIndex)
+}
+
+func (r *raftState) setLastSnapshotIndex(index uint64) {
+	atomic.StoreUint64(&r.lastSnapshotIndex, index)
+}
+
+func (r *raftState) getLastSnapshotTerm() uint64 {
+	return atomic.LoadUint64(&r.lastSnapshotTerm)
+}
+
+func (r *raftState) setLastSnapshotTerm(term uint64) {
+	atomic.StoreUint64(&r.lastSnapshotTerm, term)
+}
+
+func (r *raftState) incrRoutines() {
+	atomic.AddInt32(&r.runningRoutines, 1)
+}
+
+func (r *raftState) decrRoutines() {
+	atomic.AddInt32(&r.runningRoutines, -1)
+}
+
+func (r *raftState) getRoutines() int32 {
+	return atomic.LoadInt32(&r.runningRoutines)
+}
+
+// Start a goroutine and properly handle the race between a routine
+// starting and incrementing, and exiting and decrementing.
+func (r *raftState) goFunc(f func()) {
+	r.incrRoutines()
+	go func() {
+		defer r.decrRoutines()
+		f()
+	}()
+}
+
+// getLastIndex returns the last index in stable storage.
+// Either from the last log or from the last snapshot.
+func (r *raftState) getLastIndex() uint64 {
+	return max(r.getLastLogIndex(), r.getLastSnapshotIndex())
+}
+
+// getLastEntry returns the last index and term in stable storage.
+// Either from the last log or from the last snapshot.
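+// When the log has been fully compacted away, the last snapshot index
+// and term are the authoritative values.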
+func (r *raftState) getLastEntry() (uint64, uint64) { + if r.getLastLogIndex() >= r.getLastSnapshotIndex() { + return r.getLastLogIndex(), r.getLastLogTerm() + } + return r.getLastSnapshotIndex(), r.getLastSnapshotTerm() +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport.go new file mode 100644 index 0000000000000..50c6d15df182a --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport.go @@ -0,0 +1,105 @@ +package raft + +import ( + "errors" + "io" + "log" + "net" + "time" +) + +var ( + errNotAdvertisable = errors.New("local bind address is not advertisable") + errNotTCP = errors.New("local address is not a TCP address") +) + +// TCPStreamLayer implements StreamLayer interface for plain TCP. +type TCPStreamLayer struct { + advertise net.Addr + listener *net.TCPListener +} + +// NewTCPTransport returns a NetworkTransport that is built on top of +// a TCP streaming transport layer. +func NewTCPTransport( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, maxPool, timeout, func(stream StreamLayer) *NetworkTransport { + return NewNetworkTransport(stream, maxPool, timeout, logOutput) + }) +} + +// NewTCPTransportWithLogger returns a NetworkTransport that is built on top of +// a TCP streaming transport layer, with log output going to the supplied Logger +func NewTCPTransportWithLogger( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logger *log.Logger, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, maxPool, timeout, func(stream StreamLayer) *NetworkTransport { + return NewNetworkTransportWithLogger(stream, maxPool, timeout, logger) + }) +} + +func newTCPTransport(bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + transportCreator func(stream StreamLayer) *NetworkTransport) (*NetworkTransport, error) { + // Try to bind + list, err := net.Listen("tcp", bindAddr) + if err != nil { + return nil, err + } + + // Create stream + stream := &TCPStreamLayer{ + advertise: advertise, + listener: list.(*net.TCPListener), + } + + // Verify that we have a usable advertise address + addr, ok := stream.Addr().(*net.TCPAddr) + if !ok { + list.Close() + return nil, errNotTCP + } + if addr.IP.IsUnspecified() { + list.Close() + return nil, errNotAdvertisable + } + + // Create the network transport + trans := transportCreator(stream) + return trans, nil +} + +// Dial implements the StreamLayer interface. +func (t *TCPStreamLayer) Dial(address string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", address, timeout) +} + +// Accept implements the net.Listener interface. +func (t *TCPStreamLayer) Accept() (c net.Conn, err error) { + return t.listener.Accept() +} + +// Close implements the net.Listener interface. +func (t *TCPStreamLayer) Close() (err error) { + return t.listener.Close() +} + +// Addr implements the net.Listener interface. 
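+// The advertise address, when configured, takes precedence over the
+// listener's bound address so that peers are always given a routable address.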
+func (t *TCPStreamLayer) Addr() net.Addr { + // Use an advertise addr if provided + if t.advertise != nil { + return t.advertise + } + return t.listener.Addr() +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/transport.go new file mode 100644 index 0000000000000..8928de0c2fc07 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/raft/transport.go @@ -0,0 +1,85 @@ +package raft + +import ( + "io" + "time" +) + +// RPCResponse captures both a response and a potential error. +type RPCResponse struct { + Response interface{} + Error error +} + +// RPC has a command, and provides a response mechanism. +type RPC struct { + Command interface{} + Reader io.Reader // Set only for InstallSnapshot + RespChan chan<- RPCResponse +} + +// Respond is used to respond with a response, error or both +func (r *RPC) Respond(resp interface{}, err error) { + r.RespChan <- RPCResponse{resp, err} +} + +// Transport provides an interface for network transports +// to allow Raft to communicate with other nodes. +type Transport interface { + // Consumer returns a channel that can be used to + // consume and respond to RPC requests. + Consumer() <-chan RPC + + // LocalAddr is used to return our local address to distinguish from our peers. + LocalAddr() string + + // AppendEntriesPipeline returns an interface that can be used to pipeline + // AppendEntries requests. + AppendEntriesPipeline(target string) (AppendPipeline, error) + + // AppendEntries sends the appropriate RPC to the target node. + AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error + + // RequestVote sends the appropriate RPC to the target node. + RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error + + // InstallSnapshot is used to push a snapshot down to a follower. The data is read from + // the ReadCloser and streamed to the client. + InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error + + // EncodePeer is used to serialize a peer name. + EncodePeer(string) []byte + + // DecodePeer is used to deserialize a peer name. + DecodePeer([]byte) string + + // SetHeartbeatHandler is used to setup a heartbeat handler + // as a fast-pass. This is to avoid head-of-line blocking from + // disk IO. If a Transport does not support this, it can simply + // ignore the call, and push the heartbeat onto the Consumer channel. + SetHeartbeatHandler(cb func(rpc RPC)) +} + +// AppendPipeline is used for pipelining AppendEntries requests. It is used +// to increase the replication throughput by masking latency and better +// utilizing bandwidth. +type AppendPipeline interface { + // AppendEntries is used to add another request to the pipeline. + // The send may block which is an effective form of back-pressure. + AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) + + // Consumer returns a channel that can be used to consume + // response futures when they are ready. + Consumer() <-chan AppendFuture + + // Closes pipeline and cancels all inflight RPCs + Close() error +} + +// AppendFuture is used to return information about a pipelined AppendEntries request. 
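+// Start is the time the request was started, which pipelineDecode passes
+// to appendStats to report the observed AppendEntries RPC latency.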
+type AppendFuture interface {
+	Future
+	Start() time.Time
+	Request() *AppendEntriesRequest
+	Response() *AppendEntriesResponse
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/util.go b/Godeps/_workspace/src/github.com/hashicorp/raft/util.go
new file mode 100644
index 0000000000000..a6642c4c9e646
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/raft/util.go
@@ -0,0 +1,200 @@
+package raft
+
+import (
+	"bytes"
+	crand "crypto/rand"
+	"encoding/binary"
+	"fmt"
+	"math"
+	"math/big"
+	"math/rand"
+	"time"
+
+	"github.com/hashicorp/go-msgpack/codec"
+)
+
+func init() {
+	// Ensure we use a high-entropy seed for the pseudo-random generator
+	rand.Seed(newSeed())
+}
+
+// newSeed returns an int64 from a crypto random source;
+// it can be used to seed a source for math/rand.
+func newSeed() int64 {
+	r, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+	if err != nil {
+		panic(fmt.Errorf("failed to read random bytes: %v", err))
+	}
+	return r.Int64()
+}
+
+// randomTimeout returns a value that is between the minVal and 2x minVal.
+func randomTimeout(minVal time.Duration) <-chan time.Time {
+	if minVal == 0 {
+		return nil
+	}
+	extra := (time.Duration(rand.Int63()) % minVal)
+	return time.After(minVal + extra)
+}
+
+// min returns the minimum.
+func min(a, b uint64) uint64 {
+	if a <= b {
+		return a
+	}
+	return b
+}
+
+// max returns the maximum.
+func max(a, b uint64) uint64 {
+	if a >= b {
+		return a
+	}
+	return b
+}
+
+// generateUUID is used to generate a random UUID.
+func generateUUID() string {
+	buf := make([]byte, 16)
+	if _, err := crand.Read(buf); err != nil {
+		panic(fmt.Errorf("failed to read random bytes: %v", err))
+	}
+
+	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+		buf[0:4],
+		buf[4:6],
+		buf[6:8],
+		buf[8:10],
+		buf[10:16])
+}
+
+// asyncNotify is used to do an async channel send to
+// a list of channels. This will not block.
+func asyncNotify(chans []chan struct{}) {
+	for _, ch := range chans {
+		asyncNotifyCh(ch)
+	}
+}
+
+// asyncNotifyCh is used to do an async channel send
+// to a single channel without blocking.
+func asyncNotifyCh(ch chan struct{}) {
+	select {
+	case ch <- struct{}{}:
+	default:
+	}
+}
+
+// asyncNotifyBool is used to do an async notification
+// on a bool channel.
+func asyncNotifyBool(ch chan bool, v bool) {
+	select {
+	case ch <- v:
+	default:
+	}
+}
+
+// ExcludePeer is used to exclude a single peer from a list of peers.
+func ExcludePeer(peers []string, peer string) []string {
+	otherPeers := make([]string, 0, len(peers))
+	for _, p := range peers {
+		if p != peer {
+			otherPeers = append(otherPeers, p)
+		}
+	}
+	return otherPeers
+}
+
+// PeerContained checks if a given peer is contained in a list.
+func PeerContained(peers []string, peer string) bool {
+	for _, p := range peers {
+		if p == peer {
+			return true
+		}
+	}
+	return false
+}
+
+// AddUniquePeer is used to add a peer to a list of existing
+// peers only if it is not already contained.
+func AddUniquePeer(peers []string, peer string) []string {
+	if PeerContained(peers, peer) {
+		return peers
+	}
+	return append(peers, peer)
+}
+
+// encodePeers is used to serialize a list of peers.
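+// Each peer is encoded individually by the transport, and the resulting
+// list is then msgpack-encoded as a whole.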
+func encodePeers(peers []string, trans Transport) []byte { + // Encode each peer + var encPeers [][]byte + for _, p := range peers { + encPeers = append(encPeers, trans.EncodePeer(p)) + } + + // Encode the entire array + buf, err := encodeMsgPack(encPeers) + if err != nil { + panic(fmt.Errorf("failed to encode peers: %v", err)) + } + + return buf.Bytes() +} + +// decodePeers is used to deserialize a list of peers. +func decodePeers(buf []byte, trans Transport) []string { + // Decode the buffer first + var encPeers [][]byte + if err := decodeMsgPack(buf, &encPeers); err != nil { + panic(fmt.Errorf("failed to decode peers: %v", err)) + } + + // Deserialize each peer + var peers []string + for _, enc := range encPeers { + peers = append(peers, trans.DecodePeer(enc)) + } + + return peers +} + +// Decode reverses the encode operation on a byte slice input. +func decodeMsgPack(buf []byte, out interface{}) error { + r := bytes.NewBuffer(buf) + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(r, &hd) + return dec.Decode(out) +} + +// Encode writes an encoded object to a new bytes buffer. +func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(buf, &hd) + err := enc.Encode(in) + return buf, err +} + +// Converts bytes to an integer. +func bytesToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) +} + +// Converts a uint64 to a byte slice. +func uint64ToBytes(u uint64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, u) + return buf +} + +// backoff is used to compute an exponential backoff +// duration. Base time is scaled by the current round, +// up to some maximum scale factor. +func backoff(base time.Duration, round, limit uint64) time.Duration { + power := min(round, limit) + for power > 2 { + base *= 2 + power-- + } + return base +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE index 03f21e89fecdd..d50222706cf00 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2013-2014 Errplane Inc. +Copyright (c) 2013-2015 Errplane Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/_vendor/raft/LICENSE b/Godeps/_workspace/src/github.com/influxdb/influxdb/_vendor/raft/LICENSE deleted file mode 100644 index ee7f222286b61..0000000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/_vendor/raft/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright 2013 go-raft contributors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md
index 2d849dfb7e088..012109bc00cd4 100644
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md
@@ -1,2 +1,206 @@
-influxdb-go
-===========
+# InfluxDB Client
+
+[![GoDoc](https://godoc.org/github.com/influxdb/influxdb?status.svg)](http://godoc.org/github.com/influxdb/influxdb/client)
+
+## Description
+
+A Go client library written and maintained by the **InfluxDB** team.
+This package provides convenience functions to read and write time series data.
+It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
+
+
+## Getting Started
+
+### Connecting To Your Database
+
+Connecting to an **InfluxDB** database is straightforward. You will need a host
+name, a port, and the cluster user credentials if applicable. The default port is 8086.
+You can customize these settings to your specific installation via the
+**InfluxDB** configuration file.
+
+Though not necessary for experimentation, you may want to create a new user
+and authenticate the connection to your database.
+
+For more information please check out the
+[Cluster Admin Docs](http://influxdb.com/docs/v0.9/query_language/database_administration.html).
+
+For the impatient, you can create a new admin user _bubba_ by firing off the
+[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go).
+
+```shell
+influx
+> create user bubba with password 'bumblebeetuna'
+> grant all privileges to bubba
+```
+
+And now, for good measure, set the credentials in your shell environment.
+In the example below we will use $INFLUX_USER and $INFLUX_PWD.
+
+Now with the administrivia out of the way, let's connect to our database.
+
+NOTE: If you've opted out of creating a user, you can omit Username and Password in
+the configuration below.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/url"
+	"os"
+
+	"github.com/influxdb/influxdb/client"
+)
+
+const (
+	MyHost        = "localhost"
+	MyPort        = 8086
+	MyDB          = "square_holes"
+	MyMeasurement = "shapes"
+)
+
+func main() {
+	u, err := url.Parse(fmt.Sprintf("http://%s:%d", MyHost, MyPort))
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	conf := client.Config{
+		URL:      *u,
+		Username: os.Getenv("INFLUX_USER"),
+		Password: os.Getenv("INFLUX_PWD"),
+	}
+
+	con, err := client.NewClient(conf)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	dur, ver, err := con.Ping()
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("Happy as a Hippo! %v, %s", dur, ver)
+}
+```
+
+### Inserting Data
+
+Time series data, aka *points*, are written to the database using batch inserts.
+The mechanism is to create one or more points, then create a batch, aka *batch
+points*, and write these to a given database and series. A series is a combination
+of a measurement (time/values) and a set of tags.
+
+In this sample we will create a batch of 1,000 points. Each point has a time and
+a single value, as well as 2 tags indicating a shape and color. We write these points
+to a database called _square_holes_ using a measurement named _shapes_.
+
+NOTE: You can specify a RetentionPolicy as part of the batch points. If not
+provided, InfluxDB will use the database _default_ retention policy. By default, the
+_default_ retention policy never deletes any data it contains.
+
+```go
+func writePoints(con *client.Client) {
+	var (
+		shapes     = []string{"circle", "rectangle", "square", "triangle"}
+		colors     = []string{"red", "blue", "green"}
+		sampleSize = 1000
+		pts        = make([]client.Point, sampleSize)
+	)
+
+	rand.Seed(42)
+	for i := 0; i < sampleSize; i++ {
+		pts[i] = client.Point{
+			Measurement: "shapes",
+			Tags: map[string]string{
+				"color": colors[rand.Intn(len(colors))],
+				"shape": shapes[rand.Intn(len(shapes))],
+			},
+			Fields: map[string]interface{}{
+				"value": rand.Intn(sampleSize),
+			},
+			Time:      time.Now(),
+			Precision: "s",
+		}
+	}
+
+	bps := client.BatchPoints{
+		Points:          pts,
+		Database:        MyDB,
+		RetentionPolicy: "default",
+	}
+	_, err := con.Write(bps)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+### Querying Data
+
+One nice advantage of using **InfluxDB** is the ability to query your data using familiar
+SQL constructs. In this example we create a convenience function to query the database
+as follows:
+
+```go
+// queryDB is a convenience function to query the database
+func queryDB(con *client.Client, cmd string) (res []client.Result, err error) {
+	q := client.Query{
+		Command:  cmd,
+		Database: MyDB,
+	}
+	response, err := con.Query(q)
+	if err != nil {
+		return res, err
+	}
+	if response.Error() != nil {
+		return res, response.Error()
+	}
+	res = response.Results
+	return res, nil
+}
+```
+
+#### Creating a Database
+```go
+_, err := queryDB(con, fmt.Sprintf("create database %s", MyDB))
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+#### Count Records
+```go
+q := fmt.Sprintf("select count(%s) from %s", "value", MyMeasurement)
+res, err := queryDB(con, q)
+if err != nil {
+	log.Fatal(err)
+}
+count := res[0].Series[0].Values[0][1]
+log.Printf("Found a total of %v records", count)
+```
+
+#### Find the last 10 _shapes_ records
+
+```go
+q := fmt.Sprintf("select * from %s limit %d", MyMeasurement, 10)
+res, err := queryDB(con, q)
+if err != nil {
+	log.Fatal(err)
+}
+
+for i, row := range res[0].Series[0].Values {
+	t, err := time.Parse(time.RFC3339, row[0].(string))
+	if err != nil {
+		log.Fatal(err)
+	}
+	val, err := row[1].(json.Number).Int64()
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("[%2d] %s: %03d\n", i, t.Format(time.Stamp), val)
+}
+```
+
+## Go Docs
+
+Please refer to
+[http://godoc.org/github.com/influxdb/influxdb/client](http://godoc.org/github.com/influxdb/influxdb/client)
+for documentation.
+
+## See Also
+
+You can also examine how the client library is used by the
+[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go).
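+
+## Cleaning Up
+
+When you are done experimenting, the sample database can be dropped with the
+same `queryDB` helper shown above (a minimal sketch, reusing the `con` client
+and `MyDB` constant from the earlier examples):
+
+```go
+_, err := queryDB(con, fmt.Sprintf("drop database %s", MyDB))
+if err != nil {
+	log.Fatal(err)
+}
+```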
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/examples/example.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/examples/example.go deleted file mode 100644 index 6cc866e88c0c8..0000000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/examples/example.go +++ /dev/null @@ -1,200 +0,0 @@ -package examples - -import ( - "fmt" - - "github.com/influxdb/influxdb/client" -) - -func main() { - TestClient() -} - -func TestClient() { - internalTest(true) -} - -func TestClientWithoutCompression() { - internalTest(false) -} - -func internalTest(compression bool) { - c, err := client.NewClient(&client.ClientConfig{}) - if err != nil { - panic(err) - } - - admins, err := c.GetClusterAdminList() - if err != nil { - panic(err) - } - - if len(admins) == 1 { - if err := c.CreateClusterAdmin("admin", "password"); err != nil { - panic(err) - } - } - - admins, err = c.GetClusterAdminList() - if err != nil { - panic(err) - } - - if len(admins) != 2 { - panic("more than two admins returned") - } - - dbs, err := c.GetDatabaseList() - if err != nil { - panic(err) - } - - if len(dbs) == 0 { - if err := c.CreateDatabase("foobar"); err != nil { - panic(err) - } - } - - dbs, err = c.GetDatabaseList() - if err != nil { - panic(err) - } - - if len(dbs) != 1 && dbs[0]["foobar"] == nil { - panic("List of databases don't match") - } - - users, err := c.GetDatabaseUserList("foobar") - if err != nil { - panic(err) - } - - if len(users) == 0 { - if err := c.CreateDatabaseUser("foobar", "dbuser", "pass"); err != nil { - panic(err) - } - - if err := c.AlterDatabasePrivilege("foobar", "dbuser", true); err != nil { - panic(err) - } - } - - users, err = c.GetDatabaseUserList("foobar") - if err != nil { - panic(err) - } - - if len(users) != 1 { - panic("more than one user returned") - } - - c, err = client.NewClient(&client.ClientConfig{ - Username: "dbuser", - Password: "pass", - Database: "foobar", - }) - - if !compression { - c.DisableCompression() - } - - if err != nil { - panic(err) - } - - name := "ts9" - if !compression { - name = "ts9_uncompressed" - } - - series := &client.Series{ - Name: name, - Columns: []string{"value"}, - Points: [][]interface{}{ - {1.0}, - }, - } - if err := c.WriteSeries([]*client.Series{series}); err != nil { - panic(err) - } - - result, err := c.Query("select * from " + name) - if err != nil { - panic(err) - } - - if len(result) != 1 { - panic(fmt.Errorf("expected one time series returned: %d", len(result))) - } - - if len(result[0].Points) != 1 { - panic(fmt.Errorf("Expected one point: %d", len(result[0].Points))) - } - - if result[0].Points[0][2].(float64) != 1 { - panic("Value not equal to 1") - } - - c, err = client.NewClient(&client.ClientConfig{ - Username: "root", - Password: "root", - }) - - if err != nil { - panic(err) - } - - spaces, err := c.GetShardSpaces() - if err != nil || len(spaces) == 0 { - panic(fmt.Errorf("Got empty spaces back: %s", err)) - } - if spaces[0].Name != "default" { - panic("Space name isn't default") - } - space := &client.ShardSpace{Name: "foo", Regex: "/^paul_is_rad/"} - err = c.CreateShardSpace("foobar", space) - if err != nil { - panic(err) - } - spaces, _ = c.GetShardSpaces() - if spaces[1].Name != "foo" { - panic("Space name isn't foo") - } - shards, err := c.GetShards() - if err != nil { - panic(fmt.Errorf("Couldn't get shards back: %s", err)) - } - - c, err = client.NewClient(&client.ClientConfig{ - Username: "root", - Password: "root", - Database: "", - }) - series = &client.Series{ - Name: 
"paul_is_rad", - Columns: []string{"value"}, - Points: [][]interface{}{ - {1.0}, - }, - } - if err := c.WriteSeries([]*client.Series{series}); err != nil { - panic(err) - } - - spaces, _ = c.GetShardSpaces() - count := 0 - for _, s := range shards.All { - if s.SpaceName == "foo" { - count++ - } - } - - if err := c.DropShardSpace("foobar", "foo"); err != nil { - panic(fmt.Errorf("Error: %s", err)) - } - - spaces, err = c.GetShardSpaces() - if err != nil || len(spaces) != 1 || spaces[0].Name != "default" { - panic(fmt.Errorf("Error: %s, %d, %s", err, len(spaces), spaces[0].Name)) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go index 22a50e5bcd20a..2ec08a96eec55 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go @@ -2,609 +2,582 @@ package client import ( "bytes" - "compress/gzip" "encoding/json" + "errors" "fmt" "io" "io/ioutil" - "net" "net/http" "net/url" - "strings" -) + "time" -const ( - UDPMaxMessageSize = 2048 + "github.com/influxdb/influxdb/influxql" + "github.com/influxdb/influxdb/tsdb" ) -type Client struct { - host string - username string - password string - database string - httpClient *http.Client - udpConn *net.UDPConn - schema string - compression bool -} - -type ClientConfig struct { - Host string - Username string - Password string - Database string - HttpClient *http.Client - IsSecure bool - IsUDP bool +// Query is used to send a command to the server. Both Command and Database are required. +type Query struct { + Command string + Database string } -var defaults *ClientConfig - -func init() { - defaults = &ClientConfig{ - Host: "localhost:8086", - Username: "root", - Password: "root", - Database: "", - HttpClient: http.DefaultClient, - } +// Config is used to specify what server to connect to. +// URL: The URL of the server connecting to. +// Username/Password are optional. They will be passed via basic auth if provided. +// UserAgent: If not provided, will default "InfluxDBClient", +// Timeout: If not provided, will default to 0 (no timeout) +type Config struct { + URL url.URL + Username string + Password string + UserAgent string + Timeout time.Duration } -func getDefault(value, defaultValue string) string { - if value == "" { - return defaultValue - } - return value +// Client is used to make calls to the server. +type Client struct { + url url.URL + username string + password string + httpClient *http.Client + userAgent string } -func New(config *ClientConfig) (*Client, error) { - return NewClient(config) -} +const ( + ConsistencyOne = "one" + ConsistencyAll = "all" + ConsistencyQuorum = "quorum" + ConsistencyAny = "any" +) -func NewClient(config *ClientConfig) (*Client, error) { - host := getDefault(config.Host, defaults.Host) - username := getDefault(config.Username, defaults.Username) - password := getDefault(config.Password, defaults.Password) - database := getDefault(config.Database, defaults.Database) - if config.HttpClient == nil { - config.HttpClient = defaults.HttpClient - } - var udpConn *net.UDPConn - if config.IsUDP { - serverAddr, err := net.ResolveUDPAddr("udp", host) - if err != nil { - return nil, err - } - udpConn, err = net.DialUDP("udp", nil, serverAddr) - if err != nil { - return nil, err - } +// NewClient will instantiate and return a connected client to issue commands to the server. 
+func NewClient(c Config) (*Client, error) {
+	client := Client{
+		url:        c.URL,
+		username:   c.Username,
+		password:   c.Password,
+		httpClient: &http.Client{Timeout: c.Timeout},
+		userAgent:  c.UserAgent,
+	}
+	if client.userAgent == "" {
+		client.userAgent = "InfluxDBClient"
+	}
+	return &client, nil
+}
+
+// SetAuth will update the username and password
+func (c *Client) SetAuth(u, p string) {
+	c.username = u
+	c.password = p
+}
+
+// Query sends a command to the server and returns the Response
+func (c *Client) Query(q Query) (*Response, error) {
+	u := c.url
+
+	u.Path = "query"
+	values := u.Query()
+	values.Set("q", q.Command)
+	values.Set("db", q.Database)
+	u.RawQuery = values.Encode()
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	dec := json.NewDecoder(resp.Body)
+	dec.UseNumber()
+	decErr := dec.Decode(&response)
+
+	// ignore this error if we got an invalid status code
+	if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
+		decErr = nil
+	}
+	// If we got a valid decode error, send that back
+	if decErr != nil {
+		return nil, decErr
+	}
+	// If we don't have an error in our json response, and didn't get statusOK, then send back an error
+	if resp.StatusCode != http.StatusOK && response.Error() == nil {
+		return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
+	}
+	return &response, nil
+}
+
+// Write takes BatchPoints and allows for writing of multiple points with defaults.
+// If successful, error is nil and Response is nil.
+// If an error occurs, Response may contain additional information if populated.
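+// Each point is marshaled into the line protocol, one point per line, and
+// the batch is POSTed to the /write endpoint.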
+func (c *Client) Write(bp BatchPoints) (*Response, error) {
+	// Copy the URL so we do not mutate the client's shared copy
+	u := c.url
+	u.Path = "write"
+
+	var b bytes.Buffer
+	for _, p := range bp.Points {
+		if p.Raw != "" {
+			if _, err := b.WriteString(p.Raw); err != nil {
+				return nil, err
+			}
+		} else {
+			for k, v := range bp.Tags {
+				if p.Tags == nil {
+					p.Tags = make(map[string]string, len(bp.Tags))
+				}
+				p.Tags[k] = v
+			}
+
+			if _, err := b.WriteString(p.MarshalString()); err != nil {
+				return nil, err
+			}
+		}
+
+		if err := b.WriteByte('\n'); err != nil {
+			return nil, err
+		}
+	}
+
+	req, err := http.NewRequest("POST", u.String(), &b)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "")
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+	params := req.URL.Query()
+	params.Add("db", bp.Database)
+	params.Add("rp", bp.RetentionPolicy)
+	params.Add("precision", bp.Precision)
+	params.Add("consistency", bp.WriteConsistency)
+	req.URL.RawQuery = params.Encode()
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil && err.Error() != "EOF" {
+		return nil, err
+	}
+
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		err := errors.New(string(body))
+		response.Err = err
+		return &response, err
+	}
+
+	return nil, nil
+}
+
+// Ping will check to see if the server is up.
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
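+// The version is read from the X-Influxdb-Version header of the /ping response.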
+func (c *Client) Ping() (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" -func (self *Client) UpdateClusterAdmin(name, password string) error { - url := self.getUrl("/cluster_admins/" + name) - payload := map[string]string{"password": password} - data, err := json.Marshal(payload) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { - return err + return 0, "", err } - resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) - return responseToError(resp, err, true) -} - -func (self *Client) DeleteClusterAdmin(name string) error { - url := self.getUrl("/cluster_admins/" + name) - resp, err := self.del(url) - return responseToError(resp, err, true) -} - -func (self *Client) GetClusterAdminList() ([]map[string]interface{}, error) { - url := self.getUrl("/cluster_admins") - return self.listSomething(url) -} - -func (self *Client) Servers() ([]map[string]interface{}, error) { - url := self.getUrl("/cluster/servers") - return self.listSomething(url) -} - -func (self *Client) RemoveServer(id int) error { - resp, err := self.del(self.getUrl(fmt.Sprintf("/cluster/servers/%d", id))) - return responseToError(resp, err, true) -} - -// Creates a new database user for the given database. permissions can -// be omitted in which case the user will be able to read and write to -// all time series. If provided, there should be two strings, the -// first for read and the second for write. The strings are regexes -// that are used to match the time series name to determine whether -// the user has the ability to read/write to the given time series. -// -// client.CreateDatabaseUser("db", "user", "pass") -// // the following user cannot read from any series and can write -// // to the limited time series only -// client.CreateDatabaseUser("db", "limited", "pass", "^$", "limited") -func (self *Client) CreateDatabaseUser(database, name, password string, permissions ...string) error { - readMatcher, writeMatcher := ".*", ".*" - switch len(permissions) { - case 0: - case 2: - readMatcher, writeMatcher = permissions[0], permissions[1] - default: - return fmt.Errorf("You have to provide two ") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) } - url := self.getUrl("/db/" + database + "/users") - payload := map[string]string{"name": name, "password": password, "readFrom": readMatcher, "writeTo": writeMatcher} - data, err := json.Marshal(payload) + resp, err := c.httpClient.Do(req) if err != nil { - return err + return 0, "", err } - resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) - return responseToError(resp, err, true) -} + defer resp.Body.Close() -// Change the cluster admin password -func (self *Client) ChangeClusterAdminPassword(name, newPassword string) error { - url := self.getUrl("/cluster_admins/" + name) - payload := map[string]interface{}{"password": newPassword} - data, err := json.Marshal(payload) - if err != nil { - return err - } - resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) - return responseToError(resp, err, true) + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil } -// Change the user password, adming flag and optionally permissions -func (self *Client) ChangeDatabaseUser(database, name, newPassword string, isAdmin bool, newPermissions ...string) error { - switch len(newPermissions) { - case 0, 2: - default: - return fmt.Errorf("You have to provide two 
") - } +// Dump connects to server and retrieves all data stored for specified database. +// If successful, Dump returns the entire response body, which is an io.ReadCloser +func (c *Client) Dump(db string) (io.ReadCloser, error) { + u := c.url + u.Path = "dump" + values := u.Query() + values.Set("db", db) + u.RawQuery = values.Encode() - url := self.getUrl("/db/" + database + "/users/" + name) - payload := map[string]interface{}{"password": newPassword, "admin": isAdmin} - if len(newPermissions) == 2 { - payload["readFrom"] = newPermissions[0] - payload["writeTo"] = newPermissions[1] - } - data, err := json.Marshal(payload) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { - return err + return nil, err } - resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) - return responseToError(resp, err, true) -} - -// See Client.CreateDatabaseUser for more info on the permissions -// argument -func (self *Client) updateDatabaseUserCommon(database, name string, password *string, isAdmin *bool, permissions ...string) error { - url := self.getUrl("/db/" + database + "/users/" + name) - payload := map[string]interface{}{} - if password != nil { - payload["password"] = *password - } - if isAdmin != nil { - payload["admin"] = *isAdmin - } - switch len(permissions) { - case 0: - case 2: - payload["readFrom"] = permissions[0] - payload["writeTo"] = permissions[1] - default: + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) } - data, err := json.Marshal(payload) + resp, err := c.httpClient.Do(req) if err != nil { - return err + return nil, err } - resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) - return responseToError(resp, err, true) -} - -func (self *Client) UpdateDatabaseUser(database, name, password string) error { - return self.updateDatabaseUserCommon(database, name, &password, nil) -} - -func (self *Client) UpdateDatabaseUserPermissions(database, name, readPermission, writePermissions string) error { - return self.updateDatabaseUserCommon(database, name, nil, nil, readPermission, writePermissions) -} - -func (self *Client) DeleteDatabaseUser(database, name string) error { - url := self.getUrl("/db/" + database + "/users/" + name) - resp, err := self.del(url) - return responseToError(resp, err, true) + if resp.StatusCode != http.StatusOK { + return resp.Body, fmt.Errorf("HTTP Protocol error %d", resp.StatusCode) + } + return resp.Body, nil } -func (self *Client) GetDatabaseUserList(database string) ([]map[string]interface{}, error) { - url := self.getUrl("/db/" + database + "/users") - return self.listSomething(url) -} +// Structs -func (self *Client) AlterDatabasePrivilege(database, name string, isAdmin bool, permissions ...string) error { - return self.updateDatabaseUserCommon(database, name, nil, &isAdmin, permissions...) +// Result represents a resultset returned from a single statement. +type Result struct { + Series []influxql.Row + Err error } -type TimePrecision string +// MarshalJSON encodes the result into JSON. +func (r *Result) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Series []influxql.Row `json:"series,omitempty"` + Err string `json:"error,omitempty"` + } -const ( - Second TimePrecision = "s" - Millisecond TimePrecision = "ms" - Microsecond TimePrecision = "u" -) + // Copy fields to output struct. 
+ o.Series = r.Series + if r.Err != nil { + o.Err = r.Err.Error() + } -func (self *Client) WriteSeries(series []*Series) error { - return self.writeSeriesCommon(series, nil) + return json.Marshal(&o) } -func (self *Client) WriteSeriesOverUDP(series []*Series) error { - if self.udpConn == nil { - return fmt.Errorf("UDP isn't enabled. Make sure to set config.IsUDP to true") +// UnmarshalJSON decodes the data into the Result struct +func (r *Result) UnmarshalJSON(b []byte) error { + var o struct { + Series []influxql.Row `json:"series,omitempty"` + Err string `json:"error,omitempty"` } - data, err := json.Marshal(series) + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) if err != nil { return err } - // because max of msg over upd is 2048 bytes - // https://github.com/influxdb/influxdb/blob/master/src/api/udp/api.go#L65 - if len(data) >= UDPMaxMessageSize { - err = fmt.Errorf("data size over limit %v limit is %v", len(data), UDPMaxMessageSize) - fmt.Println(err) - return err - } - _, err = self.udpConn.Write(data) - if err != nil { - return err + r.Series = o.Series + if o.Err != "" { + r.Err = errors.New(o.Err) } return nil } -func (self *Client) WriteSeriesWithTimePrecision(series []*Series, timePrecision TimePrecision) error { - return self.writeSeriesCommon(series, map[string]string{"time_precision": string(timePrecision)}) +// Response represents a list of statement results. +type Response struct { + Results []Result + Err error } -func (self *Client) writeSeriesCommon(series []*Series, options map[string]string) error { - data, err := json.Marshal(series) - if err != nil { - return err +// MarshalJSON encodes the response into JSON. +func (r *Response) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` } - url := self.getUrl("/db/" + self.database + "/series") - for name, value := range options { - url += fmt.Sprintf("&%s=%s", name, value) - } - var b *bytes.Buffer - if self.compression { - b = bytes.NewBuffer(nil) - w := gzip.NewWriter(b) - if _, err := w.Write(data); err != nil { - return err - } - w.Flush() - w.Close() - } else { - b = bytes.NewBuffer(data) - } - req, err := http.NewRequest("POST", url, b) - if err != nil { - return err - } - if self.compression { - req.Header.Set("Content-Encoding", "gzip") - } - resp, err := self.httpClient.Do(req) - return responseToError(resp, err, true) -} -func (self *Client) Query(query string, precision ...TimePrecision) ([]*Series, error) { - return self.queryCommon(query, false, precision...) -} + // Copy fields to output struct. + o.Results = r.Results + if r.Err != nil { + o.Err = r.Err.Error() + } -func (self *Client) QueryWithNumbers(query string, precision ...TimePrecision) ([]*Series, error) { - return self.queryCommon(query, true, precision...) 
+ return json.Marshal(&o) } -func (self *Client) queryCommon(query string, useNumber bool, precision ...TimePrecision) ([]*Series, error) { - escapedQuery := url.QueryEscape(query) - url := self.getUrl("/db/" + self.database + "/series") - if len(precision) > 0 { - url += "&time_precision=" + string(precision[0]) +// UnmarshalJSON decodes the data into the Response struct +func (r *Response) UnmarshalJSON(b []byte) error { + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` } - url += "&q=" + escapedQuery - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - if !self.compression { - req.Header.Set("Accept-Encoding", "identity") - } - resp, err := self.httpClient.Do(req) - err = responseToError(resp, err, false) - if err != nil { - return nil, err - } - defer resp.Body.Close() - series := []*Series{} - decoder := json.NewDecoder(resp.Body) - if useNumber { - decoder.UseNumber() - } - err = decoder.Decode(&series) + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) if err != nil { - return nil, err + return err } - return series, nil -} - -func (self *Client) Ping() error { - url := self.getUrl("/ping") - resp, err := self.httpClient.Get(url) - return responseToError(resp, err, true) -} - -func (self *Client) AuthenticateDatabaseUser(database, username, password string) error { - url := self.getUrlWithUserAndPass(fmt.Sprintf("/db/%s/authenticate", database), username, password) - resp, err := self.httpClient.Get(url) - return responseToError(resp, err, true) -} - -func (self *Client) AuthenticateClusterAdmin(username, password string) error { - url := self.getUrlWithUserAndPass("/cluster_admins/authenticate", username, password) - resp, err := self.httpClient.Get(url) - return responseToError(resp, err, true) -} - -type LongTermShortTermShards struct { - // Long term shards, (doesn't get populated for version >= 0.8.0) - LongTerm []*Shard `json:"longTerm"` - // Short term shards, (doesn't get populated for version >= 0.8.0) - ShortTerm []*Shard `json:"shortTerm"` - // All shards in the system (Long + Short term shards for version < 0.8.0) - All []*Shard `json:"-"` -} - -type Shard struct { - Id uint32 `json:"id"` - EndTime int64 `json:"endTime"` - StartTime int64 `json:"startTime"` - ServerIds []uint32 `json:"serverIds"` - SpaceName string `json:"spaceName"` - Database string `json:"database"` -} - -type ShardSpaceCollection struct { - ShardSpaces []ShardSpace -} - -func (self *Client) GetShards() (*LongTermShortTermShards, error) { - url := self.getUrlWithUserAndPass("/cluster/shards", self.username, self.password) - body, version, err := self.getWithVersion(url) - if err != nil { - return nil, err + r.Results = o.Results + if o.Err != "" { + r.Err = errors.New(o.Err) } - return parseShards(body, version) + return nil } -func isOrNewerThan(version, reference string) bool { - if version == "vdev" { - return true - } - majorMinor := strings.Split(version[1:], ".")[:2] - refMajorMinor := strings.Split(reference[1:], ".")[:2] - if majorMinor[0] > refMajorMinor[0] { - return true +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. 
+func (r Response) Error() error {
+	if r.Err != nil {
+		return r.Err
 	}
-	if majorMinor[1] > refMajorMinor[1] {
-		return true
+	for _, result := range r.Results {
+		if result.Err != nil {
+			return result.Err
+		}
 	}
-	return majorMinor[1] == refMajorMinor[1]
+	return nil
 }
 
-func parseShards(body []byte, version string) (*LongTermShortTermShards, error) {
-	// strip the initial v in `v0.8.0` and split on the dots
-	if version != "" && isOrNewerThan(version, "v0.8") {
-		return parseNewShards(body)
-	}
-	shards := &LongTermShortTermShards{}
-	err := json.Unmarshal(body, &shards)
-	if err != nil {
-		return nil, err
+// Point defines the fields that will be written to the database.
+// Measurement, Time, and Fields are required.
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h.
+type Point struct {
+	Measurement string
+	Tags        map[string]string
+	Time        time.Time
+	Fields      map[string]interface{}
+	Precision   string
+	Raw         string
+}
+
+// MarshalJSON formats the time as RFC3339Nano. Precision is ignored,
+// since it is only used for writing, not reading; in other words,
+// times are always sent back in nanosecond precision.
+func (p *Point) MarshalJSON() ([]byte, error) {
+	point := struct {
+		Measurement string                 `json:"measurement,omitempty"`
+		Tags        map[string]string      `json:"tags,omitempty"`
+		Time        string                 `json:"time,omitempty"`
+		Fields      map[string]interface{} `json:"fields,omitempty"`
+		Precision   string                 `json:"precision,omitempty"`
+	}{
+		Measurement: p.Measurement,
+		Tags:        p.Tags,
+		Fields:      p.Fields,
+		Precision:   p.Precision,
+	}
+	// Only set Time when it is non-zero, so omitempty can drop it.
+	if !p.Time.IsZero() {
+		point.Time = p.Time.UTC().Format(time.RFC3339Nano)
+	}
+	return json.Marshal(&point)
+}
+
+func (p *Point) MarshalString() string {
+	return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String()
+}
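A brief usage sketch of the marshaling behavior above; the measurement, tags, and timestamp are illustrative, and the import path assumes this vendored copy of the package:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/influxdb/influxdb/client"
)

func main() {
	p := client.Point{
		Measurement: "cpu",
		Tags:        map[string]string{"host": "serverA"},
		Time:        time.Date(2015, 6, 1, 12, 0, 0, 42, time.UTC),
		Fields:      map[string]interface{}{"value": 0.64},
	}
	// json.Marshal dispatches to the custom MarshalJSON above.
	b, err := json.Marshal(&p)
	if err != nil {
		panic(err)
	}
	// The time comes back as RFC3339Nano in UTC,
	// e.g. "2015-06-01T12:00:00.000000042Z"; a zero time is omitted.
	fmt.Println(string(b))
}
```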
+// UnmarshalJSON decodes the data into the Point struct
+func (p *Point) UnmarshalJSON(b []byte) error {
+	var normal struct {
+		Measurement string                 `json:"measurement"`
+		Tags        map[string]string      `json:"tags"`
+		Time        time.Time              `json:"time"`
+		Precision   string                 `json:"precision"`
+		Fields      map[string]interface{} `json:"fields"`
+	}
+	var epoch struct {
+		Measurement string                 `json:"measurement"`
+		Tags        map[string]string      `json:"tags"`
+		Time        *int64                 `json:"time"`
+		Precision   string                 `json:"precision"`
+		Fields      map[string]interface{} `json:"fields"`
+	}
+
+	if err := func() error {
+		var err error
+		dec := json.NewDecoder(bytes.NewBuffer(b))
+		dec.UseNumber()
+		if err = dec.Decode(&epoch); err != nil {
+			return err
+		}
+		// Convert from epoch to time.Time, but only if Time
+		// was actually set.
+		var ts time.Time
+		if epoch.Time != nil {
+			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+			if err != nil {
+				return err
+			}
+		}
+		p.Measurement = epoch.Measurement
+		p.Tags = epoch.Tags
+		p.Time = ts
+		p.Precision = epoch.Precision
+		p.Fields = normalizeFields(epoch.Fields)
+		return nil
+	}(); err == nil {
+		return nil
 	}
-	shards.All = make([]*Shard, len(shards.LongTerm)+len(shards.ShortTerm))
-	copy(shards.All, shards.LongTerm)
-	copy(shards.All[len(shards.LongTerm):], shards.ShortTerm)
-	return shards, nil
-}
-
-func parseNewShards(body []byte) (*LongTermShortTermShards, error) {
-	shards := []*Shard{}
-	err := json.Unmarshal(body, &shards)
-	if err != nil {
-		return nil, err
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	if err := dec.Decode(&normal); err != nil {
+		return err
 	}
+	normal.Time = SetPrecision(normal.Time, normal.Precision)
+	p.Measurement = normal.Measurement
+	p.Tags = normal.Tags
+	p.Time = normal.Time
+	p.Precision = normal.Precision
+	p.Fields = normalizeFields(normal.Fields)
 
-	return &LongTermShortTermShards{All: shards}, nil
+	return nil
 }
 
-// Added to InfluxDB in 0.8.0
-func (self *Client) GetShardSpaces() ([]*ShardSpace, error) {
-	url := self.getUrlWithUserAndPass("/cluster/shard_spaces", self.username, self.password)
-	body, err := self.get(url)
-	if err != nil {
-		return nil, err
+// normalizeFields removes any notion of json.Number, converting
+// numeric field values back to float64.
+func normalizeFields(fields map[string]interface{}) map[string]interface{} {
+	newFields := map[string]interface{}{}
+
+	for k, v := range fields {
+		switch v := v.(type) {
+		case json.Number:
+			jv, e := v.Float64()
+			if e != nil {
+				panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
+			}
+			newFields[k] = jv
+		default:
+			newFields[k] = v
+		}
 	}
-	spaces := []*ShardSpace{}
-	err = json.Unmarshal(body, &spaces)
-	if err != nil {
-		return nil, err
+	return newFields
+}
+
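normalizeFields is unexported, so the following standalone sketch only illustrates the json.Number motivation behind it; the sample value is illustrative:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`{"value": 9007199254740993}`) // 2^53 + 1

	// A plain Unmarshal turns every JSON number into float64,
	// silently losing precision on large integers.
	var plain map[string]interface{}
	if err := json.Unmarshal(data, &plain); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", plain["value"], plain["value"]) // float64 9.007199254740992e+15

	// UseNumber keeps the raw digits as a json.Number; this is what
	// the decoders above opt into before normalizeFields converts
	// the values back to float64 in one deliberate place.
	dec := json.NewDecoder(bytes.NewBuffer(data))
	dec.UseNumber()
	var numbered map[string]interface{}
	if err := dec.Decode(&numbered); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", numbered["value"], numbered["value"]) // json.Number 9007199254740993
}
```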
+// BatchPoints is used to send batched data in a single write.
+// Database and Points are required.
+// If no retention policy is specified, the database's default
+// retention policy is used.
+// If tags are specified, they are merged with all points; if a point
+// already has a given tag, the batch-level tag is ignored for that point.
+// If time is specified, it is applied to any point with an empty time.
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h.
+type BatchPoints struct {
+	Points           []Point           `json:"points,omitempty"`
+	Database         string            `json:"database,omitempty"`
+	RetentionPolicy  string            `json:"retentionPolicy,omitempty"`
+	Tags             map[string]string `json:"tags,omitempty"`
+	Time             time.Time         `json:"time,omitempty"`
+	Precision        string            `json:"precision,omitempty"`
+	WriteConsistency string            `json:"-"`
+}
+
+// UnmarshalJSON decodes the data into the BatchPoints struct
+func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
+	var normal struct {
+		Points          []Point           `json:"points"`
+		Database        string            `json:"database"`
+		RetentionPolicy string            `json:"retentionPolicy"`
+		Tags            map[string]string `json:"tags"`
+		Time            time.Time         `json:"time"`
+		Precision       string            `json:"precision"`
+	}
+	var epoch struct {
+		Points          []Point           `json:"points"`
+		Database        string            `json:"database"`
+		RetentionPolicy string            `json:"retentionPolicy"`
+		Tags            map[string]string `json:"tags"`
+		Time            *int64            `json:"time"`
+		Precision       string            `json:"precision"`
+	}
+
+	if err := func() error {
+		var err error
+		if err = json.Unmarshal(b, &epoch); err != nil {
+			return err
+		}
+		// Convert from epoch to time.Time
+		var ts time.Time
+		if epoch.Time != nil {
+			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+			if err != nil {
+				return err
+			}
+		}
+		bp.Points = epoch.Points
+		bp.Database = epoch.Database
+		bp.RetentionPolicy = epoch.RetentionPolicy
+		bp.Tags = epoch.Tags
+		bp.Time = ts
+		bp.Precision = epoch.Precision
+		return nil
+	}(); err == nil {
+		return nil
 	}
-	return spaces, nil
-}
-
-// Added to InfluxDB in 0.8.0
-func (self *Client) DropShardSpace(database, name string) error {
-	url := self.getUrlWithUserAndPass(fmt.Sprintf("/cluster/shard_spaces/%s/%s", database, name), self.username, self.password)
-	_, err := self.del(url)
-	return err
-}
-
-// Added to InfluxDB in 0.8.0
-func (self *Client) CreateShardSpace(database string, space *ShardSpace) error {
-	url := self.getUrl(fmt.Sprintf("/cluster/shard_spaces/%s", database))
-	data, err := json.Marshal(space)
-	if err != nil {
+	if err := json.Unmarshal(b, &normal); err != nil {
 		return err
 	}
-	resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data))
-	return responseToError(resp, err, true)
+	normal.Time = SetPrecision(normal.Time, normal.Precision)
+	bp.Points = normal.Points
+	bp.Database = normal.Database
+	bp.RetentionPolicy = normal.RetentionPolicy
+	bp.Tags = normal.Tags
+	bp.Time = normal.Time
+	bp.Precision = normal.Precision
+
+	return nil
 }
 
-func (self *Client) DropShard(id uint32, serverIds []uint32) error {
-	url := self.getUrlWithUserAndPass(fmt.Sprintf("/cluster/shards/%d", id), self.username, self.password)
-	ids := map[string][]uint32{"serverIds": serverIds}
-	body, err := json.Marshal(ids)
-	if err != nil {
-		return err
-	}
-	_, err = self.delWithBody(url, bytes.NewBuffer(body))
-	return err
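A hedged sketch of the epoch-shaped JSON that BatchPoints.UnmarshalJSON accepts, derived from the epoch branch above; the field values are illustrative and the import path assumes this vendored copy:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/influxdb/influxdb/client"
)

func main() {
	// Batch-level epoch time plus precision; the inner point carries
	// no time of its own, so it stays zero until a write applies
	// the batch time.
	raw := []byte(`{
		"database": "mydb",
		"retentionPolicy": "default",
		"tags": {"region": "uswest"},
		"time": 1434055562,
		"precision": "s",
		"points": [{"measurement": "cpu", "fields": {"value": 0.64}}]
	}`)

	var bp client.BatchPoints
	if err := json.Unmarshal(raw, &bp); err != nil {
		panic(err)
	}
	// 2015-06-11 20:46:02 +0000 UTC 1
	fmt.Println(bp.Time.UTC(), len(bp.Points))
}
```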
+// utility functions
+
+// Addr provides the current URL, as a string, of the server the
+// client is connected to.
+func (c *Client) Addr() string {
+	return c.url.String()
 }
 
-// Added to InfluxDB in 0.8.2
-func (self *Client) UpdateShardSpace(database, name string, space *ShardSpace) error {
-	url := self.getUrl(fmt.Sprintf("/cluster/shard_spaces/%s/%s", database, name))
-	data, err := json.Marshal(space)
-	if err != nil {
-		return err
+// helper functions
+
+// EpochToTime takes a unix epoch time and uses precision to return a time.Time
+func EpochToTime(epoch int64, precision string) (time.Time, error) {
+	if precision == "" {
+		precision = "s"
 	}
-	resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data))
-	return responseToError(resp, err, true)
+	var t time.Time
+	switch precision {
+	case "h":
+		t = time.Unix(0, epoch*int64(time.Hour))
+	case "m":
+		t = time.Unix(0, epoch*int64(time.Minute))
+	case "s":
+		t = time.Unix(0, epoch*int64(time.Second))
+	case "ms":
+		t = time.Unix(0, epoch*int64(time.Millisecond))
+	case "u":
+		t = time.Unix(0, epoch*int64(time.Microsecond))
+	case "n":
+		t = time.Unix(0, epoch)
+	default:
+		return time.Time{}, fmt.Errorf("unknown precision %q", precision)
+	}
+	return t, nil
+}
+
+// SetPrecision will round a time to the specified precision
+func SetPrecision(t time.Time, precision string) time.Time {
+	switch precision {
+	case "n": // nanosecond precision needs no rounding
+	case "u":
+		return t.Round(time.Microsecond)
+	case "ms":
+		return t.Round(time.Millisecond)
+	case "s":
+		return t.Round(time.Second)
+	case "m":
+		return t.Round(time.Minute)
+	case "h":
+		return t.Round(time.Hour)
+	}
+	return t
 }
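A short usage sketch of the two helpers above; the epoch value is illustrative and the import path assumes this vendored copy:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdb/influxdb/client"
)

func main() {
	// An epoch integer is meaningless without its precision:
	// the same value reads very differently as "s" versus "ms".
	secs, _ := client.EpochToTime(1434055562, "s")
	millis, _ := client.EpochToTime(1434055562, "ms")
	fmt.Println(secs.UTC())   // 2015-06-11 20:46:02 +0000 UTC
	fmt.Println(millis.UTC()) // 1970-01-17 14:20:55.562 +0000 UTC

	// SetPrecision rounds an existing time instead;
	// 500ms rounds up to the next whole second here.
	t := time.Date(2015, 6, 11, 20, 46, 2, 500000000, time.UTC)
	fmt.Println(client.SetPrecision(t, "s")) // 2015-06-11 20:46:03 +0000 UTC
}
```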
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/series.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/series.go
deleted file mode 100644
index f18b8bbb59ea1..0000000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/series.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client
-
-type Series struct {
-	Name    string          `json:"name"`
-	Columns []string        `json:"columns"`
-	Points  [][]interface{} `json:"points"`
-}
-
-func (self *Series) GetName() string {
-	return self.Name
-}
-
-func (self *Series) GetColumns() []string {
-	return self.Columns
-}
-
-func (self *Series) GetPoints() [][]interface{} {
-	return self.Points
-}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/shard_space.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/shard_space.go
deleted file mode 100644
index 87dea1173bc0f..0000000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/shard_space.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package client
-
-type ShardSpace struct {
-	// required, must be unique within the database
-	Name string `json:"name"`
-	// required, a database has many shard spaces and a shard space belongs to a database
-	Database string `json:"database"`
-	// this is optional, if they don't set it, we'll set to /.*/
-	Regex string `json:"regex"`
-	// this is optional, if they don't set it, it will default to the storage.dir in the config
-	RetentionPolicy   string `json:"retentionPolicy"`
-	ShardDuration     string `json:"shardDuration"`
-	ReplicationFactor uint32 `json:"replicationFactor"`
-	Split             uint32 `json:"split"`
-}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md
new file mode 100644
index 0000000000000..087fc3b9ff6b4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md
@@ -0,0 +1,650 @@
+# The Influx Query Language Specification
+
+## Introduction
+
+This is a reference for the Influx Query Language ("InfluxQL").
+
+InfluxQL is a SQL-like query language for interacting with InfluxDB. It has been lovingly crafted to feel familiar to those coming from other SQL or SQL-like environments while providing features specific to storing and analyzing time series data.
+
+## Notation
+
+The syntax is specified using Extended Backus-Naur Form ("EBNF"). EBNF is the same notation used in the [Go](http://golang.org) programming language specification, which can be found [here](https://golang.org/ref/spec). Not so coincidentally, InfluxDB is written in Go.
+
+```
+Production  = production_name "=" [ Expression ] "." .
+Expression  = Alternative { "|" Alternative } .
+Alternative = Term { Term } .
+Term        = production_name | token [ "…" token ] | Group | Option | Repetition .
+Group       = "(" Expression ")" .
+Option      = "[" Expression "]" .
+Repetition  = "{" Expression "}" .
+```
+
+Notation operators in order of increasing precedence:
+
+```
+|   alternation
+()  grouping
+[]  option (0 or 1 times)
+{}  repetition (0 to n times)
+```
+
+## Query representation
+
+### Characters
+
+InfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8).
+
+```
+newline      = /* the Unicode code point U+000A */ .
+unicode_char = /* an arbitrary Unicode code point except newline */ .
+```
+
+## Letters and digits
+
+Letters are the set of ASCII letters; the underscore character _ (U+005F) is also considered a letter.
+
+Only decimal digits are supported.
+
+```
+letter       = ascii_letter | "_" .
+ascii_letter = "A" … "Z" | "a" … "z" .
+digit        = "0" … "9" .
+```
+
+## Identifiers
+
+Identifiers are tokens which refer to database names, retention policy names, user names, measurement names, tag keys, and field names.
+
+The rules:
+
+- double quoted identifiers can contain any unicode character other than a new line
+- double quoted identifiers can contain escaped `"` characters (i.e., `\"`)
+- unquoted identifiers must start with an upper or lowercase ASCII letter or "_"
+- unquoted identifiers may contain only ASCII letters, decimal digits, and "_"
+
+```
+identifier          = unquoted_identifier | quoted_identifier .
+unquoted_identifier = ( letter ) { letter | digit } .
+quoted_identifier   = `"` unicode_char { unicode_char } `"` .
+```
+
+#### Examples:
+
+```
+cpu
+_cpu_stats
+"1h"
+"anything really"
+"1_Crazy-1337.identifier>NAME👍"
+```
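The unquoted-identifier production translates directly into a regular expression; a minimal Go sketch (the regexp and test strings are illustrative, not part of the spec):

```go
package main

import (
	"fmt"
	"regexp"
)

// unquoted_identifier = ( letter ) { letter | digit } .
// where letter is an ASCII letter or "_".
var unquotedIdent = regexp.MustCompile(`^[A-Za-z_][A-Za-z_0-9]*$`)

func main() {
	for _, s := range []string{"cpu", "_cpu_stats", "1h", "anything really"} {
		fmt.Printf("%-18q %v\n", s, unquotedIdent.MatchString(s))
	}
	// "1h" and "anything really" fail the unquoted rule, which is
	// why the examples above write them as double-quoted identifiers.
}
```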
+## Keywords
+
+```
+ALL       ALTER        AS            ASC        BEGIN      BY
+CREATE    CONTINUOUS   DATABASE      DATABASES  DEFAULT    DELETE
+DESC      DROP         DURATION      END        EXISTS     EXPLAIN
+FIELD     FROM         GRANT         GROUP      IF         IN
+INNER     INSERT       INTO          KEY        KEYS       LIMIT
+SHOW      MEASUREMENT  MEASUREMENTS  OFFSET     ON         ORDER
+PASSWORD  POLICY       POLICIES      PRIVILEGES QUERIES    QUERY
+READ      REPLICATION  RETENTION     REVOKE     SELECT     SERIES
+SLIMIT    SOFFSET      TAG           TO         USER       USERS
+VALUES    WHERE        WITH          WRITE
+```
+
+## Literals
+
+### Integers
+
+InfluxQL supports decimal integer literals. Hexadecimal and octal literals are not currently supported.
+
+```
+int_lit = ( "1" … "9" ) { digit } .
+```
+
+### Floats
+
+InfluxQL supports floating-point literals. Exponents are not currently supported.
+
+```
+float_lit = int_lit "." int_lit .
+```
+
+### Strings
+
+String literals must be surrounded by single quotes. Strings may contain `'` characters as long as they are escaped (i.e., `\'`).
+
+```
+string_lit = `'` { unicode_char } `'` .
+```
+
+### Durations
+
+Duration literals specify a length of time. An integer literal followed immediately (with no spaces) by a duration unit listed below is interpreted as a duration literal.
+
+```
+Duration unit definitions
+-------------------------
+| Units  | Meaning                                 |
+|--------|-----------------------------------------|
+| u or µ | microseconds (1 millionth of a second)  |
+| ms     | milliseconds (1 thousandth of a second) |
+| s      | second                                  |
+| m      | minute                                  |
+| h      | hour                                    |
+| d      | day                                     |
+| w      | week                                    |
+```
+
+```
+duration_lit  = int_lit duration_unit .
+duration_unit = "u" | "µ" | "ms" | "s" | "m" | "h" | "d" | "w" .
+```
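Assuming the unit table above, here is a small Go sketch mapping InfluxQL duration units onto time.Duration values; the map and its use are illustrative, not part of the parser:

```go
package main

import (
	"fmt"
	"time"
)

// durationUnits maps InfluxQL duration units to Go durations.
// "d" and "w" have no time.Duration constant, so they are
// derived from time.Hour.
var durationUnits = map[string]time.Duration{
	"u":  time.Microsecond,
	"µ":  time.Microsecond,
	"ms": time.Millisecond,
	"s":  time.Second,
	"m":  time.Minute,
	"h":  time.Hour,
	"d":  24 * time.Hour,
	"w":  7 * 24 * time.Hour,
}

func main() {
	// 10m, as written in GROUP BY time(10m)
	fmt.Println(10 * durationUnits["m"]) // 10m0s
}
```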
+### Dates & Times
+
+The date and time literal format is not specified in EBNF like the rest of this document. It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL. The reference date time is:
+
+InfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM
+
+```
+time_lit = "2006-01-02 15:04:05.999999" | "2006-01-02" .
+```
+
+### Booleans
+
+```
+bool_lit = TRUE | FALSE .
+```
+
+### Regular Expressions
+
+```
+regex_lit = "/" { unicode_char } "/" .
+```
+
+## Queries
+
+A query is composed of one or more statements separated by a semicolon.
+
+```
+query     = statement { ";" statement } .
+
+statement = alter_retention_policy_stmt |
+            create_continuous_query_stmt |
+            create_database_stmt |
+            create_retention_policy_stmt |
+            create_user_stmt |
+            delete_stmt |
+            drop_continuous_query_stmt |
+            drop_database_stmt |
+            drop_measurement_stmt |
+            drop_retention_policy_stmt |
+            drop_series_stmt |
+            drop_user_stmt |
+            grant_stmt |
+            show_continuous_queries_stmt |
+            show_databases_stmt |
+            show_field_keys_stmt |
+            show_measurements_stmt |
+            show_retention_policies |
+            show_series_stmt |
+            show_tag_keys_stmt |
+            show_tag_values_stmt |
+            show_users_stmt |
+            revoke_stmt |
+            select_stmt .
+```
+
+## Statements
+
+### ALTER RETENTION POLICY
+
+```
+alter_retention_policy_stmt  = "ALTER RETENTION POLICY" policy_name "ON"
+                               db_name retention_policy_option
+                               [ retention_policy_option ]
+                               [ retention_policy_option ] .
+
+db_name                      = identifier .
+
+policy_name                  = identifier .
+
+retention_policy_option      = retention_policy_duration |
+                               retention_policy_replication |
+                               "DEFAULT" .
+
+retention_policy_duration    = "DURATION" duration_lit .
+retention_policy_replication = "REPLICATION" int_lit .
+```
+
+#### Examples:
+
+```sql
+-- Set default retention policy for mydb to 1h.cpu.
+ALTER RETENTION POLICY "1h.cpu" ON mydb DEFAULT;
+
+-- Change duration and replication factor.
+ALTER RETENTION POLICY policy1 ON somedb DURATION 1h REPLICATION 4;
+```
+
+### CREATE CONTINUOUS QUERY
+
+```
+create_continuous_query_stmt = "CREATE CONTINUOUS QUERY" query_name "ON" db_name
+                               "BEGIN" select_stmt "END" .
+
+query_name                   = identifier .
+```
+
+#### Examples:
+
+```sql
+-- selects from the default retention policy and writes into the 6_months retention policy
+CREATE CONTINUOUS QUERY "10m_event_count"
+ON db_name
+BEGIN
+  SELECT count(value)
+  INTO "6_months".events
+  FROM events
+  GROUP BY time(10m)
+END;
+
+-- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy
+CREATE CONTINUOUS QUERY "1h_event_count"
+ON db_name
+BEGIN
+  SELECT sum(count) as count
+  INTO "2_years".events
+  FROM "6_months".events
+  GROUP BY time(1h)
+END;
+```
+
+### CREATE DATABASE
+
+```
+create_database_stmt = "CREATE DATABASE" db_name .
+```
+
+#### Example:
+
+```sql
+CREATE DATABASE foo;
+```
+
+### CREATE RETENTION POLICY
+
+```
+create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name "ON"
+                               db_name retention_policy_duration
+                               retention_policy_replication
+                               [ "DEFAULT" ] .
+```
+
+#### Examples:
+
+```sql
+-- Create a retention policy.
+CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2;
+
+-- Create a retention policy and set it as the default.
+CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2 DEFAULT;
+```
+
+### CREATE USER
+
+```
+create_user_stmt = "CREATE USER" user_name "WITH PASSWORD" password
+                   [ "WITH ALL PRIVILEGES" ] .
+```
+
+#### Examples:
+
+```sql
+-- Create a normal database user.
+CREATE USER jdoe WITH PASSWORD '1337password';
+
+-- Create a cluster admin.
+-- Note: Unlike the GRANT statement, the "PRIVILEGES" keyword is required here.
+CREATE USER jdoe WITH PASSWORD '1337password' WITH ALL PRIVILEGES;
+```
+
+### DELETE
+
+```
+delete_stmt = "DELETE" from_clause where_clause .
+```
+
+#### Example:
+
+```sql
+-- delete data points from the cpu measurement where the region tag
+-- equals 'uswest'
+DELETE FROM cpu WHERE region = 'uswest';
+```
+
+### DROP CONTINUOUS QUERY
+
+```
+drop_continuous_query_stmt = "DROP CONTINUOUS QUERY" query_name .
+```
+
+#### Example:
+
+```sql
+DROP CONTINUOUS QUERY myquery;
+```
+
+### DROP DATABASE
+
+```
+drop_database_stmt = "DROP DATABASE" db_name .
+```
+
+#### Example:
+
+```sql
+DROP DATABASE mydb;
+```
+
+### DROP MEASUREMENT
+
+```
+drop_measurement_stmt = "DROP MEASUREMENT" measurement .
+```
+
+#### Example:
+
+```sql
+-- drop the cpu measurement
+DROP MEASUREMENT cpu;
+```
+
+### DROP RETENTION POLICY
+
+```
+drop_retention_policy_stmt = "DROP RETENTION POLICY" policy_name "ON" db_name .
+```
+
+#### Example:
+
+```sql
+-- drop the retention policy named 1h.cpu from mydb
+DROP RETENTION POLICY "1h.cpu" ON mydb;
+```
+
+### DROP SERIES
+
+```
+drop_series_stmt = "DROP SERIES" [ from_clause ] [ where_clause ] .
+```
+
+#### Example:
+
+```sql
+-- drop all series from the cpu measurement where the region tag equals 'uswest'
+DROP SERIES FROM cpu WHERE region = 'uswest';
+```
+
+### DROP USER
+
+```
+drop_user_stmt = "DROP USER" user_name .
+```
+
+#### Example:
+
+```sql
+DROP USER jdoe;
+```
+
+### GRANT
+
+NOTE: Users can be granted privileges on databases that do not exist.
+
+```
+grant_stmt = "GRANT" privilege [ on_clause ] to_clause .
+```
+
+#### Examples:
+
+```sql
+-- grant cluster admin privileges
+GRANT ALL TO jdoe;
+
+-- grant read access to a database
+GRANT READ ON mydb TO jdoe;
+```
+
+### SHOW CONTINUOUS QUERIES
+
+```
+show_continuous_queries_stmt = "SHOW CONTINUOUS QUERIES" .
+```
+
+#### Example:
+
+```sql
+-- show all continuous queries
+SHOW CONTINUOUS QUERIES;
+```
+
+### SHOW DATABASES
+
+```
+show_databases_stmt = "SHOW DATABASES" .
+```
+
+#### Example:
+
+```sql
+-- show all databases
+SHOW DATABASES;
+```
+
+### SHOW FIELD KEYS
+
+```
+show_field_keys_stmt = "SHOW FIELD KEYS" [ from_clause ] .
+```
+
+#### Examples:
+
+```sql
+-- show field keys from all measurements
+SHOW FIELD KEYS;
+
+-- show field keys from specified measurement
+SHOW FIELD KEYS FROM cpu;
+```
+
+### SHOW MEASUREMENTS
+
+```
+show_measurements_stmt = "SHOW MEASUREMENTS" [ where_clause ] [ group_by_clause ]
+                         [ limit_clause ] [ offset_clause ] .
+```
+
+#### Examples:
+
+```sql
+-- show all measurements
+SHOW MEASUREMENTS;
+
+-- show measurements where region tag = 'uswest' AND host tag = 'serverA'
+SHOW MEASUREMENTS WHERE region = 'uswest' AND host = 'serverA';
+```
+
+### SHOW RETENTION POLICIES
+
+```
+show_retention_policies = "SHOW RETENTION POLICIES" db_name .
+```
+
+#### Example:
+
+```sql
+-- show all retention policies on a database
+SHOW RETENTION POLICIES mydb;
+```
+
+### SHOW SERIES
+
+```
+show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ]
+                   [ group_by_clause ] [ limit_clause ] [ offset_clause ] .
+```
+
+#### Example:
+
+```sql
+-- show all series from the cpu measurement where the region tag equals 'uswest'
+SHOW SERIES FROM cpu WHERE region = 'uswest';
+```
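The SHOW statements above are issued like any other query. As a hedged sketch, assuming an InfluxDB 0.9-style HTTP API that exposes a /query endpoint with db and q parameters (that endpoint is not specified in this document), the Response type from the vendored client shown earlier can decode the result:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"

	"github.com/influxdb/influxdb/client"
)

func main() {
	// Hypothetical local server; the endpoint and parameters are
	// assumptions, not part of this spec.
	u := url.URL{Scheme: "http", Host: "localhost:8086", Path: "/query"}
	q := u.Query()
	q.Set("db", "mydb")
	q.Set("q", "SHOW MEASUREMENTS")
	u.RawQuery = q.Encode()

	resp, err := http.Get(u.String())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Response implements UnmarshalJSON (see the client code above),
	// so per-statement errors come back as real error values.
	var r client.Response
	if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
		panic(err)
	}
	if err := r.Error(); err != nil {
		panic(err)
	}
	fmt.Printf("%d result(s)\n", len(r.Results))
}
```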
+### SHOW TAG KEYS
+
+```
+show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ]
+                     [ group_by_clause ] [ limit_clause ] [ offset_clause ] .
+```
+
+#### Examples:
+
+```sql
+-- show all tag keys
+SHOW TAG KEYS;
+
+-- show all tag keys from the cpu measurement
+SHOW TAG KEYS FROM cpu;
+
+-- show all tag keys from the cpu measurement where the region key = 'uswest'
+SHOW TAG KEYS FROM cpu WHERE region = 'uswest';
+
+-- show all tag keys where the host key = 'serverA'
+SHOW TAG KEYS WHERE host = 'serverA';
+```
+
+### SHOW TAG VALUES
+
+```
+show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause
+                       [ where_clause ] [ group_by_clause ] [ limit_clause ]
+                       [ offset_clause ] .
+```
+
+#### Examples:
+
+```sql
+-- show all tag values across all measurements for the region tag
+SHOW TAG VALUES WITH TAG = 'region';
+
+-- show tag values from the cpu measurement for the region tag
+SHOW TAG VALUES FROM cpu WITH TAG = 'region';
+
+-- show tag values from the cpu measurement for region & host tag keys where service = 'redis'
+SHOW TAG VALUES FROM cpu WITH TAG IN (region, host) WHERE service = 'redis';
+```
+
+### SHOW USERS
+
+```
+show_users_stmt = "SHOW USERS" .
+```
+
+#### Example:
+
+```sql
+-- show all users
+SHOW USERS;
+```
+
+### REVOKE
+
+```
+revoke_stmt = "REVOKE" privilege [ "ON" db_name ] "FROM" user_name .
+```
+
+#### Examples:
+
+```sql
+-- revoke cluster admin privileges from jdoe
+REVOKE ALL PRIVILEGES FROM jdoe;
+
+-- revoke read privileges from jdoe on mydb
+REVOKE READ ON mydb FROM jdoe;
+```
+
+### SELECT
+
+```
+select_stmt = fields from_clause [ into_clause ] [ where_clause ]
+              [ group_by_clause ] [ order_by_clause ] [ limit_clause ]
+              [ offset_clause ] [ slimit_clause ] [ soffset_clause ] .
+```
+
+#### Example:
+
+```sql
+-- select the mean value from the cpu measurement where region = 'uswest', grouped by 10 minute intervals
+SELECT mean(value) FROM cpu WHERE region = 'uswest' GROUP BY time(10m) fill(0);
+```
+
+## Clauses
+
+```
+from_clause     = "FROM" measurements .
+
+group_by_clause = "GROUP BY" dimensions fill(