diff --git a/NOTICE.txt b/NOTICE.txt index 10e10abf1111..fbbaf70ad6df 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -79,6 +79,114 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: code.cloudfoundry.org/go-diodes +Revision: f77fb823c7ee0156ed4cdadaf4f79ac3fd84613f +License type (autodetected): Apache-2.0 +./vendor/code.cloudfoundry.org/go-diodes/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +Copyright (c) 2017-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. + +-------------------------------------------------------------------- +Dependency: code.cloudfoundry.org/go-loggregator +Version: v7.7.0 +Revision: b8d176783c8a6280a34f0e19e0e8f57d722773a1 +License type (autodetected): Apache-2.0 +./vendor/code.cloudfoundry.org/go-loggregator/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +go-loggregator + +Copyright (c) 2017-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +-------------------------------------------------------------------- +Dependency: code.cloudfoundry.org/gofileutils +Revision: 4d0c80011a0f37da1711c184028bc40137cd45af +License type (autodetected): Apache-2.0 +./vendor/code.cloudfoundry.org/gofileutils/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. + +This project is licensed to you under the Apache License, Version 2.0 (the "License"). + +You may not use this project except in compliance with the License. + +This project may include a number of subcomponents with separate copyright notices +and license terms. Your use of these subcomponents is subject to the terms and +conditions of the subcomponent's license, as noted in the LICENSE file. + +-------------------------------------------------------------------- +Dependency: code.cloudfoundry.org/rfc5424 +Revision: 236a6d29298aea12f69978f33393d12465abc429 +License type (autodetected): BSD-2-Clause +./vendor/code.cloudfoundry.org/rfc5424/LICENSE: +-------------------------------------------------------------------- +BSD 2-Clause License + +Copyright (c) 2016, Ross Kinder +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + -------------------------------------------------------------------- Dependency: contrib.go.opencensus.io/exporter/ocagent Revision: 8110e6c0236bb231b19119275a6be6ec666d05c8 @@ -660,6 +768,60 @@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+-------------------------------------------------------------------- +Dependency: github.com/cloudfoundry-community/go-cfclient +Version: master +Revision: 35bcce23fc5f8b9969723ac38c0de1f82c4d3471 +License type (autodetected): MIT +./vendor/github.com/cloudfoundry-community/go-cfclient/LICENSE: +-------------------------------------------------------------------- +The MIT License + +Copyright (c) 2017 Long Nguyen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------- +Dependency: github.com/cloudfoundry/sonde-go +Revision: b33733203bb48d7c56de7cb639d77f78b0449d19 +License type (autodetected): Apache-2.0 +./vendor/github.com/cloudfoundry/sonde-go/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +sonde-go + +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +Limitations under the License. + -------------------------------------------------------------------- Dependency: github.com/containerd/containerd Revision: 36cf5b690dcc00ff0f34ff7799209050c3d0c59a @@ -4788,6 +4950,46 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/mailru/easyjson +Revision: 8edcc4e51f39ddbd3505a3386aff3f435a7fd028 +License type (autodetected): MIT +./vendor/github.com/mailru/easyjson/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------- +Dependency: github.com/Masterminds/semver +Revision: 910aa146bd66780c2815d652b92a7fc5331e533c +License type (autodetected): MIT +./vendor/github.com/Masterminds/semver/LICENSE.txt: +-------------------------------------------------------------------- +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ -------------------------------------------------------------------- Dependency: github.com/mattn/go-colorable Revision: 941b50ebc6efddf4c41c8e4537a5f68a4e686b24 diff --git a/libbeat/common/cache.go b/libbeat/common/cache.go index 25f3be7d92b4..c06aa2427b89 100644 --- a/libbeat/common/cache.go +++ b/libbeat/common/cache.go @@ -68,18 +68,26 @@ func (e *element) UpdateLastAccessTime(now time.Time, expiration time.Duration) // the cache will cause a panic. type Cache struct { sync.RWMutex - timeout time.Duration // Length of time before cache elements expire. - elements map[Key]*element // Data stored by the cache. - clock clock // Function used to get the current time. - listener RemovalListener // Callback listen to notify of evictions. - janitorQuit chan struct{} // Closing this channel stop the janitor. + timeout time.Duration // Length of time before cache elements expire. + accessExpire bool // Expire objects based on access time instead of addition time. + elements map[Key]*element // Data stored by the cache. + clock clock // Function used to get the current time. + listener RemovalListener // Callback listen to notify of evictions. + janitorQuit chan struct{} // Closing this channel stop the janitor. } // NewCache creates and returns a new Cache. d is the length of time after last // access that cache elements expire. initialSize is the initial allocation size // used for the Cache's underlying map. func NewCache(d time.Duration, initialSize int) *Cache { - return newCache(d, initialSize, nil, time.Now) + return newCache(d, true, initialSize, nil, time.Now) +} + +// NewCacheWithExpireOnAdd creates and returns a new Cache that does not update +// the expiration time when the object is accessed. The expiration is only set when +// a new object is added to the cache and is not updated until it expires and is re-inserted. 
+func NewCacheWithExpireOnAdd(d time.Duration, initialSize int) *Cache { + return newCache(d, false, initialSize, nil, time.Now) } // NewCacheWithRemovalListener creates and returns a new Cache and register a @@ -88,15 +96,16 @@ func NewCache(d time.Duration, initialSize int) *Cache { // for the Cache's underlying map. l is the callback function that will be // invoked when cache elements are removed from the map on CleanUp. func NewCacheWithRemovalListener(d time.Duration, initialSize int, l RemovalListener) *Cache { - return newCache(d, initialSize, l, time.Now) + return newCache(d, true, initialSize, l, time.Now) } -func newCache(d time.Duration, initialSize int, l RemovalListener, t clock) *Cache { +func newCache(d time.Duration, accessExpire bool, initialSize int, l RemovalListener, t clock) *Cache { return &Cache{ - timeout: d, - elements: make(map[Key]*element, initialSize), - listener: l, - clock: t, + timeout: d, + accessExpire: accessExpire, + elements: make(map[Key]*element, initialSize), + listener: l, + clock: t, } } @@ -254,7 +263,9 @@ func (c *Cache) get(k Key) (Value, bool) { elem, exists := c.elements[k] now := c.clock() if exists && !elem.IsExpired(now) { - elem.UpdateLastAccessTime(now, elem.timeout) + if c.accessExpire { + elem.UpdateLastAccessTime(now, elem.timeout) + } return elem.value, true } return nil, false diff --git a/libbeat/common/cache_test.go b/libbeat/common/cache_test.go index 8f5fbbdcd06e..eb88bda845f6 100644 --- a/libbeat/common/cache_test.go +++ b/libbeat/common/cache_test.go @@ -60,7 +60,7 @@ var ( func TestExpireWithRemovalListener(t *testing.T) { callbackKey = nil callbackValue = nil - c := newCache(Timeout, InitalSize, removalListener, fakeClock) + c := newCache(Timeout, true, InitalSize, removalListener, fakeClock) c.Put(alphaKey, alphaValue) currentTime = currentTime.Add(Timeout).Add(time.Nanosecond) assert.Equal(t, 1, c.CleanUp()) @@ -70,7 +70,7 @@ func TestExpireWithRemovalListener(t *testing.T) { // Test that the 
number of removed elements is returned by Expire. func TestExpireWithoutRemovalListener(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) c.Put(alphaKey, alphaValue) c.Put(bravoKey, bravoValue) currentTime = currentTime.Add(Timeout).Add(time.Nanosecond) @@ -78,7 +78,7 @@ func TestExpireWithoutRemovalListener(t *testing.T) { } func TestPutIfAbsent(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) oldValue := c.PutIfAbsent(alphaKey, alphaValue) assert.Nil(t, oldValue) oldValue = c.PutIfAbsent(alphaKey, bravoValue) @@ -86,7 +86,7 @@ func TestPutIfAbsent(t *testing.T) { } func TestPut(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) oldValue := c.Put(alphaKey, alphaValue) assert.Nil(t, oldValue) oldValue = c.Put(bravoKey, bravoValue) @@ -99,7 +99,7 @@ func TestPut(t *testing.T) { } func TestReplace(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) // Nil is returned when the value does not exist and no element is added. 
assert.Nil(t, c.Replace(alphaKey, alphaValue)) @@ -112,7 +112,7 @@ func TestReplace(t *testing.T) { } func TestGetUpdatesLastAccessTime(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) c.Put(alphaKey, alphaValue) currentTime = currentTime.Add(Timeout / 2) @@ -121,19 +121,29 @@ func TestGetUpdatesLastAccessTime(t *testing.T) { assert.Equal(t, alphaValue, c.Get(alphaKey)) } +func TestGetDoesntUpdateLastAccessTime(t *testing.T) { + c := newCache(Timeout, false, InitalSize, nil, fakeClock) + c.Put(alphaKey, alphaValue) + + currentTime = currentTime.Add(Timeout - 1) + assert.Equal(t, alphaValue, c.Get(alphaKey)) + currentTime = currentTime.Add(Timeout - 1) + assert.Nil(t, c.Get(alphaKey)) +} + func TestDeleteNonExistentKey(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) assert.Nil(t, c.Delete(alphaKey)) } func TestDeleteExistingKey(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) c.Put(alphaKey, alphaValue) assert.Equal(t, alphaValue, c.Delete(alphaKey)) } func TestDeleteExpiredKey(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) c.Put(alphaKey, alphaValue) currentTime = currentTime.Add(Timeout).Add(time.Nanosecond) assert.Nil(t, c.Delete(alphaKey)) @@ -141,7 +151,7 @@ func TestDeleteExpiredKey(t *testing.T) { // Test that Entries returns the non-expired map entries. func TestEntries(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) c.Put(alphaKey, alphaValue) currentTime = currentTime.Add(Timeout).Add(time.Nanosecond) c.Put(bravoKey, bravoValue) @@ -152,7 +162,7 @@ func TestEntries(t *testing.T) { // Test that Size returns a count of both expired and non-expired elements. 
func TestSize(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) c.Put(alphaKey, alphaValue) currentTime = currentTime.Add(Timeout).Add(time.Nanosecond) c.Put(bravoKey, bravoValue) @@ -160,7 +170,7 @@ func TestSize(t *testing.T) { } func TestGetExpiredValue(t *testing.T) { - c := newCache(Timeout, InitalSize, nil, fakeClock) + c := newCache(Timeout, true, InitalSize, nil, fakeClock) c.Put(alphaKey, alphaValue) v := c.Get(alphaKey) assert.Equal(t, alphaValue, v) @@ -174,7 +184,7 @@ func TestGetExpiredValue(t *testing.T) { // RemovalListener is invoked during clean up. func TestJanitor(t *testing.T) { keyChan := make(chan Key) - c := newCache(Timeout, InitalSize, func(k Key, v Value) { + c := newCache(Timeout, true, InitalSize, func(k Key, v Value) { keyChan <- k }, fakeClock) c.Put(alphaKey, alphaValue) diff --git a/vendor/code.cloudfoundry.org/go-diodes/LICENSE b/vendor/code.cloudfoundry.org/go-diodes/LICENSE new file mode 100644 index 000000000000..f49a4e16e68b --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-diodes/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/go-diodes/NOTICE b/vendor/code.cloudfoundry.org/go-diodes/NOTICE new file mode 100644 index 000000000000..9867e6887b87 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-diodes/NOTICE @@ -0,0 +1,18 @@ +Copyright (c) 2017-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. 
diff --git a/vendor/code.cloudfoundry.org/go-diodes/README.md b/vendor/code.cloudfoundry.org/go-diodes/README.md new file mode 100644 index 000000000000..ff3ae457b060 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-diodes/README.md @@ -0,0 +1,159 @@ +![diode][diode-logo] + +[![GoDoc][go-doc-badge]][go-doc] [![travis][travis-badge]][travis] + +Diodes are ring buffers manipulated via atomics. + +Diodes are optimized for high throughput scenarios where losing data is +acceptable. Unlike a channel, a diode will overwrite data on writes in lieu +of blocking. A diode does its best to not "push back" on the producer. +In other words, invoking `Set()` on a diode never blocks. + +### Installation + +```bash +go get code.cloudfoundry.org/go-diodes +``` + +### Example: Basic Use + +```go +d := diodes.NewOneToOne(1024, diodes.AlertFunc(func(missed int) { + log.Printf("Dropped %d messages", missed) +})) + +// writer +go func() { + for i := 0; i < 2048; i++ { + // Warning: Do not use i. By taking the address, + // you would not get each value + j := i + d.Set(diodes.GenericDataType(&j)) + } +}() + +// reader +poller := diodes.NewPoller(d) +for { + i := poller.Next() + fmt.Println(*(*int)(i)) +} +``` + +### Example: Creating a Concrete Shell + +Diodes accept and return `diodes.GenericDataType`. It is recommended to not +use these generic pointers directly. Rather, it is a much better experience to +wrap the diode in a concrete shell that accepts the types your program works +with and does the type casting for you. 
Here is an example of how to create a +concrete shell for `[]byte`: + +```go +type OneToOne struct { + d *diodes.Poller +} + +func NewOneToOne(size int, alerter diodes.Alerter) *OneToOne { + return &OneToOne{ + d: diodes.NewPoller(diodes.NewOneToOne(size, alerter)), + } +} + +func (d *OneToOne) Set(data []byte) { + d.d.Set(diodes.GenericDataType(&data)) +} + +func (d *OneToOne) TryNext() ([]byte, bool) { + data, ok := d.d.TryNext() + if !ok { + return nil, ok + } + + return *(*[]byte)(data), true +} + +func (d *OneToOne) Next() []byte { + data := d.d.Next() + return *(*[]byte)(data) +} +``` + +Creating a concrete shell gives you the following advantages: + +- The compiler will tell you if you use a diode to read or write data of the + wrong type. +- The type casting syntax in go is not common and should be hidden. +- It prevents the generic pointer type from escaping in to client code. + +### Dropping Data + +The diode takes an `Alerter` as an argument to alert the user code to when +the read noticed it missed data. It is important to note that the go-routine +consuming from the diode is used to signal the alert. + +When the diode notices it has fallen behind, it will move the read index to +the new write index and therefore drop more than a single message. + +There are two things to consider when choosing a diode: + +1. Storage layer +2. Access layer + +### Storage Layer + +##### OneToOne + +The OneToOne diode is meant to be used by one producing (invoking `Set()`) +go-routine and a (different) consuming (invoking `TryNext()`) go-routine. It +is not thread safe for multiple readers or writers. + +##### ManyToOne + +The ManyToOne diode is optimized for many producing (invoking `Set()`) +go-routines and a single consuming (invoking `TryNext()`) go-routine. It is +not thread safe for multiple readers. + +It is recommended to have a larger diode buffer size if the number of producers +is high. 
This is to avoid the diode from having to mitigate write collisions +(it will call its alert function if this occurs). + +### Access Layer + +##### Poller + +The Poller uses polling via `time.Sleep(...)` when `Next()` is invoked. While +polling might seem sub-optimal, it allows the producer to be completely +decoupled from the consumer. If you require very minimal push back on the +producer, then the Poller is a better choice. However, if you require several +diodes (e.g. one per connected client), then having several go-routines +polling (sleeping) may be hard on the scheduler. + +##### Waiter + +The Waiter uses a conditional mutex to manage when the reader is alerted +of new data. While this method is great for the scheduler, it does have +extra overhead for the producer. Therefore, it is better suited for situations +where you have several diodes and can afford slightly slower producers. + +### Benchmarks + +There are benchmarks that compare the various storage and access layers to +channels. To run them: + +``` +go test -bench=. -run=NoTest +``` + +### Known Issues + +If a diode was to be written to `18446744073709551615+1` times it would overflow +a `uint64`. This will cause problems if the size of the diode is not a power +of two (`2^x`). If you write into a diode at the rate of one message every +nanosecond, without restarting your process, it would take you 584.54 years to +encounter this issue. 
+ +[diode-logo]: https://raw.githubusercontent.com/cloudfoundry/go-diodes/gh-pages/diode-logo.png +[go-doc-badge]: https://godoc.org/code.cloudfoundry.org/go-diodes?status.svg +[go-doc]: https://godoc.org/code.cloudfoundry.org/go-diodes +[travis-badge]: https://travis-ci.org/cloudfoundry/go-diodes.svg?branch=master +[travis]: https://travis-ci.org/cloudfoundry/go-diodes?branch=master diff --git a/vendor/code.cloudfoundry.org/go-diodes/many_to_one.go b/vendor/code.cloudfoundry.org/go-diodes/many_to_one.go new file mode 100644 index 000000000000..6810b20f5393 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-diodes/many_to_one.go @@ -0,0 +1,130 @@ +package diodes + +import ( + "log" + "sync/atomic" + "unsafe" +) + +// ManyToOne diode is optimal for many writers (go-routines B-n) and a single +// reader (go-routine A). It is not thread safe for multiple readers. +type ManyToOne struct { + writeIndex uint64 + buffer []unsafe.Pointer + readIndex uint64 + alerter Alerter +} + +// NewManyToOne creates a new diode (ring buffer). The ManyToOne diode +// is optimzed for many writers (on go-routines B-n) and a single reader +// (on go-routine A). The alerter is invoked on the read's go-routine. It is +// called when it notices that the writer go-routine has passed it and wrote +// over data. A nil can be used to ignore alerts. +func NewManyToOne(size int, alerter Alerter) *ManyToOne { + if alerter == nil { + alerter = AlertFunc(func(int) {}) + } + + d := &ManyToOne{ + buffer: make([]unsafe.Pointer, size), + alerter: alerter, + } + + // Start write index at the value before 0 + // to allow the first write to use AddUint64 + // and still have a beginning index of 0 + d.writeIndex = ^d.writeIndex + return d +} + +// Set sets the data in the next slot of the ring buffer. 
+func (d *ManyToOne) Set(data GenericDataType) { + for { + writeIndex := atomic.AddUint64(&d.writeIndex, 1) + idx := writeIndex % uint64(len(d.buffer)) + old := atomic.LoadPointer(&d.buffer[idx]) + + if old != nil && + (*bucket)(old) != nil && + (*bucket)(old).seq > writeIndex-uint64(len(d.buffer)) { + log.Println("Diode set collision: consider using a larger diode") + continue + } + + newBucket := &bucket{ + data: data, + seq: writeIndex, + } + + if !atomic.CompareAndSwapPointer(&d.buffer[idx], old, unsafe.Pointer(newBucket)) { + log.Println("Diode set collision: consider using a larger diode") + continue + } + + return + } +} + +// TryNext will attempt to read from the next slot of the ring buffer. +// If there is not data available, it will return (nil, false). +func (d *ManyToOne) TryNext() (data GenericDataType, ok bool) { + // Read a value from the ring buffer based on the readIndex. + idx := d.readIndex % uint64(len(d.buffer)) + result := (*bucket)(atomic.SwapPointer(&d.buffer[idx], nil)) + + // When the result is nil that means the writer has not had the + // opportunity to write a value into the diode. This value must be ignored + // and the read head must not increment. + if result == nil { + return nil, false + } + + // When the seq value is less than the current read index that means a + // value was read from idx that was previously written but has since has + // been dropped. This value must be ignored and the read head must not + // increment. + // + // The simulation for this scenario assumes the fast forward occurred as + // detailed below. + // + // 5. The reader reads again getting seq 5. It then reads again expecting + // seq 6 but gets seq 2. This is a read of a stale value that was + // effectively "dropped" so the read fails and the read head stays put. 
+ // `| 4 | 5 | 2 | 3 |` r: 7, w: 6 + // + if result.seq < d.readIndex { + return nil, false + } + + // When the seq value is greater than the current read index that means a + // value was read from idx that overwrote the value that was expected to + // be at this idx. This happens when the writer has lapped the reader. The + // reader needs to catch up to the writer so it moves its write head to + // the new seq, effectively dropping the messages that were not read in + // between the two values. + // + // Here is a simulation of this scenario: + // + // 1. Both the read and write heads start at 0. + // `| nil | nil | nil | nil |` r: 0, w: 0 + // 2. The writer fills the buffer. + // `| 0 | 1 | 2 | 3 |` r: 0, w: 4 + // 3. The writer laps the read head. + // `| 4 | 5 | 2 | 3 |` r: 0, w: 6 + // 4. The reader reads the first value, expecting a seq of 0 but reads 4, + // this forces the reader to fast forward to 5. + // `| 4 | 5 | 2 | 3 |` r: 5, w: 6 + // + if result.seq > d.readIndex { + dropped := result.seq - d.readIndex + d.readIndex = result.seq + d.alerter.Alert(int(dropped)) + } + + // Only increment read index if a regular read occurred (where seq was + // equal to readIndex) or a value was read that caused a fast forward + // (where seq was greater than readIndex). + // + d.readIndex++ + return result.data, true +} diff --git a/vendor/code.cloudfoundry.org/go-diodes/one_to_one.go b/vendor/code.cloudfoundry.org/go-diodes/one_to_one.go new file mode 100644 index 000000000000..aaf66d155e59 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-diodes/one_to_one.go @@ -0,0 +1,129 @@ +package diodes + +import ( + "sync/atomic" + "unsafe" +) + +// GenericDataType is the data type the diodes operate on. +type GenericDataType unsafe.Pointer + +// Alerter is used to report how many values were overwritten since the +// last write. 
+type Alerter interface { + Alert(missed int) +} + +// AlertFunc type is an adapter to allow the use of ordinary functions as +// Alert handlers. +type AlertFunc func(missed int) + +// Alert calls f(missed) +func (f AlertFunc) Alert(missed int) { + f(missed) +} + +type bucket struct { + data GenericDataType + seq uint64 // seq is the recorded write index at the time of writing +} + +// OneToOne diode is meant to be used by a single reader and a single writer. +// It is not thread safe if used otherwise. +type OneToOne struct { + buffer []unsafe.Pointer + writeIndex uint64 + readIndex uint64 + alerter Alerter +} + +// NewOneToOne creates a new diode is meant to be used by a single reader and +// a single writer. The alerter is invoked on the read's go-routine. It is +// called when it notices that the writer go-routine has passed it and wrote +// over data. A nil can be used to ignore alerts. +func NewOneToOne(size int, alerter Alerter) *OneToOne { + if alerter == nil { + alerter = AlertFunc(func(int) {}) + } + + return &OneToOne{ + buffer: make([]unsafe.Pointer, size), + alerter: alerter, + } +} + +// Set sets the data in the next slot of the ring buffer. +func (d *OneToOne) Set(data GenericDataType) { + idx := d.writeIndex % uint64(len(d.buffer)) + + newBucket := &bucket{ + data: data, + seq: d.writeIndex, + } + d.writeIndex++ + + atomic.StorePointer(&d.buffer[idx], unsafe.Pointer(newBucket)) +} + +// TryNext will attempt to read from the next slot of the ring buffer. +// If there is no data available, it will return (nil, false). +func (d *OneToOne) TryNext() (data GenericDataType, ok bool) { + // Read a value from the ring buffer based on the readIndex. + idx := d.readIndex % uint64(len(d.buffer)) + result := (*bucket)(atomic.SwapPointer(&d.buffer[idx], nil)) + + // When the result is nil that means the writer has not had the + // opportunity to write a value into the diode. This value must be ignored + // and the read head must not increment. 
+ if result == nil { + return nil, false + } + + // When the seq value is less than the current read index that means a + // value was read from idx that was previously written but has since has + // been dropped. This value must be ignored and the read head must not + // increment. + // + // The simulation for this scenario assumes the fast forward occurred as + // detailed below. + // + // 5. The reader reads again getting seq 5. It then reads again expecting + // seq 6 but gets seq 2. This is a read of a stale value that was + // effectively "dropped" so the read fails and the read head stays put. + // `| 4 | 5 | 2 | 3 |` r: 7, w: 6 + // + if result.seq < d.readIndex { + return nil, false + } + + // When the seq value is greater than the current read index that means a + // value was read from idx that overwrote the value that was expected to + // be at this idx. This happens when the writer has lapped the reader. The + // reader needs to catch up to the writer so it moves its write head to + // the new seq, effectively dropping the messages that were not read in + // between the two values. + // + // Here is a simulation of this scenario: + // + // 1. Both the read and write heads start at 0. + // `| nil | nil | nil | nil |` r: 0, w: 0 + // 2. The writer fills the buffer. + // `| 0 | 1 | 2 | 3 |` r: 0, w: 4 + // 3. The writer laps the read head. + // `| 4 | 5 | 2 | 3 |` r: 0, w: 6 + // 4. The reader reads the first value, expecting a seq of 0 but reads 4, + // this forces the reader to fast forward to 5. + // `| 4 | 5 | 2 | 3 |` r: 5, w: 6 + // + if result.seq > d.readIndex { + dropped := result.seq - d.readIndex + d.readIndex = result.seq + d.alerter.Alert(int(dropped)) + } + + // Only increment read index if a regular read occurred (where seq was + // equal to readIndex) or a value was read that caused a fast forward + // (where seq was greater than readIndex). 
+ d.readIndex++ + return result.data, true +} diff --git a/vendor/code.cloudfoundry.org/go-diodes/poller.go b/vendor/code.cloudfoundry.org/go-diodes/poller.go new file mode 100644 index 000000000000..d317a233fdf8 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-diodes/poller.go @@ -0,0 +1,80 @@ +package diodes + +import ( + "context" + "time" +) + +// Diode is any implementation of a diode. +type Diode interface { + Set(GenericDataType) + TryNext() (GenericDataType, bool) +} + +// Poller will poll a diode until a value is available. +type Poller struct { + Diode + interval time.Duration + ctx context.Context +} + +// PollerConfigOption can be used to setup the poller. +type PollerConfigOption func(*Poller) + +// WithPollingInterval sets the interval at which the diode is queried +// for new data. The default is 10ms. +func WithPollingInterval(interval time.Duration) PollerConfigOption { + return PollerConfigOption(func(c *Poller) { + c.interval = interval + }) +} + +// WithPollingContext sets the context to cancel any retrieval (Next()). It +// will not change any results for adding data (Set()). Default is +// context.Background(). +func WithPollingContext(ctx context.Context) PollerConfigOption { + return PollerConfigOption(func(c *Poller) { + c.ctx = ctx + }) +} + +// NewPoller returns a new Poller that wraps the given diode. +func NewPoller(d Diode, opts ...PollerConfigOption) *Poller { + p := &Poller{ + Diode: d, + interval: 10 * time.Millisecond, + ctx: context.Background(), + } + + for _, o := range opts { + o(p) + } + + return p +} + +// Next polls the diode until data is available or until the context is done. +// If the context is done, then nil will be returned. 
+func (p *Poller) Next() GenericDataType { + for { + data, ok := p.Diode.TryNext() + if !ok { + if p.isDone() { + return nil + } + + time.Sleep(p.interval) + continue + } + return data + } +} + +func (p *Poller) isDone() bool { + select { + case <-p.ctx.Done(): + return true + default: + return false + } +} diff --git a/vendor/code.cloudfoundry.org/go-diodes/waiter.go b/vendor/code.cloudfoundry.org/go-diodes/waiter.go new file mode 100644 index 000000000000..a3770ffe4dae --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-diodes/waiter.go @@ -0,0 +1,83 @@ +package diodes + +import ( + "context" + "sync" +) + +// Waiter will use a conditional mutex to alert the reader to when data is +// available. +type Waiter struct { + Diode + mu sync.Mutex + c *sync.Cond + ctx context.Context +} + +// WaiterConfigOption can be used to setup the waiter. +type WaiterConfigOption func(*Waiter) + +// WithWaiterContext sets the context to cancel any retrieval (Next()). It +// will not change any results for adding data (Set()). Default is +// context.Background(). +func WithWaiterContext(ctx context.Context) WaiterConfigOption { + return WaiterConfigOption(func(c *Waiter) { + c.ctx = ctx + }) +} + +// NewWaiter returns a new Waiter that wraps the given diode. +func NewWaiter(d Diode, opts ...WaiterConfigOption) *Waiter { + w := new(Waiter) + w.Diode = d + w.c = sync.NewCond(&w.mu) + w.ctx = context.Background() + + for _, opt := range opts { + opt(w) + } + + go func() { + <-w.ctx.Done() + w.c.Broadcast() + }() + + return w +} + +// Set invokes the wrapped diode's Set with the given data and uses Broadcast +// to wake up any readers. +func (w *Waiter) Set(data GenericDataType) { + w.Diode.Set(data) + w.c.Broadcast() +} + +// Next returns the next data point on the wrapped diode. If there is not any +// new data, it will Wait for set to be called or the context to be done. +// If the context is done, then nil will be returned. 
+func (w *Waiter) Next() GenericDataType { + w.mu.Lock() + defer w.mu.Unlock() + + for { + data, ok := w.Diode.TryNext() + if !ok { + if w.isDone() { + return nil + } + + w.c.Wait() + continue + } + return data + } +} + +func (w *Waiter) isDone() bool { + select { + case <-w.ctx.Done(): + return true + default: + return false + } +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/LICENSE b/vendor/code.cloudfoundry.org/go-loggregator/LICENSE new file mode 100644 index 000000000000..e434046f641f --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/LICENSE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/go-loggregator/NOTICE b/vendor/code.cloudfoundry.org/go-loggregator/NOTICE new file mode 100644 index 000000000000..2d5b763921f3 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/NOTICE @@ -0,0 +1,16 @@ +go-loggregator + +Copyright (c) 2017-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/vendor/code.cloudfoundry.org/go-loggregator/README.md b/vendor/code.cloudfoundry.org/go-loggregator/README.md new file mode 100644 index 000000000000..fb35c873ffca --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/README.md @@ -0,0 +1,66 @@ +# go-loggregator +[![GoDoc][go-doc-badge]][go-doc] [![travis][travis-badge]][travis] [![slack.cloudfoundry.org][slack-badge]][loggregator-slack] + +This is a golang client library for [Loggregator][loggregator]. + +## Versions + +At present, Loggregator supports two API versions: v1 (UDP) and v2 (gRPC). +This library provides clients for both versions. + +Note that this library is also versioned. Its versions have *no* relation to +the Loggregator API. + +## Usage + +This repository should be imported as: + +`import loggregator "code.cloudfoundry.org/go-loggregator"` + +## Examples + +To build the examples, `cd` into the directory of the example and run `go build` + +### V1 Ingress + +Emits envelopes to metron using dropsonde. + +### V2 Ingress + +Emits envelopes to metron using the V2 loggregator-api. + +Required Environment Variables: + +* `CA_CERT_PATH` +* `CERT_PATH` +* `KEY_PATH` + +### Runtime Stats + +Emits information about the running Go proccess using a V2 ingress client. + +Required Environment Variables: + +* `CA_CERT_PATH` +* `CERT_PATH` +* `KEY_PATH` + +### Envelope Stream Connector + +Reads envelopes from the Loggregator API (e.g. Reverse Log Proxy). 
+ +Required Environment Variables: + +* `CA_CERT_PATH` +* `CERT_PATH` +* `KEY_PATH` +* `LOGS_API_ADDR` +* `SHARD_ID` + +[slack-badge]: https://slack.cloudfoundry.org/badge.svg +[loggregator-slack]: https://cloudfoundry.slack.com/archives/loggregator +[loggregator]: https://github.com/cloudfoundry/loggregator +[go-doc-badge]: https://godoc.org/code.cloudfoundry.org/go-loggregator?status.svg +[go-doc]: https://godoc.org/code.cloudfoundry.org/go-loggregator +[travis-badge]: https://travis-ci.org/cloudfoundry/go-loggregator.svg?branch=master +[travis]: https://travis-ci.org/cloudfoundry/go-loggregator?branch=master diff --git a/vendor/code.cloudfoundry.org/go-loggregator/conversion/tov1.go b/vendor/code.cloudfoundry.org/go-loggregator/conversion/tov1.go new file mode 100644 index 000000000000..12ba2bcf0188 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/conversion/tov1.go @@ -0,0 +1,327 @@ +package conversion + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "strconv" + "strings" + + "code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2" + + "github.com/cloudfoundry/sonde-go/events" + "github.com/gogo/protobuf/proto" +) + +// ToV1 converts v2 envelopes down to v1 envelopes. The v2 Envelope may be +// mutated during the conversion and share pointers with the new v1 envelope +// for efficiency in creating the v1 envelope. As a result the envelope you +// pass in should no longer be used. 
+func ToV1(e *loggregator_v2.Envelope) []*events.Envelope { + var envelopes []*events.Envelope + switch (e.Message).(type) { + case *loggregator_v2.Envelope_Log: + envelopes = convertLog(e) + case *loggregator_v2.Envelope_Counter: + envelopes = convertCounter(e) + case *loggregator_v2.Envelope_Gauge: + envelopes = convertGauge(e) + case *loggregator_v2.Envelope_Timer: + envelopes = convertTimer(e) + } + + for _, v1e := range envelopes { + delete(v1e.Tags, "__v1_type") + delete(v1e.Tags, "origin") + delete(v1e.Tags, "deployment") + delete(v1e.Tags, "job") + delete(v1e.Tags, "index") + delete(v1e.Tags, "ip") + } + + return envelopes +} + +func createBaseV1(e *loggregator_v2.Envelope) *events.Envelope { + v1e := &events.Envelope{ + Origin: proto.String(getV2Tag(e, "origin")), + Deployment: proto.String(getV2Tag(e, "deployment")), + Job: proto.String(getV2Tag(e, "job")), + Index: proto.String(getV2Tag(e, "index")), + Timestamp: proto.Int64(e.Timestamp), + Ip: proto.String(getV2Tag(e, "ip")), + Tags: convertTags(e), + } + + if e.SourceId != "" { + v1e.Tags["source_id"] = e.SourceId + } + + return v1e +} + +func getV2Tag(e *loggregator_v2.Envelope, key string) string { + if value, ok := e.GetTags()[key]; ok { + return value + } + + d := e.GetDeprecatedTags()[key] + if d == nil { + return "" + } + + switch v := d.Data.(type) { + case *loggregator_v2.Value_Text: + return v.Text + case *loggregator_v2.Value_Integer: + return fmt.Sprintf("%d", v.Integer) + case *loggregator_v2.Value_Decimal: + return fmt.Sprintf("%f", v.Decimal) + default: + return "" + } +} + +func convertTimer(v2e *loggregator_v2.Envelope) []*events.Envelope { + v1e := createBaseV1(v2e) + timer := v2e.GetTimer() + v1e.EventType = events.Envelope_HttpStartStop.Enum() + instanceIndex, err := strconv.Atoi(v2e.InstanceId) + if err != nil { + instanceIndex = 0 + } + + method := events.Method(events.Method_value[getV2Tag(v2e, "method")]) + peerType := events.PeerType(events.PeerType_value[getV2Tag(v2e, 
"peer_type")]) + + v1e.HttpStartStop = &events.HttpStartStop{ + StartTimestamp: proto.Int64(timer.Start), + StopTimestamp: proto.Int64(timer.Stop), + RequestId: convertUUID(parseUUID(getV2Tag(v2e, "request_id"))), + ApplicationId: convertUUID(parseUUID(v2e.SourceId)), + PeerType: &peerType, + Method: &method, + Uri: proto.String(getV2Tag(v2e, "uri")), + RemoteAddress: proto.String(getV2Tag(v2e, "remote_address")), + UserAgent: proto.String(getV2Tag(v2e, "user_agent")), + StatusCode: proto.Int32(int32(atoi(getV2Tag(v2e, "status_code")))), + ContentLength: proto.Int64(atoi(getV2Tag(v2e, "content_length"))), + InstanceIndex: proto.Int32(int32(instanceIndex)), + InstanceId: proto.String(getV2Tag(v2e, "routing_instance_id")), + Forwarded: strings.Split(getV2Tag(v2e, "forwarded"), "\n"), + } + + delete(v1e.Tags, "peer_type") + delete(v1e.Tags, "method") + delete(v1e.Tags, "request_id") + delete(v1e.Tags, "uri") + delete(v1e.Tags, "remote_address") + delete(v1e.Tags, "user_agent") + delete(v1e.Tags, "status_code") + delete(v1e.Tags, "content_length") + delete(v1e.Tags, "routing_instance_id") + delete(v1e.Tags, "forwarded") + + return []*events.Envelope{v1e} +} + +func atoi(s string) int64 { + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0 + } + + return i +} + +func convertLog(v2e *loggregator_v2.Envelope) []*events.Envelope { + v1e := createBaseV1(v2e) + if getV2Tag(v2e, "__v1_type") == "Error" { + recoverError(v1e, v2e) + return []*events.Envelope{v1e} + } + logMessage := v2e.GetLog() + v1e.EventType = events.Envelope_LogMessage.Enum() + v1e.LogMessage = &events.LogMessage{ + Message: logMessage.Payload, + MessageType: messageType(logMessage), + Timestamp: proto.Int64(v2e.Timestamp), + AppId: proto.String(v2e.SourceId), + SourceType: proto.String(getV2Tag(v2e, "source_type")), + SourceInstance: proto.String(v2e.InstanceId), + } + delete(v1e.Tags, "source_type") + + return []*events.Envelope{v1e} +} + +func recoverError(v1e *events.Envelope, v2e 
*loggregator_v2.Envelope) { + logMessage := v2e.GetLog() + v1e.EventType = events.Envelope_Error.Enum() + code := int32(atoi(getV2Tag(v2e, "code"))) + v1e.Error = &events.Error{ + Source: proto.String(getV2Tag(v2e, "source")), + Code: proto.Int32(code), + Message: proto.String(string(logMessage.Payload)), + } + delete(v1e.Tags, "source") + delete(v1e.Tags, "code") +} + +func convertCounter(v2e *loggregator_v2.Envelope) []*events.Envelope { + v1e := createBaseV1(v2e) + counterEvent := v2e.GetCounter() + v1e.EventType = events.Envelope_CounterEvent.Enum() + if v2e.InstanceId != "" { + v1e.GetTags()["instance_id"] = v2e.InstanceId + } + v1e.CounterEvent = &events.CounterEvent{ + Name: proto.String(counterEvent.Name), + Delta: proto.Uint64(counterEvent.GetDelta()), + Total: proto.Uint64(counterEvent.GetTotal()), + } + + return []*events.Envelope{v1e} +} + +func convertGauge(v2e *loggregator_v2.Envelope) []*events.Envelope { + if v1e := tryConvertContainerMetric(v2e); v1e != nil { + return []*events.Envelope{v1e} + } + + var results []*events.Envelope + gaugeEvent := v2e.GetGauge() + + for key, metric := range gaugeEvent.Metrics { + v1e := createBaseV1(v2e) + v1e.EventType = events.Envelope_ValueMetric.Enum() + unit, value, ok := extractGaugeValues(metric) + if !ok { + return nil + } + + if v2e.InstanceId != "" { + v1e.GetTags()["instance_id"] = v2e.InstanceId + } + v1e.ValueMetric = &events.ValueMetric{ + Name: proto.String(key), + Unit: proto.String(unit), + Value: proto.Float64(value), + } + results = append(results, v1e) + } + + return results +} + +func extractGaugeValues(metric *loggregator_v2.GaugeValue) (string, float64, bool) { + if metric == nil { + return "", 0, false + } + + return metric.Unit, metric.Value, true +} + +func instanceIndex(v2e *loggregator_v2.Envelope) int32 { + defaultIndex, err := strconv.Atoi(v2e.InstanceId) + if err != nil { + defaultIndex = 0 + } + + id := v2e.GetGauge().GetMetrics()["instance_index"] + if id == nil { + return 
int32(defaultIndex) + } + return int32(id.Value) +} + +func tryConvertContainerMetric(v2e *loggregator_v2.Envelope) *events.Envelope { + v1e := createBaseV1(v2e) + gaugeEvent := v2e.GetGauge() + if len(gaugeEvent.Metrics) == 1 { + return nil + } + + required := []string{ + "cpu", + "memory", + "disk", + "memory_quota", + "disk_quota", + } + + for _, req := range required { + if v, ok := gaugeEvent.Metrics[req]; !ok || v == nil || (v.Unit == "" && v.Value == 0) { + return nil + } + } + + v1e.EventType = events.Envelope_ContainerMetric.Enum() + v1e.ContainerMetric = &events.ContainerMetric{ + ApplicationId: proto.String(v2e.SourceId), + InstanceIndex: proto.Int32(instanceIndex(v2e)), + CpuPercentage: proto.Float64(gaugeEvent.Metrics["cpu"].Value), + MemoryBytes: proto.Uint64(uint64(gaugeEvent.Metrics["memory"].Value)), + DiskBytes: proto.Uint64(uint64(gaugeEvent.Metrics["disk"].Value)), + MemoryBytesQuota: proto.Uint64(uint64(gaugeEvent.Metrics["memory_quota"].Value)), + DiskBytesQuota: proto.Uint64(uint64(gaugeEvent.Metrics["disk_quota"].Value)), + } + + return v1e +} + +func convertTags(e *loggregator_v2.Envelope) map[string]string { + oldTags := make(map[string]string) + for k, v := range e.Tags { + oldTags[k] = v + } + + for key, value := range e.GetDeprecatedTags() { + if value == nil { + continue + } + switch value.Data.(type) { + case *loggregator_v2.Value_Text: + oldTags[key] = value.GetText() + case *loggregator_v2.Value_Integer: + oldTags[key] = fmt.Sprintf("%d", value.GetInteger()) + case *loggregator_v2.Value_Decimal: + oldTags[key] = fmt.Sprintf("%f", value.GetDecimal()) + } + } + + return oldTags +} + +func messageType(log *loggregator_v2.Log) *events.LogMessage_MessageType { + if log.Type == loggregator_v2.Log_OUT { + return events.LogMessage_OUT.Enum() + } + return events.LogMessage_ERR.Enum() +} + +func parseUUID(id string) []byte { + // e.g. 
b3015d69-09cd-476d-aace-ad2d824d5ab7 + if len(id) != 36 { + return nil + } + h := id[:8] + id[9:13] + id[14:18] + id[19:23] + id[24:] + + data, err := hex.DecodeString(h) + if err != nil { + return nil + } + + return data +} + +func convertUUID(id []byte) *events.UUID { + if len(id) != 16 { + return nil + } + + return &events.UUID{ + Low: proto.Uint64(binary.LittleEndian.Uint64(id[:8])), + High: proto.Uint64(binary.LittleEndian.Uint64(id[8:])), + } +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/conversion/tov2.go b/vendor/code.cloudfoundry.org/go-loggregator/conversion/tov2.go new file mode 100644 index 000000000000..f5188fcbf848 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/conversion/tov2.go @@ -0,0 +1,232 @@ +package conversion + +import ( + "encoding/binary" + "fmt" + "strconv" + "strings" + + "code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2" + "github.com/cloudfoundry/sonde-go/events" +) + +// ToV2 converts v1 envelopes up to v2 envelopes. e may be mutated during the +// conversion and share pointers with the resulting v2 envelope for efficiency +// in creating the v2 envelope. As a result the envelope you pass in should no +// longer be used. 
+func ToV2(e *events.Envelope, usePreferredTags bool) *loggregator_v2.Envelope { + v2e := &loggregator_v2.Envelope{ + Timestamp: e.GetTimestamp(), + } + + initTags(e, v2e, usePreferredTags) + + setV2Tag(v2e, "origin", e.GetOrigin(), usePreferredTags) + setV2Tag(v2e, "deployment", e.GetDeployment(), usePreferredTags) + setV2Tag(v2e, "job", e.GetJob(), usePreferredTags) + setV2Tag(v2e, "index", e.GetIndex(), usePreferredTags) + setV2Tag(v2e, "ip", e.GetIp(), usePreferredTags) + setV2Tag(v2e, "__v1_type", e.GetEventType().String(), usePreferredTags) + + sourceId, ok := e.GetTags()["source_id"] + v2e.SourceId = sourceId + if !ok { + v2e.SourceId = e.GetDeployment() + "/" + e.GetJob() + } + unsetV2Tag(v2e, "source_id") + + switch e.GetEventType() { + case events.Envelope_LogMessage: + convertLogMessage(v2e, e, usePreferredTags) + case events.Envelope_HttpStartStop: + convertHTTPStartStop(v2e, e, usePreferredTags) + case events.Envelope_ValueMetric: + convertValueMetric(v2e, e) + case events.Envelope_CounterEvent: + convertCounterEvent(v2e, e) + case events.Envelope_Error: + convertError(v2e, e, usePreferredTags) + case events.Envelope_ContainerMetric: + convertContainerMetric(v2e, e) + } + + return v2e +} + +// TODO: Do we still need to do an interface? 
+func setV2Tag(e *loggregator_v2.Envelope, key string, value interface{}, usePreferredTags bool) { + if usePreferredTags { + if s, ok := value.(string); ok { + e.GetTags()[key] = s + return + } + + e.GetTags()[key] = fmt.Sprintf("%v", value) + return + } + + e.GetDeprecatedTags()[key] = valueText(fmt.Sprintf("%v", value)) +} + +func unsetV2Tag(e *loggregator_v2.Envelope, key string) { + delete(e.GetDeprecatedTags(), key) + delete(e.GetTags(), key) +} + +func initTags(v1e *events.Envelope, v2e *loggregator_v2.Envelope, usePreferredTags bool) { + if usePreferredTags { + v2e.Tags = make(map[string]string) + for k, v := range v1e.Tags { + v2e.Tags[k] = v + } + + return + } + + v2e.DeprecatedTags = make(map[string]*loggregator_v2.Value) + + for k, v := range v1e.GetTags() { + setV2Tag(v2e, k, v, usePreferredTags) + } +} + +func convertError(v2e *loggregator_v2.Envelope, v1e *events.Envelope, usePreferredTags bool) { + t := v1e.GetError() + setV2Tag(v2e, "source", t.GetSource(), usePreferredTags) + setV2Tag(v2e, "code", t.GetCode(), usePreferredTags) + + v2e.Message = &loggregator_v2.Envelope_Log{ + Log: &loggregator_v2.Log{ + Payload: []byte(t.GetMessage()), + Type: loggregator_v2.Log_OUT, + }, + } +} + +func convertAppUUID(appID *events.UUID, sourceID string) string { + if appID.GetLow() == 0 && appID.GetHigh() == 0 { + return sourceID + } + return uuidToString(appID) +} + +func convertAppID(appID, sourceID string) string { + if appID == "" { + return sourceID + } + return appID +} + +func convertHTTPStartStop(v2e *loggregator_v2.Envelope, v1e *events.Envelope, usePreferredTags bool) { + t := v1e.GetHttpStartStop() + v2e.SourceId = convertAppUUID(t.GetApplicationId(), v2e.SourceId) + v2e.InstanceId = strconv.Itoa(int(t.GetInstanceIndex())) + v2e.Message = &loggregator_v2.Envelope_Timer{ + Timer: &loggregator_v2.Timer{ + Name: "http", + Start: t.GetStartTimestamp(), + Stop: t.GetStopTimestamp(), + }, + } + setV2Tag(v2e, "request_id", uuidToString(t.GetRequestId()), 
usePreferredTags) + setV2Tag(v2e, "peer_type", t.GetPeerType().String(), usePreferredTags) + setV2Tag(v2e, "method", t.GetMethod().String(), usePreferredTags) + setV2Tag(v2e, "uri", t.GetUri(), usePreferredTags) + setV2Tag(v2e, "remote_address", t.GetRemoteAddress(), usePreferredTags) + setV2Tag(v2e, "user_agent", t.GetUserAgent(), usePreferredTags) + setV2Tag(v2e, "status_code", t.GetStatusCode(), usePreferredTags) + setV2Tag(v2e, "content_length", t.GetContentLength(), usePreferredTags) + setV2Tag(v2e, "routing_instance_id", t.GetInstanceId(), usePreferredTags) + setV2Tag(v2e, "forwarded", strings.Join(t.GetForwarded(), "\n"), usePreferredTags) +} + +func convertLogMessageType(t events.LogMessage_MessageType) loggregator_v2.Log_Type { + name := events.LogMessage_MessageType_name[int32(t)] + return loggregator_v2.Log_Type(loggregator_v2.Log_Type_value[name]) +} + +func convertLogMessage(v2e *loggregator_v2.Envelope, e *events.Envelope, usePreferredTags bool) { + t := e.GetLogMessage() + setV2Tag(v2e, "source_type", t.GetSourceType(), usePreferredTags) + v2e.InstanceId = t.GetSourceInstance() + v2e.SourceId = convertAppID(t.GetAppId(), v2e.SourceId) + + v2e.Message = &loggregator_v2.Envelope_Log{ + Log: &loggregator_v2.Log{ + Payload: t.GetMessage(), + Type: convertLogMessageType(t.GetMessageType()), + }, + } +} + +func convertValueMetric(v2e *loggregator_v2.Envelope, e *events.Envelope) { + t := e.GetValueMetric() + v2e.InstanceId = e.GetTags()["instance_id"] + v2e.Message = &loggregator_v2.Envelope_Gauge{ + Gauge: &loggregator_v2.Gauge{ + Metrics: map[string]*loggregator_v2.GaugeValue{ + t.GetName(): { + Unit: t.GetUnit(), + Value: t.GetValue(), + }, + }, + }, + } +} + +func convertCounterEvent(v2e *loggregator_v2.Envelope, e *events.Envelope) { + t := e.GetCounterEvent() + v2e.InstanceId = e.GetTags()["instance_id"] + unsetV2Tag(v2e, "instance_id") + v2e.Message = &loggregator_v2.Envelope_Counter{ + Counter: &loggregator_v2.Counter{ + Name: t.GetName(), + Delta: 
t.GetDelta(), + Total: t.GetTotal(), + }, + } +} + +func convertContainerMetric(v2e *loggregator_v2.Envelope, e *events.Envelope) { + t := e.GetContainerMetric() + v2e.SourceId = convertAppID(t.GetApplicationId(), v2e.SourceId) + v2e.InstanceId = strconv.Itoa(int(t.GetInstanceIndex())) + v2e.Message = &loggregator_v2.Envelope_Gauge{ + Gauge: &loggregator_v2.Gauge{ + Metrics: map[string]*loggregator_v2.GaugeValue{ + "cpu": { + Unit: "percentage", + Value: t.GetCpuPercentage(), + }, + "memory": { + Unit: "bytes", + Value: float64(t.GetMemoryBytes()), + }, + "disk": { + Unit: "bytes", + Value: float64(t.GetDiskBytes()), + }, + "memory_quota": { + Unit: "bytes", + Value: float64(t.GetMemoryBytesQuota()), + }, + "disk_quota": { + Unit: "bytes", + Value: float64(t.GetDiskBytesQuota()), + }, + }, + }, + } +} + +func valueText(s string) *loggregator_v2.Value { + return &loggregator_v2.Value{Data: &loggregator_v2.Value_Text{Text: s}} +} + +func uuidToString(uuid *events.UUID) string { + low := make([]byte, 8) + high := make([]byte, 8) + binary.LittleEndian.PutUint64(low, uuid.GetLow()) + binary.LittleEndian.PutUint64(high, uuid.GetHigh()) + return fmt.Sprintf("%x-%x-%x-%x-%x", low[:4], low[4:6], low[6:], high[:2], high[2:]) +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/doc.go b/vendor/code.cloudfoundry.org/go-loggregator/doc.go new file mode 100644 index 000000000000..b66fd89d48d6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/doc.go @@ -0,0 +1,23 @@ +// Package loggregator provides clients to send data to the Loggregator v1 and +// v2 API. +// +// The v2 API distinguishes itself from the v1 API on three counts: +// +// 1) it uses gRPC, +// 2) it uses a streaming connection, and +// 3) it supports batching to improve performance. +// +// The code here provides a generic interface into the two APIs. 
Clients who +// prefer more fine grained control may generate their own code using the +// protobuf and gRPC service definitions found at: +// github.com/cloudfoundry/loggregator-api. +// +// Note that on account of the client using batching wherein multiple +// messages may be sent at once, there is no meaningful error return value +// available. Each of the methods below make a best-effort at message +// delivery. Even in the event of a failed send, the client will not block +// callers. +// +// In general, use IngressClient for communicating with Loggregator's v2 API. +// For Loggregator's v1 API, see v1/client.go. +package loggregator diff --git a/vendor/code.cloudfoundry.org/go-loggregator/envelope_stream_connector.go b/vendor/code.cloudfoundry.org/go-loggregator/envelope_stream_connector.go new file mode 100644 index 000000000000..d8091dd2e29b --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/envelope_stream_connector.go @@ -0,0 +1,205 @@ +package loggregator + +import ( + "context" + "crypto/tls" + "io/ioutil" + "log" + "time" + + gendiodes "code.cloudfoundry.org/go-diodes" + "code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// EnvelopeStreamConnector provides a way to connect to loggregator and +// consume a stream of envelopes. It handles reconnecting and provides +// a stream for the lifecycle of the given context. It should be created with +// the NewEnvelopeStreamConnector constructor. +type EnvelopeStreamConnector struct { + addr string + tlsConf *tls.Config + + // Buffering + bufferSize int + alerter func(int) + + log Logger + dialOptions []grpc.DialOption +} + +// NewEnvelopeStreamConnector creates a new EnvelopeStreamConnector. Its TLS +// configuration must share a CA with the loggregator server. 
+func NewEnvelopeStreamConnector( + addr string, + t *tls.Config, + opts ...EnvelopeStreamOption, +) *EnvelopeStreamConnector { + + c := &EnvelopeStreamConnector{ + addr: addr, + tlsConf: t, + + log: log.New(ioutil.Discard, "", 0), + } + + for _, o := range opts { + o(c) + } + + return c +} + +// EnvelopeStreamOption configures a EnvelopeStreamConnector. +type EnvelopeStreamOption func(*EnvelopeStreamConnector) + +// WithEnvelopeStreamLogger allows for the configuration of a logger. +// By default, the logger is disabled. +func WithEnvelopeStreamLogger(l Logger) EnvelopeStreamOption { + return func(c *EnvelopeStreamConnector) { + c.log = l + } +} + +// WithEnvelopeStreamConnectorDialOptions allows for configuration of +// grpc dial options. +func WithEnvelopeStreamConnectorDialOptions(opts ...grpc.DialOption) EnvelopeStreamOption { + return func(c *EnvelopeStreamConnector) { + c.dialOptions = opts + } +} + +// WithEnvelopeStreamBuffer enables the EnvelopeStream to read more quickly +// from the stream. It puts each envelope in a buffer that overwrites data if +// it is not being drained quick enough. If the buffer drops data, the +// 'alerter' function will be invoked with the number of envelopes dropped. +func WithEnvelopeStreamBuffer(size int, alerter func(missed int)) EnvelopeStreamOption { + return func(c *EnvelopeStreamConnector) { + c.bufferSize = size + c.alerter = alerter + } +} + +// EnvelopeStream returns batches of envelopes. It blocks until its context +// is done or a batch of envelopes is available. +type EnvelopeStream func() []*loggregator_v2.Envelope + +// Stream returns a new EnvelopeStream for the given context and request. The +// lifecycle of the EnvelopeStream is managed by the given context. If the +// underlying gRPC stream dies, it attempts to reconnect until the context +// is done. 
+func (c *EnvelopeStreamConnector) Stream(ctx context.Context, req *loggregator_v2.EgressBatchRequest) EnvelopeStream { + s := newStream(ctx, c.addr, req, c.tlsConf, c.dialOptions, c.log) + if c.alerter != nil || c.bufferSize > 0 { + d := NewOneToOneEnvelopeBatch( + c.bufferSize, + gendiodes.AlertFunc(c.alerter), + gendiodes.WithPollingContext(ctx), + ) + + go func() { + for { + select { + case <-ctx.Done(): + return + default: + } + + d.Set(s.recv()) + } + }() + return d.Next + } + + return s.recv +} + +type stream struct { + log Logger + ctx context.Context + req *loggregator_v2.EgressBatchRequest + client loggregator_v2.EgressClient + rx loggregator_v2.Egress_BatchedReceiverClient +} + +func newStream( + ctx context.Context, + addr string, + req *loggregator_v2.EgressBatchRequest, + c *tls.Config, + opts []grpc.DialOption, + log Logger, +) *stream { + opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(c))) + conn, err := grpc.Dial( + addr, + opts..., + ) + if err != nil { + // This error occurs on invalid configuration. And more notably, + // it does NOT occur if the server is not up. + log.Panicf("invalid gRPC dial configuration: %s", err) + } + + // Protect against a go-routine leak. gRPC will keep a go-routine active + // within the connection to keep the connectin alive. We have to close + // this or the go-routine leaks. This is untested. We had trouble exposing + // the underlying connectin was still active. 
+ go func() { + <-ctx.Done() + conn.Close() + }() + + client := loggregator_v2.NewEgressClient(conn) + + return &stream{ + ctx: ctx, + req: req, + client: client, + log: log, + } +} + +func (s *stream) recv() []*loggregator_v2.Envelope { + for { + ok := s.connect(s.ctx) + if !ok { + return nil + } + batch, err := s.rx.Recv() + if err != nil { + s.rx = nil + continue + } + + return batch.Batch + } +} + +func (s *stream) connect(ctx context.Context) bool { + for { + select { + case <-ctx.Done(): + return false + default: + if s.rx != nil { + return true + } + + var err error + s.rx, err = s.client.BatchedReceiver( + ctx, + s.req, + ) + + if err != nil { + s.log.Printf("Error connecting to Logs Provider: %s", err) + time.Sleep(50 * time.Millisecond) + continue + } + + return true + } + } +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/go.mod b/vendor/code.cloudfoundry.org/go-loggregator/go.mod new file mode 100644 index 000000000000..753f656bb9cf --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/go.mod @@ -0,0 +1,33 @@ +module code.cloudfoundry.org/go-loggregator + +go 1.12 + +require ( + code.cloudfoundry.org/go-diodes v0.0.0-20180905200951-72629b5276e3 + code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a + github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 // indirect + github.com/cloudfoundry/dropsonde v1.0.0 + github.com/cloudfoundry/gosteno v0.0.0-20150423193413-0c8581caea35 // indirect + github.com/cloudfoundry/loggregatorlib v0.0.0-20170823162133-36eddf15ef12 // indirect + github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4 + github.com/gogo/protobuf v1.2.1 + github.com/golang/protobuf v1.3.2 + github.com/kr/pretty v0.1.0 // indirect + github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect + github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect + github.com/onsi/ginkgo v1.8.0 + github.com/onsi/gomega v1.5.0 + github.com/poy/eachers 
v0.0.0-20181020210610-23942921fe77 // indirect + github.com/prometheus/client_golang v1.0.0 + github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 + github.com/prometheus/common v0.6.0 + github.com/prometheus/procfs v0.0.3 // indirect + golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 + golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 // indirect + golang.org/x/text v0.3.2 // indirect + google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 // indirect + google.golang.org/grpc v1.22.1 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/yaml.v2 v2.2.2 // indirect + launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect +) diff --git a/vendor/code.cloudfoundry.org/go-loggregator/go.sum b/vendor/code.cloudfoundry.org/go-loggregator/go.sum new file mode 100644 index 000000000000..4ce889217e4b --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/go.sum @@ -0,0 +1,158 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +code.cloudfoundry.org/go-diodes v0.0.0-20180905200951-72629b5276e3 h1:oHsfl5AaineZubAUOXg2Vxcdu/TzgN/Q+/65lN70LZk= +code.cloudfoundry.org/go-diodes v0.0.0-20180905200951-72629b5276e3/go.mod h1:Jzi+ccHgo/V/PLQUaQ6hnZcC1c4BS790gx21LRRui4g= +code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a h1:8rqv2w8xEceNwckcF5ONeRt0qBHlh5bnNfFnYTrZbxs= +code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a/go.mod h1:tkZo8GtzBjySJ7USvxm4E36lNQw1D3xM6oKHGqdaAJ4= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 h1:afT88tB6u9JCKQZVAAaa9ICz/uGn5Uw9ekn6P22mYKM= +github.com/apoydence/eachers 
v0.0.0-20181020210610-23942921fe77/go.mod h1:bXvGk6IkT1Agy7qzJ+DjIw/SJ1AaB3AvAuMDVV+Vkoo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudfoundry/dropsonde v1.0.0 h1:9MT6WFmhU96fQjhTiglx4b1X3ObNjk/Sze7KPntNitE= +github.com/cloudfoundry/dropsonde v1.0.0/go.mod h1:6zwvrWK5TpxBVYi1cdkE5WDsIO8E0n7qAJg3wR9B67c= +github.com/cloudfoundry/gosteno v0.0.0-20150423193413-0c8581caea35 h1:HdAWGlVEbFxuALqHXYu14XvAbRbyWZLd817ojygGnk0= +github.com/cloudfoundry/gosteno v0.0.0-20150423193413-0c8581caea35/go.mod h1:3YBPUR85RIrvaUTdA1dL38YSp6s3OHu1xrWLkGt2Mog= +github.com/cloudfoundry/loggregatorlib v0.0.0-20170823162133-36eddf15ef12 h1:A+SRy/ndY6QSrN+bVWIhKDLU4t7OLFWVlLPxUWL/oGE= +github.com/cloudfoundry/loggregatorlib v0.0.0-20170823162133-36eddf15ef12/go.mod h1:ucj7+svyACshmxV3Zze2NAcEcdbBf9scZYR+QKCX9/w= +github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4 h1:cWfya7mo/zbnwYVio6eWGsFJHqYw4/k/uhwIJ1eqRPI= +github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4/go.mod h1:GS0pCHd7onIsewbw8Ue9qa9pZPv2V88cUZDttK6KzgI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt 
v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 h1:SNdqPRvRsVmYR0gKqFvrUKhFizPJ6yDiGQ++VAJIoDg= +github.com/poy/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:x1vqpbcMW9T/KRcQ4b48diSiSVtYgvwQ5xzDByEg4WE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 h1:LepdCS8Gf/MVejFIt8lsiexZATdoGVyp5bcyS+rYoUI= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= diff --git a/vendor/code.cloudfoundry.org/go-loggregator/ingress_client.go b/vendor/code.cloudfoundry.org/go-loggregator/ingress_client.go new file mode 100644 index 000000000000..1e456f3e51e3 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/ingress_client.go @@ -0,0 +1,592 @@ +package loggregator + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "log" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2" +) + +// IngressOption is the type of a configurable client option. +type IngressOption func(*IngressClient) + +func WithDialOptions(opts ...grpc.DialOption) IngressOption { + return func(c *IngressClient) { + c.dialOpts = append(c.dialOpts, opts...) 
+ } +} + +// WithTag allows for the configuration of arbitrary string value +// metadata which will be included in all data sent to Loggregator +func WithTag(name, value string) IngressOption { + return func(c *IngressClient) { + c.tags[name] = value + } +} + +// WithBatchMaxSize allows for the configuration of the number of messages to +// collect before emitting them into loggregator. By default, its value is 100 +// messages. +// +// Note that aside from batch size, messages will be flushed from +// the client into loggregator at a fixed interval to ensure messages are not +// held for an undue amount of time before being sent. In other words, even if +// the client has not yet achieved the maximum batch size, the batch interval +// may trigger the messages to be sent. +func WithBatchMaxSize(maxSize uint) IngressOption { + return func(c *IngressClient) { + c.batchMaxSize = maxSize + } +} + +// WithBatchFlushInterval allows for the configuration of the maximum time to +// wait before sending a batch of messages. Note that the batch interval +// may be triggered prior to the batch reaching the configured maximum size. +func WithBatchFlushInterval(d time.Duration) IngressOption { + return func(c *IngressClient) { + c.batchFlushInterval = d + } +} + +// WithAddr allows for the configuration of the loggregator v2 address. +// The value to defaults to localhost:3458, which happens to be the default +// address in the loggregator server. +func WithAddr(addr string) IngressOption { + return func(c *IngressClient) { + c.addr = addr + } +} + +// Logger declares the minimal logging interface used within the v2 client +type Logger interface { + Printf(string, ...interface{}) + Panicf(string, ...interface{}) +} + +// WithLogger allows for the configuration of a logger. +// By default, the logger is disabled. 
+func WithLogger(l Logger) IngressOption { + return func(c *IngressClient) { + c.logger = l + } +} + +// WithContext configures the context that manages the lifecycle for the gRPC +// connection. It defaults to a context.Background(). +func WithContext(ctx context.Context) IngressOption { + return func(c *IngressClient) { + c.ctx = ctx + } +} + +// IngressClient represents an emitter into loggregator. It should be created with the +// NewIngressClient constructor. +type IngressClient struct { + client loggregator_v2.IngressClient + sender loggregator_v2.Ingress_BatchSenderClient + + envelopes chan *loggregator_v2.Envelope + tags map[string]string + + batchMaxSize uint + batchFlushInterval time.Duration + addr string + + dialOpts []grpc.DialOption + + logger Logger + + closeErrors chan error + + ctx context.Context + cancel func() +} + +// NewIngressClient creates a v2 loggregator client. Its TLS configuration +// must share a CA with the loggregator server. +func NewIngressClient(tlsConfig *tls.Config, opts ...IngressOption) (*IngressClient, error) { + c := &IngressClient{ + envelopes: make(chan *loggregator_v2.Envelope, 100), + tags: make(map[string]string), + batchMaxSize: 100, + batchFlushInterval: 100 * time.Millisecond, + addr: "localhost:3458", + logger: log.New(ioutil.Discard, "", 0), + closeErrors: make(chan error), + ctx: context.Background(), + } + + for _, o := range opts { + o(c) + } + + c.ctx, c.cancel = context.WithCancel(c.ctx) + + c.dialOpts = append(c.dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + + conn, err := grpc.Dial( + c.addr, + c.dialOpts..., + ) + if err != nil { + return nil, err + } + c.client = loggregator_v2.NewIngressClient(conn) + + go c.startSender() + + return c, nil +} + +// protoEditor is required for v1 envelopes. It should be removed once v1 +// is removed. It is necessary to prevent any v1 dependency in the v2 path. 
+type protoEditor interface { + SetLogAppInfo(appID, sourceType, sourceInstance string) + SetGaugeAppInfo(appID string, index int) + SetCounterAppInfo(appID string, index int) + SetSourceInfo(sourceID, instanceID string) + SetLogToStdout() + SetGaugeValue(name string, value float64, unit string) + SetDelta(d uint64) + SetTotal(t uint64) + SetTag(name, value string) +} + +// EmitLogOption is the option type passed into EmitLog +type EmitLogOption func(proto.Message) + +// WithAppInfo configures the meta data associated with emitted data. Exists +// for backward compatability. If possible, use WithSourceInfo instead. +func WithAppInfo(appID, sourceType, sourceInstance string) EmitLogOption { + return WithSourceInfo(appID, sourceType, sourceInstance) +} + +// WithSourceInfo configures the meta data associated with emitted data +func WithSourceInfo(sourceID, sourceType, sourceInstance string) EmitLogOption { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + e.SourceId = sourceID + e.InstanceId = sourceInstance + e.Tags["source_type"] = sourceType + case protoEditor: + e.SetLogAppInfo(sourceID, sourceType, sourceInstance) + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} + +// WithStdout sets the output type to stdout. Without using this option, +// all data is assumed to be stderr output. +func WithStdout() EmitLogOption { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + e.GetLog().Type = loggregator_v2.Log_OUT + case protoEditor: + e.SetLogToStdout() + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} + +// EmitLog sends a message to loggregator. 
+func (c *IngressClient) EmitLog(message string, opts ...EmitLogOption) { + e := &loggregator_v2.Envelope{ + Timestamp: time.Now().UnixNano(), + Message: &loggregator_v2.Envelope_Log{ + Log: &loggregator_v2.Log{ + Payload: []byte(message), + Type: loggregator_v2.Log_ERR, + }, + }, + Tags: make(map[string]string), + } + + for k, v := range c.tags { + e.Tags[k] = v + } + + for _, o := range opts { + o(e) + } + + c.envelopes <- e +} + +// EmitGaugeOption is the option type passed into EmitGauge. +type EmitGaugeOption func(proto.Message) + +// WithGaugeAppInfo configures an envelope with both the app ID and index. +// Exists for backward compatability. If possible, use WithGaugeSourceInfo +// instead. +func WithGaugeAppInfo(appID string, index int) EmitGaugeOption { + return WithGaugeSourceInfo(appID, strconv.Itoa(index)) +} + +// WithGaugeSourceInfo configures an envelope with both the source ID and +// instance ID. +func WithGaugeSourceInfo(sourceID, instanceID string) EmitGaugeOption { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + e.SourceId = sourceID + e.InstanceId = instanceID + case protoEditor: + e.SetSourceInfo(sourceID, instanceID) + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} + +// WithGaugeValue adds a gauge information. For example, +// to send information about current CPU usage, one might use: +// +// WithGaugeValue("cpu", 3.0, "percent") +// +// An number of calls to WithGaugeValue may be passed into EmitGauge. +// If there are duplicate names in any of the options, i.e., "cpu" and "cpu", +// then the last EmitGaugeOption will take precedence. 
+func WithGaugeValue(name string, value float64, unit string) EmitGaugeOption { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + e.GetGauge().Metrics[name] = &loggregator_v2.GaugeValue{Value: value, Unit: unit} + case protoEditor: + e.SetGaugeValue(name, value, unit) + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} + +// EmitGauge sends the configured gauge values to loggregator. +// If no EmitGaugeOption values are present, the client will emit +// an empty gauge. +func (c *IngressClient) EmitGauge(opts ...EmitGaugeOption) { + e := &loggregator_v2.Envelope{ + Timestamp: time.Now().UnixNano(), + Message: &loggregator_v2.Envelope_Gauge{ + Gauge: &loggregator_v2.Gauge{ + Metrics: make(map[string]*loggregator_v2.GaugeValue), + }, + }, + Tags: make(map[string]string), + } + + for k, v := range c.tags { + e.Tags[k] = v + } + + for _, o := range opts { + o(e) + } + + c.envelopes <- e +} + +// EmitCounterOption is the option type passed into EmitCounter. +type EmitCounterOption func(proto.Message) + +// WithDelta is an option that sets the delta for a counter. +func WithDelta(d uint64) EmitCounterOption { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + e.GetCounter().Delta = d + case protoEditor: + e.SetDelta(d) + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} + +// WithTotal is an option that sets the total for a counter. +func WithTotal(t uint64) EmitCounterOption { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + e.GetCounter().Total = t + e.GetCounter().Delta = 0 + case protoEditor: + e.SetTotal(t) + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} + +// WithCounterAppInfo configures an envelope with both the app ID and index. +// Exists for backward compatability. If possible, use WithCounterSourceInfo +// instead. 
+func WithCounterAppInfo(appID string, index int) EmitCounterOption { + return WithCounterSourceInfo(appID, strconv.Itoa(index)) +} + +// WithCounterSourceInfo configures an envelope with both the app ID and +// source ID. +func WithCounterSourceInfo(sourceID, instanceID string) EmitCounterOption { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + e.SourceId = sourceID + e.InstanceId = instanceID + case protoEditor: + e.SetSourceInfo(sourceID, instanceID) + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} + +// EmitCounter sends a counter envelope with a delta of 1. +func (c *IngressClient) EmitCounter(name string, opts ...EmitCounterOption) { + e := &loggregator_v2.Envelope{ + Timestamp: time.Now().UnixNano(), + Message: &loggregator_v2.Envelope_Counter{ + Counter: &loggregator_v2.Counter{ + Name: name, + Delta: uint64(1), + }, + }, + Tags: make(map[string]string), + } + + for k, v := range c.tags { + e.Tags[k] = v + } + + for _, o := range opts { + o(e) + } + + c.envelopes <- e +} + +// EmitTimerOption is the option type passed into EmitTimer. +type EmitTimerOption func(proto.Message) + +// WithTimerSourceInfo configures an envelope with both the source and instance +// IDs. +func WithTimerSourceInfo(sourceID, instanceID string) EmitTimerOption { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + e.SourceId = sourceID + e.InstanceId = instanceID + case protoEditor: + e.SetSourceInfo(sourceID, instanceID) + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} + +// EmitTimer sends a timer envelope with the given name, start time and stop time. 
+func (c *IngressClient) EmitTimer(name string, start, stop time.Time, opts ...EmitTimerOption) { + e := &loggregator_v2.Envelope{ + Timestamp: time.Now().UnixNano(), + Message: &loggregator_v2.Envelope_Timer{ + Timer: &loggregator_v2.Timer{ + Name: name, + Start: start.UnixNano(), + Stop: stop.UnixNano(), + }, + }, + Tags: make(map[string]string), + } + + for k, v := range c.tags { + e.Tags[k] = v + } + + for _, o := range opts { + o(e) + } + + c.envelopes <- e +} + +// EmitEventOption is the option type passed into EmitEvent. +type EmitEventOption func(proto.Message) + +// WithEventSourceInfo configures an envelope with both the source and instance +// IDs. +func WithEventSourceInfo(sourceID, instanceID string) EmitEventOption { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + e.SourceId = sourceID + e.InstanceId = instanceID + case protoEditor: + e.SetSourceInfo(sourceID, instanceID) + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} + +// EmitEvent sends an Event envelope. +func (c *IngressClient) EmitEvent(ctx context.Context, title, body string, opts ...EmitEventOption) error { + e := &loggregator_v2.Envelope{ + Timestamp: time.Now().UnixNano(), + Message: &loggregator_v2.Envelope_Event{ + Event: &loggregator_v2.Event{ + Title: title, + Body: body, + }, + }, + Tags: make(map[string]string), + } + + for k, v := range c.tags { + e.Tags[k] = v + } + + for _, o := range opts { + o(e) + } + + _, err := c.client.Send(ctx, &loggregator_v2.EnvelopeBatch{ + Batch: []*loggregator_v2.Envelope{e}, + }) + + return err +} + +// Emit sends an envelope. It will sent within a batch. +func (c *IngressClient) Emit(e *loggregator_v2.Envelope) { + c.envelopes <- e +} + +// CloseSend will flush the envelope buffers and close the stream to the +// ingress server. This method will block until the buffers are flushed. 
+func (c *IngressClient) CloseSend() error { + close(c.envelopes) + + return <-c.closeErrors +} + +func (c *IngressClient) startSender() { + defer c.cancel() + + t := time.NewTimer(c.batchFlushInterval) + + var batch []*loggregator_v2.Envelope + for { + select { + case env, ok := <-c.envelopes: + if !ok { + if len(batch) > 0 { + err := c.flush(batch) + c.closeAndRecv() + c.closeErrors <- err + return + } + + c.closeAndRecv() + c.closeErrors <- nil + + return + } + + batch = append(batch, env) + + if len(batch) >= int(c.batchMaxSize) { + c.flush(batch) + batch = nil + if !t.Stop() { + <-t.C + } + t.Reset(c.batchFlushInterval) + } + case <-t.C: + if len(batch) > 0 { + c.flush(batch) + batch = nil + } + t.Reset(c.batchFlushInterval) + } + } +} + +func (c *IngressClient) closeAndRecv() { + if c.sender == nil { + return + } + c.sender.CloseAndRecv() +} + +func (c *IngressClient) flush(batch []*loggregator_v2.Envelope) error { + err := c.emit(batch) + if err != nil { + c.logger.Printf("Error while flushing: %s", err) + } + + return err +} + +func (c *IngressClient) emit(batch []*loggregator_v2.Envelope) error { + if c.sender == nil { + var err error + c.sender, err = c.client.BatchSender(c.ctx) + if err != nil { + return err + } + } + + err := c.sender.Send(&loggregator_v2.EnvelopeBatch{Batch: batch}) + if err != nil { + c.sender = nil + return err + } + + return nil +} + +// WithEnvelopeTag adds a tag to the envelope. +func WithEnvelopeTag(name, value string) func(proto.Message) { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + e.Tags[name] = value + case protoEditor: + e.SetTag(name, value) + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} + +// WithEnvelopeTags adds tag information that can be text, integer, or decimal to +// the envelope. WithEnvelopeTags expects a single call with a complete map +// and will overwrite if called a second time. 
+func WithEnvelopeTags(tags map[string]string) func(proto.Message) { + return func(m proto.Message) { + switch e := m.(type) { + case *loggregator_v2.Envelope: + for name, value := range tags { + e.Tags[name] = value + } + case protoEditor: + for name, value := range tags { + e.SetTag(name, value) + } + default: + panic(fmt.Sprintf("unsupported Message type: %T", m)) + } + } +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/one_to_one_envelope_batch_diode.go b/vendor/code.cloudfoundry.org/go-loggregator/one_to_one_envelope_batch_diode.go new file mode 100644 index 000000000000..a01fc6aba4c7 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/one_to_one_envelope_batch_diode.go @@ -0,0 +1,46 @@ +package loggregator + +import ( + "code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2" + + gendiodes "code.cloudfoundry.org/go-diodes" +) + +// OneToOneEnvelopeBatch diode is optimized for a single writer and a single reader +type OneToOneEnvelopeBatch struct { + d *gendiodes.Poller +} + +// NewOneToOneEnvelopeBatch initializes a new one to one diode for envelope +// batches of a given size and alerter. The alerter is called whenever data is +// dropped with an integer representing the number of envelope batches that +// were dropped. +func NewOneToOneEnvelopeBatch(size int, alerter gendiodes.Alerter, opts ...gendiodes.PollerConfigOption) *OneToOneEnvelopeBatch { + return &OneToOneEnvelopeBatch{ + d: gendiodes.NewPoller(gendiodes.NewOneToOne(size, alerter), opts...), + } +} + +// Set inserts the given V2 envelope into the diode. +func (d *OneToOneEnvelopeBatch) Set(data []*loggregator_v2.Envelope) { + d.d.Set(gendiodes.GenericDataType(&data)) +} + +// TryNext returns the next envelope batch to be read from the diode. If the +// diode is empty it will return a nil envelope and false for the bool. 
+func (d *OneToOneEnvelopeBatch) TryNext() ([]*loggregator_v2.Envelope, bool) { + data, ok := d.d.TryNext() + if !ok { + return nil, ok + } + + return *(*[]*loggregator_v2.Envelope)(data), true +} + +// Next will return the next envelope batch to be read from the diode. If the +// diode is empty this method will block until anenvelope is available to be +// read. +func (d *OneToOneEnvelopeBatch) Next() []*loggregator_v2.Envelope { + data := d.d.Next() + return *(*[]*loggregator_v2.Envelope)(data) +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/rlp_gateway_client.go b/vendor/code.cloudfoundry.org/go-loggregator/rlp_gateway_client.go new file mode 100644 index 000000000000..99f6315284c1 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/rlp_gateway_client.go @@ -0,0 +1,275 @@ +package loggregator + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "strings" + "time" + + "code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2" + "github.com/golang/protobuf/jsonpb" + "golang.org/x/net/context" +) + +type RLPGatewayClient struct { + addr string + log *log.Logger + doer Doer +} + +func NewRLPGatewayClient(addr string, opts ...RLPGatewayClientOption) *RLPGatewayClient { + c := &RLPGatewayClient{ + addr: addr, + log: log.New(ioutil.Discard, "", 0), + doer: http.DefaultClient, + } + + for _, o := range opts { + o(c) + } + + return c +} + +// RLPGatewayClientOption is the type of a configurable client option. +type RLPGatewayClientOption func(*RLPGatewayClient) + +// WithRLPGatewayClientLogger returns a RLPGatewayClientOption to configure +// the logger of the RLPGatewayClient. It defaults to a silent logger. +func WithRLPGatewayClientLogger(log *log.Logger) RLPGatewayClientOption { + return func(c *RLPGatewayClient) { + c.log = log + } +} + +// WithRLPGatewayClientLogger returns a RLPGatewayClientOption to configure +// the HTTP client. It defaults to the http.DefaultClient. 
+func WithRLPGatewayHTTPClient(d Doer) RLPGatewayClientOption { + return func(c *RLPGatewayClient) { + c.doer = d + } +} + +// Doer is used to make HTTP requests to the RLP Gateway. +type Doer interface { + // Do is a implementation of the http.Client's Do method. + Do(*http.Request) (*http.Response, error) +} + +// Stream returns a new EnvelopeStream for the given context and request. The +// lifecycle of the EnvelopeStream is managed by the given context. If the +// underlying SSE stream dies, it attempts to reconnect until the context +// is done. Any errors are logged via the client's logger. +func (c *RLPGatewayClient) Stream(ctx context.Context, req *loggregator_v2.EgressBatchRequest) EnvelopeStream { + es := make(chan *loggregator_v2.Envelope, 100) + go func() { + defer close(es) + for ctx.Err() == nil { + c.connect(ctx, es, req) + } + }() + + return func() []*loggregator_v2.Envelope { + var batch []*loggregator_v2.Envelope + for { + select { + case <-ctx.Done(): + return nil + case e, ok := <-es: + if !ok { + return nil + } + batch = append(batch, e) + default: + if len(batch) > 0 { + return batch + } + + time.Sleep(50 * time.Millisecond) + } + } + } +} + +func (c *RLPGatewayClient) connect( + ctx context.Context, + es chan<- *loggregator_v2.Envelope, + logReq *loggregator_v2.EgressBatchRequest, +) { + readAddr := fmt.Sprintf("%s/v2/read%s", c.addr, c.buildQuery(logReq)) + + req, err := http.NewRequest(http.MethodGet, readAddr, nil) + if err != nil { + c.log.Panicf("failed to build request %s", err) + } + req.Header.Set("Accept", "text/event-stream") + req.Header.Set("Cache-Control", "no-cache") + + resp, err := c.doer.Do(req.WithContext(ctx)) + if err != nil { + c.log.Printf("error making request: %s", err) + return + } + + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() + + if resp.StatusCode != http.StatusOK { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + c.log.Printf("failed to read body: %s", err) + return + 
} + c.log.Printf("unexpected status code %d: %s", resp.StatusCode, body) + return + } + + buf := bytes.NewBuffer(nil) + reader := bufio.NewReader(resp.Body) + for { + line, err := reader.ReadBytes('\n') + if err != nil { + c.log.Printf("failed while reading stream: %s", err) + return + } + + switch { + case bytes.HasPrefix(line, []byte("heartbeat: ")): + // TODO: Remove this old case + continue + case bytes.HasPrefix(line, []byte("event: closing")): + return + case bytes.HasPrefix(line, []byte("event: heartbeat")): + // Throw away the data of the heartbeat event and the next + // newline. + _, _ = reader.ReadBytes('\n') + _, _ = reader.ReadBytes('\n') + continue + case bytes.HasPrefix(line, []byte("data: ")): + buf.Write(line[len("data: "):]) + case bytes.Equal(line, []byte("\n")): + if buf.Len() == 0 { + continue + } + + var eb loggregator_v2.EnvelopeBatch + if err := jsonpb.Unmarshal(buf, &eb); err != nil { + c.log.Printf("failed to unmarshal envelope: %s", err) + continue + } + + for _, e := range eb.Batch { + select { + case <-ctx.Done(): + return + case es <- e: + } + } + } + + } +} + +func (c *RLPGatewayClient) buildQuery(req *loggregator_v2.EgressBatchRequest) string { + var query []string + if req.GetShardId() != "" { + query = append(query, "shard_id="+req.GetShardId()) + } + + if req.GetDeterministicName() != "" { + query = append(query, "deterministic_name="+req.GetDeterministicName()) + } + + for _, selector := range req.GetSelectors() { + if selector.GetSourceId() != "" { + query = append(query, "source_id="+selector.GetSourceId()) + } + + switch selector.Message.(type) { + case *loggregator_v2.Selector_Log: + query = append(query, "log") + case *loggregator_v2.Selector_Counter: + if selector.GetCounter().GetName() != "" { + query = append(query, "counter.name="+selector.GetCounter().GetName()) + continue + } + query = append(query, "counter") + case *loggregator_v2.Selector_Gauge: + if len(selector.GetGauge().GetNames()) > 1 { + // TODO: This is a 
mistake in the gateway. + panic("This is not yet supported") + } + + if len(selector.GetGauge().GetNames()) != 0 { + query = append(query, "gauge.name="+selector.GetGauge().GetNames()[0]) + continue + } + query = append(query, "gauge") + case *loggregator_v2.Selector_Timer: + query = append(query, "timer") + case *loggregator_v2.Selector_Event: + query = append(query, "event") + } + } + + namedCounter := containsPrefix(query, "counter.name") + namedGauge := containsPrefix(query, "gauge.name") + + if namedCounter { + query = filter(query, "counter") + } + + if namedGauge { + query = filter(query, "gauge") + } + + query = removeDuplicateSourceIDs(query) + if len(query) == 0 { + return "" + } + + return "?" + strings.Join(query, "&") +} + +func removeDuplicateSourceIDs(query []string) []string { + sids := map[string]bool{} + duplicates := 0 + for i, j := 0, 0; i < len(query); i++ { + if strings.HasPrefix(query[i], "source_id=") && sids[query[i]] { + // Duplicate source ID + duplicates++ + continue + } + sids[query[i]] = true + query[j] = query[i] + j++ + } + + return query[:len(query)-duplicates] +} + +func containsPrefix(arr []string, prefix string) bool { + for _, i := range arr { + if strings.HasPrefix(i, prefix) { + return true + } + } + return false +} + +func filter(arr []string, target string) []string { + var filtered []string + for _, i := range arr { + if i != target { + filtered = append(filtered, i) + } + } + return filtered +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/doc.go b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/doc.go new file mode 100644 index 000000000000..2187e0bf0e19 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/doc.go @@ -0,0 +1,3 @@ +package loggregator_v2 + +//go:generate ./generate.sh diff --git a/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/egress.pb.go b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/egress.pb.go 
new file mode 100644 index 000000000000..21aa90c5e63c --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/egress.pb.go @@ -0,0 +1,828 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: egress.proto + +package loggregator_v2 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EgressRequest struct { + // shard_id instructs Loggregator to shard envelopes between other + // subscriptions with the same shard_id. Loggregator will do its best to + // split the load evenly between subscriptions with the same shard_id + // (unless deterministic_name is set). + ShardId string `protobuf:"bytes,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + // deterministic_name is used to enable deterministic routing. This implies + // that gauges and counters are routed based on name. If this is excluded, + // then they are routed to split load evenly. + DeterministicName string `protobuf:"bytes,5,opt,name=deterministic_name,json=deterministicName,proto3" json:"deterministic_name,omitempty"` + // TODO: This can be removed once selector has been around long enough. + LegacySelector *Selector `protobuf:"bytes,2,opt,name=legacy_selector,json=legacySelector,proto3" json:"legacy_selector,omitempty"` + // selector is the preferred (over legacy_selector) mechanism to select + // what envelope types the subscription wants. 
If there are no selectors + // given, no data will be sent. + Selectors []*Selector `protobuf:"bytes,4,rep,name=selectors,proto3" json:"selectors,omitempty"` + // TODO: This can be removed once the envelope.deprecated_tags is removed. + UsePreferredTags bool `protobuf:"varint,3,opt,name=use_preferred_tags,json=usePreferredTags,proto3" json:"use_preferred_tags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EgressRequest) Reset() { *m = EgressRequest{} } +func (m *EgressRequest) String() string { return proto.CompactTextString(m) } +func (*EgressRequest) ProtoMessage() {} +func (*EgressRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_egress_fcae6bb65dce0d2e, []int{0} +} +func (m *EgressRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EgressRequest.Unmarshal(m, b) +} +func (m *EgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EgressRequest.Marshal(b, m, deterministic) +} +func (dst *EgressRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressRequest.Merge(dst, src) +} +func (m *EgressRequest) XXX_Size() int { + return xxx_messageInfo_EgressRequest.Size(m) +} +func (m *EgressRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EgressRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressRequest proto.InternalMessageInfo + +func (m *EgressRequest) GetShardId() string { + if m != nil { + return m.ShardId + } + return "" +} + +func (m *EgressRequest) GetDeterministicName() string { + if m != nil { + return m.DeterministicName + } + return "" +} + +func (m *EgressRequest) GetLegacySelector() *Selector { + if m != nil { + return m.LegacySelector + } + return nil +} + +func (m *EgressRequest) GetSelectors() []*Selector { + if m != nil { + return m.Selectors + } + return nil +} + +func (m *EgressRequest) GetUsePreferredTags() bool { + if m != nil { + return m.UsePreferredTags + } + return false 
+} + +type EgressBatchRequest struct { + // shard_id instructs Loggregator to shard envelopes between other + // subscriptions with the same shard_id. Loggregator will do its best to + // split the load evenly between subscriptions with the same shard_id + // (unless deterministic_name is set). + ShardId string `protobuf:"bytes,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + // deterministic_name is used to enable deterministic routing. This implies + // that gauges and counters are routed based on name. If this is excluded, + // then they are routed to split load evenly. + DeterministicName string `protobuf:"bytes,5,opt,name=deterministic_name,json=deterministicName,proto3" json:"deterministic_name,omitempty"` + // TODO: This can be removed once selector has been around long enough. + LegacySelector *Selector `protobuf:"bytes,2,opt,name=legacy_selector,json=legacySelector,proto3" json:"legacy_selector,omitempty"` + // selector is the preferred (over legacy_selector) mechanism to select + // what envelope types the subscription wants. If there are no selectors + // given, no data will be sent. + Selectors []*Selector `protobuf:"bytes,4,rep,name=selectors,proto3" json:"selectors,omitempty"` + // TODO: This can be removed once the envelope.deprecated_tags is removed. 
+ UsePreferredTags bool `protobuf:"varint,3,opt,name=use_preferred_tags,json=usePreferredTags,proto3" json:"use_preferred_tags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EgressBatchRequest) Reset() { *m = EgressBatchRequest{} } +func (m *EgressBatchRequest) String() string { return proto.CompactTextString(m) } +func (*EgressBatchRequest) ProtoMessage() {} +func (*EgressBatchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_egress_fcae6bb65dce0d2e, []int{1} +} +func (m *EgressBatchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EgressBatchRequest.Unmarshal(m, b) +} +func (m *EgressBatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EgressBatchRequest.Marshal(b, m, deterministic) +} +func (dst *EgressBatchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressBatchRequest.Merge(dst, src) +} +func (m *EgressBatchRequest) XXX_Size() int { + return xxx_messageInfo_EgressBatchRequest.Size(m) +} +func (m *EgressBatchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EgressBatchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressBatchRequest proto.InternalMessageInfo + +func (m *EgressBatchRequest) GetShardId() string { + if m != nil { + return m.ShardId + } + return "" +} + +func (m *EgressBatchRequest) GetDeterministicName() string { + if m != nil { + return m.DeterministicName + } + return "" +} + +func (m *EgressBatchRequest) GetLegacySelector() *Selector { + if m != nil { + return m.LegacySelector + } + return nil +} + +func (m *EgressBatchRequest) GetSelectors() []*Selector { + if m != nil { + return m.Selectors + } + return nil +} + +func (m *EgressBatchRequest) GetUsePreferredTags() bool { + if m != nil { + return m.UsePreferredTags + } + return false +} + +// Selector instructs Loggregator to only send envelopes that match the given +// criteria. 
+type Selector struct { + SourceId string `protobuf:"bytes,1,opt,name=source_id,json=sourceId,proto3" json:"source_id,omitempty"` + // Types that are valid to be assigned to Message: + // *Selector_Log + // *Selector_Counter + // *Selector_Gauge + // *Selector_Timer + // *Selector_Event + Message isSelector_Message `protobuf_oneof:"Message"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Selector) Reset() { *m = Selector{} } +func (m *Selector) String() string { return proto.CompactTextString(m) } +func (*Selector) ProtoMessage() {} +func (*Selector) Descriptor() ([]byte, []int) { + return fileDescriptor_egress_fcae6bb65dce0d2e, []int{2} +} +func (m *Selector) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Selector.Unmarshal(m, b) +} +func (m *Selector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Selector.Marshal(b, m, deterministic) +} +func (dst *Selector) XXX_Merge(src proto.Message) { + xxx_messageInfo_Selector.Merge(dst, src) +} +func (m *Selector) XXX_Size() int { + return xxx_messageInfo_Selector.Size(m) +} +func (m *Selector) XXX_DiscardUnknown() { + xxx_messageInfo_Selector.DiscardUnknown(m) +} + +var xxx_messageInfo_Selector proto.InternalMessageInfo + +func (m *Selector) GetSourceId() string { + if m != nil { + return m.SourceId + } + return "" +} + +type isSelector_Message interface { + isSelector_Message() +} + +type Selector_Log struct { + Log *LogSelector `protobuf:"bytes,2,opt,name=log,proto3,oneof"` +} + +type Selector_Counter struct { + Counter *CounterSelector `protobuf:"bytes,3,opt,name=counter,proto3,oneof"` +} + +type Selector_Gauge struct { + Gauge *GaugeSelector `protobuf:"bytes,4,opt,name=gauge,proto3,oneof"` +} + +type Selector_Timer struct { + Timer *TimerSelector `protobuf:"bytes,5,opt,name=timer,proto3,oneof"` +} + +type Selector_Event struct { + Event *EventSelector 
`protobuf:"bytes,6,opt,name=event,proto3,oneof"` +} + +func (*Selector_Log) isSelector_Message() {} + +func (*Selector_Counter) isSelector_Message() {} + +func (*Selector_Gauge) isSelector_Message() {} + +func (*Selector_Timer) isSelector_Message() {} + +func (*Selector_Event) isSelector_Message() {} + +func (m *Selector) GetMessage() isSelector_Message { + if m != nil { + return m.Message + } + return nil +} + +func (m *Selector) GetLog() *LogSelector { + if x, ok := m.GetMessage().(*Selector_Log); ok { + return x.Log + } + return nil +} + +func (m *Selector) GetCounter() *CounterSelector { + if x, ok := m.GetMessage().(*Selector_Counter); ok { + return x.Counter + } + return nil +} + +func (m *Selector) GetGauge() *GaugeSelector { + if x, ok := m.GetMessage().(*Selector_Gauge); ok { + return x.Gauge + } + return nil +} + +func (m *Selector) GetTimer() *TimerSelector { + if x, ok := m.GetMessage().(*Selector_Timer); ok { + return x.Timer + } + return nil +} + +func (m *Selector) GetEvent() *EventSelector { + if x, ok := m.GetMessage().(*Selector_Event); ok { + return x.Event + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Selector) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Selector_OneofMarshaler, _Selector_OneofUnmarshaler, _Selector_OneofSizer, []interface{}{ + (*Selector_Log)(nil), + (*Selector_Counter)(nil), + (*Selector_Gauge)(nil), + (*Selector_Timer)(nil), + (*Selector_Event)(nil), + } +} + +func _Selector_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Selector) + // Message + switch x := m.Message.(type) { + case *Selector_Log: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Log); err != nil { + return err + } + case *Selector_Counter: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Counter); err != nil { + return err + } + case *Selector_Gauge: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Gauge); err != nil { + return err + } + case *Selector_Timer: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Timer); err != nil { + return err + } + case *Selector_Event: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Event); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Selector.Message has unexpected type %T", x) + } + return nil +} + +func _Selector_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Selector) + switch tag { + case 2: // Message.log + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(LogSelector) + err := b.DecodeMessage(msg) + m.Message = &Selector_Log{msg} + return true, err + case 3: // Message.counter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CounterSelector) + err := b.DecodeMessage(msg) + m.Message = &Selector_Counter{msg} + return true, err + case 4: // Message.gauge + if wire != proto.WireBytes { + 
return true, proto.ErrInternalBadWireType + } + msg := new(GaugeSelector) + err := b.DecodeMessage(msg) + m.Message = &Selector_Gauge{msg} + return true, err + case 5: // Message.timer + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TimerSelector) + err := b.DecodeMessage(msg) + m.Message = &Selector_Timer{msg} + return true, err + case 6: // Message.event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(EventSelector) + err := b.DecodeMessage(msg) + m.Message = &Selector_Event{msg} + return true, err + default: + return false, nil + } +} + +func _Selector_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Selector) + // Message + switch x := m.Message.(type) { + case *Selector_Log: + s := proto.Size(x.Log) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Selector_Counter: + s := proto.Size(x.Counter) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Selector_Gauge: + s := proto.Size(x.Gauge) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Selector_Timer: + s := proto.Size(x.Timer) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Selector_Event: + s := proto.Size(x.Event) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// LogSelector instructs Loggregator to egress Log envelopes to the given +// subscription. 
+type LogSelector struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogSelector) Reset() { *m = LogSelector{} } +func (m *LogSelector) String() string { return proto.CompactTextString(m) } +func (*LogSelector) ProtoMessage() {} +func (*LogSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_egress_fcae6bb65dce0d2e, []int{3} +} +func (m *LogSelector) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogSelector.Unmarshal(m, b) +} +func (m *LogSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogSelector.Marshal(b, m, deterministic) +} +func (dst *LogSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogSelector.Merge(dst, src) +} +func (m *LogSelector) XXX_Size() int { + return xxx_messageInfo_LogSelector.Size(m) +} +func (m *LogSelector) XXX_DiscardUnknown() { + xxx_messageInfo_LogSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_LogSelector proto.InternalMessageInfo + +// GaugeSelector instructs Loggregator to egress Gauge envelopes to the +// given subscription. +type GaugeSelector struct { + // Any egress Gauge envelope must consist of the given names. 
+ Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GaugeSelector) Reset() { *m = GaugeSelector{} } +func (m *GaugeSelector) String() string { return proto.CompactTextString(m) } +func (*GaugeSelector) ProtoMessage() {} +func (*GaugeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_egress_fcae6bb65dce0d2e, []int{4} +} +func (m *GaugeSelector) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GaugeSelector.Unmarshal(m, b) +} +func (m *GaugeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GaugeSelector.Marshal(b, m, deterministic) +} +func (dst *GaugeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_GaugeSelector.Merge(dst, src) +} +func (m *GaugeSelector) XXX_Size() int { + return xxx_messageInfo_GaugeSelector.Size(m) +} +func (m *GaugeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_GaugeSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_GaugeSelector proto.InternalMessageInfo + +func (m *GaugeSelector) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + +// CounterSelector instructs Loggregator to egress Counter envelopes to the +// given subscription +type CounterSelector struct { + // Any egress Counter envelope must have the given name. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CounterSelector) Reset() { *m = CounterSelector{} } +func (m *CounterSelector) String() string { return proto.CompactTextString(m) } +func (*CounterSelector) ProtoMessage() {} +func (*CounterSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_egress_fcae6bb65dce0d2e, []int{5} +} +func (m *CounterSelector) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CounterSelector.Unmarshal(m, b) +} +func (m *CounterSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CounterSelector.Marshal(b, m, deterministic) +} +func (dst *CounterSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_CounterSelector.Merge(dst, src) +} +func (m *CounterSelector) XXX_Size() int { + return xxx_messageInfo_CounterSelector.Size(m) +} +func (m *CounterSelector) XXX_DiscardUnknown() { + xxx_messageInfo_CounterSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_CounterSelector proto.InternalMessageInfo + +func (m *CounterSelector) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// TimerSelector instructs Loggregator to egress Timer envelopes to the given +// subscription. 
+type TimerSelector struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimerSelector) Reset() { *m = TimerSelector{} } +func (m *TimerSelector) String() string { return proto.CompactTextString(m) } +func (*TimerSelector) ProtoMessage() {} +func (*TimerSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_egress_fcae6bb65dce0d2e, []int{6} +} +func (m *TimerSelector) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimerSelector.Unmarshal(m, b) +} +func (m *TimerSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimerSelector.Marshal(b, m, deterministic) +} +func (dst *TimerSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimerSelector.Merge(dst, src) +} +func (m *TimerSelector) XXX_Size() int { + return xxx_messageInfo_TimerSelector.Size(m) +} +func (m *TimerSelector) XXX_DiscardUnknown() { + xxx_messageInfo_TimerSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_TimerSelector proto.InternalMessageInfo + +// EventSelector instructs Loggregator to egress Event envelopes to the given +// subscription. 
+type EventSelector struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EventSelector) Reset() { *m = EventSelector{} } +func (m *EventSelector) String() string { return proto.CompactTextString(m) } +func (*EventSelector) ProtoMessage() {} +func (*EventSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_egress_fcae6bb65dce0d2e, []int{7} +} +func (m *EventSelector) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EventSelector.Unmarshal(m, b) +} +func (m *EventSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EventSelector.Marshal(b, m, deterministic) +} +func (dst *EventSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSelector.Merge(dst, src) +} +func (m *EventSelector) XXX_Size() int { + return xxx_messageInfo_EventSelector.Size(m) +} +func (m *EventSelector) XXX_DiscardUnknown() { + xxx_messageInfo_EventSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_EventSelector proto.InternalMessageInfo + +func init() { + proto.RegisterType((*EgressRequest)(nil), "loggregator.v2.EgressRequest") + proto.RegisterType((*EgressBatchRequest)(nil), "loggregator.v2.EgressBatchRequest") + proto.RegisterType((*Selector)(nil), "loggregator.v2.Selector") + proto.RegisterType((*LogSelector)(nil), "loggregator.v2.LogSelector") + proto.RegisterType((*GaugeSelector)(nil), "loggregator.v2.GaugeSelector") + proto.RegisterType((*CounterSelector)(nil), "loggregator.v2.CounterSelector") + proto.RegisterType((*TimerSelector)(nil), "loggregator.v2.TimerSelector") + proto.RegisterType((*EventSelector)(nil), "loggregator.v2.EventSelector") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// EgressClient is the client API for Egress service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type EgressClient interface { + Receiver(ctx context.Context, in *EgressRequest, opts ...grpc.CallOption) (Egress_ReceiverClient, error) + BatchedReceiver(ctx context.Context, in *EgressBatchRequest, opts ...grpc.CallOption) (Egress_BatchedReceiverClient, error) +} + +type egressClient struct { + cc *grpc.ClientConn +} + +func NewEgressClient(cc *grpc.ClientConn) EgressClient { + return &egressClient{cc} +} + +func (c *egressClient) Receiver(ctx context.Context, in *EgressRequest, opts ...grpc.CallOption) (Egress_ReceiverClient, error) { + stream, err := c.cc.NewStream(ctx, &_Egress_serviceDesc.Streams[0], "/loggregator.v2.Egress/Receiver", opts...) + if err != nil { + return nil, err + } + x := &egressReceiverClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Egress_ReceiverClient interface { + Recv() (*Envelope, error) + grpc.ClientStream +} + +type egressReceiverClient struct { + grpc.ClientStream +} + +func (x *egressReceiverClient) Recv() (*Envelope, error) { + m := new(Envelope) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *egressClient) BatchedReceiver(ctx context.Context, in *EgressBatchRequest, opts ...grpc.CallOption) (Egress_BatchedReceiverClient, error) { + stream, err := c.cc.NewStream(ctx, &_Egress_serviceDesc.Streams[1], "/loggregator.v2.Egress/BatchedReceiver", opts...) 
+ if err != nil { + return nil, err + } + x := &egressBatchedReceiverClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Egress_BatchedReceiverClient interface { + Recv() (*EnvelopeBatch, error) + grpc.ClientStream +} + +type egressBatchedReceiverClient struct { + grpc.ClientStream +} + +func (x *egressBatchedReceiverClient) Recv() (*EnvelopeBatch, error) { + m := new(EnvelopeBatch) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// EgressServer is the server API for Egress service. +type EgressServer interface { + Receiver(*EgressRequest, Egress_ReceiverServer) error + BatchedReceiver(*EgressBatchRequest, Egress_BatchedReceiverServer) error +} + +func RegisterEgressServer(s *grpc.Server, srv EgressServer) { + s.RegisterService(&_Egress_serviceDesc, srv) +} + +func _Egress_Receiver_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(EgressRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(EgressServer).Receiver(m, &egressReceiverServer{stream}) +} + +type Egress_ReceiverServer interface { + Send(*Envelope) error + grpc.ServerStream +} + +type egressReceiverServer struct { + grpc.ServerStream +} + +func (x *egressReceiverServer) Send(m *Envelope) error { + return x.ServerStream.SendMsg(m) +} + +func _Egress_BatchedReceiver_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(EgressBatchRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(EgressServer).BatchedReceiver(m, &egressBatchedReceiverServer{stream}) +} + +type Egress_BatchedReceiverServer interface { + Send(*EnvelopeBatch) error + grpc.ServerStream +} + +type egressBatchedReceiverServer struct { + grpc.ServerStream +} + +func (x *egressBatchedReceiverServer) Send(m *EnvelopeBatch) error { + return x.ServerStream.SendMsg(m) +} 
+ +var _Egress_serviceDesc = grpc.ServiceDesc{ + ServiceName: "loggregator.v2.Egress", + HandlerType: (*EgressServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Receiver", + Handler: _Egress_Receiver_Handler, + ServerStreams: true, + }, + { + StreamName: "BatchedReceiver", + Handler: _Egress_BatchedReceiver_Handler, + ServerStreams: true, + }, + }, + Metadata: "egress.proto", +} + +func init() { proto.RegisterFile("egress.proto", fileDescriptor_egress_fcae6bb65dce0d2e) } + +var fileDescriptor_egress_fcae6bb65dce0d2e = []byte{ + // 502 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x94, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xc7, 0xeb, 0xfc, 0xb5, 0x27, 0xbf, 0x24, 0xbf, 0xae, 0x38, 0x98, 0x54, 0x55, 0x2d, 0x4b, + 0x95, 0x72, 0x00, 0x83, 0xc2, 0x9f, 0x0b, 0x27, 0x82, 0xa2, 0x52, 0xa9, 0x20, 0x64, 0x7a, 0xe0, + 0x66, 0x2d, 0xf6, 0x74, 0x6b, 0xc9, 0xf1, 0x86, 0xdd, 0x75, 0xa4, 0x5e, 0xb9, 0xf0, 0x30, 0x3c, + 0x07, 0xef, 0x85, 0xbc, 0x1b, 0x37, 0xb6, 0x1b, 0x78, 0x01, 0x6e, 0x9e, 0xfd, 0x7e, 0x3f, 0xb3, + 0xb3, 0xe3, 0xd9, 0x85, 0xff, 0x90, 0x09, 0x94, 0x32, 0xd8, 0x08, 0xae, 0x38, 0x99, 0x64, 0x9c, + 0x31, 0x81, 0x8c, 0x2a, 0x2e, 0x82, 0xed, 0x62, 0x36, 0xc1, 0x7c, 0x8b, 0x19, 0xdf, 0xa0, 0xd1, + 0xfd, 0xef, 0x1d, 0x18, 0xaf, 0x34, 0x10, 0xe2, 0xb7, 0x02, 0xa5, 0x22, 0x8f, 0xc1, 0x96, 0xb7, + 0x54, 0x24, 0x51, 0x9a, 0xb8, 0x96, 0x67, 0xcd, 0x9d, 0x70, 0xa8, 0xe3, 0xcb, 0x84, 0x3c, 0x05, + 0x92, 0xa0, 0x42, 0xb1, 0x4e, 0xf3, 0x54, 0xaa, 0x34, 0x8e, 0x72, 0xba, 0x46, 0xb7, 0xaf, 0x4d, + 0xc7, 0x0d, 0xe5, 0x23, 0x5d, 0x23, 0x79, 0x0b, 0xd3, 0x0c, 0x19, 0x8d, 0xef, 0x22, 0x89, 0x19, + 0xc6, 0x8a, 0x0b, 0xb7, 0xe3, 0x59, 0xf3, 0xd1, 0xc2, 0x0d, 0x9a, 0x55, 0x05, 0x9f, 0x77, 0x7a, + 0x38, 0x31, 0x40, 0x15, 0x93, 0xd7, 0xe0, 0x54, 0xac, 0x74, 0x7b, 0x5e, 0xf7, 0xaf, 0xf0, 0xde, + 0x4a, 0x9e, 0x00, 0x29, 0x24, 0x46, 0x1b, 0x81, 0x37, 0x28, 0x04, 0x26, 0x91, 0xa2, 0x4c, 0xba, + 
0x5d, 0xcf, 0x9a, 0xdb, 0xe1, 0xff, 0x85, 0xc4, 0x4f, 0x95, 0x70, 0x4d, 0x99, 0xf4, 0x7f, 0x74, + 0x80, 0x98, 0x26, 0x2c, 0xa9, 0x8a, 0x6f, 0xff, 0xe1, 0x4e, 0xfc, 0xea, 0x80, 0x7d, 0xbf, 0xe5, + 0x09, 0x38, 0x92, 0x17, 0x22, 0xc6, 0x7d, 0x03, 0x6c, 0xb3, 0x70, 0x99, 0x90, 0x67, 0xd0, 0xcd, + 0x38, 0xdb, 0x1d, 0xe3, 0xa4, 0x5d, 0xc9, 0x15, 0x67, 0x55, 0x9a, 0xf7, 0x47, 0x61, 0xe9, 0x24, + 0x6f, 0x60, 0x18, 0xf3, 0x22, 0x57, 0x28, 0xf4, 0xee, 0xa3, 0xc5, 0x59, 0x1b, 0x7a, 0x67, 0xe4, + 0x1a, 0x58, 0x11, 0xe4, 0x15, 0xf4, 0x19, 0x2d, 0x18, 0xba, 0x3d, 0x8d, 0x9e, 0xb6, 0xd1, 0x8b, + 0x52, 0xac, 0x81, 0xc6, 0x5d, 0x62, 0x2a, 0x5d, 0xa3, 0xd0, 0x7f, 0xe6, 0x00, 0x76, 0x5d, 0x8a, + 0x75, 0x4c, 0xbb, 0x4b, 0x0c, 0xb7, 0x98, 0x2b, 0x77, 0x70, 0x18, 0x5b, 0x95, 0x62, 0x1d, 0xd3, + 0xee, 0xa5, 0x03, 0xc3, 0x0f, 0x28, 0x25, 0x65, 0xe8, 0x8f, 0x61, 0x54, 0x6b, 0x81, 0x7f, 0x0e, + 0xe3, 0x46, 0x85, 0xe4, 0x11, 0xf4, 0xcb, 0x89, 0x91, 0xae, 0xe5, 0x75, 0xe7, 0x4e, 0x68, 0x02, + 0xff, 0x1c, 0xa6, 0xad, 0x1e, 0x10, 0x02, 0x3d, 0x3d, 0x5a, 0xa6, 0xfd, 0xfa, 0xdb, 0x9f, 0xc2, + 0xb8, 0x51, 0x78, 0xb9, 0xd0, 0x28, 0x69, 0xf1, 0xd3, 0x82, 0x81, 0x19, 0x68, 0x72, 0x01, 0x76, + 0x88, 0x31, 0xa6, 0x5b, 0x14, 0xe4, 0xe1, 0x41, 0xea, 0x37, 0x7f, 0xf6, 0x60, 0x9e, 0x56, 0xbb, + 0xb7, 0xc2, 0x3f, 0x7a, 0x6e, 0x91, 0x2f, 0x30, 0xd5, 0xb7, 0x03, 0x93, 0xfb, 0x7c, 0xfe, 0xe1, + 0x7c, 0xf5, 0x4b, 0x34, 0x3b, 0xfd, 0x53, 0x52, 0xed, 0x2a, 0x33, 0x2f, 0x5f, 0xc2, 0x19, 0x17, + 0x2c, 0x88, 0x33, 0x5e, 0x24, 0x37, 0xbc, 0xc8, 0x13, 0x71, 0xd7, 0x82, 0x96, 0xc7, 0x57, 0xfb, + 0xd8, 0x6c, 0xf2, 0x75, 0xa0, 0x1f, 0xb0, 0x17, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x04, 0xbd, + 0xa8, 0xa8, 0xf0, 0x04, 0x00, 0x00, +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/envelope.pb.go b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/envelope.pb.go new file mode 100644 index 000000000000..d4fa73bd81dd --- /dev/null +++ 
b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/envelope.pb.go @@ -0,0 +1,875 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: envelope.proto + +package loggregator_v2 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Log_Type int32 + +const ( + Log_OUT Log_Type = 0 + Log_ERR Log_Type = 1 +) + +var Log_Type_name = map[int32]string{ + 0: "OUT", + 1: "ERR", +} +var Log_Type_value = map[string]int32{ + "OUT": 0, + "ERR": 1, +} + +func (x Log_Type) String() string { + return proto.EnumName(Log_Type_name, int32(x)) +} +func (Log_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_envelope_1843aa18364a6e12, []int{3, 0} +} + +type Envelope struct { + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + SourceId string `protobuf:"bytes,2,opt,name=source_id,proto3" json:"source_id,omitempty"` + InstanceId string `protobuf:"bytes,8,opt,name=instance_id,proto3" json:"instance_id,omitempty"` + DeprecatedTags map[string]*Value `protobuf:"bytes,3,rep,name=deprecated_tags,proto3" json:"deprecated_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Tags map[string]string `protobuf:"bytes,9,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Types that are valid to be assigned to Message: + // *Envelope_Log + // *Envelope_Counter + // *Envelope_Gauge + // 
*Envelope_Timer + // *Envelope_Event + Message isEnvelope_Message `protobuf_oneof:"message"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Envelope) Reset() { *m = Envelope{} } +func (m *Envelope) String() string { return proto.CompactTextString(m) } +func (*Envelope) ProtoMessage() {} +func (*Envelope) Descriptor() ([]byte, []int) { + return fileDescriptor_envelope_1843aa18364a6e12, []int{0} +} +func (m *Envelope) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Envelope.Unmarshal(m, b) +} +func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Envelope.Marshal(b, m, deterministic) +} +func (dst *Envelope) XXX_Merge(src proto.Message) { + xxx_messageInfo_Envelope.Merge(dst, src) +} +func (m *Envelope) XXX_Size() int { + return xxx_messageInfo_Envelope.Size(m) +} +func (m *Envelope) XXX_DiscardUnknown() { + xxx_messageInfo_Envelope.DiscardUnknown(m) +} + +var xxx_messageInfo_Envelope proto.InternalMessageInfo + +func (m *Envelope) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *Envelope) GetSourceId() string { + if m != nil { + return m.SourceId + } + return "" +} + +func (m *Envelope) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *Envelope) GetDeprecatedTags() map[string]*Value { + if m != nil { + return m.DeprecatedTags + } + return nil +} + +func (m *Envelope) GetTags() map[string]string { + if m != nil { + return m.Tags + } + return nil +} + +type isEnvelope_Message interface { + isEnvelope_Message() +} + +type Envelope_Log struct { + Log *Log `protobuf:"bytes,4,opt,name=log,proto3,oneof"` +} + +type Envelope_Counter struct { + Counter *Counter `protobuf:"bytes,5,opt,name=counter,proto3,oneof"` +} + +type Envelope_Gauge struct { + Gauge *Gauge `protobuf:"bytes,6,opt,name=gauge,proto3,oneof"` +} + +type Envelope_Timer struct { + Timer 
*Timer `protobuf:"bytes,7,opt,name=timer,proto3,oneof"` +} + +type Envelope_Event struct { + Event *Event `protobuf:"bytes,10,opt,name=event,proto3,oneof"` +} + +func (*Envelope_Log) isEnvelope_Message() {} + +func (*Envelope_Counter) isEnvelope_Message() {} + +func (*Envelope_Gauge) isEnvelope_Message() {} + +func (*Envelope_Timer) isEnvelope_Message() {} + +func (*Envelope_Event) isEnvelope_Message() {} + +func (m *Envelope) GetMessage() isEnvelope_Message { + if m != nil { + return m.Message + } + return nil +} + +func (m *Envelope) GetLog() *Log { + if x, ok := m.GetMessage().(*Envelope_Log); ok { + return x.Log + } + return nil +} + +func (m *Envelope) GetCounter() *Counter { + if x, ok := m.GetMessage().(*Envelope_Counter); ok { + return x.Counter + } + return nil +} + +func (m *Envelope) GetGauge() *Gauge { + if x, ok := m.GetMessage().(*Envelope_Gauge); ok { + return x.Gauge + } + return nil +} + +func (m *Envelope) GetTimer() *Timer { + if x, ok := m.GetMessage().(*Envelope_Timer); ok { + return x.Timer + } + return nil +} + +func (m *Envelope) GetEvent() *Event { + if x, ok := m.GetMessage().(*Envelope_Event); ok { + return x.Event + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Envelope) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Envelope_OneofMarshaler, _Envelope_OneofUnmarshaler, _Envelope_OneofSizer, []interface{}{ + (*Envelope_Log)(nil), + (*Envelope_Counter)(nil), + (*Envelope_Gauge)(nil), + (*Envelope_Timer)(nil), + (*Envelope_Event)(nil), + } +} + +func _Envelope_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Envelope) + // message + switch x := m.Message.(type) { + case *Envelope_Log: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Log); err != nil { + return err + } + case *Envelope_Counter: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Counter); err != nil { + return err + } + case *Envelope_Gauge: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Gauge); err != nil { + return err + } + case *Envelope_Timer: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Timer); err != nil { + return err + } + case *Envelope_Event: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Event); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Envelope.Message has unexpected type %T", x) + } + return nil +} + +func _Envelope_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Envelope) + switch tag { + case 4: // message.log + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Log) + err := b.DecodeMessage(msg) + m.Message = &Envelope_Log{msg} + return true, err + case 5: // message.counter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Counter) + err := b.DecodeMessage(msg) + m.Message = &Envelope_Counter{msg} + return true, err + case 6: // message.gauge + if wire != proto.WireBytes { + return true, 
proto.ErrInternalBadWireType + } + msg := new(Gauge) + err := b.DecodeMessage(msg) + m.Message = &Envelope_Gauge{msg} + return true, err + case 7: // message.timer + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Timer) + err := b.DecodeMessage(msg) + m.Message = &Envelope_Timer{msg} + return true, err + case 10: // message.event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Event) + err := b.DecodeMessage(msg) + m.Message = &Envelope_Event{msg} + return true, err + default: + return false, nil + } +} + +func _Envelope_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Envelope) + // message + switch x := m.Message.(type) { + case *Envelope_Log: + s := proto.Size(x.Log) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Envelope_Counter: + s := proto.Size(x.Counter) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Envelope_Gauge: + s := proto.Size(x.Gauge) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Envelope_Timer: + s := proto.Size(x.Timer) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Envelope_Event: + s := proto.Size(x.Event) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type EnvelopeBatch struct { + Batch []*Envelope `protobuf:"bytes,1,rep,name=batch,proto3" json:"batch,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvelopeBatch) Reset() { *m = EnvelopeBatch{} } +func (m *EnvelopeBatch) String() string { return proto.CompactTextString(m) } +func (*EnvelopeBatch) ProtoMessage() {} +func (*EnvelopeBatch) Descriptor() ([]byte, []int) { + return fileDescriptor_envelope_1843aa18364a6e12, []int{1} +} +func (m *EnvelopeBatch) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_EnvelopeBatch.Unmarshal(m, b) +} +func (m *EnvelopeBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnvelopeBatch.Marshal(b, m, deterministic) +} +func (dst *EnvelopeBatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvelopeBatch.Merge(dst, src) +} +func (m *EnvelopeBatch) XXX_Size() int { + return xxx_messageInfo_EnvelopeBatch.Size(m) +} +func (m *EnvelopeBatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvelopeBatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvelopeBatch proto.InternalMessageInfo + +func (m *EnvelopeBatch) GetBatch() []*Envelope { + if m != nil { + return m.Batch + } + return nil +} + +type Value struct { + // Types that are valid to be assigned to Data: + // *Value_Text + // *Value_Integer + // *Value_Decimal + Data isValue_Data `protobuf_oneof:"data"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_envelope_1843aa18364a6e12, []int{2} +} +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (dst *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(dst, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Data interface { + isValue_Data() +} + +type Value_Text struct { + Text string `protobuf:"bytes,1,opt,name=text,proto3,oneof"` +} + +type Value_Integer struct { + Integer int64 
`protobuf:"varint,2,opt,name=integer,proto3,oneof"` +} + +type Value_Decimal struct { + Decimal float64 `protobuf:"fixed64,3,opt,name=decimal,proto3,oneof"` +} + +func (*Value_Text) isValue_Data() {} + +func (*Value_Integer) isValue_Data() {} + +func (*Value_Decimal) isValue_Data() {} + +func (m *Value) GetData() isValue_Data { + if m != nil { + return m.Data + } + return nil +} + +func (m *Value) GetText() string { + if x, ok := m.GetData().(*Value_Text); ok { + return x.Text + } + return "" +} + +func (m *Value) GetInteger() int64 { + if x, ok := m.GetData().(*Value_Integer); ok { + return x.Integer + } + return 0 +} + +func (m *Value) GetDecimal() float64 { + if x, ok := m.GetData().(*Value_Decimal); ok { + return x.Decimal + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_Text)(nil), + (*Value_Integer)(nil), + (*Value_Decimal)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // data + switch x := m.Data.(type) { + case *Value_Text: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Text) + case *Value_Integer: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Integer)) + case *Value_Decimal: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.Decimal)) + case nil: + default: + return fmt.Errorf("Value.Data has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // data.text + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := 
b.DecodeStringBytes() + m.Data = &Value_Text{x} + return true, err + case 2: // data.integer + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Data = &Value_Integer{int64(x)} + return true, err + case 3: // data.decimal + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Data = &Value_Decimal{math.Float64frombits(x)} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // data + switch x := m.Data.(type) { + case *Value_Text: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Text))) + n += len(x.Text) + case *Value_Integer: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Integer)) + case *Value_Decimal: + n += 1 // tag and wire + n += 8 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Log struct { + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + Type Log_Type `protobuf:"varint,2,opt,name=type,proto3,enum=loggregator.v2.Log_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Log) Reset() { *m = Log{} } +func (m *Log) String() string { return proto.CompactTextString(m) } +func (*Log) ProtoMessage() {} +func (*Log) Descriptor() ([]byte, []int) { + return fileDescriptor_envelope_1843aa18364a6e12, []int{3} +} +func (m *Log) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Log.Unmarshal(m, b) +} +func (m *Log) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Log.Marshal(b, m, deterministic) +} +func (dst *Log) XXX_Merge(src proto.Message) { + xxx_messageInfo_Log.Merge(dst, src) +} +func (m *Log) XXX_Size() int { + return xxx_messageInfo_Log.Size(m) +} +func (m *Log) XXX_DiscardUnknown() { + 
xxx_messageInfo_Log.DiscardUnknown(m) +} + +var xxx_messageInfo_Log proto.InternalMessageInfo + +func (m *Log) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func (m *Log) GetType() Log_Type { + if m != nil { + return m.Type + } + return Log_OUT +} + +type Counter struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Delta uint64 `protobuf:"varint,2,opt,name=delta,proto3" json:"delta,omitempty"` + Total uint64 `protobuf:"varint,3,opt,name=total,proto3" json:"total,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_envelope_1843aa18364a6e12, []int{4} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (dst *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(dst, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Counter) GetDelta() uint64 { + if m != nil { + return m.Delta + } + return 0 +} + +func (m *Counter) GetTotal() uint64 { + if m != nil { + return m.Total + } + return 0 +} + +type Gauge struct { + Metrics map[string]*GaugeValue `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_envelope_1843aa18364a6e12, []int{5} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (dst *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(dst, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetMetrics() map[string]*GaugeValue { + if m != nil { + return m.Metrics + } + return nil +} + +type GaugeValue struct { + Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GaugeValue) Reset() { *m = GaugeValue{} } +func (m *GaugeValue) String() string { return proto.CompactTextString(m) } +func (*GaugeValue) ProtoMessage() {} +func (*GaugeValue) Descriptor() ([]byte, []int) { + return fileDescriptor_envelope_1843aa18364a6e12, []int{6} +} +func (m *GaugeValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GaugeValue.Unmarshal(m, b) +} +func (m *GaugeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GaugeValue.Marshal(b, m, deterministic) +} +func (dst *GaugeValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_GaugeValue.Merge(dst, src) +} +func (m 
*GaugeValue) XXX_Size() int { + return xxx_messageInfo_GaugeValue.Size(m) +} +func (m *GaugeValue) XXX_DiscardUnknown() { + xxx_messageInfo_GaugeValue.DiscardUnknown(m) +} + +var xxx_messageInfo_GaugeValue proto.InternalMessageInfo + +func (m *GaugeValue) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *GaugeValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Timer struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Start int64 `protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"` + Stop int64 `protobuf:"varint,3,opt,name=stop,proto3" json:"stop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timer) Reset() { *m = Timer{} } +func (m *Timer) String() string { return proto.CompactTextString(m) } +func (*Timer) ProtoMessage() {} +func (*Timer) Descriptor() ([]byte, []int) { + return fileDescriptor_envelope_1843aa18364a6e12, []int{7} +} +func (m *Timer) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timer.Unmarshal(m, b) +} +func (m *Timer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timer.Marshal(b, m, deterministic) +} +func (dst *Timer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timer.Merge(dst, src) +} +func (m *Timer) XXX_Size() int { + return xxx_messageInfo_Timer.Size(m) +} +func (m *Timer) XXX_DiscardUnknown() { + xxx_messageInfo_Timer.DiscardUnknown(m) +} + +var xxx_messageInfo_Timer proto.InternalMessageInfo + +func (m *Timer) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Timer) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Timer) GetStop() int64 { + if m != nil { + return m.Stop + } + return 0 +} + +type Event struct { + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + Body string 
`protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_envelope_1843aa18364a6e12, []int{8} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Event.Unmarshal(m, b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) +} +func (dst *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(dst, src) +} +func (m *Event) XXX_Size() int { + return xxx_messageInfo_Event.Size(m) +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *Event) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Event) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func init() { + proto.RegisterType((*Envelope)(nil), "loggregator.v2.Envelope") + proto.RegisterMapType((map[string]*Value)(nil), "loggregator.v2.Envelope.DeprecatedTagsEntry") + proto.RegisterMapType((map[string]string)(nil), "loggregator.v2.Envelope.TagsEntry") + proto.RegisterType((*EnvelopeBatch)(nil), "loggregator.v2.EnvelopeBatch") + proto.RegisterType((*Value)(nil), "loggregator.v2.Value") + proto.RegisterType((*Log)(nil), "loggregator.v2.Log") + proto.RegisterType((*Counter)(nil), "loggregator.v2.Counter") + proto.RegisterType((*Gauge)(nil), "loggregator.v2.Gauge") + proto.RegisterMapType((map[string]*GaugeValue)(nil), "loggregator.v2.Gauge.MetricsEntry") + proto.RegisterType((*GaugeValue)(nil), "loggregator.v2.GaugeValue") + proto.RegisterType((*Timer)(nil), "loggregator.v2.Timer") + 
proto.RegisterType((*Event)(nil), "loggregator.v2.Event") + proto.RegisterEnum("loggregator.v2.Log_Type", Log_Type_name, Log_Type_value) +} + +func init() { proto.RegisterFile("envelope.proto", fileDescriptor_envelope_1843aa18364a6e12) } + +var fileDescriptor_envelope_1843aa18364a6e12 = []byte{ + // 651 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0xd1, 0x6b, 0xd4, 0x4e, + 0x10, 0xc7, 0x2f, 0x4d, 0xd2, 0x34, 0xd3, 0xfe, 0xfa, 0x2b, 0xdb, 0x8a, 0xe1, 0x10, 0x3c, 0xf2, + 0xe2, 0x81, 0x35, 0x68, 0x0b, 0x55, 0x44, 0x10, 0x4e, 0x0f, 0x4f, 0x38, 0x11, 0x96, 0xb3, 0xf8, + 0x22, 0x65, 0x9b, 0xac, 0x6b, 0x30, 0x97, 0x0d, 0xc9, 0xe6, 0x30, 0x8f, 0xfe, 0x21, 0xfe, 0xaf, + 0xb2, 0xb3, 0x89, 0x77, 0x3d, 0x73, 0xbe, 0xcd, 0xcc, 0xf7, 0x33, 0x73, 0xb3, 0x33, 0x93, 0x83, + 0x63, 0x9e, 0xaf, 0x78, 0x26, 0x0b, 0x1e, 0x15, 0xa5, 0x54, 0x92, 0x1c, 0x67, 0x52, 0x88, 0x92, + 0x0b, 0xa6, 0x64, 0x19, 0xad, 0x2e, 0xc2, 0x9f, 0x2e, 0x1c, 0x4c, 0x5b, 0x84, 0x3c, 0x00, 0x5f, + 0xa5, 0x4b, 0x5e, 0x29, 0xb6, 0x2c, 0x02, 0x6b, 0x64, 0x8d, 0x6d, 0xba, 0x0e, 0x68, 0xb5, 0x92, + 0x75, 0x19, 0xf3, 0x9b, 0x34, 0x09, 0xf6, 0x46, 0xd6, 0xd8, 0xa7, 0xeb, 0x00, 0x19, 0xc1, 0x61, + 0x9a, 0x57, 0x8a, 0xe5, 0x46, 0x3f, 0x40, 0x7d, 0x33, 0x44, 0xae, 0xe1, 0xff, 0x84, 0x17, 0x25, + 0x8f, 0x99, 0xe2, 0xc9, 0x8d, 0x62, 0xa2, 0x0a, 0xec, 0x91, 0x3d, 0x3e, 0xbc, 0x38, 0x8f, 0xee, + 0x36, 0x15, 0x75, 0x0d, 0x45, 0x6f, 0xff, 0xf0, 0x0b, 0x26, 0xaa, 0x69, 0xae, 0xca, 0x86, 0x6e, + 0x17, 0x21, 0x57, 0xe0, 0x60, 0x31, 0x1f, 0x8b, 0x85, 0x3b, 0x8b, 0xad, 0x4b, 0x20, 0x4f, 0x1e, + 0x81, 0x9d, 0x49, 0x11, 0x38, 0x23, 0x6b, 0x7c, 0x78, 0x71, 0xba, 0x9d, 0x36, 0x97, 0x62, 0x36, + 0xa0, 0x9a, 0x20, 0x97, 0xe0, 0xc5, 0xb2, 0xce, 0x15, 0x2f, 0x03, 0x17, 0xe1, 0xfb, 0xdb, 0xf0, + 0x1b, 0x23, 0xcf, 0x06, 0xb4, 0x23, 0xc9, 0x13, 0x70, 0x05, 0xab, 0x05, 0x0f, 0xf6, 0x31, 0xe5, + 0xde, 0x76, 0xca, 0x3b, 0x2d, 0xce, 0x06, 0xd4, 0x50, 0x1a, 0xd7, 0x93, 0x2e, 0x03, 
0xaf, 0x1f, + 0x5f, 0x68, 0x51, 0xe3, 0x48, 0x69, 0x9c, 0xaf, 0x78, 0xae, 0x02, 0xe8, 0xc7, 0xa7, 0x5a, 0xd4, + 0x38, 0x52, 0xc3, 0xcf, 0x70, 0xda, 0x33, 0x4a, 0x72, 0x02, 0xf6, 0x77, 0xde, 0xe0, 0xa6, 0x7d, + 0xaa, 0x4d, 0xf2, 0x18, 0xdc, 0x15, 0xcb, 0x6a, 0x8e, 0xfb, 0xed, 0xa9, 0x7b, 0xad, 0x45, 0x6a, + 0x98, 0x97, 0x7b, 0x2f, 0xac, 0xe1, 0x73, 0xf0, 0xff, 0x55, 0xef, 0x6c, 0xb3, 0x9e, 0xbf, 0x91, + 0x38, 0xf1, 0xc1, 0x5b, 0xf2, 0xaa, 0x62, 0x82, 0x87, 0xaf, 0xe1, 0xbf, 0x6e, 0x49, 0x13, 0xa6, + 0xe2, 0x6f, 0x24, 0x02, 0xf7, 0x56, 0x1b, 0x81, 0x85, 0x2b, 0x0d, 0x76, 0xad, 0x94, 0x1a, 0x2c, + 0xfc, 0x02, 0x2e, 0x36, 0x46, 0xce, 0xc0, 0x51, 0xfc, 0x87, 0x32, 0x1d, 0xcc, 0x06, 0x14, 0x3d, + 0x32, 0x04, 0x2f, 0xcd, 0x15, 0x17, 0xbc, 0xc4, 0x36, 0x6c, 0xbd, 0xa6, 0x36, 0xa0, 0xb5, 0x84, + 0xc7, 0xe9, 0x92, 0x65, 0x81, 0x3d, 0xb2, 0xc6, 0x96, 0xd6, 0xda, 0xc0, 0x64, 0x1f, 0x9c, 0x84, + 0x29, 0x16, 0x0a, 0xb0, 0xe7, 0x52, 0x90, 0x00, 0xbc, 0x82, 0x35, 0x99, 0x64, 0x09, 0xd6, 0x3f, + 0xa2, 0x9d, 0x4b, 0xce, 0xc1, 0x51, 0x4d, 0x61, 0x1e, 0x79, 0xfc, 0x77, 0xbb, 0x73, 0x29, 0xa2, + 0x45, 0x53, 0x70, 0x8a, 0x54, 0x18, 0x80, 0xa3, 0x3d, 0xe2, 0x81, 0xfd, 0xf1, 0xd3, 0xe2, 0x64, + 0xa0, 0x8d, 0x29, 0xa5, 0x27, 0x56, 0xf8, 0x1e, 0xbc, 0xf6, 0x92, 0x08, 0x01, 0x27, 0x67, 0x4b, + 0xde, 0xce, 0x12, 0x6d, 0x3d, 0xcc, 0x84, 0x67, 0x8a, 0xe1, 0xef, 0x38, 0xd4, 0x38, 0x3a, 0xaa, + 0xa4, 0x6a, 0xfb, 0x77, 0xa8, 0x71, 0xc2, 0x5f, 0x16, 0xb8, 0x78, 0x62, 0xe4, 0x95, 0x1e, 0xb4, + 0x2a, 0xd3, 0xb8, 0x6a, 0xc7, 0x19, 0xf6, 0x9e, 0x62, 0xf4, 0xc1, 0x40, 0xe6, 0x0b, 0xe9, 0x52, + 0x86, 0xd7, 0x70, 0xb4, 0x29, 0xf4, 0xac, 0xf8, 0xe9, 0xdd, 0x93, 0x19, 0xf6, 0x56, 0xdf, 0xbe, + 0x9b, 0xf0, 0x0a, 0x60, 0x2d, 0xe8, 0xd7, 0xd6, 0x79, 0xaa, 0xba, 0xd7, 0x6a, 0xfb, 0xee, 0xe9, + 0x58, 0x6d, 0x6e, 0x38, 0x05, 0x17, 0x3f, 0x85, 0x5d, 0x03, 0xaa, 0x14, 0x2b, 0x95, 0x59, 0x33, + 0x35, 0x8e, 0x26, 0x2b, 0x25, 0x0b, 0x9c, 0x8f, 0x4d, 0xd1, 0x0e, 0x9f, 0x81, 0x8b, 0x9f, 0x08, + 0x4e, 0x2f, 0x55, 0x59, 
0x57, 0xc7, 0x38, 0x3a, 0xe5, 0x56, 0x26, 0x4d, 0x7b, 0xb5, 0x68, 0x4f, + 0xae, 0xe0, 0xa1, 0x2c, 0x45, 0x14, 0x67, 0xb2, 0x4e, 0xbe, 0xca, 0x3a, 0x4f, 0xca, 0x66, 0xeb, + 0xa9, 0x93, 0xd3, 0xf9, 0xda, 0xef, 0x6e, 0xf4, 0x76, 0x1f, 0xff, 0x78, 0x2f, 0x7f, 0x07, 0x00, + 0x00, 0xff, 0xff, 0x0a, 0x63, 0xfb, 0xc3, 0x8a, 0x05, 0x00, 0x00, +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/generate.sh b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/generate.sh new file mode 100755 index 000000000000..6ee601b560d5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/generate.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +dir_resolve() +{ + cd "$1" 2>/dev/null || return $? # cd to desired directory; if fail, quell any error messages but return exit status + echo "`pwd -P`" # output full, link-resolved path +} + +set -e + +TARGET=`dirname $0` +TARGET=`dir_resolve $TARGET` +cd $TARGET + +go get github.com/golang/protobuf/{proto,protoc-gen-go} + + +tmp_dir=$(mktemp -d) +mkdir -p $tmp_dir/loggregator + +cp $GOPATH/src/github.com/cloudfoundry/loggregator-api/v2/*proto $tmp_dir/loggregator + +protoc $tmp_dir/loggregator/*.proto --go_out=plugins=grpc:. --proto_path=$tmp_dir/loggregator + +rm -r $tmp_dir diff --git a/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/ingress.pb.go b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/ingress.pb.go new file mode 100644 index 000000000000..3ab51c50d13d --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/ingress.pb.go @@ -0,0 +1,346 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: ingress.proto + +package loggregator_v2 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type IngressResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IngressResponse) Reset() { *m = IngressResponse{} } +func (m *IngressResponse) String() string { return proto.CompactTextString(m) } +func (*IngressResponse) ProtoMessage() {} +func (*IngressResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ingress_1df30cb6e71681fb, []int{0} +} +func (m *IngressResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IngressResponse.Unmarshal(m, b) +} +func (m *IngressResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IngressResponse.Marshal(b, m, deterministic) +} +func (dst *IngressResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressResponse.Merge(dst, src) +} +func (m *IngressResponse) XXX_Size() int { + return xxx_messageInfo_IngressResponse.Size(m) +} +func (m *IngressResponse) XXX_DiscardUnknown() { + xxx_messageInfo_IngressResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressResponse proto.InternalMessageInfo + +type BatchSenderResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchSenderResponse) Reset() { *m = BatchSenderResponse{} } +func (m *BatchSenderResponse) String() string { return proto.CompactTextString(m) } +func (*BatchSenderResponse) ProtoMessage() {} +func (*BatchSenderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ingress_1df30cb6e71681fb, []int{1} +} +func (m 
*BatchSenderResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BatchSenderResponse.Unmarshal(m, b) +} +func (m *BatchSenderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BatchSenderResponse.Marshal(b, m, deterministic) +} +func (dst *BatchSenderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchSenderResponse.Merge(dst, src) +} +func (m *BatchSenderResponse) XXX_Size() int { + return xxx_messageInfo_BatchSenderResponse.Size(m) +} +func (m *BatchSenderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BatchSenderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchSenderResponse proto.InternalMessageInfo + +type SendResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendResponse) Reset() { *m = SendResponse{} } +func (m *SendResponse) String() string { return proto.CompactTextString(m) } +func (*SendResponse) ProtoMessage() {} +func (*SendResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ingress_1df30cb6e71681fb, []int{2} +} +func (m *SendResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendResponse.Unmarshal(m, b) +} +func (m *SendResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendResponse.Marshal(b, m, deterministic) +} +func (dst *SendResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendResponse.Merge(dst, src) +} +func (m *SendResponse) XXX_Size() int { + return xxx_messageInfo_SendResponse.Size(m) +} +func (m *SendResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SendResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SendResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*IngressResponse)(nil), "loggregator.v2.IngressResponse") + proto.RegisterType((*BatchSenderResponse)(nil), "loggregator.v2.BatchSenderResponse") + proto.RegisterType((*SendResponse)(nil), 
"loggregator.v2.SendResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IngressClient is the client API for Ingress service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IngressClient interface { + Sender(ctx context.Context, opts ...grpc.CallOption) (Ingress_SenderClient, error) + BatchSender(ctx context.Context, opts ...grpc.CallOption) (Ingress_BatchSenderClient, error) + Send(ctx context.Context, in *EnvelopeBatch, opts ...grpc.CallOption) (*SendResponse, error) +} + +type ingressClient struct { + cc *grpc.ClientConn +} + +func NewIngressClient(cc *grpc.ClientConn) IngressClient { + return &ingressClient{cc} +} + +func (c *ingressClient) Sender(ctx context.Context, opts ...grpc.CallOption) (Ingress_SenderClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingress_serviceDesc.Streams[0], "/loggregator.v2.Ingress/Sender", opts...) 
+ if err != nil { + return nil, err + } + x := &ingressSenderClient{stream} + return x, nil +} + +type Ingress_SenderClient interface { + Send(*Envelope) error + CloseAndRecv() (*IngressResponse, error) + grpc.ClientStream +} + +type ingressSenderClient struct { + grpc.ClientStream +} + +func (x *ingressSenderClient) Send(m *Envelope) error { + return x.ClientStream.SendMsg(m) +} + +func (x *ingressSenderClient) CloseAndRecv() (*IngressResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(IngressResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *ingressClient) BatchSender(ctx context.Context, opts ...grpc.CallOption) (Ingress_BatchSenderClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingress_serviceDesc.Streams[1], "/loggregator.v2.Ingress/BatchSender", opts...) + if err != nil { + return nil, err + } + x := &ingressBatchSenderClient{stream} + return x, nil +} + +type Ingress_BatchSenderClient interface { + Send(*EnvelopeBatch) error + CloseAndRecv() (*BatchSenderResponse, error) + grpc.ClientStream +} + +type ingressBatchSenderClient struct { + grpc.ClientStream +} + +func (x *ingressBatchSenderClient) Send(m *EnvelopeBatch) error { + return x.ClientStream.SendMsg(m) +} + +func (x *ingressBatchSenderClient) CloseAndRecv() (*BatchSenderResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(BatchSenderResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *ingressClient) Send(ctx context.Context, in *EnvelopeBatch, opts ...grpc.CallOption) (*SendResponse, error) { + out := new(SendResponse) + err := c.cc.Invoke(ctx, "/loggregator.v2.Ingress/Send", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IngressServer is the server API for Ingress service. 
+type IngressServer interface { + Sender(Ingress_SenderServer) error + BatchSender(Ingress_BatchSenderServer) error + Send(context.Context, *EnvelopeBatch) (*SendResponse, error) +} + +func RegisterIngressServer(s *grpc.Server, srv IngressServer) { + s.RegisterService(&_Ingress_serviceDesc, srv) +} + +func _Ingress_Sender_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IngressServer).Sender(&ingressSenderServer{stream}) +} + +type Ingress_SenderServer interface { + SendAndClose(*IngressResponse) error + Recv() (*Envelope, error) + grpc.ServerStream +} + +type ingressSenderServer struct { + grpc.ServerStream +} + +func (x *ingressSenderServer) SendAndClose(m *IngressResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *ingressSenderServer) Recv() (*Envelope, error) { + m := new(Envelope) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Ingress_BatchSender_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IngressServer).BatchSender(&ingressBatchSenderServer{stream}) +} + +type Ingress_BatchSenderServer interface { + SendAndClose(*BatchSenderResponse) error + Recv() (*EnvelopeBatch, error) + grpc.ServerStream +} + +type ingressBatchSenderServer struct { + grpc.ServerStream +} + +func (x *ingressBatchSenderServer) SendAndClose(m *BatchSenderResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *ingressBatchSenderServer) Recv() (*EnvelopeBatch, error) { + m := new(EnvelopeBatch) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Ingress_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EnvelopeBatch) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngressServer).Send(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/loggregator.v2.Ingress/Send", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngressServer).Send(ctx, req.(*EnvelopeBatch)) + } + return interceptor(ctx, in, info, handler) +} + +var _Ingress_serviceDesc = grpc.ServiceDesc{ + ServiceName: "loggregator.v2.Ingress", + HandlerType: (*IngressServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Send", + Handler: _Ingress_Send_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Sender", + Handler: _Ingress_Sender_Handler, + ClientStreams: true, + }, + { + StreamName: "BatchSender", + Handler: _Ingress_BatchSender_Handler, + ClientStreams: true, + }, + }, + Metadata: "ingress.proto", +} + +func init() { proto.RegisterFile("ingress.proto", fileDescriptor_ingress_1df30cb6e71681fb) } + +var fileDescriptor_ingress_1df30cb6e71681fb = []byte{ + // 208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0xcc, 0x4b, 0x2f, + 0x4a, 0x2d, 0x2e, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xcb, 0xc9, 0x4f, 0x4f, 0x2f, + 0x4a, 0x4d, 0x4f, 0x2c, 0xc9, 0x2f, 0xd2, 0x2b, 0x33, 0x92, 0xe2, 0x4b, 0xcd, 0x2b, 0x4b, 0xcd, + 0xc9, 0x2f, 0x48, 0x85, 0xc8, 0x2b, 0x09, 0x72, 0xf1, 0x7b, 0x42, 0x34, 0x04, 0xa5, 0x16, 0x17, + 0xe4, 0xe7, 0x15, 0xa7, 0x2a, 0x89, 0x72, 0x09, 0x3b, 0x25, 0x96, 0x24, 0x67, 0x04, 0xa7, 0xe6, + 0xa5, 0xa4, 0x16, 0xc1, 0x85, 0xf9, 0xb8, 0x78, 0x40, 0x22, 0x30, 0xbe, 0xd1, 0x07, 0x46, 0x2e, + 0x76, 0xa8, 0x56, 0x21, 0x77, 0x2e, 0x36, 0x88, 0x6a, 0x21, 0x09, 0x3d, 0x54, 0x0b, 0xf5, 0x5c, + 0xa1, 0xf6, 0x49, 0xc9, 0xa3, 0xcb, 0xa0, 0xdb, 0xcb, 0xa0, 0xc1, 0x28, 0x14, 0xca, 0xc5, 0x8d, + 0x64, 0xb7, 0x90, 0x2c, 0x2e, 0xd3, 0xc0, 0x8a, 0xa4, 0x94, 0xd1, 0xa5, 0xb1, 0xb9, 0x1b, 0x64, + 0xac, 0x2b, 0x17, 0x0b, 0x48, 0x94, 0x90, 0x79, 0x32, 0xe8, 0xd2, 0xc8, 0x1e, 0x56, 0x62, 0x70, + 0x32, 0xe5, 0x92, 0xcf, 0x2f, 0x4a, 0xd7, 0x4b, 0xce, 0xc9, 0x2f, 0x4d, 0x49, 0xcb, 0x2f, 0xcd, + 
0x4b, 0x29, 0xaa, 0x44, 0xd3, 0xe1, 0x24, 0xe4, 0x83, 0xe0, 0x43, 0x3d, 0x98, 0xc4, 0x06, 0x0e, + 0x6a, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x87, 0x97, 0x11, 0x79, 0x9b, 0x01, 0x00, 0x00, +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/syslog.go b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/syslog.go new file mode 100644 index 000000000000..c366471b2bd9 --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2/syslog.go @@ -0,0 +1,234 @@ +package loggregator_v2 + +import ( + "bytes" + fmt "fmt" + "strconv" + "time" + + "code.cloudfoundry.org/rfc5424" +) + +// 47450 is the registered enterprise ID for the Cloud Foundry Foundation. +// See: https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers +const ( + gaugeStructuredDataID = "gauge@47450" + counterStructuredDataID = "counter@47450" + timerStructuredDataID = "timer@47450" + tagsStructuredDataID = "tags@47450" +) + +type syslogConfig struct { + hostname string + appName string + processID string +} + +// SyslogOption configures the behavior of Envelope.Syslog. +type SyslogOption func(*syslogConfig) + +// WithSyslogHostname changes the hostname of the resulting syslog messages. +func WithSyslogHostname(hostname string) SyslogOption { + return func(c *syslogConfig) { + c.hostname = hostname + } +} + +// WithSyslogAppName changes the app name of the resulting syslog messages. +func WithSyslogAppName(appName string) SyslogOption { + return func(c *syslogConfig) { + c.appName = appName + } +} + +// WithSyslogProcessID changes the process id of the resulting syslog messages. +func WithSyslogProcessID(processID string) SyslogOption { + return func(c *syslogConfig) { + c.processID = processID + } +} + +// Syslog converts an envelope into RFC 5424 compliant syslog messages. 
+// Typically, this will be a one to one (envelope to syslog) but for certain +// envelope type such as gauges a single envelope maps to multiple syslog +// messages (one per gauge metric). +func (m *Envelope) Syslog(opts ...SyslogOption) ([][]byte, error) { + c := &syslogConfig{ + processID: m.InstanceId, + appName: m.SourceId, + } + + for _, o := range opts { + o(c) + } + + priority, err := m.generatePriority() + if err != nil { + return nil, err + } + + switch m.GetMessage().(type) { + case *Envelope_Log: + msg := m.basicSyslogMessage(c, priority) + msg.Message = appendNewline(removeNulls(m.GetLog().Payload)) + d, err := msg.MarshalBinary() + if err != nil { + return nil, err + } + return [][]byte{d}, nil + case *Envelope_Gauge: + metrics := m.GetGauge().GetMetrics() + messages := make([][]byte, 0, len(metrics)) + for name, g := range metrics { + msg := m.basicSyslogMessage(c, priority) + msg.StructuredData = append(msg.StructuredData, rfc5424.StructuredData{ + ID: gaugeStructuredDataID, + Parameters: []rfc5424.SDParam{ + { + Name: "name", + Value: name, + }, + { + Name: "value", + Value: strconv.FormatFloat(g.GetValue(), 'g', -1, 64), + }, + { + Name: "unit", + Value: g.GetUnit(), + }, + }, + }, + ) + d, err := msg.MarshalBinary() + if err != nil { + return nil, err + } + messages = append(messages, d) + } + return messages, nil + case *Envelope_Counter: + msg := m.basicSyslogMessage(c, priority) + msg.StructuredData = append(msg.StructuredData, rfc5424.StructuredData{ + ID: counterStructuredDataID, + Parameters: []rfc5424.SDParam{ + { + Name: "name", + Value: m.GetCounter().GetName(), + }, + { + Name: "total", + Value: fmt.Sprint(m.GetCounter().GetTotal()), + }, + { + Name: "delta", + Value: fmt.Sprint(m.GetCounter().GetDelta()), + }, + }, + }, + ) + d, err := msg.MarshalBinary() + if err != nil { + return nil, err + } + return [][]byte{d}, nil + case *Envelope_Event: + msg := m.basicSyslogMessage(c, priority) + msg.Message = []byte(fmt.Sprintf( + "%s: %s\n", 
+ m.GetEvent().GetTitle(), + m.GetEvent().GetBody(), + )) + d, err := msg.MarshalBinary() + if err != nil { + return nil, err + } + return [][]byte{d}, nil + case *Envelope_Timer: + msg := m.basicSyslogMessage(c, priority) + msg.StructuredData = append(msg.StructuredData, rfc5424.StructuredData{ + ID: timerStructuredDataID, + Parameters: []rfc5424.SDParam{ + { + Name: "name", + Value: m.GetTimer().GetName(), + }, + { + Name: "start", + Value: fmt.Sprint(m.GetTimer().GetStart()), + }, + { + Name: "stop", + Value: fmt.Sprint(m.GetTimer().GetStop()), + }, + }, + }, + ) + d, err := msg.MarshalBinary() + if err != nil { + return nil, err + } + return [][]byte{d}, nil + default: + msg := m.basicSyslogMessage(c, priority) + d, err := msg.MarshalBinary() + if err != nil { + return nil, err + } + return [][]byte{d}, nil + } +} + +func (m *Envelope) basicSyslogMessage( + c *syslogConfig, + priority rfc5424.Priority, +) rfc5424.Message { + msg := rfc5424.Message{ + Priority: priority, + Timestamp: time.Unix(0, m.GetTimestamp()).UTC(), + Hostname: c.hostname, + AppName: c.appName, + ProcessID: c.processID, + Message: []byte("\n"), + } + + tags := m.GetTags() + if len(tags) > 0 { + params := make([]rfc5424.SDParam, 0, len(tags)) + for k, v := range tags { + params = append(params, rfc5424.SDParam{Name: k, Value: v}) + } + msg.StructuredData = []rfc5424.StructuredData{ + { + ID: tagsStructuredDataID, + Parameters: params, + }, + } + } + + return msg +} + +func (m *Envelope) generatePriority() (rfc5424.Priority, error) { + if l := m.GetLog(); l != nil { + switch l.Type { + case Log_OUT: + return rfc5424.Info + rfc5424.User, nil + case Log_ERR: + return rfc5424.Error + rfc5424.User, nil + default: + return 0, fmt.Errorf("invalid log type: %s", l.Type) + } + } + return rfc5424.Info + rfc5424.User, nil +} + +func removeNulls(msg []byte) []byte { + return bytes.Replace(msg, []byte{0}, nil, -1) +} + +func appendNewline(msg []byte) []byte { + if !bytes.HasSuffix(msg, []byte("\n")) { + 
msg = append(msg, byte('\n')) + } + return msg +} diff --git a/vendor/code.cloudfoundry.org/go-loggregator/tls.go b/vendor/code.cloudfoundry.org/go-loggregator/tls.go new file mode 100644 index 000000000000..9fa0eed32aeb --- /dev/null +++ b/vendor/code.cloudfoundry.org/go-loggregator/tls.go @@ -0,0 +1,47 @@ +package loggregator + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "io/ioutil" +) + +// NewIngressTLSConfig provides a convenient means for creating a *tls.Config +// which uses the CA, cert, and key for the ingress endpoint. +func NewIngressTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + return newTLSConfig(caPath, certPath, keyPath, "metron") +} + +// NewEgressTLSConfig provides a convenient means for creating a *tls.Config +// which uses the CA, cert, and key for the egress endpoint. +func NewEgressTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + return newTLSConfig(caPath, certPath, keyPath, "reverselogproxy") +} + +func newTLSConfig(caPath, certPath, keyPath, cn string) (*tls.Config, error) { + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + ServerName: cn, + Certificates: []tls.Certificate{cert}, + InsecureSkipVerify: false, + } + + caCertBytes, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, err + } + + caCertPool := x509.NewCertPool() + if ok := caCertPool.AppendCertsFromPEM(caCertBytes); !ok { + return nil, errors.New("cannot parse ca cert") + } + + tlsConfig.RootCAs = caCertPool + + return tlsConfig, nil +} diff --git a/vendor/code.cloudfoundry.org/gofileutils/LICENSE b/vendor/code.cloudfoundry.org/gofileutils/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/code.cloudfoundry.org/gofileutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/code.cloudfoundry.org/gofileutils/NOTICE b/vendor/code.cloudfoundry.org/gofileutils/NOTICE new file mode 100644 index 000000000000..af7e5e66e198 --- /dev/null +++ b/vendor/code.cloudfoundry.org/gofileutils/NOTICE @@ -0,0 +1,11 @@ +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. + +This project is licensed to you under the Apache License, Version 2.0 (the "License"). + +You may not use this project except in compliance with the License. + +This project may include a number of subcomponents with separate copyright notices +and license terms. Your use of these subcomponents is subject to the terms and +conditions of the subcomponent's license, as noted in the LICENSE file. 
diff --git a/vendor/code.cloudfoundry.org/gofileutils/fileutils/dir_utils.go b/vendor/code.cloudfoundry.org/gofileutils/fileutils/dir_utils.go new file mode 100644 index 000000000000..a430217ec56c --- /dev/null +++ b/vendor/code.cloudfoundry.org/gofileutils/fileutils/dir_utils.go @@ -0,0 +1,20 @@ +package fileutils + +import ( + "os" +) + +func IsDirEmpty(dir string) (isEmpty bool, err error) { + dirFile, err := os.Open(dir) + if err != nil { + return + } + + _, readErr := dirFile.Readdirnames(1) + if readErr != nil { + isEmpty = true + } else { + isEmpty = false + } + return +} diff --git a/vendor/code.cloudfoundry.org/gofileutils/fileutils/file_utils.go b/vendor/code.cloudfoundry.org/gofileutils/fileutils/file_utils.go new file mode 100644 index 000000000000..ac9b6eb4e01f --- /dev/null +++ b/vendor/code.cloudfoundry.org/gofileutils/fileutils/file_utils.go @@ -0,0 +1,74 @@ +package fileutils + +import ( + "io" + "io/ioutil" + "os" + "path" + "path/filepath" +) + +func Open(path string) (file *os.File, err error) { + err = os.MkdirAll(filepath.Dir(path), os.ModeDir|os.ModePerm) + if err != nil { + return + } + + return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) +} + +func Create(path string) (file *os.File, err error) { + err = os.MkdirAll(filepath.Dir(path), os.ModeDir|os.ModePerm) + if err != nil { + return + } + + return os.Create(path) +} + +func CopyPathToPath(fromPath, toPath string) (err error) { + srcFileInfo, err := os.Stat(fromPath) + if err != nil { + return err + } + + if srcFileInfo.IsDir() { + err = os.MkdirAll(toPath, srcFileInfo.Mode()) + if err != nil { + return err + } + + files, err := ioutil.ReadDir(fromPath) + if err != nil { + return err + } + + for _, file := range files { + err = CopyPathToPath(path.Join(fromPath, file.Name()), path.Join(toPath, file.Name())) + if err != nil { + return err + } + } + } else { + var dst *os.File + dst, err = Create(toPath) + if err != nil { + return err + } + defer dst.Close() + + 
dst.Chmod(srcFileInfo.Mode()) + + src, err := os.Open(fromPath) + if err != nil { + return err + } + defer src.Close() + + _, err = io.Copy(dst, src) + if err != nil { + return err + } + } + return err +} diff --git a/vendor/code.cloudfoundry.org/gofileutils/fileutils/temp_utils.go b/vendor/code.cloudfoundry.org/gofileutils/fileutils/temp_utils.go new file mode 100644 index 000000000000..4961a8d40b7b --- /dev/null +++ b/vendor/code.cloudfoundry.org/gofileutils/fileutils/temp_utils.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "io/ioutil" + "os" +) + +func TempDir(namePrefix string, cb func(tmpDir string, err error)) { + tmpDir, err := ioutil.TempDir("", namePrefix) + + defer func() { + os.RemoveAll(tmpDir) + }() + + cb(tmpDir, err) +} + +func TempFile(namePrefix string, cb func(tmpFile *os.File, err error)) { + tmpFile, err := ioutil.TempFile("", namePrefix) + + defer func() { + tmpFile.Close() + os.Remove(tmpFile.Name()) + }() + + cb(tmpFile, err) +} diff --git a/vendor/code.cloudfoundry.org/rfc5424/LICENSE b/vendor/code.cloudfoundry.org/rfc5424/LICENSE new file mode 100644 index 000000000000..b4a8ea6d0774 --- /dev/null +++ b/vendor/code.cloudfoundry.org/rfc5424/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2016, Ross Kinder +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/code.cloudfoundry.org/rfc5424/README.md b/vendor/code.cloudfoundry.org/rfc5424/README.md new file mode 100644 index 000000000000..990975b59aa4 --- /dev/null +++ b/vendor/code.cloudfoundry.org/rfc5424/README.md @@ -0,0 +1,28 @@ + +[![Build Status](https://travis-ci.org/crewjam/rfc5424.png)](https://travis-ci.org/crewjam/rfc5424) + +[![](https://godoc.org/github.com/crewjam/rfc5424?status.png)](http://godoc.org/github.com/crewjam/rfc5424) + +This is a Go library that can read and write RFC-5424 syslog messages: + +Example usage: + + m := rfc5424.Message{ + Priority: rfc5424.Daemon | rfc5424.Info, + Timestamp: time.Now(), + Hostname: "myhostname", + AppName: "someapp", + Message: []byte("Hello, World!"), + } + m.AddDatum("foo@1234", "Revision", "1.2.3.4") + m.WriteTo(os.Stdout) + +Produces output like: + + 107 <7>1 2016-02-28T09:57:10.804642398-05:00 myhostname someapp - - [foo@1234 Revision="1.2.3.4"] Hello, World! 
+ +You can also use the library to parse syslog messages: + + m := rfc5424.Message{} + _, err := m.ReadFrom(os.Stdin) + fmt.Printf("%s\n", m.Message) diff --git a/vendor/code.cloudfoundry.org/rfc5424/marshal.go b/vendor/code.cloudfoundry.org/rfc5424/marshal.go new file mode 100644 index 000000000000..a2904887d5c0 --- /dev/null +++ b/vendor/code.cloudfoundry.org/rfc5424/marshal.go @@ -0,0 +1,179 @@ +package rfc5424 + +import ( + "bytes" + "fmt" + "unicode/utf8" +) + +// allowLongSdNames is true to allow names longer than the RFC-specified limit +// of 32-characters. (When true, this violates RFC-5424). +const allowLongSdNames = true + +// RFC5424TimeOffsetNum is the timestamp defined by RFC-5424 with the +// NUMOFFSET instead of Z. +const RFC5424TimeOffsetNum = "2006-01-02T15:04:05.999999-07:00" + +// RFC5424TimeOffsetUTC is the timestamp defined by RFC-5424 with the offset +// set to 0 for UTC. +const RFC5424TimeOffsetUTC = "2006-01-02T15:04:05.999999Z" + +// ErrInvalidValue is returned when a log message cannot be emitted because one +// of the values is invalid. 
+type ErrInvalidValue struct { + Property string + Value interface{} +} + +func (e ErrInvalidValue) Error() string { + return fmt.Sprintf("Message cannot be serialized because %s is invalid: %v", + e.Property, e.Value) +} + +// invalidValue returns an invalid value error with the given property +func invalidValue(property string, value interface{}) error { + return ErrInvalidValue{Property: property, Value: value} +} + +func nilify(x string) string { + if x == "" { + return "-" + } + return x +} + +func escapeSDParam(s string) string { + escapeCount := 0 + for i := 0; i < len(s); i++ { + switch s[i] { + case '\\', '"', ']': + escapeCount++ + } + } + if escapeCount == 0 { + return s + } + + t := make([]byte, len(s)+escapeCount) + j := 0 + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\\', '"', ']': + t[j] = '\\' + t[j+1] = c + j += 2 + default: + t[j] = s[i] + j++ + } + } + return string(t) +} + +func isPrintableUsASCII(s string) bool { + for _, ch := range s { + if ch < 33 || ch > 126 { + return false + } + } + return true +} + +func isValidSdName(s string) bool { + if !allowLongSdNames && len(s) > 32 { + return false + } + for _, ch := range s { + if ch < 33 || ch > 126 { + return false + } + if ch == '=' || ch == ']' || ch == '"' { + return false + } + } + return true +} + +func (m Message) assertValid() error { + + // HOSTNAME = NILVALUE / 1*255PRINTUSASCII + if !isPrintableUsASCII(m.Hostname) { + return invalidValue("Hostname", m.Hostname) + } + if len(m.Hostname) > 255 { + return invalidValue("Hostname", m.Hostname) + } + + // APP-NAME = NILVALUE / 1*48PRINTUSASCII + if !isPrintableUsASCII(m.AppName) { + return invalidValue("AppName", m.AppName) + } + if len(m.AppName) > 48 { + return invalidValue("AppName", m.AppName) + } + + // PROCID = NILVALUE / 1*128PRINTUSASCII + if !isPrintableUsASCII(m.ProcessID) { + return invalidValue("ProcessID", m.ProcessID) + } + if len(m.ProcessID) > 128 { + return invalidValue("ProcessID", m.ProcessID) + } + + // 
MSGID = NILVALUE / 1*32PRINTUSASCII + if !isPrintableUsASCII(m.MessageID) { + return invalidValue("MessageID", m.MessageID) + } + if len(m.MessageID) > 32 { + return invalidValue("MessageID", m.MessageID) + } + + for _, sdElement := range m.StructuredData { + if !isValidSdName(sdElement.ID) { + return invalidValue("StructuredData/ID", sdElement.ID) + } + for _, sdParam := range sdElement.Parameters { + if !isValidSdName(sdParam.Name) { + return invalidValue("StructuredData/Name", sdParam.Name) + } + if !utf8.ValidString(sdParam.Value) { + return invalidValue("StructuredData/Value", sdParam.Value) + } + } + } + return nil +} + +// MarshalBinary marshals the message to a byte slice, or returns an error +func (m Message) MarshalBinary() ([]byte, error) { + if err := m.assertValid(); err != nil { + return nil, err + } + + b := bytes.NewBuffer(nil) + fmt.Fprintf(b, "<%d>1 %s %s %s %s %s ", + m.Priority, + m.Timestamp.Format(RFC5424TimeOffsetNum), + nilify(m.Hostname), + nilify(m.AppName), + nilify(m.ProcessID), + nilify(m.MessageID)) + + if len(m.StructuredData) == 0 { + fmt.Fprint(b, "-") + } + for _, sdElement := range m.StructuredData { + fmt.Fprintf(b, "[%s", sdElement.ID) + for _, sdParam := range sdElement.Parameters { + fmt.Fprintf(b, " %s=\"%s\"", sdParam.Name, + escapeSDParam(sdParam.Value)) + } + fmt.Fprintf(b, "]") + } + + if len(m.Message) > 0 { + fmt.Fprint(b, " ") + b.Write(m.Message) + } + return b.Bytes(), nil +} diff --git a/vendor/code.cloudfoundry.org/rfc5424/message.go b/vendor/code.cloudfoundry.org/rfc5424/message.go new file mode 100644 index 000000000000..a6bbec5e6df4 --- /dev/null +++ b/vendor/code.cloudfoundry.org/rfc5424/message.go @@ -0,0 +1,76 @@ +// Pacakge rfc5424 is a library for parsing and serializing RFC-5424 structured +// syslog messages. 
+// +// Example usage: +// +// m := rfc5424.Message{ +// Priority: rfc5424.Daemon | rfc5424.Info, +// Timestamp: time.Now(), +// Hostname: "myhostname", +// AppName: "someapp", +// Message: []byte("Hello, World!"), +// } +// m.AddDatum("foo@1234", "Revision", "1.2.3.4") +// m.WriteTo(os.Stdout) +// +// Produces output like: +// +// 107 <7>1 2016-02-28T09:57:10.804642398-05:00 myhostname someapp - - [foo@1234 Revision="1.2.3.4"] Hello, World! +// +// You can also use the library to parse syslog messages: +// +// m := rfc5424.Message{} +// _, err := m.ReadFrom(os.Stdin) +// fmt.Printf("%s\n", m.Message) +package rfc5424 + +import "time" + +// Message represents a log message as defined by RFC-5424 +// (https://tools.ietf.org/html/rfc5424) +type Message struct { + Priority Priority + Timestamp time.Time + Hostname string + AppName string + ProcessID string + MessageID string + StructuredData []StructuredData + Message []byte +} + +// SDParam represents parameters for structured data +type SDParam struct { + Name string + Value string +} + +// StructuredData represents structured data within a log message +type StructuredData struct { + ID string + Parameters []SDParam +} + +// AddDatum adds structured data to a log message +func (m *Message) AddDatum(ID string, Name string, Value string) { + if m.StructuredData == nil { + m.StructuredData = []StructuredData{} + } + for i, sd := range m.StructuredData { + if sd.ID == ID { + sd.Parameters = append(sd.Parameters, SDParam{Name: Name, Value: Value}) + m.StructuredData[i] = sd + return + } + } + + m.StructuredData = append(m.StructuredData, StructuredData{ + ID: ID, + Parameters: []SDParam{ + { + Name: Name, + Value: Value, + }, + }, + }) +} diff --git a/vendor/code.cloudfoundry.org/rfc5424/rfc5424.go b/vendor/code.cloudfoundry.org/rfc5424/rfc5424.go new file mode 100644 index 000000000000..e143293352ee --- /dev/null +++ b/vendor/code.cloudfoundry.org/rfc5424/rfc5424.go @@ -0,0 +1,40 @@ +package rfc5424 + +const 
severityMask = 0x07 +const facilityMask = 0xf8 + +type Priority int + +const ( + Emergency Priority = iota + Alert + Crit + Error + Warning + Notice + Info + Debug +) + +const ( + Kern Priority = iota << 3 + User + Mail + Daemon + Auth + Syslog + Lpr + News + Uucp + Cron + Authpriv + Ftp + Local0 + Local1 + Local2 + Local3 + Local4 + Local5 + Local6 + Local7 +) diff --git a/vendor/code.cloudfoundry.org/rfc5424/stream.go b/vendor/code.cloudfoundry.org/rfc5424/stream.go new file mode 100644 index 000000000000..9cc56cae41ec --- /dev/null +++ b/vendor/code.cloudfoundry.org/rfc5424/stream.go @@ -0,0 +1,63 @@ +package rfc5424 + +import ( + "fmt" + "io" + "io/ioutil" + "strconv" +) + +// WriteTo writes the message to a stream of messages in the style defined +// by RFC-5425. (It does not implement the TLS stuff described in the RFC, just +// the length delimiting. +func (m Message) WriteTo(w io.Writer) (int64, error) { + b, err := m.MarshalBinary() + if err != nil { + return 0, err + } + n, err := fmt.Fprintf(w, "%d %s", len(b), b) + + return int64(n), err +} + +func readUntilSpace(r io.Reader) ([]byte, int, error) { + buf := []byte{} + nbytes := 0 + for { + b := []byte{0} + n, err := r.Read(b) + nbytes += n + if err != nil { + return nil, nbytes, err + } + if b[0] == ' ' { + return buf, nbytes, nil + } + buf = append(buf, b...) 
+ } +} + +// ReadFrom reads a single record from an RFC-5425 style stream of messages +func (m *Message) ReadFrom(r io.Reader) (int64, error) { + lengthBuf, n1, err := readUntilSpace(r) + if err != nil { + return 0, err + } + length, err := strconv.Atoi(string(lengthBuf)) + if err != nil { + return 0, err + } + r2 := io.LimitReader(r, int64(length)) + buf, err := ioutil.ReadAll(r2) + if err != nil { + return int64(n1 + len(buf)), err + } + if len(buf) != int(length) { + return int64(n1 + len(buf)), fmt.Errorf("Expected to read %d bytes, got %d", length, len(buf)) + } + err = m.UnmarshalBinary(buf) + if err != nil { + return 0, err + } + return int64(n1 + len(buf)), err +} diff --git a/vendor/code.cloudfoundry.org/rfc5424/unmarshal.go b/vendor/code.cloudfoundry.org/rfc5424/unmarshal.go new file mode 100644 index 000000000000..000e24c5cdca --- /dev/null +++ b/vendor/code.cloudfoundry.org/rfc5424/unmarshal.go @@ -0,0 +1,452 @@ +package rfc5424 + +import ( + "bytes" + "fmt" + "io" + "strconv" + "time" + "unicode" +) + +// ErrBadFormat is the error that is returned when a log message cannot be parsed +type ErrBadFormat struct { + Property string +} + +func (e ErrBadFormat) Error() string { + return fmt.Sprintf("Message cannot be unmarshaled because it is not well formed (%s)", + e.Property) +} + +// badFormat returns a bad format error with the given property +func badFormat(property string) error { + return ErrBadFormat{Property: property} +} + +// UnmarshalBinary unmarshals a byte slice into a message +func (m *Message) UnmarshalBinary(inputBuffer []byte) error { + r := bytes.NewBuffer(inputBuffer) + + // RFC-5424 + // SYSLOG-MSG = HEADER SP STRUCTURED-DATA [SP MSG] + if err := m.readHeader(r); err != nil { + return err + } + + if err := readSpace(r); err != nil { + return err // unreachable + } + if err := m.readStructuredData(r); err != nil { + return err + } + + // MSG is optional + ch, _, err := r.ReadRune() + if err == io.EOF { + return nil + } else if ch != ' ' 
{ + return badFormat("MSG") // unreachable + } + + // TODO(ross): detect and handle UTF-8 BOM (\xef\xbb\xbf) + // + // MSG = MSG-ANY / MSG-UTF8 + // MSG-ANY = *OCTET ; not starting with BOM + // MSG-UTF8 = BOM UTF-8-STRING + // BOM = %xEF.BB.BF + + // To be on the safe side, remaining stuff is copied over + m.Message = copyFrom(r.Bytes()) + return nil +} + +// readHeader reads a HEADER as defined in RFC-5424 +// +// HEADER = PRI VERSION SP TIMESTAMP SP HOSTNAME +// SP APP-NAME SP PROCID SP MSGID +// PRI = "<" PRIVAL ">" +// PRIVAL = 1*3DIGIT ; range 0 .. 191 +// VERSION = NONZERO-DIGIT 0*2DIGIT +// HOSTNAME = NILVALUE / 1*255PRINTUSASCII +// +// APP-NAME = NILVALUE / 1*48PRINTUSASCII +// PROCID = NILVALUE / 1*128PRINTUSASCII +// MSGID = NILVALUE / 1*32PRINTUSASCII +// +// TIMESTAMP = NILVALUE / FULL-DATE "T" FULL-TIME +// FULL-DATE = DATE-FULLYEAR "-" DATE-MONTH "-" DATE-MDAY +// DATE-FULLYEAR = 4DIGIT +// DATE-MONTH = 2DIGIT ; 01-12 +// DATE-MDAY = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on +// ; month/year +// FULL-TIME = PARTIAL-TIME TIME-OFFSET +// PARTIAL-TIME = TIME-HOUR ":" TIME-MINUTE ":" TIME-SECOND +// [TIME-SECFRAC] +// TIME-HOUR = 2DIGIT ; 00-23 +// TIME-MINUTE = 2DIGIT ; 00-59 +// TIME-SECOND = 2DIGIT ; 00-59 +// TIME-SECFRAC = "." 
1*6DIGIT +// TIME-OFFSET = "Z" / TIME-NUMOFFSET +// TIME-NUMOFFSET = ("+" / "-") TIME-HOUR ":" TIME-MINUTE +// +func (m *Message) readHeader(r io.RuneScanner) error { + if err := m.readPriority(r); err != nil { + return err + } + if err := m.readVersion(r); err != nil { + return err + } + if err := readSpace(r); err != nil { + return err // unreachable + } + if err := m.readTimestamp(r); err != nil { + return err + } + if err := readSpace(r); err != nil { + return err // unreachable + } + if err := m.readHostname(r); err != nil { + return err + } + if err := readSpace(r); err != nil { + return err // unreachable + } + if err := m.readAppName(r); err != nil { + return err + } + if err := readSpace(r); err != nil { + return err // unreachable + } + if err := m.readProcID(r); err != nil { + return err + } + if err := readSpace(r); err != nil { + return err // unreachable + } + if err := m.readMsgID(r); err != nil { + return err + } + return nil +} + +// readPriority reads the PRI as defined in RFC-5424 and assigns Severity and +// Facility accordingly. 
+func (m *Message) readPriority(r io.RuneScanner) error { + ch, _, err := r.ReadRune() + if err != nil { + return err + } + if ch != '<' { + return badFormat("Priority") + } + + rv := &bytes.Buffer{} + for { + ch, _, err := r.ReadRune() + if err != nil { + return err + } + if unicode.IsDigit(ch) { + rv.WriteRune(ch) + continue + } + if ch != '>' { + return badFormat("Priority") + } + + // We have a complete integer expression + priority, err := strconv.ParseInt(string(rv.Bytes()), 10, 32) + if err != nil { + return badFormat("Priority") + } + m.Priority = Priority(priority) + return nil + } +} + +// readVersion reads the version string fails if it isn't `1` +func (m *Message) readVersion(r io.RuneScanner) error { + ch, _, err := r.ReadRune() + if err != nil { + return err + } + if ch != '1' { + return badFormat("Version") + } + return nil +} + +// readTimestamp reads a TIMESTAMP as defined in RFC-5424 and assigns +// m.Timestamp +// +// TIMESTAMP = NILVALUE / FULL-DATE "T" FULL-TIME +// FULL-DATE = DATE-FULLYEAR "-" DATE-MONTH "-" DATE-MDAY +// DATE-FULLYEAR = 4DIGIT +// DATE-MONTH = 2DIGIT ; 01-12 +// DATE-MDAY = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on +// ; month/year +// FULL-TIME = PARTIAL-TIME TIME-OFFSET +// PARTIAL-TIME = TIME-HOUR ":" TIME-MINUTE ":" TIME-SECOND +// [TIME-SECFRAC] +// TIME-HOUR = 2DIGIT ; 00-23 +// TIME-MINUTE = 2DIGIT ; 00-59 +// TIME-SECOND = 2DIGIT ; 00-59 +// TIME-SECFRAC = "." 
1*6DIGIT +// TIME-OFFSET = "Z" / TIME-NUMOFFSET +// TIME-NUMOFFSET = ("+" / "-") TIME-HOUR ":" TIME-MINUTE +func (m *Message) readTimestamp(r io.RuneScanner) error { + timestampString, err := readWord(r) + if err != nil { + return err + } + + m.Timestamp, err = time.Parse(RFC5424TimeOffsetNum, timestampString) + if err == nil { + return nil + } + + m.Timestamp, err = time.Parse(RFC5424TimeOffsetUTC, timestampString) + if err == nil { + return nil + } + + return err +} + +func (m *Message) readHostname(r io.RuneScanner) (err error) { + m.Hostname, err = readWord(r) + return err +} + +func (m *Message) readAppName(r io.RuneScanner) (err error) { + m.AppName, err = readWord(r) + return err +} + +func (m *Message) readProcID(r io.RuneScanner) (err error) { + m.ProcessID, err = readWord(r) + return err +} + +func (m *Message) readMsgID(r io.RuneScanner) (err error) { + m.MessageID, err = readWord(r) + return err +} + +// readStructuredData reads a STRUCTURED-DATA (as defined in RFC-5424) +// from `r` and assigns the StructuredData member. +// +// STRUCTURED-DATA = NILVALUE / 1*SD-ELEMENT +// SD-ELEMENT = "[" SD-ID *(SP SD-PARAM) "]" +// SD-PARAM = PARAM-NAME "=" %d34 PARAM-VALUE %d34 +// SD-ID = SD-NAME +// PARAM-NAME = SD-NAME +// PARAM-VALUE = UTF-8-STRING ; characters '"', '\' and ']' MUST be escaped. 
+// SD-NAME = 1*32PRINTUSASCII except '=', SP, ']', %d34 (") +func (m *Message) readStructuredData(r io.RuneScanner) (err error) { + m.StructuredData = []StructuredData{} + + ch, _, err := r.ReadRune() + if err != nil { + return err + } + if ch == '-' { + return nil + } + r.UnreadRune() + + for { + ch, _, err := r.ReadRune() + if err == io.EOF { + return nil + } else if err != nil { + return err // hard to reach without underlying IO error + } else if ch == ' ' { + r.UnreadRune() + return nil + } else if ch == '[' { + r.UnreadRune() + sde, err := readSDElement(r) + if err != nil { + return err + } + m.StructuredData = append(m.StructuredData, sde) + } else { + return badFormat("StructuredData") + } + } +} + +// readSDElement reads an SD-ELEMENT as defined by RFC-5424 +// +// SD-ELEMENT = "[" SD-ID *(SP SD-PARAM) "]" +// SD-PARAM = PARAM-NAME "=" %d34 PARAM-VALUE %d34 +// SD-ID = SD-NAME +// PARAM-NAME = SD-NAME +// PARAM-VALUE = UTF-8-STRING ; characters '"', '\' and ']' MUST be escaped. 
+// SD-NAME = 1*32PRINTUSASCII except '=', SP, ']', %d34 (") +func readSDElement(r io.RuneScanner) (element StructuredData, err error) { + ch, _, err := r.ReadRune() + if err != nil { + return element, err // hard to reach without underlying IO error + } + if ch != '[' { + return element, badFormat("StructuredData[]") // unreachable + } + element.ID, err = readSdID(r) + if err != nil { + return element, err + } + for { + ch, _, err := r.ReadRune() + if err != nil { + return element, err + } else if ch == ']' { + return element, nil + } else if ch == ' ' { + param, err := readSdParam(r) + if err != nil { + return element, err + } + element.Parameters = append(element.Parameters, *param) + } else { + return element, badFormat("StructuredData[]") + } + } +} + +// readSDID reads an SD-ID as defined by RFC-5424 +// SD-ID = SD-NAME +// SD-NAME = 1*32PRINTUSASCII except '=', SP, ']', %d34 (") +func readSdID(r io.RuneScanner) (string, error) { + rv := &bytes.Buffer{} + for { + ch, _, err := r.ReadRune() + if err != nil { + return "", err + } + if ch == ' ' || ch == ']' { + r.UnreadRune() + return string(rv.Bytes()), nil + } + rv.WriteRune(ch) + } +} + +// readSdParam reads an SD-PARAM as defined by RFC-5424 +// SD-PARAM = PARAM-NAME "=" %d34 PARAM-VALUE %d34 +// SD-ID = SD-NAME +// PARAM-NAME = SD-NAME +// PARAM-VALUE = UTF-8-STRING ; characters '"', '\' and ']' MUST be escaped. 
+// SD-NAME = 1*32PRINTUSASCII except '=', SP, ']', %d34 (") +func readSdParam(r io.RuneScanner) (sdp *SDParam, err error) { + sdp = &SDParam{} + sdp.Name, err = readSdParamName(r) + if err != nil { + return nil, err + } + ch, _, err := r.ReadRune() + if err != nil { + return nil, err // hard to reach + } + if ch != '=' { + return nil, badFormat("StructuredData[].Parameters") // not reachable + } + + sdp.Value, err = readSdParamValue(r) + if err != nil { + return nil, err + } + return sdp, nil +} + +// readSdParam reads a PARAM-NAME as defined by RFC-5424 +// SD-PARAM = PARAM-NAME "=" %d34 PARAM-VALUE %d34 +// PARAM-NAME = SD-NAME +// SD-NAME = 1*32PRINTUSASCII except '=', SP, ']', %d34 (") +func readSdParamName(r io.RuneScanner) (string, error) { + rv := &bytes.Buffer{} + for { + ch, _, err := r.ReadRune() + if err != nil { + return "", err + } + if ch == '=' { + r.UnreadRune() + return string(rv.Bytes()), nil + } + rv.WriteRune(ch) + } +} + +// readSdParamValue reads an PARAM-VALUE as defined by RFC-5424 +// SD-PARAM = PARAM-NAME "=" %d34 PARAM-VALUE %d34 +// PARAM-VALUE = UTF-8-STRING ; characters '"', '\' and ']' MUST be escaped. 
+func readSdParamValue(r io.RuneScanner) (string, error) { + ch, _, err := r.ReadRune() + if err != nil { + return "", err + } + if ch != '"' { + return "", badFormat("StructuredData[].Parameters[]") // hard to reach + } + + rv := &bytes.Buffer{} + for { + ch, _, err := r.ReadRune() + if err != nil { + return "", err + } + if ch == '\\' { + ch, _, err := r.ReadRune() + if err != nil { + return "", err + } + rv.WriteRune(ch) + continue + } + if ch == '"' { + return string(rv.Bytes()), nil + } + rv.WriteRune(ch) + } +} + +// readSpace reads a single space +func readSpace(r io.RuneScanner) error { + ch, _, err := r.ReadRune() + if err != nil { + return err + } + if ch != ' ' { + return badFormat("expected space") + } + return nil +} + +// readWord reads `r` until it encounters a space (0x20) +func readWord(r io.RuneScanner) (string, error) { + rv := &bytes.Buffer{} + for { + ch, _, err := r.ReadRune() + if err != nil { + return "", err + } else if ch != ' ' { + rv.WriteRune(ch) + } else { + r.UnreadRune() + rvString := string(rv.Bytes()) + if rvString == "-" { + rvString = "" + } + return rvString, nil + } + } +} + +func copyFrom(in []byte) []byte { + out := make([]byte, len(in)) + copy(out, in) + return out +} diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md new file mode 100644 index 000000000000..083c7349672b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -0,0 +1,178 @@ +# Changelog + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. 
The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. 
+ +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. 
See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. 
+ +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt new file mode 100644 index 000000000000..9ff7da9c48b6 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile new file mode 100644 index 000000000000..eac19178fbd1 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/Makefile @@ -0,0 +1,37 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint +GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build +GOFUZZ = $(GOPATH)/bin/go-fuzz + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: $(GOFUZZBUILD) $(GOFUZZ) + @echo "==> Fuzz testing" + $(GOFUZZBUILD) + $(GOFUZZ) -workdir=_fuzz + +$(GOLANGCI_LINT): + # Install golangci-lint. The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 + +$(GOFUZZBUILD): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build + +$(GOFUZZ): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md new file mode 100644 index 000000000000..d8f54dcbd3c6 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/README.md @@ -0,0 +1,244 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + +## Package Versions + +There are three major versions fo the `semver` package. + +* 3.x.x is the new stable and active version. This version is focused on constraint + compatibility for range handling in other tools from other languages. It has + a similar API to the v1 releases. The development of this version is on the master + branch. The documentation for this version is below. +* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are + no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). + There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). +* 1.x.x is the most widely used version with numerous tagged releases. This is the + previous stable and is still maintained for bug fixes. The development, to fix + bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). + +## Parsing Semantic Versions + +There are two functions that can parse semantic versions. 
The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an error is returned if there is an issue parsing the +version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. Getting the original string is useful if the semantic version was coerced +into a valid form. + +## Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + +```go +raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} +vs := make([]*semver.Version, len(raw)) +for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v +} + +sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other uses `Constraints`. There are some important +differences to notes between these two methods of comparison. + +1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer that is valid with the + comparison section of the spec at https://semver.org/#spec-item-11 +2. 
When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include one. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthand use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods or checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns while PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patters with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go +c, err := semver.NewConstraint(">= 1.2.3") +if err != nil { + // Handle constraint not being parsable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parsable. +} +// Check if the version meets the constraints. The a variable will be true. +a := c.Check(v) +``` + +### Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of space or comma separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. 
+ +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +### Working With Prerelease Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of prereleases include +development, alpha, beta, and release candidate releases. A prerelease may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, prereleases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification prereleases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons using constraints without a prerelease comparator will skip +prerelease versions. For example, `>=1.2.3` will skip prereleases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the +spec. The lowest character is a `0` in ASCII sort order +(see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +### Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. 
+These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +### Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the patch level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +### Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +### Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` +* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` +* `^0.2` is equivalent to `>=0.2.0 <0.3.0` +* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` +* `^0.0` is equivalent to `>=0.0.0 <0.1.0` +* `^0` is equivalent to `>=0.0.0 <1.0.0` + +## Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. 
For example, + +```go +c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") +if err != nil { + // Handle constraint not being parseable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parseable. +} + +// Validate a version against a constraint. +a, msgs := c.Validate(v) +// a is false +for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" +} +``` + +## Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go new file mode 100644 index 000000000000..a78235895fdc --- /dev/null +++ b/vendor/github.com/Masterminds/semver/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. 
+func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go new file mode 100644 index 000000000000..a57669f44db0 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/constraints.go @@ -0,0 +1,530 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + + // TODO: Find a way to validate and fetch all the constraints in a simpler form + + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if !c.check(v) { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. 
+func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if !c.check(v) { + em := fmt.Errorf(constraintMsg[c.origfunc], v, c.orig) + e = append(e, em) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +var constraintOps map[string]cfunc +var constraintMsg map[string]string +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": 
constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + constraintMsg = map[string]string{ + "": "%s is not equal to %s", + "=": "%s is not equal to %s", + "!=": "%s is equal to %s", + ">": "%s is less than or equal to %s", + "<": "%s is greater than or equal to %s", + ">=": "%s is less than %s", + "=>": "%s is less than %s", + "<=": "%s is greater than %s", + "=<": "%s is greater than %s", + "~": "%s does not have same major and minor version as %s", + "~>": "%s does not have same major and minor version as %s", + "^": "%s does not have same major version as %s", + } + + ops := make([]string, 0, len(constraintOps)) + for k := range constraintOps { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + strings.Join(ops, "|"), + cvRegex)) + + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*\,?)+$`, + strings.Join(ops, "|"), + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. 
+ con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) bool { + return constraintOps[c.origfunc](v, c) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint) bool + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. 
+ return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) bool { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.con.Major() != v.Major() { + return true + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true + } else if c.minorDirty { + return false + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + return comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + } + return false + } + } + + return !v.Equal(c.con) +} + +func constraintGreaterThan(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) == 1 + } + + if v.Major() > c.con.Major() { + return true + } else if v.Major() < c.con.Major() { + return false + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 is it not > 11. For that we would need 12 or higher + return false + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater + // which one of 11.2.1 is greater + return v.Minor() > c.con.Minor() + } + + // If we have gotten here we are not comparing pre-preleases and can use the + // Compare function to accomplish that. 
+ return v.Compare(c.con) == 1 +} + +func constraintLessThan(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) < 0 +} + +func constraintGreaterThanEqual(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) >= 0 +} + +func constraintLessThanEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) <= 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + // ~0.0.0 is a special case where all constraints are accepted. 
It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true + } + + if v.Major() != c.con.Major() { + return false + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.dirty { + return constraintTilde(v, c) + } + + return v.Equal(c.con) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false + } + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. 
+ return v.Major() == c.con.Major() + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + return v.Minor() == c.con.Minor() + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + return c.con.Patch() == v.Patch() +} + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go new file mode 100644 index 000000000000..391aa46b76df --- /dev/null +++ b/vendor/github.com/Masterminds/semver/doc.go @@ -0,0 +1,184 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + * Parse semantic versions + * Sort semantic versions + * Check if a semantic version fits within a set of constraints + * Optionally work with a `v` prefix + +Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. 
+ +When parsing a version an optional error can be returned if there is an issue +parsing the version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+b345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. For more details please see the documentation +at https://godoc.org/github.com/Masterminds/semver. + +Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +Checking Version Constraints and Comparing Versions + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other is using Constraints. There are some important +differences to notes between these two methods of comparison. + +1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer valid with the comparison + spec section at https://semver.org/#spec-item-11 +2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include on. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthard use of + ~ and ^. For more details on those see the options below. 
+ +There are differences between the two methods or checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns which PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patters with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parsable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parsable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) + +Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma or space separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. This can also be written as +`">= 1.2, < 3.0.0 || >= 4.2.3"` + +The basic comparisons are: + + * `=`: equal (aliased to no operator) + * `!=`: not equal + * `>`: greater than + * `<`: less than + * `>=`: greater than or equal to + * `<=`: less than or equal to + +Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + + * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. 
This works +for all comparison operators. When used on the `=` operator it falls +back to the tilde operation. For example, + + * `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + * `>= 1.2.x` is equivalent to `>= 1.2.0` + * `<= 2.x` is equivalent to `<= 3` + * `*` is equivalent to `>= 0.0.0` + +Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + + * `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` + * `~1` is equivalent to `>= 1, < 2` + * `~2.3` is equivalent to `>= 2.3 < 2.4` + * `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + * `~1.x` is equivalent to `>= 1 < 2` + +Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. For example, + + * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + * `^2.3` is equivalent to `>= 2.3, < 3` + * `^2.x` is equivalent to `>= 2.0.0, < 3` + * `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` + * `^0.2` is equivalent to `>=0.2.0 <0.3.0` + * `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` + * `^0.0` is equivalent to `>=0.0.0 <0.1.0` + * `^0` is equivalent to `>=0.0.0 <1.0.0` + +Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. 
+ a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/fuzz.go b/vendor/github.com/Masterminds/semver/fuzz.go new file mode 100644 index 000000000000..a242ad70587c --- /dev/null +++ b/vendor/github.com/Masterminds/semver/fuzz.go @@ -0,0 +1,22 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + d := string(data) + + // Test NewVersion + _, _ = NewVersion(d) + + // Test StrictNewVersion + _, _ = StrictNewVersion(d) + + // Test NewConstraint + _, _ = NewConstraint(d) + + // The return value should be 0 normally, 1 if the priority in future tests + // should be increased, and -1 if future tests should skip passing in that + // data. We do not have a reason to change priority so 0 is always returned. + // There are example tests that do this. + return 0 +} diff --git a/vendor/github.com/Masterminds/semver/go.mod b/vendor/github.com/Masterminds/semver/go.mod new file mode 100644 index 000000000000..658233c8f012 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/go.mod @@ -0,0 +1,3 @@ +module github.com/Masterminds/semver/v3 + +go 1.12 diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go new file mode 100644 index 000000000000..1bb95f2635dc --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version.go @@ -0,0 +1,583 @@ +package semver + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. 
+ ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrEmptyString is returned when an empty string is passed in for parsing. + ErrEmptyString = errors.New("Version string empty") + + // ErrInvalidCharacters is returned when invalid characters are found as + // part of a version + ErrInvalidCharacters = errors.New("Invalid characters in version") + + // ErrSegmentStartsZero is returned when a version segment starts with 0. + // This is invalid in SemVer. + ErrSegmentStartsZero = errors.New("Version segment starts with 0") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// semVerRegex is the regular expression used to parse a semantic version. +const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") +} + +const num string = "0123456789" +const allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version, such as 1 or 1.2, and perse that as the 1.x +// releases of semver provided use the NewSemver() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. 
+ + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // check for prerelease or build metadata + var extra []string + if strings.ContainsAny(parts[2], "-+") { + // Start with the build metadata first as it needs to be on the right + extra = strings.SplitN(parts[2], "+", 2) + if len(extra) > 1 { + // build metadata found + sv.metadata = extra[1] + parts[2] = extra[0] + } + + extra = strings.SplitN(parts[2], "-", 2) + if len(extra) > 1 { + // prerelease found + sv.pre = extra[1] + parts[2] = extra[0] + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. 
If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. 
+func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. 
+ if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. 
+func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. 
+func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func compareSegment(v, o uint64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. 
+ + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." 
+func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/Gopkg.lock b/vendor/github.com/cloudfoundry-community/go-cfclient/Gopkg.lock new file mode 100644 index 000000000000..0d18c2e0bea4 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/Gopkg.lock @@ -0,0 +1,174 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/Masterminds/semver" + packages = ["."] + revision = "c7af12943936e8c39859482e61f0574c2fd7fc75" + version = "v1.4.2" + +[[projects]] + branch = "master" + name = "github.com/cloudfoundry/gofileutils" + packages = ["fileutils"] + revision = "4d0c80011a0f37da1711c184028bc40137cd45af" + +[[projects]] + name = "github.com/codegangsta/inject" + packages = ["."] + revision = "37d7f8432a3e684eef9b2edece76bdfa6ac85b39" + version = "v1.0-rc1" + +[[projects]] + name = "github.com/go-martini/martini" + packages = ["."] + revision = "49411a5b646861ad29a6ddd5351717a0a9c49b94" + version = "v1.0" + +[[projects]] + branch = "master" + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845" + +[[projects]] + branch = "master" + name = "github.com/gopherjs/gopherjs" + packages = ["js"] + revision = "444abdf920945de5d4a977b572bcc6c674d1e4eb" + +[[projects]] + name = "github.com/jtolds/gls" + packages = ["."] + revision = "77f18212c9c7edc9bd6a33d383a7b545ce62f064" + version = "v4.2.1" + +[[projects]] + branch = "master" + name = "github.com/martini-contrib/render" + packages = ["."] + revision = "ec18f8345a1181146728238980606fb1d6f40e8c" + +[[projects]] + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + 
"internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types" + ] + revision = "c893efa28eb45626cdaa76c9f653b62488858837" + version = "v1.2.0" + +[[projects]] + branch = "master" + name = "github.com/oxtoacart/bpool" + packages = ["."] + revision = "4e1c5567d7c2dd59fa4c7c83d34c2f3528b025d6" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + name = "github.com/smartystreets/assertions" + packages = [ + ".", + "internal/go-render/render", + "internal/oglematchers" + ] + revision = "ff1918e1e5a13a74014644ae7c1e0ba2f791364d" + version = "1.8.0" + +[[projects]] + name = "github.com/smartystreets/goconvey" + packages = [ + "convey", + "convey/gotest", + "convey/reporting" + ] + revision = "9e8dc3f972df6c8fcc0375ef492c24d0bb204857" + version = "1.6.3" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "html", + "html/atom", + "html/charset" + ] + revision = "9dfe39835686865bff950a07b394c12a98ddc811" + +[[projects]] + branch = "master" + name = "golang.org/x/oauth2" + packages = [ + ".", + "clientcredentials", + "internal" + ] + revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28" + +[[projects]] + branch = "master" + name = "golang.org/x/text" + packages = [ + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", + "internal/gen", + "internal/tag", + "internal/utf8internal", + "language", + "runes", + "transform", + "unicode/cldr" + ] + revision = "88f656faf3f37f690df1a32515b479415e1a6769" + +[[projects]] + name = "google.golang.org/appengine" + packages = [ + "internal", + 
"internal/base", + "internal/datastore", + "internal/log", + "internal/remote_api", + "internal/urlfetch", + "urlfetch" + ] + revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" + version = "v1.0.0" + +[[projects]] + branch = "v2" + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "287cf08546ab5e7e37d55a84f7ed3fd1db036de5" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "2e35689146470eb531e3645c63fb933ad86066e63f57021c20e592e25299a02b" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/Gopkg.toml b/vendor/github.com/cloudfoundry-community/go-cfclient/Gopkg.toml new file mode 100644 index 000000000000..5b0d1610cf29 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/Gopkg.toml @@ -0,0 +1,58 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/go-martini/martini" + version = "1.0.0" + +[[constraint]] + branch = "master" + name = "github.com/martini-contrib/render" + +[[constraint]] + name = "github.com/onsi/gomega" + version = "1.2.0" + +[[constraint]] + name = "github.com/pkg/errors" + version = "0.8.0" + +[[constraint]] + name = "github.com/smartystreets/goconvey" + version = "1.6.3" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" + +[[constraint]] + branch = "master" + name = "golang.org/x/oauth2" + +[[constraint]] + branch = "v2" + name = "gopkg.in/yaml.v2" + +[[constraint]] + name = 
"github.com/Masterminds/semver" + version = "1.4.2" diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/LICENSE b/vendor/github.com/cloudfoundry-community/go-cfclient/LICENSE new file mode 100644 index 000000000000..cb2ec6c50df7 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2017 Long Nguyen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/README.md b/vendor/github.com/cloudfoundry-community/go-cfclient/README.md new file mode 100644 index 000000000000..efae2d8806c8 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/README.md @@ -0,0 +1,59 @@ +# go-cfclient +[![Travis-CI](https://travis-ci.org/cloudfoundry-community/go-cfclient.svg)](https://travis-ci.org/cloudfoundry-community/go-cfclient) +[![GoDoc](https://godoc.org/github.com/cloudfoundry-community/go-cfclient?status.svg)](http://godoc.org/github.com/cloudfoundry-community/go-cfclient) +[![Report card](https://goreportcard.com/badge/github.com/cloudfoundry-community/go-cfclient)](https://goreportcard.com/report/github.com/cloudfoundry-community/go-cfclient) + +### Overview + +`cfclient` is a package to assist you in writing apps that need to interact with [Cloud Foundry](http://cloudfoundry.org). +It provides functions and structures to retrieve and update + + +### Usage + +``` +go get github.com/cloudfoundry-community/go-cfclient +``` + +NOTE: Currently this project is not versioning its releases and so breaking changes might be introduced. +Whilst hopefully notifications of breaking changes are made via commit messages, ideally your project will use a local +vendoring system to lock in a version of `go-cfclient` that is known to work for you. +This will allow you to control the timing and maintenance of upgrades to newer versions of this library. + +Some example code: + +```go +package main + +import ( + "github.com/cloudfoundry-community/go-cfclient" +) + +func main() { + c := &cfclient.Config{ + ApiAddress: "https://api.10.244.0.34.xip.io", + Username: "admin", + Password: "admin", + } + client, _ := cfclient.NewClient(c) + apps, _ := client.ListApps() + fmt.Println(apps) +} +``` + +### Development + +#### Errors + +If the Cloud Foundry error definitions change at +then the error predicate functions in this package need to be regenerated. 
+ +To do this, simply use Go to regenerate the code: + +``` +go generate +``` + +### Contributing + +Pull requests welcome. diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/app_update.go b/vendor/github.com/cloudfoundry-community/go-cfclient/app_update.go new file mode 100644 index 000000000000..ef023228abc1 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/app_update.go @@ -0,0 +1,107 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" +) + +type UpdateResponse struct { + Metadata Meta `json:"metadata"` + Entity UpdateResponseEntity `json:"entity"` +} +type AppUpdateResource struct { + Name string `json:"name,omitempty"` + Memory int `json:"memory,omitempty"` + Instances int `json:"instances,omitempty"` + DiskQuota int `json:"disk_quota,omitempty"` + SpaceGuid string `json:"space_guid,omitempty"` + StackGuid string `json:"stack_guid,omitempty"` + State AppState `json:"state,omitempty"` + Command string `json:"command,omitempty"` + Buildpack string `json:"buildpack,omitempty"` + HealthCheckHttpEndpoint string `json:"health_check_http_endpoint,omitempty"` + HealthCheckType string `json:"health_check_type,omitempty"` + HealthCheckTimeout int `json:"health_check_timeout,omitempty"` + Diego bool `json:"diego,omitempty"` + EnableSSH bool `json:"enable_ssh,omitempty"` + DockerImage string `json:"docker_image,omitempty"` + DockerCredentials map[string]interface{} `json:"docker_credentials_json,omitempty"` + Environment map[string]interface{} `json:"environment_json,omitempty"` + StagingFailedReason string `json:"staging_failed_reason,omitempty"` + StagingFailedDescription string `json:"staging_failed_description,omitempty"` + Ports []int `json:"ports,omitempty"` +} + +type UpdateResponseEntity struct { + Name string `json:"name"` + Production bool `json:"production"` + SpaceGuid string `json:"space_guid"` + StackGuid string `json:"stack_guid"` + Buildpack string `json:"buildpack"` + 
DetectedBuildpack string `json:"detected_buildpack"` + DetectedBuildpackGuid string `json:"detected_buildpack_guid"` + Environment map[string]interface{} `json:"environment_json"` + Memory int `json:"memory"` + Instances int `json:"instances"` + DiskQuota int `json:"disk_quota"` + State string `json:"state"` + Version string `json:"version"` + Command string `json:"command"` + Console bool `json:"console"` + Debug string `json:"debug"` + StagingTaskId string `json:"staging_task_id"` + PackageState string `json:"package_state"` + HealthCheckHttpEndpoint string `json:"health_check_http_endpoint"` + HealthCheckType string `json:"health_check_type"` + HealthCheckTimeout int `json:"health_check_timeout"` + StagingFailedReason string `json:"staging_failed_reason"` + StagingFailedDescription string `json:"staging_failed_description"` + Diego bool `json:"diego,omitempty"` + DockerImage string `json:"docker_image"` + DockerCredentials struct { + Username string `json:"username"` + Password string `json:"password"` + } `json:"docker_credentials"` + PackageUpdatedAt string `json:"package_updated_at"` + DetectedStartCommand string `json:"detected_start_command"` + EnableSSH bool `json:"enable_ssh"` + Ports []int `json:"ports"` + SpaceURL string `json:"space_url"` + StackURL string `json:"stack_url"` + RoutesURL string `json:"routes_url"` + EventsURL string `json:"events_url"` + ServiceBindingsUrl string `json:"service_bindings_url"` + RouteMappingsUrl string `json:"route_mappings_url"` +} + +func (c *Client) UpdateApp(guid string, aur AppUpdateResource) (UpdateResponse, error) { + var updateResponse UpdateResponse + + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(aur) + if err != nil { + return UpdateResponse{}, err + } + req := c.NewRequestWithBody("PUT", fmt.Sprintf("/v2/apps/%s", guid), buf) + resp, err := c.DoRequest(req) + if err != nil { + return UpdateResponse{}, err + } + if resp.StatusCode != http.StatusCreated { + return UpdateResponse{}, 
fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return UpdateResponse{}, err + } + err = json.Unmarshal(body, &updateResponse) + if err != nil { + return UpdateResponse{}, err + } + return updateResponse, nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/app_usage_events.go b/vendor/github.com/cloudfoundry-community/go-cfclient/app_usage_events.go new file mode 100644 index 000000000000..227c4fe32421 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/app_usage_events.go @@ -0,0 +1,80 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/pkg/errors" +) + +type AppUsageEvent struct { + GUID string `json:"guid"` + CreatedAt string `json:"created_at"` + State string `json:"state"` + PreviousState string `json:"previous_state"` + MemoryInMbPerInstance int `json:"memory_in_mb_per_instance"` + PreviousMemoryInMbPerInstance int `json:"previous_memory_in_mb_per_instance"` + InstanceCount int `json:"instance_count"` + PreviousInstanceCount int `json:"previous_instance_count"` + AppGUID string `json:"app_guid"` + SpaceGUID string `json:"space_guid"` + SpaceName string `json:"space_name"` + OrgGUID string `json:"org_guid"` + BuildpackGUID string `json:"buildpack_guid"` + BuildpackName string `json:"buildpack_name"` + PackageState string `json:"package_state"` + PreviousPackageState string `json:"previous_package_state"` + ParentAppGUID string `json:"parent_app_guid"` + ParentAppName string `json:"parent_app_name"` + ProcessType string `json:"process_type"` + TaskName string `json:"task_name"` + TaskGUID string `json:"task_guid"` + c *Client +} + +type AppUsageEventsResponse struct { + TotalResults int `json:"total_results"` + Pages int `json:"total_pages"` + NextURL string `json:"next_url"` + Resources []AppUsageEventResource `json:"resources"` +} + +type AppUsageEventResource struct { + 
Meta Meta `json:"metadata"` + Entity AppUsageEvent `json:"entity"` +} + +// ListAppUsageEventsByQuery lists all events matching the provided query. +func (c *Client) ListAppUsageEventsByQuery(query url.Values) ([]AppUsageEvent, error) { + var appUsageEvents []AppUsageEvent + requestURL := fmt.Sprintf("/v2/app_usage_events?%s", query.Encode()) + for { + var appUsageEventsResponse AppUsageEventsResponse + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "error requesting events") + } + defer resp.Body.Close() + if err := json.NewDecoder(resp.Body).Decode(&appUsageEventsResponse); err != nil { + return nil, errors.Wrap(err, "error unmarshaling events") + } + for _, e := range appUsageEventsResponse.Resources { + e.Entity.GUID = e.Meta.Guid + e.Entity.CreatedAt = e.Meta.CreatedAt + e.Entity.c = c + appUsageEvents = append(appUsageEvents, e.Entity) + } + requestURL = appUsageEventsResponse.NextURL + if requestURL == "" { + break + } + } + return appUsageEvents, nil +} + +// ListAppUsageEvents lists all unfiltered events. 
+func (c *Client) ListAppUsageEvents() ([]AppUsageEvent, error) { + return c.ListAppUsageEventsByQuery(nil) +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/appevents.go b/vendor/github.com/cloudfoundry-community/go-cfclient/appevents.go new file mode 100644 index 000000000000..8784c62e79a1 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/appevents.go @@ -0,0 +1,182 @@ +package cfclient + +import ( + "encoding/json" + "io/ioutil" + "time" + + "github.com/pkg/errors" +) + +const ( + //AppCrash app.crash event const + AppCrash = "app.crash" + //AppStart audit.app.start event const + AppStart = "audit.app.start" + //AppStop audit.app.stop event const + AppStop = "audit.app.stop" + //AppUpdate audit.app.update event const + AppUpdate = "audit.app.update" + //AppCreate audit.app.create event const + AppCreate = "audit.app.create" + //AppDelete audit.app.delete-request event const + AppDelete = "audit.app.delete-request" + //AppSSHAuth audit.app.ssh-authorized event const + AppSSHAuth = "audit.app.ssh-authorized" + //AppSSHUnauth audit.app.ssh-unauthorized event const + AppSSHUnauth = "audit.app.ssh-unauthorized" + //AppRestage audit.app.restage event const + AppRestage = "audit.app.restage" + //AppMapRoute audit.app.map-route event const + AppMapRoute = "audit.app.map-route" + //AppUnmapRoute audit.app.unmap-route event const + AppUnmapRoute = "audit.app.unmap-route" + //FilterTimestamp const for query filter timestamp + FilterTimestamp = "timestamp" + //FilterActee const for query filter actee + FilterActee = "actee" +) + +//ValidOperators const for all valid operators in a query +var ValidOperators = []string{":", ">=", "<=", "<", ">", "IN"} + +// AppEventResponse the entire response +type AppEventResponse struct { + Results int `json:"total_results"` + Pages int `json:"total_pages"` + PrevURL string `json:"prev_url"` + NextURL string `json:"next_url"` + Resources []AppEventResource `json:"resources"` +} + +// 
AppEventResource the event resources +type AppEventResource struct { + Meta Meta `json:"metadata"` + Entity AppEventEntity `json:"entity"` +} + +//AppEventQuery a struct for defining queries like 'q=filter>value' or 'q=filter IN a,b,c' +type AppEventQuery struct { + Filter string + Operator string + Value string +} + +// The AppEventEntity the actual app event body +type AppEventEntity struct { + //EventTypes are app.crash, audit.app.start, audit.app.stop, audit.app.update, audit.app.create, audit.app.delete-request + EventType string `json:"type"` + //The GUID of the actor. + Actor string `json:"actor"` + //The actor type, user or app + ActorType string `json:"actor_type"` + //The name of the actor. + ActorName string `json:"actor_name"` + //The GUID of the actee. + Actee string `json:"actee"` + //The actee type, space, app or v3-app + ActeeType string `json:"actee_type"` + //The name of the actee. + ActeeName string `json:"actee_name"` + //Timestamp format "2016-02-26T13:29:44Z". The event creation time. 
+ Timestamp time.Time `json:"timestamp"` + MetaData struct { + //app.crash event fields + ExitDescription string `json:"exit_description,omitempty"` + ExitReason string `json:"reason,omitempty"` + ExitStatus string `json:"exit_status,omitempty"` + + Request struct { + Name string `json:"name,omitempty"` + Instances float64 `json:"instances,omitempty"` + State string `json:"state,omitempty"` + Memory float64 `json:"memory,omitempty"` + EnvironmentVars string `json:"environment_json,omitempty"` + DockerCredentials string `json:"docker_credentials_json,omitempty"` + //audit.app.create event fields + Console bool `json:"console,omitempty"` + Buildpack string `json:"buildpack,omitempty"` + Space string `json:"space_guid,omitempty"` + HealthcheckType string `json:"health_check_type,omitempty"` + HealthcheckTimeout float64 `json:"health_check_timeout,omitempty"` + Production bool `json:"production,omitempty"` + //app.crash event fields + Index float64 `json:"index,omitempty"` + } `json:"request"` + } `json:"metadata"` +} + +// ListAppEvents returns all app events based on eventType +func (c *Client) ListAppEvents(eventType string) ([]AppEventEntity, error) { + return c.ListAppEventsByQuery(eventType, nil) +} + +// ListAppEventsByQuery returns all app events based on eventType and queries +func (c *Client) ListAppEventsByQuery(eventType string, queries []AppEventQuery) ([]AppEventEntity, error) { + var events []AppEventEntity + + if eventType != AppCrash && eventType != AppStart && eventType != AppStop && eventType != AppUpdate && eventType != AppCreate && + eventType != AppDelete && eventType != AppSSHAuth && eventType != AppSSHUnauth && eventType != AppRestage && + eventType != AppMapRoute && eventType != AppUnmapRoute { + return nil, errors.New("Unsupported app event type " + eventType) + } + + var query = "/v2/events?q=type:" + eventType + //adding the additional queries + if queries != nil && len(queries) > 0 { + for _, eventQuery := range queries { + if 
eventQuery.Filter != FilterTimestamp && eventQuery.Filter != FilterActee { + return nil, errors.New("Unsupported query filter type " + eventQuery.Filter) + } + if !stringInSlice(eventQuery.Operator, ValidOperators) { + return nil, errors.New("Unsupported query operator type " + eventQuery.Operator) + } + query += "&q=" + eventQuery.Filter + eventQuery.Operator + eventQuery.Value + } + } + + for { + eventResponse, err := c.getAppEventsResponse(query) + if err != nil { + return []AppEventEntity{}, err + } + for _, event := range eventResponse.Resources { + events = append(events, event.Entity) + } + query = eventResponse.NextURL + if query == "" { + break + } + } + + return events, nil +} + +func (c *Client) getAppEventsResponse(query string) (AppEventResponse, error) { + var eventResponse AppEventResponse + r := c.NewRequest("GET", query) + resp, err := c.DoRequest(r) + if err != nil { + return AppEventResponse{}, errors.Wrap(err, "Error requesting appevents") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return AppEventResponse{}, errors.Wrap(err, "Error reading appevents response body") + } + + err = json.Unmarshal(resBody, &eventResponse) + if err != nil { + return AppEventResponse{}, errors.Wrap(err, "Error unmarshalling appevent") + } + return eventResponse, nil +} + +func stringInSlice(str string, list []string) bool { + for _, v := range list { + if v == str { + return true + } + } + return false +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/apps.go b/vendor/github.com/cloudfoundry-community/go-cfclient/apps.go new file mode 100644 index 000000000000..f80ecaf89cb1 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/apps.go @@ -0,0 +1,685 @@ +package cfclient + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" +) + +type 
AppResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []AppResource `json:"resources"` +} + +type AppResource struct { + Meta Meta `json:"metadata"` + Entity App `json:"entity"` +} + +type AppState string + +const ( + APP_STOPPED AppState = "STOPPED" + APP_STARTED AppState = "STARTED" +) + +type HealthCheckType string + +const ( + HEALTH_HTTP HealthCheckType = "http" + HEALTH_PORT HealthCheckType = "port" + HEALTH_PROCESS HealthCheckType = "process" +) + +type DockerCredentials struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` +} + +type AppCreateRequest struct { + Name string `json:"name"` + SpaceGuid string `json:"space_guid"` + // Memory for the app, in MB + Memory int `json:"memory,omitempty"` + // Instances to startup + Instances int `json:"instances,omitempty"` + // Disk quota in MB + DiskQuota int `json:"disk_quota,omitempty"` + StackGuid string `json:"stack_guid,omitempty"` + // Desired state of the app. Either "STOPPED" or "STARTED" + State AppState `json:"state,omitempty"` + // Command to start an app + Command string `json:"command,omitempty"` + // Buildpack to build the app. Three options: + // 1. Blank for autodetection + // 2. GIT url + // 3. Name of an installed buildpack + Buildpack string `json:"buildpack,omitempty"` + // Endpoint to check if an app is healthy + HealthCheckHttpEndpoint string `json:"health_check_http_endpoint,omitempty"` + // How to check if an app is healthy. 
Defaults to HEALTH_PORT if not specified + HealthCheckType HealthCheckType `json:"health_check_type,omitempty"` + Diego bool `json:"diego,omitempty"` + EnableSSH bool `json:"enable_ssh,omitempty"` + DockerImage string `json:"docker_image,omitempty"` + DockerCredentials DockerCredentials `json:"docker_credentials,omitempty"` + Environment map[string]interface{} `json:"environment_json,omitempty"` +} + +type App struct { + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Name string `json:"name"` + Memory int `json:"memory"` + Instances int `json:"instances"` + DiskQuota int `json:"disk_quota"` + SpaceGuid string `json:"space_guid"` + StackGuid string `json:"stack_guid"` + State string `json:"state"` + PackageState string `json:"package_state"` + Command string `json:"command"` + Buildpack string `json:"buildpack"` + DetectedBuildpack string `json:"detected_buildpack"` + DetectedBuildpackGuid string `json:"detected_buildpack_guid"` + HealthCheckHttpEndpoint string `json:"health_check_http_endpoint"` + HealthCheckType string `json:"health_check_type"` + HealthCheckTimeout int `json:"health_check_timeout"` + Diego bool `json:"diego"` + EnableSSH bool `json:"enable_ssh"` + DetectedStartCommand string `json:"detected_start_command"` + DockerImage string `json:"docker_image"` + DockerCredentials map[string]interface{} `json:"docker_credentials_json"` + Environment map[string]interface{} `json:"environment_json"` + StagingFailedReason string `json:"staging_failed_reason"` + StagingFailedDescription string `json:"staging_failed_description"` + Ports []int `json:"ports"` + SpaceURL string `json:"space_url"` + SpaceData SpaceResource `json:"space"` + PackageUpdatedAt string `json:"package_updated_at"` + c *Client +} + +type AppInstance struct { + State string `json:"state"` + Since sinceTime `json:"since"` +} + +type AppStats struct { + State string `json:"state"` + Stats struct { + Name string `json:"name"` + Uris 
[]string `json:"uris"` + Host string `json:"host"` + Port int `json:"port"` + Uptime int `json:"uptime"` + MemQuota int `json:"mem_quota"` + DiskQuota int `json:"disk_quota"` + FdsQuota int `json:"fds_quota"` + Usage struct { + Time statTime `json:"time"` + CPU float64 `json:"cpu"` + Mem int `json:"mem"` + Disk int `json:"disk"` + } `json:"usage"` + } `json:"stats"` +} + +type AppSummary struct { + Guid string `json:"guid"` + Name string `json:"name"` + ServiceCount int `json:"service_count"` + RunningInstances int `json:"running_instances"` + SpaceGuid string `json:"space_guid"` + StackGuid string `json:"stack_guid"` + Buildpack string `json:"buildpack"` + DetectedBuildpack string `json:"detected_buildpack"` + Environment map[string]interface{} `json:"environment_json"` + Memory int `json:"memory"` + Instances int `json:"instances"` + DiskQuota int `json:"disk_quota"` + State string `json:"state"` + Command string `json:"command"` + PackageState string `json:"package_state"` + HealthCheckType string `json:"health_check_type"` + HealthCheckTimeout int `json:"health_check_timeout"` + StagingFailedReason string `json:"staging_failed_reason"` + StagingFailedDescription string `json:"staging_failed_description"` + Diego bool `json:"diego"` + DockerImage string `json:"docker_image"` + DetectedStartCommand string `json:"detected_start_command"` + EnableSSH bool `json:"enable_ssh"` + DockerCredentials map[string]interface{} `json:"docker_credentials_json"` +} + +type AppEnv struct { + // These can have arbitrary JSON so need to map to interface{} + Environment map[string]interface{} `json:"environment_json"` + StagingEnv map[string]interface{} `json:"staging_env_json"` + RunningEnv map[string]interface{} `json:"running_env_json"` + SystemEnv map[string]interface{} `json:"system_env_json"` + ApplicationEnv map[string]interface{} `json:"application_env_json"` +} + +// Custom time types to handle non-RFC3339 formatting in API JSON + +type sinceTime struct { + time.Time +} + 
+func (s *sinceTime) UnmarshalJSON(b []byte) (err error) { + timeFlt, err := strconv.ParseFloat(string(b), 64) + if err != nil { + return err + } + time := time.Unix(int64(timeFlt), 0) + *s = sinceTime{time} + return nil +} + +func (s sinceTime) ToTime() time.Time { + t, _ := time.Parse(time.UnixDate, s.Format(time.UnixDate)) + return t +} + +type statTime struct { + time.Time +} + +func (s *statTime) UnmarshalJSON(b []byte) (err error) { + timeString, err := strconv.Unquote(string(b)) + if err != nil { + return err + } + + possibleFormats := [...]string{time.RFC3339, time.RFC3339Nano, "2006-01-02 15:04:05 -0700", "2006-01-02 15:04:05 MST"} + + var value time.Time + for _, possibleFormat := range possibleFormats { + if value, err = time.Parse(possibleFormat, timeString); err == nil { + *s = statTime{value} + return nil + } + } + + return fmt.Errorf("%s was not in any of the expected Date Formats %v", timeString, possibleFormats) +} + +func (s statTime) ToTime() time.Time { + t, _ := time.Parse(time.UnixDate, s.Format(time.UnixDate)) + return t +} + +func (a *App) Space() (Space, error) { + var spaceResource SpaceResource + r := a.c.NewRequest("GET", a.SpaceURL) + resp, err := a.c.DoRequest(r) + if err != nil { + return Space{}, errors.Wrap(err, "Error requesting space") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return Space{}, errors.Wrap(err, "Error reading space response") + } + + err = json.Unmarshal(resBody, &spaceResource) + if err != nil { + return Space{}, errors.Wrap(err, "Error unmarshalling body") + } + return a.c.mergeSpaceResource(spaceResource), nil +} + +func (a *App) Summary() (AppSummary, error) { + var appSummary AppSummary + requestUrl := fmt.Sprintf("/v2/apps/%s/summary", a.Guid) + r := a.c.NewRequest("GET", requestUrl) + resp, err := a.c.DoRequest(r) + if err != nil { + return AppSummary{}, errors.Wrap(err, "Error requesting app summary") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer 
resp.Body.Close() + if err != nil { + return AppSummary{}, errors.Wrap(err, "Error reading app summary body") + } + err = json.Unmarshal(resBody, &appSummary) + if err != nil { + return AppSummary{}, errors.Wrap(err, "Error unmarshalling app summary") + } + return appSummary, nil +} + +// ListAppsByQueryWithLimits queries totalPages app info. When totalPages is +// less and equal than 0, it queries all app info +// When there are no more than totalPages apps on server side, all apps info will be returned +func (c *Client) ListAppsByQueryWithLimits(query url.Values, totalPages int) ([]App, error) { + return c.listApps("/v2/apps?"+query.Encode(), totalPages) +} + +func (c *Client) ListAppsByQuery(query url.Values) ([]App, error) { + return c.listApps("/v2/apps?"+query.Encode(), -1) +} + +// GetAppByGuidNoInlineCall will fetch app info including space and orgs information +// Without using inline-relations-depth=2 call +func (c *Client) GetAppByGuidNoInlineCall(guid string) (App, error) { + var appResource AppResource + r := c.NewRequest("GET", "/v2/apps/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return App{}, errors.Wrap(err, "Error requesting apps") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return App{}, errors.Wrap(err, "Error reading app response body") + } + + err = json.Unmarshal(resBody, &appResource) + if err != nil { + return App{}, errors.Wrap(err, "Error unmarshalling app") + } + app := c.mergeAppResource(appResource) + + // If no Space Information no need to check org. 
+ if app.SpaceGuid != "" { + //Getting Spaces Resource + space, err := app.Space() + if err != nil { + errors.Wrap(err, "Unable to get the Space for the apps "+app.Name) + } else { + app.SpaceData.Entity = space + + } + + //Getting orgResource + org, err := app.SpaceData.Entity.Org() + if err != nil { + errors.Wrap(err, "Unable to get the Org for the apps "+app.Name) + } else { + app.SpaceData.Entity.OrgData.Entity = org + } + } + + return app, nil +} + +func (c *Client) ListApps() ([]App, error) { + q := url.Values{} + q.Set("inline-relations-depth", "2") + return c.ListAppsByQuery(q) +} + +func (c *Client) ListAppsByRoute(routeGuid string) ([]App, error) { + return c.listApps(fmt.Sprintf("/v2/routes/%s/apps", routeGuid), -1) +} + +func (c *Client) listApps(requestUrl string, totalPages int) ([]App, error) { + pages := 0 + apps := []App{} + for { + var appResp AppResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + + if err != nil { + return nil, errors.Wrap(err, "Error requesting apps") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading app request") + } + + err = json.Unmarshal(resBody, &appResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling app") + } + for _, app := range appResp.Resources { + apps = append(apps, c.mergeAppResource(app)) + } + + requestUrl = appResp.NextUrl + if requestUrl == "" { + break + } + + pages += 1 + if totalPages > 0 && pages >= totalPages { + break + } + } + return apps, nil +} + +func (c *Client) GetAppInstances(guid string) (map[string]AppInstance, error) { + var appInstances map[string]AppInstance + + requestURL := fmt.Sprintf("/v2/apps/%s/instances", guid) + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting app instances") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil 
{ + return nil, errors.Wrap(err, "Error reading app instances") + } + err = json.Unmarshal(resBody, &appInstances) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling app instances") + } + return appInstances, nil +} + +func (c *Client) GetAppEnv(guid string) (AppEnv, error) { + var appEnv AppEnv + + requestURL := fmt.Sprintf("/v2/apps/%s/env", guid) + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return appEnv, errors.Wrap(err, "Error requesting app env") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return appEnv, errors.Wrap(err, "Error reading app env") + } + err = json.Unmarshal(resBody, &appEnv) + if err != nil { + return appEnv, errors.Wrap(err, "Error unmarshalling app env") + } + return appEnv, nil +} + +func (c *Client) GetAppRoutes(guid string) ([]Route, error) { + return c.fetchRoutes(fmt.Sprintf("/v2/apps/%s/routes", guid)) +} + +func (c *Client) GetAppStats(guid string) (map[string]AppStats, error) { + var appStats map[string]AppStats + + requestURL := fmt.Sprintf("/v2/apps/%s/stats", guid) + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting app stats") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading app stats") + } + err = json.Unmarshal(resBody, &appStats) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling app stats") + } + return appStats, nil +} + +func (c *Client) KillAppInstance(guid string, index string) error { + requestURL := fmt.Sprintf("/v2/apps/%s/instances/%s", guid, index) + r := c.NewRequest("DELETE", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return errors.Wrapf(err, "Error stopping app %s at index %s", guid, index) + } + defer resp.Body.Close() + if resp.StatusCode != 204 { + return errors.Wrapf(err, "Error stopping app 
%s at index %s", guid, index) + } + return nil +} + +func (c *Client) GetAppByGuid(guid string) (App, error) { + var appResource AppResource + r := c.NewRequest("GET", "/v2/apps/"+guid+"?inline-relations-depth=2") + resp, err := c.DoRequest(r) + if err != nil { + return App{}, errors.Wrap(err, "Error requesting apps") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return App{}, errors.Wrap(err, "Error reading app response body") + } + + err = json.Unmarshal(resBody, &appResource) + if err != nil { + return App{}, errors.Wrap(err, "Error unmarshalling app") + } + return c.mergeAppResource(appResource), nil +} + +func (c *Client) AppByGuid(guid string) (App, error) { + return c.GetAppByGuid(guid) +} + +//AppByName takes an appName, and GUIDs for a space and org, and performs +// the API lookup with those query parameters set to return you the desired +// App object. +func (c *Client) AppByName(appName, spaceGuid, orgGuid string) (app App, err error) { + query := url.Values{} + query.Add("q", fmt.Sprintf("organization_guid:%s", orgGuid)) + query.Add("q", fmt.Sprintf("space_guid:%s", spaceGuid)) + query.Add("q", fmt.Sprintf("name:%s", appName)) + apps, err := c.ListAppsByQuery(query) + if err != nil { + return + } + if len(apps) == 0 { + err = fmt.Errorf("No app found with name: `%s` in space with GUID `%s` and org with GUID `%s`", appName, spaceGuid, orgGuid) + return + } + app = apps[0] + return +} + +// UploadAppBits uploads the application's contents +func (c *Client) UploadAppBits(file io.Reader, appGUID string) error { + requestFile, err := ioutil.TempFile("", "requests") + + defer func() { + requestFile.Close() + os.Remove(requestFile.Name()) + }() + + writer := multipart.NewWriter(requestFile) + err = writer.WriteField("resources", "[]") + if err != nil { + return errors.Wrapf(err, "Error uploading app %s bits", appGUID) + } + + part, err := writer.CreateFormFile("application", "application.zip") + if err != nil { 
+ return errors.Wrapf(err, "Error uploading app %s bits", appGUID) + } + + _, err = io.Copy(part, file) + if err != nil { + return errors.Wrapf(err, "Error uploading app %s bits, failed to copy all bytes", appGUID) + } + + err = writer.Close() + if err != nil { + return errors.Wrapf(err, "Error uploading app %s bits, failed to close multipart writer", appGUID) + } + + requestFile.Seek(0, 0) + fileStats, err := requestFile.Stat() + if err != nil { + return errors.Wrapf(err, "Error uploading app %s bits, failed to get temp file stats", appGUID) + } + + requestURL := fmt.Sprintf("/v2/apps/%s/bits", appGUID) + r := c.NewRequestWithBody("PUT", requestURL, requestFile) + req, err := r.toHTTP() + if err != nil { + return errors.Wrapf(err, "Error uploading app %s bits", appGUID) + } + + req.ContentLength = fileStats.Size() + contentType := fmt.Sprintf("multipart/form-data; boundary=%s", writer.Boundary()) + req.Header.Set("Content-Type", contentType) + + resp, err := c.Do(req) + if err != nil { + return errors.Wrapf(err, "Error uploading app %s bits", appGUID) + } + if resp.StatusCode != http.StatusCreated { + return errors.Wrapf(err, "Error uploading app %s bits, response code: %d", appGUID, resp.StatusCode) + } + + return nil +} + +// GetAppBits downloads the application's bits as a tar file +func (c *Client) GetAppBits(guid string) (io.ReadCloser, error) { + requestURL := fmt.Sprintf("/v2/apps/%s/download", guid) + req := c.NewRequest("GET", requestURL) + resp, err := c.DoRequestWithoutRedirects(req) + if err != nil { + return nil, errors.Wrapf(err, "Error downloading app %s bits, API request failed", guid) + } + if isResponseRedirect(resp) { + // directly download the bits from blobstore using a non cloud controller transport + // some blobstores will return a 400 if an Authorization header is sent + blobStoreLocation := resp.Header.Get("Location") + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: c.Config.SkipSslValidation}, + } + client := 
&http.Client{Transport: tr} + resp, err = client.Get(blobStoreLocation) + if err != nil { + return nil, errors.Wrapf(err, "Error downloading app %s bits from blobstore", guid) + } + } else { + return nil, errors.Wrapf(err, "Error downloading app %s bits, expected redirect to blobstore", guid) + } + return resp.Body, nil +} + +// GetDropletBits downloads the application's droplet bits as a tar file +func (c *Client) GetDropletBits(guid string) (io.ReadCloser, error) { + requestURL := fmt.Sprintf("/v2/apps/%s/droplet/download", guid) + req := c.NewRequest("GET", requestURL) + resp, err := c.DoRequestWithoutRedirects(req) + if err != nil { + return nil, errors.Wrapf(err, "Error downloading droplet %s bits, API request failed", guid) + } + if isResponseRedirect(resp) { + // directly download the bits from blobstore using a non cloud controller transport + // some blobstores will return a 400 if an Authorization header is sent + blobStoreLocation := resp.Header.Get("Location") + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: c.Config.SkipSslValidation}, + } + client := &http.Client{Transport: tr} + resp, err = client.Get(blobStoreLocation) + if err != nil { + return nil, errors.Wrapf(err, "Error downloading droplet %s bits from blobstore", guid) + } + } else { + return nil, errors.Wrapf(err, "Error downloading droplet %s bits, expected redirect to blobstore", guid) + } + return resp.Body, nil +} + +// CreateApp creates a new empty application that still needs it's +// app bit uploaded and to be started +func (c *Client) CreateApp(req AppCreateRequest) (App, error) { + var appResp AppResource + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return App{}, err + } + r := c.NewRequestWithBody("POST", "/v2/apps", buf) + resp, err := c.DoRequest(r) + if err != nil { + return App{}, errors.Wrapf(err, "Error creating app %s", req.Name) + } + if resp.StatusCode != http.StatusCreated { + return App{}, 
errors.Wrapf(err, "Error creating app %s, response code: %d", req.Name, resp.StatusCode) + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return App{}, errors.Wrapf(err, "Error reading app %s http response body", req.Name) + } + err = json.Unmarshal(resBody, &appResp) + if err != nil { + return App{}, errors.Wrapf(err, "Error deserializing app %s response", req.Name) + } + return c.mergeAppResource(appResp), nil +} + +func (c *Client) StartApp(guid string) error { + startRequest := strings.NewReader(`{ "state": "STARTED" }`) + resp, err := c.DoRequest(c.NewRequestWithBody("PUT", fmt.Sprintf("/v2/apps/%s", guid), startRequest)) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error starting app %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) StopApp(guid string) error { + stopRequest := strings.NewReader(`{ "state": "STOPPED" }`) + resp, err := c.DoRequest(c.NewRequestWithBody("PUT", fmt.Sprintf("/v2/apps/%s", guid), stopRequest)) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error stopping app %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) DeleteApp(guid string) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/apps/%s", guid))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting app %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) mergeAppResource(app AppResource) App { + app.Entity.Guid = app.Meta.Guid + app.Entity.CreatedAt = app.Meta.CreatedAt + app.Entity.UpdatedAt = app.Meta.UpdatedAt + app.Entity.SpaceData.Entity.Guid = app.Entity.SpaceData.Meta.Guid + app.Entity.SpaceData.Entity.OrgData.Entity.Guid = app.Entity.SpaceData.Entity.OrgData.Meta.Guid + app.Entity.c = c + return 
app.Entity +} + +func isResponseRedirect(res *http.Response) bool { + switch res.StatusCode { + case http.StatusTemporaryRedirect, http.StatusPermanentRedirect, http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther: + return true + } + return false +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/buildpacks.go b/vendor/github.com/cloudfoundry-community/go-cfclient/buildpacks.go new file mode 100644 index 000000000000..0b1a09e66c12 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/buildpacks.go @@ -0,0 +1,247 @@ +package cfclient + +import ( + "encoding/json" + "io" + "io/ioutil" + "mime/multipart" + "os" + + "fmt" + "net/http" + + "code.cloudfoundry.org/gofileutils/fileutils" + "github.com/pkg/errors" +) + +type BuildpackResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []BuildpackResource `json:"resources"` +} + +type BuildpackResource struct { + Meta Meta `json:"metadata"` + Entity Buildpack `json:"entity"` +} + +type Buildpack struct { + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Name string `json:"name"` + Enabled bool `json:"enabled"` + Locked bool `json:"locked"` + Position int `json:"position"` + Filename string `json:"filename"` + Stack string `json:"stack"` + c *Client +} + +type BuildpackRequest struct { + // These are all pointers to the values so that we can tell + // whether people wanted position 0, or enable/unlock values, + // vs whether they didn't specify them and want them unchanged/default. 
+ Name *string `json:"name,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Locked *bool `json:"locked,omitempty"` + Position *int `json:"position,omitempty"` + Stack *string `json:"stack,omitempty"` +} + +func (c *Client) CreateBuildpack(bpr *BuildpackRequest) (*Buildpack, error) { + if bpr.Name == nil || *bpr.Name == "" { + return nil, errors.New("Unable to create a buidlpack with no name") + } + requestUrl := "/v2/buildpacks" + req := c.NewRequest("POST", requestUrl) + req.obj = bpr + resp, err := c.DoRequest(req) + if err != nil { + return nil, errors.Wrap(err, "Error creating buildpack:") + } + bp, err := c.handleBuildpackResp(resp) + if err != nil { + return nil, errors.Wrap(err, "Error creating buildpack:") + } + return &bp, nil +} + +func (c *Client) ListBuildpacks() ([]Buildpack, error) { + var buildpacks []Buildpack + requestUrl := "/v2/buildpacks" + for { + buildpackResp, err := c.getBuildpackResponse(requestUrl) + if err != nil { + return []Buildpack{}, err + } + for _, buildpack := range buildpackResp.Resources { + buildpacks = append(buildpacks, c.mergeBuildpackResource(buildpack)) + } + requestUrl = buildpackResp.NextUrl + if requestUrl == "" { + break + } + } + return buildpacks, nil +} + +func (c *Client) DeleteBuildpack(guid string, async bool) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/buildpacks/%s?async=%t", guid, async))) + if err != nil { + return err + } + if (async && (resp.StatusCode != http.StatusAccepted)) || (!async && (resp.StatusCode != http.StatusNoContent)) { + return errors.Wrapf(err, "Error deleting buildpack %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) getBuildpackResponse(requestUrl string) (BuildpackResponse, error) { + var buildpackResp BuildpackResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return BuildpackResponse{}, errors.Wrap(err, "Error requesting buildpacks") + } + resBody, err := 
ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return BuildpackResponse{}, errors.Wrap(err, "Error reading buildpack request") + } + err = json.Unmarshal(resBody, &buildpackResp) + if err != nil { + return BuildpackResponse{}, errors.Wrap(err, "Error unmarshalling buildpack") + } + return buildpackResp, nil +} + +func (c *Client) mergeBuildpackResource(buildpack BuildpackResource) Buildpack { + buildpack.Entity.Guid = buildpack.Meta.Guid + buildpack.Entity.CreatedAt = buildpack.Meta.CreatedAt + buildpack.Entity.UpdatedAt = buildpack.Meta.UpdatedAt + buildpack.Entity.c = c + return buildpack.Entity +} + +func (c *Client) GetBuildpackByGuid(buildpackGUID string) (Buildpack, error) { + requestUrl := fmt.Sprintf("/v2/buildpacks/%s", buildpackGUID) + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return Buildpack{}, errors.Wrap(err, "Error requesting buildpack info") + } + return c.handleBuildpackResp(resp) +} + +func (c *Client) handleBuildpackResp(resp *http.Response) (Buildpack, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return Buildpack{}, err + } + var buildpackResource BuildpackResource + if err := json.Unmarshal(body, &buildpackResource); err != nil { + return Buildpack{}, err + } + return c.mergeBuildpackResource(buildpackResource), nil +} + +func (b *Buildpack) Upload(file io.Reader, fileName string) error { + var capturedErr error + fileutils.TempFile("requests", func(requestFile *os.File, err error) { + if err != nil { + capturedErr = err + return + } + writer := multipart.NewWriter(requestFile) + part, err := writer.CreateFormFile("buildpack", fileName) + + if err != nil { + _ = writer.Close() + capturedErr = err + return + } + + _, err = io.Copy(part, file) + if err != nil { + capturedErr = fmt.Errorf("Error creating upload: %s", err.Error()) + return + } + + err = writer.Close() + if err != nil { + capturedErr = err + return + } + + 
requestFile.Seek(0, 0) + fileStats, err := requestFile.Stat() + if err != nil { + capturedErr = fmt.Errorf("Error getting file info: %s", err) + } + + req, err := http.NewRequest("PUT", fmt.Sprintf("%s/v2/buildpacks/%s/bits", b.c.Config.ApiAddress, b.Guid), requestFile) + if err != nil { + capturedErr = err + return + } + + req.ContentLength = fileStats.Size() + contentType := fmt.Sprintf("multipart/form-data; boundary=%s", writer.Boundary()) + req.Header.Set("Content-Type", contentType) + resp, err := b.c.Do(req) //client.Do() handles the HTTP status code checking for us + if err != nil { + capturedErr = err + return + } + defer resp.Body.Close() + }) + + return errors.Wrap(capturedErr, "Error uploading buildpack:") +} + +func (b *Buildpack) Update(bpr *BuildpackRequest) error { + requestUrl := fmt.Sprintf("/v2/buildpacks/%s", b.Guid) + req := b.c.NewRequest("PUT", requestUrl) + req.obj = bpr + resp, err := b.c.DoRequest(req) + if err != nil { + return errors.Wrap(err, "Error updating buildpack:") + } + newBp, err := b.c.handleBuildpackResp(resp) + if err != nil { + return errors.Wrap(err, "Error updating buildpack:") + } + b.Name = newBp.Name + b.Locked = newBp.Locked + b.Enabled = newBp.Enabled + return nil +} + +func (bpr *BuildpackRequest) Lock() { + b := true + bpr.Locked = &b +} +func (bpr *BuildpackRequest) Unlock() { + b := false + bpr.Locked = &b +} +func (bpr *BuildpackRequest) Enable() { + b := true + bpr.Enabled = &b +} +func (bpr *BuildpackRequest) Disable() { + b := false + bpr.Enabled = &b +} +func (bpr *BuildpackRequest) SetPosition(i int) { + bpr.Position = &i +} +func (bpr *BuildpackRequest) SetName(s string) { + bpr.Name = &s +} +func (bpr *BuildpackRequest) SetStack(s string) { + bpr.Stack = &s +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/cf_error.go b/vendor/github.com/cloudfoundry-community/go-cfclient/cf_error.go new file mode 100644 index 000000000000..6555519acbf6 --- /dev/null +++ 
b/vendor/github.com/cloudfoundry-community/go-cfclient/cf_error.go @@ -0,0 +1,3171 @@ +package cfclient + +// Code generated by go generate. DO NOT EDIT. +// This file was generated by robots at +// 2017-11-23 06:47:40.143734 +1100 AEDT m=+2.237010727 + +import "github.com/pkg/errors" + +// IsInvalidAuthTokenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 1000 +// - HTTP code: 401 +// - message: "Invalid Auth Token" +func IsInvalidAuthTokenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 1000 +} + +// IsMessageParseError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 1001 +// - HTTP code: 400 +// - message: "Request invalid due to parse error: %s" +func IsMessageParseError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 1001 +} + +// IsInvalidRelationError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 1002 +// - HTTP code: 400 +// - message: "%s" +func IsInvalidRelationError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 1002 +} + +// IsInvalidContentTypeError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 1003 +// - HTTP code: 400 +// - message: "Invalid content type, expected: %s" +func IsInvalidContentTypeError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 1003 +} + +// IsNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud 
Foundry code: 10000 +// - HTTP code: 404 +// - message: "Unknown request" +func IsNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10000 +} + +// IsServerError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10001 +// - HTTP code: 500 +// - message: "Server error" +func IsServerError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10001 +} + +// IsNotAuthenticatedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10002 +// - HTTP code: 401 +// - message: "Authentication error" +func IsNotAuthenticatedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10002 +} + +// IsNotAuthorizedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10003 +// - HTTP code: 403 +// - message: "You are not authorized to perform the requested action" +func IsNotAuthorizedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10003 +} + +// IsInvalidRequestError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10004 +// - HTTP code: 400 +// - message: "The request is invalid" +func IsInvalidRequestError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10004 +} + +// IsBadQueryParameterError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10005 +// - HTTP 
code: 400 +// - message: "The query parameter is invalid: %s" +func IsBadQueryParameterError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10005 +} + +// IsAssociationNotEmptyError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10006 +// - HTTP code: 400 +// - message: "Please delete the %s associations for your %s." +func IsAssociationNotEmptyError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10006 +} + +// IsInsufficientScopeError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10007 +// - HTTP code: 403 +// - message: "Your token lacks the necessary scopes to access this resource." +func IsInsufficientScopeError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10007 +} + +// IsUnprocessableEntityError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10008 +// - HTTP code: 422 +// - message: "%s" +func IsUnprocessableEntityError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10008 +} + +// IsUnableToPerformError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10009 +// - HTTP code: 400 +// - message: "%s could not be completed: %s" +func IsUnableToPerformError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10009 +} + +// IsResourceNotFoundError returns a boolean indicating whether +// the error is known to report 
the Cloud Foundry error: +// - Cloud Foundry code: 10010 +// - HTTP code: 404 +// - message: "%s" +func IsResourceNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10010 +} + +// IsDatabaseError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10011 +// - HTTP code: 500 +// - message: "Database error" +func IsDatabaseError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10011 +} + +// IsOrderByParameterInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10012 +// - HTTP code: 500 +// - message: "Cannot order by: %s" +func IsOrderByParameterInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10012 +} + +// IsRateLimitExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 10013 +// - HTTP code: 429 +// - message: "Rate Limit Exceeded" +func IsRateLimitExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 10013 +} + +// IsUserInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 20001 +// - HTTP code: 400 +// - message: "The user info is invalid: %s" +func IsUserInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 20001 +} + +// IsUaaIdTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 
20002 +// - HTTP code: 400 +// - message: "The UAA ID is taken: %s" +func IsUaaIdTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 20002 +} + +// IsUserNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 20003 +// - HTTP code: 404 +// - message: "The user could not be found: %s" +func IsUserNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 20003 +} + +// IsUaaUnavailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 20004 +// - HTTP code: 503 +// - message: "The UAA service is currently unavailable" +func IsUaaUnavailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 20004 +} + +// IsUaaEndpointDisabledError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 20005 +// - HTTP code: 501 +// - message: "The UAA endpoint needed is disabled" +func IsUaaEndpointDisabledError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 20005 +} + +// IsUserIsInMultipleOriginsError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 20006 +// - HTTP code: 400 +// - message: "The user exists in multiple origins. 
Specify an origin for the requested user from: %s" +func IsUserIsInMultipleOriginsError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 20006 +} + +// IsUserWithOriginNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 20007 +// - HTTP code: 404 +// - message: "The user could not be found, %s" +func IsUserWithOriginNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 20007 +} + +// IsOutOfRouterGroupPortsError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 21008 +// - HTTP code: 403 +// - message: "There are no more ports available for router group: %s. Please contact your administrator for more information." +func IsOutOfRouterGroupPortsError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 21008 +} + +// IsOrganizationInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 30001 +// - HTTP code: 400 +// - message: "The organization info is invalid: %s" +func IsOrganizationInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 30001 +} + +// IsOrganizationNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 30002 +// - HTTP code: 400 +// - message: "The organization name is taken: %s" +func IsOrganizationNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 30002 +} + +// 
IsOrganizationNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 30003 +// - HTTP code: 404 +// - message: "The organization could not be found: %s" +func IsOrganizationNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 30003 +} + +// IsLastManagerInOrgError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 30004 +// - HTTP code: 403 +// - message: "Cannot remove last Org Manager in org" +func IsLastManagerInOrgError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 30004 +} + +// IsLastBillingManagerInOrgError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 30005 +// - HTTP code: 403 +// - message: "Cannot remove last Billing Manager in org" +func IsLastBillingManagerInOrgError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 30005 +} + +// IsLastUserInOrgError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 30006 +// - HTTP code: 403 +// - message: "Cannot remove last User in org" +func IsLastUserInOrgError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 30006 +} + +// IsOrganizationAlreadySetError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 30007 +// - HTTP code: 400 +// - message: "Cannot change organization" +func IsOrganizationAlreadySetError(err error) bool { + cause := errors.Cause(err) + cferr, ok := 
cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 30007 +} + +// IsSpaceInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 40001 +// - HTTP code: 400 +// - message: "The app space info is invalid: %s" +func IsSpaceInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 40001 +} + +// IsSpaceNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 40002 +// - HTTP code: 400 +// - message: "The app space name is taken: %s" +func IsSpaceNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 40002 +} + +// IsSpaceUserNotInOrgError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 40003 +// - HTTP code: 400 +// - message: "The app space and the user are not in the same org: %s" +func IsSpaceUserNotInOrgError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 40003 +} + +// IsSpaceNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 40004 +// - HTTP code: 404 +// - message: "The app space could not be found: %s" +func IsSpaceNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 40004 +} + +// IsServiceInstanceNameEmptyError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60001 +// - HTTP code: 400 +// - message: "Service instance name is required." 
+func IsServiceInstanceNameEmptyError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60001 +} + +// IsServiceInstanceNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60002 +// - HTTP code: 400 +// - message: "The service instance name is taken: %s" +func IsServiceInstanceNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60002 +} + +// IsServiceInstanceInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60003 +// - HTTP code: 400 +// - message: "The service instance is invalid: %s" +func IsServiceInstanceInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60003 +} + +// IsServiceInstanceNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60004 +// - HTTP code: 404 +// - message: "The service instance could not be found: %s" +func IsServiceInstanceNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60004 +} + +// IsServiceInstanceQuotaExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60005 +// - HTTP code: 400 +// - message: "You have exceeded your organization's services limit." 
+func IsServiceInstanceQuotaExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60005 +} + +// IsPreviouslyUsedAs_ServiceInstancePaidQuotaExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60006 +// - HTTP code: 400 +// - message: "You have exceeded your organization's services limit." +func IsPreviouslyUsedAs_ServiceInstancePaidQuotaExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60006 +} + +// IsServiceInstanceServicePlanNotAllowedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60007 +// - HTTP code: 400 +// - message: "The service instance cannot be created because paid service plans are not allowed." +func IsServiceInstanceServicePlanNotAllowedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60007 +} + +// IsServiceInstanceDuplicateNotAllowedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60008 +// - HTTP code: 400 +// - message: "An instance of this service is already present in this space. Some services only support one instance per space." +func IsServiceInstanceDuplicateNotAllowedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60008 +} + +// IsServiceInstanceNameTooLongError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60009 +// - HTTP code: 400 +// - message: "You have requested an invalid service instance name. 
Names are limited to 50 characters." +func IsServiceInstanceNameTooLongError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60009 +} + +// IsServiceInstanceOrganizationNotAuthorizedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60010 +// - HTTP code: 403 +// - message: "A service instance for the selected plan cannot be created in this organization. The plan is visible because another organization you belong to has access to it." +func IsServiceInstanceOrganizationNotAuthorizedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60010 +} + +// IsServiceInstanceDeprovisionFailedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60011 +// - HTTP code: 409 +// - message: "The service broker reported an error during deprovisioning: %s" +func IsServiceInstanceDeprovisionFailedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60011 +} + +// IsServiceInstanceSpaceQuotaExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60012 +// - HTTP code: 400 +// - message: "You have exceeded your space's services limit." 
+func IsServiceInstanceSpaceQuotaExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60012 +} + +// IsServiceInstanceServicePlanNotAllowedBySpaceQuotaError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60013 +// - HTTP code: 400 +// - message: "The service instance cannot be created because paid service plans are not allowed for your space." +func IsServiceInstanceServicePlanNotAllowedBySpaceQuotaError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60013 +} + +// IsServiceInstanceSpaceChangeNotAllowedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60014 +// - HTTP code: 400 +// - message: "Cannot update space for service instance." +func IsServiceInstanceSpaceChangeNotAllowedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60014 +} + +// IsServiceInstanceTagsTooLongError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60015 +// - HTTP code: 400 +// - message: "Combined length of tags for service %s must be 2048 characters or less." +func IsServiceInstanceTagsTooLongError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60015 +} + +// IsAsyncServiceInstanceOperationInProgressError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60016 +// - HTTP code: 409 +// - message: "An operation for service instance %s is in progress." 
+func IsAsyncServiceInstanceOperationInProgressError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60016 +} + +// IsServiceInstanceRouteBindingSpaceMismatchError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60017 +// - HTTP code: 400 +// - message: "The service instance and the route are in different spaces." +func IsServiceInstanceRouteBindingSpaceMismatchError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60017 +} + +// IsServiceInstanceSpaceNotAuthorizedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60018 +// - HTTP code: 403 +// - message: "A service instance for the selected plan cannot be created in this space." +func IsServiceInstanceSpaceNotAuthorizedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60018 +} + +// IsServiceInstanceRouteServiceURLInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60019 +// - HTTP code: 400 +// - message: "The route service URL is invalid: %s" +func IsServiceInstanceRouteServiceURLInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60019 +} + +// IsServiceInstanceRouteServiceRequiresDiegoError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60020 +// - HTTP code: 400 +// - message: "Route services are only supported for apps on Diego. Unbind the service instance from the route or enable Diego for the app." 
+func IsServiceInstanceRouteServiceRequiresDiegoError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60020 +} + +// IsServiceInstanceRouteServiceDisabledError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60021 +// - HTTP code: 403 +// - message: "Support for route services is disabled" +func IsServiceInstanceRouteServiceDisabledError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60021 +} + +// IsAppPortMappingRequiresDiegoError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60022 +// - HTTP code: 400 +// - message: "App ports are supported for Diego apps only." +func IsAppPortMappingRequiresDiegoError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60022 +} + +// IsRoutePortNotEnabledOnAppError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60023 +// - HTTP code: 400 +// - message: "Routes can only be mapped to ports already enabled for the application." +func IsRoutePortNotEnabledOnAppError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60023 +} + +// IsMultipleAppPortsMappedDiegoToDeaError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60024 +// - HTTP code: 400 +// - message: "The app has routes mapped to multiple ports. Multiple ports are supported for Diego only. Please unmap routes from all but one app port. Multiple routes can be mapped to the same port if desired." 
+func IsMultipleAppPortsMappedDiegoToDeaError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60024 +} + +// IsVolumeMountServiceDisabledError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60025 +// - HTTP code: 403 +// - message: "Support for volume mount services is disabled" +func IsVolumeMountServiceDisabledError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60025 +} + +// IsDockerAppToDeaError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 60026 +// - HTTP code: 400 +// - message: "Docker apps cannot run on DEAs" +func IsDockerAppToDeaError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 60026 +} + +// IsRuntimeInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 70001 +// - HTTP code: 400 +// - message: "The runtime is invalid: %s" +func IsRuntimeInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 70001 +} + +// IsRuntimeNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 70002 +// - HTTP code: 400 +// - message: "The runtime name is taken: %s" +func IsRuntimeNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 70002 +} + +// IsRuntimeNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 70003 
+// - HTTP code: 404 +// - message: "The runtime could not be found: %s" +func IsRuntimeNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 70003 +} + +// IsFrameworkInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 80001 +// - HTTP code: 400 +// - message: "The framework is invalid: %s" +func IsFrameworkInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 80001 +} + +// IsFrameworkNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 80002 +// - HTTP code: 400 +// - message: "The framework name is taken: %s" +func IsFrameworkNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 80002 +} + +// IsFrameworkNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 80003 +// - HTTP code: 404 +// - message: "The framework could not be found: %s" +func IsFrameworkNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 80003 +} + +// IsServiceBindingInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 90001 +// - HTTP code: 400 +// - message: "The service binding is invalid: %s" +func IsServiceBindingInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 90001 +} + +// IsServiceBindingDifferentSpacesError returns a boolean indicating whether +// the error is 
known to report the Cloud Foundry error: +// - Cloud Foundry code: 90002 +// - HTTP code: 400 +// - message: "The app and the service are not in the same app space: %s" +func IsServiceBindingDifferentSpacesError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 90002 +} + +// IsServiceBindingAppServiceTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 90003 +// - HTTP code: 400 +// - message: "%s" +func IsServiceBindingAppServiceTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 90003 +} + +// IsServiceBindingNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 90004 +// - HTTP code: 404 +// - message: "The service binding could not be found: %s" +func IsServiceBindingNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 90004 +} + +// IsUnbindableServiceError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 90005 +// - HTTP code: 400 +// - message: "The service instance doesn't support binding." +func IsUnbindableServiceError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 90005 +} + +// IsInvalidLoggingServiceBindingError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 90006 +// - HTTP code: 502 +// - message: "The service is attempting to stream logs from your application, but is not registered as a logging service. Please contact the service provider." 
+func IsInvalidLoggingServiceBindingError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 90006 +} + +// IsAppInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 100001 +// - HTTP code: 400 +// - message: "The app is invalid: %s" +func IsAppInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 100001 +} + +// IsAppNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 100002 +// - HTTP code: 400 +// - message: "The app name is taken: %s" +func IsAppNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 100002 +} + +// IsAppNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 100004 +// - HTTP code: 404 +// - message: "The app could not be found: %s" +func IsAppNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 100004 +} + +// IsAppMemoryQuotaExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 100005 +// - HTTP code: 400 +// - message: "You have exceeded your organization's memory limit: %s" +func IsAppMemoryQuotaExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 100005 +} + +// IsAppMemoryInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 100006 +// - HTTP code: 400 +// 
- message: "You have specified an invalid amount of memory for your application." +func IsAppMemoryInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 100006 +} + +// IsQuotaInstanceMemoryLimitExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 100007 +// - HTTP code: 400 +// - message: "You have exceeded the instance memory limit for your organization's quota." +func IsQuotaInstanceMemoryLimitExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 100007 +} + +// IsQuotaInstanceLimitExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 100008 +// - HTTP code: 400 +// - message: "You have exceeded the instance limit for your organization's quota." 
+func IsQuotaInstanceLimitExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 100008 +} + +// IsServicePlanInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 110001 +// - HTTP code: 400 +// - message: "The service plan is invalid: %s" +func IsServicePlanInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 110001 +} + +// IsServicePlanNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 110002 +// - HTTP code: 400 +// - message: "The service plan name is taken: %s" +func IsServicePlanNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 110002 +} + +// IsServicePlanNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 110003 +// - HTTP code: 404 +// - message: "The service plan could not be found: %s" +func IsServicePlanNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 110003 +} + +// IsServicePlanNotUpdateableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 110004 +// - HTTP code: 400 +// - message: "The service does not support changing plans." 
+func IsServicePlanNotUpdateableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 110004 +} + +// IsServiceInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 120001 +// - HTTP code: 400 +// - message: "The service is invalid: %s" +func IsServiceInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 120001 +} + +// IsServiceLabelTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 120002 +// - HTTP code: 400 +// - message: "The service label is taken: %s" +func IsServiceLabelTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 120002 +} + +// IsServiceNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 120003 +// - HTTP code: 404 +// - message: "The service could not be found: %s" +func IsServiceNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 120003 +} + +// IsDomainInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 130001 +// - HTTP code: 400 +// - message: "The domain is invalid: %s" +func IsDomainInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 130001 +} + +// IsDomainNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 130002 +// - HTTP code: 404 +// - message: 
"The domain could not be found: %s" +func IsDomainNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 130002 +} + +// IsDomainNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 130003 +// - HTTP code: 400 +// - message: "The domain name is taken: %s" +func IsDomainNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 130003 +} + +// IsPathInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 130004 +// - HTTP code: 400 +// - message: "The path is invalid: %s" +func IsPathInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 130004 +} + +// IsTotalPrivateDomainsExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 130005 +// - HTTP code: 400 +// - message: "The number of private domains exceeds the quota for organization: %s" +func IsTotalPrivateDomainsExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 130005 +} + +// IsServiceDoesNotSupportRoutesError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 130006 +// - HTTP code: 400 +// - message: "This service does not support route binding." 
+func IsServiceDoesNotSupportRoutesError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 130006 +} + +// IsRouteAlreadyBoundToServiceInstanceError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 130007 +// - HTTP code: 400 +// - message: "A route may only be bound to a single service instance" +func IsRouteAlreadyBoundToServiceInstanceError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 130007 +} + +// IsServiceInstanceAlreadyBoundToSameRouteError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 130008 +// - HTTP code: 400 +// - message: "The route and service instance are already bound." +func IsServiceInstanceAlreadyBoundToSameRouteError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 130008 +} + +// IsLegacyApiWithoutDefaultSpaceError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 140001 +// - HTTP code: 400 +// - message: "A legacy api call requiring a default app space was called, but no default app space is set for the user." 
+func IsLegacyApiWithoutDefaultSpaceError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 140001 +} + +// IsAppPackageInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 150001 +// - HTTP code: 400 +// - message: "The app package is invalid: %s" +func IsAppPackageInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 150001 +} + +// IsAppPackageNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 150002 +// - HTTP code: 404 +// - message: "The app package could not be found: %s" +func IsAppPackageNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 150002 +} + +// IsInsufficientRunningResourcesAvailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 150003 +// - HTTP code: 503 +// - message: "One or more instances could not be started because of insufficient running resources." +func IsInsufficientRunningResourcesAvailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 150003 +} + +// IsPackageBitsAlreadyUploadedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 150004 +// - HTTP code: 400 +// - message: "Bits may be uploaded only once. Create a new package to upload different bits." 
+func IsPackageBitsAlreadyUploadedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 150004 +} + +// IsBlobstoreNotLocalError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 150005 +// - HTTP code: 400 +// - message: "Downloading blobs can only be done directly to the blobstore." +func IsBlobstoreNotLocalError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 150005 +} + +// IsBlobstoreUnavailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 150006 +// - HTTP code: 502 +// - message: "Failed to perform operation due to blobstore unavailability." +func IsBlobstoreUnavailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 150006 +} + +// IsBlobstoreError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 150007 +// - HTTP code: 500 +// - message: "Failed to perform blobstore operation after three retries." 
+func IsBlobstoreError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 150007 +} + +// IsDockerImageMissingError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 150008 +// - HTTP code: 400 +// - message: "Docker credentials can only be supplied for apps with a 'docker_image'" +func IsDockerImageMissingError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 150008 +} + +// IsAppBitsUploadInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 160001 +// - HTTP code: 400 +// - message: "The app upload is invalid: %s" +func IsAppBitsUploadInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 160001 +} + +// IsAppBitsCopyInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 160002 +// - HTTP code: 400 +// - message: "The app copy is invalid: %s" +func IsAppBitsCopyInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 160002 +} + +// IsAppResourcesFileModeInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 160003 +// - HTTP code: 400 +// - message: "The resource file mode is invalid: %s" +func IsAppResourcesFileModeInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 160003 +} + +// IsAppResourcesFilePathInvalidError returns a boolean indicating whether +// the error is known to report 
the Cloud Foundry error: +// - Cloud Foundry code: 160004 +// - HTTP code: 400 +// - message: "The resource file path is invalid: %s" +func IsAppResourcesFilePathInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 160004 +} + +// IsStagingError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170001 +// - HTTP code: 400 +// - message: "Staging error: %s" +func IsStagingError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170001 +} + +// IsNotStagedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170002 +// - HTTP code: 400 +// - message: "App has not finished staging" +func IsNotStagedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170002 +} + +// IsNoAppDetectedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170003 +// - HTTP code: 400 +// - message: "An app was not successfully detected by any available buildpack" +func IsNoAppDetectedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170003 +} + +// IsBuildpackCompileFailedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170004 +// - HTTP code: 400 +// - message: "App staging failed in the buildpack compile phase" +func IsBuildpackCompileFailedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170004 +} + +// 
IsBuildpackReleaseFailedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170005 +// - HTTP code: 400 +// - message: "App staging failed in the buildpack release phase" +func IsBuildpackReleaseFailedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170005 +} + +// IsNoBuildpacksFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170006 +// - HTTP code: 400 +// - message: "There are no buildpacks available" +func IsNoBuildpacksFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170006 +} + +// IsStagingTimeExpiredError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170007 +// - HTTP code: 504 +// - message: "Staging time expired: %s" +func IsStagingTimeExpiredError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170007 +} + +// IsInsufficientResourcesError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170008 +// - HTTP code: 400 +// - message: "Insufficient resources" +func IsInsufficientResourcesError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170008 +} + +// IsNoCompatibleCellError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170009 +// - HTTP code: 400 +// - message: "Found no compatible cell" +func IsNoCompatibleCellError(err error) bool { + cause := errors.Cause(err) + cferr, ok := 
cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170009 +} + +// IsStagerUnavailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170010 +// - HTTP code: 503 +// - message: "Stager is unavailable: %s" +func IsStagerUnavailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170010 +} + +// IsStagerError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170011 +// - HTTP code: 500 +// - message: "Stager error: %s" +func IsStagerError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170011 +} + +// IsRunnerInvalidRequestError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170014 +// - HTTP code: 500 +// - message: "Runner invalid request: %s" +func IsRunnerInvalidRequestError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170014 +} + +// IsRunnerUnavailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170015 +// - HTTP code: 503 +// - message: "Runner is unavailable: %s" +func IsRunnerUnavailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170015 +} + +// IsRunnerError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170016 +// - HTTP code: 500 +// - message: "Runner error: %s" +func IsRunnerError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + 
if !ok { + return false + } + return cferr.Code == 170016 +} + +// IsStagingInProgressError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170017 +// - HTTP code: 422 +// - message: "Only one build can be STAGING at a time per application." +func IsStagingInProgressError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170017 +} + +// IsInvalidTaskAddressError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170018 +// - HTTP code: 500 +// - message: "Invalid config: %s" +func IsInvalidTaskAddressError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170018 +} + +// IsTaskError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170019 +// - HTTP code: 500 +// - message: "Task failed: %s" +func IsTaskError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170019 +} + +// IsTaskWorkersUnavailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170020 +// - HTTP code: 503 +// - message: "Task workers are unavailable: %s" +func IsTaskWorkersUnavailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170020 +} + +// IsInvalidTaskRequestError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 170021 +// - HTTP code: 422 +// - message: "The task request is invalid: %s" +func IsInvalidTaskRequestError(err error) bool { + cause := errors.Cause(err) + 
cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 170021 +} + +// IsServiceGatewayError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 180002 +// - HTTP code: 503 +// - message: "Service gateway internal error: %s" +func IsServiceGatewayError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 180002 +} + +// IsServiceNotImplementedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 180003 +// - HTTP code: 501 +// - message: "Operation not supported for service" +func IsServiceNotImplementedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 180003 +} + +// IsSDSNotAvailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 180004 +// - HTTP code: 501 +// - message: "No serialization service backends available" +func IsSDSNotAvailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 180004 +} + +// IsFileError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 190001 +// - HTTP code: 400 +// - message: "File error: %s" +func IsFileError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 190001 +} + +// IsStatsError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 200001 +// - HTTP code: 400 +// - message: "Stats error: %s" +func IsStatsError(err error) bool { + cause := errors.Cause(err) + cferr, ok 
:= cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 200001 +} + +// IsStatsUnavailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 200002 +// - HTTP code: 503 +// - message: "Stats unavailable: %s" +func IsStatsUnavailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 200002 +} + +// IsAppStoppedStatsError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 200003 +// - HTTP code: 400 +// - message: "Could not fetch stats for stopped app: %s" +func IsAppStoppedStatsError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 200003 +} + +// IsRouteInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 210001 +// - HTTP code: 400 +// - message: "The route is invalid: %s" +func IsRouteInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 210001 +} + +// IsRouteNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 210002 +// - HTTP code: 404 +// - message: "The route could not be found: %s" +func IsRouteNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 210002 +} + +// IsRouteHostTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 210003 +// - HTTP code: 400 +// - message: "The host is taken: %s" +func IsRouteHostTakenError(err error) bool { + cause := errors.Cause(err) + 
cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 210003 +} + +// IsRoutePathTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 210004 +// - HTTP code: 400 +// - message: "The path is taken: %s" +func IsRoutePathTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 210004 +} + +// IsRoutePortTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 210005 +// - HTTP code: 400 +// - message: "The port is taken: %s" +func IsRoutePortTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 210005 +} + +// IsRouteMappingTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 210006 +// - HTTP code: 400 +// - message: "The route mapping is taken: %s" +func IsRouteMappingTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 210006 +} + +// IsRouteMappingNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 210007 +// - HTTP code: 404 +// - message: "The route mapping could not be found: %s" +func IsRouteMappingNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 210007 +} + +// IsRouterGroupNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 210009 +// - HTTP code: 404 +// - message: "The router group could not be found: %s" +func 
IsRouterGroupNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 210009 +} + +// IsInstancesError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 220001 +// - HTTP code: 400 +// - message: "Instances error: %s" +func IsInstancesError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 220001 +} + +// IsInstancesUnavailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 220002 +// - HTTP code: 503 +// - message: "Instances information unavailable: %s" +func IsInstancesUnavailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 220002 +} + +// IsEventNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 230002 +// - HTTP code: 404 +// - message: "Event could not be found: %s" +func IsEventNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 230002 +} + +// IsQuotaDefinitionNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 240001 +// - HTTP code: 404 +// - message: "Quota Definition could not be found: %s" +func IsQuotaDefinitionNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 240001 +} + +// IsQuotaDefinitionNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 240002 +// - HTTP code: 
400 +// - message: "Quota Definition is taken: %s" +func IsQuotaDefinitionNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 240002 +} + +// IsQuotaDefinitionInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 240003 +// - HTTP code: 400 +// - message: "Quota Definition is invalid: %s" +func IsQuotaDefinitionInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 240003 +} + +// IsQuotaDefinitionMemoryLimitNegativeError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 240004 +// - HTTP code: 400 +// - message: "Quota Definition memory limit cannot be negative" +func IsQuotaDefinitionMemoryLimitNegativeError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 240004 +} + +// IsStackInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 250001 +// - HTTP code: 400 +// - message: "The stack is invalid: %s" +func IsStackInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 250001 +} + +// IsStackNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 250002 +// - HTTP code: 400 +// - message: "The stack name is taken: %s" +func IsStackNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 250002 +} + +// IsStackNotFoundError returns a boolean indicating whether +// the error is 
known to report the Cloud Foundry error: +// - Cloud Foundry code: 250003 +// - HTTP code: 404 +// - message: "The stack could not be found: %s" +func IsStackNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 250003 +} + +// IsServicePlanVisibilityInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 260001 +// - HTTP code: 400 +// - message: "Service Plan Visibility is invalid: %s" +func IsServicePlanVisibilityInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 260001 +} + +// IsServicePlanVisibilityAlreadyExistsError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 260002 +// - HTTP code: 400 +// - message: "This combination of ServicePlan and Organization is already taken: %s" +func IsServicePlanVisibilityAlreadyExistsError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 260002 +} + +// IsServicePlanVisibilityNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 260003 +// - HTTP code: 404 +// - message: "The service plan visibility could not be found: %s" +func IsServicePlanVisibilityNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 260003 +} + +// IsServiceBrokerInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 270001 +// - HTTP code: 400 +// - message: "Service broker is invalid: %s" +func IsServiceBrokerInvalidError(err error) bool { + cause := 
errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 270001 +} + +// IsServiceBrokerNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 270002 +// - HTTP code: 400 +// - message: "The service broker name is taken" +func IsServiceBrokerNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 270002 +} + +// IsServiceBrokerUrlTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 270003 +// - HTTP code: 400 +// - message: "The service broker url is taken: %s" +func IsServiceBrokerUrlTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 270003 +} + +// IsServiceBrokerNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 270004 +// - HTTP code: 404 +// - message: "The service broker was not found: %s" +func IsServiceBrokerNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 270004 +} + +// IsServiceBrokerNotRemovableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 270010 +// - HTTP code: 400 +// - message: "Can not remove brokers that have associated service instances: %s" +func IsServiceBrokerNotRemovableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 270010 +} + +// IsServiceBrokerUrlInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud 
Foundry code: 270011 +// - HTTP code: 400 +// - message: "%s is not a valid URL" +func IsServiceBrokerUrlInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 270011 +} + +// IsServiceBrokerCatalogInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 270012 +// - HTTP code: 502 +// - message: "Service broker catalog is invalid: %s" +func IsServiceBrokerCatalogInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 270012 +} + +// IsServiceBrokerDashboardClientFailureError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 270013 +// - HTTP code: 502 +// - message: "Service broker dashboard clients could not be modified: %s" +func IsServiceBrokerDashboardClientFailureError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 270013 +} + +// IsServiceBrokerAsyncRequiredError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 270014 +// - HTTP code: 400 +// - message: "This service plan requires client support for asynchronous service operations." 
+func IsServiceBrokerAsyncRequiredError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 270014 +} + +// IsServiceDashboardClientMissingUrlError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 270015 +// - HTTP code: 502 +// - message: "Service broker returned dashboard client configuration without a dashboard URL" +func IsServiceDashboardClientMissingUrlError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 270015 +} + +// IsBuildpackNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290001 +// - HTTP code: 400 +// - message: "The buildpack name is already in use: %s" +func IsBuildpackNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290001 +} + +// IsBuildpackBitsUploadInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290002 +// - HTTP code: 400 +// - message: "The buildpack upload is invalid: %s" +func IsBuildpackBitsUploadInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290002 +} + +// IsBuildpackInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290003 +// - HTTP code: 400 +// - message: "Buildpack is invalid: %s" +func IsBuildpackInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290003 +} + +// IsCustomBuildpacksDisabledError returns a boolean 
indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290004 +// - HTTP code: 400 +// - message: "Custom buildpacks are disabled" +func IsCustomBuildpacksDisabledError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290004 +} + +// IsBuildpackLockedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290005 +// - HTTP code: 409 +// - message: "The buildpack is locked" +func IsBuildpackLockedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290005 +} + +// IsJobTimeoutError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290006 +// - HTTP code: 524 +// - message: "The job execution has timed out." +func IsJobTimeoutError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290006 +} + +// IsSpaceDeleteTimeoutError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290007 +// - HTTP code: 524 +// - message: "Deletion of space %s timed out before all resources within could be deleted" +func IsSpaceDeleteTimeoutError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290007 +} + +// IsSpaceDeletionFailedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290008 +// - HTTP code: 502 +// - message: "Deletion of space %s failed because one or more resources within could not be deleted.\n\n%s" +func IsSpaceDeletionFailedError(err error) bool { + cause := errors.Cause(err) + 
cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290008 +} + +// IsOrganizationDeleteTimeoutError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290009 +// - HTTP code: 524 +// - message: "Delete of organization %s timed out before all resources within could be deleted" +func IsOrganizationDeleteTimeoutError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290009 +} + +// IsOrganizationDeletionFailedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290010 +// - HTTP code: 502 +// - message: "Deletion of organization %s failed because one or more resources within could not be deleted.\n\n%s" +func IsOrganizationDeletionFailedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290010 +} + +// IsNonrecursiveSpaceDeletionFailedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290011 +// - HTTP code: 400 +// - message: "Resource inside space %s must first be deleted, or specify recursive delete." 
+func IsNonrecursiveSpaceDeletionFailedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290011 +} + +// IsBitsServiceError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290012 +// - HTTP code: 500 +// - message: "The bits service returned an error: %s" +func IsBitsServiceError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290012 +} + +// IsSpaceRolesDeletionTimeoutError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290013 +// - HTTP code: 524 +// - message: "Deletion of roles for space %s timed out before all roles could be deleted" +func IsSpaceRolesDeletionTimeoutError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290013 +} + +// IsOrganizationRolesDeletionFailedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290014 +// - HTTP code: 502 +// - message: "Failed to delete one or more roles for organization %s" +func IsOrganizationRolesDeletionFailedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290014 +} + +// IsSpaceRolesDeletionFailedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 290016 +// - HTTP code: 502 +// - message: "Failed to delete one or more roles for space %s" +func IsSpaceRolesDeletionFailedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 290016 +} + +// 
IsSecurityGroupInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 300001 +// - HTTP code: 400 +// - message: "The security group is invalid: %s" +func IsSecurityGroupInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 300001 +} + +// IsSecurityGroupNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 300002 +// - HTTP code: 404 +// - message: "The security group could not be found: %s" +func IsSecurityGroupNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 300002 +} + +// IsSecurityGroupStagingDefaultInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 300003 +// - HTTP code: 400 +// - message: "The security group could not be found: %s" +func IsSecurityGroupStagingDefaultInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 300003 +} + +// IsSecurityGroupRunningDefaultInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 300004 +// - HTTP code: 400 +// - message: "The security group could not be found: %s" +func IsSecurityGroupRunningDefaultInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 300004 +} + +// IsSecurityGroupNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 300005 +// - HTTP code: 400 +// - message: "The security group name is taken: %s" +func 
IsSecurityGroupNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 300005 +} + +// IsSpaceQuotaDefinitionInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 310001 +// - HTTP code: 400 +// - message: "Space Quota Definition is invalid: %s" +func IsSpaceQuotaDefinitionInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 310001 +} + +// IsSpaceQuotaDefinitionNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 310002 +// - HTTP code: 400 +// - message: "The space quota definition name is taken: %s" +func IsSpaceQuotaDefinitionNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 310002 +} + +// IsSpaceQuotaMemoryLimitExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 310003 +// - HTTP code: 400 +// - message: "You have exceeded your space's memory limit: %s" +func IsSpaceQuotaMemoryLimitExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 310003 +} + +// IsSpaceQuotaInstanceMemoryLimitExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 310004 +// - HTTP code: 400 +// - message: "You have exceeded the instance memory limit for your space's quota." 
+func IsSpaceQuotaInstanceMemoryLimitExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 310004 +} + +// IsSpaceQuotaTotalRoutesExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 310005 +// - HTTP code: 400 +// - message: "You have exceeded the total routes for your space's quota." +func IsSpaceQuotaTotalRoutesExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 310005 +} + +// IsOrgQuotaTotalRoutesExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 310006 +// - HTTP code: 400 +// - message: "You have exceeded the total routes for your organization's quota." +func IsOrgQuotaTotalRoutesExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 310006 +} + +// IsSpaceQuotaDefinitionNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 310007 +// - HTTP code: 404 +// - message: "Space Quota Definition could not be found: %s" +func IsSpaceQuotaDefinitionNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 310007 +} + +// IsSpaceQuotaInstanceLimitExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 310008 +// - HTTP code: 400 +// - message: "You have exceeded the instance limit for your space's quota." 
+func IsSpaceQuotaInstanceLimitExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 310008 +} + +// IsOrgQuotaTotalReservedRoutePortsExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 310009 +// - HTTP code: 400 +// - message: "You have exceeded the total reserved route ports for your organization's quota." +func IsOrgQuotaTotalReservedRoutePortsExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 310009 +} + +// IsSpaceQuotaTotalReservedRoutePortsExceededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 310010 +// - HTTP code: 400 +// - message: "You have exceeded the total reserved route ports for your space's quota." +func IsSpaceQuotaTotalReservedRoutePortsExceededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 310010 +} + +// IsDiegoDisabledError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 320001 +// - HTTP code: 400 +// - message: "Diego has not been enabled." +func IsDiegoDisabledError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 320001 +} + +// IsDiegoDockerBuildpackConflictError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 320002 +// - HTTP code: 400 +// - message: "You cannot specify a custom buildpack and a docker image at the same time." 
+func IsDiegoDockerBuildpackConflictError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 320002 +} + +// IsDockerDisabledError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 320003 +// - HTTP code: 400 +// - message: "Docker support has not been enabled." +func IsDockerDisabledError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 320003 +} + +// IsStagingBackendInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 320004 +// - HTTP code: 403 +// - message: "The request staging completion endpoint only handles apps desired to stage on the Diego backend." +func IsStagingBackendInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 320004 +} + +// IsBackendSelectionNotAuthorizedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 320005 +// - HTTP code: 403 +// - message: "You cannot select the backend on which to run this application" +func IsBackendSelectionNotAuthorizedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 320005 +} + +// IsFeatureFlagNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 330000 +// - HTTP code: 404 +// - message: "The feature flag could not be found: %s" +func IsFeatureFlagNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 330000 +} + +// 
IsFeatureFlagInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 330001 +// - HTTP code: 400 +// - message: "The feature flag is invalid: %s" +func IsFeatureFlagInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 330001 +} + +// IsFeatureDisabledError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 330002 +// - HTTP code: 403 +// - message: "Feature Disabled: %s" +func IsFeatureDisabledError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 330002 +} + +// IsUserProvidedServiceInstanceNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 340001 +// - HTTP code: 404 +// - message: "The service instance could not be found: %s" +func IsUserProvidedServiceInstanceNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 340001 +} + +// IsUserProvidedServiceInstanceHandlerNeededError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 340002 +// - HTTP code: 400 +// - message: "Please use the User Provided Services API to manage this resource." 
+func IsUserProvidedServiceInstanceHandlerNeededError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 340002 +} + +// IsProcessInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 350001 +// - HTTP code: 400 +// - message: "The process is invalid: %s" +func IsProcessInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 350001 +} + +// IsUnableToDeleteError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 350002 +// - HTTP code: 400 +// - message: "Unable to perform delete action: %s" +func IsUnableToDeleteError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 350002 +} + +// IsProcessNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 350003 +// - HTTP code: 404 +// - message: "The process could not be found: %s" +func IsProcessNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 350003 +} + +// IsServiceKeyNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 360001 +// - HTTP code: 400 +// - message: "The service key name is taken: %s" +func IsServiceKeyNameTakenError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 360001 +} + +// IsServiceKeyInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 360002 
+// - HTTP code: 400 +// - message: "The service key is invalid: %s" +func IsServiceKeyInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 360002 +} + +// IsServiceKeyNotFoundError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 360003 +// - HTTP code: 404 +// - message: "The service key could not be found: %s" +func IsServiceKeyNotFoundError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 360003 +} + +// IsServiceKeyNotSupportedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 360004 +// - HTTP code: 400 +// - message: "%s" +func IsServiceKeyNotSupportedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 360004 +} + +// IsServiceKeyCredentialStoreUnavailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 360005 +// - HTTP code: 503 +// - message: "Credential store is unavailable" +func IsServiceKeyCredentialStoreUnavailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 360005 +} + +// IsRoutingApiUnavailableError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 370001 +// - HTTP code: 503 +// - message: "The Routing API is currently unavailable" +func IsRoutingApiUnavailableError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 370001 +} + +// IsRoutingApiDisabledError returns a boolean 
indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 370003 +// - HTTP code: 403 +// - message: "Routing API is disabled" +func IsRoutingApiDisabledError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 370003 +} + +// IsEnvironmentVariableGroupInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 380001 +// - HTTP code: 400 +// - message: "The Environment Variable Group is invalid: %s" +func IsEnvironmentVariableGroupInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 380001 +} + +// IsDropletUploadInvalidError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 380002 +// - HTTP code: 400 +// - message: "The droplet upload is invalid: %s" +func IsDropletUploadInvalidError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 380002 +} + +// IsServiceInstanceUnshareFailedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 390001 +// - HTTP code: 502 +// - message: "Unshare of service instance failed because one or more bindings could not be deleted.\n\n%s" +func IsServiceInstanceUnshareFailedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 390001 +} + +// IsServiceInstanceDeletionSharesExistsError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 390002 +// - HTTP code: 400 +// - message: "Service instances must be unshared before they can be deleted. 
Unsharing %s will automatically delete any bindings that have been made to applications in other spaces." +func IsServiceInstanceDeletionSharesExistsError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 390002 +} + +// IsServiceShareIsDisabledError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 390003 +// - HTTP code: 400 +// - message: "The %s service does not support service instance sharing." +func IsServiceShareIsDisabledError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 390003 +} + +// IsUserProvidedServiceInstanceSharingNotSupportedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 390004 +// - HTTP code: 400 +// - message: "User-provided services cannot be shared" +func IsUserProvidedServiceInstanceSharingNotSupportedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 390004 +} + +// IsRouteServiceInstanceSharingNotSupportedError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 390005 +// - HTTP code: 400 +// - message: "Route services cannot be shared" +func IsRouteServiceInstanceSharingNotSupportedError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 390005 +} + +// IsSharedServiceInstanceNameTakenError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 390006 +// - HTTP code: 400 +// - message: "A service instance called %s already exists in %s" +func IsSharedServiceInstanceNameTakenError(err error) 
bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 390006 +} + +// IsInvalidServiceInstanceSharingTargetSpaceError returns a boolean indicating whether +// the error is known to report the Cloud Foundry error: +// - Cloud Foundry code: 390007 +// - HTTP code: 422 +// - message: "Service instances cannot be shared into the space where they were created" +func IsInvalidServiceInstanceSharingTargetSpaceError(err error) bool { + cause := errors.Cause(err) + cferr, ok := cause.(CloudFoundryError) + if !ok { + return false + } + return cferr.Code == 390007 +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/client.go b/vendor/github.com/cloudfoundry-community/go-cfclient/client.go new file mode 100644 index 000000000000..16bc10996cfc --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/client.go @@ -0,0 +1,407 @@ +package cfclient + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "time" + + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +//Client used to communicate with Cloud Foundry +type Client struct { + Config Config + Endpoint Endpoint +} + +type Endpoint struct { + DopplerEndpoint string `json:"doppler_logging_endpoint"` + LoggingEndpoint string `json:"logging_endpoint"` + AuthEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string `json:"token_endpoint"` +} + +//Config is used to configure the creation of a client +type Config struct { + ApiAddress string `json:"api_url"` + Username string `json:"user"` + Password string `json:"password"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + SkipSslValidation bool `json:"skip_ssl_validation"` + HttpClient *http.Client + Token string `json:"auth_token"` + TokenSource oauth2.TokenSource + tokenSourceDeadline 
*time.Time + UserAgent string `json:"user_agent"` +} + +// Request is used to help build up a request +type Request struct { + method string + url string + params url.Values + body io.Reader + obj interface{} +} + +//DefaultConfig configuration for client +//Keep LoginAdress for backward compatibility +//Need to be remove in close future +func DefaultConfig() *Config { + return &Config{ + ApiAddress: "http://api.bosh-lite.com", + Username: "admin", + Password: "admin", + Token: "", + SkipSslValidation: false, + HttpClient: http.DefaultClient, + UserAgent: "Go-CF-client/1.1", + } +} + +func DefaultEndpoint() *Endpoint { + return &Endpoint{ + DopplerEndpoint: "wss://doppler.10.244.0.34.xip.io:443", + LoggingEndpoint: "wss://loggregator.10.244.0.34.xip.io:443", + TokenEndpoint: "https://uaa.10.244.0.34.xip.io", + AuthEndpoint: "https://login.10.244.0.34.xip.io", + } +} + +// NewClient returns a new client +func NewClient(config *Config) (client *Client, err error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.ApiAddress) == 0 { + config.ApiAddress = defConfig.ApiAddress + } + + if len(config.Username) == 0 { + config.Username = defConfig.Username + } + + if len(config.Password) == 0 { + config.Password = defConfig.Password + } + + if len(config.Token) == 0 { + config.Token = defConfig.Token + } + + if len(config.UserAgent) == 0 { + config.UserAgent = defConfig.UserAgent + } + + if config.HttpClient == nil { + config.HttpClient = defConfig.HttpClient + } + + if config.HttpClient.Transport == nil { + config.HttpClient.Transport = shallowDefaultTransport() + } + + var tp *http.Transport + + switch t := config.HttpClient.Transport.(type) { + case *http.Transport: + tp = t + case *oauth2.Transport: + if bt, ok := t.Base.(*http.Transport); ok { + tp = bt + } + } + + if tp != nil { + if tp.TLSClientConfig == nil { + tp.TLSClientConfig = &tls.Config{} + } + tp.TLSClientConfig.InsecureSkipVerify = config.SkipSslValidation + } + + 
config.ApiAddress = strings.TrimRight(config.ApiAddress, "/") + + client = &Client{ + Config: *config, + } + + if err := client.refreshEndpoint(); err != nil { + return nil, err + } + + return client, nil +} + +func shallowDefaultTransport() *http.Transport { + defaultTransport := http.DefaultTransport.(*http.Transport) + return &http.Transport{ + Proxy: defaultTransport.Proxy, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + } +} + +func getUserAuth(ctx context.Context, config Config, endpoint *Endpoint) (Config, error) { + authConfig := &oauth2.Config{ + ClientID: "cf", + Scopes: []string{""}, + Endpoint: oauth2.Endpoint{ + AuthURL: endpoint.AuthEndpoint + "/oauth/auth", + TokenURL: endpoint.TokenEndpoint + "/oauth/token", + }, + } + + token, err := authConfig.PasswordCredentialsToken(ctx, config.Username, config.Password) + if err != nil { + return config, errors.Wrap(err, "Error getting token") + } + + config.tokenSourceDeadline = &token.Expiry + config.TokenSource = authConfig.TokenSource(ctx, token) + config.HttpClient = oauth2.NewClient(ctx, config.TokenSource) + + return config, err +} + +func getClientAuth(ctx context.Context, config Config, endpoint *Endpoint) Config { + authConfig := &clientcredentials.Config{ + ClientID: config.ClientID, + ClientSecret: config.ClientSecret, + TokenURL: endpoint.TokenEndpoint + "/oauth/token", + } + + config.TokenSource = authConfig.TokenSource(ctx) + config.HttpClient = authConfig.Client(ctx) + return config +} + +// getUserTokenAuth initializes client credentials from existing bearer token. 
+func getUserTokenAuth(ctx context.Context, config Config, endpoint *Endpoint) Config { + authConfig := &oauth2.Config{ + ClientID: "cf", + Scopes: []string{""}, + Endpoint: oauth2.Endpoint{ + AuthURL: endpoint.AuthEndpoint + "/oauth/auth", + TokenURL: endpoint.TokenEndpoint + "/oauth/token", + }, + } + + // Token is expected to have no "bearer" prefix + token := &oauth2.Token{ + AccessToken: config.Token, + TokenType: "Bearer"} + + config.TokenSource = authConfig.TokenSource(ctx, token) + config.HttpClient = oauth2.NewClient(ctx, config.TokenSource) + + return config +} + +func getInfo(api string, httpClient *http.Client) (*Endpoint, error) { + var endpoint Endpoint + + if api == "" { + return DefaultEndpoint(), nil + } + + resp, err := httpClient.Get(api + "/v2/info") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + err = decodeBody(resp, &endpoint) + if err != nil { + return nil, err + } + + return &endpoint, err +} + +// NewRequest is used to create a new Request +func (c *Client) NewRequest(method, path string) *Request { + r := &Request{ + method: method, + url: c.Config.ApiAddress + path, + params: make(map[string][]string), + } + return r +} + +// NewRequestWithBody is used to create a new request with +// arbigtrary body io.Reader. 
+func (c *Client) NewRequestWithBody(method, path string, body io.Reader) *Request { + r := c.NewRequest(method, path) + + // Set request body + r.body = body + + return r +} + +// DoRequest runs a request with our client +func (c *Client) DoRequest(r *Request) (*http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return nil, err + } + return c.Do(req) +} + +// DoRequestWithoutRedirects executes the request without following redirects +func (c *Client) DoRequestWithoutRedirects(r *Request) (*http.Response, error) { + prevCheckRedirect := c.Config.HttpClient.CheckRedirect + c.Config.HttpClient.CheckRedirect = func(httpReq *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + defer func() { + c.Config.HttpClient.CheckRedirect = prevCheckRedirect + }() + return c.DoRequest(r) +} + +func (c *Client) Do(req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", c.Config.UserAgent) + if req.Body != nil && req.Header.Get("Content-type") == "" { + req.Header.Set("Content-type", "application/json") + } + + resp, err := c.Config.HttpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode >= http.StatusBadRequest { + return c.handleError(resp) + } + + return resp, nil +} + +func (c *Client) handleError(resp *http.Response) (*http.Response, error) { + body, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return resp, CloudFoundryHTTPError{ + StatusCode: resp.StatusCode, + Status: resp.Status, + Body: body, + } + } + defer resp.Body.Close() + + // Unmarshal V2 error response + if strings.HasPrefix(resp.Request.URL.Path, "/v2/") { + var cfErr CloudFoundryError + if err := json.Unmarshal(body, &cfErr); err != nil { + return resp, CloudFoundryHTTPError{ + StatusCode: resp.StatusCode, + Status: resp.Status, + Body: body, + } + } + return nil, cfErr + } + + // Unmarshal a V3 error response and convert it into a V2 model + var cfErrorsV3 CloudFoundryErrorsV3 + if err := json.Unmarshal(body, 
&cfErrorsV3); err != nil { + return resp, CloudFoundryHTTPError{ + StatusCode: resp.StatusCode, + Status: resp.Status, + Body: body, + } + } + return nil, NewCloudFoundryErrorFromV3Errors(cfErrorsV3) +} + +func (c *Client) refreshEndpoint() error { + // we want to keep the Timeout value from config.HttpClient + timeout := c.Config.HttpClient.Timeout + + ctx := context.Background() + ctx = context.WithValue(ctx, oauth2.HTTPClient, c.Config.HttpClient) + + endpoint, err := getInfo(c.Config.ApiAddress, oauth2.NewClient(ctx, nil)) + + if err != nil { + return errors.Wrap(err, "Could not get api /v2/info") + } + + switch { + case c.Config.Token != "": + c.Config = getUserTokenAuth(ctx, c.Config, endpoint) + case c.Config.ClientID != "": + c.Config = getClientAuth(ctx, c.Config, endpoint) + default: + c.Config, err = getUserAuth(ctx, c.Config, endpoint) + if err != nil { + return err + } + } + // make sure original Timeout value will be used + if c.Config.HttpClient.Timeout != timeout { + c.Config.HttpClient.Timeout = timeout + } + + c.Endpoint = *endpoint + return nil +} + +// toHTTP converts the request to an HTTP Request +func (r *Request) toHTTP() (*http.Request, error) { + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + b, err := encodeBody(r.obj) + if err != nil { + return nil, err + } + r.body = b + } + + // Create the HTTP Request + return http.NewRequest(r.method, r.url, r.body) +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + defer resp.Body.Close() + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +func (c *Client) GetToken() (string, error) { + if c.Config.tokenSourceDeadline != nil && 
c.Config.tokenSourceDeadline.Before(time.Now()) { + if err := c.refreshEndpoint(); err != nil { + return "", err + } + } + + token, err := c.Config.TokenSource.Token() + if err != nil { + return "", errors.Wrap(err, "Error getting bearer token") + } + return "bearer " + token.AccessToken, nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/client_interface.go b/vendor/github.com/cloudfoundry-community/go-cfclient/client_interface.go new file mode 100644 index 000000000000..0ff47ee9c823 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/client_interface.go @@ -0,0 +1,270 @@ +package cfclient + +import ( + "io" + "net/http" + "net/url" +) + +// CloudFoundryClient provides a baseline reference for the currently supported APIs with Cloud Foundry. It can be used +// for providing custom implementations or extensions beyond the base implementation provided with this package. +type CloudFoundryClient interface { + ListAllProcesses() ([]Process, error) + ListAllProcessesByQuery(query url.Values) ([]Process, error) + ListServicePlanVisibilitiesByQuery(query url.Values) ([]ServicePlanVisibility, error) + ListServicePlanVisibilities() ([]ServicePlanVisibility, error) + GetServicePlanVisibilityByGuid(guid string) (ServicePlanVisibility, error) + CreateServicePlanVisibilityByUniqueId(uniqueId string, organizationGuid string) (ServicePlanVisibility, error) + CreateServicePlanVisibility(servicePlanGuid string, organizationGuid string) (ServicePlanVisibility, error) + DeleteServicePlanVisibilityByPlanAndOrg(servicePlanGuid string, organizationGuid string, async bool) error + DeleteServicePlanVisibility(guid string, async bool) error + UpdateServicePlanVisibility(guid string, servicePlanGuid string, organizationGuid string) (ServicePlanVisibility, error) + ListStacksByQuery(query url.Values) ([]Stack, error) + ListStacks() ([]Stack, error) + GetRunningEnvironmentVariableGroup() (EnvironmentVariableGroup, error) + 
GetStagingEnvironmentVariableGroup() (EnvironmentVariableGroup, error) + SetRunningEnvironmentVariableGroup(evg EnvironmentVariableGroup) error + SetStagingEnvironmentVariableGroup(evg EnvironmentVariableGroup) error + ListSecGroups() (secGroups []SecGroup, err error) + ListRunningSecGroups() ([]SecGroup, error) + ListStagingSecGroups() ([]SecGroup, error) + GetSecGroupByName(name string) (secGroup SecGroup, err error) + CreateSecGroup(name string, rules []SecGroupRule, spaceGuids []string) (*SecGroup, error) + UpdateSecGroup(guid, name string, rules []SecGroupRule, spaceGuids []string) (*SecGroup, error) + DeleteSecGroup(guid string) error + GetSecGroup(guid string) (*SecGroup, error) + BindSecGroup(secGUID, spaceGUID string) error + BindStagingSecGroupToSpace(secGUID, spaceGUID string) error + BindRunningSecGroup(secGUID string) error + UnbindRunningSecGroup(secGUID string) error + BindStagingSecGroup(secGUID string) error + UnbindStagingSecGroup(secGUID string) error + UnbindSecGroup(secGUID, spaceGUID string) error + CreateIsolationSegment(name string) (*IsolationSegment, error) + GetIsolationSegmentByGUID(guid string) (*IsolationSegment, error) + ListIsolationSegmentsByQuery(query url.Values) ([]IsolationSegment, error) + ListIsolationSegments() ([]IsolationSegment, error) + DeleteIsolationSegmentByGUID(guid string) error + AddIsolationSegmentToOrg(isolationSegmentGUID, orgGUID string) error + RemoveIsolationSegmentFromOrg(isolationSegmentGUID, orgGUID string) error + AddIsolationSegmentToSpace(isolationSegmentGUID, spaceGUID string) error + RemoveIsolationSegmentFromSpace(isolationSegmentGUID, spaceGUID string) error + ListAppEvents(eventType string) ([]AppEventEntity, error) + ListAppEventsByQuery(eventType string, queries []AppEventQuery) ([]AppEventEntity, error) + GetInfo() (*Info, error) + CreateBuildpack(bpr *BuildpackRequest) (*Buildpack, error) + ListBuildpacks() ([]Buildpack, error) + DeleteBuildpack(guid string, async bool) error + 
GetBuildpackByGuid(buildpackGUID string) (Buildpack, error) + CreateSpace(req SpaceRequest) (Space, error) + UpdateSpace(spaceGUID string, req SpaceRequest) (Space, error) + DeleteSpace(guid string, recursive, async bool) error + ListSpaceManagersByQuery(spaceGUID string, query url.Values) ([]User, error) + ListSpaceManagers(spaceGUID string) ([]User, error) + ListSpaceAuditorsByQuery(spaceGUID string, query url.Values) ([]User, error) + ListSpaceAuditors(spaceGUID string) ([]User, error) + ListSpaceDevelopersByQuery(spaceGUID string, query url.Values) ([]User, error) + ListSpaceDevelopers(spaceGUID string) ([]User, error) + AssociateSpaceDeveloper(spaceGUID, userGUID string) (Space, error) + AssociateSpaceDeveloperByUsername(spaceGUID, name string) (Space, error) + AssociateSpaceDeveloperByUsernameAndOrigin(spaceGUID, name, origin string) (Space, error) + RemoveSpaceDeveloper(spaceGUID, userGUID string) error + RemoveSpaceDeveloperByUsername(spaceGUID, name string) error + RemoveSpaceDeveloperByUsernameAndOrigin(spaceGUID, name, origin string) error + AssociateSpaceAuditor(spaceGUID, userGUID string) (Space, error) + AssociateSpaceAuditorByUsername(spaceGUID, name string) (Space, error) + AssociateSpaceAuditorByUsernameAndOrigin(spaceGUID, name, origin string) (Space, error) + RemoveSpaceAuditor(spaceGUID, userGUID string) error + RemoveSpaceAuditorByUsername(spaceGUID, name string) error + RemoveSpaceAuditorByUsernameAndOrigin(spaceGUID, name, origin string) error + AssociateSpaceManager(spaceGUID, userGUID string) (Space, error) + AssociateSpaceManagerByUsername(spaceGUID, name string) (Space, error) + AssociateSpaceManagerByUsernameAndOrigin(spaceGUID, name, origin string) (Space, error) + RemoveSpaceManager(spaceGUID, userGUID string) error + RemoveSpaceManagerByUsername(spaceGUID, name string) error + RemoveSpaceManagerByUsernameAndOrigin(spaceGUID, name, origin string) error + ListSpaceSecGroups(spaceGUID string) (secGroups []SecGroup, err error) + 
ListSpacesByQuery(query url.Values) ([]Space, error) + ListSpaces() ([]Space, error) + GetSpaceByName(spaceName string, orgGuid string) (space Space, err error) + GetSpaceByGuid(spaceGUID string) (Space, error) + IsolationSegmentForSpace(spaceGUID, isolationSegmentGUID string) error + ResetIsolationSegmentForSpace(spaceGUID string) error + ListDomainsByQuery(query url.Values) ([]Domain, error) + ListDomains() ([]Domain, error) + ListSharedDomainsByQuery(query url.Values) ([]SharedDomain, error) + ListSharedDomains() ([]SharedDomain, error) + GetSharedDomainByGuid(guid string) (SharedDomain, error) + CreateSharedDomain(name string, internal bool, router_group_guid string) (*SharedDomain, error) + DeleteSharedDomain(guid string, async bool) error + GetDomainByName(name string) (Domain, error) + GetSharedDomainByName(name string) (SharedDomain, error) + CreateDomain(name, orgGuid string) (*Domain, error) + DeleteDomain(guid string) error + DeleteServiceBroker(guid string) error + UpdateServiceBroker(guid string, usb UpdateServiceBrokerRequest) (ServiceBroker, error) + CreateServiceBroker(csb CreateServiceBrokerRequest) (ServiceBroker, error) + ListServiceBrokersByQuery(query url.Values) ([]ServiceBroker, error) + ListServiceBrokers() ([]ServiceBroker, error) + GetServiceBrokerByGuid(guid string) (ServiceBroker, error) + GetServiceBrokerByName(name string) (ServiceBroker, error) + ListServiceUsageEventsByQuery(query url.Values) ([]ServiceUsageEvent, error) + ListServiceUsageEvents() ([]ServiceUsageEvent, error) + ListServiceKeysByQuery(query url.Values) ([]ServiceKey, error) + ListServiceKeys() ([]ServiceKey, error) + GetServiceKeyByName(name string) (ServiceKey, error) + GetServiceKeyByInstanceGuid(guid string) (ServiceKey, error) + GetServiceKeysByInstanceGuid(guid string) ([]ServiceKey, error) + CreateServiceKey(csr CreateServiceKeyRequest) (ServiceKey, error) + DeleteServiceKey(guid string) error + ListOrgsByQuery(query url.Values) ([]Org, error) + ListOrgs() 
([]Org, error) + GetOrgByName(name string) (Org, error) + GetOrgByGuid(guid string) (Org, error) + OrgSpaces(guid string) ([]Space, error) + ListOrgUsersByQuery(orgGUID string, query url.Values) ([]User, error) + ListOrgUsers(orgGUID string) ([]User, error) + ListOrgManagersByQuery(orgGUID string, query url.Values) ([]User, error) + ListOrgManagers(orgGUID string) ([]User, error) + ListOrgAuditorsByQuery(orgGUID string, query url.Values) ([]User, error) + ListOrgAuditors(orgGUID string) ([]User, error) + ListOrgBillingManagersByQuery(orgGUID string, query url.Values) ([]User, error) + ListOrgBillingManagers(orgGUID string) ([]User, error) + AssociateOrgManager(orgGUID, userGUID string) (Org, error) + AssociateOrgManagerByUsername(orgGUID, name string) (Org, error) + AssociateOrgManagerByUsernameAndOrigin(orgGUID, name, origin string) (Org, error) + AssociateOrgUser(orgGUID, userGUID string) (Org, error) + AssociateOrgAuditor(orgGUID, userGUID string) (Org, error) + AssociateOrgUserByUsername(orgGUID, name string) (Org, error) + AssociateOrgUserByUsernameAndOrigin(orgGUID, name, origin string) (Org, error) + AssociateOrgAuditorByUsername(orgGUID, name string) (Org, error) + AssociateOrgAuditorByUsernameAndOrigin(orgGUID, name, origin string) (Org, error) + AssociateOrgBillingManager(orgGUID, userGUID string) (Org, error) + AssociateOrgBillingManagerByUsername(orgGUID, name string) (Org, error) + AssociateOrgBillingManagerByUsernameAndOrigin(orgGUID, name, origin string) (Org, error) + RemoveOrgManager(orgGUID, userGUID string) error + RemoveOrgManagerByUsername(orgGUID, name string) error + RemoveOrgManagerByUsernameAndOrigin(orgGUID, name, origin string) error + RemoveOrgUser(orgGUID, userGUID string) error + RemoveOrgAuditor(orgGUID, userGUID string) error + RemoveOrgUserByUsername(orgGUID, name string) error + RemoveOrgUserByUsernameAndOrigin(orgGUID, name, origin string) error + RemoveOrgAuditorByUsername(orgGUID, name string) error + 
RemoveOrgAuditorByUsernameAndOrigin(orgGUID, name, origin string) error + RemoveOrgBillingManager(orgGUID, userGUID string) error + RemoveOrgBillingManagerByUsername(orgGUID, name string) error + RemoveOrgBillingManagerByUsernameAndOrigin(orgGUID, name, origin string) error + ListOrgSpaceQuotas(orgGUID string) ([]SpaceQuota, error) + ListOrgPrivateDomains(orgGUID string) ([]Domain, error) + ShareOrgPrivateDomain(orgGUID, privateDomainGUID string) (*Domain, error) + UnshareOrgPrivateDomain(orgGUID, privateDomainGUID string) error + CreateOrg(req OrgRequest) (Org, error) + UpdateOrg(orgGUID string, orgRequest OrgRequest) (Org, error) + DeleteOrg(guid string, recursive, async bool) error + DefaultIsolationSegmentForOrg(orgGUID, isolationSegmentGUID string) error + ResetDefaultIsolationSegmentForOrg(orgGUID string) error + ListEventsByQuery(query url.Values) ([]Event, error) + ListEvents() ([]Event, error) + TotalEventsByQuery(query url.Values) (int, error) + TotalEvents() (int, error) + GetUserByGUID(guid string) (User, error) + ListUsersByQuery(query url.Values) (Users, error) + ListUsers() (Users, error) + ListUserSpaces(userGuid string) ([]Space, error) + ListUserAuditedSpaces(userGuid string) ([]Space, error) + ListUserManagedSpaces(userGuid string) ([]Space, error) + ListUserOrgs(userGuid string) ([]Org, error) + ListUserManagedOrgs(userGuid string) ([]Org, error) + ListUserAuditedOrgs(userGuid string) ([]Org, error) + ListUserBillingManagedOrgs(userGuid string) ([]Org, error) + CreateUser(req UserRequest) (User, error) + DeleteUser(userGuid string) error + ListServicePlansByQuery(query url.Values) ([]ServicePlan, error) + ListServicePlans() ([]ServicePlan, error) + GetServicePlanByGUID(guid string) (*ServicePlan, error) + MakeServicePlanPublic(servicePlanGUID string) error + MakeServicePlanPrivate(servicePlanGUID string) error + ListServiceBindingsByQuery(query url.Values) ([]ServiceBinding, error) + ListServiceBindings() ([]ServiceBinding, error) + 
GetServiceBindingByGuid(guid string) (ServiceBinding, error) + ServiceBindingByGuid(guid string) (ServiceBinding, error) + DeleteServiceBinding(guid string) error + CreateServiceBinding(appGUID, serviceInstanceGUID string) (*ServiceBinding, error) + CreateRouteServiceBinding(routeGUID, serviceInstanceGUID string) error + DeleteRouteServiceBinding(routeGUID, serviceInstanceGUID string) error + UpdateApp(guid string, aur AppUpdateResource) (UpdateResponse, error) + ListServiceInstancesByQuery(query url.Values) ([]ServiceInstance, error) + ListServiceInstances() ([]ServiceInstance, error) + GetServiceInstanceByGuid(guid string) (ServiceInstance, error) + ServiceInstanceByGuid(guid string) (ServiceInstance, error) + CreateServiceInstance(req ServiceInstanceRequest) (ServiceInstance, error) + UpdateServiceInstance(serviceInstanceGuid string, updatedConfiguration io.Reader, async bool) error + DeleteServiceInstance(guid string, recursive, async bool) error + ListOrgQuotasByQuery(query url.Values) ([]OrgQuota, error) + ListOrgQuotas() ([]OrgQuota, error) + GetOrgQuotaByName(name string) (OrgQuota, error) + CreateOrgQuota(orgQuote OrgQuotaRequest) (*OrgQuota, error) + UpdateOrgQuota(orgQuotaGUID string, orgQuota OrgQuotaRequest) (*OrgQuota, error) + DeleteOrgQuota(guid string, async bool) error + CreateRoute(routeRequest RouteRequest) (Route, error) + CreateTcpRoute(routeRequest RouteRequest) (Route, error) + BindRoute(routeGUID, appGUID string) error + ListRoutesByQuery(query url.Values) ([]Route, error) + ListRoutes() ([]Route, error) + DeleteRoute(guid string) error + ListTasks() ([]Task, error) + ListTasksByQuery(query url.Values) ([]Task, error) + TasksByApp(guid string) ([]Task, error) + TasksByAppByQuery(guid string, query url.Values) ([]Task, error) + CreateTask(tr TaskRequest) (task Task, err error) + GetTaskByGuid(guid string) (task Task, err error) + TaskByGuid(guid string) (task Task, err error) + TerminateTask(guid string) error + MappingAppAndRoute(req 
RouteMappingRequest) (*RouteMapping, error) + ListRouteMappings() ([]*RouteMapping, error) + ListRouteMappingsByQuery(query url.Values) ([]*RouteMapping, error) + GetRouteMappingByGuid(guid string) (*RouteMapping, error) + DeleteRouteMapping(guid string) error + ListAppUsageEventsByQuery(query url.Values) ([]AppUsageEvent, error) + ListAppUsageEvents() ([]AppUsageEvent, error) + ListAppsByQueryWithLimits(query url.Values, totalPages int) ([]App, error) + ListAppsByQuery(query url.Values) ([]App, error) + GetAppByGuidNoInlineCall(guid string) (App, error) + ListApps() ([]App, error) + ListAppsByRoute(routeGuid string) ([]App, error) + GetAppInstances(guid string) (map[string]AppInstance, error) + GetAppEnv(guid string) (AppEnv, error) + GetAppRoutes(guid string) ([]Route, error) + GetAppStats(guid string) (map[string]AppStats, error) + KillAppInstance(guid string, index string) error + GetAppByGuid(guid string) (App, error) + AppByGuid(guid string) (App, error) + AppByName(appName, spaceGuid, orgGuid string) (app App, err error) + UploadAppBits(file io.Reader, appGUID string) error + GetAppBits(guid string) (io.ReadCloser, error) + CreateApp(req AppCreateRequest) (App, error) + StartApp(guid string) error + StopApp(guid string) error + GetServiceByGuid(guid string) (Service, error) + ListServicesByQuery(query url.Values) ([]Service, error) + ListServices() ([]Service, error) + NewRequest(method, path string) *Request + NewRequestWithBody(method, path string, body io.Reader) *Request + DoRequest(r *Request) (*http.Response, error) + DoRequestWithoutRedirects(r *Request) (*http.Response, error) + Do(req *http.Request) (*http.Response, error) + GetToken() (string, error) + ListUserProvidedServiceInstancesByQuery(query url.Values) ([]UserProvidedServiceInstance, error) + ListUserProvidedServiceInstances() ([]UserProvidedServiceInstance, error) + GetUserProvidedServiceInstanceByGuid(guid string) (UserProvidedServiceInstance, error) + 
UserProvidedServiceInstanceByGuid(guid string) (UserProvidedServiceInstance, error) + CreateUserProvidedServiceInstance(req UserProvidedServiceInstanceRequest) (*UserProvidedServiceInstance, error) + DeleteUserProvidedServiceInstance(guid string) error + UpdateUserProvidedServiceInstance(guid string, req UserProvidedServiceInstanceRequest) (*UserProvidedServiceInstance, error) + ListSpaceQuotasByQuery(query url.Values) ([]SpaceQuota, error) + ListSpaceQuotas() ([]SpaceQuota, error) + GetSpaceQuotaByName(name string) (SpaceQuota, error) + AssignSpaceQuota(quotaGUID, spaceGUID string) error + CreateSpaceQuota(spaceQuote SpaceQuotaRequest) (*SpaceQuota, error) + UpdateSpaceQuota(spaceQuotaGUID string, spaceQuote SpaceQuotaRequest) (*SpaceQuota, error) +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/domains.go b/vendor/github.com/cloudfoundry-community/go-cfclient/domains.go new file mode 100644 index 000000000000..c5ebe9dfbda7 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/domains.go @@ -0,0 +1,301 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/pkg/errors" +) + +type DomainsResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []DomainResource `json:"resources"` +} + +type SharedDomainsResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []SharedDomainResource `json:"resources"` +} + +type DomainResource struct { + Meta Meta `json:"metadata"` + Entity Domain `json:"entity"` +} + +type SharedDomainResource struct { + Meta Meta `json:"metadata"` + Entity SharedDomain `json:"entity"` +} + +type Domain struct { + Guid string `json:"guid"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + OwningOrganizationGuid string 
`json:"owning_organization_guid"` + OwningOrganizationUrl string `json:"owning_organization_url"` + SharedOrganizationsUrl string `json:"shared_organizations_url"` + c *Client +} + +type SharedDomain struct { + Guid string `json:"guid"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + RouterGroupGuid string `json:"router_group_guid"` + RouterGroupType string `json:"router_group_type"` + Internal bool `json:"internal"` + c *Client +} + +func (c *Client) ListDomainsByQuery(query url.Values) ([]Domain, error) { + var domains []Domain + requestUrl := "/v2/private_domains?" + query.Encode() + for { + var domainResp DomainsResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting domains") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading domains request") + } + + err = json.Unmarshal(resBody, &domainResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling domains") + } + for _, domain := range domainResp.Resources { + domain.Entity.Guid = domain.Meta.Guid + domain.Entity.CreatedAt = domain.Meta.CreatedAt + domain.Entity.UpdatedAt = domain.Meta.UpdatedAt + domain.Entity.c = c + domains = append(domains, domain.Entity) + } + requestUrl = domainResp.NextUrl + if requestUrl == "" { + break + } + } + return domains, nil +} + +func (c *Client) ListDomains() ([]Domain, error) { + return c.ListDomainsByQuery(nil) +} + +func (c *Client) ListSharedDomainsByQuery(query url.Values) ([]SharedDomain, error) { + var domains []SharedDomain + requestUrl := "/v2/shared_domains?" 
+ query.Encode() + for { + var domainResp SharedDomainsResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting shared domains") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading shared domains request") + } + + err = json.Unmarshal(resBody, &domainResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling shared domains") + } + for _, domain := range domainResp.Resources { + domain.Entity.Guid = domain.Meta.Guid + domain.Entity.CreatedAt = domain.Meta.CreatedAt + domain.Entity.UpdatedAt = domain.Meta.UpdatedAt + domain.Entity.c = c + domains = append(domains, domain.Entity) + } + requestUrl = domainResp.NextUrl + if requestUrl == "" { + break + } + } + return domains, nil +} + +func (c *Client) ListSharedDomains() ([]SharedDomain, error) { + return c.ListSharedDomainsByQuery(nil) +} + +func (c *Client) GetSharedDomainByGuid(guid string) (SharedDomain, error) { + r := c.NewRequest("GET", "/v2/shared_domains/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return SharedDomain{}, errors.Wrap(err, "Error requesting shared domain") + } + defer resp.Body.Close() + retval, err := c.handleSharedDomainResp(resp) + return *retval, err +} + +func (c *Client) CreateSharedDomain(name string, internal bool, router_group_guid string) (*SharedDomain, error) { + req := c.NewRequest("POST", "/v2/shared_domains") + params := map[string]interface{}{ + "name": name, + "internal": internal, + } + + if strings.TrimSpace(router_group_guid) != "" { + params["router_group_guid"] = router_group_guid + } + + req.obj = params + + resp, err := c.DoRequest(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, errors.Wrapf(err, "Error creating shared domain %s, response code: %d", name, resp.StatusCode) + } + return c.handleSharedDomainResp(resp) +} + +func (c *Client) 
DeleteSharedDomain(guid string, async bool) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/shared_domains/%s?async=%t", guid, async))) + if err != nil { + return err + } + if (async && (resp.StatusCode != http.StatusAccepted)) || (!async && (resp.StatusCode != http.StatusNoContent)) { + return errors.Wrapf(err, "Error deleting organization %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) GetDomainByName(name string) (Domain, error) { + q := url.Values{} + q.Set("q", "name:"+name) + domains, err := c.ListDomainsByQuery(q) + if err != nil { + return Domain{}, errors.Wrapf(err, "Error during domain lookup %s", name) + } + if len(domains) == 0 { + return Domain{}, fmt.Errorf("Unable to find domain %s", name) + } + return domains[0], nil +} + +func (c *Client) GetDomainByGuid(guid string) (Domain, error) { + r := c.NewRequest("GET", "/v2/private_domains/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return Domain{}, errors.Wrap(err, "Error requesting private domain") + } + defer resp.Body.Close() + retval, err := c.handleDomainResp(resp) + return *retval, err +} + +func (c *Client) GetSharedDomainByName(name string) (SharedDomain, error) { + q := url.Values{} + q.Set("q", "name:"+name) + domains, err := c.ListSharedDomainsByQuery(q) + if err != nil { + return SharedDomain{}, errors.Wrapf(err, "Error during shared domain lookup %s", name) + } + if len(domains) == 0 { + return SharedDomain{}, fmt.Errorf("Unable to find shared domain %s", name) + } + return domains[0], nil +} + +func (c *Client) CreateDomain(name, orgGuid string) (*Domain, error) { + req := c.NewRequest("POST", "/v2/private_domains") + req.obj = map[string]interface{}{ + "name": name, + "owning_organization_guid": orgGuid, + } + resp, err := c.DoRequest(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, errors.Wrapf(err, "Error creating domain %s, response code: %d", name, 
resp.StatusCode) + } + return c.handleDomainResp(resp) +} + +func (c *Client) DeleteDomain(guid string) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/private_domains/%s", guid))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting domain %s, response code: %d", guid, resp.StatusCode) + } + return nil +} +func (c *Client) handleDomainResp(resp *http.Response) (*Domain, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return nil, err + } + var domainResource DomainResource + err = json.Unmarshal(body, &domainResource) + if err != nil { + return nil, err + } + return c.mergeDomainResource(domainResource), nil +} + +func (c *Client) handleSharedDomainResp(resp *http.Response) (*SharedDomain, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return nil, err + } + var domainResource SharedDomainResource + err = json.Unmarshal(body, &domainResource) + if err != nil { + return nil, err + } + return c.mergeSharedDomainResource(domainResource), nil +} + +func (c *Client) getDomainsResponse(requestUrl string) (DomainsResponse, error) { + var domainResp DomainsResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return DomainsResponse{}, errors.Wrap(err, "Error requesting domains") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return DomainsResponse{}, errors.Wrap(err, "Error reading domains request") + } + err = json.Unmarshal(resBody, &domainResp) + if err != nil { + return DomainsResponse{}, errors.Wrap(err, "Error unmarshalling org") + } + return domainResp, nil +} + +func (c *Client) mergeDomainResource(domainResource DomainResource) *Domain { + domainResource.Entity.Guid = domainResource.Meta.Guid + domainResource.Entity.c = c + return &domainResource.Entity +} + +func (c *Client) 
mergeSharedDomainResource(domainResource SharedDomainResource) *SharedDomain { + domainResource.Entity.Guid = domainResource.Meta.Guid + domainResource.Entity.c = c + return &domainResource.Entity +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/environmentvariablegroups.go b/vendor/github.com/cloudfoundry-community/go-cfclient/environmentvariablegroups.go new file mode 100644 index 000000000000..87b94260721c --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/environmentvariablegroups.go @@ -0,0 +1,59 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" +) + +type EnvironmentVariableGroup map[string]interface{} + +func (c *Client) GetRunningEnvironmentVariableGroup() (EnvironmentVariableGroup, error) { + return c.getEnvironmentVariableGroup(true) +} + +func (c *Client) GetStagingEnvironmentVariableGroup() (EnvironmentVariableGroup, error) { + return c.getEnvironmentVariableGroup(false) +} + +func (c *Client) getEnvironmentVariableGroup(running bool) (EnvironmentVariableGroup, error) { + evgType := "staging" + if running { + evgType = "running" + } + + req := c.NewRequest("GET", fmt.Sprintf("/v2/config/environment_variable_groups/%s", evgType)) + resp, err := c.DoRequest(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + evg := EnvironmentVariableGroup{} + err = json.NewDecoder(resp.Body).Decode(&evg) + return evg, err +} + +func (c *Client) SetRunningEnvironmentVariableGroup(evg EnvironmentVariableGroup) error { + return c.setEnvironmentVariableGroup(evg, true) +} + +func (c *Client) SetStagingEnvironmentVariableGroup(evg EnvironmentVariableGroup) error { + return c.setEnvironmentVariableGroup(evg, false) +} + +func (c *Client) setEnvironmentVariableGroup(evg EnvironmentVariableGroup, running bool) error { + evgType := "staging" + if running { + evgType = "running" + } + + marshalled, err := json.Marshal(evg) + if err != nil { + return err + } + + req := c.NewRequestWithBody("PUT", 
fmt.Sprintf("/v2/config/environment_variable_groups/%s", evgType), bytes.NewBuffer(marshalled)) + _, err = c.DoRequest(req) + return err +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/error.go b/vendor/github.com/cloudfoundry-community/go-cfclient/error.go new file mode 100644 index 000000000000..e4decdf88be0 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/error.go @@ -0,0 +1,54 @@ +package cfclient + +//go:generate go run gen_error.go + +import ( + "fmt" +) + +type CloudFoundryError struct { + Code int `json:"code"` + ErrorCode string `json:"error_code"` + Description string `json:"description"` +} + +type CloudFoundryErrorsV3 struct { + Errors []CloudFoundryErrorV3 `json:"errors"` +} + +type CloudFoundryErrorV3 struct { + Code int `json:"code"` + Title string `json:"title"` + Detail string `json:"detail"` +} + +// CF APIs v3 can return multiple errors, we take the first one and convert it into a V2 model +func NewCloudFoundryErrorFromV3Errors(cfErrorsV3 CloudFoundryErrorsV3) CloudFoundryError { + if len(cfErrorsV3.Errors) == 0 { + return CloudFoundryError{ + 0, + "GO-Client-No-Errors", + "No Errors in response from V3", + } + } + + return CloudFoundryError{ + cfErrorsV3.Errors[0].Code, + cfErrorsV3.Errors[0].Title, + cfErrorsV3.Errors[0].Detail, + } +} + +func (cfErr CloudFoundryError) Error() string { + return fmt.Sprintf("cfclient error (%s|%d): %s", cfErr.ErrorCode, cfErr.Code, cfErr.Description) +} + +type CloudFoundryHTTPError struct { + StatusCode int + Status string + Body []byte +} + +func (e CloudFoundryHTTPError) Error() string { + return fmt.Sprintf("cfclient: HTTP error (%d): %s", e.StatusCode, e.Status) +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/events.go b/vendor/github.com/cloudfoundry-community/go-cfclient/events.go new file mode 100644 index 000000000000..6c42c565b297 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/events.go @@ -0,0 +1,95 @@ +package 
cfclient + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/pkg/errors" +) + +// EventsResponse is a type that wraps a collection of event resources. +type EventsResponse struct { + TotalResults int `json:"total_results"` + Pages int `json:"total_pages"` + NextURL string `json:"next_url"` + Resources []EventResource `json:"resources"` +} + +// EventResource is a type that contains metadata and the entity for an event. +type EventResource struct { + Meta Meta `json:"metadata"` + Entity Event `json:"entity"` +} + +// Event is a type that contains event data. +type Event struct { + GUID string `json:"guid"` + Type string `json:"type"` + CreatedAt string `json:"created_at"` + Actor string `json:"actor"` + ActorType string `json:"actor_type"` + ActorName string `json:"actor_name"` + ActorUsername string `json:"actor_username"` + Actee string `json:"actee"` + ActeeType string `json:"actee_type"` + ActeeName string `json:"actee_name"` + OrganizationGUID string `json:"organization_guid"` + SpaceGUID string `json:"space_guid"` + Metadata map[string]interface{} `json:"metadata"` + c *Client +} + +// ListEventsByQuery lists all events matching the provided query. 
+func (c *Client) ListEventsByQuery(query url.Values) ([]Event, error) { + var events []Event + requestURL := fmt.Sprintf("/v2/events?%s", query.Encode()) + for { + var eventResp EventsResponse + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "error requesting events") + } + defer resp.Body.Close() + if err := json.NewDecoder(resp.Body).Decode(&eventResp); err != nil { + return nil, errors.Wrap(err, "error unmarshaling events") + } + for _, e := range eventResp.Resources { + e.Entity.GUID = e.Meta.Guid + e.Entity.CreatedAt = e.Meta.CreatedAt + e.Entity.c = c + events = append(events, e.Entity) + } + requestURL = eventResp.NextURL + if requestURL == "" { + break + } + } + return events, nil +} + +// ListEvents lists all unfiltered events. +func (c *Client) ListEvents() ([]Event, error) { + return c.ListEventsByQuery(nil) +} + +// TotalEventsByQuery returns the number of events matching the provided query. +func (c *Client) TotalEventsByQuery(query url.Values) (int, error) { + r := c.NewRequest("GET", fmt.Sprintf("/v2/events?%s", query.Encode())) + resp, err := c.DoRequest(r) + if err != nil { + return 0, errors.Wrap(err, "error requesting events") + } + defer resp.Body.Close() + var apiResp EventsResponse + if err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil { + return 0, errors.Wrap(err, "error unmarshaling events") + } + return apiResp.TotalResults, nil +} + +// TotalEvents returns the number of unfiltered events. 
+func (c *Client) TotalEvents() (int, error) { + return c.TotalEventsByQuery(nil) +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/go.mod b/vendor/github.com/cloudfoundry-community/go-cfclient/go.mod new file mode 100644 index 000000000000..b9d8dc55536e --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/go.mod @@ -0,0 +1,17 @@ +module github.com/cloudfoundry-community/go-cfclient + +require ( + code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f + github.com/Masterminds/semver v1.4.2 + github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect + github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab + github.com/kr/pretty v0.1.0 // indirect + github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 + github.com/onsi/gomega v1.4.3 + github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 // indirect + github.com/pkg/errors v0.8.1 + github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a + golang.org/x/net v0.0.0-20190311183353-d8887717615a + golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect +) diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/go.sum b/vendor/github.com/cloudfoundry-community/go-cfclient/go.sum new file mode 100644 index 000000000000..3e0f60d93ae2 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/go.sum @@ -0,0 +1,77 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= +code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= +github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= 
+github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f h1:FDM3EtwZLyhW48YRiyqjivNlNZjAObv4xt4NnJaU+NQ= +github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= +github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= +github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470 h1:R0uuDVEvfDha2O6dfJRr4/5NBHKEbZhMPZmqOWpEkSo= +github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbmil4Ui/dDdFBExb7/cmkNjyX5F97oglmvCDo= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= +github.com/smartystreets/goconvey 
v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1 h1:VeAkjQVzKLmu+JnFcK96TPbkuaTIqwGGAzQ9hgwPjVg= +golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/info.go b/vendor/github.com/cloudfoundry-community/go-cfclient/info.go new file mode 100644 index 000000000000..4f14ac582d0d --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/info.go @@ -0,0 +1,43 @@ +package cfclient + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +// Info is metadata about a Cloud Foundry deployment +type Info struct { + Name string `json:"name"` + Build string `json:"build"` + Support string `json:"support"` + Version int `json:"version"` + Description string `json:"description"` + AuthorizationEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string 
`json:"token_endpoint"` + MinCLIVersion string `json:"min_cli_version"` + MinRecommendedCLIVersion string `json:"min_recommended_cli_version"` + APIVersion string `json:"api_version"` + AppSSHEndpoint string `json:"app_ssh_endpoint"` + AppSSHHostKeyFingerprint string `json:"app_ssh_host_key_fingerprint"` + AppSSHOauthClient string `json:"app_ssh_oauth_client"` + DopplerLoggingEndpoint string `json:"doppler_logging_endpoint"` + RoutingEndpoint string `json:"routing_endpoint"` + User string `json:"user,omitempty"` +} + +// GetInfo retrieves Info from the Cloud Controller API +func (c *Client) GetInfo() (*Info, error) { + r := c.NewRequest("GET", "/v2/info") + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting info") + } + defer resp.Body.Close() + var i Info + err = json.NewDecoder(resp.Body).Decode(&i) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling info") + } + return &i, nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/isolationsegments.go b/vendor/github.com/cloudfoundry-community/go-cfclient/isolationsegments.go new file mode 100644 index 000000000000..2be40e557263 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/isolationsegments.go @@ -0,0 +1,251 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/pkg/errors" +) + +type IsolationSegment struct { + GUID string `json:"guid"` + Name string `json:"name"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + c *Client +} + +type IsolationSegementResponse struct { + GUID string `json:"guid"` + Name string `json:"name"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Links struct { + Self struct { + Href string `json:"href"` + } `json:"self"` + Spaces struct { + Href string `json:"href"` + } `json:"spaces"` + Organizations struct { + Href string `json:"href"` 
+ } `json:"organizations"` + } `json:"links"` +} + +type ListIsolationSegmentsResponse struct { + Pagination Pagination `json:"pagination"` + Resources []IsolationSegementResponse `json:"resources"` +} + +func (c *Client) CreateIsolationSegment(name string) (*IsolationSegment, error) { + req := c.NewRequest("POST", "/v3/isolation_segments") + req.obj = map[string]interface{}{ + "name": name, + } + resp, err := c.DoRequest(req) + if err != nil { + return nil, errors.Wrap(err, "Error while creating isolation segment") + } + if resp.StatusCode != http.StatusCreated { + return nil, fmt.Errorf("Error creating isolation segment %s, response code: %d", name, resp.StatusCode) + } + return respBodyToIsolationSegment(resp.Body, c) +} + +func respBodyToIsolationSegment(body io.ReadCloser, c *Client) (*IsolationSegment, error) { + bodyRaw, err := ioutil.ReadAll(body) + if err != nil { + return nil, err + } + isr := IsolationSegementResponse{} + err = json.Unmarshal(bodyRaw, &isr) + if err != nil { + return nil, err + } + + return &IsolationSegment{ + GUID: isr.GUID, + Name: isr.Name, + CreatedAt: isr.CreatedAt, + UpdatedAt: isr.UpdatedAt, + c: c, + }, nil +} + +func (c *Client) GetIsolationSegmentByGUID(guid string) (*IsolationSegment, error) { + var isr IsolationSegementResponse + r := c.NewRequest("GET", "/v3/isolation_segments/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting isolation segment by GUID") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading isolation segment response body") + } + + err = json.Unmarshal(resBody, &isr) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling isolation segment response") + } + return &IsolationSegment{Name: isr.Name, GUID: isr.GUID, CreatedAt: isr.CreatedAt, UpdatedAt: isr.UpdatedAt, c: c}, nil +} + +func (c *Client) ListIsolationSegmentsByQuery(query url.Values) 
([]IsolationSegment, error) { + var iss []IsolationSegment + requestUrl := "/v3/isolation_segments?" + query.Encode() + for { + var isr ListIsolationSegmentsResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting isolation segments") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading isolation segment request") + } + + err = json.Unmarshal(resBody, &isr) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling isolation segment") + } + + for _, is := range isr.Resources { + iss = append(iss, IsolationSegment{ + Name: is.Name, + GUID: is.GUID, + CreatedAt: is.CreatedAt, + UpdatedAt: is.UpdatedAt, + c: c, + }) + } + + var ok bool + requestUrl, ok = isr.Pagination.Next.(string) + if !ok || requestUrl == "" { + break + } + } + return iss, nil +} + +func (c *Client) ListIsolationSegments() ([]IsolationSegment, error) { + return c.ListIsolationSegmentsByQuery(nil) +} + +// TODO listOrgsForIsolationSegments +// TODO listSpacesForIsolationSegments +// TODO setDefaultIsolationSegmentForOrg + +func (c *Client) DeleteIsolationSegmentByGUID(guid string) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v3/isolation_segments/%s", guid))) + if err != nil { + return errors.Wrap(err, "Error during sending DELETE request for isolation segments") + } + if resp.StatusCode != http.StatusNoContent { + return fmt.Errorf("Error deleting isolation segment %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (i *IsolationSegment) Delete() error { + return i.c.DeleteIsolationSegmentByGUID(i.GUID) +} + +func (c *Client) AddIsolationSegmentToOrg(isolationSegmentGUID, orgGUID string) error { + isoSegment := IsolationSegment{GUID: isolationSegmentGUID, c: c} + return isoSegment.AddOrg(orgGUID) +} + +func (c *Client) 
RemoveIsolationSegmentFromOrg(isolationSegmentGUID, orgGUID string) error { + isoSegment := IsolationSegment{GUID: isolationSegmentGUID, c: c} + return isoSegment.RemoveOrg(orgGUID) +} + +func (c *Client) AddIsolationSegmentToSpace(isolationSegmentGUID, spaceGUID string) error { + isoSegment := IsolationSegment{GUID: isolationSegmentGUID, c: c} + return isoSegment.AddSpace(spaceGUID) +} + +func (c *Client) RemoveIsolationSegmentFromSpace(isolationSegmentGUID, spaceGUID string) error { + isoSegment := IsolationSegment{GUID: isolationSegmentGUID, c: c} + return isoSegment.RemoveSpace(spaceGUID) +} + +func (i *IsolationSegment) AddOrg(orgGuid string) error { + if i == nil || i.c == nil { + return errors.New("No communication handle.") + } + req := i.c.NewRequest("POST", fmt.Sprintf("/v3/isolation_segments/%s/relationships/organizations", i.GUID)) + type Entry struct { + GUID string `json:"guid"` + } + req.obj = map[string]interface{}{ + "data": []Entry{{GUID: orgGuid}}, + } + resp, err := i.c.DoRequest(req) + if err != nil { + return errors.Wrap(err, "Error during adding org to isolation segment") + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("Error adding org %s to isolation segment %s, response code: %d", orgGuid, i.Name, resp.StatusCode) + } + return nil +} + +func (i *IsolationSegment) RemoveOrg(orgGuid string) error { + if i == nil || i.c == nil { + return errors.New("No communication handle.") + } + req := i.c.NewRequest("DELETE", fmt.Sprintf("/v3/isolation_segments/%s/relationships/organizations/%s", i.GUID, orgGuid)) + resp, err := i.c.DoRequest(req) + if err != nil { + return errors.Wrapf(err, "Error during removing org %s in isolation segment %s", orgGuid, i.Name) + } + if resp.StatusCode != http.StatusNoContent { + return fmt.Errorf("Error deleting org %s in isolation segment %s, response code: %d", orgGuid, i.Name, resp.StatusCode) + } + return nil +} + +func (i *IsolationSegment) AddSpace(spaceGuid string) error { + if i == nil || i.c == 
nil { + return errors.New("No communication handle.") + } + req := i.c.NewRequest("PUT", fmt.Sprintf("/v2/spaces/%s", spaceGuid)) + req.obj = map[string]interface{}{ + "isolation_segment_guid": i.GUID, + } + resp, err := i.c.DoRequest(req) + if err != nil { + return errors.Wrapf(err, "Error during adding space %s to isolation segment %s", spaceGuid, i.Name) + } + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("Error adding space to isolation segment %s, response code: %d", i.Name, resp.StatusCode) + } + return nil +} + +func (i *IsolationSegment) RemoveSpace(spaceGuid string) error { + if i == nil || i.c == nil { + return errors.New("No communication handle.") + } + req := i.c.NewRequest("DELETE", fmt.Sprintf("/v2/spaces/%s/isolation_segment", spaceGuid)) + resp, err := i.c.DoRequest(req) + if err != nil { + return errors.Wrapf(err, "Error during deleting space %s in isolation segment %s", spaceGuid, i.Name) + } + if resp.StatusCode != http.StatusNoContent { + return fmt.Errorf("Error deleting space %s from isolation segment %s, response code: %d", spaceGuid, i.Name, resp.StatusCode) + } + return nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/org_quotas.go b/vendor/github.com/cloudfoundry-community/go-cfclient/org_quotas.go new file mode 100644 index 000000000000..ea935e16022d --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/org_quotas.go @@ -0,0 +1,184 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type OrgQuotasResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []OrgQuotasResource `json:"resources"` +} + +type OrgQuotasResource struct { + Meta Meta `json:"metadata"` + Entity OrgQuota `json:"entity"` +} + +type OrgQuotaRequest struct { + Name string `json:"name"` + NonBasicServicesAllowed bool 
`json:"non_basic_services_allowed"` + TotalServices int `json:"total_services"` + TotalRoutes int `json:"total_routes"` + TotalPrivateDomains int `json:"total_private_domains"` + MemoryLimit int `json:"memory_limit"` + TrialDBAllowed bool `json:"trial_db_allowed"` + InstanceMemoryLimit int `json:"instance_memory_limit"` + AppInstanceLimit int `json:"app_instance_limit"` + AppTaskLimit int `json:"app_task_limit"` + TotalServiceKeys int `json:"total_service_keys"` + TotalReservedRoutePorts int `json:"total_reserved_route_ports"` +} + +type OrgQuota struct { + Guid string `json:"guid"` + Name string `json:"name"` + CreatedAt string `json:"created_at,omitempty"` + UpdatedAt string `json:"updated_at,omitempty"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` + TotalServices int `json:"total_services"` + TotalRoutes int `json:"total_routes"` + TotalPrivateDomains int `json:"total_private_domains"` + MemoryLimit int `json:"memory_limit"` + TrialDBAllowed bool `json:"trial_db_allowed"` + InstanceMemoryLimit int `json:"instance_memory_limit"` + AppInstanceLimit int `json:"app_instance_limit"` + AppTaskLimit int `json:"app_task_limit"` + TotalServiceKeys int `json:"total_service_keys"` + TotalReservedRoutePorts int `json:"total_reserved_route_ports"` + c *Client +} + +func (c *Client) ListOrgQuotasByQuery(query url.Values) ([]OrgQuota, error) { + var orgQuotas []OrgQuota + requestUrl := "/v2/quota_definitions?" 
+ query.Encode() + for { + orgQuotasResp, err := c.getOrgQuotasResponse(requestUrl) + if err != nil { + return []OrgQuota{}, err + } + for _, org := range orgQuotasResp.Resources { + org.Entity.Guid = org.Meta.Guid + org.Entity.CreatedAt = org.Meta.CreatedAt + org.Entity.UpdatedAt = org.Meta.UpdatedAt + org.Entity.c = c + orgQuotas = append(orgQuotas, org.Entity) + } + requestUrl = orgQuotasResp.NextUrl + if requestUrl == "" { + break + } + } + return orgQuotas, nil +} + +func (c *Client) ListOrgQuotas() ([]OrgQuota, error) { + return c.ListOrgQuotasByQuery(nil) +} + +func (c *Client) GetOrgQuotaByName(name string) (OrgQuota, error) { + q := url.Values{} + q.Set("q", "name:"+name) + orgQuotas, err := c.ListOrgQuotasByQuery(q) + if err != nil { + return OrgQuota{}, err + } + if len(orgQuotas) != 1 { + return OrgQuota{}, fmt.Errorf("Unable to find org quota " + name) + } + return orgQuotas[0], nil +} + +func (c *Client) getOrgQuotasResponse(requestUrl string) (OrgQuotasResponse, error) { + var orgQuotasResp OrgQuotasResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return OrgQuotasResponse{}, errors.Wrap(err, "Error requesting org quotas") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return OrgQuotasResponse{}, errors.Wrap(err, "Error reading org quotas body") + } + err = json.Unmarshal(resBody, &orgQuotasResp) + if err != nil { + return OrgQuotasResponse{}, errors.Wrap(err, "Error unmarshalling org quotas") + } + return orgQuotasResp, nil +} + +func (c *Client) CreateOrgQuota(orgQuote OrgQuotaRequest) (*OrgQuota, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(orgQuote) + if err != nil { + return nil, err + } + r := c.NewRequestWithBody("POST", "/v2/quota_definitions", buf) + resp, err := c.DoRequest(r) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, fmt.Errorf("CF API returned with status 
code %d", resp.StatusCode) + } + return c.handleOrgQuotaResp(resp) +} + +func (c *Client) UpdateOrgQuota(orgQuotaGUID string, orgQuota OrgQuotaRequest) (*OrgQuota, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(orgQuota) + if err != nil { + return nil, err + } + r := c.NewRequestWithBody("PUT", fmt.Sprintf("/v2/quota_definitions/%s", orgQuotaGUID), buf) + resp, err := c.DoRequest(r) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return c.handleOrgQuotaResp(resp) +} + +func (c *Client) DeleteOrgQuota(guid string, async bool) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/quota_definitions/%s?async=%t", guid, async))) + if err != nil { + return err + } + if (async && (resp.StatusCode != http.StatusAccepted)) || (!async && (resp.StatusCode != http.StatusNoContent)) { + return errors.Wrapf(err, "Error deleting organization %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) handleOrgQuotaResp(resp *http.Response) (*OrgQuota, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return nil, err + } + var orgQuotasResource OrgQuotasResource + err = json.Unmarshal(body, &orgQuotasResource) + if err != nil { + return nil, err + } + return c.mergeOrgQuotaResource(orgQuotasResource), nil +} + +func (c *Client) mergeOrgQuotaResource(orgQuotaResource OrgQuotasResource) *OrgQuota { + orgQuotaResource.Entity.Guid = orgQuotaResource.Meta.Guid + orgQuotaResource.Entity.CreatedAt = orgQuotaResource.Meta.CreatedAt + orgQuotaResource.Entity.UpdatedAt = orgQuotaResource.Meta.UpdatedAt + orgQuotaResource.Entity.c = c + return &orgQuotaResource.Entity +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/orgs.go b/vendor/github.com/cloudfoundry-community/go-cfclient/orgs.go new file mode 100644 index 
000000000000..4c71b94f9ecc --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/orgs.go @@ -0,0 +1,832 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type OrgResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []OrgResource `json:"resources"` +} + +type OrgResource struct { + Meta Meta `json:"metadata"` + Entity Org `json:"entity"` +} + +type OrgUserResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextURL string `json:"next_url"` + Resources []UserResource `json:"resources"` +} + +type Org struct { + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Name string `json:"name"` + Status string `json:"status"` + QuotaDefinitionGuid string `json:"quota_definition_guid"` + DefaultIsolationSegmentGuid string `json:"default_isolation_segment_guid"` + c *Client +} + +type OrgSummary struct { + Guid string `json:"guid"` + Name string `json:"name"` + Status string `json:"status"` + Spaces []OrgSummarySpaces `json:"spaces"` +} + +type OrgSummarySpaces struct { + Guid string `json:"guid"` + Name string `json:"name"` + ServiceCount int `json:"service_count"` + AppCount int `json:"app_count"` + MemDevTotal int `json:"mem_dev_total"` + MemProdTotal int `json:"mem_prod_total"` +} + +type OrgRequest struct { + Name string `json:"name"` + Status string `json:"status,omitempty"` + QuotaDefinitionGuid string `json:"quota_definition_guid,omitempty"` + DefaultIsolationSegmentGuid string `json:"default_isolation_segment_guid,omitempty"` +} + +func (c *Client) ListOrgsByQuery(query url.Values) ([]Org, error) { + var orgs []Org + requestURL := "/v2/organizations?" 
+ query.Encode() + for { + orgResp, err := c.getOrgResponse(requestURL) + if err != nil { + return []Org{}, err + } + for _, org := range orgResp.Resources { + orgs = append(orgs, c.mergeOrgResource(org)) + } + requestURL = orgResp.NextUrl + if requestURL == "" { + break + } + } + return orgs, nil +} + +func (c *Client) ListOrgs() ([]Org, error) { + return c.ListOrgsByQuery(nil) +} + +func (c *Client) GetOrgByName(name string) (Org, error) { + var org Org + q := url.Values{} + q.Set("q", "name:"+name) + orgs, err := c.ListOrgsByQuery(q) + if err != nil { + return org, err + } + if len(orgs) == 0 { + return org, fmt.Errorf("Unable to find org %s", name) + } + return orgs[0], nil +} + +func (c *Client) GetOrgByGuid(guid string) (Org, error) { + var orgRes OrgResource + r := c.NewRequest("GET", "/v2/organizations/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return Org{}, err + } + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return Org{}, err + } + err = json.Unmarshal(body, &orgRes) + if err != nil { + return Org{}, err + } + return c.mergeOrgResource(orgRes), nil +} + +func (c *Client) OrgSpaces(guid string) ([]Space, error) { + return c.fetchSpaces(fmt.Sprintf("/v2/organizations/%s/spaces", guid)) +} + +func (o *Org) Summary() (OrgSummary, error) { + var orgSummary OrgSummary + requestURL := fmt.Sprintf("/v2/organizations/%s/summary", o.Guid) + r := o.c.NewRequest("GET", requestURL) + resp, err := o.c.DoRequest(r) + if err != nil { + return OrgSummary{}, errors.Wrap(err, "Error requesting org summary") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return OrgSummary{}, errors.Wrap(err, "Error reading org summary body") + } + err = json.Unmarshal(resBody, &orgSummary) + if err != nil { + return OrgSummary{}, errors.Wrap(err, "Error unmarshalling org summary") + } + return orgSummary, nil +} + +func (o *Org) Quota() (*OrgQuota, error) { + var orgQuota *OrgQuota + var 
orgQuotaResource OrgQuotasResource + if o.QuotaDefinitionGuid == "" { + return nil, nil + } + requestURL := fmt.Sprintf("/v2/quota_definitions/%s", o.QuotaDefinitionGuid) + r := o.c.NewRequest("GET", requestURL) + resp, err := o.c.DoRequest(r) + if err != nil { + return &OrgQuota{}, errors.Wrap(err, "Error requesting org quota") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return &OrgQuota{}, errors.Wrap(err, "Error reading org quota body") + } + err = json.Unmarshal(resBody, &orgQuotaResource) + if err != nil { + return &OrgQuota{}, errors.Wrap(err, "Error unmarshalling org quota") + } + orgQuota = &orgQuotaResource.Entity + orgQuota.Guid = orgQuotaResource.Meta.Guid + orgQuota.c = o.c + return orgQuota, nil +} + +func (c *Client) ListOrgUsersByQuery(orgGUID string, query url.Values) ([]User, error) { + var users []User + requestURL := fmt.Sprintf("/v2/organizations/%s/users?%s", orgGUID, query.Encode()) + for { + omResp, err := c.getOrgUserResponse(requestURL) + if err != nil { + return []User{}, err + } + for _, u := range omResp.Resources { + users = append(users, c.mergeUserResource(u)) + } + requestURL = omResp.NextURL + if requestURL == "" { + break + } + } + return users, nil +} + +func (c *Client) ListOrgUsers(orgGUID string) ([]User, error) { + return c.ListOrgUsersByQuery(orgGUID, nil) +} + +func (c *Client) listOrgRolesByQuery(orgGUID, role string, query url.Values) ([]User, error) { + var users []User + requestURL := fmt.Sprintf("/v2/organizations/%s/%s?%s", orgGUID, role, query.Encode()) + for { + omResp, err := c.getOrgUserResponse(requestURL) + if err != nil { + return []User{}, err + } + for _, u := range omResp.Resources { + users = append(users, c.mergeUserResource(u)) + } + requestURL = omResp.NextURL + if requestURL == "" { + break + } + } + return users, nil +} + +func (c *Client) ListOrgManagersByQuery(orgGUID string, query url.Values) ([]User, error) { + return c.listOrgRolesByQuery(orgGUID, 
"managers", query) +} + +func (c *Client) ListOrgManagers(orgGUID string) ([]User, error) { + return c.ListOrgManagersByQuery(orgGUID, nil) +} + +func (c *Client) ListOrgAuditorsByQuery(orgGUID string, query url.Values) ([]User, error) { + return c.listOrgRolesByQuery(orgGUID, "auditors", query) +} + +func (c *Client) ListOrgAuditors(orgGUID string) ([]User, error) { + return c.ListOrgAuditorsByQuery(orgGUID, nil) +} + +func (c *Client) ListOrgBillingManagersByQuery(orgGUID string, query url.Values) ([]User, error) { + return c.listOrgRolesByQuery(orgGUID, "billing_managers", query) +} + +func (c *Client) ListOrgBillingManagers(orgGUID string) ([]User, error) { + return c.ListOrgBillingManagersByQuery(orgGUID, nil) +} + +func (c *Client) AssociateOrgManager(orgGUID, userGUID string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateManager(userGUID) +} + +func (c *Client) AssociateOrgManagerByUsername(orgGUID, name string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateManagerByUsername(name) +} + +func (c *Client) AssociateOrgManagerByUsernameAndOrigin(orgGUID, name, origin string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateManagerByUsernameAndOrigin(name, origin) +} + +func (c *Client) AssociateOrgUser(orgGUID, userGUID string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateUser(userGUID) +} + +func (c *Client) AssociateOrgAuditor(orgGUID, userGUID string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateAuditor(userGUID) +} + +func (c *Client) AssociateOrgUserByUsername(orgGUID, name string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateUserByUsername(name) +} + +func (c *Client) AssociateOrgUserByUsernameAndOrigin(orgGUID, name, origin string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateUserByUsernameAndOrigin(name, origin) +} + +func (c *Client) AssociateOrgAuditorByUsername(orgGUID, name 
string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateAuditorByUsername(name) +} + +func (c *Client) AssociateOrgAuditorByUsernameAndOrigin(orgGUID, name, origin string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateAuditorByUsernameAndOrigin(name, origin) +} + +func (c *Client) AssociateOrgBillingManager(orgGUID, userGUID string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateBillingManager(userGUID) +} + +func (c *Client) AssociateOrgBillingManagerByUsername(orgGUID, name string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateBillingManagerByUsername(name) +} + +func (c *Client) AssociateOrgBillingManagerByUsernameAndOrigin(orgGUID, name, origin string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateBillingManagerByUsernameAndOrigin(name, origin) +} + +func (c *Client) RemoveOrgManager(orgGUID, userGUID string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveManager(userGUID) +} + +func (c *Client) RemoveOrgManagerByUsername(orgGUID, name string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveManagerByUsername(name) +} + +func (c *Client) RemoveOrgManagerByUsernameAndOrigin(orgGUID, name, origin string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveManagerByUsernameAndOrigin(name, origin) +} + +func (c *Client) RemoveOrgUser(orgGUID, userGUID string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveUser(userGUID) +} + +func (c *Client) RemoveOrgAuditor(orgGUID, userGUID string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveAuditor(userGUID) +} + +func (c *Client) RemoveOrgUserByUsername(orgGUID, name string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveUserByUsername(name) +} + +func (c *Client) RemoveOrgUserByUsernameAndOrigin(orgGUID, name, origin string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveUserByUsernameAndOrigin(name, origin) +} + 
+func (c *Client) RemoveOrgAuditorByUsername(orgGUID, name string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveAuditorByUsername(name) +} + +func (c *Client) RemoveOrgAuditorByUsernameAndOrigin(orgGUID, name, origin string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveAuditorByUsernameAndOrigin(name, origin) +} + +func (c *Client) RemoveOrgBillingManager(orgGUID, userGUID string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveBillingManager(userGUID) +} + +func (c *Client) RemoveOrgBillingManagerByUsername(orgGUID, name string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveBillingManagerByUsername(name) +} + +func (c *Client) RemoveOrgBillingManagerByUsernameAndOrigin(orgGUID, name, origin string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveBillingManagerByUsernameAndOrigin(name, origin) +} + +func (c *Client) ListOrgSpaceQuotas(orgGUID string) ([]SpaceQuota, error) { + org := Org{Guid: orgGUID, c: c} + return org.ListSpaceQuotas() +} + +func (c *Client) ListOrgPrivateDomains(orgGUID string) ([]Domain, error) { + org := Org{Guid: orgGUID, c: c} + return org.ListPrivateDomains() +} + +func (c *Client) ShareOrgPrivateDomain(orgGUID, privateDomainGUID string) (*Domain, error) { + org := Org{Guid: orgGUID, c: c} + return org.SharePrivateDomain(privateDomainGUID) +} + +func (c *Client) UnshareOrgPrivateDomain(orgGUID, privateDomainGUID string) error { + org := Org{Guid: orgGUID, c: c} + return org.UnsharePrivateDomain(privateDomainGUID) +} + +func (o *Org) ListSpaceQuotas() ([]SpaceQuota, error) { + var spaceQuotas []SpaceQuota + requestURL := fmt.Sprintf("/v2/organizations/%s/space_quota_definitions", o.Guid) + for { + spaceQuotasResp, err := o.c.getSpaceQuotasResponse(requestURL) + if err != nil { + return []SpaceQuota{}, err + } + for _, resource := range spaceQuotasResp.Resources { + spaceQuotas = append(spaceQuotas, *o.c.mergeSpaceQuotaResource(resource)) + } + requestURL = 
spaceQuotasResp.NextUrl + if requestURL == "" { + break + } + } + return spaceQuotas, nil +} + +func (o *Org) ListPrivateDomains() ([]Domain, error) { + var domains []Domain + requestURL := fmt.Sprintf("/v2/organizations/%s/private_domains", o.Guid) + for { + domainsResp, err := o.c.getDomainsResponse(requestURL) + if err != nil { + return []Domain{}, err + } + for _, resource := range domainsResp.Resources { + domains = append(domains, *o.c.mergeDomainResource(resource)) + } + requestURL = domainsResp.NextUrl + if requestURL == "" { + break + } + } + return domains, nil +} + +func (o *Org) SharePrivateDomain(privateDomainGUID string) (*Domain, error) { + requestURL := fmt.Sprintf("/v2/organizations/%s/private_domains/%s", o.Guid, privateDomainGUID) + r := o.c.NewRequest("PUT", requestURL) + resp, err := o.c.DoRequest(r) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, errors.Wrapf(err, "Error sharing domain %s for org %s, response code: %d", privateDomainGUID, o.Guid, resp.StatusCode) + } + return o.c.handleDomainResp(resp) +} + +func (o *Org) UnsharePrivateDomain(privateDomainGUID string) error { + requestURL := fmt.Sprintf("/v2/organizations/%s/private_domains/%s", o.Guid, privateDomainGUID) + r := o.c.NewRequest("DELETE", requestURL) + resp, err := o.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error unsharing domain %s for org %s, response code: %d", privateDomainGUID, o.Guid, resp.StatusCode) + } + return nil +} + +func (o *Org) associateRole(userGUID, role string) (Org, error) { + requestURL := fmt.Sprintf("/v2/organizations/%s/%s/%s", o.Guid, role, userGUID) + r := o.c.NewRequest("PUT", requestURL) + resp, err := o.c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error associating %s %s, response code: %d", role, userGUID, resp.StatusCode) + } 
+ return o.c.handleOrgResp(resp) +} + +func (o *Org) associateRoleByUsernameAndOrigin(name, role, origin string) (Org, error) { + requestURL := fmt.Sprintf("/v2/organizations/%s/%s", o.Guid, role) + buf := bytes.NewBuffer(nil) + payload := make(map[string]string) + payload["username"] = name + if origin != "" { + payload["origin"] = origin + } + err := json.NewEncoder(buf).Encode(payload) + if err != nil { + return Org{}, err + } + r := o.c.NewRequestWithBody("PUT", requestURL, buf) + resp, err := o.c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error associating %s %s, response code: %d", role, name, resp.StatusCode) + } + return o.c.handleOrgResp(resp) +} + +func (o *Org) AssociateManager(userGUID string) (Org, error) { + return o.associateRole(userGUID, "managers") +} + +func (o *Org) AssociateManagerByUsername(name string) (Org, error) { + return o.associateRoleByUsernameAndOrigin(name, "managers", "") +} + +func (o *Org) AssociateManagerByUsernameAndOrigin(name, origin string) (Org, error) { + return o.associateRoleByUsernameAndOrigin(name, "managers", origin) +} + +func (o *Org) AssociateUser(userGUID string) (Org, error) { + requestURL := fmt.Sprintf("/v2/organizations/%s/users/%s", o.Guid, userGUID) + r := o.c.NewRequest("PUT", requestURL) + resp, err := o.c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error associating user %s, response code: %d", userGUID, resp.StatusCode) + } + return o.c.handleOrgResp(resp) +} + +func (o *Org) AssociateAuditor(userGUID string) (Org, error) { + return o.associateRole(userGUID, "auditors") +} + +func (o *Org) AssociateAuditorByUsername(name string) (Org, error) { + return o.associateRoleByUsernameAndOrigin(name, "auditors", "") +} + +func (o *Org) AssociateAuditorByUsernameAndOrigin(name, origin string) (Org, error) { + return 
o.associateRoleByUsernameAndOrigin(name, "auditors", origin) +} + +func (o *Org) AssociateBillingManager(userGUID string) (Org, error) { + return o.associateRole(userGUID, "billing_managers") +} + +func (o *Org) AssociateBillingManagerByUsername(name string) (Org, error) { + return o.associateRoleByUsernameAndOrigin(name, "billing_managers", "") +} +func (o *Org) AssociateBillingManagerByUsernameAndOrigin(name, origin string) (Org, error) { + return o.associateRoleByUsernameAndOrigin(name, "billing_managers", origin) +} + +func (o *Org) AssociateUserByUsername(name string) (Org, error) { + return o.associateUserByUsernameAndOrigin(name, "") +} + +func (o *Org) AssociateUserByUsernameAndOrigin(name, origin string) (Org, error) { + return o.associateUserByUsernameAndOrigin(name, origin) +} + +func (o *Org) associateUserByUsernameAndOrigin(name, origin string) (Org, error) { + requestURL := fmt.Sprintf("/v2/organizations/%s/users", o.Guid) + buf := bytes.NewBuffer(nil) + payload := make(map[string]string) + payload["username"] = name + if origin != "" { + payload["origin"] = origin + } + err := json.NewEncoder(buf).Encode(payload) + if err != nil { + return Org{}, err + } + r := o.c.NewRequestWithBody("PUT", requestURL, buf) + resp, err := o.c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error associating user %s, response code: %d", name, resp.StatusCode) + } + return o.c.handleOrgResp(resp) +} + +func (o *Org) removeRole(userGUID, role string) error { + requestURL := fmt.Sprintf("/v2/organizations/%s/%s/%s", o.Guid, role, userGUID) + r := o.c.NewRequest("DELETE", requestURL) + resp, err := o.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing %s %s, response code: %d", role, userGUID, resp.StatusCode) + } + return nil +} + +func (o *Org) removeRoleByUsernameAndOrigin(name, role, origin 
string) error { + var requestURL string + var method string + buf := bytes.NewBuffer(nil) + payload := make(map[string]string) + payload["username"] = name + if origin != "" { + requestURL = fmt.Sprintf("/v2/organizations/%s/%s/remove", o.Guid, role) + method = "POST" + payload["origin"] = origin + } else { + requestURL = fmt.Sprintf("/v2/organizations/%s/%s", o.Guid, role) + method = "DELETE" + } + err := json.NewEncoder(buf).Encode(payload) + if err != nil { + return err + } + + r := o.c.NewRequestWithBody(method, requestURL, buf) + resp, err := o.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing manager %s, response code: %d", name, resp.StatusCode) + } + return nil +} + +func (o *Org) RemoveManager(userGUID string) error { + return o.removeRole(userGUID, "managers") +} + +func (o *Org) RemoveManagerByUsername(name string) error { + return o.removeRoleByUsernameAndOrigin(name, "managers", "") +} +func (o *Org) RemoveManagerByUsernameAndOrigin(name, origin string) error { + return o.removeRoleByUsernameAndOrigin(name, "managers", origin) +} + +func (o *Org) RemoveAuditor(userGUID string) error { + return o.removeRole(userGUID, "auditors") +} + +func (o *Org) RemoveAuditorByUsername(name string) error { + return o.removeRoleByUsernameAndOrigin(name, "auditors", "") +} +func (o *Org) RemoveAuditorByUsernameAndOrigin(name, origin string) error { + return o.removeRoleByUsernameAndOrigin(name, "auditors", origin) +} + +func (o *Org) RemoveBillingManager(userGUID string) error { + return o.removeRole(userGUID, "billing_managers") +} + +func (o *Org) RemoveBillingManagerByUsername(name string) error { + return o.removeRoleByUsernameAndOrigin(name, "billing_managers", "") +} + +func (o *Org) RemoveBillingManagerByUsernameAndOrigin(name, origin string) error { + return o.removeRoleByUsernameAndOrigin(name, "billing_managers", origin) +} + +func (o *Org) RemoveUser(userGUID string) 
error { + requestURL := fmt.Sprintf("/v2/organizations/%s/users/%s", o.Guid, userGUID) + r := o.c.NewRequest("DELETE", requestURL) + resp, err := o.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing user %s, response code: %d", userGUID, resp.StatusCode) + } + return nil +} + +func (o *Org) RemoveUserByUsername(name string) error { + return o.removeUserByUsernameAndOrigin(name, "") +} + +func (o *Org) RemoveUserByUsernameAndOrigin(name, origin string) error { + return o.removeUserByUsernameAndOrigin(name, origin) +} + +func (o *Org) removeUserByUsernameAndOrigin(name, origin string) error { + var requestURL string + var method string + buf := bytes.NewBuffer(nil) + payload := make(map[string]string) + payload["username"] = name + if origin != "" { + payload["origin"] = origin + requestURL = fmt.Sprintf("/v2/organizations/%s/users/remove", o.Guid) + method = "POST" + } else { + requestURL = fmt.Sprintf("/v2/organizations/%s/users", o.Guid) + method = "DELETE" + } + err := json.NewEncoder(buf).Encode(payload) + if err != nil { + return err + } + r := o.c.NewRequestWithBody(method, requestURL, buf) + resp, err := o.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing user %s, response code: %d", name, resp.StatusCode) + } + return nil +} + +func (c *Client) CreateOrg(req OrgRequest) (Org, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return Org{}, err + } + r := c.NewRequestWithBody("POST", "/v2/organizations", buf) + resp, err := c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error creating organization, response code: %d", resp.StatusCode) + } + return c.handleOrgResp(resp) +} + +func (c *Client) UpdateOrg(orgGUID string, orgRequest OrgRequest) (Org, 
error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(orgRequest) + if err != nil { + return Org{}, err + } + r := c.NewRequestWithBody("PUT", fmt.Sprintf("/v2/organizations/%s", orgGUID), buf) + resp, err := c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error updating organization, response code: %d", resp.StatusCode) + } + return c.handleOrgResp(resp) +} + +func (c *Client) DeleteOrg(guid string, recursive, async bool) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/organizations/%s?recursive=%t&async=%t", guid, recursive, async))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting organization %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) getOrgResponse(requestURL string) (OrgResponse, error) { + var orgResp OrgResponse + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return OrgResponse{}, errors.Wrap(err, "Error requesting orgs") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return OrgResponse{}, errors.Wrap(err, "Error reading org request") + } + err = json.Unmarshal(resBody, &orgResp) + if err != nil { + return OrgResponse{}, errors.Wrap(err, "Error unmarshalling org") + } + return orgResp, nil +} + +func (c *Client) fetchOrgs(requestURL string) ([]Org, error) { + var orgs []Org + for { + orgResp, err := c.getOrgResponse(requestURL) + if err != nil { + return []Org{}, err + } + for _, org := range orgResp.Resources { + orgs = append(orgs, c.mergeOrgResource(org)) + } + requestURL = orgResp.NextUrl + if requestURL == "" { + break + } + } + return orgs, nil +} + +func (c *Client) handleOrgResp(resp *http.Response) (Org, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return 
Org{}, err + } + var orgResource OrgResource + err = json.Unmarshal(body, &orgResource) + if err != nil { + return Org{}, err + } + return c.mergeOrgResource(orgResource), nil +} + +func (c *Client) getOrgUserResponse(requestURL string) (OrgUserResponse, error) { + var omResp OrgUserResponse + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return OrgUserResponse{}, errors.Wrap(err, "error requesting org managers") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return OrgUserResponse{}, errors.Wrap(err, "error reading org managers response body") + } + if err := json.Unmarshal(resBody, &omResp); err != nil { + return OrgUserResponse{}, errors.Wrap(err, "error unmarshaling org managers") + } + return omResp, nil +} + +func (c *Client) mergeOrgResource(org OrgResource) Org { + org.Entity.Guid = org.Meta.Guid + org.Entity.CreatedAt = org.Meta.CreatedAt + org.Entity.UpdatedAt = org.Meta.UpdatedAt + org.Entity.c = c + return org.Entity +} + +func (c *Client) DefaultIsolationSegmentForOrg(orgGUID, isolationSegmentGUID string) error { + return c.updateOrgDefaultIsolationSegment(orgGUID, map[string]interface{}{"guid": isolationSegmentGUID}) +} + +func (c *Client) ResetDefaultIsolationSegmentForOrg(orgGUID string) error { + return c.updateOrgDefaultIsolationSegment(orgGUID, nil) +} + +func (c *Client) updateOrgDefaultIsolationSegment(orgGUID string, data interface{}) error { + requestURL := fmt.Sprintf("/v3/organizations/%s/relationships/default_isolation_segment", orgGUID) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]interface{}{"data": data}) + if err != nil { + return err + } + r := c.NewRequestWithBody("PATCH", requestURL, buf) + resp, err := c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return errors.Wrapf(err, "Error setting default isolation segment for org %s, response code: %d", orgGUID, 
resp.StatusCode) + } + return nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/processes.go b/vendor/github.com/cloudfoundry-community/go-cfclient/processes.go new file mode 100644 index 000000000000..bfdb804c0f37 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/processes.go @@ -0,0 +1,124 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "net/url" + "reflect" +) + +// ProcessListResponse is the json body returned from the API +type ProcessListResponse struct { + Pagination Pagination `json:"pagination"` + Processes []Process `json:"resources"` +} + +// Process represents a running process in a container. +type Process struct { + GUID string `json:"guid"` + Type string `json:"type"` + Instances int `json:"instances"` + MemoryInMB int `json:"memory_in_mb"` + DiskInMB int `json:"disk_in_mb"` + Ports []int `json:"ports,omitempty"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + HealthCheck struct { + Type string `json:"type"` + Data struct { + Timeout int `json:"timeout"` + InvocationTimeout int `json:"invocation_timeout"` + Endpoint string `json:"endpoint"` + } `json:"data"` + } `json:"health_check"` + Links struct { + Self Link `json:"self"` + Scale Link `json:"scale"` + App Link `json:"app"` + Space Link `json:"space"` + Stats Link `json:"stats"` + } `json:"links"` +} + +// ListAllProcesses will call the v3 processes api +func (c *Client) ListAllProcesses() ([]Process, error) { + return c.ListAllProcessesByQuery(url.Values{}) +} + +// ListAllProcessesByQuery will call the v3 processes api +func (c *Client) ListAllProcessesByQuery(query url.Values) ([]Process, error) { + var allProcesses []Process + + urlPath := "/v3/processes" + for { + resp, err := c.getProcessPage(urlPath, query) + if err != nil { + return nil, err + } + + if resp.Pagination.TotalResults == 0 { + return nil, nil + } + + if allProcesses == nil { + allProcesses = make([]Process, 0, 
resp.Pagination.TotalResults) + } + + allProcesses = append(allProcesses, resp.Processes...) + if resp.Pagination.Next == nil { + return allProcesses, nil + } + + var nextURL string + + if resp.Pagination.Next == nil { + return allProcesses, nil + } + + switch resp.Pagination.Next.(type) { + case string: + nextURL = resp.Pagination.Next.(string) + case map[string]interface{}: + m := resp.Pagination.Next.(map[string]interface{}) + u, ok := m["href"] + if ok { + nextURL = u.(string) + } + default: + return nil, fmt.Errorf("Unexpected type [%s] for next url", reflect.TypeOf(resp.Pagination.Next).String()) + } + + if nextURL == "" { + return allProcesses, nil + } + + u, err := url.Parse(nextURL) + if err != nil { + return nil, err + } + + urlPath = u.Path + query, err = url.ParseQuery(u.RawQuery) + if err != nil { + return nil, err + } + } +} + +func (c *Client) getProcessPage(urlPath string, query url.Values) (*ProcessListResponse, error) { + req := c.NewRequest("GET", fmt.Sprintf("%s?%s", urlPath, query.Encode())) + + resp, err := c.DoRequest(req) + if err != nil { + return nil, err + } + + procResp := new(ProcessListResponse) + defer resp.Body.Close() + err = json.NewDecoder(resp.Body).Decode(procResp) + if err != nil { + return nil, err + } + + return procResp, nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/route_mappings.go b/vendor/github.com/cloudfoundry-community/go-cfclient/route_mappings.go new file mode 100644 index 000000000000..d422d496beec --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/route_mappings.go @@ -0,0 +1,159 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type RouteMappingRequest struct { + AppGUID string `json:"app_guid"` + RouteGUID string `json:"route_guid"` + AppPort int `json:"app_port"` +} + +type RouteMappingResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + 
NextUrl string `json:"next_url"` + Resources []RouteMappingResource `json:"resources"` +} + +type RouteMapping struct { + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + AppPort int `json:"app_port"` + AppGUID string `json:"app_guid"` + RouteGUID string `json:"route_guid"` + AppUrl string `json:"app_url"` + RouteUrl string `json:"route_url"` + c *Client +} + +type RouteMappingResource struct { + Meta Meta `json:"metadata"` + Entity RouteMapping `json:"entity"` +} + +func (c *Client) MappingAppAndRoute(req RouteMappingRequest) (*RouteMapping, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return nil, err + } + r := c.NewRequestWithBody("POST", "/v2/route_mappings", buf) + resp, err := c.DoRequest(r) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return c.handleMappingResp(resp) +} + +func (c *Client) ListRouteMappings() ([]*RouteMapping, error) { + return c.ListRouteMappingsByQuery(nil) +} + +func (c *Client) ListRouteMappingsByQuery(query url.Values) ([]*RouteMapping, error) { + var routeMappings []*RouteMapping + var routeMappingsResp RouteMappingResponse + pages := 0 + + requestUrl := "/v2/route_mappings?" 
+ query.Encode() + for { + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting route mappings") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading route mappings request:") + } + + err = json.Unmarshal(resBody, &routeMappingsResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling route mappings") + } + + for _, routeMapping := range routeMappingsResp.Resources { + routeMappings = append(routeMappings, c.mergeRouteMappingResource(routeMapping)) + } + requestUrl = routeMappingsResp.NextUrl + if requestUrl == "" { + break + } + pages++ + totalPages := routeMappingsResp.Pages + if totalPages > 0 && pages >= totalPages { + break + } + } + return routeMappings, nil +} + +func (c *Client) GetRouteMappingByGuid(guid string) (*RouteMapping, error) { + var routeMapping RouteMappingResource + requestUrl := fmt.Sprintf("/v2/route_mappings/%s", guid) + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting route mapping") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading route mapping response body") + } + err = json.Unmarshal(resBody, &routeMapping) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling route mapping") + } + routeMapping.Entity.Guid = routeMapping.Meta.Guid + routeMapping.Entity.CreatedAt = routeMapping.Meta.CreatedAt + routeMapping.Entity.UpdatedAt = routeMapping.Meta.UpdatedAt + routeMapping.Entity.c = c + return &routeMapping.Entity, nil +} + +func (c *Client) DeleteRouteMapping(guid string) error { + requestUrl := fmt.Sprintf("/v2/route_mappings/%s?", guid) + resp, err := c.DoRequest(c.NewRequest("DELETE", requestUrl)) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return 
errors.Wrapf(err, "Error deleting route mapping %s, response code %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) handleMappingResp(resp *http.Response) (*RouteMapping, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return nil, err + } + var mappingResource RouteMappingResource + err = json.Unmarshal(body, &mappingResource) + if err != nil { + return nil, err + } + return c.mergeRouteMappingResource(mappingResource), nil +} + +func (c *Client) mergeRouteMappingResource(mapping RouteMappingResource) *RouteMapping { + mapping.Entity.Guid = mapping.Meta.Guid + mapping.Entity.CreatedAt = mapping.Meta.CreatedAt + mapping.Entity.UpdatedAt = mapping.Meta.UpdatedAt + mapping.Entity.c = c + return &mapping.Entity +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/routes.go b/vendor/github.com/cloudfoundry-community/go-cfclient/routes.go new file mode 100644 index 000000000000..2be651bec33c --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/routes.go @@ -0,0 +1,207 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type RoutesResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []RoutesResource `json:"resources"` +} + +type RoutesResource struct { + Meta Meta `json:"metadata"` + Entity Route `json:"entity"` +} + +type RouteRequest struct { + DomainGuid string `json:"domain_guid"` + SpaceGuid string `json:"space_guid"` + Host string `json:"host"` // required for http routes + Path string `json:"path"` + Port int `json:"port"` +} + +type Route struct { + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Host string `json:"host"` + Path string `json:"path"` + DomainGuid string `json:"domain_guid"` + DomainURL string `json:"domain_url"` + SpaceGuid 
string `json:"space_guid"` + ServiceInstanceGuid string `json:"service_instance_guid"` + Port int `json:"port"` + c *Client +} + +// CreateRoute creates a regular http route +func (c *Client) CreateRoute(routeRequest RouteRequest) (Route, error) { + routesResource, err := c.createRoute("/v2/routes", routeRequest) + if nil != err { + return Route{}, err + } + return c.mergeRouteResource(routesResource), nil +} + +// CreateTcpRoute creates a TCP route +func (c *Client) CreateTcpRoute(routeRequest RouteRequest) (Route, error) { + routesResource, err := c.createRoute("/v2/routes?generate_port=true", routeRequest) + if nil != err { + return Route{}, err + } + return c.mergeRouteResource(routesResource), nil +} + +// BindRoute associates the specified route with the application +func (c *Client) BindRoute(routeGUID, appGUID string) error { + resp, err := c.DoRequest(c.NewRequest("PUT", fmt.Sprintf("/v2/routes/%s/apps/%s", routeGUID, appGUID))) + if err != nil { + return errors.Wrapf(err, "Error binding route %s to app %s", routeGUID, appGUID) + } + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("Error binding route %s to app %s, response code: %d", routeGUID, appGUID, resp.StatusCode) + } + return nil +} + +func (c *Client) GetRouteByGuid(guid string) (Route, error) { + var route RoutesResource + + r := c.NewRequest("GET", fmt.Sprintf("/v2/routes/%s", guid)) + resp, err := c.DoRequest(r) + if err != nil { + return route.Entity, errors.Wrap(err, "Error requesting route") + } + defer resp.Body.Close() + + err = json.NewDecoder(resp.Body).Decode(&route) + if err != nil { + return route.Entity, errors.Wrap(err, "Error unmarshalling route response body") + } + + route.Entity.Guid = route.Meta.Guid + route.Entity.CreatedAt = route.Meta.CreatedAt + route.Entity.UpdatedAt = route.Meta.UpdatedAt + route.Entity.c = c + return route.Entity, nil +} + +func (c *Client) ListRoutesByQuery(query url.Values) ([]Route, error) { + return c.fetchRoutes("/v2/routes?" 
+ query.Encode()) +} + +func (c *Client) fetchRoutes(requestUrl string) ([]Route, error) { + var routes []Route + for { + routesResp, err := c.getRoutesResponse(requestUrl) + if err != nil { + return []Route{}, err + } + for _, route := range routesResp.Resources { + route.Entity.Guid = route.Meta.Guid + route.Entity.CreatedAt = route.Meta.CreatedAt + route.Entity.UpdatedAt = route.Meta.UpdatedAt + route.Entity.c = c + routes = append(routes, route.Entity) + } + requestUrl = routesResp.NextUrl + if requestUrl == "" { + break + } + } + return routes, nil +} + +func (c *Client) ListRoutes() ([]Route, error) { + return c.ListRoutesByQuery(nil) +} + +func (r *Route) Domain() (*Domain, error) { + req := r.c.NewRequest("GET", r.DomainURL) + resp, err := r.c.DoRequest(req) + if err != nil { + return nil, errors.Wrap(err, "requesting domain for route "+r.DomainURL) + } + + defer resp.Body.Close() + var domain DomainResource + if err = json.NewDecoder(resp.Body).Decode(&domain); err != nil { + return nil, errors.Wrap(err, "unmarshalling domain") + } + + return r.c.mergeDomainResource(domain), nil +} + +func (c *Client) getRoutesResponse(requestUrl string) (RoutesResponse, error) { + var routesResp RoutesResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return RoutesResponse{}, errors.Wrap(err, "Error requesting routes") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return RoutesResponse{}, errors.Wrap(err, "Error reading routes body") + } + err = json.Unmarshal(resBody, &routesResp) + if err != nil { + return RoutesResponse{}, errors.Wrap(err, "Error unmarshalling routes") + } + return routesResp, nil +} + +func (c *Client) createRoute(requestUrl string, routeRequest RouteRequest) (RoutesResource, error) { + var routeResp RoutesResource + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(routeRequest) + if err != nil { + return RoutesResource{}, errors.Wrap(err, 
"Error creating route - failed to serialize request body") + } + r := c.NewRequestWithBody("POST", requestUrl, buf) + resp, err := c.DoRequest(r) + if err != nil { + return RoutesResource{}, errors.Wrap(err, "Error creating route") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return RoutesResource{}, errors.Wrap(err, "Error creating route") + } + err = json.Unmarshal(resBody, &routeResp) + if err != nil { + return RoutesResource{}, errors.Wrap(err, "Error unmarshalling routes") + } + routeResp.Entity.c = c + return routeResp, nil +} + +func (c *Client) DeleteRoute(guid string) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/routes/%s", guid))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting route %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) mergeRouteResource(rr RoutesResource) Route { + rr.Entity.Guid = rr.Meta.Guid + rr.Entity.CreatedAt = rr.Meta.CreatedAt + rr.Entity.UpdatedAt = rr.Meta.UpdatedAt + rr.Entity.c = c + return rr.Entity +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/secgroups.go b/vendor/github.com/cloudfoundry-community/go-cfclient/secgroups.go new file mode 100644 index 000000000000..bcc827bbf834 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/secgroups.go @@ -0,0 +1,565 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + + "github.com/Masterminds/semver" + "github.com/pkg/errors" +) + +type SecGroupResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []SecGroupResource `json:"resources"` +} + +type SecGroupCreateResponse struct { + Code int `json:"code"` + ErrorCode string `json:"error_code"` + Description string `json:"description"` +} + +type SecGroupResource 
struct { + Meta Meta `json:"metadata"` + Entity SecGroup `json:"entity"` +} + +type SecGroup struct { + Guid string `json:"guid"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Rules []SecGroupRule `json:"rules"` + Running bool `json:"running_default"` + Staging bool `json:"staging_default"` + SpacesURL string `json:"spaces_url"` + StagingSpacesURL string `json:"staging_spaces_url"` + SpacesData []SpaceResource `json:"spaces"` + StagingSpacesData []SpaceResource `json:"staging_spaces"` + c *Client +} + +type SecGroupRule struct { + Protocol string `json:"protocol"` + Ports string `json:"ports,omitempty"` //e.g. "4000-5000,9142" + Destination string `json:"destination"` //CIDR Format + Description string `json:"description,omitempty"` //Optional description + Code int `json:"code"` // ICMP code + Type int `json:"type"` //ICMP type. Only valid if Protocol=="icmp" + Log bool `json:"log,omitempty"` //If true, log this rule +} + +var MinStagingSpacesVersion *semver.Version = getMinStagingSpacesVersion() + +func (c *Client) ListSecGroups() (secGroups []SecGroup, err error) { + requestURL := "/v2/security_groups?inline-relations-depth=1" + for requestURL != "" { + var secGroupResp SecGroupResponse + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + + if err != nil { + return nil, errors.Wrap(err, "Error requesting sec groups") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading sec group response body") + } + + err = json.Unmarshal(resBody, &secGroupResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling sec group") + } + + for _, secGroup := range secGroupResp.Resources { + secGroup.Entity.Guid = secGroup.Meta.Guid + secGroup.Entity.CreatedAt = secGroup.Meta.CreatedAt + secGroup.Entity.UpdatedAt = secGroup.Meta.UpdatedAt + secGroup.Entity.c = c + for i, space := range secGroup.Entity.SpacesData { + 
space.Entity.Guid = space.Meta.Guid
+				secGroup.Entity.SpacesData[i] = space
+			}
+			if len(secGroup.Entity.SpacesData) == 0 {
+				spaces, err := secGroup.Entity.ListSpaceResources()
+				if err != nil {
+					return nil, err
+				}
+				for _, space := range spaces {
+					secGroup.Entity.SpacesData = append(secGroup.Entity.SpacesData, space)
+				}
+			}
+			if len(secGroup.Entity.StagingSpacesData) == 0 {
+				spaces, err := secGroup.Entity.ListStagingSpaceResources()
+				if err != nil {
+					return nil, err
+				}
+				for _, space := range spaces {
+					secGroup.Entity.StagingSpacesData = append(secGroup.Entity.StagingSpacesData, space)
+				}
+			}
+			secGroups = append(secGroups, secGroup.Entity)
+		}
+
+		requestURL = secGroupResp.NextUrl
+		resp.Body.Close()
+	}
+	return secGroups, nil
+}
+
+func (c *Client) ListRunningSecGroups() ([]SecGroup, error) {
+	secGroups := make([]SecGroup, 0)
+	requestURL := "/v2/config/running_security_groups"
+	for requestURL != "" {
+		var secGroupResp SecGroupResponse
+		r := c.NewRequest("GET", requestURL)
+		resp, err := c.DoRequest(r)
+
+		if err != nil {
+			return nil, errors.Wrap(err, "Error requesting sec groups")
+		}
+		resBody, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return nil, errors.Wrap(err, "Error reading sec group response body")
+		}
+
+		err = json.Unmarshal(resBody, &secGroupResp)
+		if err != nil {
+			return nil, errors.Wrap(err, "Error unmarshaling sec group")
+		}
+
+		for _, secGroup := range secGroupResp.Resources {
+			secGroup.Entity.Guid = secGroup.Meta.Guid
+			secGroup.Entity.CreatedAt = secGroup.Meta.CreatedAt
+			secGroup.Entity.UpdatedAt = secGroup.Meta.UpdatedAt
+			secGroup.Entity.c = c
+
+			secGroups = append(secGroups, secGroup.Entity)
+		}
+
+		requestURL = secGroupResp.NextUrl
+		resp.Body.Close()
+	}
+	return secGroups, nil
+}
+
+func (c *Client) ListStagingSecGroups() ([]SecGroup, error) {
+	secGroups := make([]SecGroup, 0)
+	requestURL := "/v2/config/staging_security_groups"
+	for requestURL != "" {
+		var secGroupResp SecGroupResponse
+		r := 
c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + + if err != nil { + return nil, errors.Wrap(err, "Error requesting sec groups") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading sec group response body") + } + + err = json.Unmarshal(resBody, &secGroupResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling sec group") + } + + for _, secGroup := range secGroupResp.Resources { + secGroup.Entity.Guid = secGroup.Meta.Guid + secGroup.Entity.CreatedAt = secGroup.Meta.CreatedAt + secGroup.Entity.UpdatedAt = secGroup.Meta.UpdatedAt + secGroup.Entity.c = c + + secGroups = append(secGroups, secGroup.Entity) + } + + requestURL = secGroupResp.NextUrl + resp.Body.Close() + } + return secGroups, nil +} + +func (c *Client) GetSecGroupByName(name string) (secGroup SecGroup, err error) { + requestURL := "/v2/security_groups?q=name:" + name + var secGroupResp SecGroupResponse + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + + if err != nil { + return secGroup, errors.Wrap(err, "Error requesting sec groups") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return secGroup, errors.Wrap(err, "Error reading sec group response body") + } + + err = json.Unmarshal(resBody, &secGroupResp) + if err != nil { + return secGroup, errors.Wrap(err, "Error unmarshaling sec group") + } + if len(secGroupResp.Resources) == 0 { + return secGroup, fmt.Errorf("No security group with name %v found", name) + } + secGroup = secGroupResp.Resources[0].Entity + secGroup.Guid = secGroupResp.Resources[0].Meta.Guid + secGroup.CreatedAt = secGroupResp.Resources[0].Meta.CreatedAt + secGroup.UpdatedAt = secGroupResp.Resources[0].Meta.UpdatedAt + secGroup.c = c + + resp.Body.Close() + return secGroup, nil +} + +func (secGroup *SecGroup) ListSpaceResources() ([]SpaceResource, error) { + var spaceResources []SpaceResource + requestURL := secGroup.SpacesURL + for requestURL != "" { + 
spaceResp, err := secGroup.c.getSpaceResponse(requestURL) + if err != nil { + return []SpaceResource{}, err + } + for i, spaceRes := range spaceResp.Resources { + spaceRes.Entity.Guid = spaceRes.Meta.Guid + spaceRes.Entity.CreatedAt = spaceRes.Meta.CreatedAt + spaceRes.Entity.UpdatedAt = spaceRes.Meta.UpdatedAt + spaceResp.Resources[i] = spaceRes + } + spaceResources = append(spaceResources, spaceResp.Resources...) + requestURL = spaceResp.NextUrl + } + return spaceResources, nil +} + +func (secGroup *SecGroup) ListStagingSpaceResources() ([]SpaceResource, error) { + var spaceResources []SpaceResource + requestURL := secGroup.StagingSpacesURL + for requestURL != "" { + spaceResp, err := secGroup.c.getSpaceResponse(requestURL) + if err != nil { + // if this is a 404, let's make sure that it's not because we're on a legacy system + if cause := errors.Cause(err); cause != nil { + if httpErr, ok := cause.(CloudFoundryHTTPError); ok { + if httpErr.StatusCode == 404 { + info, infoErr := secGroup.c.GetInfo() + if infoErr != nil { + return nil, infoErr + } + + apiVersion, versionErr := semver.NewVersion(info.APIVersion) + if versionErr != nil { + return nil, versionErr + } + + if MinStagingSpacesVersion.GreaterThan(apiVersion) { + // this is probably not really an error, we're just trying to use a non-existent api + return nil, nil + } + } + } + } + + return []SpaceResource{}, err + } + for i, spaceRes := range spaceResp.Resources { + spaceRes.Entity.Guid = spaceRes.Meta.Guid + spaceRes.Entity.CreatedAt = spaceRes.Meta.CreatedAt + spaceRes.Entity.UpdatedAt = spaceRes.Meta.UpdatedAt + spaceResp.Resources[i] = spaceRes + } + spaceResources = append(spaceResources, spaceResp.Resources...) + requestURL = spaceResp.NextUrl + } + return spaceResources, nil +} + +/* +CreateSecGroup contacts the CF endpoint for creating a new security group. 
+name: the name to give to the created security group +rules: A slice of rule objects that describe the rules that this security group enforces. + This can technically be nil or an empty slice - we won't judge you +spaceGuids: The security group will be associated with the spaces specified by the contents of this slice. + If nil, the security group will not be associated with any spaces initially. +*/ +func (c *Client) CreateSecGroup(name string, rules []SecGroupRule, spaceGuids []string) (*SecGroup, error) { + return c.secGroupCreateHelper("/v2/security_groups", "POST", name, rules, spaceGuids) +} + +/* +UpdateSecGroup contacts the CF endpoint to update an existing security group. +guid: identifies the security group that you would like to update. +name: the new name to give to the security group +rules: A slice of rule objects that describe the rules that this security group enforces. + If this is left nil, the rules will not be changed. +spaceGuids: The security group will be associated with the spaces specified by the contents of this slice. + If nil, the space associations will not be changed. +*/ +func (c *Client) UpdateSecGroup(guid, name string, rules []SecGroupRule, spaceGuids []string) (*SecGroup, error) { + return c.secGroupCreateHelper("/v2/security_groups/"+guid, "PUT", name, rules, spaceGuids) +} + +/* +DeleteSecGroup contacts the CF endpoint to delete an existing security group. +guid: Indentifies the security group to be deleted. +*/ +func (c *Client) DeleteSecGroup(guid string) error { + //Perform the DELETE and check for errors + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/security_groups/%s", guid))) + if err != nil { + return err + } + if resp.StatusCode != 204 { //204 No Content + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +GetSecGroup contacts the CF endpoint for fetching the info for a particular security group. 
+guid: Identifies the security group to fetch information from +*/ +func (c *Client) GetSecGroup(guid string) (*SecGroup, error) { + //Perform the GET and check for errors + resp, err := c.DoRequest(c.NewRequest("GET", "/v2/security_groups/"+guid)) + if err != nil { + return nil, err + } + if resp.StatusCode != 200 { + return nil, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + //get the json out of the response body + return respBodyToSecGroup(resp.Body, c) +} + +/* +BindSecGroup contacts the CF endpoint to associate a space with a security group +secGUID: identifies the security group to add a space to +spaceGUID: identifies the space to associate +*/ +func (c *Client) BindSecGroup(secGUID, spaceGUID string) error { + //Perform the PUT and check for errors + resp, err := c.DoRequest(c.NewRequest("PUT", fmt.Sprintf("/v2/security_groups/%s/spaces/%s", secGUID, spaceGUID))) + if err != nil { + return err + } + if resp.StatusCode != 201 { //201 Created + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +BindSpaceStagingSecGroup contacts the CF endpoint to associate a space with a security group for staging functions only +secGUID: identifies the security group to add a space to +spaceGUID: identifies the space to associate +*/ +func (c *Client) BindStagingSecGroupToSpace(secGUID, spaceGUID string) error { + //Perform the PUT and check for errors + resp, err := c.DoRequest(c.NewRequest("PUT", fmt.Sprintf("/v2/security_groups/%s/staging_spaces/%s", secGUID, spaceGUID))) + if err != nil { + return err + } + if resp.StatusCode != 201 { //201 Created + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +BindRunningSecGroup contacts the CF endpoint to associate a security group +secGUID: identifies the security group to add a space to +*/ +func (c *Client) BindRunningSecGroup(secGUID string) error { + //Perform the PUT and check for errors + resp, 
err := c.DoRequest(c.NewRequest("PUT", fmt.Sprintf("/v2/config/running_security_groups/%s", secGUID))) + if err != nil { + return err + } + if resp.StatusCode != 200 { //200 + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +UnbindRunningSecGroup contacts the CF endpoint to dis-associate a security group +secGUID: identifies the security group to add a space to +*/ +func (c *Client) UnbindRunningSecGroup(secGUID string) error { + //Perform the DELETE and check for errors + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/config/running_security_groups/%s", secGUID))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { //204 + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +BindStagingSecGroup contacts the CF endpoint to associate a space with a security group +secGUID: identifies the security group to add a space to +*/ +func (c *Client) BindStagingSecGroup(secGUID string) error { + //Perform the PUT and check for errors + resp, err := c.DoRequest(c.NewRequest("PUT", fmt.Sprintf("/v2/config/staging_security_groups/%s", secGUID))) + if err != nil { + return err + } + if resp.StatusCode != 200 { //200 + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +UnbindStagingSecGroup contacts the CF endpoint to dis-associate a space with a security group +secGUID: identifies the security group to add a space to +*/ +func (c *Client) UnbindStagingSecGroup(secGUID string) error { + //Perform the DELETE and check for errors + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/config/staging_security_groups/%s", secGUID))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { //204 + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +UnbindSecGroup contacts the CF endpoint to dissociate a 
space from a security group +secGUID: identifies the security group to remove a space from +spaceGUID: identifies the space to dissociate from the security group +*/ +func (c *Client) UnbindSecGroup(secGUID, spaceGUID string) error { + //Perform the DELETE and check for errors + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/security_groups/%s/spaces/%s", secGUID, spaceGUID))) + if err != nil { + return err + } + if resp.StatusCode != 204 { //204 No Content + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +//Reads most security group response bodies into a SecGroup object +func respBodyToSecGroup(body io.ReadCloser, c *Client) (*SecGroup, error) { + //get the json from the response body + bodyRaw, err := ioutil.ReadAll(body) + if err != nil { + return nil, errors.Wrap(err, "Could not read response body") + } + jStruct := SecGroupResource{} + //make it a SecGroup + err = json.Unmarshal(bodyRaw, &jStruct) + if err != nil { + return nil, errors.Wrap(err, "Could not unmarshal response body as json") + } + //pull a few extra fields from other places + ret := jStruct.Entity + ret.Guid = jStruct.Meta.Guid + ret.CreatedAt = jStruct.Meta.CreatedAt + ret.UpdatedAt = jStruct.Meta.UpdatedAt + ret.c = c + return &ret, nil +} + +func convertStructToMap(st interface{}) map[string]interface{} { + reqRules := make(map[string]interface{}) + + v := reflect.ValueOf(st) + t := reflect.TypeOf(st) + + for i := 0; i < v.NumField(); i++ { + key := strings.ToLower(t.Field(i).Name) + typ := v.FieldByName(t.Field(i).Name).Kind().String() + structTag := t.Field(i).Tag.Get("json") + jsonName := strings.TrimSpace(strings.Split(structTag, ",")[0]) + value := v.FieldByName(t.Field(i).Name) + + // if jsonName is not empty use it for the key + if jsonName != "" { + key = jsonName + } + + if typ == "string" { + if !(value.String() == "" && strings.Contains(structTag, "omitempty")) { + reqRules[key] = value.String() + } + } else if typ 
== "int" { + reqRules[key] = value.Int() + } else { + reqRules[key] = value.Interface() + } + + } + + return reqRules +} + +//Create and Update secGroup pretty much do the same thing, so this function abstracts those out. +func (c *Client) secGroupCreateHelper(url, method, name string, rules []SecGroupRule, spaceGuids []string) (*SecGroup, error) { + reqRules := make([]map[string]interface{}, len(rules)) + + for i, rule := range rules { + reqRules[i] = convertStructToMap(rule) + protocol := strings.ToLower(reqRules[i]["protocol"].(string)) + + // if not icmp protocol need to remove the Code/Type fields + if protocol != "icmp" { + delete(reqRules[i], "code") + delete(reqRules[i], "type") + } + } + + req := c.NewRequest(method, url) + //set up request body + inputs := map[string]interface{}{ + "name": name, + "rules": reqRules, + } + + if spaceGuids != nil { + inputs["space_guids"] = spaceGuids + } + req.obj = inputs + //fire off the request and check for problems + resp, err := c.DoRequest(req) + if err != nil { + return nil, err + } + if resp.StatusCode != 201 { // Both create and update should give 201 CREATED + var response SecGroupCreateResponse + + bodyRaw, _ := ioutil.ReadAll(resp.Body) + + err = json.Unmarshal(bodyRaw, &response) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling response") + } + + return nil, fmt.Errorf(`Request failed CF API returned with status code %d +------------------------------- +Error Code %s +Code %d +Description %s`, + resp.StatusCode, response.ErrorCode, response.Code, response.Description) + } + //get the json from the response body + return respBodyToSecGroup(resp.Body, c) +} + +func getMinStagingSpacesVersion() *semver.Version { + v, _ := semver.NewVersion("2.68.0") + return v +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/service_bindings.go b/vendor/github.com/cloudfoundry-community/go-cfclient/service_bindings.go new file mode 100644 index 000000000000..09bdecc8a362 --- /dev/null +++ 
b/vendor/github.com/cloudfoundry-community/go-cfclient/service_bindings.go @@ -0,0 +1,176 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type ServiceBindingsResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + Resources []ServiceBindingResource `json:"resources"` + NextUrl string `json:"next_url"` +} + +type ServiceBindingResource struct { + Meta Meta `json:"metadata"` + Entity ServiceBinding `json:"entity"` +} + +type ServiceBinding struct { + Guid string `json:"guid"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + AppGuid string `json:"app_guid"` + ServiceInstanceGuid string `json:"service_instance_guid"` + Credentials interface{} `json:"credentials"` + BindingOptions interface{} `json:"binding_options"` + GatewayData interface{} `json:"gateway_data"` + GatewayName string `json:"gateway_name"` + SyslogDrainUrl string `json:"syslog_drain_url"` + VolumeMounts interface{} `json:"volume_mounts"` + AppUrl string `json:"app_url"` + ServiceInstanceUrl string `json:"service_instance_url"` + c *Client +} + +func (c *Client) ListServiceBindingsByQuery(query url.Values) ([]ServiceBinding, error) { + var serviceBindings []ServiceBinding + requestUrl := "/v2/service_bindings?" 
+ query.Encode() + + for { + var serviceBindingsResp ServiceBindingsResponse + + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting service bindings") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading service bindings request:") + } + + err = json.Unmarshal(resBody, &serviceBindingsResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling service bindings") + } + for _, serviceBinding := range serviceBindingsResp.Resources { + serviceBinding.Entity.Guid = serviceBinding.Meta.Guid + serviceBinding.Entity.CreatedAt = serviceBinding.Meta.CreatedAt + serviceBinding.Entity.UpdatedAt = serviceBinding.Meta.UpdatedAt + serviceBinding.Entity.c = c + serviceBindings = append(serviceBindings, serviceBinding.Entity) + } + requestUrl = serviceBindingsResp.NextUrl + if requestUrl == "" { + break + } + } + + return serviceBindings, nil +} + +func (c *Client) ListServiceBindings() ([]ServiceBinding, error) { + return c.ListServiceBindingsByQuery(nil) +} + +func (c *Client) GetServiceBindingByGuid(guid string) (ServiceBinding, error) { + var serviceBinding ServiceBindingResource + r := c.NewRequest("GET", "/v2/service_bindings/"+url.QueryEscape(guid)) + resp, err := c.DoRequest(r) + if err != nil { + return ServiceBinding{}, errors.Wrap(err, "Error requesting serving binding") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return ServiceBinding{}, errors.Wrap(err, "Error reading service binding response body") + } + err = json.Unmarshal(resBody, &serviceBinding) + if err != nil { + return ServiceBinding{}, errors.Wrap(err, "Error unmarshalling service binding") + } + serviceBinding.Entity.Guid = serviceBinding.Meta.Guid + serviceBinding.Entity.CreatedAt = serviceBinding.Meta.CreatedAt + serviceBinding.Entity.UpdatedAt = serviceBinding.Meta.UpdatedAt + serviceBinding.Entity.c = 
c
+	return serviceBinding.Entity, nil
+}
+
+func (c *Client) ServiceBindingByGuid(guid string) (ServiceBinding, error) {
+	return c.GetServiceBindingByGuid(guid)
+}
+
+func (c *Client) DeleteServiceBinding(guid string) error {
+	resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/service_bindings/%s", guid)))
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusNoContent {
+		return errors.Errorf("Error deleting service binding %s, response code %d", guid, resp.StatusCode)
+	}
+	return nil
+}
+
+func (c *Client) CreateServiceBinding(appGUID, serviceInstanceGUID string) (*ServiceBinding, error) {
+	req := c.NewRequest("POST", fmt.Sprintf("/v2/service_bindings"))
+	req.obj = map[string]interface{}{
+		"app_guid":              appGUID,
+		"service_instance_guid": serviceInstanceGUID,
+	}
+	resp, err := c.DoRequest(req)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusCreated {
+		return nil, errors.Errorf("Error binding app %s to service instance %s, response code %d", appGUID, serviceInstanceGUID, resp.StatusCode)
+	}
+	return c.handleServiceBindingResp(resp)
+}
+
+func (c *Client) CreateRouteServiceBinding(routeGUID, serviceInstanceGUID string) error {
+	req := c.NewRequest("PUT", fmt.Sprintf("/v2/user_provided_service_instances/%s/routes/%s", serviceInstanceGUID, routeGUID))
+	resp, err := c.DoRequest(req)
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusCreated {
+		return errors.Errorf("Error binding route %s to service instance %s, response code %d", routeGUID, serviceInstanceGUID, resp.StatusCode)
+	}
+	return nil
+}
+
+func (c *Client) DeleteRouteServiceBinding(routeGUID, serviceInstanceGUID string) error {
+	req := c.NewRequest("DELETE", fmt.Sprintf("/v2/service_instances/%s/routes/%s", serviceInstanceGUID, routeGUID))
+	resp, err := c.DoRequest(req)
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return errors.Errorf("Error deleting bound route %s from 
service instance %s, response code %d", routeGUID, serviceInstanceGUID, resp.StatusCode) + } + return nil +} + +func (c *Client) handleServiceBindingResp(resp *http.Response) (*ServiceBinding, error) { + defer resp.Body.Close() + var sb ServiceBindingResource + err := json.NewDecoder(resp.Body).Decode(&sb) + if err != nil { + return nil, err + } + return c.mergeServiceBindingResource(sb), nil +} + +func (c *Client) mergeServiceBindingResource(serviceBinding ServiceBindingResource) *ServiceBinding { + serviceBinding.Entity.Guid = serviceBinding.Meta.Guid + serviceBinding.Entity.c = c + return &serviceBinding.Entity +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/service_brokers.go b/vendor/github.com/cloudfoundry-community/go-cfclient/service_brokers.go new file mode 100644 index 000000000000..4cc5b28e4481 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/service_brokers.go @@ -0,0 +1,207 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type ServiceBrokerResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []ServiceBrokerResource `json:"resources"` +} + +type ServiceBrokerResource struct { + Meta Meta `json:"metadata"` + Entity ServiceBroker `json:"entity"` +} + +type UpdateServiceBrokerRequest struct { + Name string `json:"name"` + BrokerURL string `json:"broker_url"` + Username string `json:"auth_username"` + Password string `json:"auth_password"` +} + +type CreateServiceBrokerRequest struct { + Name string `json:"name"` + BrokerURL string `json:"broker_url"` + Username string `json:"auth_username"` + Password string `json:"auth_password"` + SpaceGUID string `json:"space_guid,omitempty"` +} + +type ServiceBroker struct { + Guid string `json:"guid"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + UpdatedAt string 
`json:"updated_at"`
+	BrokerURL string `json:"broker_url"`
+	Username  string `json:"auth_username"`
+	Password  string `json:"auth_password"`
+	SpaceGUID string `json:"space_guid,omitempty"`
+	c         *Client
+}
+
+func (c *Client) DeleteServiceBroker(guid string) error {
+	requestUrl := fmt.Sprintf("/v2/service_brokers/%s", guid)
+	r := c.NewRequest("DELETE", requestUrl)
+	resp, err := c.DoRequest(r)
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusNoContent {
+		return errors.Errorf("Error deleting service broker %s, response code: %d", guid, resp.StatusCode)
+	}
+	return nil
+
+}
+
+func (c *Client) UpdateServiceBroker(guid string, usb UpdateServiceBrokerRequest) (ServiceBroker, error) {
+	var serviceBrokerResource ServiceBrokerResource
+
+	buf := bytes.NewBuffer(nil)
+	err := json.NewEncoder(buf).Encode(usb)
+	if err != nil {
+		return ServiceBroker{}, err
+	}
+	req := c.NewRequestWithBody("PUT", fmt.Sprintf("/v2/service_brokers/%s", guid), buf)
+	resp, err := c.DoRequest(req)
+	if err != nil {
+		return ServiceBroker{}, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return ServiceBroker{}, fmt.Errorf("CF API returned with status code %d", resp.StatusCode)
+	}
+
+	body, err := ioutil.ReadAll(resp.Body)
+	defer resp.Body.Close()
+	if err != nil {
+		return ServiceBroker{}, err
+	}
+	err = json.Unmarshal(body, &serviceBrokerResource)
+	if err != nil {
+		return ServiceBroker{}, err
+	}
+	serviceBrokerResource.Entity.Guid = serviceBrokerResource.Meta.Guid
+	return serviceBrokerResource.Entity, nil
+}
+
+func (c *Client) CreateServiceBroker(csb CreateServiceBrokerRequest) (ServiceBroker, error) {
+	var serviceBrokerResource ServiceBrokerResource
+
+	buf := bytes.NewBuffer(nil)
+	err := json.NewEncoder(buf).Encode(csb)
+	if err != nil {
+		return ServiceBroker{}, err
+	}
+	req := c.NewRequestWithBody("POST", "/v2/service_brokers", buf)
+	resp, err := c.DoRequest(req)
+	if err != nil {
+		return ServiceBroker{}, err
+	}
+	if resp.StatusCode != 
http.StatusCreated { + return ServiceBroker{}, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return ServiceBroker{}, err + } + err = json.Unmarshal(body, &serviceBrokerResource) + if err != nil { + return ServiceBroker{}, err + } + + serviceBrokerResource.Entity.Guid = serviceBrokerResource.Meta.Guid + return serviceBrokerResource.Entity, nil +} + +func (c *Client) ListServiceBrokersByQuery(query url.Values) ([]ServiceBroker, error) { + var sbs []ServiceBroker + requestUrl := "/v2/service_brokers?" + query.Encode() + for { + serviceBrokerResp, err := c.getServiceBrokerResponse(requestUrl) + if err != nil { + return []ServiceBroker{}, err + } + for _, sb := range serviceBrokerResp.Resources { + sb.Entity.Guid = sb.Meta.Guid + sb.Entity.CreatedAt = sb.Meta.CreatedAt + sb.Entity.UpdatedAt = sb.Meta.UpdatedAt + sbs = append(sbs, sb.Entity) + } + requestUrl = serviceBrokerResp.NextUrl + if requestUrl == "" { + break + } + } + return sbs, nil +} + +func (c *Client) ListServiceBrokers() ([]ServiceBroker, error) { + return c.ListServiceBrokersByQuery(nil) +} + +func (c *Client) GetServiceBrokerByGuid(guid string) (ServiceBroker, error) { + var serviceBrokerRes ServiceBrokerResource + r := c.NewRequest("GET", "/v2/service_brokers/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return ServiceBroker{}, err + } + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return ServiceBroker{}, err + } + err = json.Unmarshal(body, &serviceBrokerRes) + if err != nil { + return ServiceBroker{}, err + } + serviceBrokerRes.Entity.Guid = serviceBrokerRes.Meta.Guid + serviceBrokerRes.Entity.CreatedAt = serviceBrokerRes.Meta.CreatedAt + serviceBrokerRes.Entity.UpdatedAt = serviceBrokerRes.Meta.UpdatedAt + return serviceBrokerRes.Entity, nil +} + +func (c *Client) GetServiceBrokerByName(name string) (ServiceBroker, error) { + var sb 
ServiceBroker + q := url.Values{} + q.Set("q", "name:"+name) + sbs, err := c.ListServiceBrokersByQuery(q) + if err != nil { + return sb, err + } + if len(sbs) == 0 { + return sb, fmt.Errorf("Unable to find service broker %s", name) + } + return sbs[0], nil +} + +func (c *Client) getServiceBrokerResponse(requestUrl string) (ServiceBrokerResponse, error) { + var serviceBrokerResp ServiceBrokerResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return ServiceBrokerResponse{}, errors.Wrap(err, "Error requesting Service Brokers") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return ServiceBrokerResponse{}, errors.Wrap(err, "Error reading Service Broker request") + } + err = json.Unmarshal(resBody, &serviceBrokerResp) + if err != nil { + return ServiceBrokerResponse{}, errors.Wrap(err, "Error unmarshalling Service Broker") + } + return serviceBrokerResp, nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/service_instances.go b/vendor/github.com/cloudfoundry-community/go-cfclient/service_instances.go new file mode 100644 index 000000000000..129b31381915 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/service_instances.go @@ -0,0 +1,186 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type ServiceInstancesResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []ServiceInstanceResource `json:"resources"` +} + +type ServiceInstanceRequest struct { + Name string `json:"name"` + SpaceGuid string `json:"space_guid"` + ServicePlanGuid string `json:"service_plan_guid"` + Parameters map[string]interface{} `json:"parameters,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +type ServiceInstanceResource struct { + Meta Meta `json:"metadata"` + Entity 
ServiceInstance `json:"entity"` +} + +type ServiceInstance struct { + Name string `json:"name"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Credentials map[string]interface{} `json:"credentials"` + ServicePlanGuid string `json:"service_plan_guid"` + SpaceGuid string `json:"space_guid"` + DashboardUrl string `json:"dashboard_url"` + Type string `json:"type"` + LastOperation LastOperation `json:"last_operation"` + Tags []string `json:"tags"` + ServiceGuid string `json:"service_guid"` + SpaceUrl string `json:"space_url"` + ServicePlanUrl string `json:"service_plan_url"` + ServiceBindingsUrl string `json:"service_bindings_url"` + ServiceKeysUrl string `json:"service_keys_url"` + RoutesUrl string `json:"routes_url"` + ServiceUrl string `json:"service_url"` + Guid string `json:"guid"` + c *Client +} + +type LastOperation struct { + Type string `json:"type"` + State string `json:"state"` + Description string `json:"description"` + UpdatedAt string `json:"updated_at"` + CreatedAt string `json:"created_at"` +} + +func (c *Client) ListServiceInstancesByQuery(query url.Values) ([]ServiceInstance, error) { + var instances []ServiceInstance + + requestUrl := "/v2/service_instances?" 
+ query.Encode() + for { + var sir ServiceInstancesResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting service instances") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading service instances request:") + } + + err = json.Unmarshal(resBody, &sir) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling service instances") + } + for _, instance := range sir.Resources { + instances = append(instances, c.mergeServiceInstance(instance)) + } + + requestUrl = sir.NextUrl + if requestUrl == "" { + break + } + } + return instances, nil +} + +func (c *Client) ListServiceInstances() ([]ServiceInstance, error) { + return c.ListServiceInstancesByQuery(nil) +} + +func (c *Client) GetServiceInstanceByGuid(guid string) (ServiceInstance, error) { + var sir ServiceInstanceResource + req := c.NewRequest("GET", "/v2/service_instances/"+guid) + res, err := c.DoRequest(req) + if err != nil { + return ServiceInstance{}, errors.Wrap(err, "Error requesting service instance") + } + + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return ServiceInstance{}, errors.Wrap(err, "Error reading service instance response") + } + err = json.Unmarshal(data, &sir) + if err != nil { + return ServiceInstance{}, errors.Wrap(err, "Error JSON parsing service instance response") + } + return c.mergeServiceInstance(sir), nil +} + +func (c *Client) ServiceInstanceByGuid(guid string) (ServiceInstance, error) { + return c.GetServiceInstanceByGuid(guid) +} + +func (c *Client) mergeServiceInstance(instance ServiceInstanceResource) ServiceInstance { + instance.Entity.Guid = instance.Meta.Guid + instance.Entity.CreatedAt = instance.Meta.CreatedAt + instance.Entity.UpdatedAt = instance.Meta.UpdatedAt + instance.Entity.c = c + return instance.Entity +} + +func (c *Client) CreateServiceInstance(req ServiceInstanceRequest) (ServiceInstance, 
error) { + var sir ServiceInstanceResource + + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return ServiceInstance{}, err + } + + r := c.NewRequestWithBody("POST", "/v2/service_instances?accepts_incomplete=true", buf) + + res, err := c.DoRequest(r) + if err != nil { + return ServiceInstance{}, err + } + + if res.StatusCode != http.StatusAccepted && res.StatusCode != http.StatusCreated { + return ServiceInstance{}, errors.Wrapf(err, "Error creating service, response code: %d", res.StatusCode) + } + + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return ServiceInstance{}, errors.Wrap(err, "Error reading service instance response") + } + + err = json.Unmarshal(data, &sir) + if err != nil { + return ServiceInstance{}, errors.Wrap(err, "Error JSON parsing service instance response") + } + + return c.mergeServiceInstance(sir), nil +} + +func (c *Client) UpdateServiceInstance(serviceInstanceGuid string, updatedConfiguration io.Reader, async bool) error { + u := fmt.Sprintf("/v2/service_instances/%s?accepts_incomplete=%t", serviceInstanceGuid, async) + resp, err := c.DoRequest(c.NewRequestWithBody("PUT", u, updatedConfiguration)) + if err != nil { + return err + } + if resp.StatusCode != http.StatusAccepted { + return errors.Wrapf(err, "Error updating service instance %s, response code %d", serviceInstanceGuid, resp.StatusCode) + } + return nil +} + +func (c *Client) DeleteServiceInstance(guid string, recursive, async bool) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/service_instances/%s?recursive=%t&accepts_incomplete=%t&async=%t", guid, recursive, async, async))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusAccepted { + return errors.Wrapf(err, "Error deleting service instance %s, response code %d", guid, resp.StatusCode) + } + return nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/service_keys.go 
b/vendor/github.com/cloudfoundry-community/go-cfclient/service_keys.go new file mode 100644 index 000000000000..3c13a0350985 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/service_keys.go @@ -0,0 +1,171 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type ServiceKeysResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + Resources []ServiceKeyResource `json:"resources"` + NextUrl string `json:"next_url"` +} + +type ServiceKeyResource struct { + Meta Meta `json:"metadata"` + Entity ServiceKey `json:"entity"` +} + +type CreateServiceKeyRequest struct { + Name string `json:"name"` + ServiceInstanceGuid string `json:"service_instance_guid"` + Parameters interface{} `json:"parameters,omitempty"` +} + +type ServiceKey struct { + Name string `json:"name"` + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ServiceInstanceGuid string `json:"service_instance_guid"` + Credentials interface{} `json:"credentials"` + ServiceInstanceUrl string `json:"service_instance_url"` + c *Client +} + +func (c *Client) ListServiceKeysByQuery(query url.Values) ([]ServiceKey, error) { + var serviceKeys []ServiceKey + requestUrl := "/v2/service_keys?" 
+ query.Encode() + + for { + var serviceKeysResp ServiceKeysResponse + + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting service keys") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading service keys request:") + } + + err = json.Unmarshal(resBody, &serviceKeysResp) + if err != nil { + return nil, errors.Wrapf(err, "Error unmarshaling service keys: %q", string(resBody)) + } + for _, serviceKey := range serviceKeysResp.Resources { + serviceKey.Entity.Guid = serviceKey.Meta.Guid + serviceKey.Entity.CreatedAt = serviceKey.Meta.CreatedAt + serviceKey.Entity.UpdatedAt = serviceKey.Meta.UpdatedAt + serviceKey.Entity.c = c + serviceKeys = append(serviceKeys, serviceKey.Entity) + } + + requestUrl = serviceKeysResp.NextUrl + if requestUrl == "" { + break + } + } + + return serviceKeys, nil +} + +func (c *Client) ListServiceKeys() ([]ServiceKey, error) { + return c.ListServiceKeysByQuery(nil) +} + +func (c *Client) GetServiceKeyByName(name string) (ServiceKey, error) { + var serviceKey ServiceKey + q := url.Values{} + q.Set("q", "name:"+name) + serviceKeys, err := c.ListServiceKeysByQuery(q) + if err != nil { + return serviceKey, err + } + if len(serviceKeys) == 0 { + return serviceKey, fmt.Errorf("Unable to find service key %s", name) + } + return serviceKeys[0], nil +} + +// GetServiceKeyByInstanceGuid is deprecated in favor of GetServiceKeysByInstanceGuid +func (c *Client) GetServiceKeyByInstanceGuid(guid string) (ServiceKey, error) { + var serviceKey ServiceKey + q := url.Values{} + q.Set("q", "service_instance_guid:"+guid) + serviceKeys, err := c.ListServiceKeysByQuery(q) + if err != nil { + return serviceKey, err + } + if len(serviceKeys) == 0 { + return serviceKey, fmt.Errorf("Unable to find service key for guid %s", guid) + } + return serviceKeys[0], nil +} + +// GetServiceKeysByInstanceGuid returns the service keys for a 
service instance. +// If none are found, it returns an error. +func (c *Client) GetServiceKeysByInstanceGuid(guid string) ([]ServiceKey, error) { + q := url.Values{} + q.Set("q", "service_instance_guid:"+guid) + serviceKeys, err := c.ListServiceKeysByQuery(q) + if err != nil { + return serviceKeys, err + } + if len(serviceKeys) == 0 { + return serviceKeys, fmt.Errorf("Unable to find service key for guid %s", guid) + } + return serviceKeys, nil +} + +// CreateServiceKey creates a service key from the request. If a service key +// exists already, it returns an error containing `CF-ServiceKeyNameTaken` +func (c *Client) CreateServiceKey(csr CreateServiceKeyRequest) (ServiceKey, error) { + var serviceKeyResource ServiceKeyResource + + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(csr) + if err != nil { + return ServiceKey{}, err + } + req := c.NewRequestWithBody("POST", "/v2/service_keys", buf) + resp, err := c.DoRequest(req) + if err != nil { + return ServiceKey{}, err + } + if resp.StatusCode != http.StatusCreated { + return ServiceKey{}, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return ServiceKey{}, err + } + err = json.Unmarshal(body, &serviceKeyResource) + if err != nil { + return ServiceKey{}, err + } + + return serviceKeyResource.Entity, nil +} + +// DeleteServiceKey removes a service key instance +func (c *Client) DeleteServiceKey(guid string) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/service_keys/%s", guid))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting service instance key %s, response code %d", guid, resp.StatusCode) + } + return nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/service_plan_visibilities.go b/vendor/github.com/cloudfoundry-community/go-cfclient/service_plan_visibilities.go 
new file mode 100644 index 000000000000..d4a84579f6cc --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/service_plan_visibilities.go @@ -0,0 +1,169 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type ServicePlanVisibilitiesResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []ServicePlanVisibilityResource `json:"resources"` +} + +type ServicePlanVisibilityResource struct { + Meta Meta `json:"metadata"` + Entity ServicePlanVisibility `json:"entity"` +} + +type ServicePlanVisibility struct { + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ServicePlanGuid string `json:"service_plan_guid"` + OrganizationGuid string `json:"organization_guid"` + ServicePlanUrl string `json:"service_plan_url"` + OrganizationUrl string `json:"organization_url"` + c *Client +} + +func (c *Client) ListServicePlanVisibilitiesByQuery(query url.Values) ([]ServicePlanVisibility, error) { + var servicePlanVisibilities []ServicePlanVisibility + requestUrl := "/v2/service_plan_visibilities?" 
+ query.Encode() + for { + var servicePlanVisibilitiesResp ServicePlanVisibilitiesResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting service plan visibilities") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading service plan visibilities request:") + } + + err = json.Unmarshal(resBody, &servicePlanVisibilitiesResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling service plan visibilities") + } + for _, servicePlanVisibility := range servicePlanVisibilitiesResp.Resources { + servicePlanVisibility.Entity.Guid = servicePlanVisibility.Meta.Guid + servicePlanVisibility.Entity.CreatedAt = servicePlanVisibility.Meta.CreatedAt + servicePlanVisibility.Entity.UpdatedAt = servicePlanVisibility.Meta.UpdatedAt + servicePlanVisibility.Entity.c = c + servicePlanVisibilities = append(servicePlanVisibilities, servicePlanVisibility.Entity) + } + requestUrl = servicePlanVisibilitiesResp.NextUrl + if requestUrl == "" { + break + } + } + return servicePlanVisibilities, nil +} + +func (c *Client) ListServicePlanVisibilities() ([]ServicePlanVisibility, error) { + return c.ListServicePlanVisibilitiesByQuery(nil) +} + +func (c *Client) GetServicePlanVisibilityByGuid(guid string) (ServicePlanVisibility, error) { + r := c.NewRequest("GET", "/v2/service_plan_visibilities/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return ServicePlanVisibility{}, err + } + return respBodyToServicePlanVisibility(resp.Body, c) +} + +//a uniqueID is the id of the service in the catalog and not in cf internal db +func (c *Client) CreateServicePlanVisibilityByUniqueId(uniqueId string, organizationGuid string) (ServicePlanVisibility, error) { + q := url.Values{} + q.Set("q", fmt.Sprintf("unique_id:%s", uniqueId)) + plans, err := c.ListServicePlansByQuery(q) + if err != nil { + return ServicePlanVisibility{}, errors.Wrap(err, 
fmt.Sprintf("Couldn't find a service plan with unique_id: %s", uniqueId)) + } + return c.CreateServicePlanVisibility(plans[0].Guid, organizationGuid) +} + +func (c *Client) CreateServicePlanVisibility(servicePlanGuid string, organizationGuid string) (ServicePlanVisibility, error) { + req := c.NewRequest("POST", "/v2/service_plan_visibilities") + req.obj = map[string]interface{}{ + "service_plan_guid": servicePlanGuid, + "organization_guid": organizationGuid, + } + resp, err := c.DoRequest(req) + if err != nil { + return ServicePlanVisibility{}, err + } + if resp.StatusCode != http.StatusCreated { + return ServicePlanVisibility{}, errors.Wrapf(err, "Error creating service plan visibility, response code: %d", resp.StatusCode) + } + return respBodyToServicePlanVisibility(resp.Body, c) +} + +func (c *Client) DeleteServicePlanVisibilityByPlanAndOrg(servicePlanGuid string, organizationGuid string, async bool) error { + q := url.Values{} + q.Set("q", fmt.Sprintf("organization_guid:%s;service_plan_guid:%s", organizationGuid, servicePlanGuid)) + plans, err := c.ListServicePlanVisibilitiesByQuery(q) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("Couldn't find a service plan visibility for service plan %s and org %s", servicePlanGuid, organizationGuid)) + } + if len(plans) != 1 { + return fmt.Errorf("Query for a service plan visibility did not return exactly one result when searching for a service plan visibility for service plan %s and org %s", + servicePlanGuid, organizationGuid) + } + return c.DeleteServicePlanVisibility(plans[0].Guid, async) +} + +func (c *Client) DeleteServicePlanVisibility(guid string, async bool) error { + req := c.NewRequest("DELETE", fmt.Sprintf("/v2/service_plan_visibilities/%s?async=%v", guid, async)) + resp, err := c.DoRequest(req) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting service plan visibility, response code: %d", resp.StatusCode) + } + return nil +} + 
+func (c *Client) UpdateServicePlanVisibility(guid string, servicePlanGuid string, organizationGuid string) (ServicePlanVisibility, error) { + req := c.NewRequest("PUT", "/v2/service_plan_visibilities/"+guid) + req.obj = map[string]interface{}{ + "service_plan_guid": servicePlanGuid, + "organization_guid": organizationGuid, + } + resp, err := c.DoRequest(req) + if err != nil { + return ServicePlanVisibility{}, err + } + if resp.StatusCode != http.StatusCreated { + return ServicePlanVisibility{}, errors.Wrapf(err, "Error updating service plan visibility, response code: %d", resp.StatusCode) + } + return respBodyToServicePlanVisibility(resp.Body, c) +} + +func respBodyToServicePlanVisibility(body io.ReadCloser, c *Client) (ServicePlanVisibility, error) { + bodyRaw, err := ioutil.ReadAll(body) + if err != nil { + return ServicePlanVisibility{}, err + } + servicePlanVisibilityRes := ServicePlanVisibilityResource{} + err = json.Unmarshal(bodyRaw, &servicePlanVisibilityRes) + if err != nil { + return ServicePlanVisibility{}, err + } + servicePlanVisibility := servicePlanVisibilityRes.Entity + servicePlanVisibility.Guid = servicePlanVisibilityRes.Meta.Guid + servicePlanVisibility.CreatedAt = servicePlanVisibilityRes.Meta.CreatedAt + servicePlanVisibility.UpdatedAt = servicePlanVisibilityRes.Meta.UpdatedAt + servicePlanVisibility.c = c + return servicePlanVisibility, nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/service_plans.go b/vendor/github.com/cloudfoundry-community/go-cfclient/service_plans.go new file mode 100644 index 000000000000..a26035949247 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/service_plans.go @@ -0,0 +1,129 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type ServicePlansResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources 
[]ServicePlanResource `json:"resources"` +} + +type ServicePlanResource struct { + Meta Meta `json:"metadata"` + Entity ServicePlan `json:"entity"` +} + +type ServicePlan struct { + Name string `json:"name"` + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Free bool `json:"free"` + Description string `json:"description"` + ServiceGuid string `json:"service_guid"` + Extra interface{} `json:"extra"` + UniqueId string `json:"unique_id"` + Public bool `json:"public"` + Active bool `json:"active"` + Bindable bool `json:"bindable"` + ServiceUrl string `json:"service_url"` + ServiceInstancesUrl string `json:"service_instances_url"` + c *Client +} + +func (c *Client) ListServicePlansByQuery(query url.Values) ([]ServicePlan, error) { + var servicePlans []ServicePlan + requestUrl := "/v2/service_plans?" + query.Encode() + for { + var servicePlansResp ServicePlansResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting service plans") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading service plans request:") + } + err = json.Unmarshal(resBody, &servicePlansResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling service plans") + } + for _, servicePlan := range servicePlansResp.Resources { + servicePlan.Entity.Guid = servicePlan.Meta.Guid + servicePlan.Entity.CreatedAt = servicePlan.Meta.CreatedAt + servicePlan.Entity.UpdatedAt = servicePlan.Meta.UpdatedAt + servicePlan.Entity.c = c + servicePlans = append(servicePlans, servicePlan.Entity) + } + requestUrl = servicePlansResp.NextUrl + if requestUrl == "" { + break + } + } + return servicePlans, nil +} + +func (c *Client) ListServicePlans() ([]ServicePlan, error) { + return c.ListServicePlansByQuery(nil) +} + +func (c *Client) GetServicePlanByGUID(guid string) (*ServicePlan, error) { + var ( + plan 
*ServicePlan + planResponse ServicePlanResource + ) + + r := c.NewRequest("GET", "/v2/service_plans/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return nil, err + } + + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return nil, err + } + + err = json.Unmarshal(body, &planResponse) + if err != nil { + return nil, err + } + + planResponse.Entity.Guid = planResponse.Meta.Guid + planResponse.Entity.CreatedAt = planResponse.Meta.CreatedAt + planResponse.Entity.UpdatedAt = planResponse.Meta.UpdatedAt + plan = &planResponse.Entity + + return plan, nil +} + +func (c *Client) MakeServicePlanPublic(servicePlanGUID string) error { + return c.setPlanGlobalVisibility(servicePlanGUID, true) +} + +func (c *Client) MakeServicePlanPrivate(servicePlanGUID string) error { + return c.setPlanGlobalVisibility(servicePlanGUID, false) +} + +func (c *Client) setPlanGlobalVisibility(servicePlanGUID string, public bool) error { + bodyString := fmt.Sprintf(`{"public": %t}`, public) + req := c.NewRequestWithBody("PUT", fmt.Sprintf("/v2/service_plans/%s", servicePlanGUID), bytes.NewBufferString(bodyString)) + + resp, err := c.DoRequest(req) + if err != nil { + return err + } + defer resp.Body.Close() + return nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/service_usage_events.go b/vendor/github.com/cloudfoundry-community/go-cfclient/service_usage_events.go new file mode 100644 index 000000000000..17fb8a2b1e1f --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/service_usage_events.go @@ -0,0 +1,72 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/pkg/errors" +) + +type ServiceUsageEvent struct { + GUID string `json:"guid"` + CreatedAt string `json:"created_at"` + State string `json:"state"` + OrgGUID string `json:"org_guid"` + SpaceGUID string `json:"space_guid"` + SpaceName string `json:"space_name"` + ServiceInstanceGUID string `json:"service_instance_guid"` + 
ServiceInstanceName string `json:"service_instance_name"` + ServiceInstanceType string `json:"service_instance_type"` + ServicePlanGUID string `json:"service_plan_guid"` + ServicePlanName string `json:"service_plan_name"` + ServiceGUID string `json:"service_guid"` + ServiceLabel string `json:"service_label"` + c *Client +} + +type ServiceUsageEventsResponse struct { + TotalResults int `json:"total_results"` + Pages int `json:"total_pages"` + NextURL string `json:"next_url"` + Resources []ServiceUsageEventResource `json:"resources"` +} + +type ServiceUsageEventResource struct { + Meta Meta `json:"metadata"` + Entity ServiceUsageEvent `json:"entity"` +} + +// ListServiceUsageEventsByQuery lists all events matching the provided query. +func (c *Client) ListServiceUsageEventsByQuery(query url.Values) ([]ServiceUsageEvent, error) { + var serviceUsageEvents []ServiceUsageEvent + requestURL := fmt.Sprintf("/v2/service_usage_events?%s", query.Encode()) + for { + var serviceUsageEventsResponse ServiceUsageEventsResponse + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "error requesting events") + } + defer resp.Body.Close() + if err := json.NewDecoder(resp.Body).Decode(&serviceUsageEventsResponse); err != nil { + return nil, errors.Wrap(err, "error unmarshaling events") + } + for _, e := range serviceUsageEventsResponse.Resources { + e.Entity.GUID = e.Meta.Guid + e.Entity.CreatedAt = e.Meta.CreatedAt + e.Entity.c = c + serviceUsageEvents = append(serviceUsageEvents, e.Entity) + } + requestURL = serviceUsageEventsResponse.NextURL + if requestURL == "" { + break + } + } + return serviceUsageEvents, nil +} + +// ListServiceUsageEvents lists all unfiltered events. 
+func (c *Client) ListServiceUsageEvents() ([]ServiceUsageEvent, error) { + return c.ListServiceUsageEventsByQuery(nil) +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/services.go b/vendor/github.com/cloudfoundry-community/go-cfclient/services.go new file mode 100644 index 000000000000..b02365b94675 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/services.go @@ -0,0 +1,107 @@ +package cfclient + +import ( + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type ServicesResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []ServicesResource `json:"resources"` +} + +type ServicesResource struct { + Meta Meta `json:"metadata"` + Entity Service `json:"entity"` +} + +type Service struct { + Guid string `json:"guid"` + Label string `json:"label"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Description string `json:"description"` + Active bool `json:"active"` + Bindable bool `json:"bindable"` + ServiceBrokerGuid string `json:"service_broker_guid"` + PlanUpdateable bool `json:"plan_updateable"` + Tags []string `json:"tags"` + UniqueID string `json:"unique_id"` + Extra string `json:"extra"` + Requires []string `json:"requires"` + InstancesRetrievable bool `json:"instances_retrievable"` + BindingsRetrievable bool `json:"bindings_retrievable"` + c *Client +} + +type ServiceSummary struct { + Guid string `json:"guid"` + Name string `json:"name"` + BoundAppCount int `json:"bound_app_count"` +} + +func (c *Client) GetServiceByGuid(guid string) (Service, error) { + var serviceRes ServicesResource + r := c.NewRequest("GET", "/v2/services/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return Service{}, err + } + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return Service{}, err + } + err = json.Unmarshal(body, &serviceRes) + if err != nil { + 
return Service{}, err + } + serviceRes.Entity.Guid = serviceRes.Meta.Guid + serviceRes.Entity.CreatedAt = serviceRes.Meta.CreatedAt + serviceRes.Entity.UpdatedAt = serviceRes.Meta.UpdatedAt + return serviceRes.Entity, nil + +} + +func (c *Client) ListServicesByQuery(query url.Values) ([]Service, error) { + var services []Service + requestUrl := "/v2/services?" + query.Encode() + for { + var serviceResp ServicesResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting services") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading services request:") + } + + err = json.Unmarshal(resBody, &serviceResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling services") + } + for _, service := range serviceResp.Resources { + service.Entity.Guid = service.Meta.Guid + service.Entity.CreatedAt = service.Meta.CreatedAt + service.Entity.UpdatedAt = service.Meta.UpdatedAt + service.Entity.c = c + services = append(services, service.Entity) + } + requestUrl = serviceResp.NextUrl + if requestUrl == "" { + break + } + } + return services, nil +} + +func (c *Client) ListServices() ([]Service, error) { + return c.ListServicesByQuery(nil) +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/space_quotas.go b/vendor/github.com/cloudfoundry-community/go-cfclient/space_quotas.go new file mode 100644 index 000000000000..6c82a062e1bb --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/space_quotas.go @@ -0,0 +1,183 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type SpaceQuotasResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []SpaceQuotasResource `json:"resources"` +} + +type SpaceQuotasResource struct { + Meta Meta 
`json:"metadata"` + Entity SpaceQuota `json:"entity"` +} + +type SpaceQuotaRequest struct { + Name string `json:"name"` + OrganizationGuid string `json:"organization_guid"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` + TotalServices int `json:"total_services"` + TotalRoutes int `json:"total_routes"` + MemoryLimit int `json:"memory_limit"` + InstanceMemoryLimit int `json:"instance_memory_limit"` + AppInstanceLimit int `json:"app_instance_limit"` + AppTaskLimit int `json:"app_task_limit"` + TotalServiceKeys int `json:"total_service_keys"` + TotalReservedRoutePorts int `json:"total_reserved_route_ports"` +} + +type SpaceQuota struct { + Guid string `json:"guid"` + CreatedAt string `json:"created_at,omitempty"` + UpdatedAt string `json:"updated_at,omitempty"` + Name string `json:"name"` + OrganizationGuid string `json:"organization_guid"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` + TotalServices int `json:"total_services"` + TotalRoutes int `json:"total_routes"` + MemoryLimit int `json:"memory_limit"` + InstanceMemoryLimit int `json:"instance_memory_limit"` + AppInstanceLimit int `json:"app_instance_limit"` + AppTaskLimit int `json:"app_task_limit"` + TotalServiceKeys int `json:"total_service_keys"` + TotalReservedRoutePorts int `json:"total_reserved_route_ports"` + c *Client +} + +func (c *Client) ListSpaceQuotasByQuery(query url.Values) ([]SpaceQuota, error) { + var spaceQuotas []SpaceQuota + requestUrl := "/v2/space_quota_definitions?" 
+ query.Encode() + for { + spaceQuotasResp, err := c.getSpaceQuotasResponse(requestUrl) + if err != nil { + return []SpaceQuota{}, err + } + for _, space := range spaceQuotasResp.Resources { + space.Entity.Guid = space.Meta.Guid + space.Entity.CreatedAt = space.Meta.CreatedAt + space.Entity.UpdatedAt = space.Meta.UpdatedAt + space.Entity.c = c + spaceQuotas = append(spaceQuotas, space.Entity) + } + requestUrl = spaceQuotasResp.NextUrl + if requestUrl == "" { + break + } + } + return spaceQuotas, nil +} + +func (c *Client) ListSpaceQuotas() ([]SpaceQuota, error) { + return c.ListSpaceQuotasByQuery(nil) +} + +func (c *Client) GetSpaceQuotaByName(name string) (SpaceQuota, error) { + q := url.Values{} + q.Set("q", "name:"+name) + spaceQuotas, err := c.ListSpaceQuotasByQuery(q) + if err != nil { + return SpaceQuota{}, err + } + if len(spaceQuotas) != 1 { + return SpaceQuota{}, fmt.Errorf("Unable to find space quota " + name) + } + return spaceQuotas[0], nil +} + +func (c *Client) getSpaceQuotasResponse(requestUrl string) (SpaceQuotasResponse, error) { + var spaceQuotasResp SpaceQuotasResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return SpaceQuotasResponse{}, errors.Wrap(err, "Error requesting space quotas") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return SpaceQuotasResponse{}, errors.Wrap(err, "Error reading space quotas body") + } + err = json.Unmarshal(resBody, &spaceQuotasResp) + if err != nil { + return SpaceQuotasResponse{}, errors.Wrap(err, "Error unmarshalling space quotas") + } + return spaceQuotasResp, nil +} + +func (c *Client) AssignSpaceQuota(quotaGUID, spaceGUID string) error { + //Perform the PUT and check for errors + resp, err := c.DoRequest(c.NewRequest("PUT", fmt.Sprintf("/v2/space_quota_definitions/%s/spaces/%s", quotaGUID, spaceGUID))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusCreated { //201 + return fmt.Errorf("CF API 
returned with status code %d", resp.StatusCode) + } + return nil +} + +func (c *Client) CreateSpaceQuota(spaceQuote SpaceQuotaRequest) (*SpaceQuota, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(spaceQuote) + if err != nil { + return nil, err + } + r := c.NewRequestWithBody("POST", "/v2/space_quota_definitions", buf) + resp, err := c.DoRequest(r) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return c.handleSpaceQuotaResp(resp) +} + +func (c *Client) UpdateSpaceQuota(spaceQuotaGUID string, spaceQuote SpaceQuotaRequest) (*SpaceQuota, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(spaceQuote) + if err != nil { + return nil, err + } + r := c.NewRequestWithBody("PUT", fmt.Sprintf("/v2/space_quota_definitions/%s", spaceQuotaGUID), buf) + resp, err := c.DoRequest(r) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return c.handleSpaceQuotaResp(resp) +} + +func (c *Client) handleSpaceQuotaResp(resp *http.Response) (*SpaceQuota, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return nil, err + } + var spaceQuotasResource SpaceQuotasResource + err = json.Unmarshal(body, &spaceQuotasResource) + if err != nil { + return nil, err + } + return c.mergeSpaceQuotaResource(spaceQuotasResource), nil +} + +func (c *Client) mergeSpaceQuotaResource(spaceQuote SpaceQuotasResource) *SpaceQuota { + spaceQuote.Entity.Guid = spaceQuote.Meta.Guid + spaceQuote.Entity.CreatedAt = spaceQuote.Meta.CreatedAt + spaceQuote.Entity.UpdatedAt = spaceQuote.Meta.UpdatedAt + spaceQuote.Entity.c = c + return &spaceQuote.Entity +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/spaces.go 
b/vendor/github.com/cloudfoundry-community/go-cfclient/spaces.go new file mode 100644 index 000000000000..be79a3a4f886 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/spaces.go @@ -0,0 +1,790 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + + "github.com/pkg/errors" +) + +type SpaceRequest struct { + Name string `json:"name"` + OrganizationGuid string `json:"organization_guid"` + DeveloperGuid []string `json:"developer_guids,omitempty"` + ManagerGuid []string `json:"manager_guids,omitempty"` + AuditorGuid []string `json:"auditor_guids,omitempty"` + DomainGuid []string `json:"domain_guids,omitempty"` + SecurityGroupGuids []string `json:"security_group_guids,omitempty"` + SpaceQuotaDefGuid string `json:"space_quota_definition_guid,omitempty"` + IsolationSegmentGuid string `json:"isolation_segment_guid,omitempty"` + AllowSSH bool `json:"allow_ssh"` +} + +type SpaceResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []SpaceResource `json:"resources"` +} + +type SpaceResource struct { + Meta Meta `json:"metadata"` + Entity Space `json:"entity"` +} + +type ServicePlanEntity struct { + Name string `json:"name"` + Free bool `json:"free"` + Public bool `json:"public"` + Active bool `json:"active"` + Description string `json:"description"` + ServiceOfferingGUID string `json:"service_guid"` + ServiceOffering ServiceOfferingResource `json:"service"` +} + +type ServiceOfferingExtra struct { + DisplayName string `json:"displayName"` + DocumentationURL string `json:"documentationURL"` + LongDescription string `json:"longDescription"` +} + +type ServiceOfferingEntity struct { + Label string + Description string + Provider string `json:"provider"` + BrokerGUID string `json:"service_broker_guid"` + Requires []string `json:"requires"` + ServicePlans []interface{} `json:"service_plans"` + Extra 
ServiceOfferingExtra +} + +type ServiceOfferingResource struct { + Metadata Meta + Entity ServiceOfferingEntity +} + +type ServiceOfferingResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + PrevUrl string `json:"prev_url"` + Resources []ServiceOfferingResource `json:"resources"` +} + +type SpaceUserResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextURL string `json:"next_url"` + Resources []UserResource `json:"resources"` +} + +type Space struct { + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Name string `json:"name"` + OrganizationGuid string `json:"organization_guid"` + OrgURL string `json:"organization_url"` + OrgData OrgResource `json:"organization"` + QuotaDefinitionGuid string `json:"space_quota_definition_guid"` + IsolationSegmentGuid string `json:"isolation_segment_guid"` + AllowSSH bool `json:"allow_ssh"` + c *Client +} + +type SpaceSummary struct { + Guid string `json:"guid"` + Name string `json:"name"` + Apps []AppSummary `json:"apps"` + Services []ServiceSummary `json:"services"` +} + +type SpaceRoleResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []SpaceRoleResource `json:"resources"` +} + +type SpaceRoleResource struct { + Meta Meta `json:"metadata"` + Entity SpaceRole `json:"entity"` +} + +type SpaceRole struct { + Guid string `json:"guid"` + Admin bool `json:"admin"` + Active bool `json:"active"` + DefaultSpaceGuid string `json:"default_space_guid"` + Username string `json:"username"` + SpaceRoles []string `json:"space_roles"` + SpacesUrl string `json:"spaces_url"` + OrganizationsUrl string `json:"organizations_url"` + ManagedOrganizationsUrl string `json:"managed_organizations_url"` + BillingManagedOrganizationsUrl string `json:"billing_managed_organizations_url"` + AuditedOrganizationsUrl 
string `json:"audited_organizations_url"` + ManagedSpacesUrl string `json:"managed_spaces_url"` + AuditedSpacesUrl string `json:"audited_spaces_url"` + c *Client +} + +func (s *Space) Org() (Org, error) { + var orgResource OrgResource + r := s.c.NewRequest("GET", s.OrgURL) + resp, err := s.c.DoRequest(r) + if err != nil { + return Org{}, errors.Wrap(err, "Error requesting org") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return Org{}, errors.Wrap(err, "Error reading org request") + } + + err = json.Unmarshal(resBody, &orgResource) + if err != nil { + return Org{}, errors.Wrap(err, "Error unmarshaling org") + } + return s.c.mergeOrgResource(orgResource), nil +} + +func (s *Space) Quota() (*SpaceQuota, error) { + var spaceQuota *SpaceQuota + var spaceQuotaResource SpaceQuotasResource + if s.QuotaDefinitionGuid == "" { + return nil, nil + } + requestUrl := fmt.Sprintf("/v2/space_quota_definitions/%s", s.QuotaDefinitionGuid) + r := s.c.NewRequest("GET", requestUrl) + resp, err := s.c.DoRequest(r) + if err != nil { + return &SpaceQuota{}, errors.Wrap(err, "Error requesting space quota") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return &SpaceQuota{}, errors.Wrap(err, "Error reading space quota body") + } + err = json.Unmarshal(resBody, &spaceQuotaResource) + if err != nil { + return &SpaceQuota{}, errors.Wrap(err, "Error unmarshalling space quota") + } + spaceQuota = &spaceQuotaResource.Entity + spaceQuota.Guid = spaceQuotaResource.Meta.Guid + spaceQuota.c = s.c + return spaceQuota, nil +} + +func (s *Space) Summary() (SpaceSummary, error) { + var spaceSummary SpaceSummary + requestUrl := fmt.Sprintf("/v2/spaces/%s/summary", s.Guid) + r := s.c.NewRequest("GET", requestUrl) + resp, err := s.c.DoRequest(r) + if err != nil { + return SpaceSummary{}, errors.Wrap(err, "Error requesting space summary") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return 
SpaceSummary{}, errors.Wrap(err, "Error reading space summary body") + } + err = json.Unmarshal(resBody, &spaceSummary) + if err != nil { + return SpaceSummary{}, errors.Wrap(err, "Error unmarshalling space summary") + } + return spaceSummary, nil +} + +func (s *Space) Roles() ([]SpaceRole, error) { + var roles []SpaceRole + requestUrl := fmt.Sprintf("/v2/spaces/%s/user_roles", s.Guid) + for { + rolesResp, err := s.c.getSpaceRolesResponse(requestUrl) + if err != nil { + return roles, err + } + for _, role := range rolesResp.Resources { + role.Entity.Guid = role.Meta.Guid + role.Entity.c = s.c + roles = append(roles, role.Entity) + } + requestUrl = rolesResp.NextUrl + if requestUrl == "" { + break + } + } + return roles, nil +} + +func (c *Client) CreateSpace(req SpaceRequest) (Space, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return Space{}, err + } + r := c.NewRequestWithBody("POST", "/v2/spaces", buf) + resp, err := c.DoRequest(r) + if err != nil { + return Space{}, err + } + if resp.StatusCode != http.StatusCreated { + return Space{}, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return c.handleSpaceResp(resp) +} + +func (c *Client) UpdateSpace(spaceGUID string, req SpaceRequest) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.Update(req) +} + +func (c *Client) DeleteSpace(guid string, recursive, async bool) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/spaces/%s?recursive=%t&async=%t", guid, recursive, async))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting space %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) ListSpaceManagersByQuery(spaceGUID string, query url.Values) ([]User, error) { + return c.listSpaceUsersByRoleAndQuery(spaceGUID, "managers", query) +} + +func (c *Client) ListSpaceManagers(spaceGUID 
string) ([]User, error) { + return c.ListSpaceManagersByQuery(spaceGUID, nil) +} + +func (c *Client) ListSpaceAuditorsByQuery(spaceGUID string, query url.Values) ([]User, error) { + return c.listSpaceUsersByRoleAndQuery(spaceGUID, "auditors", query) +} + +func (c *Client) ListSpaceAuditors(spaceGUID string) ([]User, error) { + return c.ListSpaceAuditorsByQuery(spaceGUID, nil) +} + +func (c *Client) ListSpaceDevelopersByQuery(spaceGUID string, query url.Values) ([]User, error) { + return c.listSpaceUsersByRoleAndQuery(spaceGUID, "developers", query) +} + +func (c *Client) listSpaceUsersByRoleAndQuery(spaceGUID, role string, query url.Values) ([]User, error) { + var users []User + requestURL := fmt.Sprintf("/v2/spaces/%s/%s?%s", spaceGUID, role, query.Encode()) + for { + userResp, err := c.getUserResponse(requestURL) + if err != nil { + return []User{}, err + } + for _, u := range userResp.Resources { + users = append(users, c.mergeUserResource(u)) + } + requestURL = userResp.NextUrl + if requestURL == "" { + break + } + } + return users, nil +} + +func (c *Client) ListSpaceDevelopers(spaceGUID string) ([]User, error) { + return c.ListSpaceDevelopersByQuery(spaceGUID, nil) +} + +func (c *Client) AssociateSpaceDeveloper(spaceGUID, userGUID string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.AssociateDeveloper(userGUID) +} + +func (c *Client) AssociateSpaceDeveloperByUsername(spaceGUID, name string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.AssociateDeveloperByUsername(name) +} + +func (c *Client) AssociateSpaceDeveloperByUsernameAndOrigin(spaceGUID, name, origin string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.AssociateDeveloperByUsernameAndOrigin(name, origin) +} + +func (c *Client) RemoveSpaceDeveloper(spaceGUID, userGUID string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveDeveloper(userGUID) +} + +func (c *Client) 
RemoveSpaceDeveloperByUsername(spaceGUID, name string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveDeveloperByUsername(name) +} + +func (c *Client) RemoveSpaceDeveloperByUsernameAndOrigin(spaceGUID, name, origin string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveDeveloperByUsernameAndOrigin(name, origin) +} + +func (c *Client) AssociateSpaceAuditor(spaceGUID, userGUID string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.AssociateAuditor(userGUID) +} + +func (c *Client) AssociateSpaceAuditorByUsername(spaceGUID, name string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.AssociateAuditorByUsername(name) +} + +func (c *Client) AssociateSpaceAuditorByUsernameAndOrigin(spaceGUID, name, origin string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.AssociateAuditorByUsernameAndOrigin(name, origin) +} + +func (c *Client) RemoveSpaceAuditor(spaceGUID, userGUID string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveAuditor(userGUID) +} + +func (c *Client) RemoveSpaceAuditorByUsername(spaceGUID, name string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveAuditorByUsername(name) +} + +func (c *Client) RemoveSpaceAuditorByUsernameAndOrigin(spaceGUID, name, origin string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveAuditorByUsernameAndOrigin(name, origin) +} + +func (c *Client) AssociateSpaceManager(spaceGUID, userGUID string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.AssociateManager(userGUID) +} + +func (c *Client) AssociateSpaceManagerByUsername(spaceGUID, name string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.AssociateManagerByUsername(name) +} + +func (c *Client) AssociateSpaceManagerByUsernameAndOrigin(spaceGUID, name, origin string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return 
space.AssociateManagerByUsernameAndOrigin(name, origin) +} + +func (c *Client) RemoveSpaceManager(spaceGUID, userGUID string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveManager(userGUID) +} + +func (c *Client) RemoveSpaceManagerByUsername(spaceGUID, name string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveManagerByUsername(name) +} + +func (c *Client) RemoveSpaceManagerByUsernameAndOrigin(spaceGUID, name, origin string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveManagerByUsernameAndOrigin(name, origin) +} + +func (s *Space) AssociateDeveloper(userGUID string) (Space, error) { + return s.associateRole(userGUID, "developers") +} + +func (s *Space) AssociateDeveloperByUsername(name string) (Space, error) { + return s.associateUserByRole(name, "developers", "") +} + +func (s *Space) AssociateDeveloperByUsernameAndOrigin(name, origin string) (Space, error) { + return s.associateUserByRole(name, "developers", origin) +} + +func (s *Space) RemoveDeveloper(userGUID string) error { + return s.removeRole(userGUID, "developers") +} + +func (s *Space) RemoveDeveloperByUsername(name string) error { + return s.removeUserByRole(name, "developers", "") +} + +func (s *Space) RemoveDeveloperByUsernameAndOrigin(name, origin string) error { + return s.removeUserByRole(name, "developers", origin) +} + +func (s *Space) AssociateAuditor(userGUID string) (Space, error) { + return s.associateRole(userGUID, "auditors") +} + +func (s *Space) AssociateAuditorByUsername(name string) (Space, error) { + return s.associateUserByRole(name, "auditors", "") +} + +func (s *Space) AssociateAuditorByUsernameAndOrigin(name, origin string) (Space, error) { + return s.associateUserByRole(name, "auditors", origin) +} + +func (s *Space) RemoveAuditor(userGUID string) error { + return s.removeRole(userGUID, "auditors") +} + +func (s *Space) RemoveAuditorByUsername(name string) error { + return s.removeUserByRole(name, "auditors", 
"") +} + +func (s *Space) RemoveAuditorByUsernameAndOrigin(name, origin string) error { + return s.removeUserByRole(name, "auditors", origin) +} + +func (s *Space) AssociateManager(userGUID string) (Space, error) { + return s.associateRole(userGUID, "managers") +} + +func (s *Space) AssociateManagerByUsername(name string) (Space, error) { + return s.associateUserByRole(name, "managers", "") +} + +func (s *Space) AssociateManagerByUsernameAndOrigin(name, origin string) (Space, error) { + return s.associateUserByRole(name, "managers", origin) +} + +func (s *Space) RemoveManager(userGUID string) error { + return s.removeRole(userGUID, "managers") +} + +func (s *Space) RemoveManagerByUsername(name string) error { + return s.removeUserByRole(name, "managers", "") +} +func (s *Space) RemoveManagerByUsernameAndOrigin(name, origin string) error { + return s.removeUserByRole(name, "managers", origin) +} + +func (s *Space) associateRole(userGUID, role string) (Space, error) { + requestUrl := fmt.Sprintf("/v2/spaces/%s/%s/%s", s.Guid, role, userGUID) + r := s.c.NewRequest("PUT", requestUrl) + resp, err := s.c.DoRequest(r) + if err != nil { + return Space{}, err + } + if resp.StatusCode != http.StatusCreated { + return Space{}, errors.Wrapf(err, "Error associating %s %s, response code: %d", role, userGUID, resp.StatusCode) + } + return s.c.handleSpaceResp(resp) +} + +func (s *Space) associateUserByRole(name, role, origin string) (Space, error) { + requestUrl := fmt.Sprintf("/v2/spaces/%s/%s", s.Guid, role) + buf := bytes.NewBuffer(nil) + payload := make(map[string]string) + payload["username"] = name + if origin != "" { + payload["origin"] = origin + } + err := json.NewEncoder(buf).Encode(payload) + if err != nil { + return Space{}, err + } + r := s.c.NewRequestWithBody("PUT", requestUrl, buf) + resp, err := s.c.DoRequest(r) + if err != nil { + return Space{}, err + } + if resp.StatusCode != http.StatusCreated { + return Space{}, errors.Wrapf(err, "Error associating %s %s, 
response code: %d", role, name, resp.StatusCode) + } + return s.c.handleSpaceResp(resp) +} + +func (s *Space) removeRole(userGUID, role string) error { + requestUrl := fmt.Sprintf("/v2/spaces/%s/%s/%s", s.Guid, role, userGUID) + r := s.c.NewRequest("DELETE", requestUrl) + resp, err := s.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing %s %s, response code: %d", role, userGUID, resp.StatusCode) + } + return nil +} + +func (s *Space) removeUserByRole(name, role, origin string) error { + var requestURL string + var method string + + buf := bytes.NewBuffer(nil) + payload := make(map[string]string) + payload["username"] = name + if origin != "" { + payload["origin"] = origin + requestURL = fmt.Sprintf("/v2/spaces/%s/%s/remove", s.Guid, role) + method = "POST" + } else { + requestURL = fmt.Sprintf("/v2/spaces/%s/%s", s.Guid, role) + method = "DELETE" + } + err := json.NewEncoder(buf).Encode(payload) + if err != nil { + return err + } + r := s.c.NewRequestWithBody(method, requestURL, buf) + resp, err := s.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return errors.Wrapf(err, "Error removing %s %s, response code: %d", role, name, resp.StatusCode) + } + return nil +} + +func (c *Client) ListSpaceSecGroups(spaceGUID string) (secGroups []SecGroup, err error) { + space := Space{Guid: spaceGUID, c: c} + return space.ListSecGroups() +} + +func (s *Space) ListSecGroups() (secGroups []SecGroup, err error) { + requestURL := fmt.Sprintf("/v2/spaces/%s/security_groups?inline-relations-depth=1", s.Guid) + for requestURL != "" { + var secGroupResp SecGroupResponse + r := s.c.NewRequest("GET", requestURL) + resp, err := s.c.DoRequest(r) + + if err != nil { + return nil, errors.Wrap(err, "Error requesting sec groups") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading sec group response body") 
+ } + + err = json.Unmarshal(resBody, &secGroupResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling sec group") + } + + for _, secGroup := range secGroupResp.Resources { + secGroup.Entity.Guid = secGroup.Meta.Guid + secGroup.Entity.c = s.c + for i, space := range secGroup.Entity.SpacesData { + space.Entity.Guid = space.Meta.Guid + secGroup.Entity.SpacesData[i] = space + } + if len(secGroup.Entity.SpacesData) == 0 { + spaces, err := secGroup.Entity.ListSpaceResources() + if err != nil { + return nil, err + } + for _, space := range spaces { + secGroup.Entity.SpacesData = append(secGroup.Entity.SpacesData, space) + } + } + secGroups = append(secGroups, secGroup.Entity) + } + + requestURL = secGroupResp.NextUrl + resp.Body.Close() + } + return secGroups, nil +} + +func (s *Space) GetServiceOfferings() (ServiceOfferingResponse, error) { + var response ServiceOfferingResponse + requestURL := fmt.Sprintf("/v2/spaces/%s/services", s.Guid) + req := s.c.NewRequest("GET", requestURL) + + resp, err := s.c.DoRequest(req) + if err != nil { + return ServiceOfferingResponse{}, errors.Wrap(err, "Error requesting service offerings") + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return ServiceOfferingResponse{}, errors.Wrap(err, "Error reading service offering response") + } + + err = json.Unmarshal(body, &response) + if err != nil { + return ServiceOfferingResponse{}, errors.Wrap(err, "Error unmarshalling service offering response") + } + + return response, nil +} + +func (s *Space) Update(req SpaceRequest) (Space, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return Space{}, err + } + r := s.c.NewRequestWithBody("PUT", fmt.Sprintf("/v2/spaces/%s", s.Guid), buf) + resp, err := s.c.DoRequest(r) + if err != nil { + return Space{}, err + } + if resp.StatusCode != http.StatusCreated { + return Space{}, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return 
s.c.handleSpaceResp(resp) +} + +func (c *Client) ListSpacesByQuery(query url.Values) ([]Space, error) { + return c.fetchSpaces("/v2/spaces?" + query.Encode()) +} + +func (c *Client) ListSpaces() ([]Space, error) { + return c.ListSpacesByQuery(nil) +} + +func (c *Client) fetchSpaces(requestUrl string) ([]Space, error) { + var spaces []Space + for { + spaceResp, err := c.getSpaceResponse(requestUrl) + if err != nil { + return []Space{}, err + } + for _, space := range spaceResp.Resources { + spaces = append(spaces, c.mergeSpaceResource(space)) + } + requestUrl = spaceResp.NextUrl + if requestUrl == "" { + break + } + } + return spaces, nil +} + +func (c *Client) GetSpaceByName(spaceName string, orgGuid string) (space Space, err error) { + query := url.Values{} + query.Add("q", fmt.Sprintf("organization_guid:%s", orgGuid)) + query.Add("q", fmt.Sprintf("name:%s", spaceName)) + spaces, err := c.ListSpacesByQuery(query) + if err != nil { + return + } + + if len(spaces) == 0 { + return space, fmt.Errorf("No space found with name: `%s` in org with GUID: `%s`", spaceName, orgGuid) + } + + return spaces[0], nil + +} + +func (c *Client) GetSpaceByGuid(spaceGUID string) (Space, error) { + requestUrl := fmt.Sprintf("/v2/spaces/%s", spaceGUID) + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return Space{}, errors.Wrap(err, "Error requesting space info") + } + return c.handleSpaceResp(resp) +} + +func (c *Client) getSpaceResponse(requestUrl string) (SpaceResponse, error) { + var spaceResp SpaceResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return SpaceResponse{}, errors.Wrap(err, "Error requesting spaces") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return SpaceResponse{}, errors.Wrap(err, "Error reading space request") + } + err = json.Unmarshal(resBody, &spaceResp) + if err != nil { + return SpaceResponse{}, errors.Wrap(err, "Error 
unmarshalling space") + } + return spaceResp, nil +} + +func (c *Client) getSpaceRolesResponse(requestUrl string) (SpaceRoleResponse, error) { + var roleResp SpaceRoleResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return roleResp, errors.Wrap(err, "Error requesting space roles") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return roleResp, errors.Wrap(err, "Error reading space roles request") + } + err = json.Unmarshal(resBody, &roleResp) + if err != nil { + return roleResp, errors.Wrap(err, "Error unmarshalling space roles") + } + return roleResp, nil +} + +func (c *Client) handleSpaceResp(resp *http.Response) (Space, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return Space{}, err + } + var spaceResource SpaceResource + err = json.Unmarshal(body, &spaceResource) + if err != nil { + return Space{}, err + } + return c.mergeSpaceResource(spaceResource), nil +} + +func (c *Client) mergeSpaceResource(space SpaceResource) Space { + space.Entity.Guid = space.Meta.Guid + space.Entity.CreatedAt = space.Meta.CreatedAt + space.Entity.UpdatedAt = space.Meta.UpdatedAt + space.Entity.c = c + return space.Entity +} + +type serviceOfferingExtra ServiceOfferingExtra + +func (resource *ServiceOfferingExtra) UnmarshalJSON(rawData []byte) error { + if string(rawData) == "null" { + return nil + } + + extra := serviceOfferingExtra{} + + unquoted, err := strconv.Unquote(string(rawData)) + if err != nil { + return err + } + + err = json.Unmarshal([]byte(unquoted), &extra) + if err != nil { + return err + } + + *resource = ServiceOfferingExtra(extra) + + return nil +} + +func (c *Client) IsolationSegmentForSpace(spaceGUID, isolationSegmentGUID string) error { + return c.updateSpaceIsolationSegment(spaceGUID, map[string]interface{}{"guid": isolationSegmentGUID}) +} + +func (c *Client) ResetIsolationSegmentForSpace(spaceGUID string) error { 
+ return c.updateSpaceIsolationSegment(spaceGUID, nil) +} + +func (c *Client) updateSpaceIsolationSegment(spaceGUID string, data interface{}) error { + requestURL := fmt.Sprintf("/v3/spaces/%s/relationships/isolation_segment", spaceGUID) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]interface{}{"data": data}) + if err != nil { + return err + } + r := c.NewRequestWithBody("PATCH", requestURL, buf) + resp, err := c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return errors.Wrapf(err, "Error setting isolation segment for space %s, response code: %d", spaceGUID, resp.StatusCode) + } + return nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/stacks.go b/vendor/github.com/cloudfoundry-community/go-cfclient/stacks.go new file mode 100644 index 000000000000..23124c00036b --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/stacks.go @@ -0,0 +1,76 @@ +package cfclient + +import ( + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type StacksResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []StacksResource `json:"resources"` +} + +type StacksResource struct { + Meta Meta `json:"metadata"` + Entity Stack `json:"entity"` +} + +type Stack struct { + Guid string `json:"guid"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Description string `json:"description"` + c *Client +} + +func (c *Client) ListStacksByQuery(query url.Values) ([]Stack, error) { + var stacks []Stack + requestUrl := "/v2/stacks?" 
+ query.Encode() + for { + stacksResp, err := c.getStacksResponse(requestUrl) + if err != nil { + return []Stack{}, err + } + for _, stack := range stacksResp.Resources { + stack.Entity.Guid = stack.Meta.Guid + stack.Entity.CreatedAt = stack.Meta.CreatedAt + stack.Entity.UpdatedAt = stack.Meta.UpdatedAt + stack.Entity.c = c + stacks = append(stacks, stack.Entity) + } + requestUrl = stacksResp.NextUrl + if requestUrl == "" { + break + } + } + return stacks, nil +} + +func (c *Client) ListStacks() ([]Stack, error) { + return c.ListStacksByQuery(nil) +} + +func (c *Client) getStacksResponse(requestUrl string) (StacksResponse, error) { + var stacksResp StacksResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return StacksResponse{}, errors.Wrap(err, "Error requesting stacks") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return StacksResponse{}, errors.Wrap(err, "Error reading stacks body") + } + err = json.Unmarshal(resBody, &stacksResp) + if err != nil { + return StacksResponse{}, errors.Wrap(err, "Error unmarshalling stacks") + } + return stacksResp, nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/tasks.go b/vendor/github.com/cloudfoundry-community/go-cfclient/tasks.go new file mode 100644 index 000000000000..cce27ef36f4a --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/tasks.go @@ -0,0 +1,204 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "time" + + "github.com/pkg/errors" +) + +// TaskListResponse is the JSON response from the API. +type TaskListResponse struct { + Pagination Pagination `json:"pagination"` + Tasks []Task `json:"resources"` +} + +// Task is a description of a task element. 
+type Task struct { + GUID string `json:"guid"` + SequenceID int `json:"sequence_id"` + Name string `json:"name"` + Command string `json:"command"` + State string `json:"state"` + MemoryInMb int `json:"memory_in_mb"` + DiskInMb int `json:"disk_in_mb"` + Result struct { + FailureReason string `json:"failure_reason"` + } `json:"result"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DropletGUID string `json:"droplet_guid"` + Links struct { + Self Link `json:"self"` + App Link `json:"app"` + Droplet Link `json:"droplet"` + } `json:"links"` +} + +// TaskRequest is a v3 JSON object as described in: +// http://v3-apidocs.cloudfoundry.org/version/3.0.0/index.html#create-a-task +type TaskRequest struct { + Command string `json:"command"` + Name string `json:"name"` + MemoryInMegabyte int `json:"memory_in_mb"` + DiskInMegabyte int `json:"disk_in_mb"` + DropletGUID string `json:"droplet_guid"` +} + +func (c *Client) makeTaskListRequestWithParams(baseUrl string, query url.Values) ([]byte, error) { + requestUrl := baseUrl + "?" 
+ query.Encode() + req := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(req) + if err != nil { + return nil, errors.Wrap(err, "Error requesting tasks") + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, errors.Wrapf(err, "Error requesting tasks: status code not 200, it was %d", resp.StatusCode) + } + return ioutil.ReadAll(resp.Body) +} + +func parseTaskListRespones(answer []byte) (TaskListResponse, error) { + var response TaskListResponse + err := json.Unmarshal(answer, &response) + if err != nil { + return response, errors.Wrap(err, "Error unmarshaling response %v") + } + return response, nil +} + +func (c *Client) handleTasksApiCall(apiUrl string, query url.Values) ([]Task, error) { + body, err := c.makeTaskListRequestWithParams(apiUrl, query) + if err != nil { + return nil, errors.Wrap(err, "Error requesting tasks") + } + response, err := parseTaskListRespones(body) + if err != nil { + return nil, errors.Wrap(err, "Error reading tasks") + } + return response.Tasks, nil +} + +// ListTasks returns all tasks the user has access to. +// See http://v3-apidocs.cloudfoundry.org/version/3.12.0/index.html#list-tasks +func (c *Client) ListTasks() ([]Task, error) { + return c.handleTasksApiCall("/v3/tasks", url.Values{}) +} + +// ListTasksByQuery returns all tasks the user has access to, with query parameters. +// See http://v3-apidocs.cloudfoundry.org/version/3.12.0/index.html#list-tasks +func (c *Client) ListTasksByQuery(query url.Values) ([]Task, error) { + return c.handleTasksApiCall("/v3/tasks", query) +} + +// TasksByApp returns task structures which aligned to an app identified by the given guid. 
+// See: http://v3-apidocs.cloudfoundry.org/version/3.12.0/index.html#list-tasks-for-an-app +func (c *Client) TasksByApp(guid string) ([]Task, error) { + return c.TasksByAppByQuery(guid, url.Values{}) +} + +// TasksByAppByQuery returns task structures which aligned to an app identified by the given guid +// and filtered by the given query parameters. +// See: http://v3-apidocs.cloudfoundry.org/version/3.12.0/index.html#list-tasks-for-an-app +func (c *Client) TasksByAppByQuery(guid string, query url.Values) ([]Task, error) { + uri := fmt.Sprintf("/v3/apps/%s/tasks", guid) + return c.handleTasksApiCall(uri, query) +} + +func createReader(tr TaskRequest) (io.Reader, error) { + rmap := make(map[string]string) + rmap["command"] = tr.Command + if tr.Name != "" { + rmap["name"] = tr.Name + } + // setting droplet GUID causing issues + if tr.MemoryInMegabyte != 0 { + rmap["memory_in_mb"] = fmt.Sprintf("%d", tr.MemoryInMegabyte) + } + if tr.DiskInMegabyte != 0 { + rmap["disk_in_mb"] = fmt.Sprintf("%d", tr.DiskInMegabyte) + } + + bodyReader := bytes.NewBuffer(nil) + enc := json.NewEncoder(bodyReader) + if err := enc.Encode(rmap); err != nil { + return nil, errors.Wrap(err, "Error during encoding task request") + } + return bodyReader, nil +} + +// CreateTask creates a new task in CF system and returns its structure. 
+func (c *Client) CreateTask(tr TaskRequest) (task Task, err error) { + bodyReader, err := createReader(tr) + if err != nil { + return task, err + } + + request := fmt.Sprintf("/v3/apps/%s/tasks", tr.DropletGUID) + req := c.NewRequestWithBody("POST", request, bodyReader) + + resp, err := c.DoRequest(req) + if err != nil { + return task, errors.Wrap(err, "Error creating task") + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return task, errors.Wrap(err, "Error reading task after creation") + } + + err = json.Unmarshal(body, &task) + if err != nil { + return task, errors.Wrap(err, "Error unmarshaling task") + } + return task, err +} + +// GetTaskByGuid returns a task structure by requesting it with the tasks GUID. +func (c *Client) GetTaskByGuid(guid string) (task Task, err error) { + request := fmt.Sprintf("/v3/tasks/%s", guid) + req := c.NewRequest("GET", request) + + resp, err := c.DoRequest(req) + if err != nil { + return task, errors.Wrap(err, "Error requesting task") + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return task, errors.Wrap(err, "Error reading task") + } + + err = json.Unmarshal(body, &task) + if err != nil { + return task, errors.Wrap(err, "Error unmarshaling task") + } + return task, err +} + +func (c *Client) TaskByGuid(guid string) (task Task, err error) { + return c.GetTaskByGuid(guid) +} + +// TerminateTask cancels a task identified by its GUID. 
+func (c *Client) TerminateTask(guid string) error { + req := c.NewRequest("PUT", fmt.Sprintf("/v3/tasks/%s/cancel", guid)) + resp, err := c.DoRequest(req) + if err != nil { + return errors.Wrap(err, "Error terminating task") + } + defer resp.Body.Close() + + if resp.StatusCode != 202 { + return errors.Wrapf(err, "Failed terminating task, response status code %d", resp.StatusCode) + } + return nil +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/types.go b/vendor/github.com/cloudfoundry-community/go-cfclient/types.go new file mode 100644 index 000000000000..279106bfa40b --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/types.go @@ -0,0 +1,8 @@ +package cfclient + +type Meta struct { + Guid string `json:"guid"` + Url string `json:"url"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/user_provided_service_instances.go b/vendor/github.com/cloudfoundry-community/go-cfclient/user_provided_service_instances.go new file mode 100644 index 000000000000..221057ac2bbd --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/user_provided_service_instances.go @@ -0,0 +1,185 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type UserProvidedServiceInstancesResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []UserProvidedServiceInstanceResource `json:"resources"` +} + +type UserProvidedServiceInstanceResource struct { + Meta Meta `json:"metadata"` + Entity UserProvidedServiceInstance `json:"entity"` +} + +type UserProvidedServiceInstance struct { + Guid string `json:"guid"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Credentials map[string]interface{} `json:"credentials"` + SpaceGuid 
string `json:"space_guid"` + Type string `json:"type"` + Tags []string `json:"tags"` + SpaceUrl string `json:"space_url"` + ServiceBindingsUrl string `json:"service_bindings_url"` + RoutesUrl string `json:"routes_url"` + RouteServiceUrl string `json:"route_service_url"` + SyslogDrainUrl string `json:"syslog_drain_url"` + c *Client +} + +type UserProvidedServiceInstanceRequest struct { + Name string `json:"name"` + Credentials map[string]interface{} `json:"credentials"` + SpaceGuid string `json:"space_guid"` + Tags []string `json:"tags"` + RouteServiceUrl string `json:"route_service_url"` + SyslogDrainUrl string `json:"syslog_drain_url"` +} + +func (c *Client) ListUserProvidedServiceInstancesByQuery(query url.Values) ([]UserProvidedServiceInstance, error) { + var instances []UserProvidedServiceInstance + + requestUrl := "/v2/user_provided_service_instances?" + query.Encode() + for { + var sir UserProvidedServiceInstancesResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting user provided service instances") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading user provided service instances request:") + } + + err = json.Unmarshal(resBody, &sir) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling user provided service instances") + } + for _, instance := range sir.Resources { + instance.Entity.Guid = instance.Meta.Guid + instance.Entity.CreatedAt = instance.Meta.CreatedAt + instance.Entity.UpdatedAt = instance.Meta.UpdatedAt + instance.Entity.c = c + instances = append(instances, instance.Entity) + } + + requestUrl = sir.NextUrl + if requestUrl == "" { + break + } + } + return instances, nil +} + +func (c *Client) ListUserProvidedServiceInstances() ([]UserProvidedServiceInstance, error) { + return c.ListUserProvidedServiceInstancesByQuery(nil) +} + +func (c *Client) 
GetUserProvidedServiceInstanceByGuid(guid string) (UserProvidedServiceInstance, error) { + var sir UserProvidedServiceInstanceResource + req := c.NewRequest("GET", "/v2/user_provided_service_instances/"+guid) + res, err := c.DoRequest(req) + if err != nil { + return UserProvidedServiceInstance{}, errors.Wrap(err, "Error requesting user provided service instance") + } + + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return UserProvidedServiceInstance{}, errors.Wrap(err, "Error reading user provided service instance response") + } + err = json.Unmarshal(data, &sir) + if err != nil { + return UserProvidedServiceInstance{}, errors.Wrap(err, "Error JSON parsing user provided service instance response") + } + sir.Entity.Guid = sir.Meta.Guid + sir.Entity.CreatedAt = sir.Meta.CreatedAt + sir.Entity.UpdatedAt = sir.Meta.UpdatedAt + sir.Entity.c = c + return sir.Entity, nil +} + +func (c *Client) UserProvidedServiceInstanceByGuid(guid string) (UserProvidedServiceInstance, error) { + return c.GetUserProvidedServiceInstanceByGuid(guid) +} + +func (c *Client) CreateUserProvidedServiceInstance(req UserProvidedServiceInstanceRequest) (*UserProvidedServiceInstance, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return nil, err + } + r := c.NewRequestWithBody("POST", "/v2/user_provided_service_instances", buf) + resp, err := c.DoRequest(r) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + + return c.handleUserProvidedServiceInstanceResp(resp) +} + +func (c *Client) DeleteUserProvidedServiceInstance(guid string) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/user_provided_service_instances/%s", guid))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting user provided service instance %s, response 
code %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) UpdateUserProvidedServiceInstance(guid string, req UserProvidedServiceInstanceRequest) (*UserProvidedServiceInstance, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return nil, err + } + r := c.NewRequestWithBody("PUT", fmt.Sprintf("/v2/user_provided_service_instances/%s", guid), buf) + resp, err := c.DoRequest(r) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return c.handleUserProvidedServiceInstanceResp(resp) +} + +func (c *Client) handleUserProvidedServiceInstanceResp(resp *http.Response) (*UserProvidedServiceInstance, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return nil, err + } + var upsResource UserProvidedServiceInstanceResource + err = json.Unmarshal(body, &upsResource) + if err != nil { + return nil, err + } + return c.mergeUserProvidedServiceInstanceResource(upsResource), nil +} + +func (c *Client) mergeUserProvidedServiceInstanceResource(ups UserProvidedServiceInstanceResource) *UserProvidedServiceInstance { + ups.Entity.Guid = ups.Meta.Guid + ups.Entity.CreatedAt = ups.Meta.CreatedAt + ups.Entity.UpdatedAt = ups.Meta.UpdatedAt + ups.Entity.c = c + return &ups.Entity +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/users.go b/vendor/github.com/cloudfoundry-community/go-cfclient/users.go new file mode 100644 index 000000000000..b0ddc10c7e31 --- /dev/null +++ b/vendor/github.com/cloudfoundry-community/go-cfclient/users.go @@ -0,0 +1,201 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type UserRequest struct { + Guid string `json:"guid"` + DefaultSpaceGuid string `json:"default_space_guid,omitempty"` +} + +type Users []User + +type User struct 
{ + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Admin bool `json:"admin"` + Active bool `json:"active"` + DefaultSpaceGUID string `json:"default_space_guid"` + Username string `json:"username"` + SpacesURL string `json:"spaces_url"` + OrgsURL string `json:"organizations_url"` + ManagedOrgsURL string `json:"managed_organizations_url"` + BillingManagedOrgsURL string `json:"billing_managed_organizations_url"` + AuditedOrgsURL string `json:"audited_organizations_url"` + ManagedSpacesURL string `json:"managed_spaces_url"` + AuditedSpacesURL string `json:"audited_spaces_url"` + c *Client +} + +type UserResource struct { + Meta Meta `json:"metadata"` + Entity User `json:"entity"` +} + +type UserResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []UserResource `json:"resources"` +} + +// GetUserByGUID retrieves the user with the provided guid. +func (c *Client) GetUserByGUID(guid string) (User, error) { + var userRes UserResource + r := c.NewRequest("GET", "/v2/users/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return User{}, err + } + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return User{}, err + } + err = json.Unmarshal(body, &userRes) + if err != nil { + return User{}, err + } + return c.mergeUserResource(userRes), nil +} + +func (c *Client) ListUsersByQuery(query url.Values) (Users, error) { + var users []User + requestUrl := "/v2/users?" 
+ query.Encode() + for { + userResp, err := c.getUserResponse(requestUrl) + if err != nil { + return []User{}, err + } + for _, user := range userResp.Resources { + user.Entity.Guid = user.Meta.Guid + user.Entity.CreatedAt = user.Meta.CreatedAt + user.Entity.UpdatedAt = user.Meta.UpdatedAt + user.Entity.c = c + users = append(users, user.Entity) + } + requestUrl = userResp.NextUrl + if requestUrl == "" { + break + } + } + return users, nil +} + +func (c *Client) ListUsers() (Users, error) { + return c.ListUsersByQuery(nil) +} + +func (c *Client) ListUserSpaces(userGuid string) ([]Space, error) { + return c.fetchSpaces(fmt.Sprintf("/v2/users/%s/spaces", userGuid)) +} + +func (c *Client) ListUserAuditedSpaces(userGuid string) ([]Space, error) { + return c.fetchSpaces(fmt.Sprintf("/v2/users/%s/audited_spaces", userGuid)) +} + +func (c *Client) ListUserManagedSpaces(userGuid string) ([]Space, error) { + return c.fetchSpaces(fmt.Sprintf("/v2/users/%s/managed_spaces", userGuid)) +} + +func (c *Client) ListUserOrgs(userGuid string) ([]Org, error) { + return c.fetchOrgs(fmt.Sprintf("/v2/users/%s/organizations", userGuid)) +} + +func (c *Client) ListUserManagedOrgs(userGuid string) ([]Org, error) { + return c.fetchOrgs(fmt.Sprintf("/v2/users/%s/managed_organizations", userGuid)) +} + +func (c *Client) ListUserAuditedOrgs(userGuid string) ([]Org, error) { + return c.fetchOrgs(fmt.Sprintf("/v2/users/%s/audited_organizations", userGuid)) +} + +func (c *Client) ListUserBillingManagedOrgs(userGuid string) ([]Org, error) { + return c.fetchOrgs(fmt.Sprintf("/v2/users/%s/billing_managed_organizations", userGuid)) +} + +func (c *Client) CreateUser(req UserRequest) (User, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return User{}, err + } + r := c.NewRequestWithBody("POST", "/v2/users", buf) + resp, err := c.DoRequest(r) + if err != nil { + return User{}, err + } + if resp.StatusCode != http.StatusCreated { + return User{}, 
errors.Wrapf(err, "Error creating user, response code: %d", resp.StatusCode) + } + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return User{}, err + } + var userResource UserResource + err = json.Unmarshal(body, &userResource) + if err != nil { + return User{}, err + } + user := userResource.Entity + user.Guid = userResource.Meta.Guid + user.c = c + return user, nil +} + +func (c *Client) DeleteUser(userGuid string) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/users/%s", userGuid))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting user %s, response code: %d", userGuid, resp.StatusCode) + } + return nil +} + +func (u Users) GetUserByUsername(username string) User { + for _, user := range u { + if user.Username == username { + return user + } + } + return User{} +} + +func (c *Client) getUserResponse(requestUrl string) (UserResponse, error) { + var userResp UserResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return UserResponse{}, errors.Wrap(err, "Error requesting users") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return UserResponse{}, errors.Wrap(err, "Error reading user request") + } + err = json.Unmarshal(resBody, &userResp) + if err != nil { + return UserResponse{}, errors.Wrap(err, "Error unmarshalling user") + } + return userResp, nil +} + +func (c *Client) mergeUserResource(u UserResource) User { + u.Entity.Guid = u.Meta.Guid + u.Entity.CreatedAt = u.Meta.CreatedAt + u.Entity.UpdatedAt = u.Meta.UpdatedAt + u.Entity.c = c + return u.Entity +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/v3types.go b/vendor/github.com/cloudfoundry-community/go-cfclient/v3types.go new file mode 100644 index 000000000000..7028f167432a --- /dev/null +++ 
b/vendor/github.com/cloudfoundry-community/go-cfclient/v3types.go @@ -0,0 +1,17 @@ +package cfclient + +// Pagination is used by the V3 apis +type Pagination struct { + TotalResults int `json:"total_results"` + TotalPages int `json:"total_pages"` + First Link `json:"first"` + Last Link `json:"last"` + Next interface{} `json:"next"` + Previous interface{} `json:"previous"` +} + +// Link is a HATEOAS-style link for v3 apis +type Link struct { + Href string `json:"href"` + Method string `json:"method,omitempty"` +} diff --git a/vendor/github.com/cloudfoundry/sonde-go/LICENSE b/vendor/github.com/cloudfoundry/sonde-go/LICENSE new file mode 100644 index 000000000000..f433b1a53f5b --- /dev/null +++ b/vendor/github.com/cloudfoundry/sonde-go/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/cloudfoundry/sonde-go/NOTICE b/vendor/github.com/cloudfoundry/sonde-go/NOTICE new file mode 100644 index 000000000000..29ab4afcea5b --- /dev/null +++ b/vendor/github.com/cloudfoundry/sonde-go/NOTICE @@ -0,0 +1,15 @@ +sonde-go + +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +Limitations under the License. diff --git a/vendor/github.com/cloudfoundry/sonde-go/events/envelope.pb.go b/vendor/github.com/cloudfoundry/sonde-go/events/envelope.pb.go new file mode 100644 index 000000000000..c0847c3ceb52 --- /dev/null +++ b/vendor/github.com/cloudfoundry/sonde-go/events/envelope.pb.go @@ -0,0 +1,1186 @@ +// Code generated by protoc-gen-gogo. +// source: envelope.proto +// DO NOT EDIT! + +/* + Package events is a generated protocol buffer package. + + It is generated from these files: + envelope.proto + error.proto + http.proto + log.proto + metric.proto + uuid.proto + + It has these top-level messages: + Envelope + Error + HttpStartStop + LogMessage + ValueMetric + CounterEvent + ContainerMetric + UUID +*/ +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// / Type of the wrapped event. 
+type Envelope_EventType int32 + +const ( + // Removed Heartbeat at position 1 + // Removed HttpStart at position 2 + // Removed HttpStop at position 3 + Envelope_HttpStartStop Envelope_EventType = 4 + Envelope_LogMessage Envelope_EventType = 5 + Envelope_ValueMetric Envelope_EventType = 6 + Envelope_CounterEvent Envelope_EventType = 7 + Envelope_Error Envelope_EventType = 8 + Envelope_ContainerMetric Envelope_EventType = 9 +) + +var Envelope_EventType_name = map[int32]string{ + 4: "HttpStartStop", + 5: "LogMessage", + 6: "ValueMetric", + 7: "CounterEvent", + 8: "Error", + 9: "ContainerMetric", +} +var Envelope_EventType_value = map[string]int32{ + "HttpStartStop": 4, + "LogMessage": 5, + "ValueMetric": 6, + "CounterEvent": 7, + "Error": 8, + "ContainerMetric": 9, +} + +func (x Envelope_EventType) Enum() *Envelope_EventType { + p := new(Envelope_EventType) + *p = x + return p +} +func (x Envelope_EventType) String() string { + return proto.EnumName(Envelope_EventType_name, int32(x)) +} +func (x *Envelope_EventType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Envelope_EventType_value, data, "Envelope_EventType") + if err != nil { + return err + } + *x = Envelope_EventType(value) + return nil +} +func (Envelope_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorEnvelope, []int{0, 0} } + +// / Envelope wraps an Event and adds metadata. 
+type Envelope struct { + Origin *string `protobuf:"bytes,1,req,name=origin" json:"origin,omitempty"` + EventType *Envelope_EventType `protobuf:"varint,2,req,name=eventType,enum=events.Envelope_EventType" json:"eventType,omitempty"` + Timestamp *int64 `protobuf:"varint,6,opt,name=timestamp" json:"timestamp,omitempty"` + Deployment *string `protobuf:"bytes,13,opt,name=deployment" json:"deployment,omitempty"` + Job *string `protobuf:"bytes,14,opt,name=job" json:"job,omitempty"` + Index *string `protobuf:"bytes,15,opt,name=index" json:"index,omitempty"` + Ip *string `protobuf:"bytes,16,opt,name=ip" json:"ip,omitempty"` + Tags map[string]string `protobuf:"bytes,17,rep,name=tags" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Removed Heartbeat at position 3 + // Removed HttpStart at position 4 + // Removed HttpStop at position 5 + HttpStartStop *HttpStartStop `protobuf:"bytes,7,opt,name=httpStartStop" json:"httpStartStop,omitempty"` + LogMessage *LogMessage `protobuf:"bytes,8,opt,name=logMessage" json:"logMessage,omitempty"` + ValueMetric *ValueMetric `protobuf:"bytes,9,opt,name=valueMetric" json:"valueMetric,omitempty"` + CounterEvent *CounterEvent `protobuf:"bytes,10,opt,name=counterEvent" json:"counterEvent,omitempty"` + Error *Error `protobuf:"bytes,11,opt,name=error" json:"error,omitempty"` + ContainerMetric *ContainerMetric `protobuf:"bytes,12,opt,name=containerMetric" json:"containerMetric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Envelope) Reset() { *m = Envelope{} } +func (m *Envelope) String() string { return proto.CompactTextString(m) } +func (*Envelope) ProtoMessage() {} +func (*Envelope) Descriptor() ([]byte, []int) { return fileDescriptorEnvelope, []int{0} } + +func (m *Envelope) GetOrigin() string { + if m != nil && m.Origin != nil { + return *m.Origin + } + return "" +} + +func (m *Envelope) GetEventType() Envelope_EventType { + if m != nil && m.EventType != nil { + return 
*m.EventType + } + return Envelope_HttpStartStop +} + +func (m *Envelope) GetTimestamp() int64 { + if m != nil && m.Timestamp != nil { + return *m.Timestamp + } + return 0 +} + +func (m *Envelope) GetDeployment() string { + if m != nil && m.Deployment != nil { + return *m.Deployment + } + return "" +} + +func (m *Envelope) GetJob() string { + if m != nil && m.Job != nil { + return *m.Job + } + return "" +} + +func (m *Envelope) GetIndex() string { + if m != nil && m.Index != nil { + return *m.Index + } + return "" +} + +func (m *Envelope) GetIp() string { + if m != nil && m.Ip != nil { + return *m.Ip + } + return "" +} + +func (m *Envelope) GetTags() map[string]string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Envelope) GetHttpStartStop() *HttpStartStop { + if m != nil { + return m.HttpStartStop + } + return nil +} + +func (m *Envelope) GetLogMessage() *LogMessage { + if m != nil { + return m.LogMessage + } + return nil +} + +func (m *Envelope) GetValueMetric() *ValueMetric { + if m != nil { + return m.ValueMetric + } + return nil +} + +func (m *Envelope) GetCounterEvent() *CounterEvent { + if m != nil { + return m.CounterEvent + } + return nil +} + +func (m *Envelope) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *Envelope) GetContainerMetric() *ContainerMetric { + if m != nil { + return m.ContainerMetric + } + return nil +} + +func init() { + proto.RegisterType((*Envelope)(nil), "events.Envelope") + proto.RegisterEnum("events.Envelope_EventType", Envelope_EventType_name, Envelope_EventType_value) +} +func (m *Envelope) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Envelope) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Origin == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("origin") + } else { + 
data[i] = 0xa + i++ + i = encodeVarintEnvelope(data, i, uint64(len(*m.Origin))) + i += copy(data[i:], *m.Origin) + } + if m.EventType == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("eventType") + } else { + data[i] = 0x10 + i++ + i = encodeVarintEnvelope(data, i, uint64(*m.EventType)) + } + if m.Timestamp != nil { + data[i] = 0x30 + i++ + i = encodeVarintEnvelope(data, i, uint64(*m.Timestamp)) + } + if m.HttpStartStop != nil { + data[i] = 0x3a + i++ + i = encodeVarintEnvelope(data, i, uint64(m.HttpStartStop.Size())) + n1, err := m.HttpStartStop.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.LogMessage != nil { + data[i] = 0x42 + i++ + i = encodeVarintEnvelope(data, i, uint64(m.LogMessage.Size())) + n2, err := m.LogMessage.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.ValueMetric != nil { + data[i] = 0x4a + i++ + i = encodeVarintEnvelope(data, i, uint64(m.ValueMetric.Size())) + n3, err := m.ValueMetric.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.CounterEvent != nil { + data[i] = 0x52 + i++ + i = encodeVarintEnvelope(data, i, uint64(m.CounterEvent.Size())) + n4, err := m.CounterEvent.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Error != nil { + data[i] = 0x5a + i++ + i = encodeVarintEnvelope(data, i, uint64(m.Error.Size())) + n5, err := m.Error.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.ContainerMetric != nil { + data[i] = 0x62 + i++ + i = encodeVarintEnvelope(data, i, uint64(m.ContainerMetric.Size())) + n6, err := m.ContainerMetric.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Deployment != nil { + data[i] = 0x6a + i++ + i = encodeVarintEnvelope(data, i, uint64(len(*m.Deployment))) + i += copy(data[i:], *m.Deployment) + } + if m.Job != nil { + data[i] = 0x72 + i++ + i = encodeVarintEnvelope(data, i, uint64(len(*m.Job))) + i += 
copy(data[i:], *m.Job) + } + if m.Index != nil { + data[i] = 0x7a + i++ + i = encodeVarintEnvelope(data, i, uint64(len(*m.Index))) + i += copy(data[i:], *m.Index) + } + if m.Ip != nil { + data[i] = 0x82 + i++ + data[i] = 0x1 + i++ + i = encodeVarintEnvelope(data, i, uint64(len(*m.Ip))) + i += copy(data[i:], *m.Ip) + } + if len(m.Tags) > 0 { + for k, _ := range m.Tags { + data[i] = 0x8a + i++ + data[i] = 0x1 + i++ + v := m.Tags[k] + mapSize := 1 + len(k) + sovEnvelope(uint64(len(k))) + 1 + len(v) + sovEnvelope(uint64(len(v))) + i = encodeVarintEnvelope(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintEnvelope(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintEnvelope(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Envelope(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Envelope(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintEnvelope(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Envelope) Size() (n int) { + var l int + _ = l + if m.Origin != nil { + l = len(*m.Origin) + n += 1 + l + sovEnvelope(uint64(l)) + } + if m.EventType != nil { + n += 1 + sovEnvelope(uint64(*m.EventType)) + } + if m.Timestamp != nil { + n += 1 + sovEnvelope(uint64(*m.Timestamp)) + } + if m.HttpStartStop != nil { + l = 
m.HttpStartStop.Size() + n += 1 + l + sovEnvelope(uint64(l)) + } + if m.LogMessage != nil { + l = m.LogMessage.Size() + n += 1 + l + sovEnvelope(uint64(l)) + } + if m.ValueMetric != nil { + l = m.ValueMetric.Size() + n += 1 + l + sovEnvelope(uint64(l)) + } + if m.CounterEvent != nil { + l = m.CounterEvent.Size() + n += 1 + l + sovEnvelope(uint64(l)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovEnvelope(uint64(l)) + } + if m.ContainerMetric != nil { + l = m.ContainerMetric.Size() + n += 1 + l + sovEnvelope(uint64(l)) + } + if m.Deployment != nil { + l = len(*m.Deployment) + n += 1 + l + sovEnvelope(uint64(l)) + } + if m.Job != nil { + l = len(*m.Job) + n += 1 + l + sovEnvelope(uint64(l)) + } + if m.Index != nil { + l = len(*m.Index) + n += 1 + l + sovEnvelope(uint64(l)) + } + if m.Ip != nil { + l = len(*m.Ip) + n += 2 + l + sovEnvelope(uint64(l)) + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovEnvelope(uint64(len(k))) + 1 + len(v) + sovEnvelope(uint64(len(v))) + n += mapEntrySize + 2 + sovEnvelope(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEnvelope(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEnvelope(x uint64) (n int) { + return sovEnvelope(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Envelope) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Envelope: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Origin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Origin = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EventType", wireType) + } + var v Envelope_EventType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (Envelope_EventType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.EventType = &v + hasFields[0] |= uint64(0x00000002) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamp = &v + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpStartStop", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HttpStartStop == nil { + m.HttpStartStop = &HttpStartStop{} + } + if err := m.HttpStartStop.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogMessage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogMessage == nil { + m.LogMessage = &LogMessage{} + } + if err := m.LogMessage.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueMetric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValueMetric == nil { + m.ValueMetric = &ValueMetric{} + } + if err := m.ValueMetric.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CounterEvent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CounterEvent == nil { + m.CounterEvent = &CounterEvent{} + } + if err := m.CounterEvent.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerMetric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerMetric == nil { + m.ContainerMetric = &ContainerMetric{} + } + if err := m.ContainerMetric.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Deployment = &s + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Job", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Job = &s + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Index = &s + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Ip = &s + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthEnvelope + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Tags == nil { + m.Tags = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthEnvelope + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Tags[mapkey] = mapvalue + } else { + var mapvalue string + m.Tags[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvelope(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvelope + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("origin") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("eventType") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEnvelope(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvelope + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvelope + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvelope + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEnvelope + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvelope + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEnvelope(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return 
iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEnvelope = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEnvelope = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("envelope.proto", fileDescriptorEnvelope) } + +var fileDescriptorEnvelope = []byte{ + // 522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x4f, 0x6f, 0x9b, 0x4e, + 0x10, 0x15, 0x38, 0x76, 0xcc, 0xe0, 0x3f, 0x64, 0x92, 0xdf, 0xaf, 0x2b, 0xab, 0xb2, 0x68, 0x7a, + 0xe1, 0x52, 0x22, 0x59, 0xaa, 0x6a, 0xb5, 0xea, 0xa1, 0xa9, 0x5c, 0xf5, 0xd0, 0x5c, 0x48, 0xd4, + 0x3b, 0x86, 0x0d, 0xa1, 0xc1, 0x2c, 0x5d, 0x16, 0xab, 0x7c, 0xc3, 0x1e, 0xfb, 0x11, 0x2a, 0x7f, + 0x8a, 0x1e, 0x2b, 0x06, 0xb0, 0x71, 0xd4, 0xdb, 0xbc, 0x79, 0xef, 0x31, 0xb3, 0x6f, 0x80, 0x09, + 0x4f, 0xb7, 0x3c, 0x11, 0x19, 0x77, 0x33, 0x29, 0x94, 0xc0, 0x01, 0xdf, 0xf2, 0x54, 0xe5, 0xb3, + 0x57, 0x51, 0xac, 0x1e, 0x8a, 0xb5, 0x1b, 0x88, 0xcd, 0x55, 0x24, 0x22, 0x71, 0x45, 0xf4, 0xba, + 0xb8, 0x27, 0x44, 0x80, 0xaa, 0xda, 0x36, 0x83, 0x07, 0xa5, 0xb2, 0xa6, 0x36, 0x12, 0x11, 0x35, + 0xe5, 0x68, 0xc3, 0x95, 0x8c, 0x83, 0x06, 0x99, 0x5c, 0x4a, 0x21, 0x6b, 0x70, 0xf9, 0xa7, 0x0f, + 0xc3, 0x55, 0x33, 0x1b, 0xff, 0x87, 0x81, 0x90, 0x71, 0x14, 0xa7, 0x4c, 0xb3, 0x75, 0xc7, 0xf0, + 0x1a, 0x84, 0x4b, 0x30, 0x68, 0x9f, 0xbb, 0x32, 0xe3, 0x4c, 0xb7, 0x75, 0x67, 0xb2, 0x98, 0xb9, + 0xf5, 0x86, 0x6e, 0x6b, 0x76, 0x57, 0xad, 0xc2, 0x3b, 0x88, 0xf1, 0x39, 0x18, 0x2a, 0xde, 0xf0, + 0x5c, 0xf9, 0x9b, 0x8c, 0x0d, 0x6c, 0xcd, 0xe9, 0x79, 0x87, 0x06, 0xbe, 0x83, 0x71, 0xb5, 0xf0, + 0xad, 0xf2, 0xa5, 0xba, 0x55, 0x22, 0x63, 0xa7, 0xb6, 0xe6, 0x98, 0x8b, 0xff, 0xda, 0x6f, 0x7f, + 0xee, 0x92, 0xde, 0xb1, 0x16, 0x17, 0x00, 0x89, 0x88, 0x6e, 0x78, 0x9e, 0xfb, 0x11, 0x67, 0x43, + 0x72, 0x62, 0xeb, 0xfc, 0xb2, 0x67, 0xbc, 0x8e, 0x0a, 0x5f, 0x83, 0xb9, 
0xf5, 0x93, 0x82, 0xdf, + 0x50, 0x1e, 0xcc, 0x20, 0xd3, 0x79, 0x6b, 0xfa, 0x7a, 0xa0, 0xbc, 0xae, 0x0e, 0x97, 0x30, 0x0a, + 0x44, 0x91, 0x2a, 0x2e, 0xe9, 0x91, 0x0c, 0xc8, 0x77, 0xd1, 0xfa, 0x3e, 0x76, 0x38, 0xef, 0x48, + 0x89, 0x2f, 0xa1, 0x4f, 0x69, 0x33, 0x93, 0x2c, 0xe3, 0x7d, 0x6a, 0x55, 0xd3, 0xab, 0x39, 0xfc, + 0x00, 0xd3, 0x40, 0xa4, 0xca, 0x8f, 0x53, 0x2e, 0x9b, 0xcd, 0x46, 0x24, 0x7f, 0x76, 0x98, 0x70, + 0x44, 0x7b, 0x4f, 0xf5, 0x38, 0x07, 0x08, 0x79, 0x96, 0x88, 0x72, 0x53, 0xed, 0x37, 0xb6, 0x35, + 0xc7, 0xf0, 0x3a, 0x1d, 0xb4, 0xa0, 0xf7, 0x4d, 0xac, 0xd9, 0x84, 0x88, 0xaa, 0xc4, 0x0b, 0xe8, + 0xc7, 0x69, 0xc8, 0x7f, 0xb0, 0x29, 0xf5, 0x6a, 0x80, 0x13, 0xd0, 0xe3, 0x8c, 0x59, 0xd4, 0xd2, + 0xe3, 0x0c, 0x5d, 0x38, 0x51, 0x7e, 0x94, 0xb3, 0x33, 0xbb, 0xe7, 0x98, 0xff, 0x38, 0xfa, 0x9d, + 0x1f, 0xe5, 0xab, 0x54, 0xc9, 0xd2, 0x23, 0xdd, 0xec, 0x0d, 0x18, 0xfb, 0x56, 0x35, 0xf4, 0x91, + 0x97, 0x4c, 0xab, 0x87, 0x3e, 0xf2, 0xb2, 0x1a, 0x4a, 0xb9, 0x32, 0xbd, 0x1e, 0x4a, 0xe0, 0xad, + 0xbe, 0xd4, 0x2e, 0xbf, 0x83, 0xb1, 0xff, 0x81, 0xf0, 0x0c, 0xc6, 0x47, 0xa7, 0xb7, 0x4e, 0x70, + 0x02, 0x70, 0xb8, 0xa9, 0xd5, 0xc7, 0x29, 0x98, 0x9d, 0x73, 0x59, 0x03, 0xb4, 0x60, 0xd4, 0xbd, + 0x83, 0x75, 0x8a, 0x06, 0xf4, 0x29, 0x66, 0x6b, 0x88, 0xe7, 0x30, 0x7d, 0x12, 0xa1, 0x65, 0x5c, + 0xbf, 0xff, 0xb9, 0x9b, 0x6b, 0xbf, 0x76, 0x73, 0xed, 0xf7, 0x6e, 0xae, 0xc1, 0x0b, 0x21, 0x23, + 0x37, 0x48, 0x44, 0x11, 0xde, 0x8b, 0x22, 0x0d, 0x65, 0xe9, 0x86, 0x52, 0x64, 0xb9, 0x48, 0x43, + 0xde, 0xbc, 0xfa, 0x7a, 0x44, 0x5f, 0xfe, 0xe4, 0x07, 0x4a, 0xc8, 0xf2, 0x6f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x44, 0xf5, 0x54, 0xc4, 0xb3, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/cloudfoundry/sonde-go/events/error.pb.go b/vendor/github.com/cloudfoundry/sonde-go/events/error.pb.go new file mode 100644 index 000000000000..42f0f562ba4b --- /dev/null +++ b/vendor/github.com/cloudfoundry/sonde-go/events/error.pb.go @@ -0,0 +1,427 @@ +// Code generated by protoc-gen-gogo. 
+// source: error.proto +// DO NOT EDIT! + +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// / An Error event represents an error in the originating process. +type Error struct { + Source *string `protobuf:"bytes,1,req,name=source" json:"source,omitempty"` + Code *int32 `protobuf:"varint,2,req,name=code" json:"code,omitempty"` + Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { return fileDescriptorError, []int{0} } + +func (m *Error) GetSource() string { + if m != nil && m.Source != nil { + return *m.Source + } + return "" +} + +func (m *Error) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *Error) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +func init() { + proto.RegisterType((*Error)(nil), "events.Error") +} +func (m *Error) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Error) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Source == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("source") + } else { + data[i] = 0xa + i++ + i = encodeVarintError(data, i, uint64(len(*m.Source))) + i += copy(data[i:], *m.Source) + } + if m.Code == nil { + return 0, 
github_com_gogo_protobuf_proto.NewRequiredNotSetError("code") + } else { + data[i] = 0x10 + i++ + i = encodeVarintError(data, i, uint64(*m.Code)) + } + if m.Message == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("message") + } else { + data[i] = 0x1a + i++ + i = encodeVarintError(data, i, uint64(len(*m.Message))) + i += copy(data[i:], *m.Message) + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Error(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Error(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintError(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Error) Size() (n int) { + var l int + _ = l + if m.Source != nil { + l = len(*m.Source) + n += 1 + l + sovError(uint64(l)) + } + if m.Code != nil { + n += 1 + sovError(uint64(*m.Code)) + } + if m.Message != nil { + l = len(*m.Message) + n += 1 + l + sovError(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovError(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozError(x uint64) (n int) { + return sovError(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Error) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowError + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Error: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Error: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowError + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthError + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Source = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowError + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Code = &v + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowError + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthError + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000004) + default: + iNdEx = preIndex + skippy, err := skipError(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthError + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("source") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("code") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("message") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipError(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowError + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowError + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowError + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx 
+= length + if length < 0 { + return 0, ErrInvalidLengthError + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowError + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipError(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthError = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowError = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("error.proto", fileDescriptorError) } + +var fileDescriptorError = []byte{ + // 192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0x2d, 0x2a, 0xca, + 0x2f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4b, 0x2d, 0x4b, 0xcd, 0x2b, 0x29, 0x96, + 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, + 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0xe4, 0xcb, + 0xc5, 0xea, 0x0a, 0x32, 0x45, 0x48, 0x8c, 0x8b, 0xad, 0x38, 0xbf, 0xb4, 0x28, 0x39, 0x55, 0x82, + 0x51, 0x81, 0x49, 0x83, 0x33, 0x08, 0xca, 0x13, 0x12, 0xe2, 0x62, 0x49, 0xce, 0x4f, 0x49, 0x95, + 0x60, 0x52, 0x60, 0xd2, 0x60, 0x0d, 0x02, 0xb3, 0x85, 0x24, 0xb8, 0xd8, 0x73, 0x53, 0x8b, 0x8b, + 0x13, 0xd3, 0x53, 0x25, 0x98, 0xc1, 0x8a, 0x61, 0x5c, 0x27, 0xdb, 0x13, 0x8f, 0xe4, 0x18, 0x2f, + 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x91, 0x4b, 0x31, 0xbf, 0x28, 0x5d, 0x2f, 0x39, 0x27, + 0xbf, 
0x34, 0x25, 0x2d, 0xbf, 0x34, 0x2f, 0xa5, 0xa8, 0x52, 0x2f, 0xa5, 0x28, 0xbf, 0xa0, 0x38, + 0x3f, 0x2f, 0x25, 0x55, 0x0f, 0xe2, 0x5c, 0x27, 0x1e, 0xb0, 0xed, 0x6e, 0x89, 0xc9, 0x25, 0xf9, + 0x45, 0x95, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x46, 0xed, 0x44, 0xa1, 0xd2, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/cloudfoundry/sonde-go/events/event.go b/vendor/github.com/cloudfoundry/sonde-go/events/event.go new file mode 100644 index 000000000000..aa6c96d0c40b --- /dev/null +++ b/vendor/github.com/cloudfoundry/sonde-go/events/event.go @@ -0,0 +1,5 @@ +package events + +type Event interface { + ProtoMessage() +} diff --git a/vendor/github.com/cloudfoundry/sonde-go/events/events_easyjson.go b/vendor/github.com/cloudfoundry/sonde-go/events/events_easyjson.go new file mode 100644 index 000000000000..71b815c468cb --- /dev/null +++ b/vendor/github.com/cloudfoundry/sonde-go/events/events_easyjson.go @@ -0,0 +1,1565 @@ +// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. 
+ +package events + +import ( + json "encoding/json" + easyjson "github.com/mailru/easyjson" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" +) + +// suppress unused package warning +var ( + _ *json.RawMessage + _ *jlexer.Lexer + _ *jwriter.Writer + _ easyjson.Marshaler +) + +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents(in *jlexer.Lexer, out *ValueMetric) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "name": + if in.IsNull() { + in.Skip() + out.Name = nil + } else { + if out.Name == nil { + out.Name = new(string) + } + *out.Name = string(in.String()) + } + case "value": + if in.IsNull() { + in.Skip() + out.Value = nil + } else { + if out.Value == nil { + out.Value = new(float64) + } + *out.Value = float64(in.Float64()) + } + case "unit": + if in.IsNull() { + in.Skip() + out.Unit = nil + } else { + if out.Unit == nil { + out.Unit = new(string) + } + *out.Unit = string(in.String()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents(out *jwriter.Writer, in ValueMetric) { + out.RawByte('{') + first := true + _ = first + if in.Name != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"name\":") + if in.Name == nil { + out.RawString("null") + } else { + out.String(string(*in.Name)) + } + } + if in.Value != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"value\":") + if in.Value == nil { + out.RawString("null") + } else { + out.Float64(float64(*in.Value)) + } + } + if in.Unit != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"unit\":") + if in.Unit == nil { + 
out.RawString("null") + } else { + out.String(string(*in.Unit)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v ValueMetric) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *ValueMetric) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents1(in *jlexer.Lexer, out *UUID) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "low": + if in.IsNull() { + in.Skip() + out.Low = nil + } else { + if out.Low == nil { + out.Low = new(uint64) + } + *out.Low = uint64(in.Uint64()) + } + case "high": + if in.IsNull() { + in.Skip() + out.High = nil + } else { + if out.High == nil { + out.High = new(uint64) + } + *out.High = uint64(in.Uint64()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents1(out *jwriter.Writer, in UUID) { + out.RawByte('{') + first := true + _ = first + if in.Low != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"low\":") + if in.Low == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.Low)) + } + } + if in.High != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"high\":") + if in.High == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.High)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v UUID) MarshalEasyJSON(w *jwriter.Writer) { + 
easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents1(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *UUID) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents1(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents2(in *jlexer.Lexer, out *LogMessage) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "message": + if in.IsNull() { + in.Skip() + out.Message = nil + } else { + out.Message = in.Bytes() + } + case "message_type": + if in.IsNull() { + in.Skip() + out.MessageType = nil + } else { + if out.MessageType == nil { + out.MessageType = new(LogMessage_MessageType) + } + if data := in.Raw(); in.Ok() { + in.AddError((*out.MessageType).UnmarshalJSON(data)) + } + } + case "timestamp": + if in.IsNull() { + in.Skip() + out.Timestamp = nil + } else { + if out.Timestamp == nil { + out.Timestamp = new(int64) + } + *out.Timestamp = int64(in.Int64()) + } + case "app_id": + if in.IsNull() { + in.Skip() + out.AppId = nil + } else { + if out.AppId == nil { + out.AppId = new(string) + } + *out.AppId = string(in.String()) + } + case "source_type": + if in.IsNull() { + in.Skip() + out.SourceType = nil + } else { + if out.SourceType == nil { + out.SourceType = new(string) + } + *out.SourceType = string(in.String()) + } + case "source_instance": + if in.IsNull() { + in.Skip() + out.SourceInstance = nil + } else { + if out.SourceInstance == nil { + out.SourceInstance = new(string) + } + *out.SourceInstance = string(in.String()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents2(out *jwriter.Writer, in LogMessage) { + 
out.RawByte('{') + first := true + _ = first + if len(in.Message) != 0 { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"message\":") + out.Base64Bytes(in.Message) + } + if in.MessageType != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"message_type\":") + if in.MessageType == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.MessageType)) + } + } + if in.Timestamp != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"timestamp\":") + if in.Timestamp == nil { + out.RawString("null") + } else { + out.Int64(int64(*in.Timestamp)) + } + } + if in.AppId != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"app_id\":") + if in.AppId == nil { + out.RawString("null") + } else { + out.String(string(*in.AppId)) + } + } + if in.SourceType != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"source_type\":") + if in.SourceType == nil { + out.RawString("null") + } else { + out.String(string(*in.SourceType)) + } + } + if in.SourceInstance != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"source_instance\":") + if in.SourceInstance == nil { + out.RawString("null") + } else { + out.String(string(*in.SourceInstance)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v LogMessage) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents2(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *LogMessage) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents2(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents3(in *jlexer.Lexer, out *HttpStartStop) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + 
in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "startTimestamp": + if in.IsNull() { + in.Skip() + out.StartTimestamp = nil + } else { + if out.StartTimestamp == nil { + out.StartTimestamp = new(int64) + } + *out.StartTimestamp = int64(in.Int64()) + } + case "stopTimestamp": + if in.IsNull() { + in.Skip() + out.StopTimestamp = nil + } else { + if out.StopTimestamp == nil { + out.StopTimestamp = new(int64) + } + *out.StopTimestamp = int64(in.Int64()) + } + case "requestId": + if in.IsNull() { + in.Skip() + out.RequestId = nil + } else { + if out.RequestId == nil { + out.RequestId = new(UUID) + } + (*out.RequestId).UnmarshalEasyJSON(in) + } + case "peerType": + if in.IsNull() { + in.Skip() + out.PeerType = nil + } else { + if out.PeerType == nil { + out.PeerType = new(PeerType) + } + if data := in.Raw(); in.Ok() { + in.AddError((*out.PeerType).UnmarshalJSON(data)) + } + } + case "method": + if in.IsNull() { + in.Skip() + out.Method = nil + } else { + if out.Method == nil { + out.Method = new(Method) + } + if data := in.Raw(); in.Ok() { + in.AddError((*out.Method).UnmarshalJSON(data)) + } + } + case "uri": + if in.IsNull() { + in.Skip() + out.Uri = nil + } else { + if out.Uri == nil { + out.Uri = new(string) + } + *out.Uri = string(in.String()) + } + case "remoteAddress": + if in.IsNull() { + in.Skip() + out.RemoteAddress = nil + } else { + if out.RemoteAddress == nil { + out.RemoteAddress = new(string) + } + *out.RemoteAddress = string(in.String()) + } + case "userAgent": + if in.IsNull() { + in.Skip() + out.UserAgent = nil + } else { + if out.UserAgent == nil { + out.UserAgent = new(string) + } + *out.UserAgent = string(in.String()) + } + case "statusCode": + if in.IsNull() { + in.Skip() + out.StatusCode = nil + } else { + if out.StatusCode == nil { + out.StatusCode = new(int32) + } + *out.StatusCode = int32(in.Int32()) + } + case "contentLength": + if in.IsNull() { + in.Skip() + out.ContentLength = nil + } else { 
+ if out.ContentLength == nil { + out.ContentLength = new(int64) + } + *out.ContentLength = int64(in.Int64()) + } + case "applicationId": + if in.IsNull() { + in.Skip() + out.ApplicationId = nil + } else { + if out.ApplicationId == nil { + out.ApplicationId = new(UUID) + } + (*out.ApplicationId).UnmarshalEasyJSON(in) + } + case "instanceIndex": + if in.IsNull() { + in.Skip() + out.InstanceIndex = nil + } else { + if out.InstanceIndex == nil { + out.InstanceIndex = new(int32) + } + *out.InstanceIndex = int32(in.Int32()) + } + case "instanceId": + if in.IsNull() { + in.Skip() + out.InstanceId = nil + } else { + if out.InstanceId == nil { + out.InstanceId = new(string) + } + *out.InstanceId = string(in.String()) + } + case "forwarded": + if in.IsNull() { + in.Skip() + out.Forwarded = nil + } else { + in.Delim('[') + if out.Forwarded == nil { + if !in.IsDelim(']') { + out.Forwarded = make([]string, 0, 4) + } else { + out.Forwarded = []string{} + } + } else { + out.Forwarded = (out.Forwarded)[:0] + } + for !in.IsDelim(']') { + var v4 string + v4 = string(in.String()) + out.Forwarded = append(out.Forwarded, v4) + in.WantComma() + } + in.Delim(']') + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents3(out *jwriter.Writer, in HttpStartStop) { + out.RawByte('{') + first := true + _ = first + if in.StartTimestamp != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"startTimestamp\":") + if in.StartTimestamp == nil { + out.RawString("null") + } else { + out.Int64(int64(*in.StartTimestamp)) + } + } + if in.StopTimestamp != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"stopTimestamp\":") + if in.StopTimestamp == nil { + out.RawString("null") + } else { + out.Int64(int64(*in.StopTimestamp)) + } + } + if in.RequestId != nil { + if !first { + out.RawByte(',') + } + first = false + 
out.RawString("\"requestId\":") + if in.RequestId == nil { + out.RawString("null") + } else { + (*in.RequestId).MarshalEasyJSON(out) + } + } + if in.PeerType != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"peerType\":") + if in.PeerType == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.PeerType)) + } + } + if in.Method != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"method\":") + if in.Method == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.Method)) + } + } + if in.Uri != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"uri\":") + if in.Uri == nil { + out.RawString("null") + } else { + out.String(string(*in.Uri)) + } + } + if in.RemoteAddress != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"remoteAddress\":") + if in.RemoteAddress == nil { + out.RawString("null") + } else { + out.String(string(*in.RemoteAddress)) + } + } + if in.UserAgent != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"userAgent\":") + if in.UserAgent == nil { + out.RawString("null") + } else { + out.String(string(*in.UserAgent)) + } + } + if in.StatusCode != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"statusCode\":") + if in.StatusCode == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.StatusCode)) + } + } + if in.ContentLength != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"contentLength\":") + if in.ContentLength == nil { + out.RawString("null") + } else { + out.Int64(int64(*in.ContentLength)) + } + } + if in.ApplicationId != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"applicationId\":") + if in.ApplicationId == nil { + out.RawString("null") + } else { + (*in.ApplicationId).MarshalEasyJSON(out) + } + } + if in.InstanceIndex != nil { + if !first { + out.RawByte(',') + } + first = false + 
out.RawString("\"instanceIndex\":") + if in.InstanceIndex == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.InstanceIndex)) + } + } + if in.InstanceId != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"instanceId\":") + if in.InstanceId == nil { + out.RawString("null") + } else { + out.String(string(*in.InstanceId)) + } + } + if len(in.Forwarded) != 0 { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"forwarded\":") + if in.Forwarded == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { + out.RawString("null") + } else { + out.RawByte('[') + for v5, v6 := range in.Forwarded { + if v5 > 0 { + out.RawByte(',') + } + out.String(string(v6)) + } + out.RawByte(']') + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v HttpStartStop) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents3(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *HttpStartStop) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents3(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents4(in *jlexer.Lexer, out *Error) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "source": + if in.IsNull() { + in.Skip() + out.Source = nil + } else { + if out.Source == nil { + out.Source = new(string) + } + *out.Source = string(in.String()) + } + case "code": + if in.IsNull() { + in.Skip() + out.Code = nil + } else { + if out.Code == nil { + out.Code = new(int32) + } + *out.Code = int32(in.Int32()) + } + case "message": + if in.IsNull() { + in.Skip() + out.Message = nil + } else { + if out.Message == nil { + out.Message = new(string) + } + 
*out.Message = string(in.String()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents4(out *jwriter.Writer, in Error) { + out.RawByte('{') + first := true + _ = first + if in.Source != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"source\":") + if in.Source == nil { + out.RawString("null") + } else { + out.String(string(*in.Source)) + } + } + if in.Code != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"code\":") + if in.Code == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.Code)) + } + } + if in.Message != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"message\":") + if in.Message == nil { + out.RawString("null") + } else { + out.String(string(*in.Message)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v Error) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents4(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents4(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents5(in *jlexer.Lexer, out *Envelope) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "origin": + if in.IsNull() { + in.Skip() + out.Origin = nil + } else { + if out.Origin == nil { + out.Origin = new(string) + } + *out.Origin = string(in.String()) + } + case "eventType": + if in.IsNull() { + in.Skip() + out.EventType = nil + } else { + if out.EventType == nil { + out.EventType = 
new(Envelope_EventType) + } + if data := in.Raw(); in.Ok() { + in.AddError((*out.EventType).UnmarshalJSON(data)) + } + } + case "timestamp": + if in.IsNull() { + in.Skip() + out.Timestamp = nil + } else { + if out.Timestamp == nil { + out.Timestamp = new(int64) + } + *out.Timestamp = int64(in.Int64()) + } + case "deployment": + if in.IsNull() { + in.Skip() + out.Deployment = nil + } else { + if out.Deployment == nil { + out.Deployment = new(string) + } + *out.Deployment = string(in.String()) + } + case "job": + if in.IsNull() { + in.Skip() + out.Job = nil + } else { + if out.Job == nil { + out.Job = new(string) + } + *out.Job = string(in.String()) + } + case "index": + if in.IsNull() { + in.Skip() + out.Index = nil + } else { + if out.Index == nil { + out.Index = new(string) + } + *out.Index = string(in.String()) + } + case "ip": + if in.IsNull() { + in.Skip() + out.Ip = nil + } else { + if out.Ip == nil { + out.Ip = new(string) + } + *out.Ip = string(in.String()) + } + case "tags": + if in.IsNull() { + in.Skip() + } else { + in.Delim('{') + if !in.IsDelim('}') { + out.Tags = make(map[string]string) + } else { + out.Tags = nil + } + for !in.IsDelim('}') { + key := string(in.String()) + in.WantColon() + var v7 string + v7 = string(in.String()) + (out.Tags)[key] = v7 + in.WantComma() + } + in.Delim('}') + } + case "httpStartStop": + if in.IsNull() { + in.Skip() + out.HttpStartStop = nil + } else { + if out.HttpStartStop == nil { + out.HttpStartStop = new(HttpStartStop) + } + (*out.HttpStartStop).UnmarshalEasyJSON(in) + } + case "logMessage": + if in.IsNull() { + in.Skip() + out.LogMessage = nil + } else { + if out.LogMessage == nil { + out.LogMessage = new(LogMessage) + } + (*out.LogMessage).UnmarshalEasyJSON(in) + } + case "valueMetric": + if in.IsNull() { + in.Skip() + out.ValueMetric = nil + } else { + if out.ValueMetric == nil { + out.ValueMetric = new(ValueMetric) + } + (*out.ValueMetric).UnmarshalEasyJSON(in) + } + case "counterEvent": + if in.IsNull() { + 
in.Skip() + out.CounterEvent = nil + } else { + if out.CounterEvent == nil { + out.CounterEvent = new(CounterEvent) + } + (*out.CounterEvent).UnmarshalEasyJSON(in) + } + case "error": + if in.IsNull() { + in.Skip() + out.Error = nil + } else { + if out.Error == nil { + out.Error = new(Error) + } + (*out.Error).UnmarshalEasyJSON(in) + } + case "containerMetric": + if in.IsNull() { + in.Skip() + out.ContainerMetric = nil + } else { + if out.ContainerMetric == nil { + out.ContainerMetric = new(ContainerMetric) + } + (*out.ContainerMetric).UnmarshalEasyJSON(in) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents5(out *jwriter.Writer, in Envelope) { + out.RawByte('{') + first := true + _ = first + if in.Origin != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"origin\":") + if in.Origin == nil { + out.RawString("null") + } else { + out.String(string(*in.Origin)) + } + } + if in.EventType != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"eventType\":") + if in.EventType == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.EventType)) + } + } + if in.Timestamp != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"timestamp\":") + if in.Timestamp == nil { + out.RawString("null") + } else { + out.Int64(int64(*in.Timestamp)) + } + } + if in.Deployment != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"deployment\":") + if in.Deployment == nil { + out.RawString("null") + } else { + out.String(string(*in.Deployment)) + } + } + if in.Job != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"job\":") + if in.Job == nil { + out.RawString("null") + } else { + out.String(string(*in.Job)) + } + } + if in.Index != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"index\":") 
+ if in.Index == nil { + out.RawString("null") + } else { + out.String(string(*in.Index)) + } + } + if in.Ip != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"ip\":") + if in.Ip == nil { + out.RawString("null") + } else { + out.String(string(*in.Ip)) + } + } + if len(in.Tags) != 0 { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"tags\":") + if in.Tags == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 { + out.RawString(`null`) + } else { + out.RawByte('{') + v8First := true + for v8Name, v8Value := range in.Tags { + if !v8First { + out.RawByte(',') + } + v8First = false + out.String(string(v8Name)) + out.RawByte(':') + out.String(string(v8Value)) + } + out.RawByte('}') + } + } + if in.HttpStartStop != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"httpStartStop\":") + if in.HttpStartStop == nil { + out.RawString("null") + } else { + (*in.HttpStartStop).MarshalEasyJSON(out) + } + } + if in.LogMessage != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"logMessage\":") + if in.LogMessage == nil { + out.RawString("null") + } else { + (*in.LogMessage).MarshalEasyJSON(out) + } + } + if in.ValueMetric != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"valueMetric\":") + if in.ValueMetric == nil { + out.RawString("null") + } else { + (*in.ValueMetric).MarshalEasyJSON(out) + } + } + if in.CounterEvent != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"counterEvent\":") + if in.CounterEvent == nil { + out.RawString("null") + } else { + (*in.CounterEvent).MarshalEasyJSON(out) + } + } + if in.Error != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"error\":") + if in.Error == nil { + out.RawString("null") + } else { + (*in.Error).MarshalEasyJSON(out) + } + } + if in.ContainerMetric != nil { + if !first { + out.RawByte(',') + } + first = false + 
out.RawString("\"containerMetric\":") + if in.ContainerMetric == nil { + out.RawString("null") + } else { + (*in.ContainerMetric).MarshalEasyJSON(out) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v Envelope) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents5(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *Envelope) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents5(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents6(in *jlexer.Lexer, out *CounterEvent) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "name": + if in.IsNull() { + in.Skip() + out.Name = nil + } else { + if out.Name == nil { + out.Name = new(string) + } + *out.Name = string(in.String()) + } + case "delta": + if in.IsNull() { + in.Skip() + out.Delta = nil + } else { + if out.Delta == nil { + out.Delta = new(uint64) + } + *out.Delta = uint64(in.Uint64()) + } + case "total": + if in.IsNull() { + in.Skip() + out.Total = nil + } else { + if out.Total == nil { + out.Total = new(uint64) + } + *out.Total = uint64(in.Uint64()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents6(out *jwriter.Writer, in CounterEvent) { + out.RawByte('{') + first := true + _ = first + if in.Name != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"name\":") + if in.Name == nil { + out.RawString("null") + } else { + out.String(string(*in.Name)) + } + } + if in.Delta != nil { + if !first { + out.RawByte(',') + } + first = false + 
out.RawString("\"delta\":") + if in.Delta == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.Delta)) + } + } + if in.Total != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"total\":") + if in.Total == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.Total)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v CounterEvent) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents6(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *CounterEvent) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents6(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents7(in *jlexer.Lexer, out *ContainerMetric) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "applicationId": + if in.IsNull() { + in.Skip() + out.ApplicationId = nil + } else { + if out.ApplicationId == nil { + out.ApplicationId = new(string) + } + *out.ApplicationId = string(in.String()) + } + case "instanceIndex": + if in.IsNull() { + in.Skip() + out.InstanceIndex = nil + } else { + if out.InstanceIndex == nil { + out.InstanceIndex = new(int32) + } + *out.InstanceIndex = int32(in.Int32()) + } + case "cpuPercentage": + if in.IsNull() { + in.Skip() + out.CpuPercentage = nil + } else { + if out.CpuPercentage == nil { + out.CpuPercentage = new(float64) + } + *out.CpuPercentage = float64(in.Float64()) + } + case "memoryBytes": + if in.IsNull() { + in.Skip() + out.MemoryBytes = nil + } else { + if out.MemoryBytes == nil { + out.MemoryBytes = new(uint64) + } + *out.MemoryBytes = uint64(in.Uint64()) + } + case "diskBytes": + if in.IsNull() { + in.Skip() + 
out.DiskBytes = nil + } else { + if out.DiskBytes == nil { + out.DiskBytes = new(uint64) + } + *out.DiskBytes = uint64(in.Uint64()) + } + case "memoryBytesQuota": + if in.IsNull() { + in.Skip() + out.MemoryBytesQuota = nil + } else { + if out.MemoryBytesQuota == nil { + out.MemoryBytesQuota = new(uint64) + } + *out.MemoryBytesQuota = uint64(in.Uint64()) + } + case "diskBytesQuota": + if in.IsNull() { + in.Skip() + out.DiskBytesQuota = nil + } else { + if out.DiskBytesQuota == nil { + out.DiskBytesQuota = new(uint64) + } + *out.DiskBytesQuota = uint64(in.Uint64()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents7(out *jwriter.Writer, in ContainerMetric) { + out.RawByte('{') + first := true + _ = first + if in.ApplicationId != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"applicationId\":") + if in.ApplicationId == nil { + out.RawString("null") + } else { + out.String(string(*in.ApplicationId)) + } + } + if in.InstanceIndex != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"instanceIndex\":") + if in.InstanceIndex == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.InstanceIndex)) + } + } + if in.CpuPercentage != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"cpuPercentage\":") + if in.CpuPercentage == nil { + out.RawString("null") + } else { + out.Float64(float64(*in.CpuPercentage)) + } + } + if in.MemoryBytes != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"memoryBytes\":") + if in.MemoryBytes == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.MemoryBytes)) + } + } + if in.DiskBytes != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"diskBytes\":") + if in.DiskBytes == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.DiskBytes)) + } + } + 
if in.MemoryBytesQuota != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"memoryBytesQuota\":") + if in.MemoryBytesQuota == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.MemoryBytesQuota)) + } + } + if in.DiskBytesQuota != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"diskBytesQuota\":") + if in.DiskBytesQuota == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.DiskBytesQuota)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v ContainerMetric) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents7(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *ContainerMetric) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents7(l, v) +} diff --git a/vendor/github.com/cloudfoundry/sonde-go/events/http.pb.go b/vendor/github.com/cloudfoundry/sonde-go/events/http.pb.go new file mode 100644 index 000000000000..b3847dfc99f0 --- /dev/null +++ b/vendor/github.com/cloudfoundry/sonde-go/events/http.pb.go @@ -0,0 +1,1186 @@ +// Code generated by protoc-gen-gogo. +// source: http.proto +// DO NOT EDIT! + +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// / Type of peer handling request. 
+type PeerType int32 + +const ( + PeerType_Client PeerType = 1 + PeerType_Server PeerType = 2 +) + +var PeerType_name = map[int32]string{ + 1: "Client", + 2: "Server", +} +var PeerType_value = map[string]int32{ + "Client": 1, + "Server": 2, +} + +func (x PeerType) Enum() *PeerType { + p := new(PeerType) + *p = x + return p +} +func (x PeerType) String() string { + return proto.EnumName(PeerType_name, int32(x)) +} +func (x *PeerType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PeerType_value, data, "PeerType") + if err != nil { + return err + } + *x = PeerType(value) + return nil +} +func (PeerType) EnumDescriptor() ([]byte, []int) { return fileDescriptorHttp, []int{0} } + +// / HTTP method. +type Method int32 + +const ( + Method_GET Method = 1 + Method_POST Method = 2 + Method_PUT Method = 3 + Method_DELETE Method = 4 + Method_HEAD Method = 5 + Method_ACL Method = 6 + Method_BASELINE_CONTROL Method = 7 + Method_BIND Method = 8 + Method_CHECKIN Method = 9 + Method_CHECKOUT Method = 10 + Method_CONNECT Method = 11 + Method_COPY Method = 12 + Method_DEBUG Method = 13 + Method_LABEL Method = 14 + Method_LINK Method = 15 + Method_LOCK Method = 16 + Method_MERGE Method = 17 + Method_MKACTIVITY Method = 18 + Method_MKCALENDAR Method = 19 + Method_MKCOL Method = 20 + Method_MKREDIRECTREF Method = 21 + Method_MKWORKSPACE Method = 22 + Method_MOVE Method = 23 + Method_OPTIONS Method = 24 + Method_ORDERPATCH Method = 25 + Method_PATCH Method = 26 + Method_PRI Method = 27 + Method_PROPFIND Method = 28 + Method_PROPPATCH Method = 29 + Method_REBIND Method = 30 + Method_REPORT Method = 31 + Method_SEARCH Method = 32 + Method_SHOWMETHOD Method = 33 + Method_SPACEJUMP Method = 34 + Method_TEXTSEARCH Method = 35 + Method_TRACE Method = 36 + Method_TRACK Method = 37 + Method_UNBIND Method = 38 + Method_UNCHECKOUT Method = 39 + Method_UNLINK Method = 40 + Method_UNLOCK Method = 41 + Method_UPDATE Method = 42 + Method_UPDATEREDIRECTREF Method = 43 + 
Method_VERSION_CONTROL Method = 44 +) + +var Method_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "PUT", + 4: "DELETE", + 5: "HEAD", + 6: "ACL", + 7: "BASELINE_CONTROL", + 8: "BIND", + 9: "CHECKIN", + 10: "CHECKOUT", + 11: "CONNECT", + 12: "COPY", + 13: "DEBUG", + 14: "LABEL", + 15: "LINK", + 16: "LOCK", + 17: "MERGE", + 18: "MKACTIVITY", + 19: "MKCALENDAR", + 20: "MKCOL", + 21: "MKREDIRECTREF", + 22: "MKWORKSPACE", + 23: "MOVE", + 24: "OPTIONS", + 25: "ORDERPATCH", + 26: "PATCH", + 27: "PRI", + 28: "PROPFIND", + 29: "PROPPATCH", + 30: "REBIND", + 31: "REPORT", + 32: "SEARCH", + 33: "SHOWMETHOD", + 34: "SPACEJUMP", + 35: "TEXTSEARCH", + 36: "TRACE", + 37: "TRACK", + 38: "UNBIND", + 39: "UNCHECKOUT", + 40: "UNLINK", + 41: "UNLOCK", + 42: "UPDATE", + 43: "UPDATEREDIRECTREF", + 44: "VERSION_CONTROL", +} +var Method_value = map[string]int32{ + "GET": 1, + "POST": 2, + "PUT": 3, + "DELETE": 4, + "HEAD": 5, + "ACL": 6, + "BASELINE_CONTROL": 7, + "BIND": 8, + "CHECKIN": 9, + "CHECKOUT": 10, + "CONNECT": 11, + "COPY": 12, + "DEBUG": 13, + "LABEL": 14, + "LINK": 15, + "LOCK": 16, + "MERGE": 17, + "MKACTIVITY": 18, + "MKCALENDAR": 19, + "MKCOL": 20, + "MKREDIRECTREF": 21, + "MKWORKSPACE": 22, + "MOVE": 23, + "OPTIONS": 24, + "ORDERPATCH": 25, + "PATCH": 26, + "PRI": 27, + "PROPFIND": 28, + "PROPPATCH": 29, + "REBIND": 30, + "REPORT": 31, + "SEARCH": 32, + "SHOWMETHOD": 33, + "SPACEJUMP": 34, + "TEXTSEARCH": 35, + "TRACE": 36, + "TRACK": 37, + "UNBIND": 38, + "UNCHECKOUT": 39, + "UNLINK": 40, + "UNLOCK": 41, + "UPDATE": 42, + "UPDATEREDIRECTREF": 43, + "VERSION_CONTROL": 44, +} + +func (x Method) Enum() *Method { + p := new(Method) + *p = x + return p +} +func (x Method) String() string { + return proto.EnumName(Method_name, int32(x)) +} +func (x *Method) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Method_value, data, "Method") + if err != nil { + return err + } + *x = Method(value) + return nil +} +func (Method) EnumDescriptor() 
([]byte, []int) { return fileDescriptorHttp, []int{1} } + +// / An HttpStartStop event represents the whole lifecycle of an HTTP request. +type HttpStartStop struct { + StartTimestamp *int64 `protobuf:"varint,1,req,name=startTimestamp" json:"startTimestamp,omitempty"` + StopTimestamp *int64 `protobuf:"varint,2,req,name=stopTimestamp" json:"stopTimestamp,omitempty"` + RequestId *UUID `protobuf:"bytes,3,req,name=requestId" json:"requestId,omitempty"` + PeerType *PeerType `protobuf:"varint,4,req,name=peerType,enum=events.PeerType" json:"peerType,omitempty"` + Method *Method `protobuf:"varint,5,req,name=method,enum=events.Method" json:"method,omitempty"` + Uri *string `protobuf:"bytes,6,req,name=uri" json:"uri,omitempty"` + RemoteAddress *string `protobuf:"bytes,7,req,name=remoteAddress" json:"remoteAddress,omitempty"` + UserAgent *string `protobuf:"bytes,8,req,name=userAgent" json:"userAgent,omitempty"` + StatusCode *int32 `protobuf:"varint,9,req,name=statusCode" json:"statusCode,omitempty"` + ContentLength *int64 `protobuf:"varint,10,req,name=contentLength" json:"contentLength,omitempty"` + ApplicationId *UUID `protobuf:"bytes,12,opt,name=applicationId" json:"applicationId,omitempty"` + InstanceIndex *int32 `protobuf:"varint,13,opt,name=instanceIndex" json:"instanceIndex,omitempty"` + InstanceId *string `protobuf:"bytes,14,opt,name=instanceId" json:"instanceId,omitempty"` + Forwarded []string `protobuf:"bytes,15,rep,name=forwarded" json:"forwarded,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HttpStartStop) Reset() { *m = HttpStartStop{} } +func (m *HttpStartStop) String() string { return proto.CompactTextString(m) } +func (*HttpStartStop) ProtoMessage() {} +func (*HttpStartStop) Descriptor() ([]byte, []int) { return fileDescriptorHttp, []int{0} } + +func (m *HttpStartStop) GetStartTimestamp() int64 { + if m != nil && m.StartTimestamp != nil { + return *m.StartTimestamp + } + return 0 +} + +func (m *HttpStartStop) GetStopTimestamp() int64 { + if m 
!= nil && m.StopTimestamp != nil { + return *m.StopTimestamp + } + return 0 +} + +func (m *HttpStartStop) GetRequestId() *UUID { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *HttpStartStop) GetPeerType() PeerType { + if m != nil && m.PeerType != nil { + return *m.PeerType + } + return PeerType_Client +} + +func (m *HttpStartStop) GetMethod() Method { + if m != nil && m.Method != nil { + return *m.Method + } + return Method_GET +} + +func (m *HttpStartStop) GetUri() string { + if m != nil && m.Uri != nil { + return *m.Uri + } + return "" +} + +func (m *HttpStartStop) GetRemoteAddress() string { + if m != nil && m.RemoteAddress != nil { + return *m.RemoteAddress + } + return "" +} + +func (m *HttpStartStop) GetUserAgent() string { + if m != nil && m.UserAgent != nil { + return *m.UserAgent + } + return "" +} + +func (m *HttpStartStop) GetStatusCode() int32 { + if m != nil && m.StatusCode != nil { + return *m.StatusCode + } + return 0 +} + +func (m *HttpStartStop) GetContentLength() int64 { + if m != nil && m.ContentLength != nil { + return *m.ContentLength + } + return 0 +} + +func (m *HttpStartStop) GetApplicationId() *UUID { + if m != nil { + return m.ApplicationId + } + return nil +} + +func (m *HttpStartStop) GetInstanceIndex() int32 { + if m != nil && m.InstanceIndex != nil { + return *m.InstanceIndex + } + return 0 +} + +func (m *HttpStartStop) GetInstanceId() string { + if m != nil && m.InstanceId != nil { + return *m.InstanceId + } + return "" +} + +func (m *HttpStartStop) GetForwarded() []string { + if m != nil { + return m.Forwarded + } + return nil +} + +func init() { + proto.RegisterType((*HttpStartStop)(nil), "events.HttpStartStop") + proto.RegisterEnum("events.PeerType", PeerType_name, PeerType_value) + proto.RegisterEnum("events.Method", Method_name, Method_value) +} +func (m *HttpStartStop) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return 
nil, err + } + return data[:n], nil +} + +func (m *HttpStartStop) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.StartTimestamp == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("startTimestamp") + } else { + data[i] = 0x8 + i++ + i = encodeVarintHttp(data, i, uint64(*m.StartTimestamp)) + } + if m.StopTimestamp == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("stopTimestamp") + } else { + data[i] = 0x10 + i++ + i = encodeVarintHttp(data, i, uint64(*m.StopTimestamp)) + } + if m.RequestId == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("requestId") + } else { + data[i] = 0x1a + i++ + i = encodeVarintHttp(data, i, uint64(m.RequestId.Size())) + n1, err := m.RequestId.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.PeerType == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("peerType") + } else { + data[i] = 0x20 + i++ + i = encodeVarintHttp(data, i, uint64(*m.PeerType)) + } + if m.Method == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("method") + } else { + data[i] = 0x28 + i++ + i = encodeVarintHttp(data, i, uint64(*m.Method)) + } + if m.Uri == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("uri") + } else { + data[i] = 0x32 + i++ + i = encodeVarintHttp(data, i, uint64(len(*m.Uri))) + i += copy(data[i:], *m.Uri) + } + if m.RemoteAddress == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("remoteAddress") + } else { + data[i] = 0x3a + i++ + i = encodeVarintHttp(data, i, uint64(len(*m.RemoteAddress))) + i += copy(data[i:], *m.RemoteAddress) + } + if m.UserAgent == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("userAgent") + } else { + data[i] = 0x42 + i++ + i = encodeVarintHttp(data, i, uint64(len(*m.UserAgent))) + i += copy(data[i:], *m.UserAgent) + } + if m.StatusCode == nil { + return 0, 
github_com_gogo_protobuf_proto.NewRequiredNotSetError("statusCode") + } else { + data[i] = 0x48 + i++ + i = encodeVarintHttp(data, i, uint64(*m.StatusCode)) + } + if m.ContentLength == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("contentLength") + } else { + data[i] = 0x50 + i++ + i = encodeVarintHttp(data, i, uint64(*m.ContentLength)) + } + if m.ApplicationId != nil { + data[i] = 0x62 + i++ + i = encodeVarintHttp(data, i, uint64(m.ApplicationId.Size())) + n2, err := m.ApplicationId.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.InstanceIndex != nil { + data[i] = 0x68 + i++ + i = encodeVarintHttp(data, i, uint64(*m.InstanceIndex)) + } + if m.InstanceId != nil { + data[i] = 0x72 + i++ + i = encodeVarintHttp(data, i, uint64(len(*m.InstanceId))) + i += copy(data[i:], *m.InstanceId) + } + if len(m.Forwarded) > 0 { + for _, s := range m.Forwarded { + data[i] = 0x7a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Http(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Http(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintHttp(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *HttpStartStop) Size() (n int) { + var 
l int + _ = l + if m.StartTimestamp != nil { + n += 1 + sovHttp(uint64(*m.StartTimestamp)) + } + if m.StopTimestamp != nil { + n += 1 + sovHttp(uint64(*m.StopTimestamp)) + } + if m.RequestId != nil { + l = m.RequestId.Size() + n += 1 + l + sovHttp(uint64(l)) + } + if m.PeerType != nil { + n += 1 + sovHttp(uint64(*m.PeerType)) + } + if m.Method != nil { + n += 1 + sovHttp(uint64(*m.Method)) + } + if m.Uri != nil { + l = len(*m.Uri) + n += 1 + l + sovHttp(uint64(l)) + } + if m.RemoteAddress != nil { + l = len(*m.RemoteAddress) + n += 1 + l + sovHttp(uint64(l)) + } + if m.UserAgent != nil { + l = len(*m.UserAgent) + n += 1 + l + sovHttp(uint64(l)) + } + if m.StatusCode != nil { + n += 1 + sovHttp(uint64(*m.StatusCode)) + } + if m.ContentLength != nil { + n += 1 + sovHttp(uint64(*m.ContentLength)) + } + if m.ApplicationId != nil { + l = m.ApplicationId.Size() + n += 1 + l + sovHttp(uint64(l)) + } + if m.InstanceIndex != nil { + n += 1 + sovHttp(uint64(*m.InstanceIndex)) + } + if m.InstanceId != nil { + l = len(*m.InstanceId) + n += 1 + l + sovHttp(uint64(l)) + } + if len(m.Forwarded) > 0 { + for _, s := range m.Forwarded { + l = len(s) + n += 1 + l + sovHttp(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovHttp(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozHttp(x uint64) (n int) { + return sovHttp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *HttpStartStop) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
HttpStartStop: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HttpStartStop: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StartTimestamp = &v + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopTimestamp", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StopTimestamp = &v + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestId == nil { + m.RequestId = &UUID{} + } + if err := m.RequestId.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000004) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerType", wireType) + } + var v PeerType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (PeerType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PeerType = &v + hasFields[0] |= uint64(0x00000008) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) + } + var v Method + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (Method(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Method = &v + hasFields[0] |= uint64(0x00000010) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uri", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Uri = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000020) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoteAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.RemoteAddress = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000040) + case 8: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field UserAgent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.UserAgent = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000080) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusCode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StatusCode = &v + hasFields[0] |= uint64(0x00000100) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ContentLength = &v + hasFields[0] |= uint64(0x00000200) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplicationId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if m.ApplicationId == nil { + m.ApplicationId = &UUID{} + } + if err := m.ApplicationId.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InstanceIndex", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InstanceIndex = &v + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstanceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.InstanceId = &s + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Forwarded", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Forwarded = append(m.Forwarded, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHttp(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthHttp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("startTimestamp") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("stopTimestamp") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("requestId") + } + if hasFields[0]&uint64(0x00000008) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("peerType") + } + if hasFields[0]&uint64(0x00000010) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("method") + } + if hasFields[0]&uint64(0x00000020) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("uri") + } + if hasFields[0]&uint64(0x00000040) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("remoteAddress") + } + if hasFields[0]&uint64(0x00000080) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("userAgent") + } + if hasFields[0]&uint64(0x00000100) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("statusCode") + } + if hasFields[0]&uint64(0x00000200) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("contentLength") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipHttp(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowHttp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthHttp + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipHttp(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthHttp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHttp = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("http.proto", fileDescriptorHttp) } + +var fileDescriptorHttp = []byte{ + // 811 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xdd, 0x72, 0xda, 0x46, + 0x18, 0x1d, 0xc0, 0x60, 0x58, 0x0c, 0xfe, 0xbc, 0x49, 0x5a, 0xd5, 0x4d, 0xa9, 0xe2, 0xa6, 0x2e, + 0x75, 0x53, 0x32, 0xe3, 0xeb, 0xde, 0x08, 0x69, 0x6d, 0x54, 0x84, 0x56, 0xb3, 0x2c, 0x4e, 0x73, + 0xd5, 0x21, 0x68, 0x8d, 0x99, 0x31, 0x5a, 0x55, 0x5a, 0xa5, 0xf5, 0x73, 0xf4, 0x9d, 0x3a, 0xbd, + 0xec, 0x23, 0x74, 0xfc, 0x24, 
0x9d, 0x5d, 0x61, 0x6c, 0x9a, 0xbb, 0xf3, 0x9d, 0x73, 0xf6, 0xfb, + 0x39, 0x1a, 0x40, 0xe8, 0x46, 0xa9, 0x74, 0x90, 0x66, 0x52, 0x49, 0xdc, 0x10, 0x1f, 0x45, 0xa2, + 0xf2, 0xe3, 0x1f, 0x97, 0x2b, 0x75, 0x53, 0x7c, 0x18, 0x2c, 0xe4, 0xfa, 0xed, 0x52, 0x2e, 0xe5, + 0x5b, 0x23, 0x7f, 0x28, 0xae, 0x4d, 0x65, 0x0a, 0x83, 0xca, 0x67, 0xc7, 0xa8, 0x28, 0x56, 0x71, + 0x89, 0x4f, 0xfe, 0xdc, 0x43, 0x9d, 0x91, 0x52, 0xe9, 0x54, 0xcd, 0x33, 0x35, 0x55, 0x32, 0xc5, + 0xa7, 0xa8, 0x9b, 0xeb, 0x82, 0xaf, 0xd6, 0x22, 0x57, 0xf3, 0x75, 0x6a, 0x55, 0xec, 0x6a, 0xbf, + 0xc6, 0xfe, 0xc7, 0xe2, 0xd7, 0xa8, 0x93, 0x2b, 0x99, 0x3e, 0xda, 0xaa, 0xc6, 0xb6, 0x4b, 0xe2, + 0x33, 0xd4, 0xca, 0xc4, 0x6f, 0x85, 0xc8, 0x95, 0x1f, 0x5b, 0x35, 0xbb, 0xda, 0x6f, 0x9f, 0x1f, + 0x0c, 0xca, 0xb5, 0x07, 0xb3, 0x99, 0xef, 0xb1, 0x47, 0x19, 0xbf, 0x41, 0xcd, 0x54, 0x88, 0x8c, + 0xdf, 0xa5, 0xc2, 0xda, 0xb3, 0xab, 0xfd, 0xee, 0x39, 0x3c, 0x58, 0xa3, 0x0d, 0xcf, 0xb6, 0x0e, + 0x7c, 0x8a, 0x1a, 0x6b, 0xa1, 0x6e, 0x64, 0x6c, 0xd5, 0x8d, 0xb7, 0xfb, 0xe0, 0x9d, 0x18, 0x96, + 0x6d, 0x54, 0x0c, 0xa8, 0x56, 0x64, 0x2b, 0xab, 0x61, 0x57, 0xfb, 0x2d, 0xa6, 0xa1, 0xde, 0x3c, + 0x13, 0x6b, 0xa9, 0x84, 0x13, 0xc7, 0x99, 0xc8, 0x73, 0x6b, 0xdf, 0x68, 0xbb, 0x24, 0x7e, 0x89, + 0x5a, 0x45, 0x2e, 0x32, 0x67, 0x29, 0x12, 0x65, 0x35, 0x8d, 0xe3, 0x91, 0xc0, 0x3d, 0x84, 0x72, + 0x35, 0x57, 0x45, 0xee, 0xca, 0x58, 0x58, 0x2d, 0xbb, 0xda, 0xaf, 0xb3, 0x27, 0x8c, 0x9e, 0xb1, + 0x90, 0x89, 0x12, 0x89, 0x0a, 0x44, 0xb2, 0x54, 0x37, 0x16, 0x2a, 0xd3, 0xd9, 0x21, 0xf1, 0x39, + 0xea, 0xcc, 0xd3, 0xf4, 0x76, 0xb5, 0x98, 0xab, 0x95, 0x4c, 0xfc, 0xd8, 0x3a, 0xb0, 0x2b, 0x9f, + 0x24, 0xb4, 0x6b, 0xd1, 0x9d, 0x57, 0x49, 0xae, 0xe6, 0xc9, 0x42, 0xf8, 0x49, 0x2c, 0xfe, 0xb0, + 0x3a, 0x76, 0xa5, 0x5f, 0x67, 0xbb, 0xa4, 0xde, 0x6f, 0x4b, 0xc4, 0x56, 0xd7, 0xae, 0xf4, 0x5b, + 0xec, 0x09, 0xa3, 0xaf, 0xbb, 0x96, 0xd9, 0xef, 0xf3, 0x2c, 0x16, 0xb1, 0x75, 0x68, 0xd7, 0xf4, + 0x75, 0x5b, 0xe2, 0xec, 0x04, 0x35, 0x1f, 0x12, 0xc7, 0x08, 0x35, 
0xdc, 0xdb, 0x95, 0x48, 0x14, + 0x54, 0x34, 0x9e, 0x8a, 0xec, 0xa3, 0xc8, 0xa0, 0x7a, 0xf6, 0xd7, 0x1e, 0x6a, 0x94, 0x51, 0xe3, + 0x7d, 0x54, 0xbb, 0x24, 0x1c, 0x2a, 0xb8, 0x89, 0xf6, 0x22, 0x3a, 0xe5, 0x50, 0xd5, 0x54, 0x34, + 0xe3, 0x50, 0xd3, 0x4f, 0x3c, 0x12, 0x10, 0x4e, 0x60, 0x4f, 0xcb, 0x23, 0xe2, 0x78, 0x50, 0xd7, + 0xb2, 0xe3, 0x06, 0xd0, 0xc0, 0xcf, 0x11, 0x0c, 0x9d, 0x29, 0x09, 0xfc, 0x90, 0xfc, 0xea, 0xd2, + 0x90, 0x33, 0x1a, 0xc0, 0xbe, 0x36, 0x0e, 0xfd, 0xd0, 0x83, 0x26, 0x6e, 0xa3, 0x7d, 0x77, 0x44, + 0xdc, 0xb1, 0x1f, 0x42, 0x0b, 0x1f, 0xa0, 0xa6, 0x29, 0xe8, 0x8c, 0x03, 0x32, 0x12, 0x0d, 0x43, + 0xe2, 0x72, 0x68, 0xeb, 0x17, 0x2e, 0x8d, 0xde, 0xc3, 0x01, 0x6e, 0xa1, 0xba, 0x47, 0x86, 0xb3, + 0x4b, 0xe8, 0x68, 0x18, 0x38, 0x43, 0x12, 0x40, 0x57, 0xeb, 0x81, 0x1f, 0x8e, 0xe1, 0xd0, 0x20, + 0xea, 0x8e, 0x01, 0xb4, 0x3c, 0x21, 0xec, 0x92, 0xc0, 0x11, 0xee, 0x22, 0x34, 0x19, 0x3b, 0x2e, + 0xf7, 0xaf, 0x7c, 0xfe, 0x1e, 0x70, 0x59, 0xbb, 0x4e, 0x40, 0x42, 0xcf, 0x61, 0xf0, 0xcc, 0x58, + 0xc7, 0x2e, 0x0d, 0xe0, 0x39, 0x3e, 0x42, 0x9d, 0xc9, 0x98, 0x11, 0xcf, 0x67, 0xc4, 0xe5, 0x8c, + 0x5c, 0xc0, 0x0b, 0x7c, 0x88, 0xda, 0x93, 0xf1, 0x3b, 0xca, 0xc6, 0xd3, 0xc8, 0x71, 0x09, 0x7c, + 0xa6, 0x67, 0x4c, 0xe8, 0x15, 0x81, 0xcf, 0xf5, 0x92, 0x34, 0xe2, 0x3e, 0x0d, 0xa7, 0x60, 0xe9, + 0xae, 0x94, 0x79, 0x84, 0x45, 0x0e, 0x77, 0x47, 0xf0, 0x85, 0xee, 0x5a, 0xc2, 0x63, 0x93, 0x17, + 0xf3, 0xe1, 0x4b, 0x7d, 0x63, 0xc4, 0x68, 0x74, 0xa1, 0xcf, 0x7f, 0x89, 0x3b, 0xa8, 0xa5, 0xab, + 0xd2, 0xf5, 0x95, 0x0e, 0x93, 0x11, 0x93, 0x4c, 0xaf, 0xc4, 0x11, 0x65, 0x1c, 0xbe, 0x36, 0xdf, + 0x85, 0x38, 0xcc, 0x1d, 0x81, 0xad, 0x87, 0x4c, 0x47, 0xf4, 0xdd, 0x84, 0xf0, 0x11, 0xf5, 0xe0, + 0x95, 0x6e, 0x61, 0xd6, 0xfa, 0x79, 0x36, 0x89, 0xe0, 0x44, 0xcb, 0x9c, 0xfc, 0xc2, 0x37, 0xf6, + 0x6f, 0xf4, 0x0e, 0x9c, 0xe9, 0xad, 0x5f, 0x3f, 0xc0, 0x31, 0x7c, 0xab, 0x1b, 0xce, 0x42, 0x33, + 0xe8, 0x54, 0xbf, 0x98, 0x85, 0xdb, 0xdc, 0xbf, 0x2b, 0x35, 0x13, 0x66, 0x7f, 0x83, 0x75, 0x9c, + 0xdf, 
0x1b, 0x1c, 0x79, 0x0e, 0x27, 0x70, 0x86, 0x5f, 0xa0, 0xa3, 0x12, 0x3f, 0x0d, 0xea, 0x07, + 0xfc, 0x0c, 0x1d, 0x5e, 0x11, 0x36, 0xf5, 0x69, 0xb8, 0xfd, 0xd8, 0x6f, 0x86, 0x3f, 0xfd, 0x7d, + 0xdf, 0xab, 0xfc, 0x73, 0xdf, 0xab, 0xfc, 0x7b, 0xdf, 0xab, 0xa0, 0x57, 0x32, 0x5b, 0x0e, 0x16, + 0xb7, 0xb2, 0x88, 0xaf, 0x65, 0x91, 0xc4, 0xd9, 0xdd, 0x20, 0xce, 0x64, 0x9a, 0xcb, 0x24, 0x16, + 0x9b, 0x5f, 0xc5, 0xb0, 0xad, 0xff, 0xb0, 0x2e, 0xe6, 0x0b, 0x25, 0xb3, 0xbb, 0xff, 0x02, 0x00, + 0x00, 0xff, 0xff, 0xfb, 0xdc, 0x82, 0x50, 0x10, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/cloudfoundry/sonde-go/events/log.pb.go b/vendor/github.com/cloudfoundry/sonde-go/events/log.pb.go new file mode 100644 index 000000000000..464978206bb3 --- /dev/null +++ b/vendor/github.com/cloudfoundry/sonde-go/events/log.pb.go @@ -0,0 +1,603 @@ +// Code generated by protoc-gen-gogo. +// source: log.proto +// DO NOT EDIT! + +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// / MessageType stores the destination of the message (corresponding to STDOUT or STDERR). 
+type LogMessage_MessageType int32 + +const ( + LogMessage_OUT LogMessage_MessageType = 1 + LogMessage_ERR LogMessage_MessageType = 2 +) + +var LogMessage_MessageType_name = map[int32]string{ + 1: "OUT", + 2: "ERR", +} +var LogMessage_MessageType_value = map[string]int32{ + "OUT": 1, + "ERR": 2, +} + +func (x LogMessage_MessageType) Enum() *LogMessage_MessageType { + p := new(LogMessage_MessageType) + *p = x + return p +} +func (x LogMessage_MessageType) String() string { + return proto.EnumName(LogMessage_MessageType_name, int32(x)) +} +func (x *LogMessage_MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(LogMessage_MessageType_value, data, "LogMessage_MessageType") + if err != nil { + return err + } + *x = LogMessage_MessageType(value) + return nil +} +func (LogMessage_MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorLog, []int{0, 0} } + +// / A LogMessage contains a "log line" and associated metadata. +type LogMessage struct { + Message []byte `protobuf:"bytes,1,req,name=message" json:"message,omitempty"` + MessageType *LogMessage_MessageType `protobuf:"varint,2,req,name=message_type,json=messageType,enum=events.LogMessage_MessageType" json:"message_type,omitempty"` + Timestamp *int64 `protobuf:"varint,3,req,name=timestamp" json:"timestamp,omitempty"` + AppId *string `protobuf:"bytes,4,opt,name=app_id,json=appId" json:"app_id,omitempty"` + SourceType *string `protobuf:"bytes,5,opt,name=source_type,json=sourceType" json:"source_type,omitempty"` + SourceInstance *string `protobuf:"bytes,6,opt,name=source_instance,json=sourceInstance" json:"source_instance,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogMessage) Reset() { *m = LogMessage{} } +func (m *LogMessage) String() string { return proto.CompactTextString(m) } +func (*LogMessage) ProtoMessage() {} +func (*LogMessage) Descriptor() ([]byte, []int) { return fileDescriptorLog, []int{0} } + +func (m *LogMessage) GetMessage() []byte { + if 
m != nil { + return m.Message + } + return nil +} + +func (m *LogMessage) GetMessageType() LogMessage_MessageType { + if m != nil && m.MessageType != nil { + return *m.MessageType + } + return LogMessage_OUT +} + +func (m *LogMessage) GetTimestamp() int64 { + if m != nil && m.Timestamp != nil { + return *m.Timestamp + } + return 0 +} + +func (m *LogMessage) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogMessage) GetSourceType() string { + if m != nil && m.SourceType != nil { + return *m.SourceType + } + return "" +} + +func (m *LogMessage) GetSourceInstance() string { + if m != nil && m.SourceInstance != nil { + return *m.SourceInstance + } + return "" +} + +func init() { + proto.RegisterType((*LogMessage)(nil), "events.LogMessage") + proto.RegisterEnum("events.LogMessage_MessageType", LogMessage_MessageType_name, LogMessage_MessageType_value) +} +func (m *LogMessage) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LogMessage) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Message == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("message") + } else { + data[i] = 0xa + i++ + i = encodeVarintLog(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + } + if m.MessageType == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("message_type") + } else { + data[i] = 0x10 + i++ + i = encodeVarintLog(data, i, uint64(*m.MessageType)) + } + if m.Timestamp == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("timestamp") + } else { + data[i] = 0x18 + i++ + i = encodeVarintLog(data, i, uint64(*m.Timestamp)) + } + if m.AppId != nil { + data[i] = 0x22 + i++ + i = encodeVarintLog(data, i, uint64(len(*m.AppId))) + i += copy(data[i:], *m.AppId) + } + if m.SourceType 
!= nil { + data[i] = 0x2a + i++ + i = encodeVarintLog(data, i, uint64(len(*m.SourceType))) + i += copy(data[i:], *m.SourceType) + } + if m.SourceInstance != nil { + data[i] = 0x32 + i++ + i = encodeVarintLog(data, i, uint64(len(*m.SourceInstance))) + i += copy(data[i:], *m.SourceInstance) + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Log(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Log(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintLog(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *LogMessage) Size() (n int) { + var l int + _ = l + if m.Message != nil { + l = len(m.Message) + n += 1 + l + sovLog(uint64(l)) + } + if m.MessageType != nil { + n += 1 + sovLog(uint64(*m.MessageType)) + } + if m.Timestamp != nil { + n += 1 + sovLog(uint64(*m.Timestamp)) + } + if m.AppId != nil { + l = len(*m.AppId) + n += 1 + l + sovLog(uint64(l)) + } + if m.SourceType != nil { + l = len(*m.SourceType) + n += 1 + l + sovLog(uint64(l)) + } + if m.SourceInstance != nil { + l = len(*m.SourceInstance) + n += 1 + l + sovLog(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovLog(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozLog(x uint64) (n int) { + return sovLog(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m 
*LogMessage) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = append(m.Message[:0], data[iNdEx:postIndex]...) 
+ if m.Message == nil { + m.Message = []byte{} + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageType", wireType) + } + var v LogMessage_MessageType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (LogMessage_MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MessageType = &v + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamp = &v + hasFields[0] |= uint64(0x00000004) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.AppId = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.SourceType = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.SourceInstance = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLog(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLog + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("message") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("message_type") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("timestamp") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLog(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthLog + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipLog(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + 
next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthLog = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLog = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("log.proto", fileDescriptorLog) } + +var fileDescriptorLog = []byte{ + // 294 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0xd1, 0x4a, 0xc3, 0x30, + 0x14, 0x86, 0x69, 0xe7, 0x36, 0x76, 0x36, 0xe6, 0x08, 0x08, 0x45, 0xa4, 0xab, 0xbb, 0xb1, 0x37, + 0x66, 0xe0, 0xad, 0x57, 0x0e, 0x14, 0x06, 0x8a, 0x50, 0xe6, 0xf5, 0xc8, 0x9a, 0x2c, 0x16, 0xd6, + 0x9e, 0x90, 0xa4, 0x42, 0x1f, 0xc1, 0x37, 0xf3, 0xd2, 0x47, 0x90, 0x3d, 0x89, 0x2c, 0x8d, 0xd4, + 0xab, 0xfc, 0xff, 0x77, 0x4e, 0xce, 0x9f, 0x1c, 0x18, 0x1d, 0x50, 0x52, 0xa5, 0xd1, 0x22, 0x19, + 0x88, 0x0f, 0x51, 0x59, 0x73, 0x79, 0x2b, 0x0b, 0xfb, 0x5e, 0xef, 0x68, 0x8e, 0xe5, 0x52, 0xa2, + 0xc4, 0xa5, 0x2b, 0xef, 0xea, 0xbd, 0x73, 0xce, 0x38, 0xd5, 0x5e, 0x5b, 0x7c, 0x86, 0x00, 0xcf, + 0x28, 0x5f, 0x84, 0x31, 0x4c, 0x0a, 0x12, 0xc1, 0xb0, 0x6c, 0x65, 0x14, 0x24, 0x61, 0x3a, 0xc9, + 0xfe, 0x2c, 0x79, 0x80, 0x89, 0x97, 0x5b, 0xdb, 0x28, 0x11, 0x85, 0x49, 0x98, 0x4e, 0xef, 0x62, + 0xda, 0xc6, 0xd2, 0x6e, 0x06, 0xf5, 0xe7, 0xa6, 0x51, 0x22, 0x1b, 0x97, 0x9d, 0x21, 0x57, 0x30, + 0xb2, 0x45, 0x29, 0x8c, 0x65, 0xa5, 0x8a, 0x7a, 0x49, 0x98, 0xf6, 0xb2, 0x0e, 0x90, 0x0b, 0x18, + 0x30, 0xa5, 0xb6, 0x05, 0x8f, 0xce, 0x92, 0x20, 0x1d, 0x65, 0x7d, 0xa6, 0xd4, 0x9a, 0x93, 0x39, + 0x8c, 0x0d, 0xd6, 0x3a, 0xf7, 0xb1, 0x7d, 0x57, 0x83, 0x16, 0xb9, 0xa9, 0x37, 0x70, 0xee, 0x1b, + 0x8a, 0xca, 0x58, 0x56, 0xe5, 0x22, 0x1a, 0xb8, 0xa6, 0x69, 0x8b, 0xd7, 0x9e, 0x2e, 0xe6, 0x30, + 0xfe, 0xf7, 0x34, 0x32, 0x84, 0xde, 0xeb, 0xdb, 0x66, 0x16, 0x9c, 0xc4, 0x63, 0x96, 0xcd, 0xc2, + 0xd5, 
0xfd, 0xd7, 0x31, 0x0e, 0xbe, 0x8f, 0x71, 0xf0, 0x73, 0x8c, 0x03, 0xb8, 0x46, 0x2d, 0x69, + 0x7e, 0xc0, 0x9a, 0xef, 0xb1, 0xae, 0xb8, 0x6e, 0x28, 0xd7, 0xa8, 0x0c, 0x56, 0x5c, 0xf8, 0x4f, + 0xaf, 0x4e, 0x9b, 0x7b, 0x62, 0xb9, 0x45, 0xdd, 0xfc, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x4c, + 0x0b, 0xe4, 0x8b, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/cloudfoundry/sonde-go/events/metric.pb.go b/vendor/github.com/cloudfoundry/sonde-go/events/metric.pb.go new file mode 100644 index 000000000000..72d5e64c377f --- /dev/null +++ b/vendor/github.com/cloudfoundry/sonde-go/events/metric.pb.go @@ -0,0 +1,1048 @@ +// Code generated by protoc-gen-gogo. +// source: metric.proto +// DO NOT EDIT! + +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// / A ValueMetric indicates the value of a metric at an instant in time. 
+type ValueMetric struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value *float64 `protobuf:"fixed64,2,req,name=value" json:"value,omitempty"` + Unit *string `protobuf:"bytes,3,req,name=unit" json:"unit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ValueMetric) Reset() { *m = ValueMetric{} } +func (m *ValueMetric) String() string { return proto.CompactTextString(m) } +func (*ValueMetric) ProtoMessage() {} +func (*ValueMetric) Descriptor() ([]byte, []int) { return fileDescriptorMetric, []int{0} } + +func (m *ValueMetric) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ValueMetric) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *ValueMetric) GetUnit() string { + if m != nil && m.Unit != nil { + return *m.Unit + } + return "" +} + +// / A CounterEvent represents the increment of a counter. It contains only the change in the value; it is the responsibility of downstream consumers to maintain the value of the counter. 
+type CounterEvent struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Delta *uint64 `protobuf:"varint,2,req,name=delta" json:"delta,omitempty"` + Total *uint64 `protobuf:"varint,3,opt,name=total" json:"total,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CounterEvent) Reset() { *m = CounterEvent{} } +func (m *CounterEvent) String() string { return proto.CompactTextString(m) } +func (*CounterEvent) ProtoMessage() {} +func (*CounterEvent) Descriptor() ([]byte, []int) { return fileDescriptorMetric, []int{1} } + +func (m *CounterEvent) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CounterEvent) GetDelta() uint64 { + if m != nil && m.Delta != nil { + return *m.Delta + } + return 0 +} + +func (m *CounterEvent) GetTotal() uint64 { + if m != nil && m.Total != nil { + return *m.Total + } + return 0 +} + +// / A ContainerMetric records resource usage of an app in a container. +type ContainerMetric struct { + ApplicationId *string `protobuf:"bytes,1,req,name=applicationId" json:"applicationId,omitempty"` + InstanceIndex *int32 `protobuf:"varint,2,req,name=instanceIndex" json:"instanceIndex,omitempty"` + CpuPercentage *float64 `protobuf:"fixed64,3,req,name=cpuPercentage" json:"cpuPercentage,omitempty"` + MemoryBytes *uint64 `protobuf:"varint,4,req,name=memoryBytes" json:"memoryBytes,omitempty"` + DiskBytes *uint64 `protobuf:"varint,5,req,name=diskBytes" json:"diskBytes,omitempty"` + MemoryBytesQuota *uint64 `protobuf:"varint,6,opt,name=memoryBytesQuota" json:"memoryBytesQuota,omitempty"` + DiskBytesQuota *uint64 `protobuf:"varint,7,opt,name=diskBytesQuota" json:"diskBytesQuota,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ContainerMetric) Reset() { *m = ContainerMetric{} } +func (m *ContainerMetric) String() string { return proto.CompactTextString(m) } +func (*ContainerMetric) ProtoMessage() {} +func (*ContainerMetric) Descriptor() ([]byte, []int) { return 
fileDescriptorMetric, []int{2} } + +func (m *ContainerMetric) GetApplicationId() string { + if m != nil && m.ApplicationId != nil { + return *m.ApplicationId + } + return "" +} + +func (m *ContainerMetric) GetInstanceIndex() int32 { + if m != nil && m.InstanceIndex != nil { + return *m.InstanceIndex + } + return 0 +} + +func (m *ContainerMetric) GetCpuPercentage() float64 { + if m != nil && m.CpuPercentage != nil { + return *m.CpuPercentage + } + return 0 +} + +func (m *ContainerMetric) GetMemoryBytes() uint64 { + if m != nil && m.MemoryBytes != nil { + return *m.MemoryBytes + } + return 0 +} + +func (m *ContainerMetric) GetDiskBytes() uint64 { + if m != nil && m.DiskBytes != nil { + return *m.DiskBytes + } + return 0 +} + +func (m *ContainerMetric) GetMemoryBytesQuota() uint64 { + if m != nil && m.MemoryBytesQuota != nil { + return *m.MemoryBytesQuota + } + return 0 +} + +func (m *ContainerMetric) GetDiskBytesQuota() uint64 { + if m != nil && m.DiskBytesQuota != nil { + return *m.DiskBytesQuota + } + return 0 +} + +func init() { + proto.RegisterType((*ValueMetric)(nil), "events.ValueMetric") + proto.RegisterType((*CounterEvent)(nil), "events.CounterEvent") + proto.RegisterType((*ContainerMetric)(nil), "events.ContainerMetric") +} +func (m *ValueMetric) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ValueMetric) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Name == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } else { + data[i] = 0xa + i++ + i = encodeVarintMetric(data, i, uint64(len(*m.Name))) + i += copy(data[i:], *m.Name) + } + if m.Value == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } else { + data[i] = 0x11 + i++ + i = encodeFixed64Metric(data, i, uint64(math.Float64bits(float64(*m.Value)))) + } + if 
m.Unit == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("unit") + } else { + data[i] = 0x1a + i++ + i = encodeVarintMetric(data, i, uint64(len(*m.Unit))) + i += copy(data[i:], *m.Unit) + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CounterEvent) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CounterEvent) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Name == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } else { + data[i] = 0xa + i++ + i = encodeVarintMetric(data, i, uint64(len(*m.Name))) + i += copy(data[i:], *m.Name) + } + if m.Delta == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("delta") + } else { + data[i] = 0x10 + i++ + i = encodeVarintMetric(data, i, uint64(*m.Delta)) + } + if m.Total != nil { + data[i] = 0x18 + i++ + i = encodeVarintMetric(data, i, uint64(*m.Total)) + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ContainerMetric) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerMetric) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ApplicationId == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("applicationId") + } else { + data[i] = 0xa + i++ + i = encodeVarintMetric(data, i, uint64(len(*m.ApplicationId))) + i += copy(data[i:], *m.ApplicationId) + } + if m.InstanceIndex == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("instanceIndex") + } else { + data[i] = 0x10 + i++ + i = encodeVarintMetric(data, i, 
uint64(*m.InstanceIndex)) + } + if m.CpuPercentage == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("cpuPercentage") + } else { + data[i] = 0x19 + i++ + i = encodeFixed64Metric(data, i, uint64(math.Float64bits(float64(*m.CpuPercentage)))) + } + if m.MemoryBytes == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("memoryBytes") + } else { + data[i] = 0x20 + i++ + i = encodeVarintMetric(data, i, uint64(*m.MemoryBytes)) + } + if m.DiskBytes == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("diskBytes") + } else { + data[i] = 0x28 + i++ + i = encodeVarintMetric(data, i, uint64(*m.DiskBytes)) + } + if m.MemoryBytesQuota != nil { + data[i] = 0x30 + i++ + i = encodeVarintMetric(data, i, uint64(*m.MemoryBytesQuota)) + } + if m.DiskBytesQuota != nil { + data[i] = 0x38 + i++ + i = encodeVarintMetric(data, i, uint64(*m.DiskBytesQuota)) + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Metric(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Metric(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintMetric(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *ValueMetric) Size() (n int) { + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovMetric(uint64(l)) + } + if m.Value != nil { + n += 9 + } + if m.Unit != nil { + l = len(*m.Unit) + n 
+= 1 + l + sovMetric(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CounterEvent) Size() (n int) { + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovMetric(uint64(l)) + } + if m.Delta != nil { + n += 1 + sovMetric(uint64(*m.Delta)) + } + if m.Total != nil { + n += 1 + sovMetric(uint64(*m.Total)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ContainerMetric) Size() (n int) { + var l int + _ = l + if m.ApplicationId != nil { + l = len(*m.ApplicationId) + n += 1 + l + sovMetric(uint64(l)) + } + if m.InstanceIndex != nil { + n += 1 + sovMetric(uint64(*m.InstanceIndex)) + } + if m.CpuPercentage != nil { + n += 9 + } + if m.MemoryBytes != nil { + n += 1 + sovMetric(uint64(*m.MemoryBytes)) + } + if m.DiskBytes != nil { + n += 1 + sovMetric(uint64(*m.DiskBytes)) + } + if m.MemoryBytesQuota != nil { + n += 1 + sovMetric(uint64(*m.MemoryBytesQuota)) + } + if m.DiskBytesQuota != nil { + n += 1 + sovMetric(uint64(*m.DiskBytesQuota)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMetric(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMetric(x uint64) (n int) { + return sovMetric(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ValueMetric) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValueMetric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ValueMetric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetric + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(data[iNdEx-8]) + v |= uint64(data[iNdEx-7]) << 8 + v |= uint64(data[iNdEx-6]) << 16 + v |= uint64(data[iNdEx-5]) << 24 + v |= uint64(data[iNdEx-4]) << 32 + v |= uint64(data[iNdEx-3]) << 40 + v |= uint64(data[iNdEx-2]) << 48 + v |= uint64(data[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.Value = &v2 + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetric + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Unit = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000004) + default: 
+ iNdEx = preIndex + skippy, err := skipMetric(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetric + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("unit") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CounterEvent) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CounterEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CounterEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetric + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Delta", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Delta = &v + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Total = &v + default: + iNdEx = preIndex + skippy, err := skipMetric(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetric + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("delta") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerMetric) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerMetric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerMetric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplicationId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetric + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.ApplicationId = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InstanceIndex", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InstanceIndex = &v + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuPercentage", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(data[iNdEx-8]) + v |= uint64(data[iNdEx-7]) << 8 + v |= uint64(data[iNdEx-6]) << 16 + v |= uint64(data[iNdEx-5]) << 24 + v |= uint64(data[iNdEx-4]) << 32 + v |= uint64(data[iNdEx-3]) << 40 + v |= uint64(data[iNdEx-2]) << 48 + v |= uint64(data[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.CpuPercentage = &v2 + hasFields[0] |= uint64(0x00000004) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemoryBytes = &v + hasFields[0] |= uint64(0x00000008) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DiskBytes = &v + hasFields[0] |= uint64(0x00000010) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryBytesQuota", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
m.MemoryBytesQuota = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskBytesQuota", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetric + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DiskBytesQuota = &v + default: + iNdEx = preIndex + skippy, err := skipMetric(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetric + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("applicationId") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("instanceIndex") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("cpuPercentage") + } + if hasFields[0]&uint64(0x00000008) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("memoryBytes") + } + if hasFields[0]&uint64(0x00000010) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("diskBytes") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetric(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetric + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetric + } + if iNdEx >= l { + return 
0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetric + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMetric + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetric + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMetric(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMetric = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetric = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("metric.proto", fileDescriptorMetric) } + +var fileDescriptorMetric = []byte{ + // 357 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0xaa, 0x13, 0x31, + 0x10, 0x87, 0xd9, 0xed, 0x1f, 0x69, 0xda, 0xaa, 0x04, 0x2f, 0x96, 0x22, 0x65, 0x2d, 0x22, 0x45, + 0x70, 0xfb, 0x06, 0x5e, 0xb4, 0x28, 0x14, 0x51, 0x74, 0x2f, 0xbc, 0x4f, 0x93, 0xe9, 0x1a, 0xdc, + 0xcd, 0x2c, 0xd9, 0x49, 0x71, 0x9f, 0xc4, 0x57, 0xf2, 0xd2, 0x47, 0x90, 0x3e, 0x89, 0x24, 0x29, + 0xda, 0x9e, 0xc3, 0xb9, 0x9b, 0xdf, 0x37, 0x5f, 0x66, 0x27, 
0x4b, 0xd8, 0xac, 0x01, 0xb2, 0x5a, + 0x16, 0xad, 0x45, 0x42, 0x3e, 0x86, 0x13, 0x18, 0xea, 0x16, 0x6f, 0x2a, 0x4d, 0xdf, 0xdc, 0xa1, + 0x90, 0xd8, 0x6c, 0x2a, 0xac, 0x70, 0x13, 0xda, 0x07, 0x77, 0x0c, 0x29, 0x84, 0x50, 0xc5, 0x63, + 0x0b, 0xe6, 0x9c, 0x56, 0xb1, 0x5e, 0x7d, 0x60, 0xd3, 0xaf, 0xa2, 0x76, 0xf0, 0x31, 0xcc, 0xe5, + 0x9c, 0x0d, 0x8d, 0x68, 0x20, 0x4b, 0xf2, 0x74, 0x3d, 0x29, 0x43, 0xcd, 0x9f, 0xb1, 0xd1, 0xc9, + 0x2b, 0x59, 0x9a, 0xa7, 0xeb, 0xa4, 0x8c, 0xc1, 0x9b, 0xce, 0x68, 0xca, 0x06, 0xd1, 0xf4, 0xf5, + 0xea, 0x13, 0x9b, 0xed, 0xd0, 0x19, 0x02, 0xfb, 0xce, 0x2f, 0xf6, 0xd0, 0x34, 0x05, 0x35, 0x89, + 0x30, 0x6d, 0x58, 0xc6, 0xe0, 0x29, 0x21, 0x89, 0x3a, 0x1b, 0xe4, 0x89, 0xa7, 0x21, 0xac, 0x7e, + 0xa6, 0xec, 0xc9, 0x0e, 0x0d, 0x09, 0x6d, 0xc0, 0x5e, 0x36, 0x7c, 0xc9, 0xe6, 0xa2, 0x6d, 0x6b, + 0x2d, 0x05, 0x69, 0x34, 0x7b, 0x75, 0x19, 0x7e, 0x0b, 0xbd, 0xa5, 0x4d, 0x47, 0xc2, 0x48, 0xd8, + 0x1b, 0x05, 0x3f, 0xc2, 0xd7, 0x46, 0xe5, 0x2d, 0xf4, 0x96, 0x6c, 0xdd, 0x67, 0xb0, 0x12, 0x0c, + 0x89, 0x0a, 0xc2, 0x65, 0x92, 0xf2, 0x16, 0xf2, 0x9c, 0x4d, 0x1b, 0x68, 0xd0, 0xf6, 0xdb, 0x9e, + 0xa0, 0xcb, 0x86, 0x61, 0xef, 0x6b, 0xc4, 0x9f, 0xb3, 0x89, 0xd2, 0xdd, 0xf7, 0xd8, 0x1f, 0x85, + 0xfe, 0x7f, 0xc0, 0x5f, 0xb3, 0xa7, 0x57, 0xf2, 0x17, 0x87, 0x24, 0xb2, 0x71, 0xb8, 0xe6, 0x3d, + 0xce, 0x5f, 0xb1, 0xc7, 0xff, 0x0e, 0x46, 0xf3, 0x51, 0x30, 0xef, 0xd0, 0xed, 0xdb, 0x5f, 0xe7, + 0x65, 0xf2, 0xfb, 0xbc, 0x4c, 0xfe, 0x9c, 0x97, 0x09, 0x7b, 0x81, 0xb6, 0x2a, 0x64, 0x8d, 0x4e, + 0x1d, 0xd1, 0x19, 0x65, 0xfb, 0x42, 0x59, 0x6c, 0x3b, 0x34, 0x0a, 0x8a, 0xf8, 0x44, 0xb6, 0xf3, + 0xf8, 0xfb, 0xde, 0x0b, 0x49, 0x68, 0xfb, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc6, 0xbe, 0x10, + 0xbe, 0x48, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/cloudfoundry/sonde-go/events/uuid.pb.go b/vendor/github.com/cloudfoundry/sonde-go/events/uuid.pb.go new file mode 100644 index 000000000000..6ef42e3ec12f --- /dev/null +++ b/vendor/github.com/cloudfoundry/sonde-go/events/uuid.pb.go 
@@ -0,0 +1,362 @@ +// Code generated by protoc-gen-gogo. +// source: uuid.proto +// DO NOT EDIT! + +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// / Type representing a 128-bit UUID. +// +// The bytes of the UUID should be packed in little-endian **byte** (not bit) order. For example, the UUID `f47ac10b-58cc-4372-a567-0e02b2c3d479` should be encoded as `UUID{ low: 0x7243cc580bc17af4, high: 0x79d4c3b2020e67a5 }` +type UUID struct { + Low *uint64 `protobuf:"varint,1,req,name=low" json:"low,omitempty"` + High *uint64 `protobuf:"varint,2,req,name=high" json:"high,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UUID) Reset() { *m = UUID{} } +func (m *UUID) String() string { return proto.CompactTextString(m) } +func (*UUID) ProtoMessage() {} +func (*UUID) Descriptor() ([]byte, []int) { return fileDescriptorUuid, []int{0} } + +func (m *UUID) GetLow() uint64 { + if m != nil && m.Low != nil { + return *m.Low + } + return 0 +} + +func (m *UUID) GetHigh() uint64 { + if m != nil && m.High != nil { + return *m.High + } + return 0 +} + +func init() { + proto.RegisterType((*UUID)(nil), "events.UUID") +} +func (m *UUID) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *UUID) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Low == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("low") + } else { + data[i] = 0x8 + i++ + i = encodeVarintUuid(data, i, uint64(*m.Low)) + } + if m.High == nil { + return 0, 
github_com_gogo_protobuf_proto.NewRequiredNotSetError("high") + } else { + data[i] = 0x10 + i++ + i = encodeVarintUuid(data, i, uint64(*m.High)) + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Uuid(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Uuid(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintUuid(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *UUID) Size() (n int) { + var l int + _ = l + if m.Low != nil { + n += 1 + sovUuid(uint64(*m.Low)) + } + if m.High != nil { + n += 1 + sovUuid(uint64(*m.High)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovUuid(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozUuid(x uint64) (n int) { + return sovUuid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *UUID) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowUuid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UUID: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UUID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Low", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowUuid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Low = &v + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field High", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowUuid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.High = &v + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipUuid(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthUuid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("low") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("high") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipUuid(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowUuid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowUuid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowUuid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthUuid + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowUuid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipUuid(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthUuid = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowUuid = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("uuid.proto", fileDescriptorUuid) } + +var fileDescriptorUuid = []byte{ + // 171 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x2d, 0xcd, 0x4c, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4b, 0x2d, 0x4b, 0xcd, 0x2b, 0x29, 0x96, 0xd2, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, + 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0xa4, 0xc3, 0xc5, + 0x12, 0x1a, 0xea, 0xe9, 0x22, 0x24, 0xc0, 0xc5, 0x9c, 0x93, 0x5f, 0x2e, 0xc1, 0xa8, 0xc0, 0xa4, + 0xc1, 0x12, 0x04, 0x62, 0x0a, 0x09, 0x71, 0xb1, 0x64, 0x64, 0xa6, 0x67, 0x48, 0x30, 0x81, 0x85, + 0xc0, 0x6c, 0x27, 0x9b, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, + 0x91, 0x4b, 0x31, 0xbf, 0x28, 0x5d, 0x2f, 0x39, 0x27, 0xbf, 0x34, 0x25, 0x2d, 0xbf, 0x34, 0x2f, + 0xa5, 0xa8, 0x52, 0x2f, 0xa5, 0x28, 0xbf, 0xa0, 0x38, 0x3f, 0x2f, 0x25, 0x55, 0x0f, 0xe2, 0x1a, + 0x27, 0xee, 0xd0, 0xd2, 0xcc, 0x14, 0xb7, 0xc4, 0xe4, 0x92, 0xfc, 0xa2, 0x4a, 0x40, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xc1, 0x49, 0x67, 0x8e, 0xaf, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE new file mode 100644 index 000000000000..fbff658f70d9 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/Makefile b/vendor/github.com/mailru/easyjson/Makefile new file mode 100644 index 000000000000..80449f0d2749 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/Makefile @@ -0,0 +1,56 @@ +all: test + +clean: + rm -rf bin + rm -rf tests/*_easyjson.go + rm -rf benchmark/*_easyjson.go + +build: + go build -i -o ./bin/easyjson ./easyjson + +generate: build + bin/easyjson -stubs \ + ./tests/snake.go \ + ./tests/data.go \ + ./tests/omitempty.go \ + ./tests/nothing.go \ + ./tests/named_type.go \ + ./tests/custom_map_key_type.go \ + ./tests/embedded_type.go \ + ./tests/reference_to_pointer.go \ + ./tests/html.go \ + ./tests/unknown_fields.go \ + + bin/easyjson -all ./tests/data.go + bin/easyjson -all ./tests/nothing.go + bin/easyjson -all ./tests/errors.go + bin/easyjson -all ./tests/html.go + bin/easyjson -snake_case ./tests/snake.go + bin/easyjson -omit_empty ./tests/omitempty.go + bin/easyjson -build_tags=use_easyjson ./benchmark/data.go + bin/easyjson ./tests/nested_easy.go + bin/easyjson ./tests/named_type.go + bin/easyjson ./tests/custom_map_key_type.go + bin/easyjson ./tests/embedded_type.go + bin/easyjson ./tests/reference_to_pointer.go + bin/easyjson 
./tests/key_marshaler_map.go + bin/easyjson -disallow_unknown_fields ./tests/disallow_unknown.go + bin/easyjson ./tests/unknown_fields.go + +test: generate + go test \ + ./tests \ + ./jlexer \ + ./gen \ + ./buffer + cd benchmark && go test -benchmem -tags use_easyjson -bench . + golint -set_exit_status ./tests/*_easyjson.go + +bench-other: generate + cd benchmark && make + +bench-python: + benchmark/ujson.sh + + +.PHONY: clean generate test build diff --git a/vendor/github.com/mailru/easyjson/README.md b/vendor/github.com/mailru/easyjson/README.md new file mode 100644 index 000000000000..3bdcf2d06c23 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/README.md @@ -0,0 +1,336 @@ +# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson) [![Go Report Card](https://goreportcard.com/badge/github.com/mailru/easyjson)](https://goreportcard.com/report/github.com/mailru/easyjson) + +Package easyjson provides a fast and easy way to marshal/unmarshal Go structs +to/from JSON without the use of reflection. In performance tests, easyjson +outperforms the standard `encoding/json` package by a factor of 4-5x, and other +JSON encoding packages by a factor of 2-3x. + +easyjson aims to keep generated Go code simple enough so that it can be easily +optimized or fixed. Another goal is to provide users with the ability to +customize the generated code by providing options not available with the +standard `encoding/json` package, such as generating "snake_case" names or +enabling `omitempty` behavior by default. + +## Usage +```sh +# install +go get -u github.com/mailru/easyjson/... + +# run +easyjson -all .go +``` + +The above will generate `_easyjson.go` containing the appropriate marshaler and +unmarshaler funcs for all structs contained in `.go`. + +Please note that easyjson requires a full Go build environment and the `GOPATH` +environment variable to be set. 
This is because easyjson code generation +invokes `go run` on a temporary file (an approach to code generation borrowed +from [ffjson](https://github.com/pquerna/ffjson)). + +## Options +```txt +Usage of easyjson: + -all + generate marshaler/unmarshalers for all structs in a file + -build_tags string + build tags to add to generated file + -leave_temps + do not delete temporary files + -no_std_marshalers + don't generate MarshalJSON/UnmarshalJSON funcs + -noformat + do not run 'gofmt -w' on output file + -omit_empty + omit empty fields by default + -output_filename string + specify the filename of the output + -pkg + process the whole package instead of just the given file + -snake_case + use snake_case names instead of CamelCase by default + -lower_camel_case + use lowerCamelCase instead of CamelCase by default + -stubs + only generate stubs for marshaler/unmarshaler funcs + -disallow_unknown_fields + return error if some unknown field in json appeared +``` + +Using `-all` will generate marshalers/unmarshalers for all Go structs in the +file. If `-all` is not provided, then only those structs whose preceding +comment starts with `easyjson:json` will have marshalers/unmarshalers +generated. For example: + +```go +//easyjson:json +type A struct {} +``` + +Additional option notes: + +* `-snake_case` tells easyjson to generate snake\_case field names by default + (unless overridden by a field tag). The CamelCase to snake\_case conversion + algorithm should work in most cases (ie, HTTPVersion will be converted to + "http_version"). + +* `-build_tags` will add the specified build tags to generated Go sources. + +## Generated Marshaler/Unmarshaler Funcs + +For Go struct types, easyjson generates the funcs `MarshalEasyJSON` / +`UnmarshalEasyJSON` for marshaling/unmarshaling JSON. 
In turn, these satisify +the `easyjson.Marshaler` and `easyjson.Unmarshaler` interfaces and when used in +conjunction with `easyjson.Marshal` / `easyjson.Unmarshal` avoid unnecessary +reflection / type assertions during marshaling/unmarshaling to/from JSON for Go +structs. + +easyjson also generates `MarshalJSON` and `UnmarshalJSON` funcs for Go struct +types compatible with the standard `json.Marshaler` and `json.Unmarshaler` +interfaces. Please be aware that using the standard `json.Marshal` / +`json.Unmarshal` for marshaling/unmarshaling will incur a significant +performance penalty when compared to using `easyjson.Marshal` / +`easyjson.Unmarshal`. + +Additionally, easyjson exposes utility funcs that use the `MarshalEasyJSON` and +`UnmarshalEasyJSON` for marshaling/unmarshaling to and from standard readers +and writers. For example, easyjson provides `easyjson.MarshalToHTTPResponseWriter` +which marshals to the standard `http.ResponseWriter`. Please see the [GoDoc +listing](https://godoc.org/github.com/mailru/easyjson) for the full listing of +utility funcs that are available. + +## Controlling easyjson Marshaling and Unmarshaling Behavior + +Go types can provide their own `MarshalEasyJSON` and `UnmarshalEasyJSON` funcs +that satisify the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces. +These will be used by `easyjson.Marshal` and `easyjson.Unmarshal` when defined +for a Go type. + +Go types can also satisify the `easyjson.Optional` interface, which allows the +type to define its own `omitempty` logic. + +## Type Wrappers + +easyjson provides additional type wrappers defined in the `easyjson/opt` +package. These wrap the standard Go primitives and in turn satisify the +easyjson interfaces. + +The `easyjson/opt` type wrappers are useful when needing to distinguish between +a missing value and/or when needing to specifying a default value. 
Type +wrappers allow easyjson to avoid additional pointers and heap allocations and +can significantly increase performance when used properly. + +## Memory Pooling + +easyjson uses a buffer pool that allocates data in increasing chunks from 128 +to 32768 bytes. Chunks of 512 bytes and larger will be reused with the help of +`sync.Pool`. The maximum size of a chunk is bounded to reduce redundant memory +allocation and to allow larger reusable buffers. + +easyjson's custom allocation buffer pool is defined in the `easyjson/buffer` +package, and the default behavior pool behavior can be modified (if necessary) +through a call to `buffer.Init()` prior to any marshaling or unmarshaling. +Please see the [GoDoc listing](https://godoc.org/github.com/mailru/easyjson/buffer) +for more information. + +## Issues, Notes, and Limitations + +* easyjson is still early in its development. As such, there are likely to be + bugs and missing features when compared to `encoding/json`. In the case of a + missing feature or bug, please create a GitHub issue. Pull requests are + welcome! + +* Unlike `encoding/json`, object keys are case-sensitive. Case-insensitive + matching is not currently provided due to the significant performance hit + when doing case-insensitive key matching. In the future, case-insensitive + object key matching may be provided via an option to the generator. + +* easyjson makes use of `unsafe`, which simplifies the code and + provides significant performance benefits by allowing no-copy + conversion from `[]byte` to `string`. That said, `unsafe` is used + only when unmarshaling and parsing JSON, and any `unsafe` operations + / memory allocations done will be safely deallocated by + easyjson. Set the build tag `easyjson_nounsafe` to compile it + without `unsafe`. + +* easyjson is compatible with Google App Engine. 
The `appengine` build + tag (set by App Engine's environment) will automatically disable the + use of `unsafe`, which is not allowed in App Engine's Standard + Environment. Note that the use with App Engine is still experimental. + +* Floats are formatted using the default precision from Go's `strconv` package. + As such, easyjson will not correctly handle high precision floats when + marshaling/unmarshaling JSON. Note, however, that there are very few/limited + uses where this behavior is not sufficient for general use. That said, a + different package may be needed if precise marshaling/unmarshaling of high + precision floats to/from JSON is required. + +* While unmarshaling, the JSON parser does the minimal amount of work needed to + skip over unmatching parens, and as such full validation is not done for the + entire JSON value being unmarshaled/parsed. + +* Currently there is no true streaming support for encoding/decoding as + typically for many uses/protocols the final, marshaled length of the JSON + needs to be known prior to sending the data. Currently this is not possible + with easyjson's architecture. + +* easyjson parser and codegen based on reflection, so it wont works on `package main` + files, because they cant be imported by parser. + +## Benchmarks + +Most benchmarks were done using the example +[13kB example JSON](https://dev.twitter.com/rest/reference/get/search/tweets) +(9k after eliminating whitespace). This example is similar to real-world data, +is well-structured, and contains a healthy variety of different types, making +it ideal for JSON serialization benchmarks. + +Note: + +* For small request benchmarks, an 80 byte portion of the above example was + used. + +* For large request marshaling benchmarks, a struct containing 50 regular + samples was used, making a ~500kB output JSON. + +* Benchmarks are showing the results of easyjson's default behaviour, + which makes use of `unsafe`. 
+ +Benchmarks are available in the repository and can be run by invoking `make`. + +### easyjson vs. encoding/json + +easyjson is roughly 5-6 times faster than the standard `encoding/json` for +unmarshaling, and 3-4 times faster for non-concurrent marshaling. Concurrent +marshaling is 6-7x faster if marshaling to a writer. + +### easyjson vs. ffjson + +easyjson uses the same approach for JSON marshaling as +[ffjson](https://github.com/pquerna/ffjson), but takes a significantly +different approach to lexing and parsing JSON during unmarshaling. This means +easyjson is roughly 2-3x faster for unmarshaling and 1.5-2x faster for +non-concurrent unmarshaling. + +As of this writing, `ffjson` seems to have issues when used concurrently: +specifically, large request pooling hurts `ffjson`'s performance and causes +scalability issues. These issues with `ffjson` can likely be fixed, but as of +writing remain outstanding/known issues with `ffjson`. + +easyjson and `ffjson` have similar performance for small requests, however +easyjson outperforms `ffjson` by roughly 2-5x times for large requests when +used with a writer. + +### easyjson vs. go/codec + +[go/codec](https://github.com/ugorji/go) provides +compile-time helpers for JSON generation. In this case, helpers do not work +like marshalers as they are encoding-independent. + +easyjson is generally 2x faster than `go/codec` for non-concurrent benchmarks +and about 3x faster for concurrent encoding (without marshaling to a writer). + +In an attempt to measure marshaling performance of `go/codec` (as opposed to +allocations/memcpy/writer interface invocations), a benchmark was done with +resetting length of a byte slice rather than resetting the whole slice to nil. +However, the optimization in this exact form may not be applicable in practice, +since the memory is not freed between marshaling operations. 
+ +### easyjson vs 'ujson' python module + +[ujson](https://github.com/esnme/ultrajson) is using C code for parsing, so it +is interesting to see how plain golang compares to that. It is imporant to note +that the resulting object for python is slower to access, since the library +parses JSON object into dictionaries. + +easyjson is slightly faster for unmarshaling and 2-3x faster than `ujson` for +marshaling. + +### Benchmark Results + +`ffjson` results are from February 4th, 2016, using the latest `ffjson` and go1.6. +`go/codec` results are from March 4th, 2016, using the latest `go/codec` and go1.6. + +#### Unmarshaling + +| lib | json size | MB/s | allocs/op | B/op | +|:---------|:----------|-----:|----------:|------:| +| standard | regular | 22 | 218 | 10229 | +| standard | small | 9.7 | 14 | 720 | +| | | | | | +| easyjson | regular | 125 | 128 | 9794 | +| easyjson | small | 67 | 3 | 128 | +| | | | | | +| ffjson | regular | 66 | 141 | 9985 | +| ffjson | small | 17.6 | 10 | 488 | +| | | | | | +| codec | regular | 55 | 434 | 19299 | +| codec | small | 29 | 7 | 336 | +| | | | | | +| ujson | regular | 103 | N/A | N/A | + +#### Marshaling, one goroutine. 
+ +| lib | json size | MB/s | allocs/op | B/op | +|:----------|:----------|-----:|----------:|------:| +| standard | regular | 75 | 9 | 23256 | +| standard | small | 32 | 3 | 328 | +| standard | large | 80 | 17 | 1.2M | +| | | | | | +| easyjson | regular | 213 | 9 | 10260 | +| easyjson* | regular | 263 | 8 | 742 | +| easyjson | small | 125 | 1 | 128 | +| easyjson | large | 212 | 33 | 490k | +| easyjson* | large | 262 | 25 | 2879 | +| | | | | | +| ffjson | regular | 122 | 153 | 21340 | +| ffjson** | regular | 146 | 152 | 4897 | +| ffjson | small | 36 | 5 | 384 | +| ffjson** | small | 64 | 4 | 128 | +| ffjson | large | 134 | 7317 | 818k | +| ffjson** | large | 125 | 7320 | 827k | +| | | | | | +| codec | regular | 80 | 17 | 33601 | +| codec*** | regular | 108 | 9 | 1153 | +| codec | small | 42 | 3 | 304 | +| codec*** | small | 56 | 1 | 48 | +| codec | large | 73 | 483 | 2.5M | +| codec*** | large | 103 | 451 | 66007 | +| | | | | | +| ujson | regular | 92 | N/A | N/A | + +\* marshaling to a writer, +\*\* using `ffjson.Pool()`, +\*\*\* reusing output slice instead of resetting it to nil + +#### Marshaling, concurrent. 
+ +| lib | json size | MB/s | allocs/op | B/op | +|:----------|:----------|-----:|----------:|------:| +| standard | regular | 252 | 9 | 23257 | +| standard | small | 124 | 3 | 328 | +| standard | large | 289 | 17 | 1.2M | +| | | | | | +| easyjson | regular | 792 | 9 | 10597 | +| easyjson* | regular | 1748 | 8 | 779 | +| easyjson | small | 333 | 1 | 128 | +| easyjson | large | 718 | 36 | 548k | +| easyjson* | large | 2134 | 25 | 4957 | +| | | | | | +| ffjson | regular | 301 | 153 | 21629 | +| ffjson** | regular | 707 | 152 | 5148 | +| ffjson | small | 62 | 5 | 384 | +| ffjson** | small | 282 | 4 | 128 | +| ffjson | large | 438 | 7330 | 1.0M | +| ffjson** | large | 131 | 7319 | 820k | +| | | | | | +| codec | regular | 183 | 17 | 33603 | +| codec*** | regular | 671 | 9 | 1157 | +| codec | small | 147 | 3 | 304 | +| codec*** | small | 299 | 1 | 48 | +| codec | large | 190 | 483 | 2.5M | +| codec*** | large | 752 | 451 | 77574 | + +\* marshaling to a writer, +\*\* using `ffjson.Pool()`, +\*\*\* reusing output slice instead of resetting it to nil diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go new file mode 100644 index 000000000000..07fb4bc1f7bf --- /dev/null +++ b/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -0,0 +1,270 @@ +// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to +// reduce copying and to allow reuse of individual chunks. +package buffer + +import ( + "io" + "sync" +) + +// PoolConfig contains configuration for the allocation and reuse strategy. +type PoolConfig struct { + StartSize int // Minimum chunk size that is allocated. + PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. + MaxSize int // Maximum chunk size that will be allocated. +} + +var config = PoolConfig{ + StartSize: 128, + PooledSize: 512, + MaxSize: 32768, +} + +// Reuse pool: chunk size -> pool. 
+var buffers = map[int]*sync.Pool{} + +func initBuffers() { + for l := config.PooledSize; l <= config.MaxSize; l *= 2 { + buffers[l] = new(sync.Pool) + } +} + +func init() { + initBuffers() +} + +// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. +func Init(cfg PoolConfig) { + config = cfg + initBuffers() +} + +// putBuf puts a chunk to reuse pool if it can be reused. +func putBuf(buf []byte) { + size := cap(buf) + if size < config.PooledSize { + return + } + if c := buffers[size]; c != nil { + c.Put(buf[:0]) + } +} + +// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. +func getBuf(size int) []byte { + if size < config.PooledSize { + return make([]byte, 0, size) + } + + if c := buffers[size]; c != nil { + v := c.Get() + if v != nil { + return v.([]byte) + } + } + return make([]byte, 0, size) +} + +// Buffer is a buffer optimized for serialization without extra copying. +type Buffer struct { + + // Buf is the current chunk that can be used for serialization. + Buf []byte + + toPool []byte + bufs [][]byte +} + +// EnsureSpace makes sure that the current chunk contains at least s free bytes, +// possibly creating a new chunk. +func (b *Buffer) EnsureSpace(s int) { + if cap(b.Buf)-len(b.Buf) >= s { + return + } + l := len(b.Buf) + if l > 0 { + if cap(b.toPool) != cap(b.Buf) { + // Chunk was reallocated, toPool can be pooled. + putBuf(b.toPool) + } + if cap(b.bufs) == 0 { + b.bufs = make([][]byte, 0, 8) + } + b.bufs = append(b.bufs, b.Buf) + l = cap(b.toPool) * 2 + } else { + l = config.StartSize + } + + if l > config.MaxSize { + l = config.MaxSize + } + b.Buf = getBuf(l) + b.toPool = b.Buf +} + +// AppendByte appends a single byte to buffer. +func (b *Buffer) AppendByte(data byte) { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + b.Buf = append(b.Buf, data) +} + +// AppendBytes appends a byte slice to buffer. 
+func (b *Buffer) AppendBytes(data []byte) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// AppendBytes appends a string to buffer. +func (b *Buffer) AppendString(data string) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// Size computes the size of a buffer by adding sizes of every chunk. +func (b *Buffer) Size() int { + size := len(b.Buf) + for _, buf := range b.bufs { + size += len(buf) + } + return size +} + +// DumpTo outputs the contents of a buffer to a writer and resets the buffer. +func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { + var n int + for _, buf := range b.bufs { + if err == nil { + n, err = w.Write(buf) + written += n + } + putBuf(buf) + } + + if err == nil { + n, err = w.Write(b.Buf) + written += n + } + putBuf(b.toPool) + + b.bufs = nil + b.Buf = nil + b.toPool = nil + + return +} + +// BuildBytes creates a single byte slice with all the contents of the buffer. Data is +// copied if it does not fit in a single chunk. You can optionally provide one byte +// slice as argument that it will try to reuse. +func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { + if len(b.bufs) == 0 { + ret := b.Buf + b.toPool = nil + b.Buf = nil + return ret + } + + var ret []byte + size := b.Size() + + // If we got a buffer as argument and it is big enought, reuse it. + if len(reuse) == 1 && cap(reuse[0]) >= size { + ret = reuse[0][:0] + } else { + ret = make([]byte, 0, size) + } + for _, buf := range b.bufs { + ret = append(ret, buf...) + putBuf(buf) + } + + ret = append(ret, b.Buf...) 
+ putBuf(b.toPool) + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} + +type readCloser struct { + offset int + bufs [][]byte +} + +func (r *readCloser) Read(p []byte) (n int, err error) { + for _, buf := range r.bufs { + // Copy as much as we can. + x := copy(p[n:], buf[r.offset:]) + n += x // Increment how much we filled. + + // Did we empty the whole buffer? + if r.offset+x == len(buf) { + // On to the next buffer. + r.offset = 0 + r.bufs = r.bufs[1:] + + // We can release this buffer. + putBuf(buf) + } else { + r.offset += x + } + + if n == len(p) { + break + } + } + // No buffers left or nothing read? + if len(r.bufs) == 0 { + err = io.EOF + } + return +} + +func (r *readCloser) Close() error { + // Release all remaining buffers. + for _, buf := range r.bufs { + putBuf(buf) + } + // In case Close gets called multiple times. + r.bufs = nil + + return nil +} + +// ReadCloser creates an io.ReadCloser with all the contents of the buffer. +func (b *Buffer) ReadCloser() io.ReadCloser { + ret := &readCloser{0, append(b.bufs, b.Buf)} + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} diff --git a/vendor/github.com/mailru/easyjson/go.mod b/vendor/github.com/mailru/easyjson/go.mod new file mode 100644 index 000000000000..7bc4a6584425 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/go.mod @@ -0,0 +1,3 @@ +module github.com/mailru/easyjson + +go 1.12 diff --git a/vendor/github.com/mailru/easyjson/helpers.go b/vendor/github.com/mailru/easyjson/helpers.go new file mode 100644 index 000000000000..04ac63562870 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/helpers.go @@ -0,0 +1,88 @@ +// Package easyjson contains marshaler/unmarshaler interfaces and helper functions. +package easyjson + +import ( + "io" + "io/ioutil" + "net/http" + "strconv" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// Marshaler is an easyjson-compatible marshaler interface. 
+type Marshaler interface { + MarshalEasyJSON(w *jwriter.Writer) +} + +// Marshaler is an easyjson-compatible unmarshaler interface. +type Unmarshaler interface { + UnmarshalEasyJSON(w *jlexer.Lexer) +} + +// Optional defines an undefined-test method for a type to integrate with 'omitempty' logic. +type Optional interface { + IsDefined() bool +} + +// UnknownsUnmarshaler provides a method to unmarshal unknown struct fileds and save them as you want +type UnknownsUnmarshaler interface { + UnmarshalUnknown(in *jlexer.Lexer, key string) +} + +// UnknownsMarshaler provides a method to write additional struct fields +type UnknownsMarshaler interface { + MarshalUnknowns(w *jwriter.Writer, first bool) +} + +// Marshal returns data as a single byte slice. Method is suboptimal as the data is likely to be copied +// from a chain of smaller chunks. +func Marshal(v Marshaler) ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.BuildBytes() +} + +// MarshalToWriter marshals the data to an io.Writer. +func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) { + jw := jwriter.Writer{} + v.MarshalEasyJSON(&jw) + return jw.DumpTo(w) +} + +// MarshalToHTTPResponseWriter sets Content-Length and Content-Type headers for the +// http.ResponseWriter, and send the data to the writer. started will be equal to +// false if an error occurred before any http.ResponseWriter methods were actually +// invoked (in this case a 500 reply is possible). +func MarshalToHTTPResponseWriter(v Marshaler, w http.ResponseWriter) (started bool, written int, err error) { + jw := jwriter.Writer{} + v.MarshalEasyJSON(&jw) + if jw.Error != nil { + return false, 0, jw.Error + } + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(jw.Size())) + + started = true + written, err = jw.DumpTo(w) + return +} + +// Unmarshal decodes the JSON in data into the object. 
+func Unmarshal(data []byte, v Unmarshaler) error { + l := jlexer.Lexer{Data: data} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// UnmarshalFromReader reads all the data in the reader and decodes as JSON into the object. +func UnmarshalFromReader(r io.Reader, v Unmarshaler) error { + data, err := ioutil.ReadAll(r) + if err != nil { + return err + } + l := jlexer.Lexer{Data: data} + v.UnmarshalEasyJSON(&l) + return l.Error() +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go new file mode 100644 index 000000000000..ff7b27c5b203 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go @@ -0,0 +1,24 @@ +// This file will only be included to the build if neither +// easyjson_nounsafe nor appengine build tag is set. See README notes +// for more details. + +//+build !easyjson_nounsafe +//+build !appengine + +package jlexer + +import ( + "reflect" + "unsafe" +) + +// bytesToStr creates a string pointing at the slice to avoid copying. +// +// Warning: the string returned by the function should be used with care, as the whole input data +// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data +// may be garbage-collected even when the string exists. +func bytesToStr(data []byte) string { + h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} + return *(*string)(unsafe.Pointer(&shdr)) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go new file mode 100644 index 000000000000..864d1be67638 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go @@ -0,0 +1,13 @@ +// This file is included to the build if any of the buildtags below +// are defined. Refer to README notes for more details. 
+ +//+build easyjson_nounsafe appengine + +package jlexer + +// bytesToStr creates a string normally from []byte +// +// Note that this method is roughly 1.5x slower than using the 'unsafe' method. +func bytesToStr(data []byte) string { + return string(data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go new file mode 100644 index 000000000000..e90ec40d05f5 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/error.go @@ -0,0 +1,15 @@ +package jlexer + +import "fmt" + +// LexerError implements the error interface and represents all possible errors that can be +// generated during parsing the JSON data. +type LexerError struct { + Reason string + Offset int + Data string +} + +func (l *LexerError) Error() string { + return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go new file mode 100644 index 000000000000..ddd376b844cb --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -0,0 +1,1182 @@ +// Package jlexer contains a JSON lexer implementation. +// +// It is expected that it is mostly used with generated parser code, so the interface is tuned +// for a parser that knows what kind of data is expected. +package jlexer + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// tokenKind determines type of a token. +type tokenKind byte + +const ( + tokenUndef tokenKind = iota // No token. + tokenDelim // Delimiter: one of '{', '}', '[' or ']'. + tokenString // A string literal, e.g. "abc\u1234" + tokenNumber // Number literal, e.g. 1.5e5 + tokenBool // Boolean literal: true or false. + tokenNull // null keyword. +) + +// token describes a single token: type, position in the input and value. 
+type token struct { + kind tokenKind // Type of a token. + + boolValue bool // Value if a boolean literal token. + byteValue []byte // Raw value of a token. + delimValue byte +} + +// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. +type Lexer struct { + Data []byte // Input data given to the lexer. + + start int // Start of the current token. + pos int // Current unscanned position in the input stream. + token token // Last scanned token, if token.kind != tokenUndef. + + firstElement bool // Whether current element is the first in array or an object. + wantSep byte // A comma or a colon character, which need to occur before a token. + + UseMultipleErrors bool // If we want to use multiple errors. + fatalError error // Fatal error occurred during lexing. It is usually a syntax error. + multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. +} + +// FetchToken scans the input for the next token. +func (r *Lexer) FetchToken() { + r.token.kind = tokenUndef + r.start = r.pos + + // Check if r.Data has r.pos element + // If it doesn't, it mean corrupted input data + if len(r.Data) < r.pos { + r.errParse("Unexpected end of data") + return + } + // Determine the type of a token by skipping whitespace and reading the + // first character. 
+ for _, c := range r.Data[r.pos:] { + switch c { + case ':', ',': + if r.wantSep == c { + r.pos++ + r.start++ + r.wantSep = 0 + } else { + r.errSyntax() + } + + case ' ', '\t', '\r', '\n': + r.pos++ + r.start++ + + case '"': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenString + r.fetchString() + return + + case '{', '[': + if r.wantSep != 0 { + r.errSyntax() + } + r.firstElement = true + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '}', ']': + if !r.firstElement && (r.wantSep != ',') { + r.errSyntax() + } + r.wantSep = 0 + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + if r.wantSep != 0 { + r.errSyntax() + } + r.token.kind = tokenNumber + r.fetchNumber() + return + + case 'n': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenNull + r.fetchNull() + return + + case 't': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = true + r.fetchTrue() + return + + case 'f': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = false + r.fetchFalse() + return + + default: + r.errSyntax() + return + } + } + r.fatalError = io.EOF + return +} + +// isTokenEnd returns true if the char can follow a non-delimiter token +func isTokenEnd(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' +} + +// fetchNull fetches and checks remaining bytes of null keyword. +func (r *Lexer) fetchNull() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'u' || + r.Data[r.pos-2] != 'l' || + r.Data[r.pos-1] != 'l' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchTrue fetches and checks remaining bytes of true keyword. 
+func (r *Lexer) fetchTrue() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'r' || + r.Data[r.pos-2] != 'u' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchFalse fetches and checks remaining bytes of false keyword. +func (r *Lexer) fetchFalse() { + r.pos += 5 + if r.pos > len(r.Data) || + r.Data[r.pos-4] != 'a' || + r.Data[r.pos-3] != 'l' || + r.Data[r.pos-2] != 's' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 5 + r.errSyntax() + } +} + +// fetchNumber scans a number literal token. +func (r *Lexer) fetchNumber() { + hasE := false + afterE := false + hasDot := false + + r.pos++ + for i, c := range r.Data[r.pos:] { + switch { + case c >= '0' && c <= '9': + afterE = false + case c == '.' && !hasDot: + hasDot = true + case (c == 'e' || c == 'E') && !hasE: + hasE = true + hasDot = true + afterE = true + case (c == '+' || c == '-') && afterE: + afterE = false + default: + r.pos += i + if !isTokenEnd(c) { + r.errSyntax() + } else { + r.token.byteValue = r.Data[r.start:r.pos] + } + return + } + } + + r.pos = len(r.Data) + r.token.byteValue = r.Data[r.start:] +} + +// findStringLen tries to scan into the string literal for ending quote char to determine required size. +// The size will be exact if no escapes are present and may be inexact if there are escaped chars. +func findStringLen(data []byte) (isValid, hasEscapes bool, length int) { + delta := 0 + + for i := 0; i < len(data); i++ { + switch data[i] { + case '\\': + i++ + delta++ + if i < len(data) && data[i] == 'u' { + delta++ + } + case '"': + return true, (delta > 0), (i - delta) + } + } + + return false, false, len(data) +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. 
+func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var val rune + for i := 2; i < len(s) && i < 6; i++ { + var v byte + c := s[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v = c - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + v = c - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + v = c - 'A' + 10 + default: + return -1 + } + + val <<= 4 + val |= rune(v) + } + return val +} + +// processEscape processes a single escape sequence and returns number of bytes processed. +func (r *Lexer) processEscape(data []byte) (int, error) { + if len(data) < 2 { + return 0, fmt.Errorf("syntax error at %v", string(data)) + } + + c := data[1] + switch c { + case '"', '/', '\\': + r.token.byteValue = append(r.token.byteValue, c) + return 2, nil + case 'b': + r.token.byteValue = append(r.token.byteValue, '\b') + return 2, nil + case 'f': + r.token.byteValue = append(r.token.byteValue, '\f') + return 2, nil + case 'n': + r.token.byteValue = append(r.token.byteValue, '\n') + return 2, nil + case 'r': + r.token.byteValue = append(r.token.byteValue, '\r') + return 2, nil + case 't': + r.token.byteValue = append(r.token.byteValue, '\t') + return 2, nil + case 'u': + rr := getu4(data) + if rr < 0 { + return 0, errors.New("syntax error") + } + + read := 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(data[read:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + read += 6 + rr = dec + } else { + rr = unicode.ReplacementChar + } + } + var d [4]byte + s := utf8.EncodeRune(d[:], rr) + r.token.byteValue = append(r.token.byteValue, d[:s]...) + return read, nil + } + + return 0, errors.New("syntax error") +} + +// fetchString scans a string literal token. 
+func (r *Lexer) fetchString() { + r.pos++ + data := r.Data[r.pos:] + + isValid, hasEscapes, length := findStringLen(data) + if !isValid { + r.pos += length + r.errParse("unterminated string literal") + return + } + if !hasEscapes { + r.token.byteValue = data[:length] + r.pos += length + 1 + return + } + + r.token.byteValue = make([]byte, 0, length) + p := 0 + for i := 0; i < len(data); { + switch data[i] { + case '"': + r.pos += i + 1 + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + i++ + return + + case '\\': + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + off, err := r.processEscape(data[i:]) + if err != nil { + r.errParse(err.Error()) + return + } + i += off + p = i + + default: + i++ + } + } + r.errParse("unterminated string literal") +} + +// scanToken scans the next token if no token is currently available in the lexer. +func (r *Lexer) scanToken() { + if r.token.kind != tokenUndef || r.fatalError != nil { + return + } + + r.FetchToken() +} + +// consume resets the current token to allow scanning the next one. +func (r *Lexer) consume() { + r.token.kind = tokenUndef + r.token.delimValue = 0 +} + +// Ok returns true if no error (including io.EOF) was encountered during scanning. +func (r *Lexer) Ok() bool { + return r.fatalError == nil +} + +const maxErrorContextLen = 13 + +func (r *Lexer) errParse(what string) { + if r.fatalError == nil { + var str string + if len(r.Data)-r.pos <= maxErrorContextLen { + str = string(r.Data) + } else { + str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." 
+ } + r.fatalError = &LexerError{ + Reason: what, + Offset: r.pos, + Data: str, + } + } +} + +func (r *Lexer) errSyntax() { + r.errParse("syntax error") +} + +func (r *Lexer) errInvalidToken(expected string) { + if r.fatalError != nil { + return + } + if r.UseMultipleErrors { + r.pos = r.start + r.consume() + r.SkipRecursive() + switch expected { + case "[": + r.token.delimValue = ']' + r.token.kind = tokenDelim + case "{": + r.token.delimValue = '}' + r.token.kind = tokenDelim + } + r.addNonfatalError(&LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + }) + return + } + + var str string + if len(r.token.byteValue) <= maxErrorContextLen { + str = string(r.token.byteValue) + } else { + str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.pos, + Data: str, + } +} + +func (r *Lexer) GetPos() int { + return r.pos +} + +// Delim consumes a token and verifies that it is the given delimiter. +func (r *Lexer) Delim(c byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() || r.token.delimValue != c { + r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. + r.errInvalidToken(string([]byte{c})) + } else { + r.consume() + } +} + +// IsDelim returns true if there was no scanning error and next token is the given delimiter. +func (r *Lexer) IsDelim(c byte) bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return !r.Ok() || r.token.delimValue == c +} + +// Null verifies that the next token is null and consumes it. +func (r *Lexer) Null() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNull { + r.errInvalidToken("null") + } + r.consume() +} + +// IsNull returns true if the next token is a null keyword. 
+func (r *Lexer) IsNull() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return r.Ok() && r.token.kind == tokenNull +} + +// Skip skips a single token. +func (r *Lexer) Skip() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + r.consume() +} + +// SkipRecursive skips next array or object completely, or just skips a single token if not +// an array/object. +// +// Note: no syntax validation is performed on the skipped data. +func (r *Lexer) SkipRecursive() { + r.scanToken() + var start, end byte + + switch r.token.delimValue { + case '{': + start, end = '{', '}' + case '[': + start, end = '[', ']' + default: + r.consume() + return + } + + r.consume() + + level := 1 + inQuotes := false + wasEscape := false + + for i, c := range r.Data[r.pos:] { + switch { + case c == start && !inQuotes: + level++ + case c == end && !inQuotes: + level-- + if level == 0 { + r.pos += i + 1 + return + } + case c == '\\' && inQuotes: + wasEscape = !wasEscape + continue + case c == '"' && inQuotes: + inQuotes = wasEscape + case c == '"': + inQuotes = true + } + wasEscape = false + } + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "EOF reached while skipping array/object or token", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } +} + +// Raw fetches the next item recursively as a data slice +func (r *Lexer) Raw() []byte { + r.SkipRecursive() + if !r.Ok() { + return nil + } + return r.Data[r.start:r.pos] +} + +// IsStart returns whether the lexer is positioned at the start +// of an input string. +func (r *Lexer) IsStart() bool { + return r.pos == 0 +} + +// Consumed reads all remaining bytes from the input, publishing an error if +// there is anything but whitespace remaining. 
+func (r *Lexer) Consumed() { + if r.pos > len(r.Data) || !r.Ok() { + return + } + + for _, c := range r.Data[r.pos:] { + if c != ' ' && c != '\t' && c != '\r' && c != '\n' { + r.AddError(&LexerError{ + Reason: "invalid character '" + string(c) + "' after top-level value", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + }) + return + } + + r.pos++ + r.start++ + } +} + +func (r *Lexer) unsafeString() (string, []byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "", nil + } + bytes := r.token.byteValue + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret, bytes +} + +// UnsafeString returns the string value if the token is a string literal. +// +// Warning: returned string may point to the input buffer, so the string should not outlive +// the input buffer. Intended pattern of usage is as an argument to a switch statement. +func (r *Lexer) UnsafeString() string { + ret, _ := r.unsafeString() + return ret +} + +// UnsafeBytes returns the byte slice if the token is a string literal. +func (r *Lexer) UnsafeBytes() []byte { + _, ret := r.unsafeString() + return ret +} + +// String reads a string literal. +func (r *Lexer) String() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + ret := string(r.token.byteValue) + r.consume() + return ret +} + +// Bytes reads a string literal and base64 decodes it into a byte slice. 
+func (r *Lexer) Bytes() []byte { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return nil + } + ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) + n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) + if err != nil { + r.fatalError = &LexerError{ + Reason: err.Error(), + } + return nil + } + + r.consume() + return ret[:n] +} + +// Bool reads a true or false boolean keyword. +func (r *Lexer) Bool() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenBool { + r.errInvalidToken("bool") + return false + } + ret := r.token.boolValue + r.consume() + return ret +} + +func (r *Lexer) number() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNumber { + r.errInvalidToken("number") + return "" + } + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret +} + +func (r *Lexer) Uint8() uint8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16() uint16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32() uint32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint32(n) +} + +func (r *Lexer) Uint64() uint64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: 
r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Uint() uint { + return uint(r.Uint64()) +} + +func (r *Lexer) Int8() int8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int8(n) +} + +func (r *Lexer) Int16() int16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int16(n) +} + +func (r *Lexer) Int32() int32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int32(n) +} + +func (r *Lexer) Int64() int64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Int() int { + return int(r.Int64()) +} + +func (r *Lexer) Uint8Str() uint8 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16Str() uint16 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32Str() uint32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: 
err.Error(), + Data: string(b), + }) + } + return uint32(n) +} + +func (r *Lexer) Uint64Str() uint64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) UintStr() uint { + return uint(r.Uint64Str()) +} + +func (r *Lexer) UintptrStr() uintptr { + return uintptr(r.Uint64Str()) +} + +func (r *Lexer) Int8Str() int8 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int8(n) +} + +func (r *Lexer) Int16Str() int16 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int16(n) +} + +func (r *Lexer) Int32Str() int32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int32(n) +} + +func (r *Lexer) Int64Str() int64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) IntStr() int { + return int(r.Int64Str()) +} + +func (r *Lexer) Float32() float32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return float32(n) +} + +func (r *Lexer) Float32Str() float32 { + s, b := r.unsafeString() + if 
!r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return float32(n) +} + +func (r *Lexer) Float64() float64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Float64Str() float64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) Error() error { + return r.fatalError +} + +func (r *Lexer) AddError(e error) { + if r.fatalError == nil { + r.fatalError = e + } +} + +func (r *Lexer) AddNonFatalError(e error) { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + Reason: e.Error(), + }) +} + +func (r *Lexer) addNonfatalError(err *LexerError) { + if r.UseMultipleErrors { + // We don't want to add errors with the same offset. + if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { + return + } + r.multipleErrors = append(r.multipleErrors, err) + return + } + r.fatalError = err +} + +func (r *Lexer) GetNonFatalErrors() []*LexerError { + return r.multipleErrors +} + +// JsonNumber fetches and json.Number from 'encoding/json' package. 
+// Both int, float or string, contains them are valid values +func (r *Lexer) JsonNumber() json.Number { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() { + r.errInvalidToken("json.Number") + return json.Number("") + } + + switch r.token.kind { + case tokenString: + return json.Number(r.String()) + case tokenNumber: + return json.Number(r.Raw()) + case tokenNull: + r.Null() + return json.Number("") + default: + r.errSyntax() + return json.Number("") + } +} + +// Interface fetches an interface{} analogous to the 'encoding/json' package. +func (r *Lexer) Interface() interface{} { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() { + return nil + } + switch r.token.kind { + case tokenString: + return r.String() + case tokenNumber: + return r.Float64() + case tokenBool: + return r.Bool() + case tokenNull: + r.Null() + return nil + } + + if r.token.delimValue == '{' { + r.consume() + + ret := map[string]interface{}{} + for !r.IsDelim('}') { + key := r.String() + r.WantColon() + ret[key] = r.Interface() + r.WantComma() + } + r.Delim('}') + + if r.Ok() { + return ret + } else { + return nil + } + } else if r.token.delimValue == '[' { + r.consume() + + ret := []interface{}{} + for !r.IsDelim(']') { + ret = append(ret, r.Interface()) + r.WantComma() + } + r.Delim(']') + + if r.Ok() { + return ret + } else { + return nil + } + } + r.errSyntax() + return nil +} + +// WantComma requires a comma to be present before fetching next token. +func (r *Lexer) WantComma() { + r.wantSep = ',' + r.firstElement = false +} + +// WantColon requires a colon to be present before fetching next token. 
+func (r *Lexer) WantColon() { + r.wantSep = ':' + r.firstElement = false +} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go new file mode 100644 index 000000000000..eb8547ccc27a --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -0,0 +1,407 @@ +// Package jwriter contains a JSON writer. +package jwriter + +import ( + "io" + "strconv" + "unicode/utf8" + + "github.com/mailru/easyjson/buffer" +) + +// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but +// Flags field in Writer is used to set and pass them around. +type Flags int + +const ( + NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. + NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. +) + +// Writer is a JSON writer. +type Writer struct { + Flags Flags + + Error error + Buffer buffer.Buffer + NoEscapeHTML bool +} + +// Size returns the size of the data that was written out. +func (w *Writer) Size() int { + return w.Buffer.Size() +} + +// DumpTo outputs the data to given io.Writer, resetting the buffer. +func (w *Writer) DumpTo(out io.Writer) (written int, err error) { + return w.Buffer.DumpTo(out) +} + +// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice +// as argument that it will try to reuse. +func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.BuildBytes(reuse...), nil +} + +// ReadCloser returns an io.ReadCloser that can be used to read the data. +// ReadCloser also resets the buffer. +func (w *Writer) ReadCloser() (io.ReadCloser, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.ReadCloser(), nil +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawByte(c byte) { + w.Buffer.AppendByte(c) +} + +// RawByte appends raw binary data to the buffer. 
+func (w *Writer) RawString(s string) { + w.Buffer.AppendString(s) +} + +// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for +// calling with results of MarshalJSON-like functions. +func (w *Writer) Raw(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.Buffer.AppendBytes(data) + default: + w.RawString("null") + } +} + +// RawText encloses raw binary data in quotes and appends in to the buffer. +// Useful for calling with results of MarshalText-like functions. +func (w *Writer) RawText(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.String(string(data)) + default: + w.RawString("null") + } +} + +// Base64Bytes appends data to the buffer after base64 encoding it +func (w *Writer) Base64Bytes(data []byte) { + if data == nil { + w.Buffer.AppendString("null") + return + } + w.Buffer.AppendByte('"') + w.base64(data) + w.Buffer.AppendByte('"') +} + +func (w *Writer) Uint8(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint16(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint32(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint64(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Int8(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int16(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int32(n int32) { 
+ w.Buffer.EnsureSpace(11) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int64(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Uint8Str(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint16Str(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint32Str(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintStr(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint64Str(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintptrStr(n uintptr) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int8Str(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int16Str(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + 
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int32Str(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) IntStr(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int64Str(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float32(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) +} + +func (w *Writer) Float32Str(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float64(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) +} + +func (w *Writer) Float64Str(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Bool(v bool) { + w.Buffer.EnsureSpace(5) + if v { + w.Buffer.Buf = append(w.Buffer.Buf, "true"...) + } else { + w.Buffer.Buf = append(w.Buffer.Buf, "false"...) 
+ } +} + +const chars = "0123456789abcdef" + +func getTable(falseValues ...int) [128]bool { + table := [128]bool{} + + for i := 0; i < 128; i++ { + table[i] = true + } + + for _, v := range falseValues { + table[v] = false + } + + return table +} + +var ( + htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') + htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') +) + +func (w *Writer) String(s string) { + w.Buffer.AppendByte('"') + + // Portions of the string that contain no escapes are appended as + // byte slices. + + p := 0 // last non-escape symbol + + var escapeTable [128]bool + if w.NoEscapeHTML { + escapeTable = htmlNoEscapeTable + } else { + escapeTable = htmlEscapeTable + } + + for i := 0; i < len(s); { + c := s[i] + + if c < utf8.RuneSelf { + if escapeTable[c] { + // single-width character, no escaping is required + i++ + continue + } + + w.Buffer.AppendString(s[p:i]) + switch c { + case '\t': + w.Buffer.AppendString(`\t`) + case '\r': + w.Buffer.AppendString(`\r`) + case '\n': + w.Buffer.AppendString(`\n`) + case '\\': + w.Buffer.AppendString(`\\`) + case '"': + w.Buffer.AppendString(`\"`) + default: + w.Buffer.AppendString(`\u00`) + w.Buffer.AppendByte(chars[c>>4]) + w.Buffer.AppendByte(chars[c&0xf]) + } + + i++ + p = i + continue + } + + // broken utf + runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) + if runeValue == utf8.RuneError && runeWidth == 1 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\ufffd`) + i++ + p = i + continue + } + + // jsonp stuff - tab separator and line separator + if runeValue == '\u2028' || runeValue == '\u2029' { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u202`) + w.Buffer.AppendByte(chars[runeValue&0xf]) + i += runeWidth + p = i + continue + } + i += runeWidth + } + 
w.Buffer.AppendString(s[p:]) + w.Buffer.AppendByte('"') +} + +const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" +const padChar = '=' + +func (w *Writer) base64(in []byte) { + + if len(in) == 0 { + return + } + + w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) + + si := 0 + n := (len(in) / 3) * 3 + + for si < n { + // Convert 3x 8bit source bytes into 4 bytes + val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) + + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F]) + + si += 3 + } + + remain := len(in) - si + if remain == 0 { + return + } + + // Add the remaining small block + val := uint(in[si+0]) << 16 + if remain == 2 { + val |= uint(in[si+1]) << 8 + } + + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F]) + + switch remain { + case 2: + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar)) + case 1: + w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar)) + } +} diff --git a/vendor/github.com/mailru/easyjson/raw.go b/vendor/github.com/mailru/easyjson/raw.go new file mode 100644 index 000000000000..81bd002e19f8 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/raw.go @@ -0,0 +1,45 @@ +package easyjson + +import ( + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// RawMessage is a raw piece of JSON (number, string, bool, object, array or +// null) that is extracted without parsing and output as is during marshaling. +type RawMessage []byte + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v *RawMessage) MarshalEasyJSON(w *jwriter.Writer) { + if len(*v) == 0 { + w.RawString("null") + } else { + w.Raw(*v, nil) + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. 
+func (v *RawMessage) UnmarshalEasyJSON(l *jlexer.Lexer) { + *v = RawMessage(l.Raw()) +} + +// UnmarshalJSON implements encoding/json.Unmarshaler interface. +func (v *RawMessage) UnmarshalJSON(data []byte) error { + *v = data + return nil +} + +var nullBytes = []byte("null") + +// MarshalJSON implements encoding/json.Marshaler interface. +func (v RawMessage) MarshalJSON() ([]byte, error) { + if len(v) == 0 { + return nullBytes, nil + } + return v, nil +} + +// IsDefined is required for integration with omitempty easyjson logic. +func (v *RawMessage) IsDefined() bool { + return len(*v) > 0 +} diff --git a/vendor/github.com/mailru/easyjson/unknown_fields.go b/vendor/github.com/mailru/easyjson/unknown_fields.go new file mode 100644 index 000000000000..6cfdf8300ba0 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/unknown_fields.go @@ -0,0 +1,34 @@ +package easyjson + +import ( + json "encoding/json" + + jlexer "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// UnknownFieldsProxy implemets UnknownsUnmarshaler and UnknownsMarshaler +// use it as embedded field in your structure to parse and then serialize unknown struct fields +type UnknownFieldsProxy struct { + unknownFields map[string]interface{} +} + +func (s *UnknownFieldsProxy) UnmarshalUnknown(in *jlexer.Lexer, key string) { + if s.unknownFields == nil { + s.unknownFields = make(map[string]interface{}, 1) + } + s.unknownFields[key] = in.Interface() +} + +func (s UnknownFieldsProxy) MarshalUnknowns(out *jwriter.Writer, first bool) { + for key, val := range s.unknownFields { + if first { + first = false + } else { + out.RawByte(',') + } + out.String(string(key)) + out.RawByte(':') + out.Raw(json.Marshal(val)) + } +} diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go new file mode 100644 index 000000000000..7a0b9ed1029e --- /dev/null +++ 
b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -0,0 +1,120 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clientcredentials implements the OAuth2.0 "client credentials" token flow, +// also known as the "two-legged OAuth 2.0". +// +// This should be used when the client is acting on its own behalf or when the client +// is the resource owner. It may also be used when requesting access to protected +// resources based on an authorization previously arranged with the authorization +// server. +// +// See https://tools.ietf.org/html/rfc6749#section-4.4 +package clientcredentials // import "golang.org/x/oauth2/clientcredentials" + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +// Config describes a 2-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // TokenURL is the resource server's token endpoint + // URL. This is a constant specific to each server. + TokenURL string + + // Scope specifies optional requested permissions. + Scopes []string + + // EndpointParams specifies additional parameters for requests to the token endpoint. + EndpointParams url.Values + + // AuthStyle optionally specifies how the endpoint wants the + // client ID & client secret sent. The zero value means to + // auto-detect. + AuthStyle oauth2.AuthStyle +} + +// Token uses client credentials to retrieve a token. +// +// The provided context optionally controls which HTTP client is used. See the oauth2.HTTPClient variable. 
+func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { + return c.TokenSource(ctx).Token() +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. +// +// The provided context optionally controls which HTTP client +// is returned. See the oauth2.HTTPClient variable. +// +// The returned Client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context and the +// client ID and client secret. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + source := &tokenSource{ + ctx: ctx, + conf: c, + } + return oauth2.ReuseTokenSource(nil, source) +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +// Token refreshes the token by using a new client credentials request. +// tokens received this way do not include a refresh token +func (c *tokenSource) Token() (*oauth2.Token, error) { + v := url.Values{ + "grant_type": {"client_credentials"}, + } + if len(c.conf.Scopes) > 0 { + v.Set("scope", strings.Join(c.conf.Scopes, " ")) + } + for k, p := range c.conf.EndpointParams { + // Allow grant_type to be overridden to allow interoperability with + // non-compliant implementations. 
+ if _, ok := v[k]; ok && k != "grant_type" { + return nil, fmt.Errorf("oauth2: cannot overwrite parameter %q", k) + } + v[k] = p + } + + tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v, internal.AuthStyle(c.conf.AuthStyle)) + if err != nil { + if rErr, ok := err.(*internal.RetrieveError); ok { + return nil, (*oauth2.RetrieveError)(rErr) + } + return nil, err + } + t := &oauth2.Token{ + AccessToken: tk.AccessToken, + TokenType: tk.TokenType, + RefreshToken: tk.RefreshToken, + Expiry: tk.Expiry, + } + return t.WithExtra(tk.Raw), nil +} diff --git a/vendor/vendor.json b/vendor/vendor.json index dee67fa3e1c8..b1b3998067dc 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -104,6 +104,48 @@ "revision": "fe5ac47354fef18304f2f4b6981c3cf149c84779", "revisionTime": "2019-08-13T14:39:03Z" }, + { + "checksumSHA1": "wei6nnaRVIBVKgG02Y2ZKZFImOs=", + "path": "code.cloudfoundry.org/go-diodes", + "revision": "f77fb823c7ee0156ed4cdadaf4f79ac3fd84613f", + "revisionTime": "2019-08-08T08:06:32Z" + }, + { + "checksumSHA1": "/ThvU1ey59Se2KtvnxDH8pTv0tE=", + "path": "code.cloudfoundry.org/go-loggregator", + "revision": "b8d176783c8a6280a34f0e19e0e8f57d722773a1", + "revisionTime": "2019-07-25T20:30:07Z", + "version": "v7.7.0", + "versionExact": "v7.7.0" + }, + { + "checksumSHA1": "gqL4M5ckqhtX9MaUh+VdHq+KEUs=", + "path": "code.cloudfoundry.org/go-loggregator/conversion", + "revision": "b8d176783c8a6280a34f0e19e0e8f57d722773a1", + "revisionTime": "2019-07-25T20:30:07Z", + "version": "v7.7.0", + "versionExact": "v7.7.0" + }, + { + "checksumSHA1": "Y1hEVVknRkkXXUYd84nEBGBz5a8=", + "path": "code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2", + "revision": "b8d176783c8a6280a34f0e19e0e8f57d722773a1", + "revisionTime": "2019-07-25T20:30:07Z", + "version": "v7.7.0", + "versionExact": "v7.7.0" + }, + { + "checksumSHA1": "g0lkFgCOhEZmlwxJF7aKD58pz2g=", + "path": "code.cloudfoundry.org/gofileutils/fileutils", + "revision": 
"4d0c80011a0f37da1711c184028bc40137cd45af", + "revisionTime": "2017-01-11T11:52:28Z" + }, + { + "checksumSHA1": "3h5TYyUNNdIhRM2j8cTr/yuPVWs=", + "path": "code.cloudfoundry.org/rfc5424", + "revision": "236a6d29298aea12f69978f33393d12465abc429", + "revisionTime": "2018-09-05T21:01:52Z" + }, { "checksumSHA1": "6uv2eI5FyjWtTg5AsZ++csclSqc=", "path": "contrib.go.opencensus.io/exporter/ocagent", @@ -365,6 +407,12 @@ "revision": "2bf71ec4836011b92dc78df3b9ace6b40e65f7df", "revisionTime": "2016-07-06T22:07:25Z" }, + { + "checksumSHA1": "Mr55Nhc1CQ49Fi97aHLYgvXmPc8=", + "path": "github.com/Masterminds/semver", + "revision": "910aa146bd66780c2815d652b92a7fc5331e533c", + "revisionTime": "2019-12-13T17:28:11Z" + }, { "checksumSHA1": "9vIkKNaaSRELM/2nCkWOAdHD21M=", "path": "github.com/Microsoft/go-winio", @@ -2009,6 +2057,20 @@ "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", "revisionTime": "2019-08-08T01:16:37Z" }, + { + "checksumSHA1": "Cq0mCokJQIOkHz7SFCKSstUZxnw=", + "path": "github.com/cloudfoundry-community/go-cfclient", + "revision": "35bcce23fc5f8b9969723ac38c0de1f82c4d3471", + "revisionTime": "2019-08-08T21:40:49Z", + "version": "master", + "versionExact": "master" + }, + { + "checksumSHA1": "6wZB7Zkp33mMdUjWH7O0N5/fk9A=", + "path": "github.com/cloudfoundry/sonde-go/events", + "revision": "b33733203bb48d7c56de7cb639d77f78b0449d19", + "revisionTime": "2017-12-06T17:18:20Z" + }, { "checksumSHA1": "ru5eKWdLzXfpNRL+Mi1bxbmY8DU=", "path": "github.com/containerd/containerd/errdefs", @@ -4063,6 +4125,30 @@ "revision": "v1.8.0", "version": "v1.8.0" }, + { + "checksumSHA1": "UaztUU8ZfWj6YG13dBLR2FHIZxg=", + "path": "github.com/mailru/easyjson", + "revision": "8edcc4e51f39ddbd3505a3386aff3f435a7fd028", + "revisionTime": "2020-02-18T08:42:23Z" + }, + { + "checksumSHA1": "T8soMJArSZrYnhmdpAnq1bVxQ6Q=", + "path": "github.com/mailru/easyjson/buffer", + "revision": "8edcc4e51f39ddbd3505a3386aff3f435a7fd028", + "revisionTime": "2020-02-18T08:42:23Z" + }, + { + "checksumSHA1": 
"8Qyp0ekkp7JuahJZnnzuTXESyFU=", + "path": "github.com/mailru/easyjson/jlexer", + "revision": "8edcc4e51f39ddbd3505a3386aff3f435a7fd028", + "revisionTime": "2020-02-18T08:42:23Z" + }, + { + "checksumSHA1": "C72An0967WekWiUIFS+fAvoJpcY=", + "path": "github.com/mailru/easyjson/jwriter", + "revision": "8edcc4e51f39ddbd3505a3386aff3f435a7fd028", + "revisionTime": "2020-02-18T08:42:23Z" + }, { "checksumSHA1": "qNkx9+OTwZI6aFv7K9zuFCGODUw=", "path": "github.com/mattn/go-colorable", @@ -5250,6 +5336,12 @@ "revision": "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33", "revisionTime": "2019-05-07T23:52:07Z" }, + { + "checksumSHA1": "Fie9WZxLdPy9f7Kk4qXqH4CV30A=", + "path": "golang.org/x/oauth2/clientcredentials", + "revision": "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303", + "revisionTime": "2020-01-07T16:11:21Z" + }, { "checksumSHA1": "w5SmJwdVFlGE1KO4/AQjY+jvdsI=", "path": "golang.org/x/oauth2/google", diff --git a/x-pack/libbeat/common/cloudfoundry/cache.go b/x-pack/libbeat/common/cloudfoundry/cache.go new file mode 100644 index 000000000000..42250f317153 --- /dev/null +++ b/x-pack/libbeat/common/cloudfoundry/cache.go @@ -0,0 +1,72 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cloudfoundry + +import ( + "fmt" + "time" + + "github.com/cloudfoundry-community/go-cfclient" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" +) + +// cfClient interface is provided so unit tests can mock the actual client. +type cfClient interface { + // GetAppByGuid returns an application information from its Guid. + GetAppByGuid(guid string) (cfclient.App, error) +} + +// clientCacheWrap wraps the cloudfoundry client to add a cache in front of GetAppByGuid. 
+type clientCacheWrap struct { + cache *common.Cache + client cfClient + log *logp.Logger +} + +// newClientCacheWrap creates a new cache for application data. +func newClientCacheWrap(client cfClient, ttl time.Duration, log *logp.Logger) *clientCacheWrap { + return &clientCacheWrap{ + cache: common.NewCacheWithExpireOnAdd(ttl, 100), + client: client, + log: log, + } +} + +// fetchApp uses the cfClient to retrieve an App entity and +// stores it in the internal cache +func (c *clientCacheWrap) fetchAppByGuid(guid string) (*cfclient.App, error) { + app, err := c.client.GetAppByGuid(guid) + if err != nil { + return nil, err + } + c.cache.Put(app.Guid, &app) + return &app, nil +} + +// GetApp returns CF Application info, either from the cache or +// using the CF client. +func (c *clientCacheWrap) GetAppByGuid(guid string) (*cfclient.App, error) { + cachedApp := c.cache.Get(guid) + if cachedApp == nil { + return c.fetchAppByGuid(guid) + } + app, ok := cachedApp.(*cfclient.App) + if !ok { + return nil, fmt.Errorf("error converting cached app") + } + return app, nil +} + +// StartJanitor starts a goroutine that will periodically clean the applications cache. +func (c *clientCacheWrap) StartJanitor(interval time.Duration) { + c.cache.StartJanitor(interval) +} + +// StopJanitor stops the goroutine that periodically clean the applications cache. +func (c *clientCacheWrap) StopJanitor() { + c.cache.StopJanitor() +} diff --git a/x-pack/libbeat/common/cloudfoundry/cache_test.go b/x-pack/libbeat/common/cloudfoundry/cache_test.go new file mode 100644 index 000000000000..70618e7502da --- /dev/null +++ b/x-pack/libbeat/common/cloudfoundry/cache_test.go @@ -0,0 +1,79 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +// +build !integration + +package cloudfoundry + +import ( + "fmt" + "testing" + "time" + + "github.com/cloudfoundry-community/go-cfclient" + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/logp" +) + +func TestClientCacheWrap(t *testing.T) { + ttl := 500 * time.Millisecond + guid := mustCreateFakeGuid() + app := cfclient.App{ + Guid: guid, + Memory: 1, // use this field to track if from cache or from client + } + fakeClient := &fakeCFClient{app, 0} + cache := newClientCacheWrap(fakeClient, ttl, logp.NewLogger("cloudfoundry")) + + // should err; different app client doesn't have + _, err := cache.GetAppByGuid(mustCreateFakeGuid()) + assert.Error(t, err) + + // fetched from client for the first time + one, err := cache.GetAppByGuid(guid) + assert.NoError(t, err) + assert.Equal(t, app, *one) + assert.Equal(t, 1, fakeClient.callCount) + + // updated app in fake client, new fetch should not have updated app + updatedApp := cfclient.App{ + Guid: guid, + Memory: 2, + } + fakeClient.app = updatedApp + two, err := cache.GetAppByGuid(guid) + assert.NoError(t, err) + assert.Equal(t, app, *two) + assert.Equal(t, 1, fakeClient.callCount) + + // wait the ttl, then it should have updated app + time.Sleep(ttl) + three, err := cache.GetAppByGuid(guid) + assert.NoError(t, err) + assert.Equal(t, updatedApp, *three) + assert.Equal(t, 2, fakeClient.callCount) +} + +type fakeCFClient struct { + app cfclient.App + callCount int +} + +func (f *fakeCFClient) GetAppByGuid(guid string) (cfclient.App, error) { + if f.app.Guid != guid { + return f.app, fmt.Errorf("no app with guid") + } + f.callCount++ + return f.app, nil +} + +func mustCreateFakeGuid() string { + uuid, err := uuid.NewV4() + if err != nil { + panic(err) + } + return uuid.String() +} diff --git a/x-pack/libbeat/common/cloudfoundry/config.go b/x-pack/libbeat/common/cloudfoundry/config.go new file mode 100644 index 000000000000..33c0a85e5130 --- /dev/null +++ 
b/x-pack/libbeat/common/cloudfoundry/config.go @@ -0,0 +1,57 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cloudfoundry + +import ( + "crypto/tls" + "time" + + "github.com/elastic/beats/libbeat/common/transport/tlscommon" + + "github.com/gofrs/uuid" +) + +type Config struct { + // CloudFoundry credentials for retrieving OAuth tokens + ClientID string `config:"client_id" validate:"required"` + ClientSecret string `config:"client_secret" validate:"required"` + + // TLS configuration for the client + TLS *tlscommon.Config `config:"ssl"` + + // Override URLs returned from the CF client + APIAddress string `config:"api_address"` + DopplerAddress string `config:"doppler_address"` + UaaAddress string `config:"uaa_address"` + RlpAddress string `config:"rlp_address"` + + // ShardID when retrieving events from loggregator, sharing this ID across + // multiple filebeats will shard the load of receiving and sending events. + ShardID string `config:"shard_id"` + + // Maximum amount of time to cache application objects from CF client + CacheDuration time.Duration `config:"cache_duration"` +} + +// InitDefaults initialize the defaults for the configuration. +func (c *Config) InitDefaults() { + // If not provided by the user; subscription ID should be a unique string to avoid clustering by default. + // Default to using a UUID4 string. + uuid, err := uuid.NewV4() + if err != nil { + panic(err) + } + c.ShardID = uuid.String() + c.CacheDuration = 120 * time.Second +} + +// TLSConfig returns the TLS configuration. 
+func (c *Config) TLSConfig() (*tls.Config, error) { + tls, err := tlscommon.LoadTLSConfig(c.TLS) + if err != nil { + return nil, err + } + return tls.ToConfig(), nil +} diff --git a/x-pack/libbeat/common/cloudfoundry/config_test.go b/x-pack/libbeat/common/cloudfoundry/config_test.go new file mode 100644 index 000000000000..219a48394214 --- /dev/null +++ b/x-pack/libbeat/common/cloudfoundry/config_test.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build !integration + +package cloudfoundry + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/go-ucfg" + + "github.com/elastic/beats/libbeat/common" +) + +func TestValidation(t *testing.T) { + var noIdOrSecret Config + assert.Error(t, ucfg.New().Unpack(&noIdOrSecret)) + + var noId Config + assert.Error(t, ucfg.MustNewFrom(common.MapStr{ + "client_secret": "client_secret", + }).Unpack(&noId)) + + var noSecret Config + assert.Error(t, ucfg.MustNewFrom(common.MapStr{ + "client_id": "client_id", + }).Unpack(&noSecret)) + + var valid Config + assert.NoError(t, ucfg.MustNewFrom(common.MapStr{ + "client_id": "client_id", + "client_secret": "client_secret", + }).Unpack(&valid)) +} + +func TestInitDefaults(t *testing.T) { + var cfCfg Config + assert.NoError(t, ucfg.MustNewFrom(common.MapStr{ + "client_id": "client_id", + "client_secret": "client_secret", + }).Unpack(&cfCfg)) + assert.Len(t, cfCfg.ShardID, 36) +} diff --git a/x-pack/libbeat/common/cloudfoundry/doer.go b/x-pack/libbeat/common/cloudfoundry/doer.go new file mode 100644 index 000000000000..98bb3aa16936 --- /dev/null +++ b/x-pack/libbeat/common/cloudfoundry/doer.go @@ -0,0 +1,98 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cloudfoundry + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/logp" +) + +// authTokenDoer is an HTTP requester that includes UAA tokens at the header +type authTokenDoer struct { + url string + clientID string + clientSecret string + httpClient *http.Client + log *logp.Logger +} + +// NewAuthTokenDoer creates a loggregator HTTP client that uses a new UAA token at each request +func newAuthTokenDoer(url string, clientID, clientSecret string, httpClient *http.Client, log *logp.Logger) *authTokenDoer { + return &authTokenDoer{ + url: url, + clientID: clientID, + clientSecret: clientSecret, + httpClient: httpClient, + log: log.Named("doer"), + } +} + +// Do executes an HTTP request adding a UAA OAuth token +func (d *authTokenDoer) Do(r *http.Request) (*http.Response, error) { + t, err := d.getAuthToken(d.clientID, d.clientSecret) + if err != nil { + // The reason for writing an error here is that pushing the error upstream + // is handled by the loggregator library, which is beyond our reach.
+ d.log.Errorf("error creating UAA Auth Token: %+v", err) + return nil, errors.Wrap(err, "error retrieving UUA token") + } + r.Header.Set("Authorization", t) + return d.httpClient.Do(r) +} + +func (d *authTokenDoer) getAuthToken(username, password string) (string, error) { + token, _, err := d.getAuthTokenWithExpiresIn(username, password) + return token, err +} + +func (d *authTokenDoer) getAuthTokenWithExpiresIn(username, password string) (string, int, error) { + data := url.Values{ + "client_id": {username}, + "grant_type": {"client_credentials"}, + } + + request, err := http.NewRequest("POST", fmt.Sprintf("%s/oauth/token", d.url), strings.NewReader(data.Encode())) + if err != nil { + return "", -1, err + } + request.SetBasicAuth(username, password) + request.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := d.httpClient.Do(request) + if err != nil { + return "", -1, err + } + + if resp.StatusCode != http.StatusOK { + return "", -1, fmt.Errorf("received a status code %v", resp.Status) + } + defer resp.Body.Close() + + jsonData := make(map[string]interface{}) + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&jsonData) + if err != nil { + return "", -1, err + } + + expiresIn := 0 + if value, ok := jsonData["expires_in"]; ok { + asFloat, err := strconv.ParseFloat(fmt.Sprintf("%f", value), 64) + if err != nil { + return "", -1, err + } + expiresIn = int(asFloat) + } + + return fmt.Sprintf("%s %s", jsonData["token_type"], jsonData["access_token"]), expiresIn, nil +} diff --git a/x-pack/libbeat/common/cloudfoundry/events.go b/x-pack/libbeat/common/cloudfoundry/events.go new file mode 100644 index 000000000000..c14958b76ac4 --- /dev/null +++ b/x-pack/libbeat/common/cloudfoundry/events.go @@ -0,0 +1,526 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cloudfoundry + +import ( + "fmt" + "net/url" + "strings" + "time" + + "github.com/elastic/beats/libbeat/common" + + "github.com/cloudfoundry/sonde-go/events" +) + +// EventType defines the different event types that can be raised from RPLClient. +type EventType uint + +// EventTypes from loggregator documented here: https://github.com/cloudfoundry/loggregator-api +const ( + // EventTypeHttpAccess is a http access event. + EventTypeHttpAccess EventType = iota + // EventTypeLog is a log event. + EventTypeLog + // EventTypeCounter is a counter event. + EventTypeCounter + // EventTypeValueMetric is a value metric event. + EventTypeValueMetric + // EventTypeContainerMetric is a container metric event. + EventTypeContainerMetric + // EventTypeError is an error event. + EventTypeError +) + +// String returns string representation of the event type. +func (t EventType) String() string { + switch t { + case EventTypeHttpAccess: + return "access" + case EventTypeLog: + return "log" + case EventTypeCounter: + return "counter" + case EventTypeValueMetric: + return "value" + case EventTypeContainerMetric: + return "container" + case EventTypeError: + return "error" + default: + return "unknown" + } +} + +// EventMessageType defines the different log message types. +type EventLogMessageType uint + +const ( + // EventLogMessageTypeStdout is a message that was received from stdout. + EventLogMessageTypeStdout EventLogMessageType = iota + 1 + // EventLogMessageTypeStderr is a message that was received from stderr. + EventLogMessageTypeStderr +) + +// String returns string representation of the event log message type. 
func (t EventLogMessageType) String() string {
	switch t {
	case EventLogMessageTypeStdout:
		return "stdout"
	case EventLogMessageTypeStderr:
		return "stderr"
	default:
		return "unknown"
	}
}

// Event is the interface all events implement.
type Event interface {
	fmt.Stringer

	Origin() string
	EventType() EventType
	Timestamp() time.Time
	Deployment() string
	Job() string
	Index() string
	IP() string
	Tags() map[string]string
	ToFields() common.MapStr
}

// EventWithAppID is the interface all events implement that provide an application ID for the event.
type EventWithAppID interface {
	Event

	AppGuid() string
}

// eventBase carries the fields common to every loggregator envelope.
type eventBase struct {
	origin     string
	timestamp  time.Time
	deployment string
	job        string
	index      string
	ip         string
	tags       map[string]string
}

// eventAppBase extends eventBase with the application GUID for events that
// are scoped to a single application.
type eventAppBase struct {
	eventBase

	appGuid string
}

// EventHttpAccess represents a http access event.
type EventHttpAccess struct {
	eventAppBase

	startTimestamp time.Time
	stopTimestamp  time.Time
	requestID      string
	peerType       string
	method         string
	uri            string
	remoteAddress  string
	userAgent      string
	statusCode     int32
	contentLength  int64
	instanceIndex  int32
	forwarded      []string
}

func (*EventHttpAccess) EventType() EventType { return EventTypeHttpAccess }

// String returns the event type name ("access").
func (e *EventHttpAccess) String() string { return e.EventType().String() }

// Envelope accessors (common to all events).
func (e *EventHttpAccess) Origin() string          { return e.origin }
func (e *EventHttpAccess) Timestamp() time.Time    { return e.timestamp }
func (e *EventHttpAccess) Deployment() string      { return e.deployment }
func (e *EventHttpAccess) Job() string             { return e.job }
func (e *EventHttpAccess) Index() string           { return e.index }
func (e *EventHttpAccess) IP() string              { return e.ip }
func (e *EventHttpAccess) Tags() map[string]string { return e.tags }

// Access-event payload accessors.
func (e *EventHttpAccess) AppGuid() string           { return e.appGuid }
func (e *EventHttpAccess) StartTimestamp() time.Time { return e.startTimestamp }
func (e *EventHttpAccess) StopTimestamp() time.Time  { return e.stopTimestamp }
func (e *EventHttpAccess) RequestID() string         { return e.requestID }
func (e *EventHttpAccess) PeerType() string          { return e.peerType }
func (e *EventHttpAccess) Method() string            { return e.method }
func (e *EventHttpAccess) URI() string               { return e.uri }
func (e *EventHttpAccess) RemoteAddress() string     { return e.remoteAddress }
func (e *EventHttpAccess) UserAgent() string         { return e.userAgent }
func (e *EventHttpAccess) StatusCode() int32         { return e.statusCode }
func (e *EventHttpAccess) ContentLength() int64      { return e.contentLength }
func (e *EventHttpAccess) InstanceIndex() int32      { return e.instanceIndex }
func (e *EventHttpAccess) Forwarded() []string       { return e.forwarded }

// ToFields converts the event into a nested map of fields ready for indexing.
func (e *EventHttpAccess) ToFields() common.MapStr {
	fields := baseMapWithApp(e)
	fields.DeepUpdate(common.MapStr{
		"http": common.MapStr{
			"response": common.MapStr{
				"status_code": e.StatusCode(),
				// NOTE(review): "method" under http.response looks like it
				// belongs under http.request in ECS — confirm intended layout.
				"method": e.Method(),
				"bytes":  e.ContentLength(),
			},
		},
		"user_agent": common.MapStr{
			"original": e.UserAgent(),
		},
		"url": urlMap(e.URI()),
	})
	return fields
}

// EventLog represents a log message event.
+type EventLog struct { + eventAppBase + + message string + messageType EventLogMessageType + sourceType string + sourceID string +} + +func (*EventLog) EventType() EventType { return EventTypeLog } +func (e *EventLog) String() string { return e.EventType().String() } +func (e *EventLog) Origin() string { return e.origin } +func (e *EventLog) Timestamp() time.Time { return e.timestamp } +func (e *EventLog) Deployment() string { return e.deployment } +func (e *EventLog) Job() string { return e.job } +func (e *EventLog) Index() string { return e.index } +func (e *EventLog) IP() string { return e.ip } +func (e *EventLog) Tags() map[string]string { return e.tags } +func (e *EventLog) AppGuid() string { return e.appGuid } +func (e *EventLog) Message() string { return e.message } +func (e *EventLog) MessageType() EventLogMessageType { return e.messageType } +func (e *EventLog) SourceType() string { return e.sourceType } +func (e *EventLog) SourceID() string { return e.sourceID } +func (e *EventLog) ToFields() common.MapStr { + fields := baseMapWithApp(e) + fields.DeepUpdate(common.MapStr{ + "cf": common.MapStr{ + e.String(): common.MapStr{ + "source": common.MapStr{ + "instance": e.SourceID(), + "type": e.SourceType(), + }, + }, + }, + "message": e.Message(), + }) + return fields +} + +// EventCounter represents a counter event. 
+type EventCounter struct { + eventBase + + name string + delta uint64 + total uint64 +} + +func (*EventCounter) EventType() EventType { return EventTypeCounter } +func (e *EventCounter) String() string { return e.EventType().String() } +func (e *EventCounter) Origin() string { return e.origin } +func (e *EventCounter) Timestamp() time.Time { return e.timestamp } +func (e *EventCounter) Deployment() string { return e.deployment } +func (e *EventCounter) Job() string { return e.job } +func (e *EventCounter) Index() string { return e.index } +func (e *EventCounter) IP() string { return e.ip } +func (e *EventCounter) Tags() map[string]string { return e.tags } +func (e *EventCounter) Name() string { return e.name } +func (e *EventCounter) Delta() uint64 { return e.delta } +func (e *EventCounter) Total() uint64 { return e.total } +func (e *EventCounter) ToFields() common.MapStr { + fields := baseMap(e) + fields.DeepUpdate(common.MapStr{ + "cf": common.MapStr{ + e.String(): common.MapStr{ + "name": e.Name(), + "delta": e.Delta(), + "total": e.Total(), + }, + }, + }) + return fields +} + +// EventValueMetric represents a value metric event. 
+type EventValueMetric struct { + eventBase + + name string + value float64 + unit string +} + +func (*EventValueMetric) EventType() EventType { return EventTypeValueMetric } +func (e *EventValueMetric) String() string { return e.EventType().String() } +func (e *EventValueMetric) Origin() string { return e.origin } +func (e *EventValueMetric) Timestamp() time.Time { return e.timestamp } +func (e *EventValueMetric) Deployment() string { return e.deployment } +func (e *EventValueMetric) Job() string { return e.job } +func (e *EventValueMetric) Index() string { return e.index } +func (e *EventValueMetric) IP() string { return e.ip } +func (e *EventValueMetric) Tags() map[string]string { return e.tags } +func (e *EventValueMetric) Name() string { return e.name } +func (e *EventValueMetric) Value() float64 { return e.value } +func (e *EventValueMetric) Unit() string { return e.unit } +func (e *EventValueMetric) ToFields() common.MapStr { + fields := baseMap(e) + fields.DeepUpdate(common.MapStr{ + "cf": common.MapStr{ + e.String(): common.MapStr{ + "name": e.Name(), + "unit": e.Unit(), + "value": e.Value(), + }, + }, + }) + return fields +} + +// EventContainerMetric represents a container metric event. 
type EventContainerMetric struct {
	eventAppBase

	instanceIndex    int32
	cpuPercentage    float64
	memoryBytes      uint64
	diskBytes        uint64
	memoryBytesQuota uint64
	diskBytesQuota   uint64
}

func (*EventContainerMetric) EventType() EventType { return EventTypeContainerMetric }

// String returns the event type name ("container").
func (e *EventContainerMetric) String() string { return e.EventType().String() }

// Envelope accessors (common to all events).
func (e *EventContainerMetric) Origin() string          { return e.origin }
func (e *EventContainerMetric) Timestamp() time.Time    { return e.timestamp }
func (e *EventContainerMetric) Deployment() string      { return e.deployment }
func (e *EventContainerMetric) Job() string             { return e.job }
func (e *EventContainerMetric) Index() string           { return e.index }
func (e *EventContainerMetric) IP() string              { return e.ip }
func (e *EventContainerMetric) Tags() map[string]string { return e.tags }

// Application and container metric payload accessors.
func (e *EventContainerMetric) AppGuid() string           { return e.appGuid }
func (e *EventContainerMetric) InstanceIndex() int32      { return e.instanceIndex }
func (e *EventContainerMetric) CPUPercentage() float64    { return e.cpuPercentage }
func (e *EventContainerMetric) MemoryBytes() uint64       { return e.memoryBytes }
func (e *EventContainerMetric) DiskBytes() uint64         { return e.diskBytes }
func (e *EventContainerMetric) MemoryBytesQuota() uint64  { return e.memoryBytesQuota }
func (e *EventContainerMetric) DiskBytesQuota() uint64    { return e.diskBytesQuota }

// ToFields converts the event into a nested map of fields ready for indexing.
// Note the dotted keys ("cpu.pct", "memory.bytes", ...) are literal map keys
// here, not nested maps; DeepUpdate inserts them under "cf.container" as-is.
func (e *EventContainerMetric) ToFields() common.MapStr {
	fields := baseMapWithApp(e)
	fields.DeepUpdate(common.MapStr{
		"cf": common.MapStr{
			e.String(): common.MapStr{
				"instance_index":     e.InstanceIndex(),
				"cpu.pct":            e.CPUPercentage(),
				"memory.bytes":       e.MemoryBytes(),
				"memory.quota.bytes": e.MemoryBytesQuota(),
				"disk.bytes":         e.DiskBytes(),
				"disk.quota.bytes":   e.DiskBytesQuota(),
			},
		},
	})
	return fields
}

// EventError represents an error event.
+type EventError struct { + eventBase + + message string + code int32 + source string +} + +func (*EventError) EventType() EventType { return EventTypeError } +func (e *EventError) String() string { return e.EventType().String() } +func (e *EventError) Origin() string { return e.origin } +func (e *EventError) Timestamp() time.Time { return e.timestamp } +func (e *EventError) Deployment() string { return e.deployment } +func (e *EventError) Job() string { return e.job } +func (e *EventError) Index() string { return e.index } +func (e *EventError) IP() string { return e.ip } +func (e *EventError) Tags() map[string]string { return e.tags } +func (e *EventError) Message() string { return e.message } +func (e *EventError) Code() int32 { return e.code } +func (e *EventError) Source() string { return e.source } +func (e *EventError) ToFields() common.MapStr { + fields := baseMap(e) + fields.DeepUpdate(common.MapStr{ + "cf": common.MapStr{ + e.String(): common.MapStr{ + "source": e.Source(), + }, + }, + "message": e.Message(), + "code": e.Code(), + }) + return fields +} + +func newEventBase(env *events.Envelope) eventBase { + return eventBase{ + origin: *env.Origin, + timestamp: time.Unix(0, *env.Timestamp), + deployment: *env.Deployment, + job: *env.Job, + index: *env.Index, + ip: *env.Ip, + tags: env.Tags, + } +} + +func newEventHttpAccess(env *events.Envelope) *EventHttpAccess { + msg := env.GetHttpStartStop() + appID := "" + if msg.ApplicationId != nil { + appID = msg.ApplicationId.String() + } + return &EventHttpAccess{ + eventAppBase: eventAppBase{ + eventBase: newEventBase(env), + appGuid: appID, + }, + startTimestamp: time.Unix(0, *msg.StartTimestamp), + stopTimestamp: time.Unix(0, *msg.StopTimestamp), + requestID: msg.RequestId.String(), + peerType: strings.ToLower(msg.PeerType.String()), + method: msg.Method.String(), + uri: *msg.Uri, + remoteAddress: *msg.RemoteAddress, + userAgent: *msg.UserAgent, + statusCode: *msg.StatusCode, + contentLength: 
*msg.ContentLength, + instanceIndex: *msg.InstanceIndex, + forwarded: msg.Forwarded, + } +} + +func newEventLog(env *events.Envelope) *EventLog { + msg := env.GetLogMessage() + return &EventLog{ + eventAppBase: eventAppBase{ + eventBase: newEventBase(env), + appGuid: *msg.AppId, + }, + message: string(msg.Message), + messageType: EventLogMessageType(*msg.MessageType), + sourceType: *msg.SourceType, + sourceID: *msg.SourceInstance, + } +} + +func newEventCounter(env *events.Envelope) *EventCounter { + msg := env.GetCounterEvent() + return &EventCounter{ + eventBase: newEventBase(env), + name: *msg.Name, + delta: *msg.Delta, + total: *msg.Total, + } +} + +func newEventValueMetric(env *events.Envelope) *EventValueMetric { + msg := env.GetValueMetric() + return &EventValueMetric{ + eventBase: newEventBase(env), + name: *msg.Name, + value: *msg.Value, + unit: *msg.Unit, + } +} + +func newEventContainerMetric(env *events.Envelope) *EventContainerMetric { + msg := env.GetContainerMetric() + return &EventContainerMetric{ + eventAppBase: eventAppBase{ + eventBase: newEventBase(env), + appGuid: *msg.ApplicationId, + }, + instanceIndex: *msg.InstanceIndex, + cpuPercentage: *msg.CpuPercentage, + memoryBytes: *msg.MemoryBytes, + diskBytes: *msg.DiskBytes, + memoryBytesQuota: *msg.MemoryBytesQuota, + diskBytesQuota: *msg.DiskBytesQuota, + } +} + +func newEventError(env *events.Envelope) *EventError { + msg := env.GetError() + return &EventError{ + eventBase: newEventBase(env), + message: *msg.Message, + code: *msg.Code, + source: *msg.Source, + } +} + +func envelopeToEvent(env *events.Envelope) Event { + switch *env.EventType { + case events.Envelope_HttpStartStop: + return newEventHttpAccess(env) + case events.Envelope_LogMessage: + return newEventLog(env) + case events.Envelope_CounterEvent: + return newEventCounter(env) + case events.Envelope_ValueMetric: + return newEventValueMetric(env) + case events.Envelope_ContainerMetric: + return newEventContainerMetric(env) + } + 
return nil +} + +func envelopMap(evt Event) common.MapStr { + return common.MapStr{ + "origin": evt.Origin(), + "deployment": evt.Deployment(), + "ip": evt.IP(), + "job": evt.Job(), + "index": evt.Index(), + } +} + +func baseMap(evt Event) common.MapStr { + return common.MapStr{ + "module": "cf", + "dataset": fmt.Sprintf("cf.%s", evt), + "cf": common.MapStr{ + evt.String(): common.MapStr{ + "timestamp": evt.Timestamp(), + "type": evt.String(), + }, + "envelope": envelopMap(evt), + }, + } +} + +func baseMapWithApp(evt EventWithAppID) common.MapStr { + base := baseMap(evt) + appID := evt.AppGuid() + if appID != "" { + base.Put("cf.app.id", appID) + } + return base +} + +func urlMap(uri string) common.MapStr { + u, err := url.Parse(uri) + if err != nil { + return common.MapStr{ + "original": uri, + } + } + return common.MapStr{ + "original": uri, + "scheme": u.Scheme, + "port": u.Port(), + "path": u.Path, + "domain": u.Hostname(), + } +} diff --git a/x-pack/libbeat/common/cloudfoundry/hub.go b/x-pack/libbeat/common/cloudfoundry/hub.go new file mode 100644 index 000000000000..ed7740c464fd --- /dev/null +++ b/x-pack/libbeat/common/cloudfoundry/hub.go @@ -0,0 +1,133 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cloudfoundry + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/cloudfoundry-community/go-cfclient" + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/logp" +) + +// Client interface exposed by Hub.Client. +type Client interface { + // GetAppByGuid returns the application from cloudfoundry. + GetAppByGuid(guid string) (*cfclient.App, error) + // StartJanitor keeps the cache of applications clean. + StartJanitor(interval time.Duration) + // StopJanitor stops the running janitor. 
+ StopJanitor() +} + +// Hub is central place to get all the required clients to communicate with cloudfoundry. +type Hub struct { + cfg *Config + userAgent string + log *logp.Logger +} + +// NewHub creates a new hub to get the required clients to communicate with cloudfoundry. +func NewHub(cfg *Config, userAgent string, log *logp.Logger) *Hub { + return &Hub{cfg, userAgent, log} +} + +// Client returns the cloudfoundry client. +func (h *Hub) Client() (Client, error) { + httpClient, insecure, err := h.httpClient() + if err != nil { + return nil, err + } + + h.log.Debugw( + "creating cloudfoundry ", + "client_id", h.cfg.ClientID, + "client_secret_present", h.cfg.ClientSecret != "", + "api_address", h.cfg.APIAddress) + cf, err := cfclient.NewClient(&cfclient.Config{ + ClientID: h.cfg.ClientID, + ClientSecret: h.cfg.ClientSecret, + ApiAddress: h.cfg.APIAddress, + HttpClient: httpClient, + SkipSslValidation: insecure, + UserAgent: h.userAgent, + }) + if err != nil { + return nil, errors.Wrap(err, "error creating cloudfoundry client") + } + if h.cfg.DopplerAddress != "" { + cf.Endpoint.DopplerEndpoint = h.cfg.DopplerAddress + } + if h.cfg.UaaAddress != "" { + cf.Endpoint.AuthEndpoint = h.cfg.UaaAddress + } + return newClientCacheWrap(cf, h.cfg.CacheDuration, h.log), nil +} + +// RlpListener returns a listener client that calls the passed callback when the provided events are streamed through +// the loggregator to this client. +func (h *Hub) RlpListener(callbacks RlpListenerCallbacks) (*RlpListener, error) { + client, err := h.Client() + if err != nil { + return nil, err + } + return h.RlpListenerFromClient(client, callbacks) +} + +// RlpListener returns a listener client that calls the passed callback when the provided events are streamed through +// the loggregator to this client. +// +// In the case that the cloudfoundry client was already needed by the code path, call this method +// as not to create a intermediate client that will not be used. 
func (h *Hub) RlpListenerFromClient(client Client, callbacks RlpListenerCallbacks) (*RlpListener, error) {
	// Use the configured RLP address, or derive it from the API address by
	// swapping the "api" host prefix for "log-stream".
	var rlpAddress string
	if h.cfg.RlpAddress != "" {
		rlpAddress = h.cfg.RlpAddress
	} else {
		rlpAddress = strings.Replace(h.cfg.APIAddress, "api", "log-stream", 1)
	}
	doer, err := h.doerFromClient(client)
	if err != nil {
		return nil, err
	}
	return newRlpListener(rlpAddress, doer, h.cfg.ShardID, callbacks, h.log), nil
}

// doerFromClient returns an auth token doer using uaa.
// The passed client must be the *clientCacheWrap returned by Hub.Client; the
// UAA endpoint is taken from the wrapped cfclient unless overridden in config.
func (h *Hub) doerFromClient(client Client) (*authTokenDoer, error) {
	httpClient, _, err := h.httpClient()
	if err != nil {
		return nil, err
	}
	ccw, ok := client.(*clientCacheWrap)
	if !ok {
		return nil, fmt.Errorf("must pass client returned from hub.Client")
	}
	cfClient, ok := ccw.client.(*cfclient.Client)
	if !ok {
		return nil, fmt.Errorf("must pass client returned from hub.Client")
	}
	url := cfClient.Endpoint.AuthEndpoint
	if h.cfg.UaaAddress != "" {
		url = h.cfg.UaaAddress
	}
	return newAuthTokenDoer(url, h.cfg.ClientID, h.cfg.ClientSecret, httpClient, h.log), nil
}

// httpClient returns an HTTP client configured with the configuration TLS.
// The bool result reports whether certificate verification is disabled.
func (h *Hub) httpClient() (*http.Client, bool, error) {
	tls, err := h.cfg.TLSConfig()
	if err != nil {
		return nil, true, err
	}
	httpClient := cfclient.DefaultConfig().HttpClient
	// NOTE(review): unchecked type assertion — panics if cfclient ever stops
	// using *http.Transport as the default transport; confirm acceptable.
	tp := httpClient.Transport.(*http.Transport)
	tp.TLSClientConfig = tls
	return httpClient, tls.InsecureSkipVerify, nil
}
diff --git a/x-pack/libbeat/common/cloudfoundry/rlplistener.go b/x-pack/libbeat/common/cloudfoundry/rlplistener.go
new file mode 100644
index 000000000000..3372b73dccac
--- /dev/null
+++ b/x-pack/libbeat/common/cloudfoundry/rlplistener.go
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.
+ +package cloudfoundry + +import ( + "context" + "sync" + + loggregator "code.cloudfoundry.org/go-loggregator" + "code.cloudfoundry.org/go-loggregator/conversion" + "code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2" + + "github.com/elastic/beats/libbeat/logp" +) + +type RlpListenerCallbacks struct { + HttpAccess func(*EventHttpAccess) + Log func(*EventLog) + Counter func(*EventCounter) + ValueMetric func(*EventValueMetric) + ContainerMetric func(*EventContainerMetric) + Error func(*EventError) +} + +// RlpListener is a listener client that connects to the cloudfoundry loggregator. +type RlpListener struct { + cancel context.CancelFunc + wg sync.WaitGroup + rlpAddress string + doer *authTokenDoer + shardID string + log *logp.Logger + callbacks RlpListenerCallbacks +} + +// newRlpListener returns default implementation for RLPClient +func newRlpListener( + rlpAddress string, + doer *authTokenDoer, + shardID string, + callbacks RlpListenerCallbacks, + log *logp.Logger) *RlpListener { + return &RlpListener{ + rlpAddress: rlpAddress, + doer: doer, + shardID: shardID, + callbacks: callbacks, + log: log, + } +} + +// Start receiving events through from loggregator. +func (c *RlpListener) Start(ctx context.Context) { + c.log.Debugw("starting RLP listener.", "rlpAddress", c.rlpAddress) + + ops := []loggregator.RLPGatewayClientOption{loggregator.WithRLPGatewayHTTPClient(c.doer)} + rlpClient := loggregator.NewRLPGatewayClient(c.rlpAddress, ops...) 
+ + ctx, cancel := context.WithCancel(ctx) + c.cancel = cancel + l := &loggregator_v2.EgressBatchRequest{ + ShardId: c.shardID, + Selectors: c.getSelectors(), + } + es := rlpClient.Stream(ctx, l) + + go func() { + c.wg.Add(1) + defer c.wg.Done() + for { + select { + case <-ctx.Done(): + c.log.Debug("context done message at loggregator received.") + return + default: + envelopes := es() + for i := range envelopes { + v1s := conversion.ToV1(envelopes[i]) + for _, v := range v1s { + evt := envelopeToEvent(v) + if evt.EventType() == EventTypeHttpAccess && c.callbacks.HttpAccess != nil { + c.callbacks.HttpAccess(evt.(*EventHttpAccess)) + } else if evt.EventType() == EventTypeLog && c.callbacks.Log != nil { + c.callbacks.Log(evt.(*EventLog)) + } else if evt.EventType() == EventTypeCounter && c.callbacks.Counter != nil { + c.callbacks.Counter(evt.(*EventCounter)) + } else if evt.EventType() == EventTypeValueMetric && c.callbacks.ValueMetric != nil { + c.callbacks.ValueMetric(evt.(*EventValueMetric)) + } else if evt.EventType() == EventTypeContainerMetric && c.callbacks.ContainerMetric != nil { + c.callbacks.ContainerMetric(evt.(*EventContainerMetric)) + } else if evt.EventType() == EventTypeError && c.callbacks.Error != nil { + c.callbacks.Error(evt.(*EventError)) + } + } + } + } + } + }() +} + +// Stop receiving events +func (c *RlpListener) Stop() { + c.log.Debugw("stopping RLP listener.", "rlpAddress", c.rlpAddress) + + if c.cancel != nil { + c.cancel() + } + c.wg.Wait() +} + +// getSelectors returns the server side selectors based on the callbacks defined on the listener. 
func (c *RlpListener) getSelectors() []*loggregator_v2.Selector {
	selectors := make([]*loggregator_v2.Selector, 0)
	// HTTP access events arrive as v2 Timer envelopes.
	if c.callbacks.HttpAccess != nil {
		selectors = append(selectors, &loggregator_v2.Selector{
			Message: &loggregator_v2.Selector_Timer{
				Timer: &loggregator_v2.TimerSelector{},
			},
		})
	}
	if c.callbacks.Log != nil {
		selectors = append(selectors, &loggregator_v2.Selector{
			Message: &loggregator_v2.Selector_Log{
				Log: &loggregator_v2.LogSelector{},
			},
		})
	}
	if c.callbacks.Counter != nil {
		selectors = append(selectors, &loggregator_v2.Selector{
			Message: &loggregator_v2.Selector_Counter{
				Counter: &loggregator_v2.CounterSelector{},
			},
		})
	}
	// Value metrics and container metrics both arrive as v2 Gauge envelopes,
	// so a single Gauge selector covers either callback.
	if c.callbacks.ValueMetric != nil || c.callbacks.ContainerMetric != nil {
		selectors = append(selectors, &loggregator_v2.Selector{
			Message: &loggregator_v2.Selector_Gauge{
				Gauge: &loggregator_v2.GaugeSelector{},
			},
		})
	}
	if c.callbacks.Error != nil {
		selectors = append(selectors, &loggregator_v2.Selector{
			Message: &loggregator_v2.Selector_Event{
				Event: &loggregator_v2.EventSelector{},
			},
		})
	}
	return selectors
}
diff --git a/x-pack/libbeat/common/cloudfoundry/rlplistener_test.go b/x-pack/libbeat/common/cloudfoundry/rlplistener_test.go
new file mode 100644
index 000000000000..724f22d3ed85
--- /dev/null
+++ b/x-pack/libbeat/common/cloudfoundry/rlplistener_test.go
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.

// +build !integration

package cloudfoundry

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2"
)

// TestGetSelectors verifies that getSelectors emits exactly one server-side
// selector per registered callback, and that ValueMetric and ContainerMetric
// share a single Gauge selector.
func TestGetSelectors(t *testing.T) {

	tests := []struct {
		Name      string
		Callbacks RlpListenerCallbacks
		Selectors []*loggregator_v2.Selector
	}{
		{
			Name: "HTTPAccess only",
			Callbacks: RlpListenerCallbacks{
				HttpAccess: func(_ *EventHttpAccess) {},
			},
			Selectors: []*loggregator_v2.Selector{
				{
					Message: &loggregator_v2.Selector_Timer{
						Timer: &loggregator_v2.TimerSelector{},
					},
				},
			},
		},
		{
			Name: "Log only",
			Callbacks: RlpListenerCallbacks{
				Log: func(_ *EventLog) {},
			},
			Selectors: []*loggregator_v2.Selector{
				{
					Message: &loggregator_v2.Selector_Log{
						Log: &loggregator_v2.LogSelector{},
					},
				},
			},
		},
		{
			Name: "Counter only",
			Callbacks: RlpListenerCallbacks{
				Counter: func(_ *EventCounter) {},
			},
			Selectors: []*loggregator_v2.Selector{
				{
					Message: &loggregator_v2.Selector_Counter{
						Counter: &loggregator_v2.CounterSelector{},
					},
				},
			},
		},
		{
			Name: "ValueMetric only",
			Callbacks: RlpListenerCallbacks{
				ValueMetric: func(_ *EventValueMetric) {},
			},
			Selectors: []*loggregator_v2.Selector{
				{
					Message: &loggregator_v2.Selector_Gauge{
						Gauge: &loggregator_v2.GaugeSelector{},
					},
				},
			},
		},
		{
			Name: "ContainerMetric only",
			Callbacks: RlpListenerCallbacks{
				ContainerMetric: func(_ *EventContainerMetric) {},
			},
			Selectors: []*loggregator_v2.Selector{
				{
					Message: &loggregator_v2.Selector_Gauge{
						Gauge: &loggregator_v2.GaugeSelector{},
					},
				},
			},
		},
		{
			Name: "Error only",
			Callbacks: RlpListenerCallbacks{
				Error: func(_ *EventError) {},
			},
			Selectors: []*loggregator_v2.Selector{
				{
					Message: &loggregator_v2.Selector_Event{
						Event: &loggregator_v2.EventSelector{},
					},
				},
			},
		},
		{
			// Both metric callbacks map onto the same Gauge selector, so
			// only one selector is expected here.
			Name: "ValueMetric and ContainerMetric",
			Callbacks: RlpListenerCallbacks{
				ValueMetric:     func(_ *EventValueMetric) {},
				ContainerMetric: func(_ *EventContainerMetric) {},
			},
			Selectors: []*loggregator_v2.Selector{
				{
					Message: &loggregator_v2.Selector_Gauge{
						Gauge: &loggregator_v2.GaugeSelector{},
					},
				},
			},
		},
		{
			Name: "All",
			Callbacks: RlpListenerCallbacks{
				HttpAccess:      func(_ *EventHttpAccess) {},
				Log:             func(_ *EventLog) {},
				Counter:         func(_ *EventCounter) {},
				ValueMetric:     func(_ *EventValueMetric) {},
				ContainerMetric: func(_ *EventContainerMetric) {},
				Error:           func(_ *EventError) {},
			},
			Selectors: []*loggregator_v2.Selector{
				{
					Message: &loggregator_v2.Selector_Timer{
						Timer: &loggregator_v2.TimerSelector{},
					},
				},
				{
					Message: &loggregator_v2.Selector_Log{
						Log: &loggregator_v2.LogSelector{},
					},
				},
				{
					Message: &loggregator_v2.Selector_Counter{
						Counter: &loggregator_v2.CounterSelector{},
					},
				},
				{
					Message: &loggregator_v2.Selector_Gauge{
						Gauge: &loggregator_v2.GaugeSelector{},
					},
				},
				{
					Message: &loggregator_v2.Selector_Event{
						Event: &loggregator_v2.EventSelector{},
					},
				},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.Name, func(t *testing.T) {
			listener := newRlpListener("", nil, "", test.Callbacks, nil)
			observed := listener.getSelectors()
			assert.EqualValues(t, test.Selectors, observed)
		})
	}

}