Mirror of https://github.com/kubernetes-sigs/descheduler.git

Update vendor directory for k8s events support

Executed `glide up -v` after adding the k8s event feature; the vendor dependency changes below are required for k8s event support.
glide.lock (generated, 10 lines changed)

@@ -1,5 +1,5 @@
 hash: f82bf395df6f00004b1794606a9256d5c69b62deb5498b574a08b2b5d15f7a6a
-updated: 2019-10-28T17:02:34.793943-04:00
+updated: 2019-12-11T21:28:26.970892-06:00
 imports:
 - name: cloud.google.com/go
   version: 8c41231e01b2085512d98153bcffb847ff9b4b9f
@@ -32,6 +32,10 @@ imports:
   subpackages:
   - proto
   - sortkeys
+- name: github.com/golang/groupcache
+  version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
+  subpackages:
+  - lru
 - name: github.com/golang/protobuf
   version: b5d812f8a3706043e23a9cd5babf2e5423744d30
   subpackages:
@@ -342,6 +346,8 @@ imports:
   - tools/clientcmd/api/v1
   - tools/metrics
   - tools/pager
+  - tools/record
+  - tools/record/util
   - tools/reference
   - transport
   - util/cert
@@ -368,7 +374,7 @@ imports:
   subpackages:
   - pkg/util/proto
 - name: k8s.io/kubernetes
-  version: b1e4a13ba1f74f363154ea2e1d36047ea55190a3
+  version: d70a3ca08fe72ad8dd0b2d72cf032474ab2ce2a9
   subpackages:
   - pkg/api/legacyscheme
   - pkg/api/testapi
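For context on the additions above: `k8s.io/client-go/tools/record` is client-go's event-emission package (with `tools/record/util` as its helper subpackage), and `github.com/golang/groupcache` is most likely pulled in transitively because the event aggregation in `tools/record` uses `groupcache/lru`. Below is a minimal Go sketch of the usual recorder wiring, not necessarily descheduler's actual code; the component name and function are illustrative:

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder wires an EventRecorder that posts Events through the API server.
// (Illustrative helper; descheduler's real wiring may differ.)
func newRecorder(client kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "descheduler"})
}

Callers then emit events with, e.g., `recorder.Event(pod, v1.EventTypeNormal, "Evicted", "pod evicted by descheduler")`.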
vendor/github.com/golang/groupcache/.gitignore (generated, vendored, new file, 1 line)

@@ -0,0 +1 @@
*~
vendor/github.com/golang/groupcache/LICENSE (generated, vendored, new file, 191 lines)

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License.

Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License.

Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution.

You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

You must give any other recipients of the Work or Derivative Works a copy of this License; and
You must cause any modified files to carry prominent notices stating that You changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions.

Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks.

This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty.

Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability.

In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability.

While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
vendor/github.com/golang/groupcache/README.md (generated, vendored, new file, 73 lines)

@@ -0,0 +1,73 @@
# groupcache

## Summary

groupcache is a caching and cache-filling library, intended as a replacement for memcached in many cases.

For API docs and examples, see http://godoc.org/github.com/golang/groupcache

## Comparison to memcached

### **Like memcached**, groupcache:

* shards by key to select which peer is responsible for that key

### **Unlike memcached**, groupcache:

* does not require running a separate set of servers, thus massively reducing deployment/configuration pain. groupcache is a client library as well as a server. It connects to its own peers.

* comes with a cache filling mechanism. Whereas memcached just says "Sorry, cache miss", often resulting in a thundering herd of database (or whatever) loads from an unbounded number of clients (which has resulted in several fun outages), groupcache coordinates cache fills such that only one load in one process of an entire replicated set of processes populates the cache, then multiplexes the loaded value to all callers.

* does not support versioned values. If key "foo" is value "bar", key "foo" must always be "bar". There are neither cache expiration times, nor explicit cache evictions. Thus there is also no CAS, nor Increment/Decrement. This also means that groupcache....

* ... supports automatic mirroring of super-hot items to multiple processes. This prevents memcached hot spotting where a machine's CPU and/or NIC are overloaded by very popular keys/values.

* is currently only available for Go. It's very unlikely that I (bradfitz@) will port the code to any other language.

## Loading process

In a nutshell, a groupcache lookup of **Get("foo")** looks like:

(On machine #5 of a set of N machines running the same code)

1. Is the value of "foo" in local memory because it's super hot? If so, use it.

2. Is the value of "foo" in local memory because peer #5 (the current peer) is the owner of it? If so, use it.

3. Amongst all the peers in my set of N, am I the owner of the key "foo"? (e.g. does it consistent hash to 5?) If so, load it. If other callers come in, via the same process or via RPC requests from peers, they block waiting for the load to finish and get the same answer. If not, RPC to the peer that's the owner and get the answer. If the RPC fails, just load it locally (still with local dup suppression).

## Users

groupcache is in production use by dl.google.com (its original user), parts of Blogger, parts of Google Code, parts of Google Fiber, parts of Google production monitoring systems, etc.

## Presentations

See http://talks.golang.org/2013/oscon-dl.slide

## Help

Use the golang-nuts mailing list for any discussion or questions.
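The README's three-step lookup maps onto a small API. Here is a minimal, self-contained sketch of a purely local group (the group name and key are made up; with no peer pool such as `groupcache.NewHTTPPool` registered, every load happens locally):

package main

import (
	"fmt"

	"github.com/golang/groupcache"
)

func main() {
	group := groupcache.NewGroup("greetings", 1<<20, groupcache.GetterFunc(
		func(_ groupcache.Context, key string, dest groupcache.Sink) error {
			// Step 3 of the loading process: this runs once per key
			// across concurrent callers thanks to singleflight dedup.
			return dest.SetString("Hello, " + key)
		}))

	var s string
	if err := group.Get(nil, "world", groupcache.StringSink(&s)); err != nil {
		panic(err)
	}
	fmt.Println(s) // Hello, world
}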
vendor/github.com/golang/groupcache/byteview.go (generated, vendored, new file, 160 lines)

@@ -0,0 +1,160 @@
/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package groupcache

import (
	"bytes"
	"errors"
	"io"
	"strings"
)

// A ByteView holds an immutable view of bytes.
// Internally it wraps either a []byte or a string,
// but that detail is invisible to callers.
//
// A ByteView is meant to be used as a value type, not
// a pointer (like a time.Time).
type ByteView struct {
	// If b is non-nil, b is used, else s is used.
	b []byte
	s string
}

// Len returns the view's length.
func (v ByteView) Len() int {
	if v.b != nil {
		return len(v.b)
	}
	return len(v.s)
}

// ByteSlice returns a copy of the data as a byte slice.
func (v ByteView) ByteSlice() []byte {
	if v.b != nil {
		return cloneBytes(v.b)
	}
	return []byte(v.s)
}

// String returns the data as a string, making a copy if necessary.
func (v ByteView) String() string {
	if v.b != nil {
		return string(v.b)
	}
	return v.s
}

// At returns the byte at index i.
func (v ByteView) At(i int) byte {
	if v.b != nil {
		return v.b[i]
	}
	return v.s[i]
}

// Slice slices the view between the provided from and to indices.
func (v ByteView) Slice(from, to int) ByteView {
	if v.b != nil {
		return ByteView{b: v.b[from:to]}
	}
	return ByteView{s: v.s[from:to]}
}

// SliceFrom slices the view from the provided index until the end.
func (v ByteView) SliceFrom(from int) ByteView {
	if v.b != nil {
		return ByteView{b: v.b[from:]}
	}
	return ByteView{s: v.s[from:]}
}

// Copy copies b into dest and returns the number of bytes copied.
func (v ByteView) Copy(dest []byte) int {
	if v.b != nil {
		return copy(dest, v.b)
	}
	return copy(dest, v.s)
}

// Equal returns whether the bytes in b are the same as the bytes in
// b2.
func (v ByteView) Equal(b2 ByteView) bool {
	if b2.b == nil {
		return v.EqualString(b2.s)
	}
	return v.EqualBytes(b2.b)
}

// EqualString returns whether the bytes in b are the same as the bytes
// in s.
func (v ByteView) EqualString(s string) bool {
	if v.b == nil {
		return v.s == s
	}
	l := v.Len()
	if len(s) != l {
		return false
	}
	for i, bi := range v.b {
		if bi != s[i] {
			return false
		}
	}
	return true
}

// EqualBytes returns whether the bytes in b are the same as the bytes
// in b2.
func (v ByteView) EqualBytes(b2 []byte) bool {
	if v.b != nil {
		return bytes.Equal(v.b, b2)
	}
	l := v.Len()
	if len(b2) != l {
		return false
	}
	for i, bi := range b2 {
		if bi != v.s[i] {
			return false
		}
	}
	return true
}

// Reader returns an io.ReadSeeker for the bytes in v.
func (v ByteView) Reader() io.ReadSeeker {
	if v.b != nil {
		return bytes.NewReader(v.b)
	}
	return strings.NewReader(v.s)
}

// ReadAt implements io.ReaderAt on the bytes in v.
func (v ByteView) ReadAt(p []byte, off int64) (n int, err error) {
	if off < 0 {
		return 0, errors.New("view: invalid offset")
	}
	if off >= int64(v.Len()) {
		return 0, io.EOF
	}
	n = v.SliceFrom(int(off)).Copy(p)
	if n < len(p) {
		err = io.EOF
	}
	return
}
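A quick illustration of the value semantics described in the doc comment, written in the style of a Go example test. It has to live in package groupcache, since b and s are unexported (the test file below constructs views the same way); nothing here copies the underlying data until Copy is called:

package groupcache

import "fmt"

// ExampleByteView is an illustrative sketch, not part of the vendored code.
func ExampleByteView() {
	v := ByteView{s: "immutable view"}
	sub := v.Slice(0, 9) // shares the underlying string; no copy yet
	buf := make([]byte, sub.Len())
	sub.Copy(buf) // the first explicit copy of the data
	fmt.Println(string(buf))
	// Output: immutable
}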
vendor/github.com/golang/groupcache/byteview_test.go (generated, vendored, new file, 142 lines)

@@ -0,0 +1,142 @@
/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package groupcache

import (
	"fmt"
	"io"
	"io/ioutil"
	"testing"
)

func TestByteView(t *testing.T) {
	for _, s := range []string{"", "x", "yy"} {
		for _, v := range []ByteView{of([]byte(s)), of(s)} {
			name := fmt.Sprintf("string %q, view %+v", s, v)
			if v.Len() != len(s) {
				t.Errorf("%s: Len = %d; want %d", name, v.Len(), len(s))
			}
			if v.String() != s {
				t.Errorf("%s: String = %q; want %q", name, v.String(), s)
			}
			var longDest [3]byte
			if n := v.Copy(longDest[:]); n != len(s) {
				t.Errorf("%s: long Copy = %d; want %d", name, n, len(s))
			}
			var shortDest [1]byte
			if n := v.Copy(shortDest[:]); n != min(len(s), 1) {
				t.Errorf("%s: short Copy = %d; want %d", name, n, min(len(s), 1))
			}
			if got, err := ioutil.ReadAll(v.Reader()); err != nil || string(got) != s {
				t.Errorf("%s: Reader = %q, %v; want %q", name, got, err, s)
			}
			if got, err := ioutil.ReadAll(io.NewSectionReader(v, 0, int64(len(s)))); err != nil || string(got) != s {
				t.Errorf("%s: SectionReader of ReaderAt = %q, %v; want %q", name, got, err, s)
			}
		}
	}
}

// of returns a byte view of the []byte or string in x.
func of(x interface{}) ByteView {
	if bytes, ok := x.([]byte); ok {
		return ByteView{b: bytes}
	}
	return ByteView{s: x.(string)}
}

func TestByteViewEqual(t *testing.T) {
	tests := []struct {
		a    interface{} // string or []byte
		b    interface{} // string or []byte
		want bool
	}{
		{"x", "x", true},
		{"x", "y", false},
		{"x", "yy", false},
		{[]byte("x"), []byte("x"), true},
		{[]byte("x"), []byte("y"), false},
		{[]byte("x"), []byte("yy"), false},
		{[]byte("x"), "x", true},
		{[]byte("x"), "y", false},
		{[]byte("x"), "yy", false},
		{"x", []byte("x"), true},
		{"x", []byte("y"), false},
		{"x", []byte("yy"), false},
	}
	for i, tt := range tests {
		va := of(tt.a)
		if bytes, ok := tt.b.([]byte); ok {
			if got := va.EqualBytes(bytes); got != tt.want {
				t.Errorf("%d. EqualBytes = %v; want %v", i, got, tt.want)
			}
		} else {
			if got := va.EqualString(tt.b.(string)); got != tt.want {
				t.Errorf("%d. EqualString = %v; want %v", i, got, tt.want)
			}
		}
		if got := va.Equal(of(tt.b)); got != tt.want {
			t.Errorf("%d. Equal = %v; want %v", i, got, tt.want)
		}
	}
}

func TestByteViewSlice(t *testing.T) {
	tests := []struct {
		in   string
		from int
		to   interface{} // nil to mean the end (SliceFrom); else int
		want string
	}{
		{
			in:   "abc",
			from: 1,
			to:   2,
			want: "b",
		},
		{
			in:   "abc",
			from: 1,
			want: "bc",
		},
		{
			in:   "abc",
			to:   2,
			want: "ab",
		},
	}
	for i, tt := range tests {
		for _, v := range []ByteView{of([]byte(tt.in)), of(tt.in)} {
			name := fmt.Sprintf("test %d, view %+v", i, v)
			if tt.to != nil {
				v = v.Slice(tt.from, tt.to.(int))
			} else {
				v = v.SliceFrom(tt.from)
			}
			if v.String() != tt.want {
				t.Errorf("%s: got %q; want %q", name, v.String(), tt.want)
			}
		}
	}
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
vendor/github.com/golang/groupcache/consistenthash/consistenthash.go (generated, vendored, new file, 81 lines)

@@ -0,0 +1,81 @@
/*
Copyright 2013 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package consistenthash provides an implementation of a ring hash.
package consistenthash

import (
	"hash/crc32"
	"sort"
	"strconv"
)

type Hash func(data []byte) uint32

type Map struct {
	hash     Hash
	replicas int
	keys     []int // Sorted
	hashMap  map[int]string
}

func New(replicas int, fn Hash) *Map {
	m := &Map{
		replicas: replicas,
		hash:     fn,
		hashMap:  make(map[int]string),
	}
	if m.hash == nil {
		m.hash = crc32.ChecksumIEEE
	}
	return m
}

// Returns true if there are no items available.
func (m *Map) IsEmpty() bool {
	return len(m.keys) == 0
}

// Adds some keys to the hash.
func (m *Map) Add(keys ...string) {
	for _, key := range keys {
		for i := 0; i < m.replicas; i++ {
			hash := int(m.hash([]byte(strconv.Itoa(i) + key)))
			m.keys = append(m.keys, hash)
			m.hashMap[hash] = key
		}
	}
	sort.Ints(m.keys)
}

// Gets the closest item in the hash to the provided key.
func (m *Map) Get(key string) string {
	if m.IsEmpty() {
		return ""
	}

	hash := int(m.hash([]byte(key)))

	// Binary search for appropriate replica.
	idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })

	// Means we have cycled back to the first replica.
	if idx == len(m.keys) {
		idx = 0
	}

	return m.hashMap[m.keys[idx]]
}
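For orientation, a hedged sketch of how this ring hash can be used on its own (the peer names are made up; within groupcache itself, the HTTP peer pool drives it internally):

package main

import (
	"fmt"

	"github.com/golang/groupcache/consistenthash"
)

func main() {
	// 50 virtual replicas per key smooth out the distribution;
	// nil selects the default crc32.ChecksumIEEE hash.
	ring := consistenthash.New(50, nil)
	ring.Add("peer-a", "peer-b", "peer-c")

	// The same key maps to the same peer until membership changes,
	// and adding a peer only remaps a fraction of the keyspace.
	fmt.Println(ring.Get("some-cache-key"))
}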
vendor/github.com/golang/groupcache/consistenthash/consistenthash_test.go (generated, vendored, new file, 110 lines)

@@ -0,0 +1,110 @@
/*
Copyright 2013 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package consistenthash

import (
	"fmt"
	"strconv"
	"testing"
)

func TestHashing(t *testing.T) {

	// Override the hash function to return easier to reason about values. Assumes
	// the keys can be converted to an integer.
	hash := New(3, func(key []byte) uint32 {
		i, err := strconv.Atoi(string(key))
		if err != nil {
			panic(err)
		}
		return uint32(i)
	})

	// Given the above hash function, this will give replicas with "hashes":
	// 2, 4, 6, 12, 14, 16, 22, 24, 26
	hash.Add("6", "4", "2")

	testCases := map[string]string{
		"2":  "2",
		"11": "2",
		"23": "4",
		"27": "2",
	}

	for k, v := range testCases {
		if hash.Get(k) != v {
			t.Errorf("Asking for %s, should have yielded %s", k, v)
		}
	}

	// Adds 8, 18, 28
	hash.Add("8")

	// 27 should now map to 8.
	testCases["27"] = "8"

	for k, v := range testCases {
		if hash.Get(k) != v {
			t.Errorf("Asking for %s, should have yielded %s", k, v)
		}
	}

}

func TestConsistency(t *testing.T) {
	hash1 := New(1, nil)
	hash2 := New(1, nil)

	hash1.Add("Bill", "Bob", "Bonny")
	hash2.Add("Bob", "Bonny", "Bill")

	if hash1.Get("Ben") != hash2.Get("Ben") {
		t.Errorf("Fetching 'Ben' from both hashes should be the same")
	}

	hash2.Add("Becky", "Ben", "Bobby")

	if hash1.Get("Ben") != hash2.Get("Ben") ||
		hash1.Get("Bob") != hash2.Get("Bob") ||
		hash1.Get("Bonny") != hash2.Get("Bonny") {
		t.Errorf("Direct matches should always return the same entry")
	}

}

func BenchmarkGet8(b *testing.B)   { benchmarkGet(b, 8) }
func BenchmarkGet32(b *testing.B)  { benchmarkGet(b, 32) }
func BenchmarkGet128(b *testing.B) { benchmarkGet(b, 128) }
func BenchmarkGet512(b *testing.B) { benchmarkGet(b, 512) }

func benchmarkGet(b *testing.B, shards int) {

	hash := New(50, nil)

	var buckets []string
	for i := 0; i < shards; i++ {
		buckets = append(buckets, fmt.Sprintf("shard-%d", i))
	}

	hash.Add(buckets...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		hash.Get(buckets[i&(shards-1)])
	}
}
vendor/github.com/golang/groupcache/groupcache.go (generated, vendored, new file, 489 lines)

@@ -0,0 +1,489 @@
/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package groupcache provides a data loading mechanism with caching
// and de-duplication that works across a set of peer processes.
//
// Each data Get first consults its local cache, otherwise delegates
// to the requested key's canonical owner, which then checks its cache
// or finally gets the data. In the common case, many concurrent
// cache misses across a set of peers for the same key result in just
// one cache fill.
package groupcache

import (
	"errors"
	"math/rand"
	"strconv"
	"sync"
	"sync/atomic"

	pb "github.com/golang/groupcache/groupcachepb"
	"github.com/golang/groupcache/lru"
	"github.com/golang/groupcache/singleflight"
)

// A Getter loads data for a key.
type Getter interface {
	// Get returns the value identified by key, populating dest.
	//
	// The returned data must be unversioned. That is, key must
	// uniquely describe the loaded data, without an implicit
	// current time, and without relying on cache expiration
	// mechanisms.
	Get(ctx Context, key string, dest Sink) error
}

// A GetterFunc implements Getter with a function.
type GetterFunc func(ctx Context, key string, dest Sink) error

func (f GetterFunc) Get(ctx Context, key string, dest Sink) error {
	return f(ctx, key, dest)
}

var (
	mu     sync.RWMutex
	groups = make(map[string]*Group)

	initPeerServerOnce sync.Once
	initPeerServer     func()
)

// GetGroup returns the named group previously created with NewGroup, or
// nil if there's no such group.
func GetGroup(name string) *Group {
	mu.RLock()
	g := groups[name]
	mu.RUnlock()
	return g
}

// NewGroup creates a coordinated group-aware Getter from a Getter.
//
// The returned Getter tries (but does not guarantee) to run only one
// Get call at once for a given key across an entire set of peer
// processes. Concurrent callers both in the local process and in
// other processes receive copies of the answer once the original Get
// completes.
//
// The group name must be unique for each getter.
func NewGroup(name string, cacheBytes int64, getter Getter) *Group {
	return newGroup(name, cacheBytes, getter, nil)
}

// If peers is nil, the peerPicker is called via a sync.Once to initialize it.
func newGroup(name string, cacheBytes int64, getter Getter, peers PeerPicker) *Group {
	if getter == nil {
		panic("nil Getter")
	}
	mu.Lock()
	defer mu.Unlock()
	initPeerServerOnce.Do(callInitPeerServer)
	if _, dup := groups[name]; dup {
		panic("duplicate registration of group " + name)
	}
	g := &Group{
		name:       name,
		getter:     getter,
		peers:      peers,
		cacheBytes: cacheBytes,
		loadGroup:  &singleflight.Group{},
	}
	if fn := newGroupHook; fn != nil {
		fn(g)
	}
	groups[name] = g
	return g
}

// newGroupHook, if non-nil, is called right after a new group is created.
var newGroupHook func(*Group)

// RegisterNewGroupHook registers a hook that is run each time
// a group is created.
func RegisterNewGroupHook(fn func(*Group)) {
	if newGroupHook != nil {
		panic("RegisterNewGroupHook called more than once")
	}
	newGroupHook = fn
}

// RegisterServerStart registers a hook that is run when the first
// group is created.
func RegisterServerStart(fn func()) {
	if initPeerServer != nil {
		panic("RegisterServerStart called more than once")
	}
	initPeerServer = fn
}

func callInitPeerServer() {
	if initPeerServer != nil {
		initPeerServer()
	}
}

// A Group is a cache namespace and associated data loaded spread over
// a group of 1 or more machines.
type Group struct {
	name       string
	getter     Getter
	peersOnce  sync.Once
	peers      PeerPicker
	cacheBytes int64 // limit for sum of mainCache and hotCache size

	// mainCache is a cache of the keys for which this process
	// (amongst its peers) is authoritative. That is, this cache
	// contains keys which consistent hash on to this process's
	// peer number.
	mainCache cache

	// hotCache contains keys/values for which this peer is not
	// authoritative (otherwise they would be in mainCache), but
	// are popular enough to warrant mirroring in this process to
	// avoid going over the network to fetch from a peer. Having
	// a hotCache avoids network hotspotting, where a peer's
	// network card could become the bottleneck on a popular key.
	// This cache is used sparingly to maximize the total number
	// of key/value pairs that can be stored globally.
	hotCache cache

	// loadGroup ensures that each key is only fetched once
	// (either locally or remotely), regardless of the number of
	// concurrent callers.
	loadGroup flightGroup

	// Stats are statistics on the group.
	Stats Stats
}

// flightGroup is defined as an interface which flightgroup.Group
// satisfies. We define this so that we may test with an alternate
// implementation.
type flightGroup interface {
	// Done is called when Do is done.
	Do(key string, fn func() (interface{}, error)) (interface{}, error)
}

// Stats are per-group statistics.
type Stats struct {
	Gets           AtomicInt // any Get request, including from peers
	CacheHits      AtomicInt // either cache was good
	PeerLoads      AtomicInt // either remote load or remote cache hit (not an error)
	PeerErrors     AtomicInt
	Loads          AtomicInt // (gets - cacheHits)
	LoadsDeduped   AtomicInt // after singleflight
	LocalLoads     AtomicInt // total good local loads
	LocalLoadErrs  AtomicInt // total bad local loads
	ServerRequests AtomicInt // gets that came over the network from peers
}

// Name returns the name of the group.
func (g *Group) Name() string {
	return g.name
}

func (g *Group) initPeers() {
	if g.peers == nil {
		g.peers = getPeers()
	}
}

func (g *Group) Get(ctx Context, key string, dest Sink) error {
	g.peersOnce.Do(g.initPeers)
	g.Stats.Gets.Add(1)
	if dest == nil {
		return errors.New("groupcache: nil dest Sink")
	}
	value, cacheHit := g.lookupCache(key)

	if cacheHit {
		g.Stats.CacheHits.Add(1)
		return setSinkView(dest, value)
	}

	// Optimization to avoid double unmarshalling or copying: keep
	// track of whether the dest was already populated. One caller
	// (if local) will set this; the losers will not. The common
	// case will likely be one caller.
	destPopulated := false
	value, destPopulated, err := g.load(ctx, key, dest)
	if err != nil {
		return err
	}
	if destPopulated {
		return nil
	}
	return setSinkView(dest, value)
}

// load loads key either by invoking the getter locally or by sending it to another machine.
func (g *Group) load(ctx Context, key string, dest Sink) (value ByteView, destPopulated bool, err error) {
	g.Stats.Loads.Add(1)
	viewi, err := g.loadGroup.Do(key, func() (interface{}, error) {
		// Check the cache again because singleflight can only dedup calls
		// that overlap concurrently. It's possible for 2 concurrent
		// requests to miss the cache, resulting in 2 load() calls. An
		// unfortunate goroutine scheduling would result in this callback
		// being run twice, serially. If we don't check the cache again,
		// cache.nbytes would be incremented below even though there will
		// be only one entry for this key.
		//
		// Consider the following serialized event ordering for two
		// goroutines in which this callback gets called twice for the
		// same key:
		// 1: Get("key")
		// 2: Get("key")
		// 1: lookupCache("key")
		// 2: lookupCache("key")
		// 1: load("key")
		// 2: load("key")
		// 1: loadGroup.Do("key", fn)
		// 1: fn()
		// 2: loadGroup.Do("key", fn)
		// 2: fn()
		if value, cacheHit := g.lookupCache(key); cacheHit {
			g.Stats.CacheHits.Add(1)
			return value, nil
		}
		g.Stats.LoadsDeduped.Add(1)
		var value ByteView
		var err error
		if peer, ok := g.peers.PickPeer(key); ok {
			value, err = g.getFromPeer(ctx, peer, key)
			if err == nil {
				g.Stats.PeerLoads.Add(1)
				return value, nil
			}
			g.Stats.PeerErrors.Add(1)
			// TODO(bradfitz): log the peer's error? keep
			// log of the past few for /groupcachez? It's
			// probably boring (normal task movement), so not
			// worth logging I imagine.
		}
		value, err = g.getLocally(ctx, key, dest)
		if err != nil {
			g.Stats.LocalLoadErrs.Add(1)
			return nil, err
		}
		g.Stats.LocalLoads.Add(1)
		destPopulated = true // only one caller of load gets this return value
		g.populateCache(key, value, &g.mainCache)
		return value, nil
	})
	if err == nil {
		value = viewi.(ByteView)
	}
	return
}

func (g *Group) getLocally(ctx Context, key string, dest Sink) (ByteView, error) {
	err := g.getter.Get(ctx, key, dest)
	if err != nil {
		return ByteView{}, err
	}
	return dest.view()
}

func (g *Group) getFromPeer(ctx Context, peer ProtoGetter, key string) (ByteView, error) {
	req := &pb.GetRequest{
		Group: &g.name,
		Key:   &key,
	}
	res := &pb.GetResponse{}
	err := peer.Get(ctx, req, res)
	if err != nil {
		return ByteView{}, err
	}
	value := ByteView{b: res.Value}
	// TODO(bradfitz): use res.MinuteQps or something smart to
	// conditionally populate hotCache. For now just do it some
	// percentage of the time.
	if rand.Intn(10) == 0 {
		g.populateCache(key, value, &g.hotCache)
	}
	return value, nil
}

func (g *Group) lookupCache(key string) (value ByteView, ok bool) {
	if g.cacheBytes <= 0 {
		return
	}
	value, ok = g.mainCache.get(key)
	if ok {
		return
	}
	value, ok = g.hotCache.get(key)
	return
}

func (g *Group) populateCache(key string, value ByteView, cache *cache) {
	if g.cacheBytes <= 0 {
		return
	}
	cache.add(key, value)

	// Evict items from cache(s) if necessary.
	for {
		mainBytes := g.mainCache.bytes()
		hotBytes := g.hotCache.bytes()
		if mainBytes+hotBytes <= g.cacheBytes {
			return
		}

		// TODO(bradfitz): this is good-enough-for-now logic.
		// It should be something based on measurements and/or
		// respecting the costs of different resources.
		victim := &g.mainCache
		if hotBytes > mainBytes/8 {
			victim = &g.hotCache
		}
		victim.removeOldest()
	}
}

// CacheType represents a type of cache.
type CacheType int

const (
	// The MainCache is the cache for items that this peer is the
	// owner for.
	MainCache CacheType = iota + 1

	// The HotCache is the cache for items that seem popular
	// enough to replicate to this node, even though it's not the
	// owner.
	HotCache
)

// CacheStats returns stats about the provided cache within the group.
func (g *Group) CacheStats(which CacheType) CacheStats {
	switch which {
	case MainCache:
		return g.mainCache.stats()
	case HotCache:
		return g.hotCache.stats()
	default:
		return CacheStats{}
	}
}

// cache is a wrapper around an *lru.Cache that adds synchronization,
// makes values always be ByteView, and counts the size of all keys and
// values.
type cache struct {
	mu         sync.RWMutex
	nbytes     int64 // of all keys and values
	lru        *lru.Cache
	nhit, nget int64
	nevict     int64 // number of evictions
}

func (c *cache) stats() CacheStats {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return CacheStats{
		Bytes:     c.nbytes,
		Items:     c.itemsLocked(),
		Gets:      c.nget,
		Hits:      c.nhit,
		Evictions: c.nevict,
	}
}

func (c *cache) add(key string, value ByteView) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.lru == nil {
		c.lru = &lru.Cache{
			OnEvicted: func(key lru.Key, value interface{}) {
				val := value.(ByteView)
				c.nbytes -= int64(len(key.(string))) + int64(val.Len())
				c.nevict++
			},
		}
	}
	c.lru.Add(key, value)
	c.nbytes += int64(len(key)) + int64(value.Len())
}

func (c *cache) get(key string) (value ByteView, ok bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.nget++
	if c.lru == nil {
		return
	}
	vi, ok := c.lru.Get(key)
	if !ok {
		return
	}
	c.nhit++
	return vi.(ByteView), true
}

func (c *cache) removeOldest() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.lru != nil {
		c.lru.RemoveOldest()
	}
}

func (c *cache) bytes() int64 {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.nbytes
}

func (c *cache) items() int64 {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.itemsLocked()
}

func (c *cache) itemsLocked() int64 {
	if c.lru == nil {
		return 0
	}
	return int64(c.lru.Len())
}

// An AtomicInt is an int64 to be accessed atomically.
type AtomicInt int64

// Add atomically adds n to i.
func (i *AtomicInt) Add(n int64) {
	atomic.AddInt64((*int64)(i), n)
}

// Get atomically gets the value of i.
func (i *AtomicInt) Get() int64 {
	return atomic.LoadInt64((*int64)(i))
}

func (i *AtomicInt) String() string {
	return strconv.FormatInt(i.Get(), 10)
}

// CacheStats are returned by stats accessors on Group.
type CacheStats struct {
	Bytes     int64
	Items     int64
	Gets      int64
	Hits      int64
	Evictions int64
}
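Since Group exposes Stats counters and per-tier CacheStats, here is a brief hedged sketch of reading them after some traffic. It assumes a group named "greetings" was registered earlier via NewGroup, as in the sketch after the README above; the key name is made up:

package main

import (
	"fmt"

	"github.com/golang/groupcache"
)

func main() {
	// GetGroup returns nil if no such group was registered; this
	// sketch assumes the "greetings" group from the earlier example.
	g := groupcache.GetGroup("greetings")

	var s string
	_ = g.Get(nil, "some-key", groupcache.StringSink(&s))

	// Per-group counters are AtomicInt values; read them with Get().
	fmt.Printf("gets=%d hits=%d loads=%d deduped=%d\n",
		g.Stats.Gets.Get(), g.Stats.CacheHits.Get(),
		g.Stats.Loads.Get(), g.Stats.LoadsDeduped.Get())

	// Item/byte counts per tier: authoritative (MainCache) vs. mirrored (HotCache).
	mc := g.CacheStats(groupcache.MainCache)
	fmt.Printf("mainCache: items=%d bytes=%d evictions=%d\n",
		mc.Items, mc.Bytes, mc.Evictions)
}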
vendor/github.com/golang/groupcache/groupcache_test.go (generated, vendored, new file, 447 lines)

@@ -0,0 +1,447 @@
/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Tests for groupcache.

package groupcache

import (
	"errors"
	"fmt"
	"hash/crc32"
	"math/rand"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/golang/protobuf/proto"

	pb "github.com/golang/groupcache/groupcachepb"
	testpb "github.com/golang/groupcache/testpb"
)

var (
	once                    sync.Once
	stringGroup, protoGroup Getter

	stringc = make(chan string)

	dummyCtx Context

	// cacheFills is the number of times stringGroup or
	// protoGroup's Getter have been called. Read using the
	// cacheFills function.
	cacheFills AtomicInt
)

const (
	stringGroupName = "string-group"
	protoGroupName  = "proto-group"
	testMessageType = "google3/net/groupcache/go/test_proto.TestMessage"
	fromChan        = "from-chan"
	cacheSize       = 1 << 20
)

func testSetup() {
	stringGroup = NewGroup(stringGroupName, cacheSize, GetterFunc(func(_ Context, key string, dest Sink) error {
		if key == fromChan {
			key = <-stringc
		}
		cacheFills.Add(1)
		return dest.SetString("ECHO:" + key)
	}))

	protoGroup = NewGroup(protoGroupName, cacheSize, GetterFunc(func(_ Context, key string, dest Sink) error {
		if key == fromChan {
			key = <-stringc
		}
		cacheFills.Add(1)
		return dest.SetProto(&testpb.TestMessage{
			Name: proto.String("ECHO:" + key),
			City: proto.String("SOME-CITY"),
		})
	}))
}

// tests that a Getter's Get method is only called once with two
// outstanding callers. This is the string variant.
func TestGetDupSuppressString(t *testing.T) {
	once.Do(testSetup)
	// Start two getters. The first should block (waiting reading
	// from stringc) and the second should latch on to the first
	// one.
	resc := make(chan string, 2)
	for i := 0; i < 2; i++ {
		go func() {
			var s string
			if err := stringGroup.Get(dummyCtx, fromChan, StringSink(&s)); err != nil {
				resc <- "ERROR:" + err.Error()
				return
			}
			resc <- s
		}()
	}

	// Wait a bit so both goroutines get merged together via
	// singleflight.
	// TODO(bradfitz): decide whether there are any non-offensive
	// debug/test hooks that could be added to singleflight to
	// make a sleep here unnecessary.
	time.Sleep(250 * time.Millisecond)

	// Unblock the first getter, which should unblock the second
	// as well.
	stringc <- "foo"

	for i := 0; i < 2; i++ {
		select {
		case v := <-resc:
			if v != "ECHO:foo" {
				t.Errorf("got %q; want %q", v, "ECHO:foo")
			}
		case <-time.After(5 * time.Second):
			t.Errorf("timeout waiting on getter #%d of 2", i+1)
		}
	}
}

// tests that a Getter's Get method is only called once with two
// outstanding callers. This is the proto variant.
func TestGetDupSuppressProto(t *testing.T) {
	once.Do(testSetup)
	// Start two getters. The first should block (waiting reading
	// from stringc) and the second should latch on to the first
	// one.
	resc := make(chan *testpb.TestMessage, 2)
	for i := 0; i < 2; i++ {
		go func() {
			tm := new(testpb.TestMessage)
			if err := protoGroup.Get(dummyCtx, fromChan, ProtoSink(tm)); err != nil {
				tm.Name = proto.String("ERROR:" + err.Error())
			}
			resc <- tm
		}()
	}

	// Wait a bit so both goroutines get merged together via
	// singleflight.
	// TODO(bradfitz): decide whether there are any non-offensive
	// debug/test hooks that could be added to singleflight to
	// make a sleep here unnecessary.
	time.Sleep(250 * time.Millisecond)

	// Unblock the first getter, which should unblock the second
	// as well.
	stringc <- "Fluffy"
	want := &testpb.TestMessage{
		Name: proto.String("ECHO:Fluffy"),
		City: proto.String("SOME-CITY"),
	}
	for i := 0; i < 2; i++ {
		select {
		case v := <-resc:
			if !reflect.DeepEqual(v, want) {
				t.Errorf(" Got: %v\nWant: %v", proto.CompactTextString(v), proto.CompactTextString(want))
			}
		case <-time.After(5 * time.Second):
			t.Errorf("timeout waiting on getter #%d of 2", i+1)
		}
	}
}

func countFills(f func()) int64 {
	fills0 := cacheFills.Get()
	f()
	return cacheFills.Get() - fills0
}

func TestCaching(t *testing.T) {
	once.Do(testSetup)
	fills := countFills(func() {
		for i := 0; i < 10; i++ {
			var s string
			if err := stringGroup.Get(dummyCtx, "TestCaching-key", StringSink(&s)); err != nil {
				t.Fatal(err)
			}
		}
	})
	if fills != 1 {
		t.Errorf("expected 1 cache fill; got %d", fills)
	}
}

func TestCacheEviction(t *testing.T) {
	once.Do(testSetup)
	testKey := "TestCacheEviction-key"
	getTestKey := func() {
		var res string
		for i := 0; i < 10; i++ {
			if err := stringGroup.Get(dummyCtx, testKey, StringSink(&res)); err != nil {
				t.Fatal(err)
			}
		}
	}
	fills := countFills(getTestKey)
	if fills != 1 {
		t.Fatalf("expected 1 cache fill; got %d", fills)
	}

	g := stringGroup.(*Group)
	evict0 := g.mainCache.nevict

	// Trash the cache with other keys.
	var bytesFlooded int64
	// cacheSize/len(testKey) is approximate
	for bytesFlooded < cacheSize+1024 {
		var res string
		key := fmt.Sprintf("dummy-key-%d", bytesFlooded)
		stringGroup.Get(dummyCtx, key, StringSink(&res))
		bytesFlooded += int64(len(key) + len(res))
	}
	evicts := g.mainCache.nevict - evict0
	if evicts <= 0 {
		t.Errorf("evicts = %v; want more than 0", evicts)
	}

	// Test that the key is gone.
	fills = countFills(getTestKey)
	if fills != 1 {
		t.Fatalf("expected 1 cache fill after cache trashing; got %d", fills)
	}
}

type fakePeer struct {
	hits int
	fail bool
}

func (p *fakePeer) Get(_ Context, in *pb.GetRequest, out *pb.GetResponse) error {
	p.hits++
	if p.fail {
		return errors.New("simulated error from peer")
	}
	out.Value = []byte("got:" + in.GetKey())
	return nil
}

type fakePeers []ProtoGetter

func (p fakePeers) PickPeer(key string) (peer ProtoGetter, ok bool) {
	if len(p) == 0 {
		return
	}
	n := crc32.Checksum([]byte(key), crc32.IEEETable) % uint32(len(p))
	return p[n], p[n] != nil
}

// tests that peers (virtual, in-process) are hit, and how much.
func TestPeers(t *testing.T) {
	once.Do(testSetup)
	rand.Seed(123)
	peer0 := &fakePeer{}
	peer1 := &fakePeer{}
	peer2 := &fakePeer{}
	peerList := fakePeers([]ProtoGetter{peer0, peer1, peer2, nil})
	const cacheSize = 0 // disabled
	localHits := 0
	getter := func(_ Context, key string, dest Sink) error {
		localHits++
		return dest.SetString("got:" + key)
	}
	testGroup := newGroup("TestPeers-group", cacheSize, GetterFunc(getter), peerList)
	run := func(name string, n int, wantSummary string) {
		// Reset counters
		localHits = 0
		for _, p := range []*fakePeer{peer0, peer1, peer2} {
			p.hits = 0
		}

		for i := 0; i < n; i++ {
			key := fmt.Sprintf("key-%d", i)
			want := "got:" + key
			var got string
			err := testGroup.Get(dummyCtx, key, StringSink(&got))
			if err != nil {
				t.Errorf("%s: error on key %q: %v", name, key, err)
				continue
			}
			if got != want {
				t.Errorf("%s: for key %q, got %q; want %q", name, key, got, want)
			}
		}
		summary := func() string {
			return fmt.Sprintf("localHits = %d, peers = %d %d %d", localHits, peer0.hits, peer1.hits, peer2.hits)
		}
		if got := summary(); got != wantSummary {
			t.Errorf("%s: got %q; want %q", name, got, wantSummary)
		}
	}
	resetCacheSize := func(maxBytes int64) {
		g := testGroup
		g.cacheBytes = maxBytes
		g.mainCache = cache{}
		g.hotCache = cache{}
	}

	// Base case; peers all up, with no problems.
	resetCacheSize(1 << 20)
	run("base", 200, "localHits = 49, peers = 51 49 51")

	// Verify cache was hit. All localHits are gone, and some of
	// the peer hits (the ones randomly selected to be maybe hot)
	run("cached_base", 200, "localHits = 0, peers = 49 47 48")
	resetCacheSize(0)

	// With one of the peers being down.
	// TODO(bradfitz): on a peer number being unavailable, the
	// consistent hashing should maybe keep trying others to
	// spread the load out. Currently it fails back to local
	// execution if the first consistent-hash slot is unavailable.
	peerList[0] = nil
	run("one_peer_down", 200, "localHits = 100, peers = 0 49 51")

	// Failing peer
	peerList[0] = peer0
	peer0.fail = true
	run("peer0_failing", 200, "localHits = 100, peers = 51 49 51")
}

func TestTruncatingByteSliceTarget(t *testing.T) {
	var buf [100]byte
	s := buf[:]
	if err := stringGroup.Get(dummyCtx, "short", TruncatingByteSliceSink(&s)); err != nil {
		t.Fatal(err)
	}
	if want := "ECHO:short"; string(s) != want {
		t.Errorf("short key got %q; want %q", s, want)
	}

	s = buf[:6]
	if err := stringGroup.Get(dummyCtx, "truncated", TruncatingByteSliceSink(&s)); err != nil {
		t.Fatal(err)
	}
	if want := "ECHO:t"; string(s) != want {
		t.Errorf("truncated key got %q; want %q", s, want)
	}
}

func TestAllocatingByteSliceTarget(t *testing.T) {
	var dst []byte
	sink := AllocatingByteSliceSink(&dst)

	inBytes := []byte("some bytes")
	sink.SetBytes(inBytes)
	if want := "some bytes"; string(dst) != want {
		t.Errorf("SetBytes resulted in %q; want %q", dst, want)
	}
	v, err := sink.view()
	if err != nil {
		t.Fatalf("view after SetBytes failed: %v", err)
	}
	if &inBytes[0] == &dst[0] {
		t.Error("inBytes and dst share memory")
	}
	if &inBytes[0] == &v.b[0] {
		t.Error("inBytes and view share memory")
	}
	if &dst[0] == &v.b[0] {
		t.Error("dst and view share memory")
	}
}

// orderedFlightGroup allows the caller to force the schedule of when
// orig.Do will be called. This is useful to serialize calls such
// that singleflight cannot dedup them.
type orderedFlightGroup struct {
	mu     sync.Mutex
	stage1 chan bool
	stage2 chan bool
	orig   flightGroup
}

func (g *orderedFlightGroup) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
	<-g.stage1
	<-g.stage2
	g.mu.Lock()
	defer g.mu.Unlock()
	return g.orig.Do(key, fn)
}

// TestNoDedup tests invariants on the cache size when singleflight is
// unable to dedup calls.
func TestNoDedup(t *testing.T) {
	const testkey = "testkey"
	const testval = "testval"
|
||||
g := newGroup("testgroup", 1024, GetterFunc(func(_ Context, key string, dest Sink) error {
|
||||
return dest.SetString(testval)
|
||||
}), nil)
|
||||
|
||||
orderedGroup := &orderedFlightGroup{
|
||||
stage1: make(chan bool),
|
||||
stage2: make(chan bool),
|
||||
orig: g.loadGroup,
|
||||
}
|
||||
// Replace loadGroup with our wrapper so we can control when
|
||||
// loadGroup.Do is entered for each concurrent request.
|
||||
g.loadGroup = orderedGroup
|
||||
|
||||
// Issue two idential requests concurrently. Since the cache is
|
||||
// empty, it will miss. Both will enter load(), but we will only
|
||||
// allow one at a time to enter singleflight.Do, so the callback
|
||||
// function will be called twice.
|
||||
resc := make(chan string, 2)
|
||||
for i := 0; i < 2; i++ {
|
||||
go func() {
|
||||
var s string
|
||||
if err := g.Get(dummyCtx, testkey, StringSink(&s)); err != nil {
|
||||
resc <- "ERROR:" + err.Error()
|
||||
return
|
||||
}
|
||||
resc <- s
|
||||
}()
|
||||
}
|
||||
|
||||
// Ensure both goroutines have entered the Do routine. This implies
|
||||
// both concurrent requests have checked the cache, found it empty,
|
||||
// and called load().
|
||||
orderedGroup.stage1 <- true
|
||||
orderedGroup.stage1 <- true
|
||||
orderedGroup.stage2 <- true
|
||||
orderedGroup.stage2 <- true
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
if s := <-resc; s != testval {
|
||||
t.Errorf("result is %s want %s", s, testval)
|
||||
}
|
||||
}
|
||||
|
||||
const wantItems = 1
|
||||
if g.mainCache.items() != wantItems {
|
||||
t.Errorf("mainCache has %d items, want %d", g.mainCache.items(), wantItems)
|
||||
}
|
||||
|
||||
// If the singleflight callback doesn't double-check the cache again
|
||||
// upon entry, we would increment nbytes twice but the entry would
|
||||
// only be in the cache once.
|
||||
const wantBytes = int64(len(testkey) + len(testval))
|
||||
if g.mainCache.nbytes != wantBytes {
|
||||
t.Errorf("cache has %d bytes, want %d", g.mainCache.nbytes, wantBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(bradfitz): port the Google-internal full integration test into here,
|
||||
// using HTTP requests instead of our RPC system.
|
||||
65
vendor/github.com/golang/groupcache/groupcachepb/groupcache.pb.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
// Code generated by protoc-gen-go.
// source: groupcache.proto
// DO NOT EDIT!

package groupcachepb

import proto "github.com/golang/protobuf/proto"
import json "encoding/json"
import math "math"

// Reference proto, json, and math imports to suppress error if they are not otherwise used.
var _ = proto.Marshal
var _ = &json.SyntaxError{}
var _ = math.Inf

type GetRequest struct {
    Group            *string `protobuf:"bytes,1,req,name=group" json:"group,omitempty"`
    Key              *string `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
    XXX_unrecognized []byte  `json:"-"`
}

func (m *GetRequest) Reset()         { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage()    {}

func (m *GetRequest) GetGroup() string {
    if m != nil && m.Group != nil {
        return *m.Group
    }
    return ""
}

func (m *GetRequest) GetKey() string {
    if m != nil && m.Key != nil {
        return *m.Key
    }
    return ""
}

type GetResponse struct {
    Value            []byte   `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
    MinuteQps        *float64 `protobuf:"fixed64,2,opt,name=minute_qps" json:"minute_qps,omitempty"`
    XXX_unrecognized []byte   `json:"-"`
}

func (m *GetResponse) Reset()         { *m = GetResponse{} }
func (m *GetResponse) String() string { return proto.CompactTextString(m) }
func (*GetResponse) ProtoMessage()    {}

func (m *GetResponse) GetValue() []byte {
    if m != nil {
        return m.Value
    }
    return nil
}

func (m *GetResponse) GetMinuteQps() float64 {
    if m != nil && m.MinuteQps != nil {
        return *m.MinuteQps
    }
    return 0
}

func init() {
}
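Aside: the generated accessors above are nil-safe on both the receiver and the proto2 pointer fields, which is why callers in this vendor tree chain them without nil checks. A minimal caller-side sketch (group and key names are illustrative; proto.String is the helper from the vendored protobuf package):

package main

import (
    "fmt"

    "github.com/golang/groupcache/groupcachepb"
    "github.com/golang/protobuf/proto"
)

func main() {
    req := &groupcachepb.GetRequest{
        Group: proto.String("users"), // proto2 scalar fields are pointers
        Key:   proto.String("alice"),
    }
    fmt.Println(req.GetGroup(), req.GetKey()) // "users alice"

    var missing *groupcachepb.GetRequest
    fmt.Println(missing.GetKey() == "") // true: getters tolerate a nil receiver
}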
34
vendor/github.com/golang/groupcache/groupcachepb/groupcache.proto
generated
vendored
Normal file
@@ -0,0 +1,34 @@
/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

syntax = "proto2";

package groupcachepb;

message GetRequest {
    required string group = 1;
    required string key = 2; // not actually required/guaranteed to be UTF-8
}

message GetResponse {
    optional bytes value = 1;
    optional double minute_qps = 2;
}

service GroupCache {
    rpc Get(GetRequest) returns (GetResponse) {
    };
}
227
vendor/github.com/golang/groupcache/http.go
generated
vendored
Normal file
@@ -0,0 +1,227 @@
/*
Copyright 2013 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package groupcache

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strings"
    "sync"

    "github.com/golang/groupcache/consistenthash"
    pb "github.com/golang/groupcache/groupcachepb"
    "github.com/golang/protobuf/proto"
)

const defaultBasePath = "/_groupcache/"

const defaultReplicas = 50

// HTTPPool implements PeerPicker for a pool of HTTP peers.
type HTTPPool struct {
    // Context optionally specifies a context for the server to use when it
    // receives a request.
    // If nil, the server uses a nil Context.
    Context func(*http.Request) Context

    // Transport optionally specifies an http.RoundTripper for the client
    // to use when it makes a request.
    // If nil, the client uses http.DefaultTransport.
    Transport func(Context) http.RoundTripper

    // this peer's base URL, e.g. "https://example.net:8000"
    self string

    // opts specifies the options.
    opts HTTPPoolOptions

    mu          sync.Mutex // guards peers and httpGetters
    peers       *consistenthash.Map
    httpGetters map[string]*httpGetter // keyed by e.g. "http://10.0.0.2:8008"
}

// HTTPPoolOptions are the configurations of an HTTPPool.
type HTTPPoolOptions struct {
    // BasePath specifies the HTTP path that will serve groupcache requests.
    // If blank, it defaults to "/_groupcache/".
    BasePath string

    // Replicas specifies the number of key replicas on the consistent hash.
    // If blank, it defaults to 50.
    Replicas int

    // HashFn specifies the hash function of the consistent hash.
    // If blank, it defaults to crc32.ChecksumIEEE.
    HashFn consistenthash.Hash
}

// NewHTTPPool initializes an HTTP pool of peers, and registers itself as a PeerPicker.
// For convenience, it also registers itself as an http.Handler with http.DefaultServeMux.
// The self argument should be a valid base URL that points to the current server,
// for example "http://example.net:8000".
func NewHTTPPool(self string) *HTTPPool {
    p := NewHTTPPoolOpts(self, nil)
    http.Handle(p.opts.BasePath, p)
    return p
}

var httpPoolMade bool

// NewHTTPPoolOpts initializes an HTTP pool of peers with the given options.
// Unlike NewHTTPPool, this function does not register the created pool as an HTTP handler.
// The returned *HTTPPool implements http.Handler and must be registered using http.Handle.
func NewHTTPPoolOpts(self string, o *HTTPPoolOptions) *HTTPPool {
    if httpPoolMade {
        panic("groupcache: NewHTTPPool must be called only once")
    }
    httpPoolMade = true

    p := &HTTPPool{
        self:        self,
        httpGetters: make(map[string]*httpGetter),
    }
    if o != nil {
        p.opts = *o
    }
    if p.opts.BasePath == "" {
        p.opts.BasePath = defaultBasePath
    }
    if p.opts.Replicas == 0 {
        p.opts.Replicas = defaultReplicas
    }
    p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn)

    RegisterPeerPicker(func() PeerPicker { return p })
    return p
}

// Set updates the pool's list of peers.
// Each peer value should be a valid base URL,
// for example "http://example.net:8000".
func (p *HTTPPool) Set(peers ...string) {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn)
    p.peers.Add(peers...)
    p.httpGetters = make(map[string]*httpGetter, len(peers))
    for _, peer := range peers {
        p.httpGetters[peer] = &httpGetter{transport: p.Transport, baseURL: peer + p.opts.BasePath}
    }
}

func (p *HTTPPool) PickPeer(key string) (ProtoGetter, bool) {
    p.mu.Lock()
    defer p.mu.Unlock()
    if p.peers.IsEmpty() {
        return nil, false
    }
    if peer := p.peers.Get(key); peer != p.self {
        return p.httpGetters[peer], true
    }
    return nil, false
}

func (p *HTTPPool) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    // Parse request.
    if !strings.HasPrefix(r.URL.Path, p.opts.BasePath) {
        panic("HTTPPool serving unexpected path: " + r.URL.Path)
    }
    parts := strings.SplitN(r.URL.Path[len(p.opts.BasePath):], "/", 2)
    if len(parts) != 2 {
        http.Error(w, "bad request", http.StatusBadRequest)
        return
    }
    groupName := parts[0]
    key := parts[1]

    // Fetch the value for this group/key.
    group := GetGroup(groupName)
    if group == nil {
        http.Error(w, "no such group: "+groupName, http.StatusNotFound)
        return
    }
    var ctx Context
    if p.Context != nil {
        ctx = p.Context(r)
    }

    group.Stats.ServerRequests.Add(1)
    var value []byte
    err := group.Get(ctx, key, AllocatingByteSliceSink(&value))
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    // Write the value to the response body as a proto message.
    body, err := proto.Marshal(&pb.GetResponse{Value: value})
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    w.Header().Set("Content-Type", "application/x-protobuf")
    w.Write(body)
}

type httpGetter struct {
    transport func(Context) http.RoundTripper
    baseURL   string
}

var bufferPool = sync.Pool{
    New: func() interface{} { return new(bytes.Buffer) },
}

func (h *httpGetter) Get(context Context, in *pb.GetRequest, out *pb.GetResponse) error {
    u := fmt.Sprintf(
        "%v%v/%v",
        h.baseURL,
        url.QueryEscape(in.GetGroup()),
        url.QueryEscape(in.GetKey()),
    )
    req, err := http.NewRequest("GET", u, nil)
    if err != nil {
        return err
    }
    tr := http.DefaultTransport
    if h.transport != nil {
        tr = h.transport(context)
    }
    res, err := tr.RoundTrip(req)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusOK {
        return fmt.Errorf("server returned: %v", res.Status)
    }
    b := bufferPool.Get().(*bytes.Buffer)
    b.Reset()
    defer bufferPool.Put(b)
    _, err = io.Copy(b, res.Body)
    if err != nil {
        return fmt.Errorf("reading response body: %v", err)
    }
    err = proto.Unmarshal(b.Bytes(), out)
    if err != nil {
        return fmt.Errorf("decoding response body: %v", err)
    }
    return nil
}
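Aside: as a rough usage sketch of the pool above, a process creates one HTTPPool for itself, points it at the full peer list, and serves the default mux. The addresses, group name, and getter below are illustrative, not part of this commit:

package main

import (
    "log"
    "net/http"

    "github.com/golang/groupcache"
)

func main() {
    // This process's own base URL, plus its peer list (illustrative).
    pool := groupcache.NewHTTPPool("http://10.0.0.1:8000")
    pool.Set("http://10.0.0.1:8000", "http://10.0.0.2:8000")

    groupcache.NewGroup("users", 64<<20, groupcache.GetterFunc(
        func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
            // Fill from the backing store on a cache miss.
            return dest.SetString("value-for-" + key)
        }))

    // NewHTTPPool registered the pool on http.DefaultServeMux under
    // /_groupcache/, so serving the default mux also serves peer traffic.
    log.Fatal(http.ListenAndServe(":8000", nil))
}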
166
vendor/github.com/golang/groupcache/http_test.go
generated
vendored
Normal file
@@ -0,0 +1,166 @@
/*
Copyright 2013 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package groupcache

import (
    "errors"
    "flag"
    "log"
    "net"
    "net/http"
    "os"
    "os/exec"
    "strconv"
    "strings"
    "sync"
    "testing"
    "time"
)

var (
    peerAddrs = flag.String("test_peer_addrs", "", "Comma-separated list of peer addresses; used by TestHTTPPool")
    peerIndex = flag.Int("test_peer_index", -1, "Index of which peer this child is; used by TestHTTPPool")
    peerChild = flag.Bool("test_peer_child", false, "True if running as a child process; used by TestHTTPPool")
)

func TestHTTPPool(t *testing.T) {
    if *peerChild {
        beChildForTestHTTPPool()
        os.Exit(0)
    }

    const (
        nChild = 4
        nGets  = 100
    )

    var childAddr []string
    for i := 0; i < nChild; i++ {
        childAddr = append(childAddr, pickFreeAddr(t))
    }

    var cmds []*exec.Cmd
    var wg sync.WaitGroup
    for i := 0; i < nChild; i++ {
        cmd := exec.Command(os.Args[0],
            "--test.run=TestHTTPPool",
            "--test_peer_child",
            "--test_peer_addrs="+strings.Join(childAddr, ","),
            "--test_peer_index="+strconv.Itoa(i),
        )
        cmds = append(cmds, cmd)
        wg.Add(1)
        if err := cmd.Start(); err != nil {
            t.Fatal("failed to start child process: ", err)
        }
        go awaitAddrReady(t, childAddr[i], &wg)
    }
    defer func() {
        for i := 0; i < nChild; i++ {
            if cmds[i].Process != nil {
                cmds[i].Process.Kill()
            }
        }
    }()
    wg.Wait()

    // Use a dummy self address so that we don't handle gets in-process.
    p := NewHTTPPool("should-be-ignored")
    p.Set(addrToURL(childAddr)...)

    // Dummy getter function. Gets should go to children only.
    // The only time this process will handle a get is when the
    // children can't be contacted for some reason.
    getter := GetterFunc(func(ctx Context, key string, dest Sink) error {
        return errors.New("parent getter called; something's wrong")
    })
    g := NewGroup("httpPoolTest", 1<<20, getter)

    for _, key := range testKeys(nGets) {
        var value string
        if err := g.Get(nil, key, StringSink(&value)); err != nil {
            t.Fatal(err)
        }
        if suffix := ":" + key; !strings.HasSuffix(value, suffix) {
            t.Errorf("Get(%q) = %q, want value ending in %q", key, value, suffix)
        }
        t.Logf("Get key=%q, value=%q (peer:key)", key, value)
    }
}

func testKeys(n int) (keys []string) {
    keys = make([]string, n)
    for i := range keys {
        keys[i] = strconv.Itoa(i)
    }
    return
}

func beChildForTestHTTPPool() {
    addrs := strings.Split(*peerAddrs, ",")

    p := NewHTTPPool("http://" + addrs[*peerIndex])
    p.Set(addrToURL(addrs)...)

    getter := GetterFunc(func(ctx Context, key string, dest Sink) error {
        dest.SetString(strconv.Itoa(*peerIndex) + ":" + key)
        return nil
    })
    NewGroup("httpPoolTest", 1<<20, getter)

    log.Fatal(http.ListenAndServe(addrs[*peerIndex], p))
}

// This is racy. Another process could swoop in and steal the port between the
// call to this function and the next listen call. Should be okay though.
// The proper way would be to pass the l.File() as ExtraFiles to the child
// process, and then close your copy once the child starts.
func pickFreeAddr(t *testing.T) string {
    l, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        t.Fatal(err)
    }
    defer l.Close()
    return l.Addr().String()
}

func addrToURL(addr []string) []string {
    url := make([]string, len(addr))
    for i := range addr {
        url[i] = "http://" + addr[i]
    }
    return url
}

func awaitAddrReady(t *testing.T, addr string, wg *sync.WaitGroup) {
    defer wg.Done()
    const max = 1 * time.Second
    tries := 0
    for {
        tries++
        c, err := net.Dial("tcp", addr)
        if err == nil {
            c.Close()
            return
        }
        delay := time.Duration(tries) * 25 * time.Millisecond
        if delay > max {
            delay = max
        }
        time.Sleep(delay)
    }
}
121
vendor/github.com/golang/groupcache/lru/lru.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
/*
Copyright 2013 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package lru implements an LRU cache.
package lru

import "container/list"

// Cache is an LRU cache. It is not safe for concurrent access.
type Cache struct {
    // MaxEntries is the maximum number of cache entries before
    // an item is evicted. Zero means no limit.
    MaxEntries int

    // OnEvicted optionally specifies a callback function to be
    // executed when an entry is purged from the cache.
    OnEvicted func(key Key, value interface{})

    ll    *list.List
    cache map[interface{}]*list.Element
}

// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
type Key interface{}

type entry struct {
    key   Key
    value interface{}
}

// New creates a new Cache.
// If maxEntries is zero, the cache has no limit and it's assumed
// that eviction is done by the caller.
func New(maxEntries int) *Cache {
    return &Cache{
        MaxEntries: maxEntries,
        ll:         list.New(),
        cache:      make(map[interface{}]*list.Element),
    }
}

// Add adds a value to the cache.
func (c *Cache) Add(key Key, value interface{}) {
    if c.cache == nil {
        c.cache = make(map[interface{}]*list.Element)
        c.ll = list.New()
    }
    if ee, ok := c.cache[key]; ok {
        c.ll.MoveToFront(ee)
        ee.Value.(*entry).value = value
        return
    }
    ele := c.ll.PushFront(&entry{key, value})
    c.cache[key] = ele
    if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
        c.RemoveOldest()
    }
}

// Get looks up a key's value from the cache.
func (c *Cache) Get(key Key) (value interface{}, ok bool) {
    if c.cache == nil {
        return
    }
    if ele, hit := c.cache[key]; hit {
        c.ll.MoveToFront(ele)
        return ele.Value.(*entry).value, true
    }
    return
}

// Remove removes the provided key from the cache.
func (c *Cache) Remove(key Key) {
    if c.cache == nil {
        return
    }
    if ele, hit := c.cache[key]; hit {
        c.removeElement(ele)
    }
}

// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() {
    if c.cache == nil {
        return
    }
    ele := c.ll.Back()
    if ele != nil {
        c.removeElement(ele)
    }
}

func (c *Cache) removeElement(e *list.Element) {
    c.ll.Remove(e)
    kv := e.Value.(*entry)
    delete(c.cache, kv.key)
    if c.OnEvicted != nil {
        c.OnEvicted(kv.key, kv.value)
    }
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
    if c.cache == nil {
        return 0
    }
    return c.ll.Len()
}
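Aside: a short sketch of driving this cache directly. It is not safe for concurrent access, so callers wrap it with their own locking; the keys and values here are illustrative:

package main

import (
    "fmt"

    "github.com/golang/groupcache/lru"
)

func main() {
    c := lru.New(2) // keep at most two entries
    c.OnEvicted = func(key lru.Key, value interface{}) {
        fmt.Println("evicted:", key)
    }
    c.Add("a", 1)
    c.Add("b", 2)
    c.Add("c", 3) // evicts "a", the least recently used entry
    if _, ok := c.Get("a"); !ok {
        fmt.Println("a is gone")
    }
}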
73
vendor/github.com/golang/groupcache/lru/lru_test.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
/*
Copyright 2013 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lru

import (
    "testing"
)

type simpleStruct struct {
    int
    string
}

type complexStruct struct {
    int
    simpleStruct
}

var getTests = []struct {
    name       string
    keyToAdd   interface{}
    keyToGet   interface{}
    expectedOk bool
}{
    {"string_hit", "myKey", "myKey", true},
    {"string_miss", "myKey", "nonsense", false},
    {"simple_struct_hit", simpleStruct{1, "two"}, simpleStruct{1, "two"}, true},
    {"simple_struct_miss", simpleStruct{1, "two"}, simpleStruct{0, "noway"}, false},
    {"complex_struct_hit", complexStruct{1, simpleStruct{2, "three"}},
        complexStruct{1, simpleStruct{2, "three"}}, true},
}

func TestGet(t *testing.T) {
    for _, tt := range getTests {
        lru := New(0)
        lru.Add(tt.keyToAdd, 1234)
        val, ok := lru.Get(tt.keyToGet)
        if ok != tt.expectedOk {
            t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok)
        } else if ok && val != 1234 {
            t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val)
        }
    }
}

func TestRemove(t *testing.T) {
    lru := New(0)
    lru.Add("myKey", 1234)
    if val, ok := lru.Get("myKey"); !ok {
        t.Fatal("TestRemove returned no match")
    } else if val != 1234 {
        t.Fatalf("TestRemove failed. Expected %d, got %v", 1234, val)
    }

    lru.Remove("myKey")
    if _, ok := lru.Get("myKey"); ok {
        t.Fatal("TestRemove returned a removed entry")
    }
}
71
vendor/github.com/golang/groupcache/peers.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// peers.go defines how processes find and communicate with their peers.

package groupcache

import (
    pb "github.com/golang/groupcache/groupcachepb"
)

// Context is an opaque value passed through calls to the
// ProtoGetter. It may be nil if your ProtoGetter implementation does
// not require a context.
type Context interface{}

// ProtoGetter is the interface that must be implemented by a peer.
type ProtoGetter interface {
    Get(context Context, in *pb.GetRequest, out *pb.GetResponse) error
}

// PeerPicker is the interface that must be implemented to locate
// the peer that owns a specific key.
type PeerPicker interface {
    // PickPeer returns the peer that owns the specific key
    // and true to indicate that a remote peer was nominated.
    // It returns nil, false if the key owner is the current peer.
    PickPeer(key string) (peer ProtoGetter, ok bool)
}

// NoPeers is an implementation of PeerPicker that never finds a peer.
type NoPeers struct{}

func (NoPeers) PickPeer(key string) (peer ProtoGetter, ok bool) { return }

var (
    portPicker func() PeerPicker
)

// RegisterPeerPicker registers the peer initialization function.
// It is called once, when the first group is created.
func RegisterPeerPicker(fn func() PeerPicker) {
    if portPicker != nil {
        panic("RegisterPeerPicker called more than once")
    }
    portPicker = fn
}

func getPeers() PeerPicker {
    if portPicker == nil {
        return NoPeers{}
    }
    pk := portPicker()
    if pk == nil {
        pk = NoPeers{}
    }
    return pk
}
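Aside: only one PeerPicker can be registered per process (NewHTTPPoolOpts does this for the HTTP transport). A custom transport would register its own picker the same way; a minimal sketch, where the fixed-single-peer policy is purely hypothetical:

package main

import (
    "github.com/golang/groupcache"
)

// staticPicker always nominates one fixed remote peer; a nil peer
// means "handle the key locally". Hypothetical policy for illustration.
type staticPicker struct{ peer groupcache.ProtoGetter }

func (p staticPicker) PickPeer(key string) (groupcache.ProtoGetter, bool) {
    return p.peer, p.peer != nil
}

func main() {
    // Must run before the first group is created, and only if no
    // HTTPPool has already registered itself in this process.
    groupcache.RegisterPeerPicker(func() groupcache.PeerPicker {
        return staticPicker{} // no peer configured: always local
    })
}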
64
vendor/github.com/golang/groupcache/singleflight/singleflight.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package singleflight provides a duplicate function call suppression
// mechanism.
package singleflight

import "sync"

// call is an in-flight or completed Do call
type call struct {
    wg  sync.WaitGroup
    val interface{}
    err error
}

// Group represents a class of work and forms a namespace in which
// units of work can be executed with duplicate suppression.
type Group struct {
    mu sync.Mutex       // protects m
    m  map[string]*call // lazily initialized
}

// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
func (g *Group) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
    g.mu.Lock()
    if g.m == nil {
        g.m = make(map[string]*call)
    }
    if c, ok := g.m[key]; ok {
        g.mu.Unlock()
        c.wg.Wait()
        return c.val, c.err
    }
    c := new(call)
    c.wg.Add(1)
    g.m[key] = c
    g.mu.Unlock()

    c.val, c.err = fn()
    c.wg.Done()

    g.mu.Lock()
    delete(g.m, key)
    g.mu.Unlock()

    return c.val, c.err
}
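Aside: a usage sketch of Do; concurrent callers asking for the same key share one execution of fn, which is exactly how the cache above suppresses a thundering herd of identical loads (key and value below are illustrative):

package main

import (
    "fmt"
    "sync"
    "sync/atomic"

    "github.com/golang/groupcache/singleflight"
)

func main() {
    var g singleflight.Group
    var calls int32
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            // Overlapping goroutines share one in-flight call for
            // "hot-key"; fn runs once per flight and every waiter
            // receives the same result.
            v, _ := g.Do("hot-key", func() (interface{}, error) {
                atomic.AddInt32(&calls, 1)
                return "value", nil
            })
            _ = v
        }()
    }
    wg.Wait()
    fmt.Println("underlying calls:", atomic.LoadInt32(&calls)) // typically 1
}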
85
vendor/github.com/golang/groupcache/singleflight/singleflight_test.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package singleflight

import (
    "errors"
    "fmt"
    "sync"
    "sync/atomic"
    "testing"
    "time"
)

func TestDo(t *testing.T) {
    var g Group
    v, err := g.Do("key", func() (interface{}, error) {
        return "bar", nil
    })
    if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want {
        t.Errorf("Do = %v; want %v", got, want)
    }
    if err != nil {
        t.Errorf("Do error = %v", err)
    }
}

func TestDoErr(t *testing.T) {
    var g Group
    someErr := errors.New("Some error")
    v, err := g.Do("key", func() (interface{}, error) {
        return nil, someErr
    })
    if err != someErr {
        t.Errorf("Do error = %v; want someErr", err)
    }
    if v != nil {
        t.Errorf("unexpected non-nil value %#v", v)
    }
}

func TestDoDupSuppress(t *testing.T) {
    var g Group
    c := make(chan string)
    var calls int32
    fn := func() (interface{}, error) {
        atomic.AddInt32(&calls, 1)
        return <-c, nil
    }

    const n = 10
    var wg sync.WaitGroup
    for i := 0; i < n; i++ {
        wg.Add(1)
        go func() {
            v, err := g.Do("key", fn)
            if err != nil {
                t.Errorf("Do error: %v", err)
            }
            if v.(string) != "bar" {
                t.Errorf("got %q; want %q", v, "bar")
            }
            wg.Done()
        }()
    }
    time.Sleep(100 * time.Millisecond) // let goroutines above block
    c <- "bar"
    wg.Wait()
    if got := atomic.LoadInt32(&calls); got != 1 {
        t.Errorf("number of calls = %d; want 1", got)
    }
}
322
vendor/github.com/golang/groupcache/sinks.go
generated
vendored
Normal file
@@ -0,0 +1,322 @@
/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package groupcache

import (
    "errors"

    "github.com/golang/protobuf/proto"
)

// A Sink receives data from a Get call.
//
// Implementation of Getter must call exactly one of the Set methods
// on success.
type Sink interface {
    // SetString sets the value to s.
    SetString(s string) error

    // SetBytes sets the value to the contents of v.
    // The caller retains ownership of v.
    SetBytes(v []byte) error

    // SetProto sets the value to the encoded version of m.
    // The caller retains ownership of m.
    SetProto(m proto.Message) error

    // view returns a frozen view of the bytes for caching.
    view() (ByteView, error)
}

func cloneBytes(b []byte) []byte {
    c := make([]byte, len(b))
    copy(c, b)
    return c
}

func setSinkView(s Sink, v ByteView) error {
    // A viewSetter is a Sink that can also receive its value from
    // a ByteView. This is a fast path to minimize copies when the
    // item was already cached locally in memory (where it's
    // cached as a ByteView)
    type viewSetter interface {
        setView(v ByteView) error
    }
    if vs, ok := s.(viewSetter); ok {
        return vs.setView(v)
    }
    if v.b != nil {
        return s.SetBytes(v.b)
    }
    return s.SetString(v.s)
}

// StringSink returns a Sink that populates the provided string pointer.
func StringSink(sp *string) Sink {
    return &stringSink{sp: sp}
}

type stringSink struct {
    sp *string
    v  ByteView
    // TODO(bradfitz): track whether any Sets were called.
}

func (s *stringSink) view() (ByteView, error) {
    // TODO(bradfitz): return an error if no Set was called
    return s.v, nil
}

func (s *stringSink) SetString(v string) error {
    s.v.b = nil
    s.v.s = v
    *s.sp = v
    return nil
}

func (s *stringSink) SetBytes(v []byte) error {
    return s.SetString(string(v))
}

func (s *stringSink) SetProto(m proto.Message) error {
    b, err := proto.Marshal(m)
    if err != nil {
        return err
    }
    s.v.b = b
    *s.sp = string(b)
    return nil
}

// ByteViewSink returns a Sink that populates a ByteView.
func ByteViewSink(dst *ByteView) Sink {
    if dst == nil {
        panic("nil dst")
    }
    return &byteViewSink{dst: dst}
}

type byteViewSink struct {
    dst *ByteView

    // if this code ever ends up tracking that at least one set*
    // method was called, don't make it an error to call set
    // methods multiple times. Lorry's payload.go does that, and
    // it makes sense. The comment at the top of this file about
    // "exactly one of the Set methods" is overly strict. We
    // really care about at least once (in a handler), but if
    // multiple handlers fail (or multiple functions in a program
    // using a Sink), it's okay to re-use the same one.
}

func (s *byteViewSink) setView(v ByteView) error {
    *s.dst = v
    return nil
}

func (s *byteViewSink) view() (ByteView, error) {
    return *s.dst, nil
}

func (s *byteViewSink) SetProto(m proto.Message) error {
    b, err := proto.Marshal(m)
    if err != nil {
        return err
    }
    *s.dst = ByteView{b: b}
    return nil
}

func (s *byteViewSink) SetBytes(b []byte) error {
    *s.dst = ByteView{b: cloneBytes(b)}
    return nil
}

func (s *byteViewSink) SetString(v string) error {
    *s.dst = ByteView{s: v}
    return nil
}

// ProtoSink returns a sink that unmarshals binary proto values into m.
func ProtoSink(m proto.Message) Sink {
    return &protoSink{
        dst: m,
    }
}

type protoSink struct {
    dst proto.Message // authoritative value
    typ string

    v ByteView // encoded
}

func (s *protoSink) view() (ByteView, error) {
    return s.v, nil
}

func (s *protoSink) SetBytes(b []byte) error {
    err := proto.Unmarshal(b, s.dst)
    if err != nil {
        return err
    }
    s.v.b = cloneBytes(b)
    s.v.s = ""
    return nil
}

func (s *protoSink) SetString(v string) error {
    b := []byte(v)
    err := proto.Unmarshal(b, s.dst)
    if err != nil {
        return err
    }
    s.v.b = b
    s.v.s = ""
    return nil
}

func (s *protoSink) SetProto(m proto.Message) error {
    b, err := proto.Marshal(m)
    if err != nil {
        return err
    }
    // TODO(bradfitz): optimize for same-task case more and write
    // right through? would need to document ownership rules at
    // the same time. but then we could just assign *dst = *m
    // here. This works for now:
    err = proto.Unmarshal(b, s.dst)
    if err != nil {
        return err
    }
    s.v.b = b
    s.v.s = ""
    return nil
}

// AllocatingByteSliceSink returns a Sink that allocates
// a byte slice to hold the received value and assigns
// it to *dst. The memory is not retained by groupcache.
func AllocatingByteSliceSink(dst *[]byte) Sink {
    return &allocBytesSink{dst: dst}
}

type allocBytesSink struct {
    dst *[]byte
    v   ByteView
}

func (s *allocBytesSink) view() (ByteView, error) {
    return s.v, nil
}

func (s *allocBytesSink) setView(v ByteView) error {
    if v.b != nil {
        *s.dst = cloneBytes(v.b)
    } else {
        *s.dst = []byte(v.s)
    }
    s.v = v
    return nil
}

func (s *allocBytesSink) SetProto(m proto.Message) error {
    b, err := proto.Marshal(m)
    if err != nil {
        return err
    }
    return s.setBytesOwned(b)
}

func (s *allocBytesSink) SetBytes(b []byte) error {
    return s.setBytesOwned(cloneBytes(b))
}

func (s *allocBytesSink) setBytesOwned(b []byte) error {
    if s.dst == nil {
        return errors.New("nil AllocatingByteSliceSink *[]byte dst")
    }
    *s.dst = cloneBytes(b) // another copy, protecting the read-only s.v.b view
    s.v.b = b
    s.v.s = ""
    return nil
}

func (s *allocBytesSink) SetString(v string) error {
    if s.dst == nil {
        return errors.New("nil AllocatingByteSliceSink *[]byte dst")
    }
    *s.dst = []byte(v)
    s.v.b = nil
    s.v.s = v
    return nil
}

// TruncatingByteSliceSink returns a Sink that writes up to len(*dst)
// bytes to *dst. If more bytes are available, they're silently
// truncated. If fewer bytes are available than len(*dst), *dst
// is shrunk to fit the number of bytes available.
func TruncatingByteSliceSink(dst *[]byte) Sink {
    return &truncBytesSink{dst: dst}
}

type truncBytesSink struct {
    dst *[]byte
    v   ByteView
}

func (s *truncBytesSink) view() (ByteView, error) {
    return s.v, nil
}

func (s *truncBytesSink) SetProto(m proto.Message) error {
    b, err := proto.Marshal(m)
    if err != nil {
        return err
    }
    return s.setBytesOwned(b)
}

func (s *truncBytesSink) SetBytes(b []byte) error {
    return s.setBytesOwned(cloneBytes(b))
}

func (s *truncBytesSink) setBytesOwned(b []byte) error {
    if s.dst == nil {
        return errors.New("nil TruncatingByteSliceSink *[]byte dst")
    }
    n := copy(*s.dst, b)
    if n < len(*s.dst) {
        *s.dst = (*s.dst)[:n]
    }
    s.v.b = b
    s.v.s = ""
    return nil
}

func (s *truncBytesSink) SetString(v string) error {
    if s.dst == nil {
        return errors.New("nil TruncatingByteSliceSink *[]byte dst")
    }
    n := copy(*s.dst, v)
    if n < len(*s.dst) {
        *s.dst = (*s.dst)[:n]
    }
    s.v.b = nil
    s.v.s = v
    return nil
}
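Aside: a caller-side sketch of picking a sink; it assumes a *groupcache.Group named group and a Context ctx are already in scope (both illustrative, as is the key). Each sink trades copies for convenience as documented above:

var s string
err := group.Get(ctx, "k", groupcache.StringSink(&s)) // s receives the whole value

buf := make([]byte, 6)
err = group.Get(ctx, "k", groupcache.TruncatingByteSliceSink(&buf))
// buf now holds at most 6 bytes; shorter values shrink the slice in place.

var b []byte
err = group.Get(ctx, "k", groupcache.AllocatingByteSliceSink(&b))
// b is a freshly allocated copy that groupcache does not retain.
_ = err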
235
vendor/github.com/golang/groupcache/testpb/test.pb.go
generated
vendored
Normal file
@@ -0,0 +1,235 @@
// Code generated by protoc-gen-go.
// source: test.proto
// DO NOT EDIT!

package testpb

import proto "github.com/golang/protobuf/proto"
import json "encoding/json"
import math "math"

// Reference proto, json, and math imports to suppress error if they are not otherwise used.
var _ = proto.Marshal
var _ = &json.SyntaxError{}
var _ = math.Inf

type TestMessage struct {
    Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    City             *string `protobuf:"bytes,2,opt,name=city" json:"city,omitempty"`
    XXX_unrecognized []byte  `json:"-"`
}

func (m *TestMessage) Reset()         { *m = TestMessage{} }
func (m *TestMessage) String() string { return proto.CompactTextString(m) }
func (*TestMessage) ProtoMessage()    {}

func (m *TestMessage) GetName() string {
    if m != nil && m.Name != nil {
        return *m.Name
    }
    return ""
}

func (m *TestMessage) GetCity() string {
    if m != nil && m.City != nil {
        return *m.City
    }
    return ""
}

type TestRequest struct {
    Lower            *string `protobuf:"bytes,1,req,name=lower" json:"lower,omitempty"`
    RepeatCount      *int32  `protobuf:"varint,2,opt,name=repeat_count,def=1" json:"repeat_count,omitempty"`
    XXX_unrecognized []byte  `json:"-"`
}

func (m *TestRequest) Reset()         { *m = TestRequest{} }
func (m *TestRequest) String() string { return proto.CompactTextString(m) }
func (*TestRequest) ProtoMessage()    {}

const Default_TestRequest_RepeatCount int32 = 1

func (m *TestRequest) GetLower() string {
    if m != nil && m.Lower != nil {
        return *m.Lower
    }
    return ""
}

func (m *TestRequest) GetRepeatCount() int32 {
    if m != nil && m.RepeatCount != nil {
        return *m.RepeatCount
    }
    return Default_TestRequest_RepeatCount
}

type TestResponse struct {
    Value            *string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
    XXX_unrecognized []byte  `json:"-"`
}

func (m *TestResponse) Reset()         { *m = TestResponse{} }
func (m *TestResponse) String() string { return proto.CompactTextString(m) }
func (*TestResponse) ProtoMessage()    {}

func (m *TestResponse) GetValue() string {
    if m != nil && m.Value != nil {
        return *m.Value
    }
    return ""
}

type CacheStats struct {
    Items            *int64 `protobuf:"varint,1,opt,name=items" json:"items,omitempty"`
    Bytes            *int64 `protobuf:"varint,2,opt,name=bytes" json:"bytes,omitempty"`
    Gets             *int64 `protobuf:"varint,3,opt,name=gets" json:"gets,omitempty"`
    Hits             *int64 `protobuf:"varint,4,opt,name=hits" json:"hits,omitempty"`
    Evicts           *int64 `protobuf:"varint,5,opt,name=evicts" json:"evicts,omitempty"`
    XXX_unrecognized []byte `json:"-"`
}

func (m *CacheStats) Reset()         { *m = CacheStats{} }
func (m *CacheStats) String() string { return proto.CompactTextString(m) }
func (*CacheStats) ProtoMessage()    {}

func (m *CacheStats) GetItems() int64 {
    if m != nil && m.Items != nil {
        return *m.Items
    }
    return 0
}

func (m *CacheStats) GetBytes() int64 {
    if m != nil && m.Bytes != nil {
        return *m.Bytes
    }
    return 0
}

func (m *CacheStats) GetGets() int64 {
    if m != nil && m.Gets != nil {
        return *m.Gets
    }
    return 0
}

func (m *CacheStats) GetHits() int64 {
    if m != nil && m.Hits != nil {
        return *m.Hits
    }
    return 0
}

func (m *CacheStats) GetEvicts() int64 {
    if m != nil && m.Evicts != nil {
        return *m.Evicts
    }
    return 0
}

type StatsResponse struct {
    Gets             *int64      `protobuf:"varint,1,opt,name=gets" json:"gets,omitempty"`
    CacheHits        *int64      `protobuf:"varint,12,opt,name=cache_hits" json:"cache_hits,omitempty"`
    Fills            *int64      `protobuf:"varint,2,opt,name=fills" json:"fills,omitempty"`
    TotalAlloc       *uint64     `protobuf:"varint,3,opt,name=total_alloc" json:"total_alloc,omitempty"`
    MainCache        *CacheStats `protobuf:"bytes,4,opt,name=main_cache" json:"main_cache,omitempty"`
    HotCache         *CacheStats `protobuf:"bytes,5,opt,name=hot_cache" json:"hot_cache,omitempty"`
    ServerIn         *int64      `protobuf:"varint,6,opt,name=server_in" json:"server_in,omitempty"`
    Loads            *int64      `protobuf:"varint,8,opt,name=loads" json:"loads,omitempty"`
    PeerLoads        *int64      `protobuf:"varint,9,opt,name=peer_loads" json:"peer_loads,omitempty"`
    PeerErrors       *int64      `protobuf:"varint,10,opt,name=peer_errors" json:"peer_errors,omitempty"`
    LocalLoads       *int64      `protobuf:"varint,11,opt,name=local_loads" json:"local_loads,omitempty"`
    XXX_unrecognized []byte      `json:"-"`
}

func (m *StatsResponse) Reset()         { *m = StatsResponse{} }
func (m *StatsResponse) String() string { return proto.CompactTextString(m) }
func (*StatsResponse) ProtoMessage()    {}

func (m *StatsResponse) GetGets() int64 {
    if m != nil && m.Gets != nil {
        return *m.Gets
    }
    return 0
}

func (m *StatsResponse) GetCacheHits() int64 {
    if m != nil && m.CacheHits != nil {
        return *m.CacheHits
    }
    return 0
}

func (m *StatsResponse) GetFills() int64 {
    if m != nil && m.Fills != nil {
        return *m.Fills
    }
    return 0
}

func (m *StatsResponse) GetTotalAlloc() uint64 {
    if m != nil && m.TotalAlloc != nil {
        return *m.TotalAlloc
    }
    return 0
}

func (m *StatsResponse) GetMainCache() *CacheStats {
    if m != nil {
        return m.MainCache
    }
    return nil
}

func (m *StatsResponse) GetHotCache() *CacheStats {
    if m != nil {
        return m.HotCache
    }
    return nil
}

func (m *StatsResponse) GetServerIn() int64 {
    if m != nil && m.ServerIn != nil {
        return *m.ServerIn
    }
    return 0
}

func (m *StatsResponse) GetLoads() int64 {
    if m != nil && m.Loads != nil {
        return *m.Loads
    }
    return 0
}

func (m *StatsResponse) GetPeerLoads() int64 {
    if m != nil && m.PeerLoads != nil {
        return *m.PeerLoads
    }
    return 0
}

func (m *StatsResponse) GetPeerErrors() int64 {
    if m != nil && m.PeerErrors != nil {
        return *m.PeerErrors
    }
    return 0
}

func (m *StatsResponse) GetLocalLoads() int64 {
    if m != nil && m.LocalLoads != nil {
        return *m.LocalLoads
    }
    return 0
}

type Empty struct {
    XXX_unrecognized []byte `json:"-"`
}

func (m *Empty) Reset()         { *m = Empty{} }
func (m *Empty) String() string { return proto.CompactTextString(m) }
func (*Empty) ProtoMessage()    {}

func init() {
}
63
vendor/github.com/golang/groupcache/testpb/test.proto
generated
vendored
Normal file
@@ -0,0 +1,63 @@
/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

syntax = "proto2";

package testpb;

message TestMessage {
    optional string name = 1;
    optional string city = 2;
}

message TestRequest {
    required string lower = 1; // to be returned upper case
    optional int32 repeat_count = 2 [default = 1]; // .. this many times
}

message TestResponse {
    optional string value = 1;
}

message CacheStats {
    optional int64 items = 1;
    optional int64 bytes = 2;
    optional int64 gets = 3;
    optional int64 hits = 4;
    optional int64 evicts = 5;
}

message StatsResponse {
    optional int64 gets = 1;
    optional int64 cache_hits = 12;
    optional int64 fills = 2;
    optional uint64 total_alloc = 3;
    optional CacheStats main_cache = 4;
    optional CacheStats hot_cache = 5;
    optional int64 server_in = 6;
    optional int64 loads = 8;
    optional int64 peer_loads = 9;
    optional int64 peer_errors = 10;
    optional int64 local_loads = 11;
}

message Empty {}

service GroupCacheTest {
    rpc InitPeers(Empty) returns (Empty) {};
    rpc Get(TestRequest) returns (TestResponse) {};
    rpc GetStats(Empty) returns (StatsResponse) {};
}
187
vendor/k8s.io/kubernetes/CHANGELOG-1.16.md
generated
vendored
@@ -1,22 +1,30 @@
<!-- BEGIN MUNGE: GENERATED_TOC -->
|
||||
- [v1.16.2](#v1162)
|
||||
- [Downloads for v1.16.2](#downloads-for-v1162)
|
||||
- [v1.16.3](#v1163)
|
||||
- [Downloads for v1.16.3](#downloads-for-v1163)
|
||||
- [Client Binaries](#client-binaries)
|
||||
- [Server Binaries](#server-binaries)
|
||||
- [Node Binaries](#node-binaries)
|
||||
- [Changelog since v1.16.1](#changelog-since-v1161)
|
||||
- [v1.16.1](#v1161)
|
||||
- [Downloads for v1.16.1](#downloads-for-v1161)
|
||||
- [Changelog since v1.16.2](#changelog-since-v1162)
|
||||
- [Other notable changes](#other-notable-changes)
|
||||
- [v1.16.2](#v1162)
|
||||
- [Downloads for v1.16.2](#downloads-for-v1162)
|
||||
- [Client Binaries](#client-binaries-1)
|
||||
- [Server Binaries](#server-binaries-1)
|
||||
- [Node Binaries](#node-binaries-1)
|
||||
- [Changelog since v1.16.0](#changelog-since-v1160)
|
||||
- [Other notable changes](#other-notable-changes)
|
||||
- [v1.16.0](#v1160)
|
||||
- [Downloads for v1.16.0](#downloads-for-v1160)
|
||||
- [Changelog since v1.16.1](#changelog-since-v1161)
|
||||
- [Other notable changes](#other-notable-changes-1)
|
||||
- [v1.16.1](#v1161)
|
||||
- [Downloads for v1.16.1](#downloads-for-v1161)
|
||||
- [Client Binaries](#client-binaries-2)
|
||||
- [Server Binaries](#server-binaries-2)
|
||||
- [Node Binaries](#node-binaries-2)
|
||||
- [Changelog since v1.16.0](#changelog-since-v1160)
|
||||
- [Other notable changes](#other-notable-changes-2)
|
||||
- [v1.16.0](#v1160)
|
||||
- [Downloads for v1.16.0](#downloads-for-v1160)
|
||||
- [Client Binaries](#client-binaries-3)
|
||||
- [Server Binaries](#server-binaries-3)
|
||||
- [Node Binaries](#node-binaries-3)
|
||||
- [Kubernetes v1.16.0 Release Notes](#kubernetes-v1160-release-notes)
|
||||
- [What’s New (Major Themes)](#whats-new-major-themes)
|
||||
- [Additional Notable Feature Updates](#additional-notable-feature-updates)
|
||||
@@ -36,7 +44,7 @@
- [CLI Improvements](#cli-improvements)
- [Misc](#misc)
- [API Changes](#api-changes)
- [Other notable changes](#other-notable-changes-1)
- [Other notable changes](#other-notable-changes-3)
- [API Machinery](#api-machinery)
- [Apps](#apps)
- [Auth](#auth)
@@ -60,62 +68,136 @@
- [Removed](#removed-1)
- [v1.16.0-rc.2](#v1160-rc2)
- [Downloads for v1.16.0-rc.2](#downloads-for-v1160-rc2)
- [Client Binaries](#client-binaries-3)
- [Server Binaries](#server-binaries-3)
- [Node Binaries](#node-binaries-3)
- [Changelog since v1.16.0-rc.1](#changelog-since-v1160-rc1)
- [Other notable changes](#other-notable-changes-2)
- [v1.16.0-rc.1](#v1160-rc1)
- [Downloads for v1.16.0-rc.1](#downloads-for-v1160-rc1)
- [Client Binaries](#client-binaries-4)
- [Server Binaries](#server-binaries-4)
- [Node Binaries](#node-binaries-4)
- [Changelog since v1.16.0-beta.2](#changelog-since-v1160-beta2)
- [Other notable changes](#other-notable-changes-3)
- [v1.16.0-beta.2](#v1160-beta2)
- [Downloads for v1.16.0-beta.2](#downloads-for-v1160-beta2)
- [Changelog since v1.16.0-rc.1](#changelog-since-v1160-rc1)
- [Other notable changes](#other-notable-changes-4)
- [v1.16.0-rc.1](#v1160-rc1)
- [Downloads for v1.16.0-rc.1](#downloads-for-v1160-rc1)
- [Client Binaries](#client-binaries-5)
- [Server Binaries](#server-binaries-5)
- [Node Binaries](#node-binaries-5)
- [Changelog since v1.16.0-beta.1](#changelog-since-v1160-beta1)
- [Other notable changes](#other-notable-changes-4)
- [v1.16.0-beta.1](#v1160-beta1)
- [Downloads for v1.16.0-beta.1](#downloads-for-v1160-beta1)
- [Changelog since v1.16.0-beta.2](#changelog-since-v1160-beta2)
- [Other notable changes](#other-notable-changes-5)
- [v1.16.0-beta.2](#v1160-beta2)
- [Downloads for v1.16.0-beta.2](#downloads-for-v1160-beta2)
- [Client Binaries](#client-binaries-6)
- [Server Binaries](#server-binaries-6)
- [Node Binaries](#node-binaries-6)
- [Changelog since v1.16.0-alpha.3](#changelog-since-v1160-alpha3)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-5)
- [v1.16.0-alpha.3](#v1160-alpha3)
- [Downloads for v1.16.0-alpha.3](#downloads-for-v1160-alpha3)
- [Changelog since v1.16.0-beta.1](#changelog-since-v1160-beta1)
- [Other notable changes](#other-notable-changes-6)
- [v1.16.0-beta.1](#v1160-beta1)
- [Downloads for v1.16.0-beta.1](#downloads-for-v1160-beta1)
- [Client Binaries](#client-binaries-7)
- [Server Binaries](#server-binaries-7)
- [Node Binaries](#node-binaries-7)
- [Changelog since v1.16.0-alpha.2](#changelog-since-v1160-alpha2)
- [Action Required](#action-required-1)
- [Other notable changes](#other-notable-changes-6)
- [v1.16.0-alpha.2](#v1160-alpha2)
- [Downloads for v1.16.0-alpha.2](#downloads-for-v1160-alpha2)
- [Changelog since v1.16.0-alpha.3](#changelog-since-v1160-alpha3)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-7)
- [v1.16.0-alpha.3](#v1160-alpha3)
- [Downloads for v1.16.0-alpha.3](#downloads-for-v1160-alpha3)
- [Client Binaries](#client-binaries-8)
- [Server Binaries](#server-binaries-8)
- [Node Binaries](#node-binaries-8)
- [Changelog since v1.16.0-alpha.1](#changelog-since-v1160-alpha1)
- [Action Required](#action-required-2)
- [Other notable changes](#other-notable-changes-7)
- [v1.16.0-alpha.1](#v1160-alpha1)
- [Downloads for v1.16.0-alpha.1](#downloads-for-v1160-alpha1)
- [Changelog since v1.16.0-alpha.2](#changelog-since-v1160-alpha2)
- [Action Required](#action-required-1)
- [Other notable changes](#other-notable-changes-8)
- [v1.16.0-alpha.2](#v1160-alpha2)
- [Downloads for v1.16.0-alpha.2](#downloads-for-v1160-alpha2)
- [Client Binaries](#client-binaries-9)
- [Server Binaries](#server-binaries-9)
- [Node Binaries](#node-binaries-9)
- [Changelog since v1.16.0-alpha.1](#changelog-since-v1160-alpha1)
- [Action Required](#action-required-2)
- [Other notable changes](#other-notable-changes-9)
- [v1.16.0-alpha.1](#v1160-alpha1)
- [Downloads for v1.16.0-alpha.1](#downloads-for-v1160-alpha1)
- [Client Binaries](#client-binaries-10)
- [Server Binaries](#server-binaries-10)
- [Node Binaries](#node-binaries-10)
- [Changelog since v1.15.0](#changelog-since-v1150)
- [Action Required](#action-required-3)
- [Other notable changes](#other-notable-changes-8)
- [Other notable changes](#other-notable-changes-10)
<!-- END MUNGE: GENERATED_TOC -->

<!-- NEW RELEASE NOTES ENTRY -->


# v1.16.3

[Documentation](https://docs.k8s.io)

## Downloads for v1.16.3


filename | sha512 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes.tar.gz) | `17618d2ece64346d03db6a6c31eb3c38bbc3aa206005e99032791bdbe7dad1f401bdb9d707995ff83b658dd1ad59d9b53489e5ee45b9f209edf09b35326aac4a`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-src.tar.gz) | `e51d3418b006fb28039dc30095333f9e362c785eb27778554005a83e70860b33b554f61ddb6dd83e05d1ca0dda4c6f9bf2812347109e83ff72ad4d355bd22546`

### Client Binaries

filename | sha512 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-client-darwin-386.tar.gz) | `15d3316afc04ae959658cc4489347b1e425df37ce7438e79357bd7fde783766374315c4c6e4ea8a94dab8d113c1871aff70de84e3181d08c37849c4971f6ad7e`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-client-darwin-amd64.tar.gz) | `82c2af675d77dea335d99247df9136668bda81d64da82a949543369bb31e0bc2963f9de83cb34174b01a93f6a1acab3459fb64621e771e4f7362c090ac2a256d`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-client-linux-386.tar.gz) | `193ae911e58fa61e001730cfa8018cb77b672b77cba6b3916bde53a73add160641085176d8bc60ea25e22f7b9a8d2a9ec17da139ec234771939cc55d5b2c0931`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-client-linux-amd64.tar.gz) | `904604839bbf46c11c7934f6f906adbc968234044b9f3268b71c175241626f8d4076812aca789dc5aa2f85fd25a384ad779638231d4d94f3f3c6d043b5d9f062`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-client-linux-arm.tar.gz) | `016e9a5662afa27da5ffb055fbf2bf4135b8521deb6bd3dea565f250f611f1690be9d6ea18c3fd1053c71c03213bc53441d88e70633192a443b838d353783278`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-client-linux-arm64.tar.gz) | `2ee032304114556bf3ab78c000f32ab9605e294030d83edbe013fdeb19eb49283d8b8a9c120e2d076f394f2e0d74015384754e6876e62f68ec515378ddbce77e`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-client-linux-ppc64le.tar.gz) | `07299344bc4c2da573b2bc4b2fa9c7a60358c4a591d711f8a9ce45768543fe678d1f15412db0fa02b777964cebcee2bcf9dea16d94a97754eac163b77cb6ddc8`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-client-linux-s390x.tar.gz) | `83e6a17b86740199788b48e94534ea6cd08c9325325a7f75a7073771a1de421843c2898ef385a9d8912d7cfd99c09b60a4bb4f3c6220c8f212c08a72a85661b2`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-client-windows-386.tar.gz) | `bfc6ff8fcc7e1219074df6db5d42fb38af4b6c10e36fb0fd7c6d8aa72fc01cbca5658d8c6a9882ed234cc20652511f551c5f59d77a0a46fc2b27f96e6f4e385a`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-client-windows-amd64.tar.gz) | `ccd9d4f53152f1c09cb5c2194892afaf93faf7bfbbf0b8ea0d6c333f121593b7c7d0039246af9234b684ac8501e4f778de8ddf02d4b34ca191621f7c74c811ba`

### Server Binaries

filename | sha512 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-server-linux-amd64.tar.gz) | `040f6bfb78cb336d2d23360c0921ef7e008561b2f10bed3261c33811d1d818693cd24dcb5bf52cbf854f4209a8968a5e07ff8356e3e061a4db14623469098e5f`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-server-linux-arm.tar.gz) | `e8d8312f44ecd38d8d88300cda22ef364a637af4a666869e0926bb25008b31eceb5cbdc4ba2fda9358d996c1b9776fe4ae49693b3e95237bd82c2ec7898b4249`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-server-linux-arm64.tar.gz) | `39e7d76e88da1ac712faf6d61bb980c0a4ac01d4996bb559cc6a9c132105a5b073d7833d431ba44418b4981a190ff613463cbf16f25e4ce38a31f43191842680`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-server-linux-ppc64le.tar.gz) | `72c418ade3291911da4a654ae4c4a88f7d02ad1608d9d9721e9ce7199c4dc07090c9f53b6350911050f25c7e583423ba9e985260e94982e217119b2b0a7be501`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-server-linux-s390x.tar.gz) | `7405ca92726fb8e4083ee66a996df98e37c1a04b679634fadb0f2e64aa7e264a5e55b1149fede58d7dc693c71551e31abbc1e2bd8b137686debd2091ce9059b4`

### Node Binaries

filename | sha512 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-node-linux-amd64.tar.gz) | `6074a03879f385fb3411b024ada7e1d2b8ae89990fe4f5a9c6046424cbb567afaf24264a003157b682c91eedcbd09bca75a51265fbdb3a948930fe7494c997e2`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-node-linux-arm.tar.gz) | `277432b212c98e7eb8020b17745d4c90dbc6c359148efc38e41e0836085eac4655effd9e5a694162d61ee31adc8eb1e9f6196fa9f05d898e2f4d596a1e097055`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-node-linux-arm64.tar.gz) | `6354e8862e60d6c6d60a115dd6043beed8205991c833716426071dc70d3658da1ddc9b23c4e16cae0410a86ea82eab3240f2a5123e753ff55a151e6f96db436e`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-node-linux-ppc64le.tar.gz) | `2e5bdf3a64b5387fe60aff1b05ccf0f822a3955815576f979e2e8b7673373c49b602ab53fc1a63dd4467787e9b62f3f369f4a656812078709be795e85aaf253b`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-node-linux-s390x.tar.gz) | `44632772ac2e13b91e9b04c05c42522f8fdcf574daff6e98719724ed0e07c44e1d01c0f047bf462e00f092a21b617822063ee6cc987c531fc7f0980c6b1319ba`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.16.3/kubernetes-node-windows-amd64.tar.gz) | `eb6128b0aa1001b0fea5d458692c96191bfaa51c4a8736a9c370e38fffb9d72a28937581cf79a1ea529058f039a08a3b8a23e7372c7f8a8265a5f9acd252a4c1`

## Changelog since v1.16.2

### Other notable changes

* kubeadm: fix skipped etcd upgrade on secondary control-plane nodes when the command "kubeadm upgrade node" is used. ([#85024](https://github.com/kubernetes/kubernetes/pull/85024), [@neolit123](https://github.com/neolit123))
* Fix kubelet metrics gathering on non-English Windows hosts ([#84156](https://github.com/kubernetes/kubernetes/pull/84156), [@wawa0210](https://github.com/wawa0210))
* Bump metrics-server to v0.3.5 ([#83015](https://github.com/kubernetes/kubernetes/pull/83015), [@olagacek](https://github.com/olagacek))
* Bumps metrics-server version to v0.3.6 with following bugfix: ([#83907](https://github.com/kubernetes/kubernetes/pull/83907), [@olagacek](https://github.com/olagacek))
* Don't break metric storage when duplicate pod metrics encountered causing hpa to fail
* kube-apiserver: Fixed a regression accepting patch requests > 1MB ([#84963](https://github.com/kubernetes/kubernetes/pull/84963), [@liggitt](https://github.com/liggitt))
* kube-apiserver: fixed a bug that could cause a goroutine leak if the apiserver encountered an encoding error serving a watch to a websocket watcher ([#84693](https://github.com/kubernetes/kubernetes/pull/84693), [@tedyu](https://github.com/tedyu))
* azure: Add allow unsafe read from cache ([#83685](https://github.com/kubernetes/kubernetes/pull/83685), [@aramase](https://github.com/aramase))
* Add data cache flushing during unmount device for GCE-PD driver in Windows Server. ([#83591](https://github.com/kubernetes/kubernetes/pull/83591), [@jingxu97](https://github.com/jingxu97))
* Fixed binding of block PersistentVolumes / PersistentVolumeClaims when BlockVolume feature is off. ([#84049](https://github.com/kubernetes/kubernetes/pull/84049), [@jsafrane](https://github.com/jsafrane))
* kube-scheduler now fallbacks to emitting events using core/v1 Events when events.k8s.io/v1beta1 is disabled. ([#83692](https://github.com/kubernetes/kubernetes/pull/83692), [@yastij](https://github.com/yastij))
* Adds a metric apiserver_request_error_total to kube-apiserver. This metric tallies the number of request_errors encountered by verb, group, version, resource, subresource, scope, component, and code. ([#83427](https://github.com/kubernetes/kubernetes/pull/83427), [@logicalhan](https://github.com/logicalhan))
* Update Cluster Autoscaler version to 1.16.2 (CA release docs: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.16.2) ([#84038](https://github.com/kubernetes/kubernetes/pull/84038), [@losipiuk](https://github.com/losipiuk))
* Fixed an issue with informers missing an `Added` event if a recently deleted object was immediately recreated at the same time the informer dropped a watch and relisted. ([#83911](https://github.com/kubernetes/kubernetes/pull/83911), [@matte21](https://github.com/matte21))
* CSI detach timeout increased from 10 seconds to 2 minutes ([#84321](https://github.com/kubernetes/kubernetes/pull/84321), [@cduchesne](https://github.com/cduchesne))
* Switched intstr.Type to sized integer to follow API guidelines and improve compatibility with proto libraries ([#83956](https://github.com/kubernetes/kubernetes/pull/83956), [@liggitt](https://github.com/liggitt))
* Upgrade to etcd client 3.3.17 to fix bug where etcd client does not parse IPv6 addresses correctly when members are joining, and to fix bug where failover on multi-member etcd cluster fails certificate check on DNS mismatch ([#83968](https://github.com/kubernetes/kubernetes/pull/83968), [@jpbetz](https://github.com/jpbetz))
* Update to use go1.12.12 ([#84064](https://github.com/kubernetes/kubernetes/pull/84064), [@cblecker](https://github.com/cblecker))
* Fix handling tombstones in pod-disruption-budged controller. ([#83951](https://github.com/kubernetes/kubernetes/pull/83951), [@zouyee](https://github.com/zouyee))



# v1.16.2

[Documentation](https://docs.k8s.io)
@@ -381,6 +463,7 @@ The main themes of this release are:
- Aggregated discovery requests can now timeout. Aggregated API servers must complete discovery calls within 5 seconds (other requests can take longer). Use the feature gate `EnableAggregatedDiscoveryTimeout=false` to temporarily revert behavior to the previous 30 second timeout if required (the temporary `EnableAggregatedDiscoveryTimeout` feature gate will be removed in v1.17). ([#82146](https://github.com/kubernetes/kubernetes/pull/82146), [@deads2k](https://github.com/deads2k))
- the `scheduler.alpha.kubernetes.io/critical-pod` annotation is removed. Pod priority (`spec.priorityClassName`) should be used instead to mark pods as critical. ([#80342](https://github.com/kubernetes/kubernetes/pull/80342), [@draveness](https://github.com/draveness))
- the NormalizeScore plugin set is removed from scheduler framework config API. Use ScorePlugin only. ([#80930](https://github.com/kubernetes/kubernetes/pull/80930), [@liu-cong](https://github.com/liu-cong))
- the node label `alpha.service-controller.kubernetes.io/exclude-balancer` which excludes a node from cloud load balancers (using Service Type=LoadBalancer) is deprecated in favor of `node.kubernetes.io/exclude-balancer`. Support for `alpha.service-controller.kubernetes.io/exclude-balancer` will be removed in v1.18.

- Features:

@@ -463,7 +546,7 @@ The main themes of this release are:
- kube-apiserver metrics are now marked as with the ALPHA stability level. ([#81531](https://github.com/kubernetes/kubernetes/pull/81531), [@logicalhan](https://github.com/logicalhan))
- kubelet metrics for /metrics and /metrics/probes are now marked as with the ALPHA stability level. ([#81534](https://github.com/kubernetes/kubernetes/pull/81534), [@logicalhan](https://github.com/logicalhan))
- Scheduler metrics are now marked as with the ALPHA stability level. ([#81576](https://github.com/kubernetes/kubernetes/pull/81576), [@logicalhan](https://github.com/logicalhan))
- The `rejected` label in `apiserver_admission_webhook_admission_duration_seconds` metrices now properly indicates if the request was rejected. ([#81399](https://github.com/kubernetes/kubernetes/pull/81399), [@roycaihw](https://github.com/roycaihw))
- The `rejected` label in `apiserver_admission_webhook_admission_duration_seconds` metrics now properly indicates if the request was rejected. ([#81399](https://github.com/kubernetes/kubernetes/pull/81399), [@roycaihw](https://github.com/roycaihw))
- Fixed a bug in the CSI metrics that does not return not supported error when a CSI driver does not support metrics. ([#79851](https://github.com/kubernetes/kubernetes/pull/79851), [@jparklab](https://github.com/jparklab))
- Fix disk stats in LXD using ZFS storage pool and CRI-O missing network metris bug ([#81972](https://github.com/kubernetes/kubernetes/pull/81972), [@dashpole](https://github.com/dashpole))

@@ -1176,14 +1259,14 @@ filename | sha512 hash
* Make kubectl get --ignore-not-found continue processing when encountering error. ([#82120](https://github.com/kubernetes/kubernetes/pull/82120), [@soltysh](https://github.com/soltysh))
* Dual stack services (Phase II of IPv6DualStack feature) are enabled via the IPVS proxier. iptables proxier does not support dualstack yet. Dualstack iptables proxier is WIP and should catchup soon. ([#82091](https://github.com/kubernetes/kubernetes/pull/82091), [@khenidak](https://github.com/khenidak))
* to enable, kube-proxy must be have the following flags:
* --proxy-mode=ipvs
* --cluster-cidrs=<cidr>,<cidr>
* --proxy-mode=ipvs
* --cluster-cidrs=<cidr>,<cidr>
* The apiserver now uses http/1.1 to communicate with admission webhooks, opening multiple connections to satisfy concurrent requests, and allowing spreading requests across multiple backing pods. ([#82090](https://github.com/kubernetes/kubernetes/pull/82090), [@liggitt](https://github.com/liggitt))
* Added support to specify a global-access annotation for gce ILB. ([#81549](https://github.com/kubernetes/kubernetes/pull/81549), [@prameshj](https://github.com/prameshj))
* Added new startupProbe, related to KEP https://github.com/kubernetes/enhancements/issues/950. ([#77807](https://github.com/kubernetes/kubernetes/pull/77807), [@matthyx](https://github.com/matthyx))
* Adds \livez for liveness health checking for kube-apiserver. Using the parameter `--maximum-startup-sequence-duration` will allow the liveness endpoint to defer boot-sequence failures for the specified duration period. ([#81969](https://github.com/kubernetes/kubernetes/pull/81969), [@logicalhan](https://github.com/logicalhan))
* Server-side apply is now Beta. ([#81956](https://github.com/kubernetes/kubernetes/pull/81956), [@apelisse](https://github.com/apelisse))
* The `rejected` label in `apiserver_admission_webhook_admission_duration_seconds` metrices now properly indicates if a request was rejected. Add a new counter metrics `apiserver_admission_webhook_rejection_count` with details about the causing for a webhook rejection. ([#81399](https://github.com/kubernetes/kubernetes/pull/81399), [@roycaihw](https://github.com/roycaihw))
* The `rejected` label in `apiserver_admission_webhook_admission_duration_seconds` metrics now properly indicates if a request was rejected. Add a new counter metrics `apiserver_admission_webhook_rejection_count` with details about the causing for a webhook rejection. ([#81399](https://github.com/kubernetes/kubernetes/pull/81399), [@roycaihw](https://github.com/roycaihw))
* Add `container_state` label to `running_container_count` kubelet metrics, to get count of containers based on their state(running/exited/created/unknown) ([#81573](https://github.com/kubernetes/kubernetes/pull/81573), [@irajdeep](https://github.com/irajdeep))
* Fix a bug in CRD openapi controller that user-defined CRD can overwrite OpenAPI definition/path for the CRD API. ([#81436](https://github.com/kubernetes/kubernetes/pull/81436), [@roycaihw](https://github.com/roycaihw))
* Service account tokens now include the JWT Key ID field in their header. ([#78502](https://github.com/kubernetes/kubernetes/pull/78502), [@ahmedtd](https://github.com/ahmedtd))
@@ -1198,7 +1281,7 @@ filename | sha512 hash
* => IPv6 BackendPool name == <clusterName>-IPv6
* This result into:
* - clusters moving from IPv4 to duakstack will require no changes
* - clusters moving from IPv6 (while not seen in the wild, we can not rule out thier existance) to dualstack will require deleting backend pools (the reconciler will take care of creating correct backendpools)
* - clusters moving from IPv6 (while not seen in the wild, we can not rule out their existence) to dualstack will require deleting backend pools (the reconciler will take care of creating correct backendpools)
* Promotes VolumePVCDataSource (Cloning) feature to beta for 1.16 release ([#81792](https://github.com/kubernetes/kubernetes/pull/81792), [@j-griffith](https://github.com/j-griffith))
* Remove kubectl log, use kubectl logs instead ([#78098](https://github.com/kubernetes/kubernetes/pull/78098), [@soltysh](https://github.com/soltysh))
* CSI ephemeral inline volume support is beta, i.e. the CSIInlineVolume feature gate is enabled by default ([#82004](https://github.com/kubernetes/kubernetes/pull/82004), [@pohly](https://github.com/pohly))
@@ -1210,7 +1293,7 @@ filename | sha512 hash
* Master: convert service CIDR to list `--service-cluster-ip-range=<CIDR>,<CIDR>` and make sure `IPv6DualStack` feature flag is turned on. The flag is validated and used as the following:
* 1. `--service-cluster-ip-range[0]` is consider primary service range, and will be used for any service with `Service.Spec.IPFamily = nil` or any service in the at the time of turning on the feature flag.
* 2. A cluster can be dualstack (i.e. Pods and nodes carry dualstack IPs) but does not need to support ingress on dualstack. In this case the cluster can perform egress using `PodIPs` (according to family and binding selection in user code) but will ingress will only be performed against the pod primary IP. This can be configured by supplying single entry to `--service-cluster-ip-range` flag.
* 3. Maximum of two entries is allowed in `--service-cluster-ip-range` and they are validated to be dual stacked `i.e. --service-cluster-ip-range=<v4>,<v6> or --service-cluster-ip-range=<v6>,<v4>`
* 3. Maximum of two entries is allowed in `--service-cluster-ip-range` and they are validated to be dual stacked `i.e. --service-cluster-ip-range=<v4>,<v6> or --service-cluster-ip-range=<v6>,<v4>`
* 4. Max 20 bit for range (min network bits `<v6>/108` or <v4>/12)
* kube-controller-manager: convert service CIDR to list `--service-cluster-ip-range=<CIDR>,<CIDR>` and make sure `IPv6DualStack` feature flag is turned on. The flag is validated as above.
* + to use:
@@ -1577,7 +1660,7 @@ filename | sha512 hash
* NFS Drivers are now enabled to collect metrics, StatFS metrics provider is used to collect the metrics. ([#75805](https://github.com/kubernetes/kubernetes/pull/75805) , [@brahmaroutu](https://github.com/brahmaroutu)) ([#75805](https://github.com/kubernetes/kubernetes/pull/75805), [@brahmaroutu](https://github.com/brahmaroutu))
* make node lease renew interval more heuristic based on node-status-update-frequency in kubelet ([#80173](https://github.com/kubernetes/kubernetes/pull/80173), [@gaorong](https://github.com/gaorong))
* Introduction of the pod overhead feature to the scheduler. This functionality is alpha-level as of ([#78319](https://github.com/kubernetes/kubernetes/pull/78319), [@egernst](https://github.com/egernst))
* Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.gate.
* Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.gate.
* N/A ([#80260](https://github.com/kubernetes/kubernetes/pull/80260), [@khenidak](https://github.com/khenidak))
* Add v1.Container.SecurityContext.WindowsOptions.RunAsUserName to the pod spec ([#79489](https://github.com/kubernetes/kubernetes/pull/79489), [@bclau](https://github.com/bclau))
* Pass-through volume MountOptions to global mount (NodeStageVolume) on the node for CSI ([#80191](https://github.com/kubernetes/kubernetes/pull/80191), [@davidz627](https://github.com/davidz627))
@@ -1650,7 +1733,7 @@ filename | sha512 hash
* `daemonsets`, `deployments`, `replicasets` resources under `extensions/v1beta1` - use `apps/v1` instead
* `networkpolicies` resources under `extensions/v1beta1` - use `networking.k8s.io/v1` instead
* `podsecuritypolicies` resources under `extensions/v1beta1` - use `policy/v1beta1` instead
* Serving these resources can be temporarily re-enabled using the `--runtime-config` apiserver flag.
* Serving these resources can be temporarily re-enabled using the `--runtime-config` apiserver flag.
* `apps/v1beta1=true`
* `apps/v1beta2=true`
* `extensions/v1beta1/daemonsets=true,extensions/v1beta1/deployments=true,extensions/v1beta1/replicasets=true,extensions/v1beta1/networkpolicies=true,extensions/v1beta1/podsecuritypolicies=true`
@@ -1719,9 +1802,9 @@ filename | sha512 hash
* To configure controller manager to use ipv6dual stack: ([#73977](https://github.com/kubernetes/kubernetes/pull/73977), [@khenidak](https://github.com/khenidak))
* use --cluster-cidr="<cidr1>,<cidr2>".
* Notes:

* 1. Only the first two cidrs are used (soft limits for Alpha, might be lifted later on).
* 2. Only the "RangeAllocator" (default) is allowed as a value for --cidr-allocator-type . Cloud allocators are not compatible with ipv6dualstack

* 1. Only the first two cidrs are used (soft limits for Alpha, might be lifted later on).
* 2. Only the "RangeAllocator" (default) is allowed as a value for --cidr-allocator-type . Cloud allocators are not compatible with ipv6dualstack
* When using the conformance test image, a new environment variable E2E_USE_GO_RUNNER will cause the tests to be run with the new Golang-based test runner rather than the current bash wrapper. ([#79284](https://github.com/kubernetes/kubernetes/pull/79284), [@johnSchnake](https://github.com/johnSchnake))
* kubeadm: prevent PSP blocking of upgrade image prepull by using a non-root user ([#77792](https://github.com/kubernetes/kubernetes/pull/77792), [@neolit123](https://github.com/neolit123))
* kubelet now accepts a --cni-cache-dir option, which defaults to /var/lib/cni/cache, where CNI stores cache files. ([#78908](https://github.com/kubernetes/kubernetes/pull/78908), [@dcbw](https://github.com/dcbw))

2
vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json
generated
vendored
@@ -20106,7 +20106,7 @@
},
"info": {
"title": "Kubernetes",
"version": "v1.16.3"
"version": "v1.16.4"
},
"paths": {
"/api/": {

12
vendor/k8s.io/kubernetes/cluster/addons/metrics-server/metrics-server-deployment.yaml
generated
vendored
@@ -23,24 +23,24 @@ data:
apiVersion: apps/v1
kind: Deployment
metadata:
name: metrics-server-v0.3.4
name: metrics-server-v0.3.6
namespace: kube-system
labels:
k8s-app: metrics-server
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v0.3.4
version: v0.3.6
spec:
selector:
matchLabels:
k8s-app: metrics-server
version: v0.3.4
version: v0.3.6
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
version: v0.3.4
version: v0.3.6
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
@@ -48,7 +48,7 @@ spec:
serviceAccountName: metrics-server
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.3.4
image: k8s.gcr.io/metrics-server-amd64:v0.3.6
command:
- /metrics-server
- --metric-resolution=30s
@@ -90,7 +90,7 @@ spec:
- --memory={{ base_metrics_server_memory }}
- --extra-memory={{ metrics_server_memory_per_node }}Mi
- --threshold=5
- --deployment=metrics-server-v0.3.4
- --deployment=metrics-server-v0.3.6
- --container=metrics-server
- --poll-period=300000
- --estimator=exponential

2
vendor/k8s.io/kubernetes/cluster/gce/manifests/cluster-autoscaler.manifest
generated
vendored
@@ -17,7 +17,7 @@
"containers": [
{
"name": "cluster-autoscaler",
"image": "k8s.gcr.io/cluster-autoscaler:v1.16.1",
"image": "k8s.gcr.io/cluster-autoscaler:v1.16.2",
"livenessProbe": {
"httpGet": {
"path": "/health-check",

14
vendor/k8s.io/kubernetes/cmd/kube-scheduler/app/BUILD
generated
vendored
@@ -1,14 +1,10 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["server.go"],
importpath = "k8s.io/kubernetes/cmd/kube-scheduler/app",
visibility = ["//visibility:public"],
deps = [
"//cmd/kube-scheduler/app/config:go_default_library",
"//cmd/kube-scheduler/app/options:go_default_library",
@@ -22,6 +18,8 @@ go_library(
"//pkg/util/flag:go_default_library",
"//pkg/version:go_default_library",
"//pkg/version/verflag:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/events/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
@@ -32,8 +30,11 @@ go_library(
"//staging/src/k8s.io/apiserver/pkg/server/mux:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/routes:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/term:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/events:go_default_library",
"//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/component-base/cli/flag:go_default_library",
"//staging/src/k8s.io/component-base/cli/globalflag:go_default_library",
"//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library",
@@ -58,4 +59,5 @@ filegroup(
"//cmd/kube-scheduler/app/testing:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

6
vendor/k8s.io/kubernetes/cmd/kube-scheduler/app/config/config.go
generated
vendored
@@ -47,12 +47,12 @@ type Config struct {
Client clientset.Interface
InformerFactory informers.SharedInformerFactory
PodInformer coreinformers.PodInformer
EventClient v1beta1.EventsGetter

// TODO: Remove the following after fully migrating to the new events api.
CoreEventClient v1core.EventsGetter
LeaderElectionBroadcaster record.EventBroadcaster
CoreEventClient v1core.EventsGetter
CoreBroadcaster record.EventBroadcaster

EventClient v1beta1.EventsGetter
Recorder events.EventRecorder
Broadcaster events.EventBroadcaster

1
vendor/k8s.io/kubernetes/cmd/kube-scheduler/app/options/BUILD
generated
vendored
@@ -32,7 +32,6 @@ go_library(
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/client-go/tools/events:go_default_library",
"//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library",
"//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",

14
vendor/k8s.io/kubernetes/cmd/kube-scheduler/app/options/options.go
generated
vendored
@@ -34,7 +34,6 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/events"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
@@ -236,16 +235,13 @@ func (o *Options) Config() (*schedulerappconfig.Config, error) {
return nil, err
}

// Prepare event clients.
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: eventClient.EventsV1beta1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, c.ComponentConfig.SchedulerName)
leaderElectionBroadcaster := record.NewBroadcaster()
leaderElectionRecorder := leaderElectionBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: c.ComponentConfig.SchedulerName})
coreBroadcaster := record.NewBroadcaster()
coreRecorder := coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: c.ComponentConfig.SchedulerName})

// Set up leader election if enabled.
var leaderElectionConfig *leaderelection.LeaderElectionConfig
if c.ComponentConfig.LeaderElection.LeaderElect {
leaderElectionConfig, err = makeLeaderElectionConfig(c.ComponentConfig.LeaderElection, leaderElectionClient, leaderElectionRecorder)
leaderElectionConfig, err = makeLeaderElectionConfig(c.ComponentConfig.LeaderElection, leaderElectionClient, coreRecorder)
if err != nil {
return nil, err
}
@@ -256,9 +252,7 @@ func (o *Options) Config() (*schedulerappconfig.Config, error) {
c.PodInformer = factory.NewPodInformer(client, 0)
c.EventClient = eventClient.EventsV1beta1()
c.CoreEventClient = eventClient.CoreV1()
c.Recorder = recorder
c.Broadcaster = eventBroadcaster
c.LeaderElectionBroadcaster = leaderElectionBroadcaster
c.CoreBroadcaster = coreBroadcaster
c.LeaderElection = leaderElectionConfig

return c, nil

18
vendor/k8s.io/kubernetes/cmd/kube-scheduler/app/server.go
generated
vendored
@@ -27,6 +27,8 @@ import (

"github.com/spf13/cobra"

"k8s.io/api/core/v1"
eventsv1beta1 "k8s.io/api/events/v1beta1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authorization/authorizer"
@@ -37,8 +39,11 @@ import (
"k8s.io/apiserver/pkg/server/mux"
"k8s.io/apiserver/pkg/server/routes"
"k8s.io/apiserver/pkg/util/term"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/events"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/record"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/cli/globalflag"
"k8s.io/component-base/metrics/legacyregistry"
@@ -168,6 +173,15 @@ func Run(cc schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}, regis
}
}

// Prepare event clients.
if _, err := cc.Client.Discovery().ServerResourcesForGroupVersion(eventsv1beta1.SchemeGroupVersion.String()); err == nil {
cc.Broadcaster = events.NewBroadcaster(&events.EventSinkImpl{Interface: cc.EventClient.Events("")})
cc.Recorder = cc.Broadcaster.NewRecorder(scheme.Scheme, cc.ComponentConfig.SchedulerName)
} else {
recorder := cc.CoreBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: cc.ComponentConfig.SchedulerName})
cc.Recorder = record.NewEventRecorderAdapter(recorder)
}

// Create the scheduler.
sched, err := scheduler.New(cc.Client,
cc.InformerFactory.Core().V1().Nodes(),
@@ -200,8 +214,8 @@ func Run(cc schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}, regis
if cc.Broadcaster != nil && cc.EventClient != nil {
cc.Broadcaster.StartRecordingToSink(stopCh)
}
if cc.LeaderElectionBroadcaster != nil && cc.CoreEventClient != nil {
cc.LeaderElectionBroadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: cc.CoreEventClient.Events("")})
if cc.CoreBroadcaster != nil && cc.CoreEventClient != nil {
cc.CoreBroadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: cc.CoreEventClient.Events("")})
}
// Setup healthz checks.
var checks []healthz.HealthChecker

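The hunks above are the heart of the events support this vendor update exists for: the scheduler probes discovery for events.k8s.io/v1beta1 and records through the structured events API when it is served, otherwise it wraps a core/v1 recorder. A self-contained sketch (not from this diff) of the core/v1 wiring that the newly vendored k8s.io/client-go/tools/record provides; the "descheduler" component name and the fake clientset are illustrative assumptions:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder wires a core/v1 event recorder the same way the scheduler's
// fallback branch above does.
func newRecorder(client kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	// Ship recorded events to the core/v1 Events API across all namespaces.
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "descheduler"})
}

func main() {
	rec := newRecorder(fake.NewSimpleClientset())
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	rec.Eventf(pod, v1.EventTypeNormal, "Evicted", "pod %s marked for eviction", pod.Name)
}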
1
vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/upgrade/node.go
generated
vendored
@@ -108,6 +108,7 @@ func newNodeOptions() *nodeOptions {
return &nodeOptions{
kubeConfigPath: constants.GetKubeletKubeConfigPath(),
dryRun: false,
etcdUpgrade: true,
}
}

7
vendor/k8s.io/kubernetes/go.mod
generated
vendored
@@ -38,7 +38,7 @@ require (
github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20 // indirect
github.com/containernetworking/cni v0.7.1
github.com/coredns/corefile-migration v1.0.2
github.com/coreos/etcd v3.3.15+incompatible
github.com/coreos/etcd v3.3.17+incompatible
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
github.com/coreos/rkt v1.30.0 // indirect
@@ -226,8 +226,8 @@ replace (
github.com/containerd/typeurl => github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20
github.com/containernetworking/cni => github.com/containernetworking/cni v0.7.1
github.com/coredns/corefile-migration => github.com/coredns/corefile-migration v1.0.2
github.com/coreos/bbolt => github.com/coreos/bbolt v1.3.1-coreos.6
github.com/coreos/etcd => github.com/coreos/etcd v3.3.15+incompatible
github.com/coreos/bbolt => github.com/coreos/bbolt v1.3.3
github.com/coreos/etcd => github.com/coreos/etcd v3.3.17+incompatible
github.com/coreos/go-etcd => github.com/coreos/go-etcd v2.0.0+incompatible
github.com/coreos/go-oidc => github.com/coreos/go-oidc v2.1.0+incompatible
github.com/coreos/go-semver => github.com/coreos/go-semver v0.3.0
@@ -409,6 +409,7 @@ replace (
github.com/xiang90/probing => github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18
github.com/xlab/handysort => github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1
github.com/xordataexchange/crypt => github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77
go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.3
go.opencensus.io => go.opencensus.io v0.21.0
go.uber.org/atomic => go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569
go.uber.org/multierr => go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df

10
vendor/k8s.io/kubernetes/go.sum
generated
vendored
@@ -93,10 +93,10 @@ github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/coredns/corefile-migration v1.0.2 h1:kQga1ATFIZdkBtU6c/oJdtASLcCRkDh3fW8vVyVdvUc=
github.com/coredns/corefile-migration v1.0.2/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E=
github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.17+incompatible h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo=
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
@@ -429,6 +429,8 @@ github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q
github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1 h1:j2hhcujLRHAg872RWAV5yaUrEjHEObwDv3aImCaNLek=
github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 h1:nSQar3Y0E3VQF/VdZ8PTAilaXpER+d7ypdABCrpwMdg=

10
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/binder_test.go
generated
vendored
@@ -19,7 +19,7 @@ package persistentvolume
import (
"testing"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -621,9 +621,9 @@ func TestSyncBlockVolumeDisabled(t *testing.T) {
// syncVolume binds a requested block claim to a block volume
"14-1 - binding to volumeMode block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, pvutil.AnnBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, pvutil.AnnBoundByController, pvutil.AnnBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
@@ -657,9 +657,9 @@ func TestSyncBlockVolumeDisabled(t *testing.T) {
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
"14-5 - binding different volumeModes should be ignored",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "uid14-5", "claim14-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, pvutil.AnnBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "volume14-5", v1.ClaimBound, nil, pvutil.AnnBoundByController, pvutil.AnnBindCompleted)),
noevents, noerrors, testSyncClaim,
},
}

15
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index_test.go
generated
vendored
@@ -20,7 +20,7 @@ import (
"sort"
"testing"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -1118,19 +1118,19 @@ func TestVolumeModeCheck(t *testing.T) {
enableBlock: true,
},
"feature disabled - pvc block and pv filesystem": {
isExpectedMismatch: false,
isExpectedMismatch: true,
vol: createVolumeModeFilesystemTestVolume(),
pvc: makeVolumeModePVC("8G", &blockMode, nil),
enableBlock: false,
},
"feature disabled - pvc filesystem and pv block": {
isExpectedMismatch: false,
isExpectedMismatch: true,
vol: createVolumeModeBlockTestVolume(),
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
enableBlock: false,
},
"feature disabled - pvc block and pv block": {
isExpectedMismatch: false,
isExpectedMismatch: true,
vol: createVolumeModeBlockTestVolume(),
pvc: makeVolumeModePVC("8G", &blockMode, nil),
enableBlock: false,
@@ -1146,10 +1146,7 @@ func TestVolumeModeCheck(t *testing.T) {
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, scenario.enableBlock)()
expectedMismatch, err := pvutil.CheckVolumeModeMismatches(&scenario.pvc.Spec, &scenario.vol.Spec)
if err != nil {
t.Errorf("Unexpected failure for checkVolumeModeMismatches: %v", err)
}
expectedMismatch := pvutil.CheckVolumeModeMismatches(&scenario.pvc.Spec, &scenario.vol.Spec)
// expected to match but either got an error or no returned pvmatch
if expectedMismatch && !scenario.isExpectedMismatch {
t.Errorf("Unexpected failure for scenario, expected not to mismatch on modes but did: %s", name)
@@ -1222,7 +1219,7 @@ func TestFilteringVolumeModes(t *testing.T) {
enableBlock: false,
},
"2-2 feature disabled - pvc mode is block and pv mode is block - fields should be dropped by api and not analyzed with gate disabled": {
isExpectedMatch: true,
isExpectedMatch: false,
vol: createTestVolOrderedIndex(createVolumeModeBlockTestVolume()),
pvc: makeVolumeModePVC("8G", &blockMode, nil),
enableBlock: false,

8
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go
generated
vendored
@@ -266,11 +266,7 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo
return fmt.Errorf("storageClassName does not match")
}

isMismatch, err := pvutil.CheckVolumeModeMismatches(&claim.Spec, &volume.Spec)
if err != nil {
return fmt.Errorf("error checking volumeMode: %v", err)
}
if isMismatch {
if pvutil.CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) {
return fmt.Errorf("incompatible volumeMode")
}

@@ -587,7 +583,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
}
return nil
} else if claim.Spec.VolumeName == "" {
if isMismatch, err := pvutil.CheckVolumeModeMismatches(&claim.Spec, &volume.Spec); err != nil || isMismatch {
if pvutil.CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) {
// Binding for the volume won't be called in syncUnboundClaim,
// because findBestMatchForClaim won't return the volume due to volumeMode mismatch.
volumeMsg := fmt.Sprintf("Cannot bind PersistentVolume to requested PersistentVolumeClaim %q due to incompatible volumeMode.", claim.Name)

28
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/util.go
generated
vendored
@@ -19,7 +19,7 @@ package persistentvolume
import (
"fmt"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -203,13 +203,8 @@ func FindMatchingVolume(

volumeQty := volume.Spec.Capacity[v1.ResourceStorage]

// check if volumeModes do not match (feature gate protected)
isMismatch, err := CheckVolumeModeMismatches(&claim.Spec, &volume.Spec)
if err != nil {
return nil, fmt.Errorf("error checking if volumeMode was a mismatch: %v", err)
}
// filter out mismatching volumeModes
if isMismatch {
if CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) {
continue
}

@@ -305,9 +300,22 @@ func FindMatchingVolume(

// CheckVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume
// and PersistentVolumeClaims
func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) {
func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) bool {
if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
return false, nil
if pvcSpec.VolumeMode != nil && *pvcSpec.VolumeMode == v1.PersistentVolumeBlock {
// Block PVC does not match anything when the feature is off. We explicitly want
// to prevent binding block PVC to filesystem PV.
// The PVC should be ignored by PV controller.
return true
}
if pvSpec.VolumeMode != nil && *pvSpec.VolumeMode == v1.PersistentVolumeBlock {
// Block PV does not match anything when the feature is off. We explicitly want
// to prevent binding block PV to filesystem PVC.
// The PV should be ignored by PV controller.
return true
}
// Both PV + PVC are not block.
return false
}

// In HA upgrades, we cannot guarantee that the apiserver is on a version >= controller-manager.
@@ -320,7 +328,7 @@ func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1
if pvSpec.VolumeMode != nil {
pvVolumeMode = *pvSpec.VolumeMode
}
return requestedVolumeMode != pvVolumeMode, nil
return requestedVolumeMode != pvVolumeMode
}

// CheckAccessModes returns true if PV satisfies all the PVC's requested AccessModes

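The new CheckVolumeModeMismatches above drops the error return and hard-codes the gate-off rule: with BlockVolume disabled, a Block mode on either side is always treated as a mismatch so it can never bind. A dependency-free sketch of that decision table (the types here are simplified stand-ins, not the v1 API):

package main

import "fmt"

type volumeMode string

const (
	filesystem volumeMode = "Filesystem"
	block      volumeMode = "Block"
)

// mismatch mirrors the logic shown in the hunk: nil means "unspecified",
// which defaults to Filesystem when the gate is on; with the gate off,
// Block on either side never matches.
func mismatch(gateEnabled bool, pvcMode, pvMode *volumeMode) bool {
	if !gateEnabled {
		return (pvcMode != nil && *pvcMode == block) || (pvMode != nil && *pvMode == block)
	}
	requested, actual := filesystem, filesystem
	if pvcMode != nil {
		requested = *pvcMode
	}
	if pvMode != nil {
		actual = *pvMode
	}
	return requested != actual
}

func main() {
	b := block
	fmt.Println(mismatch(false, &b, nil)) // true: block PVC is ignored with the gate off
	fmt.Println(mismatch(true, nil, nil)) // false: both sides default to Filesystem
}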
5
vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go
generated
vendored
@@ -54,11 +54,6 @@ func newPerfCounter(counter string) (*perfCounter, error) {
return nil, errors.New("unable to open query through DLL call")
}

ret = win_pdh.PdhValidatePath(counter)
if ret != win_pdh.ERROR_SUCCESS {
return nil, fmt.Errorf("unable to valid path to counter. Error code is %x", ret)
}

ret = win_pdh.PdhAddEnglishCounter(queryHandle, counter, 0, &counterHandle)
if ret != win_pdh.ERROR_SUCCESS {
return nil, fmt.Errorf("unable to add process counter. Error code is %x", ret)

5
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go
generated
vendored
@@ -445,13 +445,12 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
}

klog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
return c.waitForVolumeDetachment(volID, attachID)
return c.waitForVolumeDetachment(volID, attachID, csiTimeout)
}

func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error {
func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string, timeout time.Duration) error {
klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))

timeout := c.waitSleepTime * 10
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
defer timer.Stop()

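The csi_attacher hunk above moves the detach timeout from a value derived inside the waiter (waitSleepTime * 10) to a caller-supplied parameter, which is how the 2-minute detach timeout noted in the v1.16.3 changelog gets threaded through. A small standalone sketch of the same timer/ticker wait pattern; waitUntil and its arguments are illustrative, not the vendored code:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitUntil polls check until it succeeds or the caller-supplied timeout
// elapses, mirroring the timer-based structure of waitForVolumeDetachment.
func waitUntil(check func() (bool, error), interval, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			done, err := check()
			if err != nil {
				return err
			}
			if done {
				return nil
			}
		case <-timer.C:
			return errors.New("timed out waiting for detachment")
		}
	}
}

func main() {
	start := time.Now()
	err := waitUntil(func() (bool, error) { return time.Since(start) > 30*time.Millisecond, nil },
		10*time.Millisecond, time.Second)
	fmt.Println(err) // <nil>
}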
8
vendor/k8s.io/kubernetes/pkg/volume/gcepd/attacher.go
generated
vendored
@@ -384,6 +384,14 @@ func (detacher *gcePersistentDiskDetacher) Detach(volumeName string, nodeName ty
|
||||
}
|
||||
|
||||
func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string) error {
|
||||
if runtime.GOOS == "windows" {
|
||||
// Flush data cache for windows because it does not do so automatically during unmount device
|
||||
exec := detacher.host.GetExec(gcePersistentDiskPluginName)
|
||||
err := volumeutil.WriteVolumeCache(deviceMountPath, exec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return mount.CleanupMountPoint(deviceMountPath, detacher.host.GetMounter(gcePersistentDiskPluginName), false)
|
||||
}
|
||||
|
||||
|
||||
20
vendor/k8s.io/kubernetes/pkg/volume/util/util.go
generated
vendored
@@ -22,6 +22,7 @@ import (
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"

	v1 "k8s.io/api/core/v1"
@@ -29,7 +30,7 @@ import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	apiruntime "k8s.io/apimachinery/pkg/runtime"
	utypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -196,7 +197,7 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) {
	pod := &v1.Pod{}

	codec := legacyscheme.Codecs.UniversalDecoder()
	if err := runtime.DecodeInto(codec, podDef, pod); err != nil {
	if err := apiruntime.DecodeInto(codec, podDef, pod); err != nil {
		return nil, fmt.Errorf("failed decoding file: %v", err)
	}
	return pod, nil
@@ -590,3 +591,18 @@ func HasMountRefs(mountPath string, mountRefs []string) bool {
	}
	return false
}

// WriteVolumeCache flushes disk data for the specified mount path.
func WriteVolumeCache(deviceMountPath string, exec mount.Exec) error {
	// If the runtime OS is Windows, execute the Write-VolumeCache powershell command on the disk.
	if runtime.GOOS == "windows" {
		cmd := fmt.Sprintf("Get-Volume -FilePath %s | Write-Volumecache", deviceMountPath)
		output, err := exec.Run("powershell", "/c", cmd)
		klog.Infof("command (%q) executed: %v, output: %q", cmd, err, string(output))
		if err != nil {
			return fmt.Errorf("command (%q) failed: %v, output: %q", cmd, err, string(output))
		}
	}
	// On Linux this is skipped, because unmount flushes disk data automatically.
	return nil
}
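Editor's note: for illustration, the same Write-VolumeCache invocation can be reproduced with only the standard library. This is a sketch assuming PowerShell is on PATH; the vendored helper goes through the mount.Exec interface instead:

```go
package main

import (
	"fmt"
	"os/exec"
	"runtime"
)

// flushVolumeCache is a hypothetical stand-in for the vendored
// WriteVolumeCache helper: on Windows it pipes the volume that owns
// deviceMountPath into Write-VolumeCache; elsewhere it is a no-op
// because unmount already flushes disk data.
func flushVolumeCache(deviceMountPath string) error {
	if runtime.GOOS != "windows" {
		return nil
	}
	cmd := fmt.Sprintf("Get-Volume -FilePath %s | Write-VolumeCache", deviceMountPath)
	output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
	if err != nil {
		return fmt.Errorf("command (%q) failed: %v, output: %q", cmd, err, string(output))
	}
	return nil
}
```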
2
vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/go.mod
generated
vendored
@@ -5,7 +5,7 @@ module k8s.io/apiextensions-apiserver
go 1.12

require (
	github.com/coreos/etcd v3.3.15+incompatible
	github.com/coreos/etcd v3.3.17+incompatible
	github.com/emicklei/go-restful v2.9.5+incompatible
	github.com/go-openapi/errors v0.19.2
	github.com/go-openapi/spec v0.19.2
10
vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/go.sum
generated
vendored
@@ -32,11 +32,11 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.17+incompatible h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo=
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -262,6 +262,8 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4MEFmbnK4h3BD7AUmskWv2+EeZJCCs=
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 h1:nSQar3Y0E3VQF/VdZ8PTAilaXpER+d7ypdABCrpwMdg=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
2
vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go
generated
vendored
@@ -45,7 +45,7 @@ type IntOrString struct {
}

// Type represents the stored type of IntOrString.
type Type int
type Type int64

const (
	Int Type = iota // The IntOrString holds an int.
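Editor's note: widening Type from int to int64 pins the discriminant to a fixed width across architectures (a likely motivation; the hunk itself carries no rationale). A small usage sketch of IntOrString, which marshals as a bare number or a quoted string depending on which variant it holds:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// IntOrString carries either an int or a string; the Type field
	// records which variant is set.
	port := intstr.FromInt(8080)
	name := intstr.FromString("http")

	b1, _ := json.Marshal(port)
	b2, _ := json.Marshal(name)
	fmt.Println(string(b1), string(b2)) // 8080 "http"
}
```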
5
vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/go.mod
generated
vendored
@@ -6,8 +6,8 @@ go 1.12

require (
	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
	github.com/coreos/bbolt v1.3.1-coreos.6 // indirect
	github.com/coreos/etcd v3.3.15+incompatible
	github.com/coreos/bbolt v1.3.3 // indirect
	github.com/coreos/etcd v3.3.17+incompatible
	github.com/coreos/go-oidc v2.1.0+incompatible
	github.com/coreos/go-semver v0.3.0 // indirect
	github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7
@@ -38,6 +38,7 @@ require (
	github.com/stretchr/testify v1.3.0
	github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 // indirect
	github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 // indirect
	go.etcd.io/bbolt v1.3.3 // indirect
	go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 // indirect
	go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df // indirect
	go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15 // indirect
10
vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/go.sum
generated
vendored
@@ -25,10 +25,10 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.17+incompatible h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo=
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
@@ -196,6 +196,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 h1:ndzgwN
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4MEFmbnK4h3BD7AUmskWv2+EeZJCCs=
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 h1:nSQar3Y0E3VQF/VdZ8PTAilaXpER+d7ypdABCrpwMdg=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -341,7 +341,7 @@ func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr
	// TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
	if len(p.patchBytes) > 1024*1024 {
		v := []interface{}{}
		if err := json.Unmarshal(p.patchBytes, v); err != nil {
		if err := json.Unmarshal(p.patchBytes, &v); err != nil {
			return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
		}
	}
@@ -365,7 +365,7 @@ func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr
	// TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
	if len(p.patchBytes) > 1024*1024 {
		v := map[string]interface{}{}
		if err := json.Unmarshal(p.patchBytes, v); err != nil {
		if err := json.Unmarshal(p.patchBytes, &v); err != nil {
			return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
		}
	}
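Editor's note: both hunks fix the same bug. json.Unmarshal must be handed a pointer to its target; passing the slice or map by value makes it return an InvalidUnmarshalError, so the size-limit check never actually inspected the patch. A minimal standalone demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`[{"op": "add", "path": "/spec/x", "value": 1}]`)

	v := []interface{}{}
	// Passing the slice by value fails: Unmarshal needs a pointer so it
	// can grow and write the destination.
	fmt.Println(json.Unmarshal(data, v))  // json: Unmarshal(non-pointer []interface {})
	fmt.Println(json.Unmarshal(data, &v)) // <nil>
}
```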
@@ -285,10 +285,12 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) {
	buf := &bytes.Buffer{}
	streamBuf := &bytes.Buffer{}
	ch := s.Watching.ResultChan()

	defer s.Watching.Stop()

	for {
		select {
		case <-done:
			s.Watching.Stop()
			return
		case event, ok := <-ch:
			if !ok {
@@ -317,25 +319,21 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) {
			if err != nil {
				utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err))
				// client disconnect.
				s.Watching.Stop()
				return
			}
			if err := s.Encoder.Encode(outEvent, streamBuf); err != nil {
				// encoding error
				utilruntime.HandleError(fmt.Errorf("unable to encode event: %v", err))
				s.Watching.Stop()
				return
			}
			if s.UseTextFraming {
				if err := websocket.Message.Send(ws, streamBuf.String()); err != nil {
					// Client disconnect.
					s.Watching.Stop()
					return
				}
			} else {
				if err := websocket.Message.Send(ws, streamBuf.Bytes()); err != nil {
					// Client disconnect.
					s.Watching.Stop()
					return
				}
			}
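Editor's note: the hunks trade per-branch s.Watching.Stop() calls for a single defer, so every return path (client disconnect, encode error, channel close) releases the watch. The pattern in miniature:

```go
package main

import "fmt"

type watcher struct{}

func (w *watcher) Stop() { fmt.Println("watch stopped") }

// serve returns through several paths; the single deferred Stop covers
// all of them, which is what the hunk above switches to.
func serve(events <-chan string) {
	w := &watcher{}
	defer w.Stop()

	for ev := range events {
		if ev == "error" {
			return // Stop still runs
		}
		fmt.Println("sent", ev)
	}
	// channel closed: Stop still runs
}

func main() {
	ch := make(chan string, 2)
	ch <- "add"
	ch <- "error"
	close(ch)
	serve(ch)
}
```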
25
vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
generated
vendored
@@ -178,7 +178,7 @@ var (
		},
		[]string{"group", "version", "kind"},
	)
	// Because of volatality of the base metric this is pre-aggregated one. Instead of reporing current usage all the time
	// Because of volatility of the base metric this is pre-aggregated one. Instead of reporting current usage all the time
	// it reports maximal usage during the last second.
	currentInflightRequests = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
@@ -188,6 +188,15 @@ var (
		},
		[]string{"requestKind"},
	)

	requestTerminationsTotal = compbasemetrics.NewCounterVec(
		&compbasemetrics.CounterOpts{
			Name:           "apiserver_request_terminations_total",
			Help:           "Number of requests which apiserver terminated in self-defense.",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"},
	)
	kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`)

	metrics = []resettableCollector{
@@ -204,6 +213,7 @@ var (
		WatchEvents,
		WatchEventsSizes,
		currentInflightRequests,
		requestTerminationsTotal,
	}
)

@@ -237,10 +247,11 @@ func UpdateInflightRequestMetrics(nonmutating, mutating int) {
	currentInflightRequests.WithLabelValues(MutatingKind).Set(float64(mutating))
}

// Record records a single request to the standard metrics endpoints. For use by handlers that perform their own
// processing. All API paths should use InstrumentRouteFunc implicitly. Use this instead of MonitorRequest if
// you already have a RequestInfo object.
func Record(req *http.Request, requestInfo *request.RequestInfo, component, contentType string, code int, responseSizeInBytes int, elapsed time.Duration) {
// RecordRequestTermination records that the request was terminated early as part of a resource
// preservation or apiserver self-defense mechanism (e.g. timeouts, maxinflight throttling,
// proxyHandler errors). RecordRequestTermination should only be called zero or one times
// per request.
func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInfo, component string, code int) {
	if requestInfo == nil {
		requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path}
	}
@@ -252,9 +263,9 @@ func Record(req *http.Request, requestInfo *request.RequestInfo, component, cont
	// However, we need to tweak it e.g. to differentiate GET from LIST.
	verb := canonicalVerb(strings.ToUpper(req.Method), scope)
	if requestInfo.IsResourceRequest {
		MonitorRequest(req, verb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, contentType, code, responseSizeInBytes, elapsed)
		requestTerminationsTotal.WithLabelValues(cleanVerb(verb, req), requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc()
	} else {
		MonitorRequest(req, verb, "", "", "", requestInfo.Path, scope, component, contentType, code, responseSizeInBytes, elapsed)
		requestTerminationsTotal.WithLabelValues(cleanVerb(verb, req), "", "", "", requestInfo.Path, scope, component, codeToString(code)).Inc()
	}
}
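Editor's note: requestTerminationsTotal is a labelled counter; each early termination increments exactly one series identified by the (verb, group, version, resource, subresource, scope, component, code) tuple. The same pattern with the plain Prometheus client, as a sketch (the vendored code uses the component-base metrics wrappers shown above, not this client directly):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var requestTerminations = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "apiserver_request_terminations_total",
		Help: "Number of requests which apiserver terminated in self-defense.",
	},
	[]string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"},
)

func main() {
	prometheus.MustRegister(requestTerminations)
	// e.g. a timed-out GET on pods handled by the apiserver component.
	requestTerminations.WithLabelValues("GET", "", "v1", "pods", "", "resource", "apiserver", "504").Inc()
	fmt.Println("recorded one termination")
}
```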
@@ -178,7 +178,7 @@ func WithMaxInFlightLimit(
				}
			}
		}
		metrics.Record(r, requestInfo, metrics.APIServerComponent, "", http.StatusTooManyRequests, 0, 0)
		metrics.RecordRequestTermination(r, requestInfo, metrics.APIServerComponent, http.StatusTooManyRequests)
		tooManyRequests(r, w)
	}
}
2
vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go
generated
vendored
@@ -59,7 +59,7 @@ func WithTimeoutForNonLongRunningRequests(handler http.Handler, longRunning apir

		postTimeoutFn := func() {
			cancel()
			metrics.Record(req, requestInfo, metrics.APIServerComponent, "", http.StatusGatewayTimeout, 0, 0)
			metrics.RecordRequestTermination(req, requestInfo, metrics.APIServerComponent, http.StatusGatewayTimeout)
		}
		return req, time.After(timeout), postTimeoutFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0)
	}
14
vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/cache/delta_fifo.go
generated
vendored
@@ -295,13 +295,6 @@ func isDeletionDup(a, b *Delta) *Delta {
	return b
}

// willObjectBeDeletedLocked returns true only if the last delta for the
// given object is Delete. Caller must lock first.
func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
	deltas := f.items[id]
	return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
}

// queueActionLocked appends to the delta list for the object.
// Caller must lock first.
func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
@@ -310,13 +303,6 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
		return KeyError{obj, err}
	}

	// If object is supposed to be deleted (last event is Deleted),
	// then we should ignore Sync events, because it would result in
	// recreation of this object.
	if actionType == Sync && f.willObjectBeDeletedLocked(id) {
		return nil
	}

	newDeltas := append(f.items[id], Delta{actionType, obj})
	newDeltas = dedupDeltas(newDeltas)
27
vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/cache/delta_fifo_test.go
generated
vendored
@@ -85,6 +85,33 @@ func TestDeltaFIFO_basic(t *testing.T) {
	}
}

// TestDeltaFIFO_replaceWithDeleteDeltaIn tests that a `Sync` delta for an
// object `O` with ID `X` is added when .Replace is called and `O` is among the
// replacement objects even if the DeltaFIFO already stores in terminal position
// a delta of type `Delete` for ID `X`. Not adding the `Sync` delta causes
// SharedIndexInformers to miss `O`'s create notification, see https://github.com/kubernetes/kubernetes/issues/83810
// for more details.
func TestDeltaFIFO_replaceWithDeleteDeltaIn(t *testing.T) {
	oldObj := mkFifoObj("foo", 1)
	newObj := mkFifoObj("foo", 2)

	f := NewDeltaFIFO(testFifoObjectKeyFunc, keyLookupFunc(func() []testFifoObject {
		return []testFifoObject{oldObj}
	}))

	f.Delete(oldObj)
	f.Replace([]interface{}{newObj}, "")

	actualDeltas := Pop(f)
	expectedDeltas := Deltas{
		Delta{Type: Deleted, Object: oldObj},
		Delta{Type: Sync, Object: newObj},
	}
	if !reflect.DeepEqual(expectedDeltas, actualDeltas) {
		t.Errorf("expected %#v, got %#v", expectedDeltas, actualDeltas)
	}
}

func TestDeltaFIFO_requeueOnPop(t *testing.T) {
	f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
19
vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/record/event.go
generated
vendored
@@ -129,6 +129,25 @@ type EventBroadcaster interface {
	NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder
}

// EventRecorderAdapter is a wrapper around EventRecorder implementing the
// new EventRecorder interface.
type EventRecorderAdapter struct {
	recorder EventRecorder
}

// NewEventRecorderAdapter returns an adapter implementing the new
// EventRecorder interface.
func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter {
	return &EventRecorderAdapter{
		recorder: recorder,
	}
}

// Eventf is a wrapper around v1 Eventf
func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
	a.recorder.Eventf(regarding, eventtype, reason, note, args...)
}

// Creates a new event broadcaster.
func NewBroadcaster() EventBroadcaster {
	return &eventBroadcasterImpl{
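Editor's note: this adapter is the piece the commit's event support hangs on. A caller holding a legacy EventRecorder can satisfy the newer events interface, with the extra related-object and action arguments simply discarded. A hedged usage sketch (the component name and event fields are illustrative; real code would also start the broadcaster with a sink):

```go
package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
)

func main() {
	broadcaster := record.NewBroadcaster()
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "descheduler"})

	// The adapter exposes the new-style Eventf (with a related object and
	// an action) on top of the legacy recorder; both extras are dropped.
	adapter := record.NewEventRecorderAdapter(recorder)

	pod := &v1.Pod{}
	adapter.Eventf(pod, nil, v1.EventTypeNormal, "Evicted", "Eviction", "pod evicted from %s", "node-1")
}
```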
10
vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/go.sum
generated
vendored
@@ -26,11 +26,11 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.17+incompatible h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo=
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -218,6 +218,8 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4MEFmbnK4h3BD7AUmskWv2+EeZJCCs=
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 h1:nSQar3Y0E3VQF/VdZ8PTAilaXpER+d7ypdABCrpwMdg=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -100,7 +100,7 @@ func proxyError(w http.ResponseWriter, req *http.Request, error string, code int
		return
	}
	// TODO: record long-running request differently? The long-running check func does not necessarily match the one of the aggregated apiserver
	endpointmetrics.Record(req, info, aggregatorComponent, "", code, 0, 0)
	endpointmetrics.RecordRequestTermination(req, info, aggregatorComponent, code)
}

func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
@@ -65,11 +65,11 @@ func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) {
}

// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) {
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName, crt cacheReadType) (compute.VirtualMachine, error) {
	var machine compute.VirtualMachine
	var retryErr error
	err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
		machine, retryErr = az.getVirtualMachine(name)
		machine, retryErr = az.getVirtualMachine(name, crt)
		if retryErr == cloudprovider.InstanceNotFound {
			return true, cloudprovider.InstanceNotFound
		}
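Editor's note: the retry wrapper above is a standard wait.ExponentialBackoff loop that stops early on a terminal error and keeps retrying on transient ones. The skeleton in isolation, with a hypothetical fetch standing in for az.getVirtualMachine:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

var errNotFound = errors.New("instance not found")

// doFetch is a hypothetical stand-in that fails twice, then succeeds.
func doFetch(attempt int) error {
	if attempt < 3 {
		return errors.New("transient ARM error")
	}
	return nil
}

func main() {
	backoff := wait.Backoff{Duration: 10 * time.Millisecond, Factor: 2, Steps: 5}
	attempt := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		retryErr := doFetch(attempt)
		if retryErr == errNotFound {
			// Terminal: returning the error stops the retry loop early.
			return true, errNotFound
		}
		// done=true on success; done=false retries after the next backoff.
		return retryErr == nil, nil
	})
	fmt.Println(attempt, err) // 3 <nil>
}
```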
74
vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_cache.go
generated
vendored
@@ -26,6 +26,20 @@ import (
	"k8s.io/client-go/tools/cache"
)

// cacheReadType defines the read type for cache data
type cacheReadType int

const (
	// cacheReadTypeDefault returns data from the cache if the entry is not
	// expired; if the entry has expired, it refetches the data using the
	// getter, saves the entry in the cache, and then returns it
	cacheReadTypeDefault cacheReadType = iota
	// cacheReadTypeUnsafe returns data from the cache even if the entry is
	// expired. If the entry doesn't exist in the cache, the data is fetched
	// using the getter, saved in the cache, and returned
	cacheReadTypeUnsafe
)

// getFunc defines a getter function for timedCache.
type getFunc func(key string) (interface{}, error)

@@ -36,6 +50,8 @@ type cacheEntry struct {

	// The lock to ensure not updating same entry simultaneously.
	lock sync.Mutex
	// time when entry was fetched and created
	createdOn time.Time
}

// cacheKeyFunc defines the key function required in TTLStore.
@@ -48,6 +64,7 @@ type timedCache struct {
	store  cache.Store
	lock   sync.Mutex
	getter getFunc
	ttl    time.Duration
}

// newTimedcache creates a new timedCache.
@@ -58,7 +75,11 @@ func newTimedcache(ttl time.Duration, getter getFunc) (*timedCache, error) {

	return &timedCache{
		getter: getter,
		store:  cache.NewTTLStore(cacheKeyFunc, ttl),
		// switch to using NewStore instead of NewTTLStore so that we can
		// reuse entries for calls that are fine with reading expired/stale data.
		// with NewTTLStore, entries are not returned if they have already expired.
		store: cache.NewStore(cacheKeyFunc),
		ttl:   ttl,
	}, nil
}

@@ -69,19 +90,15 @@ func (t *timedCache) getInternal(key string) (*cacheEntry, error) {
	if err != nil {
		return nil, err
	}
	// if entry exists, return the entry
	if exists {
		return entry.(*cacheEntry), nil
	}

	// lock here to ensure if entry doesn't exist, we add a new entry
	// avoiding overwrites
	t.lock.Lock()
	defer t.lock.Unlock()
	entry, exists, err = t.store.GetByKey(key)
	if err != nil {
		return nil, err
	}
	if exists {
		return entry.(*cacheEntry), nil
	}

	// Still not found, add new entry with nil data.
	// Note the data will be filled later by getter.
@@ -94,26 +111,38 @@ func (t *timedCache) getInternal(key string) (*cacheEntry, error) {
}

// Get returns the requested item by key.
func (t *timedCache) Get(key string) (interface{}, error) {
func (t *timedCache) Get(key string, crt cacheReadType) (interface{}, error) {
	entry, err := t.getInternal(key)
	if err != nil {
		return nil, err
	}

	// Data is still not cached yet, cache it by getter.
	if entry.data == nil {
		entry.lock.Lock()
		defer entry.lock.Unlock()
	entry.lock.Lock()
	defer entry.lock.Unlock()

		if entry.data == nil {
			data, err := t.getter(key)
			if err != nil {
				return nil, err
			}

			entry.data = data
	// entry exists
	if entry.data != nil {
		// allow unsafe read, so return data even if expired
		if crt == cacheReadTypeUnsafe {
			return entry.data, nil
		}
		// if cached data is not expired, return cached data
		if time.Since(entry.createdOn) < t.ttl {
			return entry.data, nil
		}
	}
	// Data is not cached yet or cache data is expired, cache it by getter.
	// entry is locked before getting to ensure concurrent gets don't result in
	// multiple ARM calls.
	data, err := t.getter(key)
	if err != nil {
		return nil, err
	}

	// set the data in cache and also set the last update time
	// to now as the data was recently fetched
	entry.data = data
	entry.createdOn = time.Now().UTC()

	return entry.data, nil
}
@@ -129,7 +158,8 @@ func (t *timedCache) Delete(key string) error {
// It is only used for testing.
func (t *timedCache) Set(key string, data interface{}) {
	t.store.Add(&cacheEntry{
		key:  key,
		data: data,
		key:       key,
		data:      data,
		createdOn: time.Now().UTC(),
	})
}
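Editor's note: a sketch of the contract the rework above establishes, written as a test that would have to sit beside azure_cache.go since it uses package-private identifiers; the getter here is illustrative:

```go
package azure

import (
	"fmt"
	"testing"
	"time"
)

// TestReadTypeContract sketches the two read types: a default read
// refetches once the TTL has passed, while an unsafe read serves the
// expired entry without touching the getter.
func TestReadTypeContract(t *testing.T) {
	calls := 0
	getter := func(key string) (interface{}, error) {
		calls++
		return fmt.Sprintf("arm-data-for-%s", key), nil
	}
	c, err := newTimedcache(50*time.Millisecond, getter)
	if err != nil {
		t.Fatal(err)
	}

	c.Get("vm1", cacheReadTypeDefault) // miss: getter runs once
	c.Get("vm1", cacheReadTypeDefault) // fresh hit: served from cache
	time.Sleep(60 * time.Millisecond)  // let the entry expire
	c.Get("vm1", cacheReadTypeUnsafe)  // expired, but unsafe read still returns cached data
	c.Get("vm1", cacheReadTypeDefault) // expired + default read: getter runs again
	if calls != 2 {
		t.Fatalf("expected 2 getter calls, got %d", calls)
	}
}
```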
@@ -99,7 +99,7 @@ func TestCacheGet(t *testing.T) {
	for _, c := range cases {
		dataSource, cache := newFakeCache(t)
		dataSource.set(c.data)
		val, err := cache.Get(c.key)
		val, err := cache.Get(c.key, cacheReadTypeDefault)
		assert.NoError(t, err, c.name)
		assert.Equal(t, c.expected, val, c.name)
	}
@@ -113,7 +113,7 @@ func TestCacheGetError(t *testing.T) {
	cache, err := newTimedcache(fakeCacheTTL, getter)
	assert.NoError(t, err)

	val, err := cache.Get("key")
	val, err := cache.Get("key", cacheReadTypeDefault)
	assert.Error(t, err)
	assert.Equal(t, getError, err)
	assert.Nil(t, val)
@@ -128,13 +128,13 @@ func TestCacheDelete(t *testing.T) {
	dataSource, cache := newFakeCache(t)
	dataSource.set(data)

	v, err := cache.Get(key)
	v, err := cache.Get(key, cacheReadTypeDefault)
	assert.NoError(t, err)
	assert.Equal(t, val, v, "cache should get correct data")

	dataSource.set(nil)
	cache.Delete(key)
	v, err = cache.Get(key)
	v, err = cache.Get(key, cacheReadTypeDefault)
	assert.NoError(t, err)
	assert.Equal(t, 1, dataSource.called)
	assert.Equal(t, nil, v, "cache should get nil after data is removed")
@@ -149,14 +149,58 @@ func TestCacheExpired(t *testing.T) {
	dataSource, cache := newFakeCache(t)
	dataSource.set(data)

	v, err := cache.Get(key)
	v, err := cache.Get(key, cacheReadTypeDefault)
	assert.NoError(t, err)
	assert.Equal(t, 1, dataSource.called)
	assert.Equal(t, val, v, "cache should get correct data")

	time.Sleep(fakeCacheTTL)
	v, err = cache.Get(key)
	v, err = cache.Get(key, cacheReadTypeDefault)
	assert.NoError(t, err)
	assert.Equal(t, 2, dataSource.called)
	assert.Equal(t, val, v, "cache should get correct data even after expired")
}

func TestCacheAllowUnsafeRead(t *testing.T) {
	key := "key1"
	val := &fakeDataObj{}
	data := map[string]*fakeDataObj{
		key: val,
	}
	dataSource, cache := newFakeCache(t)
	dataSource.set(data)

	v, err := cache.Get(key, cacheReadTypeDefault)
	assert.NoError(t, err)
	assert.Equal(t, 1, dataSource.called)
	assert.Equal(t, val, v, "cache should get correct data")

	time.Sleep(fakeCacheTTL)
	v, err = cache.Get(key, cacheReadTypeUnsafe)
	assert.NoError(t, err)
	assert.Equal(t, 1, dataSource.called)
	assert.Equal(t, val, v, "cache should return expired data when unsafe read is allowed")
}

func TestCacheNoConcurrentGet(t *testing.T) {
	key := "key1"
	val := &fakeDataObj{}
	data := map[string]*fakeDataObj{
		key: val,
	}
	dataSource, cache := newFakeCache(t)
	dataSource.set(data)

	time.Sleep(fakeCacheTTL)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go cache.Get(key, cacheReadTypeDefault)
		wg.Done()
	}
	v, err := cache.Get(key, cacheReadTypeDefault)
	wg.Wait()
	assert.NoError(t, err)
	assert.Equal(t, 1, dataSource.called)
	assert.Equal(t, val, v, "cache should get correct data")
}
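Editor's note: TestCacheNoConcurrentGet calls wg.Done() on the spawning goroutine rather than inside the spawned one, so wg.Wait() does not actually synchronize the five concurrent Gets; the assertion still holds because the per-entry lock serializes the fetch. For contrast, the conventional WaitGroup arrangement (a sketch, not a proposed patch to the vendored file):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mu    sync.Mutex
		calls int
	)
	get := func() {
		mu.Lock()
		calls++ // stand-in for cache.Get
		mu.Unlock()
	}

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done() // Done runs inside the goroutine it guards
			get()
		}()
	}
	wg.Wait()          // now this really waits for all five
	fmt.Println(calls) // 5
}
```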
@@ -69,7 +69,7 @@ type controllerCommon struct {
}

// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) {
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt cacheReadType) (VMSet, error) {
	// 1. vmType is standard, return cloud.vmSet directly.
	if c.cloud.VMType == vmTypeStandard {
		return c.cloud.vmSet, nil
@@ -82,7 +82,7 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error)
	}

	// 3. If the node is managed by availability set, then return ss.availabilitySet.
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName), crt)
	if err != nil {
		return nil, err
	}
@@ -124,14 +124,14 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
		}
	}

	vmset, err := c.getNodeVMSet(nodeName)
	vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe)
	if err != nil {
		return -1, err
	}

	instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
	if err != nil {
		klog.Warningf("failed to get azure instance id (%v)", err)
		klog.Warningf("failed to get azure instance id (%v) for node %s", err, nodeName)
		return -1, fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
	}

@@ -162,7 +162,7 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
		return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
	}

	vmset, err := c.getNodeVMSet(nodeName)
	vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe)
	if err != nil {
		return err
	}
@@ -197,18 +197,20 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
}

// getNodeDataDisks invokes vmSet interfaces to get data disks for the node.
func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
	vmset, err := c.getNodeVMSet(nodeName)
func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) {
	vmset, err := c.getNodeVMSet(nodeName, crt)
	if err != nil {
		return nil, err
	}

	return vmset.GetDataDisks(nodeName)
	return vmset.GetDataDisks(nodeName, crt)
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
	disks, err := c.getNodeDataDisks(nodeName)
	// getNodeDataDisks needs the cached data (or fresh data if the cache has expired) here
	// to ensure we get the LUN based on the latest entry.
	disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeDefault)
	if err != nil {
		klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
		return -1, err
@@ -228,7 +230,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N

// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
	disks, err := c.getNodeDataDisks(nodeName)
	disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeDefault)
	if err != nil {
		klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
		return -1, err
@@ -255,7 +257,11 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N
		attached[diskName] = false
	}

	disks, err := c.getNodeDataDisks(nodeName)
	// doing a stale read in getNodeDataDisks to ensure we don't call ARM
	// for every reconcile call. The cache is invalidated after Attach/Detach
	// disk. So the new entry will be fetched and cached the first time the reconcile
	// loop runs after the Attach/Detach op, which will reflect the latest model.
	disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeUnsafe)
	if err != nil {
		if err == cloudprovider.InstanceNotFound {
			// if host doesn't exist, no need to detach
@@ -31,7 +31,7 @@ import (
// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
	vm, err := as.getVirtualMachine(nodeName)
	vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault)
	if err != nil {
		return err
	}
@@ -102,7 +102,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
// DetachDisk detaches a disk from host
// the vhd can be identified by diskName or diskURI
func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
	vm, err := as.getVirtualMachine(nodeName)
	vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault)
	if err != nil {
		// if host doesn't exist, no need to detach
		klog.Warningf("azureDisk - cannot find node %s, skip detaching disk(%s, %s)", nodeName, diskName, diskURI)
@@ -155,8 +155,8 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
}

// GetDataDisks gets a list of data disks attached to the node.
func (as *availabilitySet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
	vm, err := as.getVirtualMachine(nodeName)
func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) {
	vm, err := as.getVirtualMachine(nodeName, crt)
	if err != nil {
		return nil, err
	}
@@ -20,6 +20,7 @@ package azure

import (
	"testing"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
@@ -99,12 +100,14 @@ func TestGetDataDisks(t *testing.T) {
		nodeName          types.NodeName
		expectedDataDisks []compute.DataDisk
		expectedError     bool
		crt               cacheReadType
	}{
		{
			desc:              "an error shall be returned if there's no corresponding vm",
			nodeName:          "vm2",
			expectedDataDisks: nil,
			expectedError:     true,
			crt:               cacheReadTypeDefault,
		},
		{
			desc: "correct list of data disks shall be returned if everything is good",
@@ -116,6 +119,19 @@ func TestGetDataDisks(t *testing.T) {
				},
			},
			expectedError: false,
			crt:           cacheReadTypeDefault,
		},
		{
			desc:     "correct list of data disks shall be returned if everything is good",
			nodeName: "vm1",
			expectedDataDisks: []compute.DataDisk{
				{
					Lun:  to.Int32Ptr(0),
					Name: to.StringPtr("disk1"),
				},
			},
			expectedError: false,
			crt:           cacheReadTypeUnsafe,
		},
	}
	for i, test := range testCases {
@@ -123,8 +139,15 @@ func TestGetDataDisks(t *testing.T) {
		vmSet := testCloud.vmSet
		setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false)

		dataDisks, err := vmSet.GetDataDisks(test.nodeName)
		dataDisks, err := vmSet.GetDataDisks(test.nodeName, test.crt)
		assert.Equal(t, test.expectedDataDisks, dataDisks, "TestCase[%d]: %s", i, test.desc)
		assert.Equal(t, test.expectedError, err != nil, "TestCase[%d]: %s", i, test.desc)

		if test.crt == cacheReadTypeUnsafe {
			time.Sleep(fakeCacheTTL)
			dataDisks, err := vmSet.GetDataDisks(test.nodeName, test.crt)
			assert.Equal(t, test.expectedDataDisks, dataDisks, "TestCase[%d]: %s", i, test.desc)
			assert.Equal(t, test.expectedError, err != nil, "TestCase[%d]: %s", i, test.desc)
		}
	}
}
@@ -32,7 +32,7 @@ import (
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
	vmName := mapNodeNameToVMName(nodeName)
	ssName, instanceID, vm, err := ss.getVmssVM(vmName)
	ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
	if err != nil {
		return err
	}
@@ -109,7 +109,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
// the vhd can be identified by diskName or diskURI
func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
	vmName := mapNodeNameToVMName(nodeName)
	ssName, instanceID, vm, err := ss.getVmssVM(vmName)
	ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
	if err != nil {
		return nil, err
	}
@@ -167,8 +167,8 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
}

// GetDataDisks gets a list of data disks attached to the node.
func (ss *scaleSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
	_, _, vm, err := ss.getVmssVM(string(nodeName))
func (ss *scaleSet) GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) {
	_, _, vm, err := ss.getVmssVM(string(nodeName), crt)
	if err != nil {
		return nil, err
	}

@@ -959,7 +959,7 @@ func (f *fakeVMSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
	return nil, fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) {
	return nil, fmt.Errorf("unimplemented")
}
@@ -144,8 +144,9 @@ func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{}
}

// GetMetadata gets instance metadata from cache.
func (ims *InstanceMetadataService) GetMetadata() (*InstanceMetadata, error) {
	cache, err := ims.imsCache.Get(metadataCacheKey)
// crt determines whether data may come from the stale cache or must be refetched when the cache has expired.
func (ims *InstanceMetadataService) GetMetadata(crt cacheReadType) (*InstanceMetadata, error) {
	cache, err := ims.imsCache.Get(metadataCacheKey, crt)
	if err != nil {
		return nil, err
	}
@@ -73,7 +73,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
	}

	if az.UseInstanceMetadata {
		metadata, err := az.metadata.GetMetadata()
		metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
		if err != nil {
			return nil, err
		}
@@ -259,7 +259,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
	}

	if az.UseInstanceMetadata {
		metadata, err := az.metadata.GetMetadata()
		metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
		if err != nil {
			return "", err
		}
@@ -346,7 +346,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
	}

	if az.UseInstanceMetadata {
		metadata, err := az.metadata.GetMetadata()
		metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
		if err != nil {
			return "", err
		}
@@ -962,7 +962,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,

	if isInternal {
		// Refresh updated lb which will be used later in other places.
		newLB, exist, err := az.getAzureLoadBalancer(lbName)
		newLB, exist, err := az.getAzureLoadBalancer(lbName, cacheReadTypeDefault)
		if err != nil {
			klog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
			return nil, err
@@ -1125,7 +1125,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
		ports = []v1.ServicePort{}
	}

	sg, err := az.getSecurityGroup()
	sg, err := az.getSecurityGroup(cacheReadTypeDefault)
	if err != nil {
		return nil, err
	}
@@ -1466,7 +1466,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa
	}

	if lbName != "" {
		loadBalancer, _, err := az.getAzureLoadBalancer(lbName)
		loadBalancer, _, err := az.getAzureLoadBalancer(lbName, cacheReadTypeDefault)
		if err != nil {
			return nil, err
		}
@@ -46,7 +46,7 @@ const (
// ListRoutes lists all managed routes that belong to the specified clusterName
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
	klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
	routeTable, existsRouteTable, err := az.getRouteTable()
	routeTable, existsRouteTable, err := az.getRouteTable(cacheReadTypeDefault)
	routes, err := processRoutes(routeTable, existsRouteTable, err)
	if err != nil {
		return nil, err
@@ -102,7 +102,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
}

func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
	if _, existsRouteTable, err := az.getRouteTable(); err != nil {
	if _, existsRouteTable, err := az.getRouteTable(cacheReadTypeDefault); err != nil {
		klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		return err
	} else if existsRouteTable {
@@ -375,14 +375,14 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
	var machine compute.VirtualMachine
	var err error

	machine, err = as.getVirtualMachine(types.NodeName(name))
	machine, err = as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
	if err == cloudprovider.InstanceNotFound {
		return "", cloudprovider.InstanceNotFound
	}
	if err != nil {
		if as.CloudProviderBackoff {
			klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
			machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
			machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name), cacheReadTypeUnsafe)
			if err != nil {
				klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
				return "", err
@@ -403,7 +403,7 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)

// GetPowerStatusByNodeName returns the power state of the specified node.
func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
	vm, err := as.getVirtualMachine(types.NodeName(name))
	vm, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeDefault)
	if err != nil {
		return powerState, err
	}
@@ -436,7 +436,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod

// GetInstanceTypeByNodeName gets the instance type by node name.
func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
	machine, err := as.getVirtualMachine(types.NodeName(name))
	machine, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
	if err != nil {
		klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
		return "", err
@@ -448,7 +448,7 @@ func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error
// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
// with availability zone, then it returns fault domain.
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
	vm, err := as.getVirtualMachine(types.NodeName(name))
	vm, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
	if err != nil {
		return cloudprovider.Zone{}, err
	}
@@ -649,7 +649,7 @@ func extractResourceGroupByNicID(nicID string) (string, error) {
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
	var machine compute.VirtualMachine

	machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName))
	machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName), cacheReadTypeDefault)
	if err != nil {
		klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
		return network.Interface{}, err
@@ -70,7 +70,7 @@ type VMSet interface {
	// DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI.
	DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error)
	// GetDataDisks gets a list of data disks attached to the node.
	GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error)
	GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error)

	// GetPowerStatusByNodeName returns the power state of the specified node.
	GetPowerStatusByNodeName(name string) (string, error)
38
vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
generated
vendored
38
vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
generated
vendored
@@ -88,9 +88,9 @@ func newScaleSet(az *Cloud) (VMSet, error) {
|
||||
|
||||
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
|
||||
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
|
||||
func (ss *scaleSet) getVmssVM(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) {
|
||||
func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) {
|
||||
getter := func(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) {
|
||||
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey)
|
||||
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt)
|
||||
if err != nil {
|
||||
return "", "", nil, err
|
||||
}
|
||||
@@ -132,7 +132,7 @@ func (ss *scaleSet) getVmssVM(nodeName string) (string, string, *compute.Virtual
|
||||
|
||||
// GetPowerStatusByNodeName returns the power state of the specified node.
|
||||
func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
|
||||
_, _, vm, err := ss.getVmssVM(name)
|
||||
_, _, vm, err := ss.getVmssVM(name, cacheReadTypeDefault)
|
||||
if err != nil {
|
||||
return powerState, err
|
||||
}
|
||||
@@ -154,9 +154,9 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er
|
||||
|
||||
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
|
||||
// The node must belong to one of scale sets.
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (*compute.VirtualMachineScaleSetVM, error) {
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt cacheReadType) (*compute.VirtualMachineScaleSetVM, error) {
|
||||
getter := func() (vm *compute.VirtualMachineScaleSetVM, found bool, err error) {
|
||||
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey)
|
||||
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
@@ -203,7 +203,7 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
// not exist or is no longer running.
func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return "", err
@@ -213,7 +213,7 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
return ss.availabilitySet.GetInstanceIDByNodeName(name)
}

_, _, vm, err := ss.getVmssVM(name)
_, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
if err != nil {
return "", err
}
@@ -247,7 +247,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
}

vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID)
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, cacheReadTypeUnsafe)
if err != nil {
return "", err
}
@@ -262,7 +262,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,

// GetInstanceTypeByNodeName gets the instance type by node name.
func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return "", err
@@ -272,7 +272,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
return ss.availabilitySet.GetInstanceTypeByNodeName(name)
}

_, _, vm, err := ss.getVmssVM(name)
_, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
if err != nil {
return "", err
}
@@ -287,7 +287,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
// with availability zone, then it returns fault domain.
func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return cloudprovider.Zone{}, err
@@ -297,7 +297,7 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
return ss.availabilitySet.GetZoneByNodeName(name)
}

_, _, vm, err := ss.getVmssVM(name)
_, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
if err != nil {
return cloudprovider.Zone{}, err
}
@@ -536,7 +536,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
}

nodeName := nodes[nx].Name
ssName, _, _, err := ss.getVmssVM(nodeName)
ssName, _, _, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
if err != nil {
return nil, err
}
@@ -614,7 +614,7 @@ func extractResourceGroupByVMSSNicID(nicID string) (string, error) {

// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName, cacheReadTypeDefault)
if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return network.Interface{}, err
@@ -624,7 +624,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
return ss.availabilitySet.GetPrimaryInterface(nodeName)
}

ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
if err != nil {
// VM is availability set, but not cached yet in availabilitySetNodesCache.
if err == ErrorNotVmssInstance {
@@ -747,7 +747,7 @@ func (ss *scaleSet) getConfigForScaleSetByIPFamily(config *compute.VirtualMachin
func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
klog.V(3).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vmSetName, backendPoolID)
vmName := mapNodeNameToVMName(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
if err != nil {
return err
}
@@ -1027,7 +1027,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac

f := func() error {
// Check whether the node is VMAS virtual machine.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName)
managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName, cacheReadTypeDefault)
if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err)
return err
@@ -1068,7 +1068,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac

// ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted from the specified node.
func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeName, backendPoolID string) error {
ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
if err != nil {
return err
}
@@ -1167,7 +1167,7 @@ func (ss *scaleSet) getNodeNameByIPConfigurationID(ipConfigurationID string) (st
resourceGroup := matches[1]
scaleSetName := matches[2]
instanceID := matches[3]
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID)
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, cacheReadTypeUnsafe)
if err != nil {
return "", err
}
@@ -47,6 +47,7 @@ type vmssVirtualMachinesEntry struct {
vmssName string
instanceID string
virtualMachine *compute.VirtualMachineScaleSetVM
lastUpdate time.Time
}

func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
@@ -101,6 +102,7 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
vmssName: ssName,
instanceID: to.String(vm.InstanceID),
virtualMachine: &vm,
lastUpdate: time.Now().UTC(),
})
}
}
@@ -113,7 +115,7 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
}

func (ss *scaleSet) deleteCacheForNode(nodeName string) error {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey)
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cacheReadTypeUnsafe)
if err != nil {
return err
}
@@ -150,8 +152,8 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
return newTimedcache(availabilitySetNodesCacheTTL, getter)
}

func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) {
cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey)
func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string, crt cacheReadType) (bool, error) {
cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey, crt)
if err != nil {
return false, err
}
@@ -85,7 +85,7 @@ func TestVMSSVMCache(t *testing.T) {
for i := range virtualMachines {
vm := virtualMachines[i]
vmName := to.String(vm.OsProfile.ComputerName)
ssName, instanceID, realVM, err := ss.getVmssVM(vmName)
ssName, instanceID, realVM, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
assert.NoError(t, err)
assert.Equal(t, "vmss", ssName)
assert.Equal(t, to.String(vm.InstanceID), instanceID)
@@ -99,14 +99,14 @@ func TestVMSSVMCache(t *testing.T) {
assert.NoError(t, err)

// the VM should be removed from cache after deleteCacheForNode().
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey)
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cacheReadTypeDefault)
assert.NoError(t, err)
cachedVirtualMachines := cached.(*sync.Map)
_, ok := cachedVirtualMachines.Load(vmName)
assert.Equal(t, false, ok)

// the VM should be fetched back after another cache refresh.
ssName, instanceID, realVM, err := ss.getVmssVM(vmName)
ssName, instanceID, realVM, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
assert.NoError(t, err)
assert.Equal(t, "vmss", ssName)
assert.Equal(t, to.String(vm.InstanceID), instanceID)
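The test above pins down the eviction contract: after deleteCacheForNode() the entry must be gone from the cached sync.Map, and the next getVmssVM call must fetch it back. A compressed, hypothetical sketch of that contract (the map layout and getter below are stand-ins, not the vendored types):

package main

import (
	"fmt"
	"sync"
)

// fetchVM stands in for the ARM list call the real cache getter makes.
func fetchVM(name string) string { return "instance-for-" + name }

// getVM returns the cached entry, repopulating it on a miss, as the
// post-deleteCacheForNode read in the test is expected to do.
func getVM(cache *sync.Map, name string) string {
	if v, ok := cache.Load(name); ok {
		return v.(string)
	}
	v := fetchVM(name)
	cache.Store(name, v)
	return v
}

func main() {
	cache := &sync.Map{}
	fmt.Println(getVM(cache, "vmss_0")) // first read populates the cache

	cache.Delete("vmss_0") // the deleteCacheForNode equivalent
	if _, ok := cache.Load("vmss_0"); !ok {
		fmt.Println("vmss_0 removed from cache")
	}

	fmt.Println(getVM(cache, "vmss_0")) // the next read fetches it back
}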
16
vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_wrap.go
generated
vendored
@@ -90,9 +90,9 @@ func ignoreStatusForbiddenFromError(err error) error {
/// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache
/// The service side has throttling control that delays responses if there are multiple requests to a certain VM
/// resource in a short period.
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, err error) {
func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt cacheReadType) (vm compute.VirtualMachine, err error) {
vmName := string(nodeName)
cachedVM, err := az.vmCache.Get(vmName)
cachedVM, err := az.vmCache.Get(vmName, crt)
if err != nil {
return vm, err
}
@@ -104,8 +104,8 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM
return *(cachedVM.(*compute.VirtualMachine)), nil
}

func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) {
cachedRt, err := az.rtCache.Get(az.RouteTableName)
func (az *Cloud) getRouteTable(crt cacheReadType) (routeTable network.RouteTable, exists bool, err error) {
cachedRt, err := az.rtCache.Get(az.RouteTableName, crt)
if err != nil {
return routeTable, false, err
}
@@ -168,8 +168,8 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet
return subnet, exists, err
}

func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) {
cachedLB, err := az.lbCache.Get(name)
func (az *Cloud) getAzureLoadBalancer(name string, crt cacheReadType) (lb network.LoadBalancer, exists bool, err error) {
cachedLB, err := az.lbCache.Get(name, crt)
if err != nil {
return lb, false, err
}
@@ -181,12 +181,12 @@ func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exi
return *(cachedLB.(*network.LoadBalancer)), true, nil
}

func (az *Cloud) getSecurityGroup() (nsg network.SecurityGroup, err error) {
func (az *Cloud) getSecurityGroup(crt cacheReadType) (nsg network.SecurityGroup, err error) {
if az.SecurityGroupName == "" {
return nsg, fmt.Errorf("securityGroupName is not configured")
}

securityGroup, err := az.nsgCache.Get(az.SecurityGroupName)
securityGroup, err := az.nsgCache.Get(az.SecurityGroupName, crt)
if err != nil {
return nsg, err
}
@@ -53,7 +53,7 @@ func (az *Cloud) GetZoneID(zoneLabel string) string {
// If the node is not running with availability zones, then it will fall back to fault domain.
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata()
metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
if err != nil {
return cloudprovider.Zone{}, err
}
5
vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/go.sum
generated
vendored
@@ -38,8 +38,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@@ -193,6 +193,7 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1
github.com/vmware/govmomi v0.20.1 h1:7b/SeTUB3tER8ZLGLLLH3xcnB2xeuLULXmfPFqPSRZA=
github.com/vmware/govmomi v0.20.1/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
10
vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver/go.sum
generated
vendored
@@ -26,11 +26,11 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.17+incompatible h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo=
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -215,6 +215,8 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4MEFmbnK4h3BD7AUmskWv2+EeZJCCs=
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 h1:nSQar3Y0E3VQF/VdZ8PTAilaXpER+d7ypdABCrpwMdg=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
1
vendor/k8s.io/kubernetes/test/e2e/storage/csi_volumes.go
generated
vendored
@@ -36,7 +36,6 @@ import (
var csiTestDrivers = []func() testsuites.TestDriver{
drivers.InitHostPathCSIDriver,
drivers.InitGcePDCSIDriver,
drivers.InitHostPathV0CSIDriver,
// Don't run tests with mock driver (drivers.InitMockCSIDriver), it does not provide persistent storage.
}
17
vendor/k8s.io/kubernetes/test/e2e/storage/drivers/csi.go
generated
vendored
@@ -343,23 +343,6 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
}
}

// InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests.
func InitHostPathV0CSIDriver() testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath-v0",
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
nil, /* no volume attributes -> no ephemeral volume testing */
// Using the current set of rbac.yaml files is problematic here because they don't
// match the version of the rules that were written for the releases of external-attacher
// and external-provisioner that we are using here. It happens to work in practice...
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-attacher.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-provisioner.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpathplugin.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/e2e-test-rbac.yaml",
)
}

// gce-pd
type gcePDCSIDriver struct {
driverInfo testsuites.DriverInfo
@@ -1,48 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: csi-hostpath-attacher
labels:
app: csi-hostpath-attacher
spec:
selector:
app: csi-hostpath-attacher
ports:
- name: dummy
port: 12345

---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-hostpath-attacher
spec:
serviceName: "csi-hostpath-attacher"
replicas: 1
selector:
matchLabels:
app: csi-hostpath-attacher
template:
metadata:
labels:
app: csi-hostpath-attacher
spec:
serviceAccountName: csi-attacher
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v0.4.1
args:
- --v=5
- --csi-address=$(ADDRESS)
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: Always
volumeMounts:
- mountPath: /csi
name: socket-dir
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath-v0
type: DirectoryOrCreate
name: socket-dir
@@ -1,49 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: csi-hostpath-provisioner
labels:
app: csi-hostpath-provisioner
spec:
selector:
app: csi-hostpath-provisioner
ports:
- name: dummy
port: 12345

---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-hostpath-provisioner
spec:
serviceName: "csi-hostpath-provisioner"
replicas: 1
selector:
matchLabels:
app: csi-hostpath-provisioner
template:
metadata:
labels:
app: csi-hostpath-provisioner
spec:
serviceAccountName: csi-provisioner
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v0.4.1
args:
- "--provisioner=csi-hostpath-v0"
- "--csi-address=$(ADDRESS)"
- "--connection-timeout=15s"
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: Always
volumeMounts:
- mountPath: /csi
name: socket-dir
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath-v0
type: DirectoryOrCreate
name: socket-dir
@@ -1,69 +0,0 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-hostpathplugin
spec:
selector:
matchLabels:
app: csi-hostpathplugin
template:
metadata:
labels:
app: csi-hostpathplugin
spec:
hostNetwork: true
containers:
- name: driver-registrar
image: quay.io/k8scsi/driver-registrar:v0.4.1
args:
- --v=5
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath-v0/csi.sock
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
imagePullPolicy: Always
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- name: hostpath
image: quay.io/k8scsi/hostpathplugin:v0.4.1
args:
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(KUBE_NODE_NAME)"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet/pods
mountPropagation: Bidirectional
name: mountpoint-dir
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath-v0
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/lib/kubelet/plugins
type: Directory
name: registration-dir
@@ -1,16 +0,0 @@
# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp-csi-hostpath-role
subjects:
- kind: ServiceAccount
name: csi-attacher
namespace: default
- kind: ServiceAccount
name: csi-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: e2e-test-privileged-psp
apiGroup: rbac.authorization.k8s.io
32
vendor/k8s.io/kubernetes/test/integration/apiserver/max_request_body_bytes_test.go
generated
vendored
@@ -93,6 +93,14 @@ func TestMaxResourceSize(t *testing.T) {
t.Errorf("expected success or bad request err, got %v", err)
}
})
t.Run("JSONPatchType should handle a valid patch just under the max limit", func(t *testing.T) {
patchBody := []byte(`[{"op":"add","path":"/foo","value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}]`)
err = rest.Patch(types.JSONPatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
Body(patchBody).Do().Error()
if err != nil {
t.Errorf("unexpected error: %v", err)
}
})
t.Run("MergePatchType should handle a patch just under the max limit", func(t *testing.T) {
patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`)
err = rest.Patch(types.MergePatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
@@ -101,6 +109,14 @@ func TestMaxResourceSize(t *testing.T) {
t.Errorf("expected success or bad request err, got %v", err)
}
})
t.Run("MergePatchType should handle a valid patch just under the max limit", func(t *testing.T) {
patchBody := []byte(`{"value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}`)
err = rest.Patch(types.MergePatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
Body(patchBody).Do().Error()
if err != nil {
t.Errorf("unexpected error: %v", err)
}
})
t.Run("StrategicMergePatchType should handle a patch just under the max limit", func(t *testing.T) {
patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`)
err = rest.Patch(types.StrategicMergePatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
@@ -109,6 +125,14 @@ func TestMaxResourceSize(t *testing.T) {
t.Errorf("expected success or bad request err, got %v", err)
}
})
t.Run("StrategicMergePatchType should handle a valid patch just under the max limit", func(t *testing.T) {
patchBody := []byte(`{"value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}`)
err = rest.Patch(types.StrategicMergePatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
Body(patchBody).Do().Error()
if err != nil {
t.Errorf("unexpected error: %v", err)
}
})
t.Run("ApplyPatchType should handle a patch just under the max limit", func(t *testing.T) {
patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`)
err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
@@ -117,6 +141,14 @@ func TestMaxResourceSize(t *testing.T) {
t.Errorf("expected success or bad request err, got %#v", err)
}
})
t.Run("ApplyPatchType should handle a valid patch just under the max limit", func(t *testing.T) {
patchBody := []byte(`{"apiVersion":"v1","kind":"Secret"` + strings.Repeat(" ", 3*1024*1024-100) + `}`)
err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
Body(patchBody).Do().Error()
if err != nil {
t.Errorf("unexpected error: %v", err)
}
})
t.Run("Delete should limit the request body size", func(t *testing.T) {
err = c.Delete().AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
Body(hugeData).Do().Error()
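The added test cases all rely on the same construction: a tiny, valid patch is padded with insignificant whitespace so the request body sits just under the 3 MiB limit and must be accepted, while the neighboring cases push deeply nested brackets of similar size that the server may reject. A standalone sketch of that padding trick, with the limit taken from the tests and everything else illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	const limit = 3 * 1024 * 1024 // request body cap exercised by the tests above

	// A minimal JSON patch padded with whitespace: still valid JSON, but
	// only a few bytes short of the limit, mirroring the test's patchBody.
	patch := []byte(`[{"op":"add","path":"/foo","value":0` + strings.Repeat(" ", limit-100) + `}]`)

	fmt.Println(len(patch) < limit) // true: just under the cap
	fmt.Println(json.Valid(patch))  // true: the padding does not break parsing
	fmt.Println(limit-len(patch), "bytes of headroom")
}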