mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Compare commits


24 Commits

Author SHA1 Message Date
Avesh Agarwal
9f38146bbf Merge pull request #34 from aveshagarwal/master-remove-deadcode
Remove unused test code.
2017-11-10 17:06:05 -05:00
Avesh Agarwal
1473e1d024 Remove unused test code. 2017-11-10 16:18:07 -05:00
Avesh Agarwal
7b4b9d9e7e Merge pull request #35 from aveshagarwal/master-fixes
Fix to not process empty node list and also fix error reporting in other places.
2017-11-10 16:14:11 -05:00
Avesh Agarwal
7d079813e5 Fix to not process empty node list and also fix error reporting in other places.
Also fix unit test panic and if nodeLister is nil and refactor some code.
2017-11-10 16:09:33 -05:00
Avesh Agarwal
e02857e00a Merge pull request #33 from aveshagarwal/master-node-selector
Implement node selectors to retrieve node list based on provided query.
2017-11-10 12:11:10 -05:00
Avesh Agarwal
acfd4f8680 Update auto-generated files. 2017-11-10 12:02:43 -05:00
Avesh Agarwal
c29c9db41e Implement node selectors to retrieve node list based on provided query. 2017-11-10 12:02:43 -05:00
Avesh Agarwal
5d3f987dde Merge pull request #20 from ravisantoshgudimetla/pod-anti-affinity-strategy
Pod antiaffinity strategy
2017-11-08 10:19:16 -05:00
ravisantoshgudimetla
97732cf62d Pod antiaffinity strategy.
Signed-off-by: ravisantoshgudimetla <ravisantoshgudimetla@gmail.com>
2017-11-02 18:09:44 -04:00
Avesh Agarwal
4afc4dfb16 Merge pull request #25 from ravisantoshgudimetla/descheduler-version
Introducing versioning in descheduler
2017-10-25 17:11:07 -04:00
ravisantoshgudimetla
023ccd99f5 Introducing versioning in descheduler
Signed-off-by: ravisantoshgudimetla <ravisantoshgudimetla@gmail.com>
2017-10-24 15:45:45 -04:00
Avesh Agarwal
dd831d0d03 Merge pull request #28 from aveshagarwal/master-rebase
Rebase to kubernetes 1.7.6
2017-10-16 16:08:57 -04:00
Avesh Agarwal
04dd7a5902 Update vendor dir for rebasing kubernetes to 1.7.6. 2017-10-16 14:44:28 -04:00
Avesh Agarwal
d395332793 Update glide.yaml for rebasing kubernetes to 1.7.6. Also fix
unit tests.
2017-10-16 14:42:17 -04:00
Avesh Agarwal
73d9803a46 Merge pull request #27 from aveshagarwal/master-fixes
Fix glog messages.
2017-10-11 13:44:32 -04:00
Avesh Agarwal
40a19396d0 Fix glog messages. 2017-10-11 11:49:59 -04:00
Avesh Agarwal
10593fa427 Merge pull request #26 from aveshagarwal/master-readme-update
Update readme with description to run descheduler as a job inside a pod.
2017-10-11 08:55:32 -04:00
Avesh Agarwal
257312929e Update readme with description to run descheduler as a job inside a pod. 2017-10-11 08:48:02 -04:00
Avesh Agarwal
6aa10f2169 Merge pull request #22 from ravisantoshgudimetla/introduce-gofmt
Introducing gofmt
2017-10-05 16:33:44 -04:00
ravisantoshgudimetla
9e536da99e Commit for gofmt
Signed-off-by: ravisantoshgudimetla <ravisantoshgudimetla@gmail.com>
2017-10-05 16:03:28 -04:00
Avesh Agarwal
b272dbec29 Merge pull request #21 from aveshagarwal/master-owner-fix
Update owners file.
2017-10-05 15:21:46 -04:00
Avesh Agarwal
70e45297bc Update owners file. 2017-10-05 14:35:26 -04:00
Avesh Agarwal
8007f4af20 Merge pull request #19 from ravisantoshgudimetla/introduce-glog
Conversion to glog
2017-10-04 14:40:19 -04:00
ravisantoshgudimetla
32c8898ec7 Conversion to glog
Signed-off-by: ravisantoshgudimetla <ravisantoshgudimetla@gmail.com>
2017-10-04 12:17:44 -04:00
392 changed files with 13416 additions and 7627 deletions


@@ -2,5 +2,6 @@ language: go
go:
- 1.8.3
script:
- hack/verify-gofmt.sh
- make build
- make test


@@ -15,15 +15,22 @@
.PHONY: test
# VERSION is currently based on the last commit
VERSION:=$(shell git rev-parse --short HEAD)
VERSION=`git describe --tags`
COMMIT=`git rev-parse HEAD`
BUILD=`date +%FT%T%z`
LDFLAG_LOCATION=github.com/kubernetes-incubator/descheduler/cmd/descheduler/app
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"
# IMAGE is the image name of descheduler
# Should this be changed?
IMAGE:=descheduler:$(VERSION)
all: build
build:
go build -o _output/bin/descheduler github.com/kubernetes-incubator/descheduler/cmd/descheduler
go build ${LDFLAGS} -o _output/bin/descheduler github.com/kubernetes-incubator/descheduler/cmd/descheduler
image: build
docker build -t $(IMAGE) .
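Since the new LDFLAGS stamp `version`, `gitCommit`, and `buildDate` into the app package at link time, the result can be checked with the `version` subcommand added later in this diff. A hypothetical session (the output shape is inferred from the `fmt.Printf` in version.go below; actual values depend on the checkout and its tags):
```
$ make build
$ ./_output/bin/descheduler version
Descheduler version {Major:0 Minor:1+ GitCommit:e884046... GitVersion:v0.1.0-7-ge884046 BuildDate:2017-10-24T15:45:45-0400 GoVersion:go1.8.3 Compiler:gc Platform:linux/amd64}
```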

OWNER (deleted, 3 lines)

@@ -1,3 +0,0 @@
aveshagarwal
ravisantoshgudimetla
jayunit100

OWNERS (new file, 8 lines)

@@ -0,0 +1,8 @@
approvers:
- aveshagarwal
- ravisantoshgudimetla
- jayunit100
reviewers:
- aveshagarwal
- ravisantoshgudimetla
- jayunit100

README.md (103 lines changed)

@@ -40,6 +40,109 @@ For more information about available options run:
$ ./_output/bin/descheduler --help
```
## Running Descheduler as a Job Inside of a Pod
The descheduler can be run as a job inside of a pod. This has the advantage
that it can run repeatedly without user intervention. The descheduler pod is
run as a critical pod so that it is not evicted by itself or by the kubelet
during an eviction event. Since critical pods are created in the `kube-system`
namespace, the descheduler job and its pod are also created in the
`kube-system` namespace.
### Create a container image
First we create a simple Docker image utilizing the Dockerfile found in the root directory:
```
$ make image
```
### Create a cluster role
To grant the permissions the descheduler needs when running in a pod, create a cluster role:
```
$ cat << EOF | kubectl create -f -
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: descheduler-cluster-role
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "watch", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "delete"]
EOF
```
### Create the service account which will be used to run the job:
```
$ kubectl create sa descheduler-sa -n kube-system
```
### Bind the cluster role to the service account:
```
$ kubectl create clusterrolebinding descheduler-cluster-role-binding \
--clusterrole=descheduler-cluster-role \
--serviceaccount=kube-system:descheduler-sa
```
### Create a configmap to store descheduler policy
The descheduler policy is created as a ConfigMap in the `kube-system` namespace
so that it can be mounted as a volume inside the pod.
```
$ kubectl create configmap descheduler-policy-configmap \
-n kube-system --from-file=<path-to-policy-dir/policy.yaml>
```
### Create the job specification (descheduler-job.yaml)
```
apiVersion: batch/v1
kind: Job
metadata:
  name: descheduler-job
  namespace: kube-system
spec:
  parallelism: 1
  completions: 1
  template:
    metadata:
      name: descheduler-pod
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: "true"
    spec:
      containers:
      - name: descheduler
        image: descheduler
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume
        command:
        - "/bin/sh"
        - "-ec"
        - |
          /bin/descheduler --policy-config-file /policy-dir/policy.yaml
      restartPolicy: "Never"
      serviceAccountName: descheduler-sa
      volumes:
      - name: policy-volume
        configMap:
          name: descheduler-policy-configmap
```
Note that the pod template is configured with the critical-pod annotation, and
that the policy file is mounted as a volume from the ConfigMap.
### Run the descheduler as a job in a pod:
```
$ kubectl create -f descheduler-job.yaml
```
## Policy and Strategies
The descheduler's policy is configurable and includes strategies that can be enabled or disabled.
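For reference, a minimal `policy.yaml` of the kind mounted above could look like the following sketch (assuming the `descheduler/v1alpha1` API group used elsewhere in this repository; the strategy names are taken from the example policy file changed later in this diff):
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: true
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: true
```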


@@ -53,4 +53,6 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.KubeconfigFile, "kubeconfig-file", rs.KubeconfigFile, "File with kube configuration.")
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
// The node-selector query causes the descheduler to run only on nodes that match the node labels in the query.
fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2)")
}
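As a sketch of how the selector string is interpreted (the `ReadyNodes` change later in this diff parses it with `labels.Parse`), a minimal standalone example using `k8s.io/apimachinery`:
```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Parse the same kind of query that is passed via --node-selector.
	selector, err := labels.Parse("type=compute,region!=east")
	if err != nil {
		panic(err)
	}
	// A node carrying these labels would pass the filter.
	nodeLabels := labels.Set{"type": "compute", "region": "west"}
	fmt.Println(selector.Matches(nodeLabels)) // prints: true
}
```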


@@ -18,32 +18,41 @@ limitations under the License.
package app
import (
"fmt"
"flag"
"io"
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
"github.com/kubernetes-incubator/descheduler/pkg/descheduler"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
aflag "k8s.io/apiserver/pkg/util/flag"
"k8s.io/apiserver/pkg/util/logs"
)
// NewDeschedulerCommand creates a *cobra.Command object with default parameters
func NewDeschedulerCommand() *cobra.Command {
func NewDeschedulerCommand(out io.Writer) *cobra.Command {
s := options.NewDeschedulerServer()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: "descheduler",
Short: "descheduler",
Long: `The descheduler evicts pods which may be bound to less desired nodes`,
Run: func(cmd *cobra.Command, args []string) {
logs.InitLogs()
defer logs.FlushLogs()
err := Run(s)
if err != nil {
fmt.Println(err)
glog.Errorf("%v", err)
}
},
}
cmd.SetOutput(out)
flags := cmd.Flags()
flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
flags.AddGoFlagSet(flag.CommandLine)
s.AddFlags(flags)
return cmd
}


@@ -0,0 +1,86 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"fmt"
"github.com/spf13/cobra"
"runtime"
"strings"
)
var (
// gitCommit is a constant representing the source version that
// generated this build. It should be set during build via -ldflags.
gitCommit string
// version is a constant representing the version tag that
// generated this build. It should be set during build via -ldflags.
version string
// buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ').
// It should be set during build via -ldflags.
buildDate string
)
// Info holds the information related to descheduler app version.
type Info struct {
Major string `json:"major"`
Minor string `json:"minor"`
GitCommit string `json:"gitCommit"`
GitVersion string `json:"gitVersion"`
BuildDate string `json:"buildDate"`
GoVersion string `json:"goVersion"`
Compiler string `json:"compiler"`
Platform string `json:"platform"`
}
// Get returns the overall codebase version. It's for detecting
// what code a binary was built from.
func Get() Info {
majorVersion, minorVersion := splitVersion(version)
return Info{
Major: majorVersion,
Minor: minorVersion,
GitCommit: gitCommit,
GitVersion: version,
BuildDate: buildDate,
GoVersion: runtime.Version(),
Compiler: runtime.Compiler,
Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
}
}
func NewVersionCommand() *cobra.Command {
var versionCmd = &cobra.Command{
Use: "version",
Short: "Version of descheduler",
Long: `Prints the version of descheduler.`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Descheduler version %+v\n", Get())
},
}
return versionCmd
}
// splitVersion splits the git version to generate major and minor versions needed.
func splitVersion(version string) (string, string) {
if version == "" {
return "", ""
}
// A sample version is of the form v0.1.0-7-ge884046, so split on '.' and
// then return 0 and 1+ ('+' appended to follow the semver convention) as the major and minor versions.
return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
}
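A quick sanity check of `splitVersion` against the sample version string from the comment above (a sketch; it needs the standard `testing` import and, since the function is unexported, would live in the same `app` package):
```go
func TestSplitVersion(t *testing.T) {
	// "v0.1.0-7-ge884046" splits on '.' into ["v0", "1", "0-7-ge884046"];
	// the leading 'v' is trimmed and '+' is appended to the minor version.
	major, minor := splitVersion("v0.1.0-7-ge884046")
	if major != "0" || minor != "1+" {
		t.Errorf("got (%q, %q), want (0, 1+)", major, minor)
	}
}
```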


@@ -18,13 +18,14 @@ package main
import (
"fmt"
"os"
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app"
"os"
)
func main() {
cmd := app.NewDeschedulerCommand()
out := os.Stdout
cmd := app.NewDeschedulerCommand(out)
cmd.AddCommand(app.NewVersionCommand())
if err := cmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)


@@ -3,6 +3,8 @@ kind: "DeschedulerPolicy"
strategies:
"RemoveDuplicates":
enabled: true
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
"LowNodeUtilization":
enabled: true
params:

glide.lock (generated, 76 lines changed)

@@ -1,5 +1,5 @@
hash: bf18ba8a038a73e8f11f808c8babdd34734279b050e0223f704eaabbd13830fd
updated: 2017-08-05T10:03:09.300557448-04:00
hash: 6ccf8e8213eb31f9dd31b46c3aa3c2c01929c6230fb049cfabcabd498ade9c30
updated: 2017-10-16T14:31:20.353977552-04:00
imports:
- name: github.com/davecgh/go-spew
version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
@@ -33,31 +33,8 @@ imports:
- name: github.com/gogo/protobuf
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
subpackages:
- gogoproto
- plugin/compare
- plugin/defaultcheck
- plugin/description
- plugin/embedcheck
- plugin/enumstringer
- plugin/equal
- plugin/face
- plugin/gostring
- plugin/marshalto
- plugin/oneofcheck
- plugin/populate
- plugin/size
- plugin/stringer
- plugin/testgen
- plugin/union
- plugin/unmarshal
- proto
- protoc-gen-gogo/descriptor
- protoc-gen-gogo/generator
- protoc-gen-gogo/grpc
- protoc-gen-gogo/plugin
- sortkeys
- vanity
- vanity/command
- name: github.com/golang/glog
version: 44145f04b68cf362d9c4df2182967c2275eaefed
- name: github.com/google/gofuzz
@@ -94,27 +71,19 @@ imports:
version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
subpackages:
- codec
- codec/codecgen
- name: golang.org/x/crypto
version: d172538b2cfce0c13cee31e647d0367aa8cd2486
subpackages:
- bcrypt
- blowfish
- curve25519
- ed25519
- ed25519/internal/edwards25519
- nacl/secretbox
- pkcs12
- pkcs12/internal/rc2
- poly1305
- salsa20/salsa
- ssh
- ssh/terminal
- name: golang.org/x/net
version: f2499483f923065a842d38eb4c7f1927e6fc6e6d
subpackages:
- context
- context/ctxhttp
- html
- html/atom
- http2
@@ -122,24 +91,17 @@ imports:
- idna
- internal/timeseries
- lex/httplex
- proxy
- trace
- websocket
- name: golang.org/x/sys
version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
subpackages:
- unix
- windows
- name: golang.org/x/text
version: 2910a502d2bf9e43193af9d68ca516529614eed3
subpackages:
- cases
- encoding
- encoding/internal
- encoding/internal/identifier
- encoding/unicode
- internal/tag
- internal/utf8internal
- language
- runes
- secure/bidirule
@@ -153,7 +115,7 @@ imports:
- name: gopkg.in/yaml.v2
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
- name: k8s.io/apimachinery
version: abe34e4f5b4413c282a83011892cbeea5b32223b
version: 917740426ad66ff818da4809990480bcc0786a77
subpackages:
- pkg/api/equality
- pkg/api/errors
@@ -200,11 +162,15 @@ imports:
- pkg/watch
- third_party/forked/golang/reflect
- name: k8s.io/apiserver
version: ab57ed5a72c3b67058f665d660e23bae18339fc2
version: a7f02eb8e3920e446965036c9610ec52a7ede92f
subpackages:
- pkg/util/flag
- pkg/util/logs
- name: k8s.io/client-go
version: e356aa2e77ab4a5914c216c12ba14cce25a25ab0
version: ec52d278b25c8fef82a965d93afdc74771ea6963
subpackages:
- discovery
- discovery/fake
- kubernetes/scheme
- pkg/api
- pkg/api/v1
@@ -245,6 +211,7 @@ imports:
- pkg/version
- rest
- rest/watch
- testing
- tools/auth
- tools/cache
- tools/clientcmd
@@ -260,7 +227,7 @@ imports:
- name: k8s.io/gengo
version: c79c13d131b0a8f42d05faa6491c12e94ccc6f30
- name: k8s.io/kubernetes
version: ebb8d6e0fadfc95f3d64ccecc36c8ed2ac9224ef
version: 4bc5e7f9a6c25dc4c03d4d656f2cefd21540e28c
subpackages:
- pkg/api
- pkg/api/install
@@ -312,27 +279,48 @@ imports:
- pkg/apis/storage/v1
- pkg/apis/storage/v1beta1
- pkg/client/clientset_generated/clientset
- pkg/client/clientset_generated/clientset/fake
- pkg/client/clientset_generated/clientset/scheme
- pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1
- pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/fake
- pkg/client/clientset_generated/clientset/typed/apps/v1beta1
- pkg/client/clientset_generated/clientset/typed/apps/v1beta1/fake
- pkg/client/clientset_generated/clientset/typed/authentication/v1
- pkg/client/clientset_generated/clientset/typed/authentication/v1/fake
- pkg/client/clientset_generated/clientset/typed/authentication/v1beta1
- pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/fake
- pkg/client/clientset_generated/clientset/typed/authorization/v1
- pkg/client/clientset_generated/clientset/typed/authorization/v1/fake
- pkg/client/clientset_generated/clientset/typed/authorization/v1beta1
- pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/fake
- pkg/client/clientset_generated/clientset/typed/autoscaling/v1
- pkg/client/clientset_generated/clientset/typed/autoscaling/v1/fake
- pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1
- pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/fake
- pkg/client/clientset_generated/clientset/typed/batch/v1
- pkg/client/clientset_generated/clientset/typed/batch/v1/fake
- pkg/client/clientset_generated/clientset/typed/batch/v2alpha1
- pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/fake
- pkg/client/clientset_generated/clientset/typed/certificates/v1beta1
- pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/fake
- pkg/client/clientset_generated/clientset/typed/core/v1
- pkg/client/clientset_generated/clientset/typed/core/v1/fake
- pkg/client/clientset_generated/clientset/typed/extensions/v1beta1
- pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/fake
- pkg/client/clientset_generated/clientset/typed/networking/v1
- pkg/client/clientset_generated/clientset/typed/networking/v1/fake
- pkg/client/clientset_generated/clientset/typed/policy/v1beta1
- pkg/client/clientset_generated/clientset/typed/policy/v1beta1/fake
- pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1
- pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/fake
- pkg/client/clientset_generated/clientset/typed/rbac/v1beta1
- pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/fake
- pkg/client/clientset_generated/clientset/typed/settings/v1alpha1
- pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/fake
- pkg/client/clientset_generated/clientset/typed/storage/v1
- pkg/client/clientset_generated/clientset/typed/storage/v1/fake
- pkg/client/clientset_generated/clientset/typed/storage/v1beta1
- pkg/client/clientset_generated/clientset/typed/storage/v1beta1/fake
- pkg/client/listers/core/v1
- pkg/kubelet/types
- pkg/util


@@ -1,13 +1,13 @@
package: github.com/kubernetes-incubator/descheduler
import:
- package: k8s.io/client-go
version: e356aa2e77ab4a5914c216c12ba14cce25a25ab0 # kube 1.7.0
version: ec52d278b25c8fef82a965d93afdc74771ea6963
- package: k8s.io/apiserver
version: ab57ed5a72c3b67058f665d660e23bae18339fc2
version: release-1.7
- package: k8s.io/apimachinery
version: abe34e4f5b4413c282a83011892cbeea5b32223b # kube 1.7.0
version: 917740426ad66ff818da4809990480bcc0786a77
- package: k8s.io/kubernetes
version: ebb8d6e0fadfc95f3d64ccecc36c8ed2ac9224ef # kube 1.7.0
version: 1.7.6
- package: github.com/kubernetes/repo-infra
- package: github.com/spf13/cobra
version: f62e98d28ab7ad31d707ba837a966378465c7b57

hack/verify-gofmt.sh (new executable file, 54 lines)

@@ -0,0 +1,54 @@
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8') ]]; then
echo "Unknown go version '${GO_VERSION}', skipping gofmt."
exit 1
fi
cd "${DESCHEDULER_ROOT}"
find_files() {
find . -not \( \
\( \
-wholename './output' \
-o -wholename './_output' \
-o -wholename './release' \
-o -wholename './target' \
-o -wholename './.git' \
-o -wholename '*/third_party/*' \
-o -wholename '*/Godeps/*' \
-o -wholename '*/vendor/*' \
\) -prune \
\) -name '*.go'
}
GOFMT="gofmt -s"
bad_files=$(find_files | xargs $GOFMT -l)
if [[ -n "${bad_files}" ]]; then
echo "!!! '$GOFMT' needs to be run on the following files: "
echo "${bad_files}"
exit 1
fi
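When the check fails, the listed files can be fixed in place with `gofmt -s -w`. A hypothetical failing run (the file name is invented for illustration):
```
$ hack/verify-gofmt.sh
!!! 'gofmt -s' needs to be run on the following files:
./pkg/descheduler/descheduler.go
$ gofmt -s -w ./pkg/descheduler/descheduler.go
```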


@@ -81,16 +81,16 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [6]bool
var yyq2 [7]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[0] = x.Kind != ""
yyq2[1] = x.APIVersion != ""
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(6)
r.EncodeArrayStart(7)
} else {
yynn2 = 4
yynn2 = 5
for _, b := range yyq2 {
if b {
yynn2++
@@ -227,6 +227,25 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
r.EncodeBool(bool(x.DryRun))
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym22 := z.EncBinary()
_ = yym22
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.NodeSelector))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("NodeSelector"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym23 := z.EncBinary()
_ = yym23
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.NodeSelector))
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
@@ -361,6 +380,18 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.De
*((*bool)(yyv14)) = r.DecodeBool()
}
}
case "NodeSelector":
if r.TryDecodeAsNil() {
x.NodeSelector = ""
} else {
yyv16 := &x.NodeSelector
yym17 := z.DecBinary()
_ = yym17
if false {
} else {
*((*string)(yyv16)) = r.DecodeString()
}
}
default:
z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3
@@ -372,16 +403,16 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj16 int
var yyb16 bool
var yyhl16 bool = l >= 0
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
var yyj18 int
var yyb18 bool
var yyhl18 bool = l >= 0
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb16 = r.CheckBreak()
yyb18 = r.CheckBreak()
}
if yyb16 {
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@@ -389,29 +420,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
yyv17 := &x.Kind
yym18 := z.DecBinary()
_ = yym18
if false {
} else {
*((*string)(yyv17)) = r.DecodeString()
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb16 = r.CheckBreak()
}
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
yyv19 := &x.APIVersion
yyv19 := &x.Kind
yym20 := z.DecBinary()
_ = yym20
if false {
@@ -419,13 +428,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
*((*string)(yyv19)) = r.DecodeString()
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb16 = r.CheckBreak()
yyb18 = r.CheckBreak()
}
if yyb16 {
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
yyv21 := &x.APIVersion
yym22 := z.DecBinary()
_ = yym22
if false {
} else {
*((*string)(yyv21)) = r.DecodeString()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@@ -433,22 +464,22 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() {
x.DeschedulingInterval = 0
} else {
yyv21 := &x.DeschedulingInterval
yym22 := z.DecBinary()
_ = yym22
yyv23 := &x.DeschedulingInterval
yym24 := z.DecBinary()
_ = yym24
if false {
} else if z.HasExtensions() && z.DecExt(yyv21) {
} else if z.HasExtensions() && z.DecExt(yyv23) {
} else {
*((*int64)(yyv21)) = int64(r.DecodeInt(64))
*((*int64)(yyv23)) = int64(r.DecodeInt(64))
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb16 = r.CheckBreak()
yyb18 = r.CheckBreak()
}
if yyb16 {
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@@ -456,29 +487,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() {
x.KubeconfigFile = ""
} else {
yyv23 := &x.KubeconfigFile
yym24 := z.DecBinary()
_ = yym24
if false {
} else {
*((*string)(yyv23)) = r.DecodeString()
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb16 = r.CheckBreak()
}
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.PolicyConfigFile = ""
} else {
yyv25 := &x.PolicyConfigFile
yyv25 := &x.KubeconfigFile
yym26 := z.DecBinary()
_ = yym26
if false {
@@ -486,13 +495,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
*((*string)(yyv25)) = r.DecodeString()
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb16 = r.CheckBreak()
yyb18 = r.CheckBreak()
}
if yyb16 {
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.PolicyConfigFile = ""
} else {
yyv27 := &x.PolicyConfigFile
yym28 := z.DecBinary()
_ = yym28
if false {
} else {
*((*string)(yyv27)) = r.DecodeString()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@@ -500,26 +531,48 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() {
x.DryRun = false
} else {
yyv27 := &x.DryRun
yym28 := z.DecBinary()
_ = yym28
yyv29 := &x.DryRun
yym30 := z.DecBinary()
_ = yym30
if false {
} else {
*((*bool)(yyv27)) = r.DecodeBool()
*((*bool)(yyv29)) = r.DecodeBool()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.NodeSelector = ""
} else {
yyv31 := &x.NodeSelector
yym32 := z.DecBinary()
_ = yym32
if false {
} else {
*((*string)(yyv31)) = r.DecodeString()
}
}
for {
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb16 = r.CheckBreak()
yyb18 = r.CheckBreak()
}
if yyb16 {
if yyb18 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj16-1, "")
z.DecStructFieldNotFound(yyj18-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}


@@ -37,4 +37,7 @@ type DeschedulerConfiguration struct {
// Dry run
DryRun bool
// Node selectors
NodeSelector string
}


@@ -81,7 +81,7 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [6]bool
var yyq2 [7]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[0] = x.Kind != ""
@@ -89,9 +89,10 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
yyq2[2] = x.DeschedulingInterval != 0
yyq2[4] = x.PolicyConfigFile != ""
yyq2[5] = x.DryRun != false
yyq2[6] = x.NodeSelector != ""
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(6)
r.EncodeArrayStart(7)
} else {
yynn2 = 1
for _, b := range yyq2 {
@@ -248,6 +249,31 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[6] {
yym22 := z.EncBinary()
_ = yym22
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.NodeSelector))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq2[6] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("nodeSelector"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym23 := z.EncBinary()
_ = yym23
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.NodeSelector))
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
@@ -382,6 +408,18 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.De
*((*bool)(yyv14)) = r.DecodeBool()
}
}
case "nodeSelector":
if r.TryDecodeAsNil() {
x.NodeSelector = ""
} else {
yyv16 := &x.NodeSelector
yym17 := z.DecBinary()
_ = yym17
if false {
} else {
*((*string)(yyv16)) = r.DecodeString()
}
}
default:
z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3
@@ -393,16 +431,16 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj16 int
var yyb16 bool
var yyhl16 bool = l >= 0
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
var yyj18 int
var yyb18 bool
var yyhl18 bool = l >= 0
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb16 = r.CheckBreak()
yyb18 = r.CheckBreak()
}
if yyb16 {
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@@ -410,29 +448,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
yyv17 := &x.Kind
yym18 := z.DecBinary()
_ = yym18
if false {
} else {
*((*string)(yyv17)) = r.DecodeString()
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb16 = r.CheckBreak()
}
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
yyv19 := &x.APIVersion
yyv19 := &x.Kind
yym20 := z.DecBinary()
_ = yym20
if false {
@@ -440,13 +456,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
*((*string)(yyv19)) = r.DecodeString()
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb16 = r.CheckBreak()
yyb18 = r.CheckBreak()
}
if yyb16 {
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
yyv21 := &x.APIVersion
yym22 := z.DecBinary()
_ = yym22
if false {
} else {
*((*string)(yyv21)) = r.DecodeString()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@@ -454,22 +492,22 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() {
x.DeschedulingInterval = 0
} else {
yyv21 := &x.DeschedulingInterval
yym22 := z.DecBinary()
_ = yym22
yyv23 := &x.DeschedulingInterval
yym24 := z.DecBinary()
_ = yym24
if false {
} else if z.HasExtensions() && z.DecExt(yyv21) {
} else if z.HasExtensions() && z.DecExt(yyv23) {
} else {
*((*int64)(yyv21)) = int64(r.DecodeInt(64))
*((*int64)(yyv23)) = int64(r.DecodeInt(64))
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb16 = r.CheckBreak()
yyb18 = r.CheckBreak()
}
if yyb16 {
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@@ -477,29 +515,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() {
x.KubeconfigFile = ""
} else {
yyv23 := &x.KubeconfigFile
yym24 := z.DecBinary()
_ = yym24
if false {
} else {
*((*string)(yyv23)) = r.DecodeString()
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb16 = r.CheckBreak()
}
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.PolicyConfigFile = ""
} else {
yyv25 := &x.PolicyConfigFile
yyv25 := &x.KubeconfigFile
yym26 := z.DecBinary()
_ = yym26
if false {
@@ -507,13 +523,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
*((*string)(yyv25)) = r.DecodeString()
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb16 = r.CheckBreak()
yyb18 = r.CheckBreak()
}
if yyb16 {
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.PolicyConfigFile = ""
} else {
yyv27 := &x.PolicyConfigFile
yym28 := z.DecBinary()
_ = yym28
if false {
} else {
*((*string)(yyv27)) = r.DecodeString()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@@ -521,26 +559,48 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() {
x.DryRun = false
} else {
yyv27 := &x.DryRun
yym28 := z.DecBinary()
_ = yym28
yyv29 := &x.DryRun
yym30 := z.DecBinary()
_ = yym30
if false {
} else {
*((*bool)(yyv27)) = r.DecodeBool()
*((*bool)(yyv29)) = r.DecodeBool()
}
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.NodeSelector = ""
} else {
yyv31 := &x.NodeSelector
yym32 := z.DecBinary()
_ = yym32
if false {
} else {
*((*string)(yyv31)) = r.DecodeString()
}
}
for {
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb16 = r.CheckBreak()
yyb18 = r.CheckBreak()
}
if yyb16 {
if yyb18 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj16-1, "")
z.DecStructFieldNotFound(yyj18-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}


@@ -37,4 +37,7 @@ type DeschedulerConfiguration struct {
// Dry run
DryRun bool `json:"dryRun,omitempty"`
// Node selectors
NodeSelector string `json:"nodeSelector,omitempty"`
}


@@ -45,6 +45,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
out.KubeconfigFile = in.KubeconfigFile
out.PolicyConfigFile = in.PolicyConfigFile
out.DryRun = in.DryRun
out.NodeSelector = in.NodeSelector
return nil
}
@@ -58,6 +59,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
out.KubeconfigFile = in.KubeconfigFile
out.PolicyConfigFile = in.PolicyConfigFile
out.DryRun = in.DryRun
out.NodeSelector = in.NodeSelector
return nil
}


@@ -19,6 +19,8 @@ package descheduler
import (
"fmt"
"github.com/golang/glog"
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
"github.com/kubernetes-incubator/descheduler/pkg/descheduler/client"
eutils "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions/utils"
@@ -39,22 +41,28 @@ func Run(rs *options.DeschedulerServer) error {
return err
}
if deschedulerPolicy == nil {
return fmt.Errorf("\ndeschedulerPolicy is nil\n")
return fmt.Errorf("deschedulerPolicy is nil")
}
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
return err
}
stopChannel := make(chan struct{})
nodes, err := nodeutil.ReadyNodes(rs.Client, stopChannel)
nodes, err := nodeutil.ReadyNodes(rs.Client, rs.NodeSelector, stopChannel)
if err != nil {
return err
}
if len(nodes) == 0 {
glog.V(1).Infof("node list is empty")
return nil
}
strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes)
strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes)
strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes)
return nil
}


@@ -17,9 +17,9 @@ limitations under the License.
package node
import (
"fmt"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
@@ -31,21 +31,34 @@ import (
// ReadyNodes returns ready nodes irrespective of whether they are
// schedulable or not.
func ReadyNodes(client clientset.Interface, stopChannel <-chan struct{}) ([]*v1.Node, error) {
nl := GetNodeLister(client, stopChannel)
nodes, err := nl.List(labels.Everything())
func ReadyNodes(client clientset.Interface, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
ns, err := labels.Parse(nodeSelector)
if err != nil {
return []*v1.Node{}, err
}
var nodes []*v1.Node
nl := GetNodeLister(client, stopChannel)
if nl != nil {
// err is defined above
if nodes, err = nl.List(ns); err != nil {
return []*v1.Node{}, err
}
}
if len(nodes) == 0 {
var err error
nItems, err := client.Core().Nodes().List(metav1.ListOptions{})
glog.V(2).Infof("node lister returned empty list, now fetch directly")
nItems, err := client.Core().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector})
if err != nil {
return []*v1.Node{}, err
}
for i, _ := range nItems.Items {
if nItems == nil || len(nItems.Items) == 0 {
return []*v1.Node{}, nil
}
for i := range nItems.Items {
node := nItems.Items[i]
nodes = append(nodes, &node)
}
@@ -61,6 +74,9 @@ func ReadyNodes(client clientset.Interface, stopChannel <-chan struct{}) ([]*v1.
}
func GetNodeLister(client clientset.Interface, stopChannel <-chan struct{}) corelisters.NodeLister {
if stopChannel == nil {
return nil
}
listWatcher := cache.NewListWatchFromClient(client.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
nodeLister := corelisters.NewNodeLister(store)
@@ -78,7 +94,7 @@ func IsReady(node *v1.Node) bool {
// - NodeOutOfDisk condition status is ConditionFalse,
// - NodeNetworkUnavailable condition status is ConditionFalse.
if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
fmt.Printf("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
glog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
return false
} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)


@@ -17,18 +17,14 @@ limitations under the License.
package node
import (
"fmt"
"testing"
"github.com/kubernetes-incubator/descheduler/test"
"k8s.io/apimachinery/pkg/runtime"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
)
func TestReadyNodes(t *testing.T) {
fakeClient := &fake.Clientset{}
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
node1.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
node2 := test.BuildTestNode("node2", 1000, 2000, 9)
@@ -41,25 +37,6 @@ func TestReadyNodes(t *testing.T) {
node6 := test.BuildTestNode("node6", 1000, 2000, 9)
node6.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
switch getAction.GetName() {
case node1.Name:
return true, node1, nil
case node2.Name:
return true, node2, nil
case node3.Name:
return true, node3, nil
case node4.Name:
return true, node4, nil
case node5.Name:
return true, node5, nil
case node6.Name:
return true, node6, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
if !IsReady(node1) {
t.Errorf("Expected %v to be ready", node1.Name)
}
@@ -80,3 +57,18 @@ func TestReadyNodes(t *testing.T) {
}
}
func TestReadyNodesWithNodeSelector(t *testing.T) {
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
node1.Labels = map[string]string{"type": "compute"}
node2 := test.BuildTestNode("node2", 1000, 2000, 9)
node2.Labels = map[string]string{"type": "infra"}
fakeClient := fake.NewSimpleClientset(node1, node2)
nodeSelector := "type=compute"
nodes, _ := ReadyNodes(fakeClient, nodeSelector, nil)
if nodes[0].Name != "node1" {
t.Errorf("Expected node1, got %s", nodes[0].Name)
}
}


@@ -46,7 +46,7 @@ func TestPodTypes(t *testing.T) {
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: *resource.NewQuantity(int64(10), resource.BinarySI)},
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}


@@ -22,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"github.com/golang/glog"
"github.com/kubernetes-incubator/descheduler/pkg/api"
_ "github.com/kubernetes-incubator/descheduler/pkg/api/install"
"github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
@@ -30,7 +31,7 @@ import (
func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
if policyConfigFile == "" {
fmt.Printf("policy config file not specified")
glog.V(1).Infof("policy config file not specified")
return nil, nil
}


@@ -17,7 +17,7 @@ limitations under the License.
package strategies
import (
"fmt"
"github.com/golang/glog"
"strings"
"k8s.io/kubernetes/pkg/api/v1"
@@ -47,21 +47,19 @@ func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.Descheduler
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
podsEvicted := 0
for _, node := range nodes {
fmt.Printf("\nProcessing node: %#v\n", node.Name)
glog.V(1).Infof("Processing node: %#v", node.Name)
dpm := ListDuplicatePodsOnANode(client, node)
for creator, pods := range dpm {
if len(pods) > 1 {
fmt.Printf("%#v\n", creator)
glog.V(1).Infof("%#v", creator)
// i = 0 does not evict the first pod
for i := 1; i < len(pods); i++ {
//fmt.Printf("Removing duplicate pod %#v\n", k.Name)
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
//TODO: change fmt.Printf as glogs.
fmt.Printf("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
glog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
} else {
podsEvicted++
fmt.Printf("Evicted pod: %#v (%#v)\n", pods[i].Name, err)
glog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
}
}
}


@@ -54,7 +54,7 @@ func TestFindDuplicatePods(t *testing.T) {
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: *resource.NewQuantity(int64(10), resource.BinarySI)},
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}


@@ -17,9 +17,9 @@ limitations under the License.
package strategies
import (
"fmt"
"sort"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
helper "k8s.io/kubernetes/pkg/api/v1/resource"
@@ -61,16 +61,16 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
lowNodes, targetNodes, _ := classifyNodes(npm, thresholds, targetThresholds)
if len(lowNodes) == 0 {
fmt.Printf("No node is underutilized\n")
glog.V(1).Infof("No node is underutilized")
return
} else if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
fmt.Printf("number of nodes underutilized is less than NumberOfNodes\n")
glog.V(1).Infof("number of nodes underutilized is less than NumberOfNodes")
return
} else if len(lowNodes) == len(nodes) {
fmt.Printf("all nodes are underutilized\n")
glog.V(1).Infof("all nodes are underutilized")
return
} else if len(targetNodes) == 0 {
fmt.Printf("no node is above target utilization\n")
glog.V(1).Infof("no node is above target utilization")
return
}
evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun)
@@ -78,18 +78,18 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
func validateThresholds(thresholds api.ResourceThresholds) bool {
if thresholds == nil {
fmt.Printf("no resource threshold is configured\n")
glog.V(1).Infof("no resource threshold is configured")
return false
}
found := false
for name, _ := range thresholds {
for name := range thresholds {
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
found = true
break
}
}
if !found {
fmt.Printf("one of cpu, memory, or pods resource threshold must be configured\n")
glog.V(1).Infof("one of cpu, memory, or pods resource threshold must be configured")
return false
}
return found
@@ -98,10 +98,10 @@ func validateThresholds(thresholds api.ResourceThresholds) bool {
//This function could be merged into above once we are clear.
func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
if targetThresholds == nil {
fmt.Printf("no target resource threshold is configured\n")
glog.V(1).Infof("no target resource threshold is configured")
return false
} else if _, ok := targetThresholds[v1.ResourcePods]; !ok {
fmt.Printf("no target resource threshold for pods is configured\n")
glog.V(1).Infof("no target resource threshold for pods is configured")
return false
}
return true
@@ -112,7 +112,8 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
for node, pods := range npm {
usage, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods)
nuMap := NodeUsageMap{node, usage, nonRemovablePods, bePods, bPods, gPods}
fmt.Printf("Node %#v usage: %#v\n", node.Name, usage)
glog.V(1).Infof("Node %#v usage: %#v", node.Name, usage)
if IsNodeWithLowUtilization(usage, thresholds) {
lowNodes = append(lowNodes, nuMap)
} else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
@@ -159,7 +160,7 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
if len(node.node.Status.Allocatable) > 0 {
nodeCapacity = node.node.Status.Allocatable
}
fmt.Printf("evicting pods from node %#v with usage: %#v\n", node.node.Name, node.usage)
glog.V(1).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
// evict best effort pods
evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
// evict burstable pods
@@ -188,9 +189,9 @@ func evictPods(inputPods []*v1.Pod,
mUsage := helper.GetResourceRequest(pod, v1.ResourceMemory)
success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
if !success {
fmt.Printf("Error when evicting pod: %#v (%#v)\n", pod.Name, err)
glog.Infof("Error when evicting pod: %#v (%#v)", pod.Name, err)
} else {
fmt.Printf("Evicted pod: %#v (%#v)\n", pod.Name, err)
glog.V(1).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
// update remaining pods
*podsEvicted++
nodeUsage[v1.ResourcePods] -= onePodPercentage
@@ -204,7 +205,7 @@ func evictPods(inputPods []*v1.Pod,
*totalMem -= float64(mUsage)
nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)
fmt.Printf("updated node usage: %#v\n", nodeUsage)
glog.V(1).Infof("updated node usage: %#v", nodeUsage)
// check if node utilization drops below target threshold or required capacity (cpu, memory, pods) is moved
if !IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) || (*totalPods <= 0 && *totalCpu <= 0 && *totalMem <= 0) {
break
@@ -237,7 +238,7 @@ func CreateNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap
for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(client, node)
if err != nil {
fmt.Printf("node %s will not be processed, error in accessing its pods (%#v)\n", node.Name, err)
glog.Infof("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
} else {
npm[node] = pods
}
@@ -299,7 +300,7 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*
req, _, err := helper.PodRequestsAndLimits(pod)
if err != nil {
fmt.Printf("Error computing resource usage of pod, ignoring: %#v\n", pod.Name)
glog.Infof("Error computing resource usage of pod, ignoring: %#v", pod.Name)
continue
}
for name, quantity := range req {


@@ -67,7 +67,7 @@ func TestLowNodeUtilization(t *testing.T) {
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: *resource.NewQuantity(int64(10), resource.BinarySI)},
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}


@@ -0,0 +1,102 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
"github.com/kubernetes-incubator/descheduler/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"github.com/golang/glog"
"github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//TODO: Change to client-go instead of generated clientset.
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)
// RemovePodsViolatingInterPodAntiAffinity with elimination strategy
func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
if !strategy.Enabled {
return
}
removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun)
}
// removePodsWithAffinityRules evicts pods on the node that have pod anti-affinity rules.
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
podsEvicted := 0
for _, node := range nodes {
glog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListPodsOnANode(client, node)
if err != nil {
return 0
}
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
if checkPodsWithAntiAffinityExist(pods[i], pods) {
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
glog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
} else {
podsEvicted++
glog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err)
// Since the current pod is evicted all other pods which have anti-affinity with this
// pod need not be evicted.
// Update pods.
pods = append(pods[:i], pods[i+1:]...)
i--
totalPods--
}
}
}
}
return podsEvicted
}
// checkPodsWithAntiAffinityExist checks if there are other pods on the node that the current pod cannot tolerate.
func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
affinity := schedulercache.ReconcileAffinity(pod)
if affinity != nil && affinity.PodAntiAffinity != nil {
for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) {
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
glog.Infof("%v", err)
return false
}
for _, existingPod := range pods {
if existingPod.Name != pod.Name && priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
return true
}
}
}
}
return false
}
// getPodAntiAffinityTerms gets the antiaffinity terms for the given pod.
func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
if podAntiAffinity != nil {
if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
}
}
return terms
}
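For context, a pod manifest carrying the kind of required anti-affinity term this strategy inspects might look like the following sketch (hypothetical pod and container names; the Go test that follows builds the same shape programmatically). Another pod on the same node labeled `foo: bar` would make `checkPodsWithAntiAffinityExist` return true for this pod:
```
apiVersion: v1
kind: Pod
metadata:
  name: p1
spec:
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: foo
            operator: In
            values: ["bar"]
        topologyKey: region
  containers:
  - name: app
    image: nginx
```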


@@ -0,0 +1,86 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"testing"
"fmt"
"github.com/kubernetes-incubator/descheduler/test"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
)
func TestPodAntiAffinity(t *testing.T) {
node := test.BuildTestNode("n1", 2000, 3000, 10)
p1 := test.BuildTestPod("p1", 100, 0, node.Name)
p2 := test.BuildTestPod("p2", 100, 0, node.Name)
p3 := test.BuildTestPod("p3", 100, 0, node.Name)
p3.Labels = map[string]string{"foo": "bar"}
p1.Spec.Affinity = &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"bar"},
},
},
},
TopologyKey: "region",
},
},
},
}
p3.Spec.Affinity = &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"bar"},
},
},
},
TopologyKey: "region",
},
},
},
}
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3}}, nil
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node, nil
})
expectedEvictedPodCount := 1
podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false)
if podsEvicted != expectedEvictedPodCount {
fmt.Println(podsEvicted)
t.Errorf("Unexpected no of pods evicted")
}
}


@@ -16,7 +16,8 @@ limitations under the License.
package test
import ("fmt"
import (
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -92,7 +93,6 @@ func GetCriticalPodAnnotation() map[string]string {
}
}
// BuildTestNode creates a node with specified capacity.
func BuildTestNode(name string, millicpu int64, mem int64, pods int64) *v1.Node {
node := &v1.Node{


@@ -158,15 +158,16 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
return nil, err
}
if s.tlsConfig == nil {
s.tlsConfig = &tls.Config{}
tlsConfig := s.tlsConfig
switch {
case tlsConfig == nil:
tlsConfig = &tls.Config{ServerName: host}
case len(tlsConfig.ServerName) == 0:
tlsConfig = tlsConfig.Clone()
tlsConfig.ServerName = host
}
if len(s.tlsConfig.ServerName) == 0 {
s.tlsConfig.ServerName = host
}
tlsConn := tls.Client(rwc, s.tlsConfig)
tlsConn := tls.Client(rwc, tlsConfig)
// need to manually call Handshake() so we can call VerifyHostname() below
if err := tlsConn.Handshake(); err != nil {
@@ -174,11 +175,11 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
}
// Return if we were configured to skip validation
if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {
if tlsConfig.InsecureSkipVerify {
return tlsConn, nil
}
if err := tlsConn.VerifyHostname(host); err != nil {
if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {
return nil, err
}
@@ -218,6 +219,9 @@ func (s *SpdyRoundTripper) dialWithoutProxy(url *url.URL) (net.Conn, error) {
if err != nil {
return nil, err
}
if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {
host = s.tlsConfig.ServerName
}
err = conn.VerifyHostname(host)
if err != nil {
return nil, err
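
The hunk above replaces in-place mutation of the shared s.tlsConfig with a copy-on-write pattern, so a per-request ServerName can no longer leak into a config reused by other dials. A minimal sketch of the pattern, assuming Go 1.8+ for tls.Config.Clone:

package tlsutil

import "crypto/tls"

// configForHost returns a tls.Config carrying the ServerName for this dial
// without mutating a config that may be shared across connections.
func configForHost(shared *tls.Config, host string) *tls.Config {
	switch {
	case shared == nil:
		return &tls.Config{ServerName: host}
	case len(shared.ServerName) == 0:
		cfg := shared.Clone()
		cfg.ServerName = host
		return cfg
	default:
		return shared
	}
}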

File diff suppressed because it is too large.


@@ -105,6 +105,11 @@ func (l *lifecycle) Admit(a admission.Attributes) error {
return nil
}
// always allow deletion of other resources
if a.GetOperation() == admission.Delete {
return nil
}
// always allow access review checks. Returning status about the namespace would be leaking information
if isAccessReview(a) {
return nil


@@ -135,6 +135,24 @@ func TestAdmissionNamespaceDoesNotExist(t *testing.T) {
}
t.Errorf("expected error returned from admission handler: %v", actions)
}
// verify create operations in the namespace cause an error
err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, nil))
if err == nil {
t.Errorf("Expected error rejecting creates in a namespace when it is missing")
}
// verify update operations in the namespace cause an error
err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Update, nil))
if err == nil {
t.Errorf("Expected error rejecting updates in a namespace when it is missing")
}
// verify delete operations in the namespace can proceed
err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Delete, nil))
if err != nil {
t.Errorf("Unexpected error returned from admission handler: %v", err)
}
}
// TestAdmissionNamespaceActive verifies a resource is admitted when the namespace is active.


@@ -32,13 +32,13 @@ var (
eventCounter = prometheus.NewCounter(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: "event_count",
Name: "event_total",
Help: "Counter of audit events generated and sent to the audit backend.",
})
errorCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: "error_count",
Name: "error_total",
Help: "Counter of audit events that failed to be audited properly. " +
"Plugin identifies the plugin affected by the error.",
},
@@ -47,7 +47,7 @@ var (
levelCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: "level_count",
Name: "level_total",
Help: "Counter of policy levels for audit events (1 per request).",
},
[]string{"level"},
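
The renames above follow the Prometheus convention that counter metric names end in _total. A minimal sketch of declaring and registering one such counter; the Subsystem string here is a placeholder, not necessarily the package's actual const:

package audit

import "github.com/prometheus/client_golang/prometheus"

var eventCounter = prometheus.NewCounter(
	prometheus.CounterOpts{
		Subsystem: "apiserver_audit", // placeholder for the package's subsystem const
		Name:      "event_total",     // counters end in _total by convention
		Help:      "Counter of audit events generated and sent to the audit backend.",
	})

func init() {
	prometheus.MustRegister(eventCounter)
}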


@@ -575,6 +575,14 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
routes := []*restful.RouteBuilder{}
// If there is a subresource, kind should be the parent's kind.
if hasSubresource {
fqParentKind, err := a.getResourceKind(resource, a.group.Storage[resource])
if err != nil {
return nil, err
}
kind = fqParentKind.Kind
}
switch action.Verb {
case "GET": // Get a resource.
var handler restful.RouteFunction


@@ -63,10 +63,6 @@ func GetOperationIDAndTags(r *restful.Route) (string, []string, error) {
op := r.Operation
path := r.Path
var tags []string
// TODO: This is hacky, figure out where this name conflict is created and fix it at the root.
if strings.HasPrefix(path, "/apis/extensions/v1beta1/namespaces/{namespace}/") && strings.HasSuffix(op, "ScaleScale") {
op = op[:len(op)-10] + strings.Title(strings.Split(path[48:], "/")[0]) + "Scale"
}
prefix, exists := verbs.GetPrefix(op)
if !exists {
return op, tags, fmt.Errorf("operation names should start with a verb. Cannot determine operation verb from %v", op)


@@ -537,6 +537,70 @@ func TestStartingResourceVersion(t *testing.T) {
}
}
func TestEmptyWatchEventCache(t *testing.T) {
server, etcdStorage := newEtcdTestStorage(t, etcdtest.PathPrefix())
defer server.Terminate(t)
// add a few objects
updatePod(t, etcdStorage, makeTestPod("pod1"), nil)
updatePod(t, etcdStorage, makeTestPod("pod2"), nil)
updatePod(t, etcdStorage, makeTestPod("pod3"), nil)
updatePod(t, etcdStorage, makeTestPod("pod4"), nil)
updatePod(t, etcdStorage, makeTestPod("pod5"), nil)
fooCreated := updatePod(t, etcdStorage, makeTestPod("foo"), nil)
// get rv of last pod created
rv, err := storage.ParseWatchResourceVersion(fooCreated.ResourceVersion)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
cacher := newTestCacher(etcdStorage, 10)
defer cacher.Stop()
// We now have a cacher with an empty cache of watch events and a resourceVersion of rv.
// It should support establishing watches from rv and higher, but not older.
{
watcher, err := cacher.Watch(context.TODO(), "pods/ns", strconv.Itoa(int(rv-1)), storage.Everything)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer watcher.Stop()
expectedGoneError := errors.NewGone("").ErrStatus
verifyWatchEvent(t, watcher, watch.Error, &expectedGoneError)
}
{
watcher, err := cacher.Watch(context.TODO(), "pods/ns", strconv.Itoa(int(rv+1)), storage.Everything)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer watcher.Stop()
select {
case e := <-watcher.ResultChan():
t.Errorf("unexpected event %#v", e)
case <-time.After(3 * time.Second):
// watch from rv+1 remained established successfully
}
}
{
watcher, err := cacher.Watch(context.TODO(), "pods/ns", strconv.Itoa(int(rv)), storage.Everything)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer watcher.Stop()
select {
case e := <-watcher.ResultChan():
t.Errorf("unexpected event %#v", e)
case <-time.After(3 * time.Second):
// watch from rv remained established successfully
}
}
}
func TestRandomWatchDeliver(t *testing.T) {
server, etcdStorage := newEtcdTestStorage(t, etcdtest.PathPrefix())
defer server.Terminate(t)


@@ -412,7 +412,9 @@ func (w *watchCache) SetOnEvent(onEvent func(*watchCacheEvent)) {
func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]*watchCacheEvent, error) {
size := w.endIndex - w.startIndex
oldest := w.resourceVersion
// if we have no watch events in our cache, the oldest one we can successfully deliver to a watcher
// is the *next* event we'll receive, which will be at least one greater than our current resourceVersion
oldest := w.resourceVersion + 1
if size > 0 {
oldest = w.cache[w.startIndex%w.capacity].resourceVersion
}
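
The new comment encodes a boundary condition: with an empty event cache at resourceVersion rv, the cacher can serve watches starting at rv or newer but must reject anything older, which is exactly what TestEmptyWatchEventCache asserts above. A stripped-down sketch of the rule, with cached standing in for the ring buffer's resource versions:

package cache

// oldestDeliverable mirrors the logic above: with no cached events, the oldest
// event a watcher can still receive is the next one, at resourceVersion+1;
// otherwise it is the oldest event retained in the buffer.
func oldestDeliverable(resourceVersion uint64, cached []uint64) uint64 {
	if len(cached) == 0 {
		return resourceVersion + 1
	}
	return cached[0]
}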


@@ -49,6 +49,6 @@ func InitFlags() {
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
pflag.Parse()
pflag.VisitAll(func(flag *pflag.Flag) {
glog.Infof("FLAG: --%s=%q", flag.Name, flag.Value)
glog.V(4).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
})
}
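
Demoting the flag dump to V(4) keeps it out of default output. A minimal sketch of the gating, assuming glog's standard -v flag; the message only appears when the process runs with -v=4 or higher:

package main

import (
	goflag "flag"

	"github.com/golang/glog"
)

func main() {
	goflag.Parse() // glog registers -v, -logtostderr, etc. on the standard FlagSet
	glog.V(4).Infof("FLAG: --%s=%q", "logtostderr", goflag.Lookup("logtostderr").Value)
	glog.Flush()
}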


@@ -94,6 +94,9 @@ func DialURL(url *url.URL, transport http.RoundTripper) (net.Conn, error) {
// Verify
host, _, _ := net.SplitHostPort(dialAddr)
if tlsConfig != nil && len(tlsConfig.ServerName) > 0 {
host = tlsConfig.ServerName
}
if err := tlsConn.VerifyHostname(host); err != nil {
tlsConn.Close()
return nil, err

File diff suppressed because it is too large.


@@ -1 +0,0 @@
797dc10a0ccd89bec0b29c41613025035ed23a0f


@@ -84,6 +84,15 @@ func FileExists(filename string) (bool, error) {
return true, nil
}
func FileOrSymlinkExists(filename string) (bool, error) {
if _, err := os.Lstat(filename); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
// ReadDirNoStat returns a string of files/directories contained
// in dirname without calling lstat on them.
func ReadDirNoStat(dirname string) ([]string, error) {
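
The new helper uses os.Lstat rather than os.Stat, so it reports true for a symlink whose target is missing, where the Stat-based FileExists above would follow the link and fail. A small usage sketch (fileOrSymlinkExists is a local copy of the function for illustration):

package main

import (
	"fmt"
	"os"
)

func fileOrSymlinkExists(filename string) (bool, error) {
	if _, err := os.Lstat(filename); os.IsNotExist(err) {
		return false, nil
	} else if err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	// A dangling symlink is still a filesystem entry:
	if err := os.Symlink("no-such-target", "dangling"); err != nil {
		fmt.Println(err)
		return
	}
	defer os.Remove("dangling")
	ok, _ := fileOrSymlinkExists("dangling")
	fmt.Println(ok) // true, even though the target does not exist
}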


@@ -39,8 +39,8 @@ var (
// them irrelevant. (Next we'll take it out, which may muck with
// scripts consuming the kubectl version output - but most of
// these should be looking at gitVersion already anyways.)
gitMajor string = "1" // major version, always numeric
gitMinor string = "7" // minor version, numeric possibly followed by "+"
gitMajor string = "1" // major version, always numeric
gitMinor string = "7+" // minor version, numeric possibly followed by "+"
// semantic version, derived by build scripts (see
// https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md
@@ -51,7 +51,7 @@ var (
// semantic version is a git hash, but the version itself is no
// longer the direct output of "git describe", but a slight
// translation to be semver compliant.
gitVersion string = "v1.7.0+$Format:%h$"
gitVersion string = "v1.7.4-beta.0+$Format:%h$"
gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"


@@ -13,7 +13,7 @@ go_test(
srcs = ["azure_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = ["//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library"],
deps = ["//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library"],
)
go_library(
@@ -22,6 +22,7 @@ go_library(
tags = ["automanaged"],
deps = [
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",


@@ -24,6 +24,7 @@ import (
"sync"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/golang/glog"
@@ -137,7 +138,7 @@ func (r *azureRoundTripper) RoundTrip(req *http.Request) (*http.Response, error)
}
type azureToken struct {
token azure.Token
token adal.Token
clientID string
tenantID string
apiserverID string
@@ -234,7 +235,7 @@ func (ts *azureTokenSource) retrieveTokenFromCfg() (*azureToken, error) {
}
return &azureToken{
token: azure.Token{
token: adal.Token{
AccessToken: accessToken,
RefreshToken: refreshToken,
ExpiresIn: expiresIn,
@@ -268,15 +269,15 @@ func (ts *azureTokenSource) storeTokenInCfg(token *azureToken) error {
}
func (ts *azureTokenSource) refreshToken(token *azureToken) (*azureToken, error) {
oauthConfig, err := azure.PublicCloud.OAuthConfigForTenant(token.tenantID)
oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, token.tenantID)
if err != nil {
return nil, fmt.Errorf("building the OAuth configuration for token refresh: %v", err)
}
callback := func(t azure.Token) error {
callback := func(t adal.Token) error {
return nil
}
spt, err := azure.NewServicePrincipalTokenFromManualToken(
spt, err := adal.NewServicePrincipalTokenFromManualToken(
*oauthConfig,
token.clientID,
token.apiserverID,
@@ -324,12 +325,12 @@ func newAzureTokenSourceDeviceCode(environment azure.Environment, clientID strin
}
func (ts *azureTokenSourceDeviceCode) Token() (*azureToken, error) {
oauthConfig, err := ts.environment.OAuthConfigForTenant(ts.tenantID)
oauthConfig, err := adal.NewOAuthConfig(ts.environment.ActiveDirectoryEndpoint, ts.tenantID)
if err != nil {
return nil, fmt.Errorf("building the OAuth configuration for device code authentication: %v", err)
}
client := &autorest.Client{}
deviceCode, err := azure.InitiateDeviceAuth(client, *oauthConfig, ts.clientID, ts.apiserverID)
deviceCode, err := adal.InitiateDeviceAuth(client, *oauthConfig, ts.clientID, ts.apiserverID)
if err != nil {
return nil, fmt.Errorf("initialing the device code authentication: %v", err)
}
@@ -339,7 +340,7 @@ func (ts *azureTokenSourceDeviceCode) Token() (*azureToken, error) {
return nil, fmt.Errorf("prompting the device code message: %v", err)
}
token, err := azure.WaitForUserCompletion(client, deviceCode)
token, err := adal.WaitForUserCompletion(client, deviceCode)
if err != nil {
return nil, fmt.Errorf("waiting for device code authentication to complete: %v", err)
}
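
The migration above tracks go-autorest v8.0.0, where the token type and the device-code flow moved from the azure package into adal. A hedged sketch of the new call sequence; tenantID, clientID, and apiserverID are placeholders:

package azureauth

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/azure"
)

// deviceCodeToken walks the v8 device-code flow end to end.
func deviceCodeToken(tenantID, clientID, apiserverID string) (*adal.Token, error) {
	oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
	if err != nil {
		return nil, err
	}
	client := &autorest.Client{}
	deviceCode, err := adal.InitiateDeviceAuth(client, *oauthConfig, clientID, apiserverID)
	if err != nil {
		return nil, err
	}
	fmt.Println(*deviceCode.Message) // prompt the user to complete sign-in
	return adal.WaitForUserCompletion(client, deviceCode)
}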


@@ -23,7 +23,7 @@ import (
"testing"
"time"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/adal"
)
func TestAzureTokenSource(t *testing.T) {
@@ -120,8 +120,8 @@ func token2Cfg(token *azureToken) map[string]string {
return cfg
}
func newFackeAzureToken(accessToken string, expiresOn string) azure.Token {
return azure.Token{
func newFackeAzureToken(accessToken string, expiresOn string) adal.Token {
return adal.Token{
AccessToken: accessToken,
RefreshToken: "fake",
ExpiresIn: "3600",


@@ -30,28 +30,33 @@
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/compute",
"Comment": "v7.0.1-beta",
"Rev": "0984e0641ae43b89283223034574d6465be93bf4"
"Comment": "v10.0.4-beta-1-g786cc84",
"Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/containerregistry",
"Comment": "v7.0.1-beta",
"Rev": "0984e0641ae43b89283223034574d6465be93bf4"
"Comment": "v10.0.4-beta-1-g786cc84",
"Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/disk",
"Comment": "v10.0.4-beta-1-g786cc84",
"Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network",
"Comment": "v7.0.1-beta",
"Rev": "0984e0641ae43b89283223034574d6465be93bf4"
"Comment": "v10.0.4-beta-1-g786cc84",
"Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/storage",
"Comment": "v7.0.1-beta",
"Rev": "0984e0641ae43b89283223034574d6465be93bf4"
"Comment": "v10.0.4-beta-1-g786cc84",
"Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
"Comment": "v7.0.1-beta",
"Rev": "0984e0641ae43b89283223034574d6465be93bf4"
"Comment": "v10.0.4-beta-1-g786cc84",
"Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
},
{
"ImportPath": "github.com/Azure/go-ansiterm",
@@ -63,28 +68,33 @@
},
{
"ImportPath": "github.com/Azure/go-autorest/autorest",
"Comment": "v7.2.3",
"Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
"Comment": "v8.0.0",
"Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
},
{
"ImportPath": "github.com/Azure/go-autorest/autorest/adal",
"Comment": "v8.0.0",
"Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
},
{
"ImportPath": "github.com/Azure/go-autorest/autorest/azure",
"Comment": "v7.2.3",
"Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
"Comment": "v8.0.0",
"Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
},
{
"ImportPath": "github.com/Azure/go-autorest/autorest/date",
"Comment": "v7.2.3",
"Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
"Comment": "v8.0.0",
"Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
},
{
"ImportPath": "github.com/Azure/go-autorest/autorest/to",
"Comment": "v7.2.3",
"Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
"Comment": "v8.0.0",
"Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
},
{
"ImportPath": "github.com/Azure/go-autorest/autorest/validation",
"Comment": "v7.2.3",
"Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
"Comment": "v8.0.0",
"Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
},
{
"ImportPath": "github.com/MakeNowJust/heredoc",
@@ -726,23 +736,23 @@
},
{
"ImportPath": "github.com/coreos/go-oidc/http",
"Rev": "be73733bb8cc830d0205609b95d125215f8e9c70"
"Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f"
},
{
"ImportPath": "github.com/coreos/go-oidc/jose",
"Rev": "be73733bb8cc830d0205609b95d125215f8e9c70"
"Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f"
},
{
"ImportPath": "github.com/coreos/go-oidc/key",
"Rev": "be73733bb8cc830d0205609b95d125215f8e9c70"
"Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f"
},
{
"ImportPath": "github.com/coreos/go-oidc/oauth2",
"Rev": "be73733bb8cc830d0205609b95d125215f8e9c70"
"Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f"
},
{
"ImportPath": "github.com/coreos/go-oidc/oidc",
"Rev": "be73733bb8cc830d0205609b95d125215f8e9c70"
"Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f"
},
{
"ImportPath": "github.com/coreos/go-semver/semver",
@@ -1482,123 +1492,123 @@
},
{
"ImportPath": "github.com/gophercloud/gophercloud",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/common/extensions",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/images",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/servers",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/ports",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/openstack/utils",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gophercloud/gophercloud/pagination",
"Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11"
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
{
"ImportPath": "github.com/gorilla/context",
@@ -2353,6 +2363,11 @@
"ImportPath": "github.com/samuel/go-zookeeper/zk",
"Rev": "177002e16a0061912f02377e2dd8951a8b3551bc"
},
{
"ImportPath": "github.com/satori/uuid",
"Comment": "v1.1.0-8-g5bf94b6",
"Rev": "5bf94b69c6b68ee1b541973bb8e1144db23a194b"
},
{
"ImportPath": "github.com/seccomp/libseccomp-golang",
"Rev": "1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1"
@@ -2870,19 +2885,23 @@
},
{
"ImportPath": "gopkg.in/gcfg.v1",
"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
"Comment": "v1.2.0",
"Rev": "27e4946190b4a327b539185f2b5b1f7c84730728"
},
{
"ImportPath": "gopkg.in/gcfg.v1/scanner",
"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
"Comment": "v1.2.0",
"Rev": "27e4946190b4a327b539185f2b5b1f7c84730728"
},
{
"ImportPath": "gopkg.in/gcfg.v1/token",
"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
"Comment": "v1.2.0",
"Rev": "27e4946190b4a327b539185f2b5b1f7c84730728"
},
{
"ImportPath": "gopkg.in/gcfg.v1/types",
"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
"Comment": "v1.2.0",
"Rev": "27e4946190b4a327b539185f2b5b1f7c84730728"
},
{
"ImportPath": "gopkg.in/inf.v0",
@@ -2893,6 +2912,11 @@
"Comment": "v1.0-16-g20b71e5",
"Rev": "20b71e5b60d756d3d2f80def009790325acc2b23"
},
{
"ImportPath": "gopkg.in/warnings.v0",
"Comment": "v0.1.1",
"Rev": "8a331561fe74dadba6edfc59f3be66c22c3b065d"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77"


@@ -8952,6 +8952,216 @@ SOFTWARE.
================================================================================
================================================================================
= vendor/github.com/Azure/azure-sdk-for-go/arm/disk licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/Azure/azure-sdk-for-go/LICENSE cce6fd055830ca30ff78fdf077e870d6 -
================================================================================
================================================================================
= vendor/github.com/Azure/azure-sdk-for-go/arm/network licensed under: =
@@ -9839,6 +10049,205 @@ THE SOFTWARE.
================================================================================
================================================================================
= vendor/github.com/Azure/go-autorest/autorest/adal licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b -
================================================================================
================================================================================
= vendor/github.com/Azure/go-autorest/autorest/azure licensed under: =
@@ -75366,6 +75775,34 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/github.com/satori/uuid licensed under: =
Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= vendor/github.com/satori/uuid/LICENSE 02d5d17de0c82a23a09863acccc026f6 -
================================================================================
================================================================================
= vendor/github.com/seccomp/libseccomp-golang licensed under: =
@@ -84997,6 +85434,38 @@ SOFTWARE.
================================================================================
================================================================================
= vendor/gopkg.in/warnings.v0 licensed under: =
Copyright (c) 2016 Péter Surányi.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/gopkg.in/warnings.v0/LICENSE c6775875c9d604beb22447dfae3d7049 -
================================================================================
================================================================================
= vendor/gopkg.in/yaml.v2 licensed under: =


@@ -2,7 +2,7 @@
"swagger": "2.0",
"info": {
"title": "Kubernetes",
"version": "v1.7.1"
"version": "v1.7.6"
},
"paths": {
"/api/": {
@@ -3813,7 +3813,7 @@
},
"/api/v1/namespaces/{namespace}/pods/{name}/binding": {
"post": {
"description": "create binding of a Binding",
"description": "create binding of a Pod",
"consumes": [
"*/*"
],
@@ -3828,7 +3828,7 @@
"tags": [
"core_v1"
],
"operationId": "createCoreV1NamespacedBindingBinding",
"operationId": "createCoreV1NamespacedPodBinding",
"parameters": [
{
"name": "body",
@@ -3885,7 +3885,7 @@
},
"/api/v1/namespaces/{namespace}/pods/{name}/eviction": {
"post": {
"description": "create eviction of an Eviction",
"description": "create eviction of a Pod",
"consumes": [
"*/*"
],
@@ -3900,7 +3900,7 @@
"tags": [
"core_v1"
],
"operationId": "createCoreV1NamespacedEvictionEviction",
"operationId": "createCoreV1NamespacedPodEviction",
"parameters": [
{
"name": "body",
@@ -5888,7 +5888,7 @@
},
"/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/scale": {
"get": {
"description": "read scale of the specified Scale",
"description": "read scale of the specified ReplicationController",
"consumes": [
"*/*"
],
@@ -5903,7 +5903,7 @@
"tags": [
"core_v1"
],
"operationId": "readCoreV1NamespacedScaleScale",
"operationId": "readCoreV1NamespacedReplicationControllerScale",
"responses": {
"200": {
"description": "OK",
@@ -5923,7 +5923,7 @@
}
},
"put": {
"description": "replace scale of the specified Scale",
"description": "replace scale of the specified ReplicationController",
"consumes": [
"*/*"
],
@@ -5938,7 +5938,7 @@
"tags": [
"core_v1"
],
"operationId": "replaceCoreV1NamespacedScaleScale",
"operationId": "replaceCoreV1NamespacedReplicationControllerScale",
"parameters": [
{
"name": "body",
@@ -5968,7 +5968,7 @@
}
},
"patch": {
"description": "partially update scale of the specified Scale",
"description": "partially update scale of the specified ReplicationController",
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
@@ -5985,7 +5985,7 @@
"tags": [
"core_v1"
],
"operationId": "patchCoreV1NamespacedScaleScale",
"operationId": "patchCoreV1NamespacedReplicationControllerScale",
"parameters": [
{
"name": "body",
@@ -20501,7 +20501,7 @@
},
"/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/rollback": {
"post": {
"description": "create rollback of a DeploymentRollback",
"description": "create rollback of a Deployment",
"consumes": [
"*/*"
],
@@ -20516,7 +20516,7 @@
"tags": [
"apps_v1beta1"
],
"operationId": "createAppsV1beta1NamespacedDeploymentRollbackRollback",
"operationId": "createAppsV1beta1NamespacedDeploymentRollback",
"parameters": [
{
"name": "body",
@@ -20573,7 +20573,7 @@
},
"/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale": {
"get": {
"description": "read scale of the specified Scale",
"description": "read scale of the specified Deployment",
"consumes": [
"*/*"
],
@@ -20588,7 +20588,7 @@
"tags": [
"apps_v1beta1"
],
"operationId": "readAppsV1beta1NamespacedScaleScale",
"operationId": "readAppsV1beta1NamespacedDeploymentScale",
"responses": {
"200": {
"description": "OK",
@@ -20608,7 +20608,7 @@
}
},
"put": {
"description": "replace scale of the specified Scale",
"description": "replace scale of the specified Deployment",
"consumes": [
"*/*"
],
@@ -20623,7 +20623,7 @@
"tags": [
"apps_v1beta1"
],
"operationId": "replaceAppsV1beta1NamespacedScaleScale",
"operationId": "replaceAppsV1beta1NamespacedDeploymentScale",
"parameters": [
{
"name": "body",
@@ -20653,7 +20653,7 @@
}
},
"patch": {
"description": "partially update scale of the specified Scale",
"description": "partially update scale of the specified Deployment",
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
@@ -20670,7 +20670,7 @@
"tags": [
"apps_v1beta1"
],
"operationId": "patchAppsV1beta1NamespacedScaleScale",
"operationId": "patchAppsV1beta1NamespacedDeploymentScale",
"parameters": [
{
"name": "body",
@@ -30553,7 +30553,7 @@
},
"/apis/extensions/v1beta1/namespaces/{namespace}/deployments/{name}/rollback": {
"post": {
"description": "create rollback of a DeploymentRollback",
"description": "create rollback of a Deployment",
"consumes": [
"*/*"
],
@@ -30568,7 +30568,7 @@
"tags": [
"extensions_v1beta1"
],
"operationId": "createExtensionsV1beta1NamespacedDeploymentRollbackRollback",
"operationId": "createExtensionsV1beta1NamespacedDeploymentRollback",
"parameters": [
{
"name": "body",
@@ -30625,7 +30625,7 @@
},
"/apis/extensions/v1beta1/namespaces/{namespace}/deployments/{name}/scale": {
"get": {
"description": "read scale of the specified Scale",
"description": "read scale of the specified Deployment",
"consumes": [
"*/*"
],
@@ -30640,7 +30640,7 @@
"tags": [
"extensions_v1beta1"
],
"operationId": "readExtensionsV1beta1NamespacedDeploymentsScale",
"operationId": "readExtensionsV1beta1NamespacedDeploymentScale",
"responses": {
"200": {
"description": "OK",
@@ -30660,7 +30660,7 @@
}
},
"put": {
"description": "replace scale of the specified Scale",
"description": "replace scale of the specified Deployment",
"consumes": [
"*/*"
],
@@ -30675,7 +30675,7 @@
"tags": [
"extensions_v1beta1"
],
"operationId": "replaceExtensionsV1beta1NamespacedDeploymentsScale",
"operationId": "replaceExtensionsV1beta1NamespacedDeploymentScale",
"parameters": [
{
"name": "body",
@@ -30705,7 +30705,7 @@
}
},
"patch": {
"description": "partially update scale of the specified Scale",
"description": "partially update scale of the specified Deployment",
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
@@ -30722,7 +30722,7 @@
"tags": [
"extensions_v1beta1"
],
"operationId": "patchExtensionsV1beta1NamespacedDeploymentsScale",
"operationId": "patchExtensionsV1beta1NamespacedDeploymentScale",
"parameters": [
{
"name": "body",
@@ -32467,7 +32467,7 @@
},
"/apis/extensions/v1beta1/namespaces/{namespace}/replicasets/{name}/scale": {
"get": {
"description": "read scale of the specified Scale",
"description": "read scale of the specified ReplicaSet",
"consumes": [
"*/*"
],
@@ -32482,7 +32482,7 @@
"tags": [
"extensions_v1beta1"
],
"operationId": "readExtensionsV1beta1NamespacedReplicasetsScale",
"operationId": "readExtensionsV1beta1NamespacedReplicaSetScale",
"responses": {
"200": {
"description": "OK",
@@ -32502,7 +32502,7 @@
}
},
"put": {
"description": "replace scale of the specified Scale",
"description": "replace scale of the specified ReplicaSet",
"consumes": [
"*/*"
],
@@ -32517,7 +32517,7 @@
"tags": [
"extensions_v1beta1"
],
"operationId": "replaceExtensionsV1beta1NamespacedReplicasetsScale",
"operationId": "replaceExtensionsV1beta1NamespacedReplicaSetScale",
"parameters": [
{
"name": "body",
@@ -32547,7 +32547,7 @@
}
},
"patch": {
"description": "partially update scale of the specified Scale",
"description": "partially update scale of the specified ReplicaSet",
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
@@ -32564,7 +32564,7 @@
"tags": [
"extensions_v1beta1"
],
"operationId": "patchExtensionsV1beta1NamespacedReplicasetsScale",
"operationId": "patchExtensionsV1beta1NamespacedReplicaSetScale",
"parameters": [
{
"name": "body",
@@ -32775,7 +32775,7 @@
},
"/apis/extensions/v1beta1/namespaces/{namespace}/replicationcontrollers/{name}/scale": {
"get": {
"description": "read scale of the specified Scale",
"description": "read scale of the specified ReplicationControllerDummy",
"consumes": [
"*/*"
],
@@ -32790,7 +32790,7 @@
"tags": [
"extensions_v1beta1"
],
"operationId": "readExtensionsV1beta1NamespacedReplicationcontrollersScale",
"operationId": "readExtensionsV1beta1NamespacedReplicationControllerDummyScale",
"responses": {
"200": {
"description": "OK",
@@ -32810,7 +32810,7 @@
}
},
"put": {
"description": "replace scale of the specified Scale",
"description": "replace scale of the specified ReplicationControllerDummy",
"consumes": [
"*/*"
],
@@ -32825,7 +32825,7 @@
"tags": [
"extensions_v1beta1"
],
"operationId": "replaceExtensionsV1beta1NamespacedReplicationcontrollersScale",
"operationId": "replaceExtensionsV1beta1NamespacedReplicationControllerDummyScale",
"parameters": [
{
"name": "body",
@@ -32855,7 +32855,7 @@
}
},
"patch": {
"description": "partially update scale of the specified Scale",
"description": "partially update scale of the specified ReplicationControllerDummy",
"consumes": [
"application/json-patch+json",
"application/merge-patch+json",
@@ -32872,7 +32872,7 @@
"tags": [
"extensions_v1beta1"
],
"operationId": "patchExtensionsV1beta1NamespacedReplicationcontrollersScale",
"operationId": "patchExtensionsV1beta1NamespacedReplicationControllerDummyScale",
"parameters": [
{
"name": "body",


@@ -1737,8 +1737,8 @@
{
"type": "v1beta1.DeploymentRollback",
"method": "POST",
"summary": "create rollback of a DeploymentRollback",
"nickname": "createNamespacedDeploymentRollbackRollback",
"summary": "create rollback of a Deployment",
"nickname": "createNamespacedDeploymentRollback",
"parameters": [
{
"type": "string",
@@ -1798,8 +1798,8 @@
{
"type": "v1beta1.Scale",
"method": "GET",
"summary": "read scale of the specified Scale",
"nickname": "readNamespacedScaleScale",
"summary": "read scale of the specified Deployment",
"nickname": "readNamespacedDeploymentScale",
"parameters": [
{
"type": "string",
@@ -1845,8 +1845,8 @@
{
"type": "v1beta1.Scale",
"method": "PUT",
"summary": "replace scale of the specified Scale",
"nickname": "replaceNamespacedScaleScale",
"summary": "replace scale of the specified Deployment",
"nickname": "replaceNamespacedDeploymentScale",
"parameters": [
{
"type": "string",
@@ -1900,8 +1900,8 @@
{
"type": "v1beta1.Scale",
"method": "PATCH",
"summary": "partially update scale of the specified Scale",
"nickname": "patchNamespacedScaleScale",
"summary": "partially update scale of the specified Deployment",
"nickname": "patchNamespacedDeploymentScale",
"parameters": [
{
"type": "string",


@@ -1902,8 +1902,8 @@
{
"type": "v1beta1.DeploymentRollback",
"method": "POST",
"summary": "create rollback of a DeploymentRollback",
"nickname": "createNamespacedDeploymentRollbackRollback",
"summary": "create rollback of a Deployment",
"nickname": "createNamespacedDeploymentRollback",
"parameters": [
{
"type": "string",
@@ -1963,8 +1963,8 @@
{
"type": "v1beta1.Scale",
"method": "GET",
"summary": "read scale of the specified Scale",
"nickname": "readNamespacedScaleScale",
"summary": "read scale of the specified Deployment",
"nickname": "readNamespacedDeploymentScale",
"parameters": [
{
"type": "string",
@@ -2010,8 +2010,8 @@
{
"type": "v1beta1.Scale",
"method": "PUT",
"summary": "replace scale of the specified Scale",
"nickname": "replaceNamespacedScaleScale",
"summary": "replace scale of the specified Deployment",
"nickname": "replaceNamespacedDeploymentScale",
"parameters": [
{
"type": "string",
@@ -2065,8 +2065,8 @@
{
"type": "v1beta1.Scale",
"method": "PATCH",
"summary": "partially update scale of the specified Scale",
"nickname": "patchNamespacedScaleScale",
"summary": "partially update scale of the specified Deployment",
"nickname": "patchNamespacedDeploymentScale",
"parameters": [
{
"type": "string",
@@ -5656,8 +5656,8 @@
{
"type": "v1beta1.Scale",
"method": "GET",
"summary": "read scale of the specified Scale",
"nickname": "readNamespacedScaleScale",
"summary": "read scale of the specified ReplicaSet",
"nickname": "readNamespacedReplicaSetScale",
"parameters": [
{
"type": "string",
@@ -5703,8 +5703,8 @@
{
"type": "v1beta1.Scale",
"method": "PUT",
"summary": "replace scale of the specified Scale",
"nickname": "replaceNamespacedScaleScale",
"summary": "replace scale of the specified ReplicaSet",
"nickname": "replaceNamespacedReplicaSetScale",
"parameters": [
{
"type": "string",
@@ -5758,8 +5758,8 @@
{
"type": "v1beta1.Scale",
"method": "PATCH",
"summary": "partially update scale of the specified Scale",
"nickname": "patchNamespacedScaleScale",
"summary": "partially update scale of the specified ReplicaSet",
"nickname": "patchNamespacedReplicaSetScale",
"parameters": [
{
"type": "string",
@@ -5986,8 +5986,8 @@
{
"type": "v1beta1.Scale",
"method": "GET",
"summary": "read scale of the specified Scale",
"nickname": "readNamespacedScaleScale",
"summary": "read scale of the specified ReplicationControllerDummy",
"nickname": "readNamespacedReplicationControllerDummyScale",
"parameters": [
{
"type": "string",
@@ -6033,8 +6033,8 @@
{
"type": "v1beta1.Scale",
"method": "PUT",
"summary": "replace scale of the specified Scale",
"nickname": "replaceNamespacedScaleScale",
"summary": "replace scale of the specified ReplicationControllerDummy",
"nickname": "replaceNamespacedReplicationControllerDummyScale",
"parameters": [
{
"type": "string",
@@ -6088,8 +6088,8 @@
{
"type": "v1beta1.Scale",
"method": "PATCH",
"summary": "partially update scale of the specified Scale",
"nickname": "patchNamespacedScaleScale",
"summary": "partially update scale of the specified ReplicationControllerDummy",
"nickname": "patchNamespacedReplicationControllerDummyScale",
"parameters": [
{
"type": "string",


@@ -9271,8 +9271,8 @@
{
"type": "v1.Binding",
"method": "POST",
"summary": "create binding of a Binding",
"nickname": "createNamespacedBindingBinding",
"summary": "create binding of a Pod",
"nickname": "createNamespacedPodBinding",
"parameters": [
{
"type": "string",
@@ -9332,8 +9332,8 @@
{
"type": "v1beta1.Eviction",
"method": "POST",
"summary": "create eviction of an Eviction",
"nickname": "createNamespacedEvictionEviction",
"summary": "create eviction of a Pod",
"nickname": "createNamespacedPodEviction",
"parameters": [
{
"type": "string",
@@ -12234,8 +12234,8 @@
{
"type": "v1.Scale",
"method": "GET",
"summary": "read scale of the specified Scale",
"nickname": "readNamespacedScaleScale",
"summary": "read scale of the specified ReplicationController",
"nickname": "readNamespacedReplicationControllerScale",
"parameters": [
{
"type": "string",
@@ -12281,8 +12281,8 @@
{
"type": "v1.Scale",
"method": "PUT",
"summary": "replace scale of the specified Scale",
"nickname": "replaceNamespacedScaleScale",
"summary": "replace scale of the specified ReplicationController",
"nickname": "replaceNamespacedReplicationControllerScale",
"parameters": [
{
"type": "string",
@@ -12336,8 +12336,8 @@
{
"type": "v1.Scale",
"method": "PATCH",
"summary": "partially update scale of the specified Scale",
"nickname": "patchNamespacedScaleScale",
"summary": "partially update scale of the specified ReplicationController",
"nickname": "patchNamespacedReplicationControllerScale",
"parameters": [
{
"type": "string",


@@ -383,7 +383,13 @@ function kube::build::short_hash() {
# a workaround for bug https://github.com/docker/docker/issues/3968.
function kube::build::destroy_container() {
"${DOCKER[@]}" kill "$1" >/dev/null 2>&1 || true
"${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true
if [[ $("${DOCKER[@]}" version --format '{{.Server.Version}}') = 17.06.0* ]]; then
# Workaround https://github.com/moby/moby/issues/33948.
# TODO: remove when 17.06.0 is not relevant anymore
DOCKER_API_VERSION=v1.29 "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true
else
"${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true
fi
"${DOCKER[@]}" rm -f -v "$1" >/dev/null 2>&1 || true
}
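For reference, the version probe this guard keys on can be run standalone; a minimal sketch, assuming a reachable Docker daemon:

    # Prints only the daemon version, e.g. "17.06.0-ce"; any 17.06.0* match
    # above is what triggers the DOCKER_API_VERSION=v1.29 pin.
    docker version --format '{{.Server.Version}}'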


@@ -287,7 +287,7 @@ function kube::release::create_docker_images_for_server() {
local docker_build_path="${binary_dir}/${binary_name}.dockerbuild"
local docker_file_path="${docker_build_path}/Dockerfile"
local binary_file_path="${binary_dir}/${binary_name}"
local docker_image_tag="${KUBE_DOCKER_REGISTRY:-gcr.io/google_containers}"
local docker_image_tag="gcr.io/google_containers"
rm -rf ${docker_build_path}
mkdir -p ${docker_build_path}


@@ -7,3 +7,7 @@ build --workspace_status_command hack/print-workspace-status.sh
# Make /tmp hermetic
build --sandbox_tmpfs_path=/tmp
# Ensure that Bazel never runs as root, which can cause unit tests to fail.
# This flag requires Bazel 0.5.0+
build --sandbox_fake_username


@@ -238,6 +238,11 @@ define TEST_E2E_NODE_HELP_INFO
# IMAGE_SERVICE_ENDPOINT: remote image endpoint to connect to, to prepull images.
# Used when RUNTIME is set to "remote".
# IMAGE_CONFIG_FILE: path to a file containing image configuration.
# SYSTEM_SPEC_NAME: The name of the system spec to be used for validating the
# image in the node conformance test. The specs are located at
# test/e2e_node/system/specs/. For example, "SYSTEM_SPEC_NAME=gke" will use
# the spec at test/e2e_node/system/specs/gke.yaml. If unspecified, the
# default built-in spec (system.DefaultSpec) will be used.
#
# Example:
# make test-e2e-node FOCUS=Kubelet SKIP=container
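A hypothetical invocation combining the new variable with the existing example above (the gke.yaml path comes straight from the help text):

    # Validate the node image against the GKE system spec instead of the
    # built-in default spec.
    make test-e2e-node FOCUS=Kubelet SKIP=container SYSTEM_SPEC_NAME=gke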


@@ -1,15 +1,15 @@
http_archive(
name = "io_bazel_rules_go",
sha256 = "a1596c14c799d5a1b5f49ca28fa948414c2242110d69ef324d6ed160ec890dbf",
strip_prefix = "rules_go-03c634753160632c00f506afeafc819fbea4c422",
urls = ["https://github.com/bazelbuild/rules_go/archive/03c634753160632c00f506afeafc819fbea4c422.tar.gz"],
sha256 = "08969d1cb8ad523f451098da53117108ae564698bcee281c9a2890836e5be0ee",
strip_prefix = "rules_go-473417ec48310325e1fcb1c154621a83197a17fe",
urls = ["https://github.com/bazelbuild/rules_go/archive/473417ec48310325e1fcb1c154621a83197a17fe.tar.gz"],
)
http_archive(
name = "io_kubernetes_build",
sha256 = "a9fb7027f060b868cdbd235a0de0971b557b9d26f9c89e422feb80f48d8c0e90",
strip_prefix = "repo-infra-9dedd5f4093884c133ad5ea73695b28338b954ab",
urls = ["https://github.com/kubernetes/repo-infra/archive/9dedd5f4093884c133ad5ea73695b28338b954ab.tar.gz"],
sha256 = "232fec0ffcb53df5e87fc036ae3e966ea32122fc89ead4c32581b3255c1ab7d0",
strip_prefix = "repo-infra-f521b5d472e00e05da5394994942064510a6e8bf",
urls = ["https://github.com/kubernetes/repo-infra/archive/f521b5d472e00e05da5394994942064510a6e8bf.tar.gz"],
)
# This contains a patch to not prepend ./ to tarfiles produced by pkg_tar.
@@ -17,7 +17,7 @@ http_archive(
# https://bazel-review.googlesource.com/#/c/10390/
http_archive(
name = "io_bazel",
sha256 = "667d32da016b1e2f63cf345cd3583989ec4a165034df383a01996d93635753a0",
sha256 = "892a84aa1e7c1f99fb57bb056cb648745c513077252815324579a012d263defb",
strip_prefix = "bazel-df2c687c22bdd7c76f3cdcc85f38fefd02f0b844",
urls = ["https://github.com/ixdy/bazel/archive/df2c687c22bdd7c76f3cdcc85f38fefd02f0b844.tar.gz"],
)


@@ -9,6 +9,6 @@ Calico is an implementation of the Kubernetes network policy API. The provided
### Learn More
Learn more about Calico at http://docs.projectcalico.org
Learn more about Calico at https://docs.projectcalico.org
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/calico-policy-controller/README.md?pixel)]()


@@ -0,0 +1,67 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- endpoints
verbs:
- get
- apiGroups: [""]
resources:
- services
verbs:
- get
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- globalbgpconfigs
- ippools
- globalnetworkpolicies
verbs:
- create
- get
- list
- update
- patch
- delete
- watch


@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: calico
namespace: kube-system
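Once applied, the role/binding pair can be spot-checked with impersonation; a sketch, assuming a kubectl context against an RBAC-enabled cluster:

    # Should print "yes" if the calico service account picked up the role.
    kubectl auth can-i list pods \
        --as=system:serviceaccount:kube-system:calico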


@@ -0,0 +1,14 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-cpva
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
- apiGroups: ["apps", "extensions"]
resources: ["deployments", "daemonsets"]
verbs: ["patch"]


@@ -0,0 +1,15 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-cpva
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: calico-cpva
namespace: kube-system
roleRef:
kind: ClusterRole
name: calico-cpva
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,8 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: calico-cpva
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile


@@ -23,12 +23,13 @@ spec:
nodeSelector:
projectcalico.org/ds-ready: "true"
hostNetwork: true
serviceAccountName: calico
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v1.3.0
image: calico/node:v2.5.1
env:
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
@@ -46,6 +47,8 @@ spec:
value: "none"
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "true"
- name: FELIX_HEALTHENABLED
value: "true"
- name: IP
value: ""
- name: NO_DEFAULT_POOLS
@@ -58,9 +61,18 @@ spec:
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: __CALICO_NODE_CPU__
livenessProbe:
httpGet:
path: /liveness
port: 9099
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
@@ -71,7 +83,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v1.9.1
image: calico/cni:v1.10.0
command: ["/install-cni.sh"]
env:
- name: CNI_CONF_NAME
@@ -103,7 +115,7 @@ spec:
{
"type": "portmap",
"capabilities": {"portMappings": true},
"noSnat": true
"snat": true
}
]
}


@@ -0,0 +1,22 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-node-vertical-autoscaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
data:
node-autoscaler: |-
{
"calico-node": {
"requests": {
"cpu": {
"base": "80m",
"step": "20m",
"nodesPerStep": 10,
"max": "500m"
}
}
}
}
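Read literally, this steps the calico-node CPU request up with cluster size; a rough bash sketch of the curve (the exact cpvpa rounding behavior is an assumption):

    nodes=100; base=80; step=20; per=10; max=500
    cpu=$(( base + step * ( (nodes + per - 1) / per ) ))  # one step per started group of 10 nodes (assumed rounding)
    (( cpu > max )) && cpu=$max
    echo "${cpu}m"  # -> 280m for 100 nodes; capped at 500m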


@@ -0,0 +1,37 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: calico-node-vertical-autoscaler
namespace: kube-system
labels:
k8s-app: calico-node-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: calico-node-autoscaler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/cpvpa-amd64:v0.6.0
name: autoscaler
command:
- /cpvpa
- --target=daemonset/calico-node
- --namespace=kube-system
- --logtostderr=true
- --poll-period-seconds=30
- --v=2
- --config-file=/etc/config/node-autoscaler
volumeMounts:
- name: config
mountPath: /etc/config
volumes:
- name: config
configMap:
name: calico-node-vertical-autoscaler
serviceAccountName: calico-cpva


@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile


@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global BGP Configuration
kind: CustomResourceDefinition
metadata:
name: globalbgpconfigs.crd.projectcalico.org
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalBGPConfig
plural: globalbgpconfigs
singular: globalbgpconfig


@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global Felix Configuration
kind: CustomResourceDefinition
metadata:
name: globalfelixconfigs.crd.projectcalico.org
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalFelixConfig
plural: globalfelixconfigs
singular: globalfelixconfig


@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global Network Policies
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy


@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico IP Pools
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool


@@ -8,7 +8,6 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
k8s-app: calico-typha
spec:
replicas: __CALICO_TYPHA_REPLICAS__
revisionHistoryLimit: 2
template:
metadata:
@@ -21,8 +20,9 @@ spec:
- key: CriticalAddonsOnly
operator: Exists
hostNetwork: true
serviceAccountName: calico
containers:
- image: calico/typha:v0.2.2
- image: calico/typha:v0.4.1
name: calico-typha
ports:
- containerPort: 5473
@@ -45,13 +45,23 @@ spec:
value: "kubernetes"
- name: TYPHA_MAXCONNECTIONSLOWERLIMIT
value: "1"
- name: TYPHA_HEALTHENABLED
value: "true"
volumeMounts:
- mountPath: /etc/calico
name: etc-calico
readOnly: true
resources:
requests:
cpu: __CALICO_TYPHA_CPU__
livenessProbe:
httpGet:
path: /liveness
port: 9098
periodSeconds: 30
initialDelaySeconds: 30
readinessProbe:
httpGet:
path: /readiness
port: 9098
periodSeconds: 10
volumes:
- name: etc-calico
hostPath:


@@ -0,0 +1,24 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-typha-horizontal-autoscaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
data:
ladder: |-
{
"coresToReplicas": [],
"nodesToReplicas":
[
[1, 1],
[10, 2],
[100, 3],
[250, 4],
[500, 5],
[1000, 6],
[1500, 7],
[2000, 8]
]
}
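The ladder maps a node count to a typha replica count, the highest threshold not exceeding the count winning (an assumption about the cluster-proportional-autoscaler's ladder mode); a small bash sketch:

    nodes=150; replicas=1
    for pair in "1 1" "10 2" "100 3" "250 4" "500 5" "1000 6" "1500 7" "2000 8"; do
        set -- $pair
        (( nodes >= $1 )) && replicas=$2
    done
    echo "${replicas}"  # -> 3 replicas for 150 nodes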


@@ -0,0 +1,33 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-typha-horizontal-autoscaler
namespace: kube-system
labels:
k8s-app: calico-typha-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: calico-typha-autoscaler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2
name: autoscaler
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=calico-typha-horizontal-autoscaler
- --target=deployment/calico-typha
- --logtostderr=true
- --v=2
resources:
requests:
cpu: 10m
limits:
cpu: 10m


@@ -0,0 +1,22 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-typha-vertical-autoscaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
data:
typha-autoscaler: |-
{
"calico-typha": {
"requests": {
"cpu": {
"base": "120m",
"step": "80m",
"nodesPerStep": 10,
"max": "1000m"
}
}
}
}


@@ -0,0 +1,37 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: calico-typha-vertical-autoscaler
namespace: kube-system
labels:
k8s-app: calico-typha-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: calico-typha-autoscaler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/cpvpa-amd64:v0.6.0
name: autoscaler
command:
- /cpvpa
- --target=deployment/calico-typha
- --namespace=kube-system
- --logtostderr=true
- --poll-period-seconds=30
- --v=2
- --config-file=/etc/config/typha-autoscaler
volumeMounts:
- name: config
mountPath: /etc/config
volumes:
- name: config
configMap:
name: calico-typha-vertical-autoscaler
serviceAccountName: calico-cpva


@@ -23,29 +23,29 @@ metadata:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.4.0
name: heapster-v1.4.2
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.4.0
version: v1.4.2
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.4.0
version: v1.4.2
template:
metadata:
labels:
k8s-app: heapster
version: v1.4.0
version: v1.4.2
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
- image: gcr.io/google_containers/heapster-amd64:v1.4.2
name: heapster
livenessProbe:
httpGet:
@@ -65,7 +65,7 @@ spec:
- name: usr-ca-certs
mountPath: /usr/share/ca-certificates
readOnly: true
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
- image: gcr.io/google_containers/heapster-amd64:v1.4.2
name: eventer
command:
- /eventer
@@ -103,7 +103,7 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{metrics_memory_per_node}}Mi
- --threshold=5
- --deployment=heapster-v1.4.0
- --deployment=heapster-v1.4.2
- --container=heapster
- --poll-period=300000
- --estimator=exponential
@@ -132,7 +132,7 @@ spec:
- --memory={{base_eventer_memory}}
- --extra-memory={{eventer_memory_per_node}}Ki
- --threshold=5
- --deployment=heapster-v1.4.0
- --deployment=heapster-v1.4.2
- --container=eventer
- --poll-period=300000
- --estimator=exponential


@@ -23,29 +23,29 @@ metadata:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.4.0
name: heapster-v1.4.2
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.4.0
version: v1.4.2
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.4.0
version: v1.4.2
template:
metadata:
labels:
k8s-app: heapster
version: v1.4.0
version: v1.4.2
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
- image: gcr.io/google_containers/heapster-amd64:v1.4.2
name: heapster
livenessProbe:
httpGet:
@@ -66,7 +66,7 @@ spec:
- name: usr-ca-certs
mountPath: /usr/share/ca-certificates
readOnly: true
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
- image: gcr.io/google_containers/heapster-amd64:v1.4.2
name: eventer
command:
- /eventer
@@ -104,7 +104,7 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.4.0
- --deployment=heapster-v1.4.2
- --container=heapster
- --poll-period=300000
- --estimator=exponential
@@ -133,7 +133,7 @@ spec:
- --memory={{ base_eventer_memory }}
- --extra-memory={{ eventer_memory_per_node }}Ki
- --threshold=5
- --deployment=heapster-v1.4.0
- --deployment=heapster-v1.4.2
- --container=eventer
- --poll-period=300000
- --estimator=exponential


@@ -23,29 +23,29 @@ metadata:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.4.0
name: heapster-v1.4.2
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.4.0
version: v1.4.2
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.4.0
version: v1.4.2
template:
metadata:
labels:
k8s-app: heapster
version: v1.4.0
version: v1.4.2
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
- image: gcr.io/google_containers/heapster-amd64:v1.4.2
name: heapster
livenessProbe:
httpGet:
@@ -58,7 +58,7 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
- image: gcr.io/google_containers/heapster-amd64:v1.4.2
name: eventer
command:
- /eventer
@@ -89,7 +89,7 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.4.0
- --deployment=heapster-v1.4.2
- --container=heapster
- --poll-period=300000
- --estimator=exponential
@@ -118,7 +118,7 @@ spec:
- --memory={{ base_eventer_memory }}
- --extra-memory={{ eventer_memory_per_node }}Ki
- --threshold=5
- --deployment=heapster-v1.4.0
- --deployment=heapster-v1.4.2
- --container=eventer
- --poll-period=300000
- --estimator=exponential


@@ -21,29 +21,29 @@ metadata:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.4.0
name: heapster-v1.4.2
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.4.0
version: v1.4.2
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.4.0
version: v1.4.2
template:
metadata:
labels:
k8s-app: heapster
version: v1.4.0
version: v1.4.2
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
- image: gcr.io/google_containers/heapster-amd64:v1.4.2
name: heapster
livenessProbe:
httpGet:
@@ -55,8 +55,7 @@ spec:
command:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=stackdriver
# TODO: add --disable_export when it's merged into Heapster release
- --sink=stackdriver:?cluster_name={{ cluster_name }}&min_interval_sec=100&batch_export_timeout_sec=110
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
@@ -64,6 +63,26 @@ spec:
- name: usr-ca-certs
mountPath: /usr/share/ca-certificates
readOnly: true
- name: prom-to-sd
image: gcr.io/google-containers/prometheus-to-sd:v0.2.1
command:
- /monitor
- --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
- --stackdriver-prefix=container.googleapis.com/internal/addons
- --pod-id=$(POD_NAME)
- --namespace-id=$(POD_NAMESPACE)
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- image: gcr.io/google_containers/addon-resizer:1.7
name: heapster-nanny
resources:
@@ -89,7 +108,7 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{metrics_memory_per_node}}Mi
- --threshold=5
- --deployment=heapster-v1.4.0
- --deployment=heapster-v1.4.2
- --container=heapster
- --poll-period=300000
- --estimator=exponential


@@ -21,29 +21,29 @@ metadata:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.4.0
name: heapster-v1.4.2
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.4.0
version: v1.4.2
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.4.0
version: v1.4.2
template:
metadata:
labels:
k8s-app: heapster
version: v1.4.0
version: v1.4.2
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
- image: gcr.io/google_containers/heapster-amd64:v1.4.2
name: heapster
livenessProbe:
httpGet:
@@ -80,7 +80,7 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.4.0
- --deployment=heapster-v1.4.2
- --container=heapster
- --poll-period=300000
- --estimator=exponential


@@ -29,10 +29,11 @@ subjects:
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: event-exporter-v0.1.4
name: event-exporter
namespace: kube-system
labels:
k8s-app: event-exporter
version: v0.1.5
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
@@ -41,16 +42,17 @@ spec:
metadata:
labels:
k8s-app: event-exporter
version: v0.1.5
spec:
serviceAccountName: event-exporter-sa
containers:
# TODO: Add resources in 1.8
- name: event-exporter
image: gcr.io/google-containers/event-exporter:v0.1.0-r2
image: gcr.io/google-containers/event-exporter:v0.1.5
command:
- '/event-exporter'
- name: prometheus-to-sd-exporter
image: gcr.io/google-containers/prometheus-to-sd:v0.1.2-r2
image: gcr.io/google-containers/prometheus-to-sd:v0.2.1
command:
- /monitor
- --component=event_exporter


@@ -70,27 +70,14 @@ data:
# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@type copy
@type detect_exceptions
<store>
@type prometheus
<metric>
type counter
name logging_line_count
desc Total number of lines generated by application containers
</metric>
</store>
<store>
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</store>
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>
system.input.conf: |-
# Example:
@@ -337,85 +324,70 @@ data:
</metric>
</match>
# TODO(instrumentation): Reconsider this workaround later.
# Trim the entries which exceed slightly less than 100KB, to avoid
# dropping them. It is a necessity, because Stackdriver only supports
# entries that are up to 100KB in size.
<filter kubernetes.**>
@type record_transformer
enable_ruby true
<record>
log ${record['log'].length > 100000 ? "[Trimmed]#{record['log'][0..100000]}..." : record['log']}
</record>
</filter>
# We use 2 output stanzas - one to handle the container logs and one to handle
# the node daemon logs, the latter of which explicitly sends its logs to the
# compute.googleapis.com service rather than container.googleapis.com to keep
# them separate since most users don't care about the node logs.
<match kubernetes.**>
@type copy
@type google_cloud
<store>
@type google_cloud
# Set the buffer type to file to improve the reliability and reduce the memory consumption
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
# Set queue_full action to block because we want to pause gracefully
# in case of the off-the-limits load instead of throwing an exception
buffer_queue_full_action block
# Set the chunk limit conservatively to avoid exceeding the GCL limit
# of 10MiB per write request.
buffer_chunk_limit 2M
# Cap the combined memory usage of this buffer and the one below to
# 2MiB/chunk * (6 + 2) chunks = 16 MiB
buffer_queue_limit 6
# Never wait more than 5 seconds before flushing logs in the non-error case.
flush_interval 5s
# Never wait longer than 30 seconds between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 2
</store>
<store>
@type prometheus
<metric>
type counter
name logging_entry_count
desc Total number of log entries generated by either application containers or system components
<labels>
component container
</labels>
</metric>
</store>
# Collect metrics in Prometheus registry about plugin activity.
enable_monitoring true
monitoring_type prometheus
# Set the buffer type to file to improve the reliability and reduce the memory consumption
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
# Set queue_full action to block because we want to pause gracefully
# in case of the off-the-limits load instead of throwing an exception
buffer_queue_full_action block
# Set the chunk limit conservatively to avoid exceeding the GCL limit
# of 10MiB per write request.
buffer_chunk_limit 2M
# Cap the combined memory usage of this buffer and the one below to
# 2MiB/chunk * (6 + 2) chunks = 16 MiB
buffer_queue_limit 6
# Never wait more than 5 seconds before flushing logs in the non-error case.
flush_interval 5s
# Never wait longer than 30 seconds between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 2
</match>
# Keep a smaller buffer here since these logs are less important than the user's
# container logs.
<match **>
@type copy
@type google_cloud
<store>
@type google_cloud
detect_subservice false
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
buffer_queue_full_action block
buffer_chunk_limit 2M
buffer_queue_limit 2
flush_interval 5s
max_retry_wait 30
disable_retry_limit
num_threads 2
</store>
<store>
@type prometheus
<metric>
type counter
name logging_entry_count
desc Total number of log entries generated by either application containers or system components
<labels>
component system
</labels>
</metric>
</store>
enable_monitoring true
monitoring_type prometheus
detect_subservice false
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
buffer_queue_full_action block
buffer_chunk_limit 2M
buffer_queue_limit 2
flush_interval 5s
max_retry_wait 30
disable_retry_limit
num_threads 2
</match>
metadata:
name: fluentd-gcp-config-v1.1
name: fluentd-gcp-config-v1.1.1
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile


@@ -27,7 +27,7 @@ spec:
hostNetwork: true
containers:
- name: fluentd-gcp
image: gcr.io/google-containers/fluentd-gcp:2.0.7
image: gcr.io/google-containers/fluentd-gcp:2.0.8
# If fluentd consumes its own logs, the following situation may happen:
# fluentd fails to send a chunk to the server => writes it to the log =>
# tries to send this message to the server => fails to send a chunk and so on.
@@ -90,13 +90,13 @@ spec:
exit 1;
fi;
- name: prometheus-to-sd-exporter
image: gcr.io/google-containers/prometheus-to-sd:v0.1.0
image: gcr.io/google-containers/prometheus-to-sd:v0.1.3
command:
- /monitor
- --component=fluentd
- --target-port=31337
- --stackdriver-prefix=container.googleapis.com/internal/addons
- --whitelisted-metrics=logging_line_count,logging_entry_count
- --whitelisted-metrics=stackdriver_successful_requests_count,stackdriver_failed_requests_count,stackdriver_ingested_entries_count,stackdriver_dropped_entries_count
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
@@ -107,6 +107,9 @@ spec:
effect: "NoSchedule"
- operator: "Exists"
effect: "NoExecute"
#TODO: remove this toleration once #44445 is properly fixed.
- operator: "Exists"
effect: "NoSchedule"
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
@@ -120,7 +123,7 @@ spec:
path: /usr/lib64
- name: config-volume
configMap:
name: fluentd-gcp-config-v1.1
name: fluentd-gcp-config-v1.1.1
- name: ssl-certs
hostPath:
path: /etc/ssl/certs


@@ -20,40 +20,68 @@ data:
access_log /dev/stdout;
server {
listen 127.0.0.1:988;
# When serving 301s, don't redirect to port 988.
port_in_redirect off;
# By default, return 403. This protects us from new API versions.
location / {
return 403;
return 403 "This metadata API is not allowed by the metadata proxy.";
}
# Allow for REST discovery.
location = / {
if ($args ~* "^(.+&)?recursive=") {
return 403 "?recursive calls are not allowed by the metadata proxy.";
}
proxy_pass http://169.254.169.254;
}
location = /computeMetadata/ {
if ($args ~* "^(.+&)?recursive=") {
return 403 "?recursive calls are not allowed by the metadata proxy.";
}
proxy_pass http://169.254.169.254;
}
# By default, allow the v0.1, v1beta1, and v1 APIs.
location /0.1/ {
if ($args ~* "^(.+&)?recursive=") {
return 403 "?recursive calls are not allowed by the metadata proxy.";
}
proxy_pass http://169.254.169.254;
}
location /computeMetadata/v1beta1/ {
if ($args ~* "^(.+&)?recursive=") {
return 403 "?recursive calls are not allowed by the metadata proxy.";
}
proxy_pass http://169.254.169.254;
}
location /computeMetadata/v1/ {
if ($args ~* "^(.+&)?recursive=") {
return 403 "?recursive calls are not allowed by the metadata proxy.";
}
proxy_pass http://169.254.169.254;
}
# Return a 403 for the kube-env attribute in all allowed API versions.
location /0.1/meta-data/attributes/kube-env {
return 403;
return 403 "This metadata endpoint is concealed.";
}
location /computeMetadata/v1beta1/instance/attributes/kube-env {
return 403;
return 403 "This metadata endpoint is concealed.";
}
location /computeMetadata/v1/instance/attributes/kube-env {
return 403;
return 403 "This metadata endpoint is concealed.";
}
# Return a 403 for instance identity in all allowed API versions.
location ~ /0.1/meta-data/service-accounts/.+/identity {
return 403 "This metadata endpoint is concealed.";
}
location ~ /computeMetadata/v1beta1/instance/service-accounts/.+/identity {
return 403 "This metadata endpoint is concealed.";
}
location ~ /computeMetadata/v1/instance/service-accounts/.+/identity {
return 403 "This metadata endpoint is concealed.";
}
}
}
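The new guards can be exercised directly against the proxy's listen address from the config above; a sketch (the Metadata-Flavor header is the usual GCE metadata convention, not something the proxy itself checks):

    # Refused by the ?recursive guard:
    curl -s -H 'Metadata-Flavor: Google' \
        'http://127.0.0.1:988/computeMetadata/v1/?recursive=true'
    # -> "?recursive calls are not allowed by the metadata proxy."

    # Refused by the kube-env concealment:
    curl -s 'http://127.0.0.1:988/computeMetadata/v1/instance/attributes/kube-env'
    # -> "This metadata endpoint is concealed."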


@@ -27,7 +27,7 @@ spec:
dnsPolicy: Default
containers:
- name: metadata-proxy
image: gcr.io/google-containers/metadata-proxy:0.1.2
image: gcr.io/google-containers/metadata-proxy:0.1.3
imagePullPolicy: Always
securityContext:
privileged: true


@@ -605,6 +605,7 @@ function build-kube-env {
rm -f ${file}
cat >$file <<EOF
CLUSTER_NAME: $(yaml-quote ${CLUSTER_NAME})
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
@@ -780,6 +781,13 @@ ETCD_CA_CERT: $(yaml-quote ${ETCD_CA_CERT_BASE64:-})
ETCD_PEER_KEY: $(yaml-quote ${ETCD_PEER_KEY_BASE64:-})
ETCD_PEER_CERT: $(yaml-quote ${ETCD_PEER_CERT_BASE64:-})
EOF
# KUBE_APISERVER_REQUEST_TIMEOUT_SEC (if set) controls the --request-timeout
# flag
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
cat >>$file <<EOF
KUBE_APISERVER_REQUEST_TIMEOUT_SEC: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT_SEC})
EOF
fi
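# Example (the value is illustrative): exporting
#   export KUBE_APISERVER_REQUEST_TIMEOUT_SEC=300
# before bring-up lands KUBE_APISERVER_REQUEST_TIMEOUT_SEC: "300" in kube-env,
# which the node startup scripts below translate into --request-timeout=300s
# on the apiserver command line.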
# ETCD_IMAGE (if set) allows using a custom etcd image.
if [ -n "${ETCD_IMAGE:-}" ]; then
cat >>$file <<EOF


@@ -269,3 +269,10 @@ SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-false}" # true, false
METADATA_CLOBBERS_CONFIG="${METADATA_CLOBBERS_CONFIG:-false}"
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST"


@@ -87,7 +87,7 @@ ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.100.0.0/14}"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.64.0.0/14}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true. It is the primary range in
# the subnet and is the range used for node instance IPs.
@@ -317,3 +317,10 @@ if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT}" == "true" ]]; then
fi
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST"


@@ -457,6 +457,11 @@ EOF
if [ -n "${STORAGE_MEDIA_TYPE:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
storage_media_type: '$(echo "$STORAGE_MEDIA_TYPE" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kube_apiserver_request_timeout_sec: '$(echo "$KUBE_APISERVER_REQUEST_TIMEOUT_SEC" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${ADMISSION_CONTROL:-}" ] && [ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]; then


@@ -34,54 +34,6 @@ function create-dirs {
fi
}
# Vars assumed:
# NUM_NODES
function get-calico-node-cpu {
local suggested_calico_cpus=100m
if [[ "${NUM_NODES}" -gt "10" ]]; then
suggested_calico_cpus=250m
fi
if [[ "${NUM_NODES}" -gt "100" ]]; then
suggested_calico_cpus=500m
fi
if [[ "${NUM_NODES}" -gt "500" ]]; then
suggested_calico_cpus=1000m
fi
echo "${suggested_calico_cpus}"
}
# Vars assumed:
# NUM_NODES
function get-calico-typha-replicas {
local typha_count=1
if [[ "${NUM_NODES}" -gt "10" ]]; then
typha_count=2
fi
if [[ "${NUM_NODES}" -gt "100" ]]; then
typha_count=3
fi
if [[ "${NUM_NODES}" -gt "250" ]]; then
typha_count=4
fi
if [[ "${NUM_NODES}" -gt "500" ]]; then
typha_count=5
fi
echo "${typha_count}"
}
# Vars assumed:
# NUM_NODES
function get-calico-typha-cpu {
local typha_cpu=200m
if [[ "${NUM_NODES}" -gt "10" ]]; then
typha_cpu=500m
fi
if [[ "${NUM_NODES}" -gt "100" ]]; then
typha_cpu=1000m
fi
echo "${typha_cpu}"
}
# Create directories referenced in the kube-controller-manager manifest for
# bindmounts. This is used under the rkt runtime to work around
# https://github.com/kubernetes/kubernetes/issues/26816
@@ -867,6 +819,9 @@ function start-kube-apiserver {
if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
fi
if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]]; then
params+=" --request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT_SEC}s"
fi
if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
fi
@@ -1129,7 +1084,7 @@ function start-cluster-autoscaler {
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
remove-salt-config-comments "${src_file}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:-}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
@@ -1168,6 +1123,8 @@ function setup-addon-manifests {
}
# Prepares the manifests of k8s addons, and starts the addon manager.
# Vars assumed:
# CLUSTER_NAME
function start-kube-addons {
echo "Prepare kube-addons manifests and start kube addon manager"
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
@@ -1205,6 +1162,7 @@ function start-kube-addons {
controller_yaml="${controller_yaml}/heapster-controller.yaml"
fi
remove-salt-config-comments "${controller_yaml}"
sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
@@ -1260,20 +1218,9 @@ function start-kube-addons {
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"
# Configure Calico based on cluster size and image type.
# Configure Calico CNI directory.
local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
sed -i -e "s@__CALICO_CNI_DIR__@/opt/cni/bin@g" "${ds_file}"
sed -i -e "s@__CALICO_NODE_CPU__@$(get-calico-node-cpu)@g" "${ds_file}"
sed -i -e "s@__CALICO_TYPHA_CPU__@$(get-calico-typha-cpu)@g" "${typha_dep_file}"
sed -i -e "s@__CALICO_TYPHA_REPLICAS__@$(get-calico-typha-replicas)@g" "${typha_dep_file}"
else
# If not configured to use Calico, the set the typha replica count to 0, but only if the
# addon is present.
local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
if [[ -e $typha_dep_file ]]; then
sed -i -e "s@__CALICO_TYPHA_REPLICAS__@0@g" "${typha_dep_file}"
fi
fi
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"


@@ -32,55 +32,6 @@ function setup-os-params {
echo "core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}
# Vars assumed:
# NUM_NODES
function get-calico-node-cpu {
local suggested_calico_cpus=100m
if [[ "${NUM_NODES}" -gt "10" ]]; then
suggested_calico_cpus=250m
fi
if [[ "${NUM_NODES}" -gt "100" ]]; then
suggested_calico_cpus=500m
fi
if [[ "${NUM_NODES}" -gt "500" ]]; then
suggested_calico_cpus=1000m
fi
echo "${suggested_calico_cpus}"
}
# Vars assumed:
# NUM_NODES
function get-calico-typha-replicas {
local typha_count=1
if [[ "${NUM_NODES}" -gt "10" ]]; then
typha_count=2
fi
if [[ "${NUM_NODES}" -gt "100" ]]; then
typha_count=3
fi
if [[ "${NUM_NODES}" -gt "250" ]]; then
typha_count=4
fi
if [[ "${NUM_NODES}" -gt "500" ]]; then
typha_count=5
fi
echo "${typha_count}"
}
# Vars assumed:
# NUM_NODES
function get-calico-typha-cpu {
local typha_cpu=200m
if [[ "${NUM_NODES}" -gt "10" ]]; then
typha_cpu=500m
fi
if [[ "${NUM_NODES}" -gt "100" ]]; then
typha_cpu=1000m
fi
echo "${typha_cpu}"
}
function config-ip-firewall {
echo "Configuring IP firewall rules"
# The GCI image has host firewall which drop most inbound/forwarded packets.
@@ -239,10 +190,13 @@ function append_or_replace_prefixed_line {
local -r file="${1:-}"
local -r prefix="${2:-}"
local -r suffix="${3:-}"
local -r dirname="$(dirname ${file})"
local -r tmpfile="$(mktemp -t filtered.XXXX --tmpdir=${dirname})"
touch "${file}"
awk "substr(\$0,0,length(\"${prefix}\")) != \"${prefix}\" { print }" "${file}" > "${file}.filtered" && mv "${file}.filtered" "${file}"
echo "${prefix}${suffix}" >> "${file}"
awk "substr(\$0,0,length(\"${prefix}\")) != \"${prefix}\" { print }" "${file}" > "${tmpfile}"
echo "${prefix}${suffix}" >> "${tmpfile}"
mv "${tmpfile}" "${file}"
}
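# The mktemp staging above keeps the temp file on the same filesystem as
# ${file}, so the final mv is an atomic rename rather than a copy. Usage is
# unchanged; e.g. (values illustrative):
#   append_or_replace_prefixed_line "/etc/srv/kubernetes/basic_auth.csv" \
#     "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters"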
function create-node-pki {
@@ -356,7 +310,11 @@ function create-master-auth {
fi
append_or_replace_prefixed_line "${basic_auth_csv}" "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters"
fi
local -r known_tokens_csv="${auth_dir}/known_tokens.csv"
if [[ -e "${known_tokens_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
rm "${known_tokens_csv}"
fi
if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN}," "admin,admin,system:masters"
fi
@@ -788,20 +746,26 @@ function assemble-docker-flags {
# If using a network plugin, extend the docker configuration to always remove
# the network checkpoint to avoid corrupt checkpoints.
# (https://github.com/docker/docker/issues/18283).
echo "Extend the default docker.service configuration"
echo "Extend the docker.service configuration to remove the network checkpiont"
mkdir -p /etc/systemd/system/docker.service.d
cat <<EOF >/etc/systemd/system/docker.service.d/01network.conf
[Service]
ExecStartPre=/bin/sh -x -c "rm -rf /var/lib/docker/network"
EOF
fi
# Ensure TasksMax is sufficient for docker.
# (https://github.com/kubernetes/kubernetes/issues/51977)
echo "Extend the docker.service configuration to set a higher pids limit"
mkdir -p /etc/systemd/system/docker.service.d
cat <<EOF >/etc/systemd/system/docker.service.d/02tasksmax.conf
[Service]
TasksMax=infinity
EOF
systemctl daemon-reload
# If using a network plugin, we need to explicitly restart docker daemon, because
# kubelet will not do it.
echo "Docker command line is updated. Restart docker to pick it up"
systemctl restart docker
fi
}
# A helper function for loading a docker image. It keeps trying up to 5 times.
@@ -987,6 +951,8 @@ function start-node-problem-detector {
flags+=" --logtostderr"
flags+=" --system-log-monitors=${km_config},${dm_config}"
flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig"
local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
flags+=" --port=${npd_port}"
# Write the systemd service file for node problem detector.
cat <<EOF >/etc/systemd/system/node-problem-detector.service
@@ -1186,6 +1152,7 @@ function prepare-mounter-rootfs {
mount --make-rshared "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
mount --bind -o ro /proc "${CONTAINERIZED_MOUNTER_ROOTFS}/proc"
mount --bind -o ro /dev "${CONTAINERIZED_MOUNTER_ROOTFS}/dev"
mount --bind -o ro /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/resolv.conf"
}
# A helper function for removing salt configuration and comments from a file.
@@ -1251,6 +1218,9 @@ function start-kube-apiserver {
if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
fi
if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]]; then
params+=" --request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT_SEC}s"
fi
if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
fi
@@ -1562,7 +1532,7 @@ function start-cluster-autoscaler {
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
remove-salt-config-comments "${src_file}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:-}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
@@ -1600,7 +1570,29 @@ function setup-addon-manifests {
chmod 644 "${dst_dir}"/*
}
# Fluentd manifest is modified using kubectl, which may not be available at
# this point. Run this as a background process.
function wait-for-apiserver-and-update-fluentd {
until kubectl get nodes
do
sleep 10
done
kubectl set resources --dry-run --local -f ${fluentd_gcp_yaml} \
--limits=memory=${FLUENTD_GCP_MEMORY_LIMIT} \
--requests=cpu=${FLUENTD_GCP_CPU_REQUEST},memory=${FLUENTD_GCP_MEMORY_REQUEST} \
--containers=fluentd-gcp -o yaml > ${fluentd_gcp_yaml}.tmp
mv ${fluentd_gcp_yaml}.tmp ${fluentd_gcp_yaml}
}
# Trigger background process that will ultimately update fluentd resource
# requirements.
function start-fluentd-resource-update {
wait-for-apiserver-and-update-fluentd &
}
# Prepares the manifests of k8s addons, and starts the addon manager.
# Vars assumed:
# CLUSTER_NAME
function start-kube-addons {
echo "Prepare kube-addons manifests and start kube addon manager"
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
@@ -1638,6 +1630,7 @@ function start-kube-addons {
controller_yaml="${controller_yaml}/heapster-controller.yaml"
fi
remove-salt-config-comments "${controller_yaml}"
sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
@@ -1680,6 +1673,8 @@ function start-kube-addons {
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
setup-addon-manifests "addons" "fluentd-gcp"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
start-fluentd-resource-update
fi
if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
setup-addon-manifests "addons" "dashboard"
@@ -1697,20 +1692,9 @@ function start-kube-addons {
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"
# Configure Calico based on cluster size and image type.
# Configure Calico CNI directory.
local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"
sed -i -e "s@__CALICO_NODE_CPU__@$(get-calico-node-cpu)@g" "${ds_file}"
sed -i -e "s@__CALICO_TYPHA_CPU__@$(get-calico-typha-cpu)@g" "${typha_dep_file}"
sed -i -e "s@__CALICO_TYPHA_REPLICAS__@$(get-calico-typha-replicas)@g" "${typha_dep_file}"
else
# If not configured to use Calico, then set the typha replica count to 0, but only if the
# addon is present.
local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
if [[ -e $typha_dep_file ]]; then
sed -i -e "s@__CALICO_TYPHA_REPLICAS__@0@g" "${typha_dep_file}"
fi
fi
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"


@@ -43,11 +43,12 @@ fi
GREP_REGEX=""
function gcloud-compute-list() {
local -r resource=$1
local -r filter=${2:-}
echo -e "\n\n[ ${resource} ]"
local attempt=1
local result=""
while true; do
if result=$(gcloud compute ${resource} list --project=${PROJECT} ${@:2}); then
if result=$(gcloud compute ${resource} list --project=${PROJECT} ${filter:+--filter="$filter"} ${@:3}); then
if [[ ! -z "${GREP_REGEX}" ]]; then
result=$(echo "${result}" | grep "${GREP_REGEX}" || true)
fi
@@ -73,21 +74,21 @@ echo "Provider: ${KUBERNETES_PROVIDER:-}"
# List resources related to instances, filtering by the instance prefix if
# provided.
gcloud-compute-list instance-templates --regexp="${INSTANCE_PREFIX}.*"
gcloud-compute-list instance-groups ${ZONE:+"--zones=${ZONE}"} --regexp="${INSTANCE_PREFIX}.*"
gcloud-compute-list instances ${ZONE:+"--zones=${ZONE}"} --regexp="${INSTANCE_PREFIX}.*"
gcloud-compute-list instance-templates "name ~ '${INSTANCE_PREFIX}.*'"
gcloud-compute-list instance-groups "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
gcloud-compute-list instances "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
# List disk resources, filterying by instance prefix if provided.
gcloud-compute-list disks ${ZONE:+"--zones=${ZONE}"} --regexp="${INSTANCE_PREFIX}.*"
# List disk resources, filtering by instance prefix if provided.
gcloud-compute-list disks "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'"
# List network resources. We include names starting with "a", corresponding to
# those that Kubernetes creates.
gcloud-compute-list addresses ${REGION:+"--regions=${REGION}"} --regexp="a.*|${INSTANCE_PREFIX}.*"
gcloud-compute-list addresses "${REGION:+"region=(${REGION}) AND "}name ~ 'a.*|${INSTANCE_PREFIX}.*'"
# Match either the header or a line with the specified e2e network.
# This assumes that the network name is the second field in the output.
GREP_REGEX="^NAME\|^[^ ]\+[ ]\+\(default\|${NETWORK}\) "
gcloud-compute-list routes --regexp="default.*|${INSTANCE_PREFIX}.*"
gcloud-compute-list firewall-rules --regexp="default.*|k8s-fw.*|${INSTANCE_PREFIX}.*"
gcloud-compute-list routes "name ~ 'default.*|${INSTANCE_PREFIX}.*'"
gcloud-compute-list firewall-rules "name ~ 'default.*|k8s-fw.*|${INSTANCE_PREFIX}.*'"
GREP_REGEX=""
gcloud-compute-list forwarding-rules ${REGION:+"--regions=${REGION}"}
gcloud-compute-list target-pools ${REGION:+"--regions=${REGION}"}
gcloud-compute-list forwarding-rules ${REGION:+"region=(${REGION})"}
gcloud-compute-list target-pools ${REGION:+"region=(${REGION})"}
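With this refactor the second argument is a gcloud filter expression rather than extra CLI flags; a usage sketch (ZONE and the name pattern are illustrative):

    # Old style:  gcloud-compute-list instances --zones="${ZONE}" --regexp="e2e-.*"
    # New style:
    gcloud-compute-list instances "zone:(${ZONE}) AND name ~ 'e2e-.*'"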


@@ -340,8 +340,7 @@ function do-node-upgrade() {
for group in ${INSTANCE_GROUPS[@]}; do
old_templates+=($(gcloud compute instance-groups managed list \
--project="${PROJECT}" \
--zones="${ZONE}" \
--regexp="${group}" \
--filter="name ~ '${group}' AND zone:(${ZONE})" \
--format='value(instanceTemplate)' || true))
set_instance_template_out=$(gcloud compute instance-groups managed set-instance-template "${group}" \
--template="${template_name}" \


@@ -303,9 +303,9 @@ function detect-node-names() {
detect-project
INSTANCE_GROUPS=()
INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \
--zones "${ZONE}" --project "${PROJECT}" \
--regexp "${NODE_INSTANCE_PREFIX}-.+" \
--format='value(instanceGroup)' || true))
--project "${PROJECT}" \
--filter "name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
--format='value(name)' || true))
NODE_NAMES=()
if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then
for group in "${INSTANCE_GROUPS[@]}"; do
@@ -1033,7 +1033,13 @@ function create-master() {
# Sets MASTER_ROOT_DISK_SIZE that is used by create-master-instance
get-master-root-disk-size
create-master-instance "${MASTER_RESERVED_IP}" &
if [[ "${NUM_NODES}" -ge "50" ]]; then
# We block on master creation for large clusters to avoid doing too much
# unnecessary work in case master start-up fails (like creation of nodes).
create-master-instance "${MASTER_RESERVED_IP}"
else
create-master-instance "${MASTER_RESERVED_IP}" &
fi
}
# Adds master replica to etcd cluster.
@@ -1067,7 +1073,7 @@ function add-replica-to-etcd() {
function set-existing-master() {
local existing_master=$(gcloud compute instances list \
--project "${PROJECT}" \
--regexp "$(get-replica-name-regexp)" \
--filter "name ~ '$(get-replica-name-regexp)'" \
--format "value(name,zone)" | head -n1)
EXISTING_MASTER_NAME="$(echo "${existing_master}" | cut -f1)"
EXISTING_MASTER_ZONE="$(echo "${existing_master}" | cut -f2)"
@@ -1527,7 +1533,7 @@ function kube-down() {
# Check if there are any remaining master replicas.
local REMAINING_MASTER_COUNT=$(gcloud compute instances list \
--project "${PROJECT}" \
--regexp "$(get-replica-name-regexp)" \
--filter="name ~ '$(get-replica-name-regexp)'" \
--format "value(zone)" | wc -l)
# In the replicated scenario, if there's only a single master left, we should also delete load balancer in front of it.
@@ -1569,8 +1575,8 @@ function kube-down() {
# Find out what minions are running.
local -a minions
minions=( $(gcloud compute instances list \
--project "${PROJECT}" --zones "${ZONE}" \
--regexp "${NODE_INSTANCE_PREFIX}-.+" \
--project "${PROJECT}" \
--filter="name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
--format='value(name)') )
# If any minions are running, delete them in batches.
while (( "${#minions[@]}" > 0 )); do
@@ -1596,7 +1602,7 @@ function kube-down() {
# first allows the master to cleanup routes itself.
local TRUNCATED_PREFIX="${INSTANCE_PREFIX:0:26}"
routes=( $(gcloud compute routes list --project "${PROJECT}" \
--regexp "${TRUNCATED_PREFIX}-.{8}-.{4}-.{4}-.{4}-.{12}" \
--filter="name ~ '${TRUNCATED_PREFIX}-.{8}-.{4}-.{4}-.{4}-.{12}'" \
--format='value(name)') )
while (( "${#routes[@]}" > 0 )); do
echo Deleting routes "${routes[*]::${batch}}"
@@ -1662,8 +1668,7 @@ function kube-down() {
function get-replica-name() {
echo $(gcloud compute instances list \
--project "${PROJECT}" \
--zones "${ZONE}" \
--regexp "$(get-replica-name-regexp)" \
--filter="name ~ '$(get-replica-name-regexp)' AND zone:(${ZONE})" \
--format "value(name)" | head -n1)
}
@@ -1677,7 +1682,7 @@ function get-replica-name() {
function get-all-replica-names() {
echo $(gcloud compute instances list \
--project "${PROJECT}" \
--regexp "$(get-replica-name-regexp)" \
--filter="name ~ '$(get-replica-name-regexp)'" \
--format "value(name)" | tr "\n" "," | sed 's/,$//')
}
@@ -1689,7 +1694,7 @@ function get-master-replicas-count() {
detect-project
local num_masters=$(gcloud compute instances list \
--project "${PROJECT}" \
--regexp "$(get-replica-name-regexp)" \
--filter="name ~ '$(get-replica-name-regexp)'" \
--format "value(zone)" | wc -l)
echo -n "${num_masters}"
}
@@ -1713,7 +1718,7 @@ function get-replica-name-regexp() {
function set-replica-name() {
local instances=$(gcloud compute instances list \
--project "${PROJECT}" \
--regexp "$(get-replica-name-regexp)" \
--filter="name ~ '$(get-replica-name-regexp)'" \
--format "value(name)")
suffix=""
@@ -1730,7 +1735,8 @@ function set-replica-name() {
#
# $1: project
function get-template() {
gcloud compute instance-templates list -r "${NODE_INSTANCE_PREFIX}-template(-(${KUBE_RELEASE_VERSION_DASHED_REGEX}|${KUBE_CI_VERSION_DASHED_REGEX}))?" \
gcloud compute instance-templates list \
--filter="name ~ '${NODE_INSTANCE_PREFIX}-template(-(${KUBE_RELEASE_VERSION_DASHED_REGEX}|${KUBE_CI_VERSION_DASHED_REGEX}))?'" \
--project="${1}" --format='value(name)'
}
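# Example (names hypothetical): with NODE_INSTANCE_PREFIX=e2e-test-minion the
# filter above matches both the unversioned and the dashed-version template names:
#   e2e-test-minion-template
#   e2e-test-minion-template-v1-7-6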
@@ -1778,8 +1784,8 @@ function check-resources() {
# Find out what minions are running.
local -a minions
minions=( $(gcloud compute instances list \
--project "${PROJECT}" --zones "${ZONE}" \
--regexp "${NODE_INSTANCE_PREFIX}-.+" \
--project "${PROJECT}" \
--filter="name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
--format='value(name)') )
if (( "${#minions[@]}" > 0 )); then
KUBE_RESOURCE_FOUND="${#minions[@]} matching ${NODE_INSTANCE_PREFIX}-.+"
@@ -1798,7 +1804,7 @@ function check-resources() {
local -a routes
routes=( $(gcloud compute routes list --project "${PROJECT}" \
--regexp "${INSTANCE_PREFIX}-minion-.{4}" --format='value(name)') )
--filter="name ~ '${INSTANCE_PREFIX}-minion-.{4}'" --format='value(name)') )
if (( "${#routes[@]}" > 0 )); then
KUBE_RESOURCE_FOUND="${#routes[@]} routes matching ${INSTANCE_PREFIX}-minion-.{4}"
return 1

View File

@@ -47,3 +47,10 @@ KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false}
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.
METADATA_CLOBBERS_CONFIG=true
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST"
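Since each variable uses the ${VAR:-default} pattern, the defaults can be overridden from the environment before bringing the cluster up; a minimal sketch (values are illustrative):

    export FLUENTD_GCP_MEMORY_LIMIT=500Mi
    export FLUENTD_GCP_CPU_REQUEST=200m
    export FLUENTD_GCP_MEMORY_REQUEST=300Mi
    ./cluster/kube-up.sh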

View File

@@ -27,19 +27,21 @@ readonly report_dir="${1:-_artifacts}"
# check for a function named log_dump_custom_get_instances. If it's
# defined, we assume the function can be called with one argument, the
# role, which is either "master" or "node".
echo "Checking for custom logdump instances, if any"
if [[ $(type -t log_dump_custom_get_instances) == "function" ]]; then
readonly use_custom_instance_list=yes
else
readonly use_custom_instance_list=
fi
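# A minimal sketch of such a hook, following the contract described above (the
# function name and one-argument role come from the comment; the instance names
# are hypothetical, and the sketch is left commented out so it has no effect here):
#
# function log_dump_custom_get_instances() {
#   local -r role="$1"  # "master" or "node"
#   if [[ "${role}" == "master" ]]; then
#     echo "my-cluster-master-0"
#   else
#     echo "my-cluster-node-0" "my-cluster-node-1"
#   fi
# }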
readonly master_ssh_supported_providers="gce aws kubemark"
readonly node_ssh_supported_providers="gce gke aws kubemark"
readonly master_ssh_supported_providers="gce aws"
readonly node_ssh_supported_providers="gce gke aws"
readonly gcloud_supported_providers="gce gke"
readonly master_logfiles="kube-apiserver kube-scheduler rescheduler kube-controller-manager etcd etcd-events glbc cluster-autoscaler kube-addon-manager fluentd"
readonly node_logfiles="kube-proxy fluentd node-problem-detector"
readonly node_systemd_services="node-problem-detector"
readonly hollow_node_logfiles="kubelet-hollow-node-* kubeproxy-hollow-node-* npd-*"
readonly hollow_node_logfiles="kubelet-hollow-node-* kubeproxy-hollow-node-* npd-hollow-node-*"
readonly aws_logfiles="cloud-init-output"
readonly gce_logfiles="startupscript"
readonly kern_logfile="kern"
@@ -51,16 +53,17 @@ readonly systemd_services="kubelet docker"
# file descriptors for large clusters.
readonly max_scp_processes=25
# This template spits out the external IPs and images for each node in the cluster in a format like so:
# 52.32.7.85 gcr.io/google_containers/kube-apiserver:1355c18c32d7bef16125120bce194fad gcr.io/google_containers/kube-controller-manager:46365cdd8d28b8207950c3c21d1f3900 [...]
readonly ips_and_images='{range .items[*]}{@.status.addresses[?(@.type == "ExternalIP")].address} {@.status.images[*].names[*]}{"\n"}{end}'
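# Presumably consumed via kubectl's jsonpath output, along the lines of:
#   kubectl get nodes -o jsonpath="${ips_and_images}"
# which prints one "<external-ip> <image> <image> ..." line per node.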
function setup() {
if [[ -z "${use_custom_instance_list}" ]]; then
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
echo "Obtaining KUBE_ROOT"
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
: ${KUBE_CONFIG_FILE:="config-test.sh"}
echo "Sourcing kube-util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"
detect-project &> /dev/null
echo "Detecting project"
detect-project 2>&1
elif [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
echo "Using 'use_custom_instance_list' with gke, skipping check for LOG_DUMP_SSH_KEY and LOG_DUMP_SSH_USER"
elif [[ -z "${LOG_DUMP_SSH_KEY:-}" ]]; then
echo "LOG_DUMP_SSH_KEY not set, but required when using log_dump_custom_get_instances"
exit 1
@@ -98,20 +101,17 @@ function copy-logs-from-node() {
# Comma delimit (even the singleton, or scp does the wrong thing), surround by braces.
local -r scp_files="{$(printf "%s," "${files[@]}")}"
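# Example (file names hypothetical): for files=(kubelet.log docker.log) this
# yields scp_files="{kubelet.log,docker.log,}" (note the trailing comma), so a
# single call like
#   scp user@host:"{kubelet.log,docker.log,}" "${dir}"
# fetches both files; without the comma-delimited brace list, a singleton would
# not be treated as a file list by scp.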
if [[ -n "${use_custom_instance_list}" ]]; then
if [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
# get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information
gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true
gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true
elif [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
local ip=$(get_ssh_hostname "${node}")
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true
elif [[ -n "${use_custom_instance_list}" ]]; then
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${node}:${scp_files}" "${dir}" > /dev/null || true
else
case "${KUBERNETES_PROVIDER}" in
gce|gke|kubemark)
# get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information
gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true
gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true
;;
aws)
local ip=$(get_ssh_hostname "${node}")
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true
;;
esac
echo "Unknown cloud-provider '${KUBERNETES_PROVIDER}' and use_custom_instance_list is unset too - skipping logdump for '${node}'"
fi
}
@@ -130,11 +130,8 @@ function save-logs() {
fi
else
case "${KUBERNETES_PROVIDER}" in
gce|gke|kubemark)
gce|gke)
files="${files} ${gce_logfiles}"
if [[ "${KUBERNETES_PROVIDER}" == "kubemark" && "${ENABLE_HOLLOW_NODE_LOGS:-}" == "true" ]]; then
files="${files} ${hollow_node_logfiles}"
fi
;;
aws)
files="${files} ${aws_logfiles}"
@@ -224,13 +221,18 @@ function dump_nodes() {
return
fi
node_logfiles_all="${node_logfiles}"
if [[ "${ENABLE_HOLLOW_NODE_LOGS:-}" == "true" ]]; then
node_logfiles_all="${node_logfiles_all} ${hollow_node_logfiles}"
fi
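# Hollow-node log collection is therefore an opt-in environment toggle, e.g.
# (the script path may vary by tree layout):
#   ENABLE_HOLLOW_NODE_LOGS=true ./cluster/log-dump/log-dump.sh _artifacts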
proc=${max_scp_processes}
for node_name in "${node_names[@]}"; do
node_dir="${report_dir}/${node_name}"
mkdir -p "${node_dir}"
# Save logs in the background. This speeds up things when there are
# many nodes.
save-logs "${node_name}" "${node_dir}" "${node_logfiles}" "${node_systemd_services}" &
save-logs "${node_name}" "${node_dir}" "${node_logfiles_all}" "${node_systemd_services}" &
# We don't want to run more than ${max_scp_processes} at a time, so
# wait once we hit that many nodes. This isn't ideal, since one might

View File

@@ -25,7 +25,7 @@
"containers": [
{
"name": "cluster-autoscaler",
"image": "gcr.io/google_containers/cluster-autoscaler:v0.6.0",
"image": "gcr.io/google_containers/cluster-autoscaler:v0.6.2",
"livenessProbe": {
"httpGet": {
"path": "/health-check",

Some files were not shown because too many files have changed in this diff.