Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 13:29:11 +01:00

Compare commits: deschedule...deschedule (77 commits)
| SHA1 |
|---|
| 012482b9b3 |
| 0894f7740c |
| c3f07dc366 |
| ca8f1051eb |
| c7692a2e9f |
| 53badf7b61 |
| f801c5f72f |
| 327880ba51 |
| 7bb8b4feda |
| 36b1e1f061 |
| 4507a90bb6 |
| 550f68306c |
| 2b668566ce |
| ee414ea366 |
| 5761b5d595 |
| 07f476dfc4 |
| f5e9f07321 |
| 05c69ee26a |
| 1623e09122 |
| 696aa7c505 |
| c53dce0805 |
| cd8b5a0354 |
| b51f24eb8e |
| ae3b4368ee |
| 61eef93618 |
| fa0a2ec6fe |
| 7ece10a643 |
| 71c8eae47e |
| 267b0837dc |
| c713537d56 |
| e374229707 |
| f834581a8e |
| 15fcde5229 |
| 96c5dd3941 |
| 65a03e76bf |
| 43525f6493 |
| 7457626f62 |
| 08c22e8921 |
| 4ff533ec17 |
| 7680e3d079 |
| 305801dd0e |
| 9951b85d60 |
| ff21ec9432 |
| 5e15d77bf2 |
| d833c73fc4 |
| 795a80dfb0 |
| f5e4acdd8a |
| eb4c1bb355 |
| 6dfa95cc87 |
| eb9d974a8b |
| d41a1f4a56 |
| 138ad556a3 |
| 37124e6e45 |
| bd412bf87f |
| 6f9b31f568 |
| c858740c4f |
| bfefe634a1 |
| 7b7b9e1cd7 |
| 0a4b8b0a25 |
| f28183dcbe |
| abdf79454f |
| 46b570b71d |
| 616a9b5f6b |
| 003a4cdc2b |
| 54ea05d8bb |
| d0eea0cabb |
| f0297dfe03 |
| ef1f36f8e4 |
| a733c95dcc |
| 5a81a0661b |
| 83e04960af |
| 2a8dc69cbb |
| 5d82d08af3 |
| c265825166 |
| ed0126fb63 |
| 8c7267b379 |
| 435674fb44 |
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 46 lines)
@@ -0,0 +1,46 @@
---
name: Bug report
about: Create a bug report to help improve descheduler
title: ''
labels: 'kind/bug'
assignees: ''

---

<!-- Please answer these questions before submitting your bug report. Thanks! -->

**What version of descheduler are you using?**

descheduler version:

**Does this issue reproduce with the latest release?**

**Which descheduler CLI options are you using?**

**Please provide a copy of your descheduler policy config file**

**What k8s version are you using (`kubectl version`)?**

<details><summary><code>kubectl version</code> Output</summary><br><pre>
$ kubectl version

</pre></details>

**What did you do?**

<!--
If possible, provide a recipe for reproducing the error.
A detailed sequence of steps describing what to do to observe the issue is good.
A complete runnable bash shell script is best.
-->

**What did you expect to see?**

**What did you see instead?**
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 26 lines)
@@ -0,0 +1,26 @@
---
name: Feature request
about: Suggest an idea for descheduler
title: ''
labels: 'kind/feature'
assignees: ''

---

<!-- Please answer these questions before submitting your feature request. Thanks! -->

**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->

**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->

**Describe alternatives you've considered**
<!-- A clear and concise description of any alternative solutions or features you've considered. -->

**What version of descheduler are you using?**

descheduler version:

**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
.github/ISSUE_TEMPLATE/misc_request.md (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
---
name: Miscellaneous
about: Not a bug and not a feature
title: ''
labels: ''
assignees: ''

---

<!--
Please do not use this to submit a bug report or feature request. Use the
bug report or feature request options instead.

Also, please consider posting in the Kubernetes Slack #sig-scheduling channel
instead of opening an issue if this is a support request.

Thanks!
-->
.github/workflows/release.yaml (vendored, 16 lines changed)
@@ -3,6 +3,7 @@ name: Release Charts
on:
  push:
    branches:
      - master
      - release-*

jobs:
@@ -11,21 +12,20 @@ jobs:
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Install Helm
        uses: azure/setup-helm@v1
        with:
          version: v3.4.0
      - name: Fetch history
        run: git fetch --prune --unshallow

      - name: Add dependency chart repos
        run: |
          helm repo add stable https://kubernetes-charts.storage.googleapis.com/

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.1.0
        uses: helm/chart-releaser-action@v1.0.0-rc.2
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
          CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.13.9
FROM golang:1.14.4

WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
Makefile (5 lines changed)
@@ -23,7 +23,7 @@ LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"

GOLANGCI_VERSION := v1.15.0
HAS_GOLANGCI := $(shell which golangci-lint)
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint)

# REGISTRY is the container registry to push
# into. The default is to push to the staging
@@ -82,8 +82,7 @@ gen:
	./hack/update-generated-conversions.sh
	./hack/update-generated-deep-copies.sh
	./hack/update-generated-defaulters.sh
	#undo go mod changes caused by above.
	go mod tidy

lint:
ifndef HAS_GOLANGCI
	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
OWNERS (1 line changed)
@@ -9,3 +9,4 @@ reviewers:
- ravisantoshgudimetla
- damemi
- seanmalloy
- ingvagabund
README.md (17 lines changed)
@@ -94,7 +94,8 @@ usage is below threshold for all (cpu, memory, and number of pods), the node is
Currently, pods request resource requirements are considered for computing node resource utilization.

There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
from where pods could be evicted. Any node, between the thresholds, `thresholds` and `targetThresholds` is
from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, or number of pods),
the node is considered over utilized. Any node between the thresholds, `thresholds` and `targetThresholds` is
considered appropriately utilized and is not considered for eviction. The threshold, `targetThresholds`,
can be configured for cpu, memory, and number of pods too in terms of percentage.

@@ -119,6 +120,15 @@ strategies:
"pods": 50
```

Policy should pass the following validation checks:
* Only three types of resources are supported: `cpu`, `memory` and `pods`.
* `thresholds` or `targetThresholds` can not be nil and they must configure exactly the same types of resources.
* The valid range of the resource's percentage value is \[0, 100\]
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.

If any of the resource types is not specified, all its thresholds default to 100% to avoid nodes going
from underutilized to overutilized.
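To make the validation rules above concrete, here is a small illustrative Go sketch of the kind of checks they describe. The function and type names are hypothetical, not the descheduler's actual validation code.

```go
package validation

import "fmt"

// Thresholds maps a resource name ("cpu", "memory", "pods") to a percentage.
type Thresholds map[string]int

// validateThresholds applies the documented rules to a thresholds/targetThresholds pair.
func validateThresholds(thresholds, targetThresholds Thresholds) error {
	if thresholds == nil || targetThresholds == nil {
		return fmt.Errorf("thresholds and targetThresholds must both be configured")
	}
	if len(thresholds) != len(targetThresholds) {
		return fmt.Errorf("thresholds and targetThresholds must configure exactly the same resources")
	}
	supported := map[string]bool{"cpu": true, "memory": true, "pods": true}
	for name, low := range thresholds {
		high, ok := targetThresholds[name]
		if !supported[name] || !ok {
			return fmt.Errorf("unsupported or mismatched resource %q", name)
		}
		if low < 0 || low > 100 || high < 0 || high > 100 {
			return fmt.Errorf("%q: percentage values must be in [0, 100]", name)
		}
		if low > high {
			return fmt.Errorf("%q: thresholds may not exceed targetThresholds", name)
		}
	}
	return nil
}
```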
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
are above the configured value. This could be helpful in large clusters where a few nodes could go
@@ -227,11 +237,14 @@ When the descheduler decides to evict pods from a node, it employs the following
never evicted because these pods won't be recreated.
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted.
* Best efforts pods are evicted before burstable and guaranteed pods.
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
best effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation descheduler.alpha.kubernetes.io/evict are evicted. This
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
Users should know how and if the pod will be recreated.

Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.

### Pod Disruption Budget (PDB)

Pods subject to a Pod Disruption Budget(PDB) are not evicted if descheduling violates its PDB. The pods
@@ -1,16 +1,16 @@
apiVersion: v1
name: descheduler
version: 0.18.2
version: 0.18.0
appVersion: 0.18.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes
- descheduler
- kube-scheduler
- kubernetes
- descheduler
- kube-scheduler
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
sources:
- https://github.com/kubernetes-sigs/descheduler
- https://github.com/kubernetes-sigs/descheduler
maintainers:
- name: Kubernetes SIG Scheduling
  email: kubernetes-sig-scheduling@googlegroups.com
- name: stevehipwell
  email: steve.hipwell@github.com
@@ -46,7 +46,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
| Parameter | Description | Default |
| ------------------------------ | ----------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| `image.repository` | Docker repository to use | `us.gcr.io/k8s-artifacts-prod/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
@@ -28,7 +28,7 @@ import (

	aflag "k8s.io/component-base/cli/flag"
	"k8s.io/component-base/logs"
	"k8s.io/klog"
	"k8s.io/klog/v2"
)

// NewDeschedulerCommand creates a *cobra.Command object with default parameters
@@ -3,7 +3,7 @@

## Required Tools

- [Git](https://git-scm.com/downloads)
- [Go 1.13+](https://golang.org/dl/)
- [Go 1.14+](https://golang.org/dl/)
- [Docker](https://docs.docker.com/install/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
- [kind](https://kind.sigs.k8s.io/)
@@ -7,7 +7,7 @@
1. Make sure your repo is clean by git's standards
2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
4. Tag the repository and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Publish a draft release using the tag you just created
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
7. Publish release
@@ -18,7 +18,7 @@
1. Make sure your repo is clean by git's standards
2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
4. Tag the repository and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
6. Build and push the container image to the staging registry `VERSION=$VERSION make push`
7. Publish a draft release using the tag you just created
@@ -27,13 +27,11 @@
10. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release

### Notes
It's important to create the tag on the master branch after creating the release-* branch so that the [Helm releaser-action](#helm-chart) can work.
It compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
will be nothing to compare and it will fail (creating a new release branch usually involves no code changes). For this same reason, you should
also tag patch releases (on the release-* branch) *after* pushing changes (if those changes involve bumping the Helm chart version).

See [post-descheduler-push-images dashboard](https://testgrid.k8s.io/sig-scheduling#post-descheduler-push-images) for staging registry image build job status.

View the descheduler staging registry using [this URL](https://console.cloud.google.com/gcr/images/k8s-staging-descheduler/GLOBAL/descheduler) in a web browser
or use the below `gcloud` commands.

List images in staging registry.
```
gcloud container images list --repository gcr.io/k8s-staging-descheduler
@@ -55,17 +53,17 @@ docker pull gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23
```

## Helm Chart
Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. Example git tag name is `descheduler-helm-chart-0.18.0`.
Released versions of the helm charts are stored in the `gh-pages` branch of this repo. The [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action)
is setup to build and push the helm charts to the `gh-pages` branch when changes are pushed to a `release-*` branch.
Helm chart releases are managed by a separate set of git tags that are prefixed with `chart-*`. Example git tag name is `chart-0.18.0`. Released versions of the
helm charts are stored in the `gh-pages` branch of this repo. The [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action) is setup to
build and push the helm charts to the `gh-pages` branch when a `chart-*` git tag is created.

The major and minor version of the chart matches the descheduler major and minor versions. For example descheduler helm chart version helm-descheduler-chart-0.18.0 corresponds
The major and minor version of the chart matches the descheduler major and minor versions. For example descheduler helm chart version chart-0.18.0 corresponds
to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match. The patch
version of the descheduler helm chart is used to version changes specific to the helm chart.

1. Merge all helm chart changes into the master branch before the release is tagged/cut
1. Merge all helm chart changes into the appropriate release branch (i.e. release-1.18)
1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix)
2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
2. Make sure your repo is clean by git's standards
3. Follow the release-branch or patch release tagging pattern from the above section.
4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
3. Create the tag and push it `git checkout release-1.18; CHART_VERSION=chart-0.18.0; git tag $CHART_VERSION; git push origin $CHART_VERSION`
4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch

@@ -21,5 +21,5 @@ strategies:
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThresholds: 100
podRestartThreshold: 100
includingInitContainers: true
go.mod (15 lines changed)
@@ -1,14 +1,15 @@
module sigs.k8s.io/descheduler

go 1.13
go 1.14

require (
	github.com/spf13/cobra v0.0.5
	github.com/spf13/pflag v1.0.5
	k8s.io/api v0.18.2
	k8s.io/apimachinery v0.18.2
	k8s.io/apiserver v0.18.2
	k8s.io/client-go v0.18.2
	k8s.io/component-base v0.18.2
	k8s.io/klog v1.0.0
	k8s.io/api v0.18.4
	k8s.io/apimachinery v0.18.4
	k8s.io/apiserver v0.18.4
	k8s.io/client-go v0.18.4
	k8s.io/code-generator v0.18.4
	k8s.io/component-base v0.18.4
	k8s.io/klog/v2 v2.0.0
)
go.sum (47 lines changed)
@@ -19,8 +19,10 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
@@ -54,6 +56,7 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
@@ -66,17 +69,22 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
||||
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||
github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
||||
github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
|
||||
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
@@ -98,7 +106,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@@ -108,7 +115,6 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
|
||||
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
@@ -152,6 +158,7 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
|
||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
|
||||
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
@@ -252,6 +259,7 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
|
||||
@@ -278,7 +286,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
|
||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -301,6 +308,9 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 h1:bw9doJza/SFBEweII/rHQh338oozWyiFsBRHtrflcws=
|
||||
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -337,30 +347,35 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
|
||||
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
|
||||
k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA=
|
||||
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
|
||||
k8s.io/apiserver v0.18.2 h1:fwKxdTWwwYhxvtjo0UUfX+/fsitsNtfErPNegH2x9ic=
|
||||
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
|
||||
k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE=
|
||||
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
|
||||
k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y=
|
||||
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
|
||||
k8s.io/api v0.18.4 h1:8x49nBRxuXGUlDlwlWd3RMY1SayZrzFfxea3UZSkFw4=
|
||||
k8s.io/api v0.18.4/go.mod h1:lOIQAKYgai1+vz9J7YcDZwC26Z0zQewYOGWdyIPUUQ4=
|
||||
k8s.io/apimachinery v0.18.4 h1:ST2beySjhqwJoIFk6p7Hp5v5O0hYY6Gngq/gUYXTPIA=
|
||||
k8s.io/apimachinery v0.18.4/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
|
||||
k8s.io/apiserver v0.18.4 h1:pn1jSQkfboPSirZopkVpEdLW4FcQLnYMaIY8LFxxj30=
|
||||
k8s.io/apiserver v0.18.4/go.mod h1:q+zoFct5ABNnYkGIaGQ3bcbUNdmPyOCoEBcg51LChY8=
|
||||
k8s.io/client-go v0.18.4 h1:un55V1Q/B3JO3A76eS0kUSywgGK/WR3BQ8fHQjNa6Zc=
|
||||
k8s.io/client-go v0.18.4/go.mod h1:f5sXwL4yAZRkAtzOxRWUhA/N8XzGCb+nPZI8PfobZ9g=
|
||||
k8s.io/code-generator v0.18.4 h1:SouAMfh3jbL7aL8rnUQ/C+7WwXYTZnPa8L9V2TtIE7o=
|
||||
k8s.io/code-generator v0.18.4/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
|
||||
k8s.io/component-base v0.18.4 h1:Kr53Fp1iCGNsl9Uv4VcRvLy7YyIqi9oaJOQ7SXtKI98=
|
||||
k8s.io/component-base v0.18.4/go.mod h1:7jr/Ef5PGmKwQhyAz/pjByxJbC58mhKAhiaDu0vXfPk=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
|
||||
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
|
||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
||||
k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
|
||||
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
|
||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
|
||||
hack/tools.go (new file, 22 lines)
@@ -0,0 +1,22 @@
// +build tools

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This package imports things required by build scripts, to force `go mod` to see them as dependencies
package tools

import _ "k8s.io/code-generator"
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

GO_VERSION=($(go version))

if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13|go1.14') ]]; then
  echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
  exit 1
fi

@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

GO_VERSION=($(go version))

if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13|go1.14') ]]; then
  echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
  exit 1
fi
@@ -41,7 +41,7 @@ type DeschedulerStrategy struct {
	Weight int

	// Strategy parameters
	Params StrategyParameters
	Params *StrategyParameters
}

// Only one of its members may be specified

@@ -41,7 +41,7 @@ type DeschedulerStrategy struct {
	Weight int `json:"weight,omitempty"`

	// Strategy parameters
	Params StrategyParameters `json:"params,omitempty"`
	Params *StrategyParameters `json:"params,omitempty"`
}

// Only one of its members may be specified
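Because `Params` is now a pointer, a policy that omits the `params:` block yields a nil value rather than a zero-valued struct, so consumers have to nil-check before dereferencing. A minimal illustrative sketch; the helper itself is hypothetical, only the `api.DeschedulerStrategy` fields come from the types above.

```go
package example

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
)

// describeStrategy is illustrative only: it shows the nil guard that the
// pointer-valued Params field now requires.
func describeStrategy(strategy api.DeschedulerStrategy) string {
	if !strategy.Enabled {
		return "strategy disabled"
	}
	if strategy.Params == nil {
		return "strategy enabled with default parameters"
	}
	return fmt.Sprintf("strategy enabled with parameters: %+v", *strategy.Params)
}
```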
@@ -121,9 +121,7 @@ func Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.Desched
func autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
	out.Enabled = in.Enabled
	out.Weight = in.Weight
	if err := Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(&in.Params, &out.Params, s); err != nil {
		return err
	}
	out.Params = (*api.StrategyParameters)(unsafe.Pointer(in.Params))
	return nil
}

@@ -135,9 +133,7 @@ func Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *Desched
func autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
	out.Enabled = in.Enabled
	out.Weight = in.Weight
	if err := Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(&in.Params, &out.Params, s); err != nil {
		return err
	}
	out.Params = (*StrategyParameters)(unsafe.Pointer(in.Params))
	return nil
}
@@ -59,7 +59,11 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
	*out = *in
	in.Params.DeepCopyInto(&out.Params)
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = new(StrategyParameters)
		(*in).DeepCopyInto(*out)
	}
	return
}

@@ -59,7 +59,11 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
	*out = *in
	in.Params.DeepCopyInto(&out.Params)
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = new(StrategyParameters)
		(*in).DeepCopyInto(*out)
	}
	return
}
@@ -22,7 +22,7 @@ import (

	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog"
	"k8s.io/klog/v2"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
@@ -60,7 +60,7 @@ func Run(rs *options.DeschedulerServer) error {
	return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
}

type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)
type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor)

func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
	sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
@@ -99,11 +99,12 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
		rs.DryRun,
		rs.MaxNoOfPodsToEvictPerNode,
		nodes,
		rs.EvictLocalStoragePods,
	)

	for name, f := range strategyFuncs {
		if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
			f(ctx, rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
			f(ctx, rs.Client, strategy, nodes, podEvictor)
		}
	}
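The new `strategyFunction` type drops the `evictLocalStoragePods` argument because that setting now lives inside the `PodEvictor`. Below is a hedged sketch of a strategy matching the new signature; the strategy itself is hypothetical, and the import paths are the ones used elsewhere in this diff.

```go
package strategies

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// exampleStrategy matches the new strategyFunction signature: local-storage
// handling is no longer passed in because the PodEvictor already carries it.
func exampleStrategy(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	for _, node := range nodes {
		// The filter argument (here podEvictor.IsEvictable) limits the listing
		// to pods the evictor is willing to evict.
		pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
		if err != nil {
			continue
		}
		for _, pod := range pods {
			// The optional trailing reason is included in logs and events.
			if _, err := podEvictor.EvictPod(ctx, pod, node, "example strategy"); err != nil {
				break // per-node eviction limit reached
			}
		}
	}
}
```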
@@ -19,37 +19,47 @@ package evictions
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/klog/v2"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
|
||||
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
|
||||
)
|
||||
|
||||
// nodePodEvictedCount keeps count of pods evicted on node
|
||||
type nodePodEvictedCount map[*v1.Node]int
|
||||
|
||||
type PodEvictor struct {
|
||||
client clientset.Interface
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
maxPodsToEvict int
|
||||
nodepodCount nodePodEvictedCount
|
||||
client clientset.Interface
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
maxPodsToEvictPerNode int
|
||||
nodepodCount nodePodEvictedCount
|
||||
evictLocalStoragePods bool
|
||||
}
|
||||
|
||||
func NewPodEvictor(
|
||||
client clientset.Interface,
|
||||
policyGroupVersion string,
|
||||
dryRun bool,
|
||||
maxPodsToEvict int,
|
||||
maxPodsToEvictPerNode int,
|
||||
nodes []*v1.Node,
|
||||
evictLocalStoragePods bool,
|
||||
) *PodEvictor {
|
||||
var nodePodCount = make(nodePodEvictedCount)
|
||||
for _, node := range nodes {
|
||||
@@ -58,14 +68,46 @@ func NewPodEvictor(
|
||||
}
|
||||
|
||||
return &PodEvictor{
|
||||
client: client,
|
||||
policyGroupVersion: policyGroupVersion,
|
||||
dryRun: dryRun,
|
||||
maxPodsToEvict: maxPodsToEvict,
|
||||
nodepodCount: nodePodCount,
|
||||
client: client,
|
||||
policyGroupVersion: policyGroupVersion,
|
||||
dryRun: dryRun,
|
||||
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
|
||||
nodepodCount: nodePodCount,
|
||||
evictLocalStoragePods: evictLocalStoragePods,
|
||||
}
|
||||
}
|
||||
|
||||
// IsEvictable checks if a pod is evictable or not.
|
||||
func (pe *PodEvictor) IsEvictable(pod *v1.Pod) bool {
|
||||
checkErrs := []error{}
|
||||
if IsCriticalPod(pod) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod is critical"))
|
||||
}
|
||||
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
if IsDaemonsetPod(ownerRefList) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
|
||||
}
|
||||
|
||||
if len(ownerRefList) == 0 {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod does not have any ownerrefs"))
|
||||
}
|
||||
|
||||
if !pe.evictLocalStoragePods && IsPodWithLocalStorage(pod) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod has local storage and descheduler is not configured with --evict-local-storage-pods"))
|
||||
}
|
||||
|
||||
if IsMirrorPod(pod) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
|
||||
}
|
||||
|
||||
if len(checkErrs) > 0 && !HaveEvictAnnotation(pod) {
|
||||
klog.V(4).Infof("Pod %s in namespace %s is not evictable: Pod lacks an eviction annotation and fails the following checks: %v", pod.Name, pod.Namespace, errors.NewAggregate(checkErrs).Error())
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// NodeEvicted gives a number of pods evicted for node
|
||||
func (pe *PodEvictor) NodeEvicted(node *v1.Node) int {
|
||||
return pe.nodepodCount[node]
|
||||
@@ -81,25 +123,34 @@ func (pe *PodEvictor) TotalEvicted() int {
}

// EvictPod returns non-nil error only when evicting a pod on a node is not
// possible (due to maxPodsToEvict constraint). Success is true when the pod
// possible (due to maxPodsToEvictPerNode constraint). Success is true when the pod
// is evicted on the server side.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node) (bool, error) {
	if pe.maxPodsToEvict > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvict {
		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvict, node.Name)
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, reasons ...string) (bool, error) {
	var reason string
	if len(reasons) > 0 {
		reason = " (" + strings.Join(reasons, ", ") + ")"
	}
	if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode {
		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name)
	}

	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
	if err != nil {
		// err is used only for logging purposes
		klog.Errorf("Error evicting pod: %#v in namespace %#v (%#v)", pod.Name, pod.Namespace, err)
		klog.Errorf("Error evicting pod: %#v in namespace %#v%s: %#v", pod.Name, pod.Namespace, reason, err)
		return false, nil
	}

	pe.nodepodCount[node]++
	if pe.dryRun {
		klog.V(1).Infof("Evicted pod in dry run mode: %#v in namespace %#v", pod.Name, pod.Namespace)
		klog.V(1).Infof("Evicted pod in dry run mode: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
	} else {
		klog.V(1).Infof("Evicted pod: %#v in namespace %#v", pod.Name, pod.Namespace)
		klog.V(1).Infof("Evicted pod: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
		eventBroadcaster := record.NewBroadcaster()
		eventBroadcaster.StartLogging(klog.V(3).Infof)
		eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
		r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
		r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
	}
	return true, nil
}
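Putting the constructor and the new variadic `reasons` argument together, here is a hedged usage sketch; the limits and the reason string are placeholders, and only the `NewPodEvictor` and `EvictPod` signatures come from the code above.

```go
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

// evictWithReason is illustrative wiring, not descheduler code.
func evictWithReason(ctx context.Context, client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, pod *v1.Pod, node *v1.Node) bool {
	podEvictor := evictions.NewPodEvictor(
		client,
		policyGroupVersion,
		false, // dryRun
		10,    // maxPodsToEvictPerNode; 0 disables the per-node limit
		nodes,
		false, // evictLocalStoragePods
	)

	// A non-nil error means the per-node limit blocked the eviction; an
	// eviction API failure is only logged and surfaces as (false, nil).
	evicted, err := podEvictor.EvictPod(ctx, pod, node, "exceeds restart threshold")
	if err != nil {
		return false
	}
	return evicted
}
```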
@@ -123,14 +174,6 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
|
||||
}
|
||||
err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction)
|
||||
|
||||
if err == nil {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(klog.V(3).Infof)
|
||||
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: client.CoreV1().Events(pod.Namespace)})
|
||||
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
|
||||
r.Event(pod, v1.EventTypeNormal, "Descheduled", "pod evicted by sigs.k8s.io/descheduler")
|
||||
return nil
|
||||
}
|
||||
if apierrors.IsTooManyRequests(err) {
|
||||
return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||
}
|
||||
@@ -139,3 +182,37 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func IsCriticalPod(pod *v1.Pod) bool {
|
||||
return utils.IsCriticalPod(pod)
|
||||
}
|
||||
|
||||
func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
|
||||
for _, ownerRef := range ownerRefList {
|
||||
if ownerRef.Kind == "DaemonSet" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsMirrorPod checks whether the pod is a mirror pod.
|
||||
func IsMirrorPod(pod *v1.Pod) bool {
|
||||
return utils.IsMirrorPod(pod)
|
||||
}
|
||||
|
||||
// HaveEvictAnnotation checks if the pod have evict annotation
|
||||
func HaveEvictAnnotation(pod *v1.Pod) bool {
|
||||
_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
|
||||
return found
|
||||
}
|
||||
|
||||
func IsPodWithLocalStorage(pod *v1.Pod) bool {
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if volume.HostPath != nil || volume.EmptyDir != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -21,9 +21,12 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
@@ -65,3 +68,217 @@ func TestEvictPod(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsEvictable(t *testing.T) {
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
||||
type testCase struct {
|
||||
pod *v1.Pod
|
||||
runBefore func(*v1.Pod)
|
||||
evictLocalStoragePods bool
|
||||
result bool
|
||||
}
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: true,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/evict": "true",
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test.runBefore(test.pod)
|
||||
podEvictor := &PodEvictor{
|
||||
evictLocalStoragePods: test.evictLocalStoragePods,
|
||||
}
|
||||
result := podEvictor.IsEvictable(test.pod)
|
||||
if result != test.result {
|
||||
t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pod.Name, test.result, result)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
func TestPodTypes(t *testing.T) {
n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)

// These won't be evicted.
p2 := test.BuildTestPod("p2", 400, 0, n1.Name, nil)
p3 := test.BuildTestPod("p3", 400, 0, n1.Name, nil)
p4 := test.BuildTestPod("p4", 400, 0, n1.Name, nil)
p5 := test.BuildTestPod("p5", 400, 0, n1.Name, nil)
p6 := test.BuildTestPod("p6", 400, 0, n1.Name, nil)

p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
// The following 4 pods won't get evicted.
// A daemonset.
//p2.Annotations = test.GetDaemonSetAnnotation()
p2.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p3.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p4.Annotations = test.GetMirrorPodAnnotation()
// A Critical Pod.
p5.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
p5.Spec.Priority = &priority
systemCriticalPriority := utils.SystemCriticalPriority
p5.Spec.Priority = &systemCriticalPriority
if !IsMirrorPod(p4) {
t.Errorf("Expected p4 to be a mirror pod.")
}
if !IsCriticalPod(p5) {
t.Errorf("Expected p5 to be a critical pod.")
}
if !IsPodWithLocalStorage(p3) {
t.Errorf("Expected p3 to be a pod with local storage.")
}
ownerRefList := podutil.OwnerRef(p2)
if !IsDaemonsetPod(ownerRefList) {
t.Errorf("Expected p2 to be a daemonset pod.")
}
ownerRefList = podutil.OwnerRef(p1)
if IsDaemonsetPod(ownerRefList) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
t.Errorf("Expected p1 to be a normal pod.")
}
}
@@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/utils"
)

@@ -130,10 +130,10 @@ func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
}

if !ok {
klog.V(1).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
klog.V(2).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
return false
}

klog.V(3).Infof("Pod %v fits on node %v", pod.Name, node.Name)
klog.V(2).Infof("Pod %v fits on node %v", pod.Name, node.Name)
return true
}
@@ -23,39 +23,15 @@ import (
"k8s.io/apimachinery/pkg/fields"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/descheduler/pkg/utils"
"sort"
)

const (
evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
)

// IsEvictable checks if a pod is evictable or not.
func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
ownerRefList := OwnerRef(pod)
if !HaveEvictAnnotation(pod) && (IsMirrorPod(pod) || (!evictLocalStoragePods && IsPodWithLocalStorage(pod)) || len(ownerRefList) == 0 || IsDaemonsetPod(ownerRefList) || IsCriticalPod(pod)) {
return false
}
return true
}

// ListEvictablePodsOnNode returns the list of evictable pods on node.
func ListEvictablePodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
pods, err := ListPodsOnANode(ctx, client, node)
if err != nil {
return []*v1.Pod{}, err
}
evictablePods := make([]*v1.Pod, 0)
for _, pod := range pods {
if !IsEvictable(pod, evictLocalStoragePods) {
continue
} else {
evictablePods = append(evictablePods, pod)
}
}
return evictablePods, nil
}

func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
// ListPodsOnANode lists all of the pods on a node
// It also accepts an optional "filter" function which can be used to further limit the pods that are returned.
// (Usually this is podEvictor.IsEvictable, in order to only list the evictable pods on a node, but can
// be used by strategies to extend IsEvictable if there are further restrictions, such as with NodeAffinity).
// The filter function should return true if the pod should be returned from ListPodsOnANode
func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, filter func(pod *v1.Pod) bool) ([]*v1.Pod, error) {
fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
if err != nil {
return []*v1.Pod{}, err
@@ -69,13 +45,17 @@ func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.N

pods := make([]*v1.Pod, 0)
for i := range podList.Items {
if filter != nil && !filter(&podList.Items[i]) {
continue
}
pods = append(pods, &podList.Items[i])
}
return pods, nil
}
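As a hedged usage sketch of the new `ListPodsOnANode` signature introduced above: a caller supplies its own filter on top of the evictor's check. The `podutil` import alias, the exact import paths, and the `listSmallPods` helper are assumptions for illustration, not code from this diff.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// listSmallPods reuses ListPodsOnANode's filter hook to keep only pods that
// are evictable and whose first container requests less than 500m CPU.
func listSmallPods(ctx context.Context, client clientset.Interface, node *v1.Node, evictor *evictions.PodEvictor) ([]*v1.Pod, error) {
	return podutil.ListPodsOnANode(ctx, client, node, func(pod *v1.Pod) bool {
		if !evictor.IsEvictable(pod) || len(pod.Spec.Containers) == 0 {
			return false
		}
		req := pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
		return req.MilliValue() < 500
	})
}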
func IsCriticalPod(pod *v1.Pod) bool {
return utils.IsCriticalPod(pod)
// OwnerRef returns the ownerRefList for the pod.
func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
return pod.ObjectMeta.GetOwnerReferences()
}

func IsBestEffortPod(pod *v1.Pod) bool {
@@ -90,37 +70,26 @@ func IsGuaranteedPod(pod *v1.Pod) bool {
return utils.GetPodQOS(pod) == v1.PodQOSGuaranteed
}

func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
for _, ownerRef := range ownerRefList {
if ownerRef.Kind == "DaemonSet" {
// SortPodsBasedOnPriorityLowToHigh sorts pods based on their priorities from low to high.
// If pods have same priorities, they will be sorted by QoS in the following order:
// BestEffort, Burstable, Guaranteed
func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
sort.Slice(pods, func(i, j int) bool {
if pods[i].Spec.Priority == nil && pods[j].Spec.Priority != nil {
return true
}
}
return false
}

// IsMirrorPod checks whether the pod is a mirror pod.
func IsMirrorPod(pod *v1.Pod) bool {
return utils.IsMirrorPod(pod)
}

// HaveEvictAnnotation checks if the pod have evict annotation
func HaveEvictAnnotation(pod *v1.Pod) bool {
_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
return found
}

func IsPodWithLocalStorage(pod *v1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if volume.HostPath != nil || volume.EmptyDir != nil {
return true
if pods[j].Spec.Priority == nil && pods[i].Spec.Priority != nil {
return false
}
}

return false
}

// OwnerRef returns the ownerRefList for the pod.
func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
return pod.ObjectMeta.GetOwnerReferences()
if (pods[j].Spec.Priority == nil && pods[i].Spec.Priority == nil) || (*pods[i].Spec.Priority == *pods[j].Spec.Priority) {
if IsBestEffortPod(pods[i]) {
return true
}
if IsBurstablePod(pods[i]) && IsGuaranteedPod(pods[j]) {
return true
}
return false
}
return *pods[i].Spec.Priority < *pods[j].Spec.Priority
})
}
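A small usage sketch of `SortPodsBasedOnPriorityLowToHigh` as defined above, assuming the package is imported as `podutil` (the import path is an assumption): pods without a priority sort first, then pods in ascending priority, with QoS breaking ties.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func main() {
	low, high := int32(0), int32(10000)
	noPriority := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "no-priority"}}
	lowPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "low"}, Spec: v1.PodSpec{Priority: &low}}
	highPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "high"}, Spec: v1.PodSpec{Priority: &high}}

	pods := []*v1.Pod{highPod, lowPod, noPriority}
	podutil.SortPodsBasedOnPriorityLowToHigh(pods)
	for _, p := range pods {
		// Expected order: no-priority, low, high.
		fmt.Println(p.Name)
	}
}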
@@ -17,221 +17,99 @@ limitations under the License.
|
||||
package pod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func TestIsEvictable(t *testing.T) {
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
||||
type testCase struct {
|
||||
pod *v1.Pod
|
||||
runBefore func(*v1.Pod)
|
||||
evictLocalStoragePods bool
|
||||
result bool
|
||||
}
|
||||
var (
|
||||
lowPriority = int32(0)
|
||||
highPriority = int32(10000)
|
||||
)
|
||||
|
||||
testCases := []testCase{
|
||||
func TestListPodsOnANode(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
pods map[string][]v1.Pod
|
||||
node *v1.Node
|
||||
expectedPodCount int
|
||||
}{
|
||||
{
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
name: "test listing pods on a node",
|
||||
pods: map[string][]v1.Pod{
|
||||
"n1": {
|
||||
*test.BuildTestPod("pod1", 100, 0, "n1", nil),
|
||||
*test.BuildTestPod("pod2", 100, 0, "n1", nil),
|
||||
},
|
||||
"n2": {*test.BuildTestPod("pod3", 100, 0, "n2", nil)},
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: true,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/evict": "true",
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||
expectedPodCount: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test.runBefore(test.pod)
|
||||
result := IsEvictable(test.pod, test.evictLocalStoragePods)
|
||||
if result != test.result {
|
||||
t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pod.Name, test.result, result)
|
||||
for _, testCase := range testCases {
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
list := action.(core.ListAction)
|
||||
fieldString := list.GetListRestrictions().Fields.String()
|
||||
if strings.Contains(fieldString, "n1") {
|
||||
return true, &v1.PodList{Items: testCase.pods["n1"]}, nil
|
||||
} else if strings.Contains(fieldString, "n2") {
|
||||
return true, &v1.PodList{Items: testCase.pods["n2"]}, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||
})
|
||||
pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node, nil)
|
||||
if len(pods) != testCase.expectedPodCount {
|
||||
t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
func TestPodTypes(t *testing.T) {
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
|
||||
|
||||
// These won't be evicted.
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name, nil)
|
||||
p4 := test.BuildTestPod("p4", 400, 0, n1.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 400, 0, n1.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 400, 0, n1.Name, nil)
|
||||
func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
|
||||
|
||||
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, lowPriority)
|
||||
})
|
||||
|
||||
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
// The following 4 pods won't get evicted.
|
||||
// A daemonset.
|
||||
//p2.Annotations = test.GetDaemonSetAnnotation()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
// A pod with local storage.
|
||||
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p3.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
p4.Annotations = test.GetMirrorPodAnnotation()
|
||||
// A Critical Pod.
|
||||
p5.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
p5.Spec.Priority = &priority
|
||||
systemCriticalPriority := utils.SystemCriticalPriority
|
||||
p5.Spec.Priority = &systemCriticalPriority
|
||||
if !IsMirrorPod(p4) {
|
||||
t.Errorf("Expected p4 to be a mirror pod.")
|
||||
}
|
||||
if !IsCriticalPod(p5) {
|
||||
t.Errorf("Expected p5 to be a critical pod.")
|
||||
}
|
||||
if !IsPodWithLocalStorage(p3) {
|
||||
t.Errorf("Expected p3 to be a pod with local storage.")
|
||||
}
|
||||
ownerRefList := OwnerRef(p2)
|
||||
if !IsDaemonsetPod(ownerRefList) {
|
||||
t.Errorf("Expected p2 to be a daemonset pod.")
|
||||
}
|
||||
ownerRefList = OwnerRef(p1)
|
||||
if IsDaemonsetPod(ownerRefList) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
|
||||
t.Errorf("Expected p1 to be a normal pod.")
|
||||
}
|
||||
// BestEffort
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeBestEffortPod(pod)
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeBurstablePod(pod)
|
||||
})
|
||||
|
||||
// Guaranteed
|
||||
p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeGuaranteedPod(pod)
|
||||
})
|
||||
|
||||
// Best effort with nil priorities.
|
||||
p5 := test.BuildTestPod("p5", 400, 100, n1.Name, test.MakeBestEffortPod)
|
||||
p5.Spec.Priority = nil
|
||||
|
||||
p6 := test.BuildTestPod("p6", 400, 100, n1.Name, test.MakeGuaranteedPod)
|
||||
p6.Spec.Priority = nil
|
||||
|
||||
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
|
||||
|
||||
SortPodsBasedOnPriorityLowToHigh(podList)
|
||||
if !reflect.DeepEqual(podList[len(podList)-1], p4) {
|
||||
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ import (
"io/ioutil"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog"
"k8s.io/klog/v2"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"

@@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/klog/v2"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -42,14 +42,70 @@ func RemoveDuplicatePods(
client clientset.Interface,
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
evictLocalStoragePods bool,
podEvictor *evictions.PodEvictor,
) {
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v", node.Name)
duplicatePods := listDuplicatePodsOnANode(ctx, client, node, strategy, evictLocalStoragePods)
pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
if err != nil {
klog.Errorf("error listing evictable pods on node %s: %+v", node.Name, err)
continue
}

duplicatePods := make([]*v1.Pod, 0, len(pods))
// Each pod has a list of owners and a list of containers, and each container has 1 image spec.
// For each pod, we go through all the OwnerRef/Image mappings and represent them as a "key" string.
// All of those mappings together makes a list of "key" strings that essentially represent that pod's uniqueness.
// This list of keys representing a single pod is then sorted alphabetically.
// If any other pod has a list that matches that pod's list, those pods are undeniably duplicates for the following reasons:
// - The 2 pods have the exact same ownerrefs
// - The 2 pods have the exact same container images
//
// duplicateKeysMap maps the first Namespace/Kind/Name/Image in a pod's list to a 2D-slice of all the other lists where that is the first key
// (Since we sort each pod's list, we only need to key the map on the first entry in each list. Any pod that doesn't have
// the same first entry is clearly not a duplicate. This makes lookup quick and minimizes storage needed).
// If any of the existing lists for that first key matches the current pod's list, the current pod is a duplicate.
// If not, then we add this pod's list to the list of lists for that key.
duplicateKeysMap := map[string][][]string{}
for _, pod := range pods {
ownerRefList := podutil.OwnerRef(pod)
if hasExcludedOwnerRefKind(ownerRefList, strategy) {
continue
}
podContainerKeys := make([]string, 0, len(ownerRefList)*len(pod.Spec.Containers))
for _, ownerRef := range ownerRefList {
for _, container := range pod.Spec.Containers {
// Namespace/Kind/Name should be unique for the cluster.
// We also consider the image, as 2 pods could have the same owner but serve different purposes
// So any non-unique Namespace/Kind/Name/Image pattern is a duplicate pod.
s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name, container.Image}, "/")
podContainerKeys = append(podContainerKeys, s)
}
}
sort.Strings(podContainerKeys)

// If there have been any other pods with the same first "key", look through all the lists to see if any match
if existing, ok := duplicateKeysMap[podContainerKeys[0]]; ok {
matched := false
for _, keys := range existing {
if reflect.DeepEqual(keys, podContainerKeys) {
matched = true
duplicatePods = append(duplicatePods, pod)
break
}
}
if !matched {
// Found no matches, add this list of keys to the list of lists that have the same first key
duplicateKeysMap[podContainerKeys[0]] = append(duplicateKeysMap[podContainerKeys[0]], podContainerKeys)
}
} else {
// This is the first pod we've seen that has this first "key" entry
duplicateKeysMap[podContainerKeys[0]] = [][]string{podContainerKeys}
}
}

for _, pod := range duplicatePods {
if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
if _, err := podEvictor.EvictPod(ctx, pod, node, "RemoveDuplicatePods"); err != nil {
klog.Errorf("Error evicting pod: (%#v)", err)
break
}
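To make the duplicate-detection keying in the comments above concrete, here is a minimal, self-contained sketch (the `podKeys` helper is hypothetical, not part of this diff) showing how two pods owned by the same ReplicaSet and running the same image produce identical sorted key lists, which is what marks the second pod as a duplicate candidate.

package main

import (
	"fmt"
	"reflect"
	"sort"
	"strings"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podKeys builds one "Namespace/Kind/Name/Image" key per ownerRef/container
// pair and sorts the result, mirroring the uniqueness fingerprint used above.
func podKeys(pod *v1.Pod) []string {
	keys := make([]string, 0, len(pod.OwnerReferences)*len(pod.Spec.Containers))
	for _, ref := range pod.OwnerReferences {
		for _, c := range pod.Spec.Containers {
			keys = append(keys, strings.Join([]string{pod.Namespace, ref.Kind, ref.Name, c.Image}, "/"))
		}
	}
	sort.Strings(keys)
	return keys
}

func main() {
	owner := metav1.OwnerReference{Kind: "ReplicaSet", Name: "web"}
	a := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web-1", Namespace: "dev", OwnerReferences: []metav1.OwnerReference{owner}},
		Spec:       v1.PodSpec{Containers: []v1.Container{{Name: "app", Image: "nginx:1.19"}}},
	}
	b := a.DeepCopy()
	b.Name = "web-2"

	// Identical sorted key lists: the second pod would be flagged as a duplicate.
	fmt.Println(reflect.DeepEqual(podKeys(a), podKeys(b)))
}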
@@ -57,66 +113,8 @@ func RemoveDuplicatePods(
|
||||
}
|
||||
}
|
||||
|
||||
// listDuplicatePodsOnANode lists duplicate pods on a given node.
|
||||
// It checks for pods which have the same owner and have at least 1 container with the same image spec
|
||||
func listDuplicatePodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, strategy api.DeschedulerStrategy, evictLocalStoragePods bool) []*v1.Pod {
|
||||
pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
duplicatePods := make([]*v1.Pod, 0, len(pods))
|
||||
// Each pod has a list of owners and a list of containers, and each container has 1 image spec.
|
||||
// For each pod, we go through all the OwnerRef/Image mappings and represent them as a "key" string.
|
||||
// All of those mappings together makes a list of "key" strings that essentially represent that pod's uniqueness.
|
||||
// This list of keys representing a single pod is then sorted alphabetically.
|
||||
// If any other pod has a list that matches that pod's list, those pods are undeniably duplicates for the following reasons:
|
||||
// - The 2 pods have the exact same ownerrefs
|
||||
// - The 2 pods have the exact same container images
|
||||
//
|
||||
// duplicateKeysMap maps the first Namespace/Kind/Name/Image in a pod's list to a 2D-slice of all the other lists where that is the first key
|
||||
// (Since we sort each pod's list, we only need to key the map on the first entry in each list. Any pod that doesn't have
|
||||
// the same first entry is clearly not a duplicate. This makes lookup quick and minimizes storage needed).
|
||||
// If any of the existing lists for that first key matches the current pod's list, the current pod is a duplicate.
|
||||
// If not, then we add this pod's list to the list of lists for that key.
|
||||
duplicateKeysMap := map[string][][]string{}
|
||||
for _, pod := range pods {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
if hasExcludedOwnerRefKind(ownerRefList, strategy) {
|
||||
continue
|
||||
}
|
||||
podContainerKeys := make([]string, 0, len(ownerRefList)*len(pod.Spec.Containers))
|
||||
for _, ownerRef := range ownerRefList {
|
||||
for _, container := range pod.Spec.Containers {
|
||||
// Namespace/Kind/Name should be unique for the cluster.
|
||||
// We also consider the image, as 2 pods could have the same owner but serve different purposes
|
||||
// So any non-unique Namespace/Kind/Name/Image pattern is a duplicate pod.
|
||||
s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name, container.Image}, "/")
|
||||
podContainerKeys = append(podContainerKeys, s)
|
||||
}
|
||||
}
|
||||
sort.Strings(podContainerKeys)
|
||||
|
||||
// If there have been any other pods with the same first "key", look through all the lists to see if any match
|
||||
if existing, ok := duplicateKeysMap[podContainerKeys[0]]; ok {
|
||||
for _, keys := range existing {
|
||||
if reflect.DeepEqual(keys, podContainerKeys) {
|
||||
duplicatePods = append(duplicatePods, pod)
|
||||
break
|
||||
}
|
||||
// Found no matches, add this list of keys to the list of lists that have the same first key
|
||||
duplicateKeysMap[podContainerKeys[0]] = append(duplicateKeysMap[podContainerKeys[0]], podContainerKeys)
|
||||
}
|
||||
} else {
|
||||
// This is the first pod we've seen that has this first "key" entry
|
||||
duplicateKeysMap[podContainerKeys[0]] = [][]string{podContainerKeys}
|
||||
}
|
||||
}
|
||||
return duplicatePods
|
||||
}
|
||||
|
||||
func hasExcludedOwnerRefKind(ownerRefs []metav1.OwnerReference, strategy api.DeschedulerStrategy) bool {
|
||||
if strategy.Params.RemoveDuplicates == nil {
|
||||
if strategy.Params == nil || strategy.Params.RemoveDuplicates == nil {
|
||||
return false
|
||||
}
|
||||
exclude := sets.NewString(strategy.Params.RemoveDuplicates.ExcludeOwnerKinds...)
|
||||
|
||||
@@ -115,70 +115,70 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
|
||||
testCases := []struct {
|
||||
description string
|
||||
maxPodsToEvict int
|
||||
maxPodsToEvictPerNode int
|
||||
pods []v1.Pod
|
||||
expectedEvictedPodCount int
|
||||
strategy api.DeschedulerStrategy
|
||||
}{
|
||||
{
|
||||
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 2 should be evicted.",
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2, *p3},
|
||||
expectedEvictedPodCount: 2,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2, *p3},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{Params: api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
|
||||
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
|
||||
},
|
||||
{
|
||||
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 2 should be evicted.",
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p8, *p9, *p10},
|
||||
expectedEvictedPodCount: 2,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
|
||||
expectedEvictedPodCount: 4,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
|
||||
maxPodsToEvict: 2,
|
||||
maxPodsToEvictPerNode: 2,
|
||||
pods: []v1.Pod{*p4, *p5, *p6, *p7},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Test all Pods: 4 should be evicted.",
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
|
||||
expectedEvictedPodCount: 4,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Pods with the same owner but different images should not be evicted",
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p11, *p12},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Pods with multiple containers should not match themselves",
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p13},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p11, *p13},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
@@ -197,11 +197,12 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
testCase.maxPodsToEvict,
|
||||
testCase.maxPodsToEvictPerNode,
|
||||
[]*v1.Node{node},
|
||||
false,
|
||||
)
|
||||
|
||||
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, []*v1.Node{node}, false, podEvictor)
|
||||
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, []*v1.Node{node}, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != testCase.expectedEvictedPodCount {
|
||||
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
|
||||
|
||||
@@ -18,12 +18,13 @@ package strategies
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
@@ -32,36 +33,55 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
// NodeUsageMap stores a node's info, pods on it and its resource usage
|
||||
type NodeUsageMap struct {
|
||||
node *v1.Node
|
||||
usage api.ResourceThresholds
|
||||
allPods []*v1.Pod
|
||||
}
|
||||
|
||||
// NodePodsMap is a set of (node, pods) pairs
|
||||
type NodePodsMap map[*v1.Node][]*v1.Pod
|
||||
|
||||
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
|
||||
if !strategy.Enabled {
|
||||
return
|
||||
}
|
||||
const (
|
||||
// MinResourcePercentage is the minimum value of a resource's percentage
|
||||
MinResourcePercentage = 0
|
||||
// MaxResourcePercentage is the maximum value of a resource's percentage
|
||||
MaxResourcePercentage = 100
|
||||
)
|
||||
|
||||
// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
|
||||
// to calculate nodes' utilization and not the actual resource usage.
|
||||
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
|
||||
// todo: move to config validation?
|
||||
// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
|
||||
if strategy.Params.NodeResourceUtilizationThresholds == nil {
|
||||
if strategy.Params == nil || strategy.Params.NodeResourceUtilizationThresholds == nil {
|
||||
klog.V(1).Infof("NodeResourceUtilizationThresholds not set")
|
||||
return
|
||||
}
|
||||
|
||||
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
|
||||
if !validateThresholds(thresholds) {
|
||||
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
|
||||
if err := validateStrategyConfig(thresholds, targetThresholds); err != nil {
|
||||
klog.Errorf("LowNodeUtilization config is not valid: %v", err)
|
||||
return
|
||||
}
|
||||
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
|
||||
if !validateTargetThresholds(targetThresholds) {
|
||||
return
|
||||
// check if Pods/CPU/Mem are set, if not, set them to 100
|
||||
if _, ok := thresholds[v1.ResourcePods]; !ok {
|
||||
thresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceCPU]; !ok {
|
||||
thresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceMemory]; !ok {
|
||||
thresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
}
|
||||
|
||||
npm := createNodePodsMap(ctx, client, nodes)
|
||||
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, evictLocalStoragePods)
|
||||
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
|
||||
|
||||
klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
|
||||
thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
|
||||
@@ -96,68 +116,73 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
|
||||
targetNodes,
|
||||
lowNodes,
|
||||
targetThresholds,
|
||||
evictLocalStoragePods,
|
||||
podEvictor)
|
||||
|
||||
klog.V(1).Infof("Total number of pods evicted: %v", podEvictor.TotalEvicted())
|
||||
}
|
||||
|
||||
func validateThresholds(thresholds api.ResourceThresholds) bool {
|
||||
if thresholds == nil || len(thresholds) == 0 {
|
||||
klog.V(1).Infof("no resource threshold is configured")
|
||||
return false
|
||||
// validateStrategyConfig checks if the strategy's config is valid
|
||||
func validateStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error {
|
||||
// validate thresholds and targetThresholds config
|
||||
if err := validateThresholds(thresholds); err != nil {
|
||||
return fmt.Errorf("thresholds config is not valid: %v", err)
|
||||
}
|
||||
for name := range thresholds {
|
||||
switch name {
|
||||
case v1.ResourceCPU:
|
||||
continue
|
||||
case v1.ResourceMemory:
|
||||
continue
|
||||
case v1.ResourcePods:
|
||||
continue
|
||||
default:
|
||||
klog.Errorf("only cpu, memory, or pods thresholds can be specified")
|
||||
return false
|
||||
if err := validateThresholds(targetThresholds); err != nil {
|
||||
return fmt.Errorf("targetThresholds config is not valid: %v", err)
|
||||
}
|
||||
|
||||
// validate if thresholds and targetThresholds have same resources configured
|
||||
if len(thresholds) != len(targetThresholds) {
|
||||
return fmt.Errorf("thresholds and targetThresholds configured different resources")
|
||||
}
|
||||
for resourceName, value := range thresholds {
|
||||
if targetValue, ok := targetThresholds[resourceName]; !ok {
|
||||
return fmt.Errorf("thresholds and targetThresholds configured different resources")
|
||||
} else if value > targetValue {
|
||||
return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
|
||||
}
|
||||
}
|
||||
return true
|
||||
return nil
|
||||
}
|
||||
|
||||
//This function could be merged into above once we are clear.
|
||||
func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
|
||||
if targetThresholds == nil {
|
||||
klog.V(1).Infof("no target resource threshold is configured")
|
||||
return false
|
||||
} else if _, ok := targetThresholds[v1.ResourcePods]; !ok {
|
||||
klog.V(1).Infof("no target resource threshold for pods is configured")
|
||||
return false
|
||||
// validateThresholds checks if thresholds have valid resource name and resource percentage configured
|
||||
func validateThresholds(thresholds api.ResourceThresholds) error {
|
||||
if thresholds == nil || len(thresholds) == 0 {
|
||||
return fmt.Errorf("no resource threshold is configured")
|
||||
}
|
||||
return true
|
||||
for name, percent := range thresholds {
|
||||
switch name {
|
||||
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
|
||||
if percent < MinResourcePercentage || percent > MaxResourcePercentage {
|
||||
return fmt.Errorf("%v threshold not in [%v, %v] range", name, MinResourcePercentage, MaxResourcePercentage)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("only cpu, memory, or pods thresholds can be specified")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
|
||||
// low and high thresholds, it is simply ignored.
|
||||
func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds, evictLocalStoragePods bool) ([]NodeUsageMap, []NodeUsageMap) {
|
||||
func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap) {
|
||||
lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
|
||||
for node, pods := range npm {
|
||||
usage := nodeUtilization(node, pods, evictLocalStoragePods)
|
||||
usage := nodeUtilization(node, pods)
|
||||
nuMap := NodeUsageMap{
|
||||
node: node,
|
||||
usage: usage,
|
||||
allPods: pods,
|
||||
}
|
||||
// Check if node is underutilized and if we can schedule pods on it.
|
||||
if !nodeutil.IsNodeUnschedulable(node) && IsNodeWithLowUtilization(usage, thresholds) {
|
||||
if !nodeutil.IsNodeUnschedulable(node) && isNodeWithLowUtilization(usage, thresholds) {
|
||||
klog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
|
||||
lowNodes = append(lowNodes, nuMap)
|
||||
} else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
|
||||
} else if isNodeAboveTargetUtilization(usage, targetThresholds) {
|
||||
klog.V(2).Infof("Node %#v is over utilized with usage: %#v", node.Name, usage)
|
||||
targetNodes = append(targetNodes, nuMap)
|
||||
} else {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
klog.V(2).Infof("Node %#v is unschedulable", node.Name)
|
||||
}
|
||||
klog.V(2).Infof("Node %#v is utilized with usage: %#v", node.Name, usage)
|
||||
klog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)
|
||||
}
|
||||
}
|
||||
return lowNodes, targetNodes
|
||||
@@ -170,11 +195,10 @@ func evictPodsFromTargetNodes(
|
||||
ctx context.Context,
|
||||
targetNodes, lowNodes []NodeUsageMap,
|
||||
targetThresholds api.ResourceThresholds,
|
||||
evictLocalStoragePods bool,
|
||||
podEvictor *evictions.PodEvictor,
|
||||
) {
|
||||
|
||||
SortNodesByUsage(targetNodes)
|
||||
sortNodesByUsage(targetNodes)
|
||||
|
||||
// upper bound on total number of pods/cpu/memory to be moved
|
||||
var totalPods, totalCPU, totalMem float64
|
||||
@@ -190,16 +214,12 @@ func evictPodsFromTargetNodes(
|
||||
totalPods += ((float64(podsPercentage) * float64(nodeCapacity.Pods().Value())) / 100)
|
||||
|
||||
// totalCPU capacity to be moved
|
||||
if _, ok := targetThresholds[v1.ResourceCPU]; ok {
|
||||
cpuPercentage := targetThresholds[v1.ResourceCPU] - node.usage[v1.ResourceCPU]
|
||||
totalCPU += ((float64(cpuPercentage) * float64(nodeCapacity.Cpu().MilliValue())) / 100)
|
||||
}
|
||||
cpuPercentage := targetThresholds[v1.ResourceCPU] - node.usage[v1.ResourceCPU]
|
||||
totalCPU += ((float64(cpuPercentage) * float64(nodeCapacity.Cpu().MilliValue())) / 100)
|
||||
|
||||
// totalMem capacity to be moved
|
||||
if _, ok := targetThresholds[v1.ResourceMemory]; ok {
|
||||
memPercentage := targetThresholds[v1.ResourceMemory] - node.usage[v1.ResourceMemory]
|
||||
totalMem += ((float64(memPercentage) * float64(nodeCapacity.Memory().Value())) / 100)
|
||||
}
|
||||
memPercentage := targetThresholds[v1.ResourceMemory] - node.usage[v1.ResourceMemory]
|
||||
totalMem += ((float64(memPercentage) * float64(nodeCapacity.Memory().Value())) / 100)
|
||||
}
|
||||
|
||||
klog.V(1).Infof("Total capacity to be moved: CPU:%v, Mem:%v, Pods:%v", totalCPU, totalMem, totalPods)
|
||||
@@ -212,30 +232,19 @@ func evictPodsFromTargetNodes(
|
||||
}
|
||||
klog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
|
||||
|
||||
nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods := classifyPods(node.allPods, evictLocalStoragePods)
|
||||
klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bestEffortPods:%v, burstablePods:%v, guaranteedPods:%v", len(node.allPods), len(nonRemovablePods), len(bestEffortPods), len(burstablePods), len(guaranteedPods))
|
||||
nonRemovablePods, removablePods := classifyPods(node.allPods, podEvictor)
|
||||
klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, removablePods:%v", len(node.allPods), len(nonRemovablePods), len(removablePods))
|
||||
|
||||
// Check if one pod has priority, if yes, assume that all pods have priority and evict pods based on priority.
|
||||
if node.allPods[0].Spec.Priority != nil {
|
||||
klog.V(1).Infof("All pods have priority associated with them. Evicting pods based on priority")
|
||||
evictablePods := make([]*v1.Pod, 0)
|
||||
evictablePods = append(append(burstablePods, bestEffortPods...), guaranteedPods...)
|
||||
|
||||
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
|
||||
sortPodsBasedOnPriority(evictablePods)
|
||||
evictPods(ctx, evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
|
||||
} else {
|
||||
// TODO: Remove this when we support only priority.
|
||||
// Falling back to evicting pods based on priority.
|
||||
klog.V(1).Infof("Evicting pods based on QoS")
|
||||
klog.V(1).Infof("There are %v non-evictable pods on the node", len(nonRemovablePods))
|
||||
// evict best effort pods
|
||||
evictPods(ctx, bestEffortPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
|
||||
// evict burstable pods
|
||||
evictPods(ctx, burstablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
|
||||
// evict guaranteed pods
|
||||
evictPods(ctx, guaranteedPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
|
||||
if len(removablePods) == 0 {
|
||||
klog.V(1).Infof("no removable pods on node %#v, try next node", node.node.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
klog.V(1).Infof("evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
|
||||
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
|
||||
podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
|
||||
evictPods(ctx, removablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
|
||||
|
||||
klog.V(1).Infof("%v pods evicted from node %#v with usage %v", podEvictor.NodeEvicted(node.node), node.node.Name, node.usage)
|
||||
}
|
||||
}
|
||||
@@ -252,7 +261,8 @@ func evictPods(
|
||||
taintsOfLowNodes map[string][]v1.Taint,
|
||||
podEvictor *evictions.PodEvictor,
|
||||
node *v1.Node) {
|
||||
if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCPU > 0 || *totalMem > 0) {
|
||||
// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
|
||||
if isNodeAboveTargetUtilization(nodeUsage, targetThresholds) && *totalPods > 0 && *totalCPU > 0 && *totalMem > 0 {
|
||||
onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
|
||||
for _, pod := range inputPods {
|
||||
if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
|
||||
@@ -263,7 +273,7 @@ func evictPods(
|
||||
cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
|
||||
mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)
|
||||
|
||||
success, err := podEvictor.EvictPod(ctx, pod, node)
|
||||
success, err := podEvictor.EvictPod(ctx, pod, node, "LowNodeUtilization")
|
||||
if err != nil {
|
||||
klog.Errorf("Error evicting pod: (%#v)", err)
|
||||
break
|
||||
@@ -284,8 +294,8 @@ func evictPods(
|
||||
nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)
|
||||
|
||||
klog.V(3).Infof("updated node usage: %#v", nodeUsage)
|
||||
// check if node utilization drops below target threshold or required capacity (cpu, memory, pods) is moved
|
||||
if !IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) || (*totalPods <= 0 && *totalCPU <= 0 && *totalMem <= 0) {
|
||||
// check if node utilization drops below target threshold or any required capacity (cpu, memory, pods) is moved
|
||||
if !isNodeAboveTargetUtilization(nodeUsage, targetThresholds) || *totalPods <= 0 || *totalCPU <= 0 || *totalMem <= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -293,7 +303,8 @@ func evictPods(
|
||||
}
|
||||
}
|
||||
|
||||
func SortNodesByUsage(nodes []NodeUsageMap) {
|
||||
// sortNodesByUsage sorts nodes based on usage in descending order
|
||||
func sortNodesByUsage(nodes []NodeUsageMap) {
|
||||
sort.Slice(nodes, func(i, j int) bool {
|
||||
var ti, tj api.Percentage
|
||||
for name, value := range nodes[i].usage {
|
||||
@@ -311,33 +322,11 @@ func SortNodesByUsage(nodes []NodeUsageMap) {
|
||||
})
|
||||
}
|
||||
|
||||
// sortPodsBasedOnPriority sorts pods based on priority and if their priorities are equal, they are sorted based on QoS tiers.
|
||||
func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
|
||||
sort.Slice(evictablePods, func(i, j int) bool {
|
||||
if evictablePods[i].Spec.Priority == nil && evictablePods[j].Spec.Priority != nil {
|
||||
return true
|
||||
}
|
||||
if evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority != nil {
|
||||
return false
|
||||
}
|
||||
if (evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority == nil) || (*evictablePods[i].Spec.Priority == *evictablePods[j].Spec.Priority) {
|
||||
if podutil.IsBestEffortPod(evictablePods[i]) {
|
||||
return true
|
||||
}
|
||||
if podutil.IsBurstablePod(evictablePods[i]) && podutil.IsGuaranteedPod(evictablePods[j]) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
return *evictablePods[i].Spec.Priority < *evictablePods[j].Spec.Priority
|
||||
})
|
||||
}
|
||||
|
||||
// createNodePodsMap returns nodepodsmap with evictable pods on node.
|
||||
func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {
|
||||
npm := NodePodsMap{}
|
||||
for _, node := range nodes {
|
||||
pods, err := podutil.ListPodsOnANode(ctx, client, node)
|
||||
pods, err := podutil.ListPodsOnANode(ctx, client, node, nil)
|
||||
if err != nil {
|
||||
klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
|
||||
} else {
|
||||
@@ -347,7 +336,8 @@ func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []
|
||||
return npm
|
||||
}
|
||||
|
||||
func IsNodeAboveTargetUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
|
||||
// isNodeAboveTargetUtilization checks if a node is overutilized
|
||||
func isNodeAboveTargetUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
|
||||
for name, nodeValue := range nodeThresholds {
|
||||
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
|
||||
if value, ok := thresholds[name]; !ok {
|
||||
@@ -360,7 +350,8 @@ func IsNodeAboveTargetUtilization(nodeThresholds api.ResourceThresholds, thresho
|
||||
return false
|
||||
}
|
||||
|
||||
func IsNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
|
||||
// isNodeWithLowUtilization checks if a node is underutilized
|
||||
func isNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
|
||||
for name, nodeValue := range nodeThresholds {
|
||||
if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
|
||||
if value, ok := thresholds[name]; !ok {
|
||||
@@ -373,7 +364,7 @@ func IsNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds
|
||||
return true
|
||||
}
|
||||
|
||||
func nodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool) api.ResourceThresholds {
|
||||
func nodeUtilization(node *v1.Node, pods []*v1.Pod) api.ResourceThresholds {
|
||||
totalReqs := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: {},
|
||||
v1.ResourceMemory: {},
|
||||
@@ -402,34 +393,16 @@ func nodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool)
|
||||
}
|
||||
}
|
||||
|
||||
func classifyPods(pods []*v1.Pod, evictLocalStoragePods bool) ([]*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
|
||||
var nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods []*v1.Pod
|
||||
|
||||
// From https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/
|
||||
//
|
||||
// For a Pod to be given a QoS class of Guaranteed:
|
||||
// - every Container in the Pod must have a memory limit and a memory request, and they must be the same.
|
||||
// - every Container in the Pod must have a CPU limit and a CPU request, and they must be the same.
|
||||
// A Pod is given a QoS class of Burstable if:
|
||||
// - the Pod does not meet the criteria for QoS class Guaranteed.
|
||||
// - at least one Container in the Pod has a memory or CPU request.
|
||||
// For a Pod to be given a QoS class of BestEffort, the Containers in the Pod must not have any memory or CPU limits or requests.
|
||||
func classifyPods(pods []*v1.Pod, evictor *evictions.PodEvictor) ([]*v1.Pod, []*v1.Pod) {
|
||||
var nonRemovablePods, removablePods []*v1.Pod
|
||||
|
||||
for _, pod := range pods {
|
||||
if !podutil.IsEvictable(pod, evictLocalStoragePods) {
|
||||
if !evictor.IsEvictable(pod) {
|
||||
nonRemovablePods = append(nonRemovablePods, pod)
|
||||
continue
|
||||
}
|
||||
|
||||
switch utils.GetPodQOS(pod) {
|
||||
case v1.PodQOSGuaranteed:
|
||||
guaranteedPods = append(guaranteedPods, pod)
|
||||
case v1.PodQOSBurstable:
|
||||
burstablePods = append(burstablePods, pod)
|
||||
default: // alias v1.PodQOSBestEffort
|
||||
bestEffortPods = append(bestEffortPods, pod)
|
||||
} else {
|
||||
removablePods = append(removablePods, pod)
|
||||
}
|
||||
}
|
||||
|
||||
return nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods
|
||||
return nonRemovablePods, removablePods
|
||||
}
|
||||
|
||||
@@ -22,15 +22,13 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"reflect"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
@@ -45,30 +43,6 @@ var (
|
||||
highPriority = int32(10000)
|
||||
)
|
||||
|
||||
func setRSOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList() }
|
||||
func setDSOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList() }
|
||||
func setNormalOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() }
|
||||
func setHighPriority(pod *v1.Pod) { pod.Spec.Priority = &highPriority }
|
||||
func setLowPriority(pod *v1.Pod) { pod.Spec.Priority = &lowPriority }
|
||||
func setNodeUnschedulable(node *v1.Node) { node.Spec.Unschedulable = true }
|
||||
|
||||
func makeBestEffortPod(pod *v1.Pod) {
|
||||
pod.Spec.Containers[0].Resources.Requests = nil
|
||||
pod.Spec.Containers[0].Resources.Requests = nil
|
||||
pod.Spec.Containers[0].Resources.Limits = nil
|
||||
pod.Spec.Containers[0].Resources.Limits = nil
|
||||
}
|
||||
|
||||
func makeBurstablePod(pod *v1.Pod) {
|
||||
pod.Spec.Containers[0].Resources.Limits = nil
|
||||
pod.Spec.Containers[0].Resources.Limits = nil
|
||||
}
|
||||
|
||||
func makeGuaranteedPod(pod *v1.Pod) {
|
||||
pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
|
||||
pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]
|
||||
}
|
||||
|
||||
func TestLowNodeUtilization(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
n1NodeName := "n1"
|
||||
@@ -80,9 +54,67 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
thresholds, targetThresholds api.ResourceThresholds
|
||||
nodes map[string]*v1.Node
|
||||
pods map[string]*v1.PodList
|
||||
maxPodsToEvictPerNode int
|
||||
expectedPodsEvicted int
|
||||
evictedPods []string
|
||||
}{
|
||||
{
|
||||
name: "no evictable pods",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities",
|
||||
thresholds: api.ResourceThresholds{
|
||||
@@ -96,21 +128,21 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, setDSOwnerRef),
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
setNormalOwnerRef(pod)
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
@@ -134,11 +166,72 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 4,
|
||||
},
|
||||
{
|
||||
name: "without priorities stop when cpu capacity is depleted",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p6", 400, 300, n1NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p7", 400, 300, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p8", 400, 300, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 2100, n1NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before cpu is depleted
|
||||
expectedPodsEvicted: 3,
|
||||
},
|
||||
{
|
||||
@@ -154,40 +247,40 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
setHighPriority(pod)
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
}),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
setHighPriority(pod)
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
}),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
setHighPriority(pod)
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
}),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
setHighPriority(pod)
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
}),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
setLowPriority(pod)
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodPriority(pod, lowPriority)
|
||||
}),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setDSOwnerRef(pod)
|
||||
setHighPriority(pod)
|
||||
test.SetDSOwnerRef(pod)
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
}),
|
||||
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
setNormalOwnerRef(pod)
|
||||
setLowPriority(pod)
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodPriority(pod, lowPriority)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
@@ -211,12 +304,13 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 4,
|
||||
},
|
||||
{
|
||||
name: "without priorities evicting best-effort pods only",
|
||||
@@ -231,38 +325,38 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
makeBestEffortPod(pod)
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
makeBestEffortPod(pod)
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
makeBestEffortPod(pod)
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
makeBestEffortPod(pod)
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setDSOwnerRef(pod)
|
||||
test.SetDSOwnerRef(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
setNormalOwnerRef(pod)
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
@@ -286,13 +380,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
evictedPods: []string{"p1", "p2", "p4", "p5"},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 4,
|
||||
evictedPods: []string{"p1", "p2", "p4", "p5"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -346,20 +441,26 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
npm := createNodePodsMap(ctx, fakeClient, nodes)
|
||||
lowNodes, targetNodes := classifyNodes(npm, test.thresholds, test.targetThresholds, false)
|
||||
if len(lowNodes) != 1 {
|
||||
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
|
||||
}
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
test.expectedPodsEvicted,
|
||||
test.maxPodsToEvictPerNode,
|
||||
nodes,
|
||||
false,
|
||||
)
|
||||
|
||||
evictPodsFromTargetNodes(ctx, targetNodes, lowNodes, test.targetThresholds, false, podEvictor)
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
|
||||
Thresholds: test.thresholds,
|
||||
TargetThresholds: test.targetThresholds,
|
||||
},
|
||||
},
|
||||
}
|
||||
LowNodeUtilization(ctx, fakeClient, strategy, nodes, podEvictor)
|
||||
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if test.expectedPodsEvicted != podsEvicted {
|
||||
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
|
||||
@@ -371,41 +472,102 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSortPodsByPriority(t *testing.T) {
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
|
||||
func TestValidateStrategyConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
targetThresholds api.ResourceThresholds
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing invalid thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
|
||||
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
|
||||
},
|
||||
{
|
||||
name: "passing invalid targetThresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
"resourceInvalid": 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("targetThresholds config is not valid: %v",
|
||||
fmt.Errorf("only cpu, memory, or pods thresholds can be specified")),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different num of resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' CPU config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 90,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
|
||||
},
|
||||
{
|
||||
name: "passing valid strategy config",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, setLowPriority)
|
||||
for _, testCase := range tests {
|
||||
validateErr := validateStrategyConfig(testCase.thresholds, testCase.targetThresholds)
|
||||
|
||||
// BestEffort
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
setHighPriority(pod)
|
||||
makeBestEffortPod(pod)
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
setHighPriority(pod)
|
||||
makeBurstablePod(pod)
|
||||
})
|
||||
|
||||
// Guaranteed
|
||||
p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
|
||||
setHighPriority(pod)
|
||||
makeGuaranteedPod(pod)
|
||||
})
|
||||
|
||||
// Best effort with nil priorities.
|
||||
p5 := test.BuildTestPod("p5", 400, 100, n1.Name, makeBestEffortPod)
|
||||
p5.Spec.Priority = nil
|
||||
|
||||
p6 := test.BuildTestPod("p6", 400, 100, n1.Name, makeGuaranteedPod)
|
||||
p6.Spec.Priority = nil
|
||||
|
||||
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
|
||||
|
||||
sortPodsBasedOnPriority(podList)
|
||||
if !reflect.DeepEqual(podList[len(podList)-1], p4) {
|
||||
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v\nto be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v\nto be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -413,17 +575,17 @@ func TestValidateThresholds(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input api.ResourceThresholds
|
||||
succeed bool
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing nil map for threshold",
|
||||
input: nil,
|
||||
succeed: false,
|
||||
errInfo: fmt.Errorf("no resource threshold is configured"),
|
||||
},
|
||||
{
|
||||
name: "passing no threshold",
|
||||
input: api.ResourceThresholds{},
|
||||
succeed: false,
|
||||
errInfo: fmt.Errorf("no resource threshold is configured"),
|
||||
},
|
||||
{
|
||||
name: "passing unsupported resource name",
|
||||
@@ -431,7 +593,7 @@ func TestValidateThresholds(t *testing.T) {
|
||||
v1.ResourceCPU: 40,
|
||||
v1.ResourceStorage: 25.5,
|
||||
},
|
||||
succeed: false,
|
||||
errInfo: fmt.Errorf("only cpu, memory, or pods thresholds can be specified"),
|
||||
},
|
||||
{
|
||||
name: "passing invalid resource name",
|
||||
@@ -439,7 +601,30 @@ func TestValidateThresholds(t *testing.T) {
|
||||
v1.ResourceCPU: 40,
|
||||
"coolResource": 42.0,
|
||||
},
|
||||
succeed: false,
|
||||
errInfo: fmt.Errorf("only cpu, memory, or pods thresholds can be specified"),
|
||||
},
|
||||
{
|
||||
name: "passing invalid resource value",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 110,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("%v threshold not in [%v, %v] range", v1.ResourceCPU, MinResourcePercentage, MaxResourcePercentage),
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with max and min resource value",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 100,
|
||||
v1.ResourceMemory: 0,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with only cpu",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with cpu, memory and pods",
|
||||
@@ -448,15 +633,19 @@ func TestValidateThresholds(t *testing.T) {
|
||||
v1.ResourceMemory: 30,
|
||||
v1.ResourcePods: 40,
|
||||
},
|
||||
succeed: true,
|
||||
errInfo: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
isValid := validateThresholds(test.input)
|
||||
validateErr := validateThresholds(test.input)
|
||||
|
||||
if isValid != test.succeed {
|
||||
t.Errorf("expected validity of threshold: %#v\nto be %v but got %v instead", test.input, test.succeed, isValid)
|
||||
if validateErr == nil || test.errInfo == nil {
|
||||
if validateErr != test.errInfo {
|
||||
t.Errorf("expected validity of threshold: %#v\nto be %v but got %v instead", test.input, test.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != test.errInfo.Error() {
|
||||
t.Errorf("expected validity of threshold: %#v\nto be %v but got %v instead", test.input, test.errInfo, validateErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -509,7 +698,7 @@ func TestWithTaints(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 20,
|
||||
@@ -533,7 +722,7 @@ func TestWithTaints(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, setRSOwnerRef)
|
||||
podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, test.SetRSOwnerRef)
|
||||
podThatToleratesTaint.Spec.Tolerations = []v1.Toleration{
|
||||
{
|
||||
Key: "key",
|
||||
@@ -552,16 +741,16 @@ func TestWithTaints(t *testing.T) {
|
||||
nodes: []*v1.Node{n1, n2, n3},
|
||||
pods: []*v1.Pod{
|
||||
//Node 1 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
// Node 2 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, test.SetRSOwnerRef),
|
||||
},
|
||||
evictionsExpected: 1,
|
||||
},
|
||||
@@ -570,16 +759,16 @@ func TestWithTaints(t *testing.T) {
|
||||
nodes: []*v1.Node{n1, n3withTaints},
|
||||
pods: []*v1.Pod{
|
||||
//Node 1 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
// Node 3 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
|
||||
},
|
||||
evictionsExpected: 0,
|
||||
},
|
||||
@@ -588,16 +777,16 @@ func TestWithTaints(t *testing.T) {
|
||||
nodes: []*v1.Node{n1, n3withTaints},
|
||||
pods: []*v1.Pod{
|
||||
//Node 1 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
podThatToleratesTaint,
|
||||
// Node 3 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
|
||||
},
|
||||
evictionsExpected: 1,
|
||||
},
|
||||
@@ -630,9 +819,10 @@ func TestWithTaints(t *testing.T) {
|
||||
false,
|
||||
item.evictionsExpected,
|
||||
item.nodes,
|
||||
false,
|
||||
)
|
||||
|
||||
LowNodeUtilization(ctx, &fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor)
|
||||
LowNodeUtilization(ctx, &fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, podEvictor)
|
||||
|
||||
if item.evictionsExpected != evictionCounter {
|
||||
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, evictionCounter)
|
||||
|
||||
@@ -21,7 +21,7 @@ import (

v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/klog/v2"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -29,7 +29,12 @@ import (
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
// RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if strategy.Params == nil {
klog.V(1).Infof("NodeAffinityType not set")
return
}
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)

@@ -38,19 +43,21 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)

pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
pods, err := podutil.ListPodsOnANode(ctx, client, node, func(pod *v1.Pod) bool {
return podEvictor.IsEvictable(pod) &&
!nodeutil.PodFitsCurrentNode(pod, node) &&
nodeutil.PodFitsAnyNode(pod, nodes)
})
if err != nil {
klog.Errorf("failed to get pods from %v: %v", node.Name, err)
}

for _, pod := range pods {
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
klog.V(1).Infof("Evicting pod: %v", pod.Name)
if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
klog.Errorf("Error evicting pod: (%#v)", err)
break
}
klog.V(1).Infof("Evicting pod: %v", pod.Name)
if _, err := podEvictor.EvictPod(ctx, pod, node, "NodeAffinity"); err != nil {
klog.Errorf("Error evicting pod: (%#v)", err)
break
}
}
}
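Note on the refactor above: the per-strategy evictability check that used to live in ListEvictablePodsOnNode now travels as a filter closure into ListPodsOnANode, folded together with the node-affinity fitness checks. A minimal sketch of that shape, assuming the package aliases used in this file and a hypothetical helper name, could look like:

// listMovablePods is a hypothetical helper illustrating the filter-closure pattern
// introduced above; it returns only pods that are evictable, do not fit their
// current node, and fit at least one other node.
func listMovablePods(ctx context.Context, client clientset.Interface, node *v1.Node, nodes []*v1.Node, podEvictor *evictions.PodEvictor) ([]*v1.Pod, error) {
	return podutil.ListPodsOnANode(ctx, client, node, func(pod *v1.Pod) bool {
		return podEvictor.IsEvictable(pod) &&
			!nodeutil.PodFitsCurrentNode(pod, node) &&
			nodeutil.PodFitsAnyNode(pod, nodes)
	})
}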
@@ -33,7 +33,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeAffinityType: []string{
|
||||
"requiredDuringSchedulingIgnoredDuringExecution",
|
||||
},
|
||||
@@ -93,13 +93,13 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
pods []v1.Pod
|
||||
strategy api.DeschedulerStrategy
|
||||
expectedEvictedPodCount int
|
||||
maxPodsToEvict int
|
||||
maxPodsToEvictPerNode int
|
||||
}{
|
||||
{
|
||||
description: "Invalid strategy type, should not evict any pods",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeAffinityType: []string{
|
||||
"requiredDuringSchedulingRequiredDuringExecution",
|
||||
},
|
||||
@@ -108,7 +108,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithoutLabels),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Pod is correctly scheduled on node, no eviction expected",
|
||||
@@ -116,7 +116,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithLabels),
|
||||
nodes: []*v1.Node{nodeWithLabels},
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
|
||||
@@ -124,15 +124,15 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
|
||||
pods: addPodsToNode(nodeWithoutLabels),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 1, should not be evicted",
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
|
||||
expectedEvictedPodCount: 1,
|
||||
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
|
||||
pods: addPodsToNode(nodeWithoutLabels),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxPodsToEvict: 1,
|
||||
maxPodsToEvictPerNode: 1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
|
||||
@@ -140,7 +140,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
|
||||
pods: addPodsToNode(nodeWithoutLabels),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -155,11 +155,12 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
tc.maxPodsToEvict,
|
||||
tc.maxPodsToEvictPerNode,
|
||||
tc.nodes,
|
||||
false,
|
||||
)
|
||||
|
||||
RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, false, podEvictor)
|
||||
RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||
|
||||
@@ -26,14 +26,14 @@ import (

v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/klog/v2"
)

// RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
if err != nil {
//no pods evicted as error encountered retrieving evictable Pods
return
@@ -46,7 +46,7 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
) {
klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name)
if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
klog.Errorf("Error evicting pod: (%#v)", err)
break
}

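Across this diff the PodEvictor is constructed the same way in every strategy test: the old maxPodsToEvict limit becomes maxPodsToEvictPerNode, and the evictLocalStoragePods flag moves out of each strategy signature into the evictor itself. A sketch of the construction, with argument meanings inferred from the call sites shown in this diff (not confirmed against the package docs):

podEvictor := evictions.NewPodEvictor(
	client, // clientset used to post eviction requests
	"v1",   // eviction policy group version
	false,  // dry run
	5,      // max pods to evict per node (inferred meaning)
	nodes,  // candidate nodes
	false,  // evict pods with local storage (inferred meaning)
)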
@@ -102,7 +102,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
nodes []*v1.Node
|
||||
pods []v1.Pod
|
||||
evictLocalStoragePods bool
|
||||
maxPodsToEvict int
|
||||
maxPodsToEvictPerNode int
|
||||
expectedEvictedPodCount int
|
||||
}{
|
||||
|
||||
@@ -111,7 +111,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p1, *p2, *p3},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 1, //p2 gets evicted
|
||||
},
|
||||
{
|
||||
@@ -119,15 +119,15 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p1, *p3, *p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 1, //p4 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Only <maxPodsToEvict> number of Pods not tolerating node taint should be evicted",
|
||||
description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
|
||||
pods: []v1.Pod{*p1, *p5, *p6},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
maxPodsToEvict: 1,
|
||||
maxPodsToEvictPerNode: 1,
|
||||
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
|
||||
},
|
||||
{
|
||||
@@ -135,7 +135,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p7, *p8, *p9, *p10},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: false,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
{
|
||||
@@ -143,7 +143,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p7, *p8, *p9, *p10},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: true,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
@@ -151,7 +151,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p7, *p8, *p10, *p11},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: false,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
}
|
||||
@@ -168,11 +168,12 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
tc.maxPodsToEvict,
|
||||
tc.maxPodsToEvictPerNode,
|
||||
tc.nodes,
|
||||
tc.evictLocalStoragePods,
|
||||
)
|
||||
|
||||
RemovePodsViolatingNodeTaints(ctx, fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor)
|
||||
RemovePodsViolatingNodeTaints(ctx, fakeClient, api.DeschedulerStrategy{}, tc.nodes, podEvictor)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
|
||||
|
||||
@@ -26,28 +26,29 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/klog/v2"
)

// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node that violate inter-pod anti-affinity rules.
func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
if err != nil {
return
}
// sort the evictable Pods based on priority; if there are multiple pods with the same priority, they are sorted based on QoS tiers.
podutil.SortPodsBasedOnPriorityLowToHigh(pods)
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
if checkPodsWithAntiAffinityExist(pods[i], pods) {
success, err := podEvictor.EvictPod(ctx, pods[i], node)
success, err := podEvictor.EvictPod(ctx, pods[i], node, "InterPodAntiAffinity")
if err != nil {
klog.Errorf("Error evicting pod: (%#v)", err)
break
}

if success {
klog.V(1).Infof("Evicted pod: %#v\n because of existing anti-affinity", pods[i].Name)
// Since the current pod is evicted all other pods which have anti-affinity with this
// pod need not be evicted.
// Update pods.

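The newly added SortPodsBasedOnPriorityLowToHigh call means the lowest-priority violator is considered first, which is why the "Evict only 1 pod after sorting" case in the test below expects a single eviction. A generic sketch of that ordering, under the assumption that pods with a nil priority sort lowest:

// sortByPriorityLowToHigh is an illustrative stand-in, not the library's implementation.
func sortByPriorityLowToHigh(pods []*v1.Pod) {
	sort.Slice(pods, func(i, j int) bool {
		pi, pj := int32(0), int32(0)
		if pods[i].Spec.Priority != nil {
			pi = *pods[i].Spec.Priority
		}
		if pods[j].Spec.Priority != nil {
			pj = *pods[j].Spec.Priority
		}
		return pi < pj
	})
}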
@@ -37,42 +37,65 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
|
||||
p2.Labels = map[string]string{"foo": "bar"}
|
||||
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p5.Labels = map[string]string{"foo": "bar"}
|
||||
p6.Labels = map[string]string{"foo": "bar"}
|
||||
p7.Labels = map[string]string{"foo1": "bar1"}
|
||||
test.SetNormalOwnerRef(p1)
|
||||
test.SetNormalOwnerRef(p2)
|
||||
test.SetNormalOwnerRef(p3)
|
||||
test.SetNormalOwnerRef(p4)
|
||||
test.SetNormalOwnerRef(p5)
|
||||
test.SetNormalOwnerRef(p6)
|
||||
test.SetNormalOwnerRef(p7)
|
||||
|
||||
// set pod anti affinity
|
||||
setPodAntiAffinity(p1)
|
||||
setPodAntiAffinity(p3)
|
||||
setPodAntiAffinity(p4)
|
||||
setPodAntiAffinity(p1, "foo", "bar")
|
||||
setPodAntiAffinity(p3, "foo", "bar")
|
||||
setPodAntiAffinity(p4, "foo", "bar")
|
||||
setPodAntiAffinity(p5, "foo1", "bar1")
|
||||
setPodAntiAffinity(p6, "foo1", "bar1")
|
||||
setPodAntiAffinity(p7, "foo", "bar")
|
||||
|
||||
// set pod priority
|
||||
test.SetPodPriority(p5, 100)
|
||||
test.SetPodPriority(p6, 50)
|
||||
test.SetPodPriority(p7, 0)
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
maxPodsToEvict int
|
||||
maxPodsToEvictPerNode int
|
||||
pods []v1.Pod
|
||||
expectedEvictedPodCount int
|
||||
}{
|
||||
{
|
||||
description: "Maximum pods to evict - 0",
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
pods: []v1.Pod{*p1, *p2, *p3, *p4},
|
||||
expectedEvictedPodCount: 3,
|
||||
},
|
||||
{
|
||||
description: "Maximum pods to evict - 3",
|
||||
maxPodsToEvict: 3,
|
||||
maxPodsToEvictPerNode: 3,
|
||||
pods: []v1.Pod{*p1, *p2, *p3, *p4},
|
||||
expectedEvictedPodCount: 3,
|
||||
},
|
||||
{
|
||||
description: "Evict only 1 pod after sorting",
|
||||
maxPodsToEvictPerNode: 0,
|
||||
pods: []v1.Pod{*p5, *p6, *p7},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// create fake client
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil
|
||||
return true, &v1.PodList{Items: test.pods}, nil
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, node, nil
|
||||
@@ -82,11 +105,12 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
test.maxPodsToEvict,
|
||||
test.maxPodsToEvictPerNode,
|
||||
[]*v1.Node{node},
|
||||
false,
|
||||
)
|
||||
|
||||
RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
|
||||
RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != test.expectedEvictedPodCount {
|
||||
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
|
||||
@@ -94,7 +118,7 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func setPodAntiAffinity(inputPod *v1.Pod) {
|
||||
func setPodAntiAffinity(inputPod *v1.Pod, labelKey, labelValue string) {
|
||||
inputPod.Spec.Affinity = &v1.Affinity{
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
@@ -102,9 +126,9 @@ func setPodAntiAffinity(inputPod *v1.Pod) {
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "foo",
|
||||
Key: labelKey,
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"bar"},
|
||||
Values: []string{labelValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
v1 "k8s.io/api/core/v1"
v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/klog/v2"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -30,17 +30,17 @@ import (
)

// PodLifeTime evicts pods that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
if strategy.Params.MaxPodLifeTimeSeconds == nil {
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if strategy.Params == nil || strategy.Params.MaxPodLifeTimeSeconds == nil {
klog.V(1).Infof("MaxPodLifeTimeSeconds not set")
return
}

for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v", node.Name)
pods := listOldPodsOnNode(ctx, client, node, *strategy.Params.MaxPodLifeTimeSeconds, evictLocalStoragePods)
pods := listOldPodsOnNode(ctx, client, node, *strategy.Params.MaxPodLifeTimeSeconds, podEvictor)
for _, pod := range pods {
success, err := podEvictor.EvictPod(ctx, pod, node)
success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
if success {
klog.V(1).Infof("Evicted pod: %#v because it was created more than %v seconds ago", pod.Name, *strategy.Params.MaxPodLifeTimeSeconds)
}
@@ -53,8 +53,8 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
}
}

func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, maxAge uint, evictLocalStoragePods bool) []*v1.Pod {
pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, maxAge uint, evictor *evictions.PodEvictor) []*v1.Pod {
pods, err := podutil.ListPodsOnANode(ctx, client, node, evictor.IsEvictable)
if err != nil {
return nil
}

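With Params now a pointer, PodLifeTime guards against a missing params block before dereferencing MaxPodLifeTimeSeconds. A minimal invocation mirroring the test cases below (ctx, client, nodes, and podEvictor assumed to be in scope) would be:

// Evict pods older than 10 minutes; the strategy is a no-op if Params is nil.
maxLifeTime := uint(600)
strategy := api.DeschedulerStrategy{
	Enabled: true,
	Params: &api.StrategyParameters{
		MaxPodLifeTimeSeconds: &maxLifeTime,
	},
}
PodLifeTime(ctx, client, strategy, nodes, podEvictor)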
@@ -89,7 +89,7 @@ func TestPodLifeTime(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
strategy api.DeschedulerStrategy
|
||||
maxPodsToEvict int
|
||||
maxPodsToEvictPerNode int
|
||||
pods []v1.Pod
|
||||
expectedEvictedPodCount int
|
||||
}{
|
||||
@@ -97,11 +97,11 @@ func TestPodLifeTime(t *testing.T) {
|
||||
description: "Two pods in the `dev` Namespace, 1 is new and 1 very is old. 1 should be evicted.",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
Params: &api.StrategyParameters{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
},
|
||||
},
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
@@ -109,11 +109,11 @@ func TestPodLifeTime(t *testing.T) {
|
||||
description: "Two pods in the `dev` Namespace, 2 are new and 0 are old. 0 should be evicted.",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
Params: &api.StrategyParameters{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
},
|
||||
},
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p3, *p4},
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
@@ -121,11 +121,11 @@ func TestPodLifeTime(t *testing.T) {
|
||||
description: "Two pods in the `dev` Namespace, 1 created 605 seconds ago. 1 should be evicted.",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
Params: &api.StrategyParameters{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
},
|
||||
},
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p5, *p6},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
@@ -133,11 +133,11 @@ func TestPodLifeTime(t *testing.T) {
|
||||
description: "Two pods in the `dev` Namespace, 1 created 595 seconds ago. 0 should be evicted.",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
Params: &api.StrategyParameters{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
},
|
||||
},
|
||||
maxPodsToEvict: 5,
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p7, *p8},
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
@@ -155,11 +155,12 @@ func TestPodLifeTime(t *testing.T) {
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
tc.maxPodsToEvict,
|
||||
tc.maxPodsToEvictPerNode,
|
||||
[]*v1.Node{node},
|
||||
false,
|
||||
)
|
||||
|
||||
PodLifeTime(ctx, fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
|
||||
PodLifeTime(ctx, fakeClient, tc.strategy, []*v1.Node{node}, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
|
||||
|
||||
@@ -21,7 +21,7 @@ import (

v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/klog/v2"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -31,14 +31,14 @@ import (
// RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
// There are many cases leading to this issue: volume mount failures, app errors due to nodes' different settings.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
if strategy.Params.PodsHavingTooManyRestarts == nil || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if strategy.Params == nil || strategy.Params.PodsHavingTooManyRestarts == nil || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
klog.V(1).Infof("PodsHavingTooManyRestarts thresholds not set")
return
}
for _, node := range nodes {
klog.V(1).Infof("Processing node: %s", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
pods, err := podutil.ListPodsOnANode(ctx, client, node, podEvictor.IsEvictable)
if err != nil {
klog.Errorf("Error when list pods at node %s", node.Name)
continue
@@ -53,7 +53,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
continue
}
if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
if _, err := podEvictor.EvictPod(ctx, pods[i], node, "TooManyRestarts"); err != nil {
klog.Errorf("Error evicting pod: (%#v)", err)
break
}

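The restart-counting hunk itself is elided above, but the test cases below (thresholds of 1*20 versus 1*25) imply the total sums container restarts and, when IncludingInitContainers is set, init-container restarts as well. An illustrative sketch of that counting, not the package's actual implementation:

// totalRestarts sums restart counts across regular containers and,
// optionally, init containers (illustration only).
func totalRestarts(pod *v1.Pod, includeInit bool) int32 {
	var restarts int32
	for _, cs := range pod.Status.ContainerStatuses {
		restarts += cs.RestartCount
	}
	if includeInit {
		for _, cs := range pod.Status.InitContainerStatuses {
			restarts += cs.RestartCount
		}
	}
	return restarts
}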
@@ -84,7 +84,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32) api.DeschedulerStrategy {
|
||||
return api.DeschedulerStrategy{
|
||||
Enabled: enabled,
|
||||
Params: api.StrategyParameters{
|
||||
Params: &api.StrategyParameters{
|
||||
PodsHavingTooManyRestarts: &api.PodsHavingTooManyRestarts{
|
||||
PodRestartThreshold: restartThresholds,
|
||||
IncludingInitContainers: includingInitContainers,
|
||||
@@ -98,61 +98,61 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
pods []v1.Pod
|
||||
strategy api.DeschedulerStrategy
|
||||
expectedEvictedPodCount int
|
||||
maxPodsToEvict int
|
||||
maxPodsToEvictPerNode int
|
||||
}{
|
||||
{
|
||||
description: "All pods have total restarts under threshold, no pod evictions",
|
||||
strategy: createStrategy(true, true, 10000),
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Some pods have total restarts bigger than threshold",
|
||||
strategy: createStrategy(true, true, 1),
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pods evictions",
|
||||
strategy: createStrategy(true, true, 1*25),
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pods evictions",
|
||||
strategy: createStrategy(true, false, 1*25),
|
||||
expectedEvictedPodCount: 5,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pods evictions",
|
||||
strategy: createStrategy(true, true, 1*20),
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pods evictions",
|
||||
strategy: createStrategy(true, false, 1*20),
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
|
||||
strategy: createStrategy(true, true, 5*25+1),
|
||||
expectedEvictedPodCount: 1,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
|
||||
strategy: createStrategy(true, false, 5*20+1),
|
||||
expectedEvictedPodCount: 1,
|
||||
maxPodsToEvict: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "All pods have total restarts equals threshold(maxPodsToEvict=3), 3 pods evictions",
|
||||
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pods evictions",
|
||||
strategy: createStrategy(true, true, 1),
|
||||
expectedEvictedPodCount: 3,
|
||||
maxPodsToEvict: 3,
|
||||
maxPodsToEvictPerNode: 3,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -169,11 +169,12 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
tc.maxPodsToEvict,
|
||||
tc.maxPodsToEvictPerNode,
|
||||
[]*v1.Node{node},
|
||||
false,
|
||||
)
|
||||
|
||||
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
|
||||
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, []*v1.Node{node}, podEvictor)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/featuregate"
"k8s.io/klog"
"k8s.io/klog/v2"
)

const (

@@ -22,7 +22,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/klog"
"k8s.io/klog/v2"
)

// The following code has been copied from predicates package to avoid the

@@ -19,16 +19,20 @@ package e2e
import (
"context"
"math"
"os"
"strings"
"testing"
"time"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/klog/v2"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
@@ -63,8 +67,7 @@ func MakePodSpec() v1.PodSpec {
}

// RcByNameContainer returns a ReplicationController with specified name and container
func RcByNameContainer(name string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {

func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
zeroGracePeriod := int64(0)

// Add "name": name to the labels, overwriting if it exists.
@@ -78,7 +81,8 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Name: name,
Namespace: namespace,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
@@ -96,79 +100,119 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr
}

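RcByNameContainer now takes an explicit namespace instead of always landing in "default". A brief usage sketch in the spirit of the updated e2e test further down (clientSet, ctx, t, and testNamespace assumed to be in scope):

// Create a 15-replica RC inside the per-test namespace rather than "default".
rc := RcByNameContainer("test-rc", testNamespace.Name, int32(15), map[string]string{"test": "app"}, nil)
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
	t.Errorf("Error creating RC %v", err)
}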
// startEndToEndForLowNodeUtilization tests the low node utilization strategy.
func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer) {
var thresholds = make(deschedulerapi.ResourceThresholds)
var targetThresholds = make(deschedulerapi.ResourceThresholds)
thresholds[v1.ResourceMemory] = 20
thresholds[v1.ResourcePods] = 20
thresholds[v1.ResourceCPU] = 85
targetThresholds[v1.ResourceMemory] = 20
targetThresholds[v1.ResourcePods] = 20
targetThresholds[v1.ResourceCPU] = 90
func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer, podEvictor *evictions.PodEvictor) {
// Run descheduler.
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
klog.Fatalf("%v", err)
}
stopChannel := make(chan struct{})
nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", stopChannel)
nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", nil)
if err != nil {
klog.Fatalf("%v", err)
}

lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: deschedulerapi.StrategyParameters{
NodeResourceUtilizationThresholds: &deschedulerapi.NodeResourceUtilizationThresholds{
Thresholds: thresholds,
TargetThresholds: targetThresholds,
strategies.LowNodeUtilization(
ctx,
clientset,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
NodeResourceUtilizationThresholds: &deschedulerapi.NodeResourceUtilizationThresholds{
Thresholds: deschedulerapi.ResourceThresholds{
v1.ResourceMemory: 20,
v1.ResourcePods: 20,
v1.ResourceCPU: 85,
},
TargetThresholds: deschedulerapi.ResourceThresholds{
v1.ResourceMemory: 20,
v1.ResourcePods: 20,
v1.ResourceCPU: 90,
},
},
},
},
}

podEvictor := evictions.NewPodEvictor(
clientset,
evictionPolicyGroupVersion,
false,
0,
nodes,
podEvictor,
)

strategies.LowNodeUtilization(ctx, clientset, lowNodeUtilizationStrategy, nodes, false, podEvictor)
time.Sleep(10 * time.Second)
}

func TestE2E(t *testing.T) {
// If we have reached here, it means cluster would have been already setup and the kubeconfig file should
// be in /tmp directory as admin.conf.
func TestLowNodeUtilization(t *testing.T) {
ctx := context.Background()
clientSet, err := client.CreateClient("/tmp/admin.conf")

clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
if err != nil {
t.Errorf("Error during client creation with %v", err)
}
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()

stopChannel := make(chan struct{}, 0)

sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)
defer close(stopChannel)

// Assumption: We should have a 3 node cluster by now. Kubeadm brings all the master components onto the master node.
// So, the last node would have least utilization.
rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
_, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{})
nodeInformer := sharedInformerFactory.Core().V1().Nodes()

nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}

var nodes []*v1.Node
for i := range nodeList.Items {
node := nodeList.Items[i]
nodes = append(nodes, &node)
}

testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
|
||||
}
|
||||
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
|
||||
|
||||
rc := RcByNameContainer("test-rc-node-utilization", testNamespace.Name, int32(15), map[string]string{"test": "node-utilization"}, nil)
|
||||
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
|
||||
t.Errorf("Error creating deployment %v", err)
|
||||
}
|
||||
evictPods(ctx, t, clientSet, nodeInformer, nodeList, rc)
|
||||
|
||||
evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
|
||||
deleteRC(ctx, t, clientSet, rc)
|
||||
}
|
||||
|
||||
func TestEvictAnnotation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
|
||||
if err != nil {
|
||||
t.Errorf("Error during client creation with %v", err)
|
||||
}
|
||||
|
||||
stopChannel := make(chan struct{}, 0)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
|
||||
sharedInformerFactory.Start(stopChannel)
|
||||
sharedInformerFactory.WaitForCacheSync(stopChannel)
|
||||
defer close(stopChannel)
|
||||
|
||||
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
|
||||
|
||||
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Error listing node with %v", err)
|
||||
}
|
||||
|
||||
var nodes []*v1.Node
|
||||
for i := range nodeList.Items {
|
||||
node := nodeList.Items[i]
|
||||
nodes = append(nodes, &node)
|
||||
}
|
||||
|
||||
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
|
||||
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Unable to create ns %v", testNamespace.Name)
|
||||
}
|
||||
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
|
||||
|
||||
rc := RcByNameContainer("test-rc-evict-annotation", testNamespace.Name, int32(15), map[string]string{"test": "annotation"}, nil)
|
||||
rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
rc.Spec.Replicas = func(i int32) *int32 { return &i }(15)
|
||||
rc.Spec.Template.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
@@ -178,16 +222,18 @@ func TestE2E(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
|
||||
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
|
||||
t.Errorf("Error creating deployment %v", err)
|
||||
}
|
||||
evictPods(ctx, t, clientSet, nodeInformer, nodeList, rc)
|
||||
|
||||
evictPods(ctx, t, clientSet, nodeInformer, nodes, rc)
|
||||
deleteRC(ctx, t, clientSet, rc)
|
||||
}
|
||||
|
||||
func TestDeschedulingInterval(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
clientSet, err := client.CreateClient("/tmp/admin.conf")
|
||||
clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
|
||||
if err != nil {
|
||||
t.Errorf("Error during client creation with %v", err)
|
||||
}
|
||||
@@ -220,46 +266,17 @@ func TestDeschedulingInterval(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList *v1.NodeList, rc *v1.ReplicationController) {
|
||||
var leastLoadedNode v1.Node
|
||||
podsBefore := math.MaxInt16
|
||||
for i := range nodeList.Items {
|
||||
// Skip the Master Node
|
||||
if _, exist := nodeList.Items[i].Labels["node-role.kubernetes.io/master"]; exist {
|
||||
continue
|
||||
}
|
||||
// List all the pods on the current Node
|
||||
podsOnANode, err := podutil.ListEvictablePodsOnNode(ctx, clientSet, &nodeList.Items[i], true)
|
||||
if err != nil {
|
||||
t.Errorf("Error listing pods on a node %v", err)
|
||||
}
|
||||
// Update leastLoadedNode if necessary
|
||||
if tmpLoads := len(podsOnANode); tmpLoads < podsBefore {
|
||||
leastLoadedNode = nodeList.Items[i]
|
||||
podsBefore = tmpLoads
|
||||
}
|
||||
}
|
||||
t.Log("Eviction of pods starting")
|
||||
startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer)
|
||||
podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(ctx, clientSet, &leastLoadedNode, true)
|
||||
if err != nil {
|
||||
t.Errorf("Error listing pods on a node %v", err)
|
||||
}
|
||||
podsAfter := len(podsOnleastUtilizedNode)
|
||||
if podsBefore > podsAfter {
|
||||
t.Fatalf("We should have see more pods on this node as per kubeadm's way of installing %v, %v", podsBefore, podsAfter)
|
||||
}
|
||||
|
||||
func deleteRC(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
|
||||
//set number of replicas to 0
|
||||
rc.Spec.Replicas = func(i int32) *int32 { return &i }(0)
|
||||
_, err = clientSet.CoreV1().ReplicationControllers("default").Update(ctx, rc, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Error updating replica controller %v", err)
|
||||
rcdeepcopy := rc.DeepCopy()
|
||||
rcdeepcopy.Spec.Replicas = func(i int32) *int32 { return &i }(0)
|
||||
if _, err := clientSet.CoreV1().ReplicationControllers(rcdeepcopy.Namespace).Update(ctx, rcdeepcopy, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("Error updating replica controller %v", err)
|
||||
}
|
||||
allPodsDeleted := false
|
||||
//wait 30 seconds until all pods are deleted
|
||||
for i := 0; i < 6; i++ {
|
||||
scale, _ := clientSet.CoreV1().ReplicationControllers("default").GetScale(ctx, rc.Name, metav1.GetOptions{})
|
||||
scale, _ := clientSet.CoreV1().ReplicationControllers(rc.Namespace).GetScale(ctx, rc.Name, metav1.GetOptions{})
|
||||
if scale.Spec.Replicas == 0 {
|
||||
allPodsDeleted = true
|
||||
break
|
||||
@@ -271,11 +288,60 @@ func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface,
|
||||
t.Errorf("Deleting of rc pods took too long")
|
||||
}
|
||||
|
||||
err = clientSet.CoreV1().ReplicationControllers("default").Delete(ctx, rc.Name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Error deleting rc %v", err)
|
||||
if err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Delete(ctx, rc.Name, metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("Error deleting rc %v", err)
|
||||
}
|
||||
|
||||
//wait until rc is deleted
|
||||
time.Sleep(5 * time.Second)
|
||||
if err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
|
||||
_, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Get(ctx, rc.Name, metav1.GetOptions{})
|
||||
if err != nil && strings.Contains(err.Error(), "not found") {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Error deleting rc %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList []*v1.Node, rc *v1.ReplicationController) {
|
||||
var leastLoadedNode *v1.Node
|
||||
podsBefore := math.MaxInt16
|
||||
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
|
||||
if err != nil || len(evictionPolicyGroupVersion) == 0 {
|
||||
klog.Fatalf("%v", err)
|
||||
}
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
clientSet,
|
||||
evictionPolicyGroupVersion,
|
||||
false,
|
||||
0,
|
||||
nodeList,
|
||||
true,
|
||||
)
|
||||
for _, node := range nodeList {
|
||||
// Skip the Master Node
|
||||
if _, exist := node.Labels["node-role.kubernetes.io/master"]; exist {
|
||||
continue
|
||||
}
|
||||
// List all the pods on the current Node
|
||||
podsOnANode, err := podutil.ListPodsOnANode(ctx, clientSet, node, podEvictor.IsEvictable)
|
||||
if err != nil {
|
||||
t.Errorf("Error listing pods on a node %v", err)
|
||||
}
|
||||
// Update leastLoadedNode if necessary
|
||||
if tmpLoads := len(podsOnANode); tmpLoads < podsBefore {
|
||||
leastLoadedNode = node
|
||||
podsBefore = tmpLoads
|
||||
}
|
||||
}
|
||||
t.Log("Eviction of pods starting")
|
||||
startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer, podEvictor)
|
||||
podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, leastLoadedNode, podEvictor.IsEvictable)
|
||||
if err != nil {
|
||||
t.Errorf("Error listing pods on a node %v", err)
|
||||
}
|
||||
podsAfter := len(podsOnleastUtilizedNode)
|
||||
if podsBefore > podsAfter {
|
||||
t.Fatalf("We should have see more pods on this node as per kubeadm's way of installing %v, %v", podsBefore, podsAfter)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -116,3 +116,48 @@ func BuildTestNode(name string, millicpu int64, mem int64, pods int64, apply fun
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// MakeBestEffortPod makes the given pod a BestEffort pod
|
||||
func MakeBestEffortPod(pod *v1.Pod) {
|
||||
pod.Spec.Containers[0].Resources.Requests = nil
pod.Spec.Containers[0].Resources.Limits = nil
|
||||
}
|
||||
|
||||
// MakeBurstablePod makes the given pod a Burstable pod
|
||||
func MakeBurstablePod(pod *v1.Pod) {
|
||||
pod.Spec.Containers[0].Resources.Limits = nil
|
||||
}
|
||||
|
||||
// MakeGuaranteedPod makes the given pod a Guaranteed pod
|
||||
func MakeGuaranteedPod(pod *v1.Pod) {
|
||||
pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
|
||||
pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]
|
||||
}
|
||||
|
||||
// SetRSOwnerRef sets the given pod's owner to ReplicaSet
|
||||
func SetRSOwnerRef(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = GetReplicaSetOwnerRefList()
|
||||
}
|
||||
|
||||
// SetDSOwnerRef sets the given pod's owner to DaemonSet
|
||||
func SetDSOwnerRef(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = GetDaemonSetOwnerRefList()
|
||||
}
|
||||
|
||||
// SetNormalOwnerRef sets the given pod's owner to Pod
|
||||
func SetNormalOwnerRef(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = GetNormalPodOwnerRefList()
|
||||
}
|
||||
|
||||
// SetPodPriority sets the given pod's priority
|
||||
func SetPodPriority(pod *v1.Pod, priority int32) {
|
||||
pod.Spec.Priority = &priority
|
||||
}
|
||||
|
||||
// SetNodeUnschedulable sets the given node unschedulable
|
||||
func SetNodeUnschedulable(node *v1.Node) {
|
||||
node.Spec.Unschedulable = true
|
||||
}
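Taken together, these helpers are intended to be chained when building fixture pods for strategy unit tests. A minimal sketch of that pattern (assuming the helpers live in the repo's shared `test` package; the import path, pod literal, and names are illustrative only):

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/descheduler/test" // assumed import path for the helpers above
)

// newBurstableRSPod builds a plain pod with one container, then uses the
// helpers to shape it: drop limits (Burstable), give it a ReplicaSet owner,
// and assign a priority.
func newBurstableRSPod() *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "fixture-pod", Namespace: "default"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "c",
				Image: "k8s.gcr.io/pause",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("100m"),
						v1.ResourceMemory: resource.MustParse("128Mi"),
					},
					Limits: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("200m"),
						v1.ResourceMemory: resource.MustParse("256Mi"),
					},
				},
			}},
		},
	}
	test.MakeBurstablePod(pod)    // clears limits, keeps requests
	test.SetRSOwnerRef(pod)       // owned by a ReplicaSet, so it is evictable
	test.SetPodPriority(pod, 100) // sets pod.Spec.Priority
	return pod
}
```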
|
||||
|
||||
5  vendor/github.com/PuerkitoBio/purell/.gitignore  generated  vendored  Normal file
@@ -0,0 +1,5 @@
|
||||
*.sublime-*
|
||||
.DS_Store
|
||||
*.swp
|
||||
*.swo
|
||||
tags
|
||||
12  vendor/github.com/PuerkitoBio/purell/.travis.yml  generated  vendored  Normal file
@@ -0,0 +1,12 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.4.x
|
||||
- 1.5.x
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- "1.10.x"
|
||||
- "1.11.x"
|
||||
- tip
|
||||
12  vendor/github.com/PuerkitoBio/purell/LICENSE  generated  vendored  Normal file
@@ -0,0 +1,12 @@
|
||||
Copyright (c) 2012, Martin Angers
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
188  vendor/github.com/PuerkitoBio/purell/README.md  generated  vendored  Normal file
@@ -0,0 +1,188 @@
|
||||
# Purell
|
||||
|
||||
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
|
||||
|
||||
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
|
||||
|
||||
[](http://travis-ci.org/PuerkitoBio/purell)
|
||||
|
||||
## Install
|
||||
|
||||
`go get github.com/PuerkitoBio/purell`
|
||||
|
||||
## Changelog
|
||||
|
||||
* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor).
|
||||
* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
|
||||
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
|
||||
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
|
||||
* **v0.2.0** : Add benchmarks, Attempt IDN support.
|
||||
* **v0.1.0** : Initial release.
|
||||
|
||||
## Examples
|
||||
|
||||
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
|
||||
|
||||
```go
|
||||
package purell
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
func ExampleNormalizeURLString() {
|
||||
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
|
||||
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
fmt.Print(normalized)
|
||||
}
|
||||
// Output: http://somewebsite.com:80/Amazing%3F/url/
|
||||
}
|
||||
|
||||
func ExampleMustNormalizeURLString() {
|
||||
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
|
||||
FlagsUnsafeGreedy)
|
||||
fmt.Print(normalized)
|
||||
|
||||
// Output: http://somewebsite.com/Amazing%FA/url
|
||||
}
|
||||
|
||||
func ExampleNormalizeURL() {
|
||||
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
|
||||
fmt.Print(normalized)
|
||||
}
|
||||
|
||||
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
|
||||
}
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
|
||||
|
||||
```go
|
||||
const (
|
||||
// Safe normalizations
|
||||
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
|
||||
FlagLowercaseHost // http://HOST -> http://host
|
||||
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
|
||||
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
|
||||
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
|
||||
FlagRemoveDefaultPort // http://host:80 -> http://host
|
||||
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
|
||||
|
||||
// Usually safe normalizations
|
||||
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
|
||||
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
|
||||
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
|
||||
|
||||
// Unsafe normalizations
|
||||
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
|
||||
FlagRemoveFragment // http://host/path#fragment -> http://host/path
|
||||
FlagForceHTTP // https://host -> http://host
|
||||
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
|
||||
FlagRemoveWWW // http://www.host/ -> http://host/
|
||||
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
|
||||
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
|
||||
|
||||
// Normalizations not in the wikipedia article, required to cover tests cases
|
||||
// submitted by jehiah
|
||||
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
|
||||
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
|
||||
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
|
||||
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
|
||||
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
|
||||
|
||||
// Convenience set of safe normalizations
|
||||
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
|
||||
|
||||
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
|
||||
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
|
||||
|
||||
// Convenience set of usually safe normalizations (includes FlagsSafe)
|
||||
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
|
||||
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
|
||||
|
||||
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
|
||||
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
|
||||
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
|
||||
|
||||
// Convenience set of all available flags
|
||||
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
)
|
||||
```
|
||||
|
||||
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
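For instance, a sketch of building such a custom set, here starting from `FlagsUnsafeGreedy` but keeping the fragment and the `www.` prefix (the input URL below is only illustrative):

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Start from the unsafe greedy set, then strip two of its flags.
	flags := purell.FlagsUnsafeGreedy &^ (purell.FlagRemoveFragment | purell.FlagRemoveWWW)

	normalized, err := purell.NormalizeURLString(
		"HTTPS://www.Example.com:443/a/./b/../c/?b=2&a=1#section", flags)
	if err != nil {
		panic(err)
	}
	// Expected (scheme forced to http, default port dropped, dot segments and
	// trailing slash removed, query sorted, fragment and www. kept):
	// http://www.example.com/a/c?a=1&b=2#section
	fmt.Println(normalized)
}
```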
|
||||
|
||||
The [full godoc reference is available on gopkgdoc][godoc].
|
||||
|
||||
Some things to note:
|
||||
|
||||
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
|
||||
|
||||
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
|
||||
- %24 -> $
|
||||
- %26 -> &
|
||||
- %2B-%3B -> +,-./0123456789:;
|
||||
- %3D -> =
|
||||
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
- %5F -> _
|
||||
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
|
||||
- %7E -> ~
|
||||
|
||||
|
||||
* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
|
||||
|
||||
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
|
||||
|
||||
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
|
||||
|
||||
### Safe vs Usually Safe vs Unsafe
|
||||
|
||||
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
|
||||
|
||||
Consider the following URL:
|
||||
|
||||
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
Normalizing with the `FlagsSafe` gives:
|
||||
|
||||
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
With the `FlagsUsuallySafeGreedy`:
|
||||
|
||||
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
And with `FlagsUnsafeGreedy`:
|
||||
|
||||
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
|
||||
|
||||
## TODOs
|
||||
|
||||
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
|
||||
|
||||
## Thanks / Contributions
|
||||
|
||||
@rogpeppe
|
||||
@jehiah
|
||||
@opennota
|
||||
@pchristopher1275
|
||||
@zenovich
|
||||
@beeker1121
|
||||
|
||||
## License
|
||||
|
||||
The [BSD 3-Clause license][bsd].
|
||||
|
||||
[bsd]: http://opensource.org/licenses/BSD-3-Clause
|
||||
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
|
||||
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
|
||||
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
|
||||
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
|
||||
[iss7]: https://github.com/PuerkitoBio/purell/issues/7
|
||||
379  vendor/github.com/PuerkitoBio/purell/purell.go  generated  vendored  Normal file
@@ -0,0 +1,379 @@
|
||||
/*
|
||||
Package purell offers URL normalization as described on the wikipedia page:
|
||||
http://en.wikipedia.org/wiki/URL_normalization
|
||||
*/
|
||||
package purell
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/PuerkitoBio/urlesc"
|
||||
"golang.org/x/net/idna"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
"golang.org/x/text/width"
|
||||
)
|
||||
|
||||
// A set of normalization flags determines how a URL will
|
||||
// be normalized.
|
||||
type NormalizationFlags uint
|
||||
|
||||
const (
|
||||
// Safe normalizations
|
||||
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
|
||||
FlagLowercaseHost // http://HOST -> http://host
|
||||
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
|
||||
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
|
||||
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
|
||||
FlagRemoveDefaultPort // http://host:80 -> http://host
|
||||
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
|
||||
|
||||
// Usually safe normalizations
|
||||
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
|
||||
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
|
||||
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
|
||||
|
||||
// Unsafe normalizations
|
||||
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
|
||||
FlagRemoveFragment // http://host/path#fragment -> http://host/path
|
||||
FlagForceHTTP // https://host -> http://host
|
||||
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
|
||||
FlagRemoveWWW // http://www.host/ -> http://host/
|
||||
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
|
||||
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
|
||||
|
||||
// Normalizations not in the wikipedia article, required to cover tests cases
|
||||
// submitted by jehiah
|
||||
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
|
||||
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
|
||||
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
|
||||
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
|
||||
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
|
||||
|
||||
// Convenience set of safe normalizations
|
||||
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
|
||||
|
||||
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
|
||||
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
|
||||
|
||||
// Convenience set of usually safe normalizations (includes FlagsSafe)
|
||||
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
|
||||
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
|
||||
|
||||
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
|
||||
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
|
||||
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
|
||||
|
||||
// Convenience set of all available flags
|
||||
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
)
|
||||
|
||||
const (
|
||||
defaultHttpPort = ":80"
|
||||
defaultHttpsPort = ":443"
|
||||
)
|
||||
|
||||
// Regular expressions used by the normalizations
|
||||
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
|
||||
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
|
||||
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
|
||||
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
|
||||
var rxEmptyPort = regexp.MustCompile(`:+$`)
|
||||
|
||||
// Map of flags to implementation function.
|
||||
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
|
||||
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
|
||||
|
||||
// Since maps have undefined traversing order, make a slice of ordered keys
|
||||
var flagsOrder = []NormalizationFlags{
|
||||
FlagLowercaseScheme,
|
||||
FlagLowercaseHost,
|
||||
FlagRemoveDefaultPort,
|
||||
FlagRemoveDirectoryIndex,
|
||||
FlagRemoveDotSegments,
|
||||
FlagRemoveFragment,
|
||||
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
|
||||
FlagRemoveDuplicateSlashes,
|
||||
FlagRemoveWWW,
|
||||
FlagAddWWW,
|
||||
FlagSortQuery,
|
||||
FlagDecodeDWORDHost,
|
||||
FlagDecodeOctalHost,
|
||||
FlagDecodeHexHost,
|
||||
FlagRemoveUnnecessaryHostDots,
|
||||
FlagRemoveEmptyPortSeparator,
|
||||
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
|
||||
FlagAddTrailingSlash,
|
||||
}
|
||||
|
||||
// ... and then the map, where order is unimportant
|
||||
var flags = map[NormalizationFlags]func(*url.URL){
|
||||
FlagLowercaseScheme: lowercaseScheme,
|
||||
FlagLowercaseHost: lowercaseHost,
|
||||
FlagRemoveDefaultPort: removeDefaultPort,
|
||||
FlagRemoveDirectoryIndex: removeDirectoryIndex,
|
||||
FlagRemoveDotSegments: removeDotSegments,
|
||||
FlagRemoveFragment: removeFragment,
|
||||
FlagForceHTTP: forceHTTP,
|
||||
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
|
||||
FlagRemoveWWW: removeWWW,
|
||||
FlagAddWWW: addWWW,
|
||||
FlagSortQuery: sortQuery,
|
||||
FlagDecodeDWORDHost: decodeDWORDHost,
|
||||
FlagDecodeOctalHost: decodeOctalHost,
|
||||
FlagDecodeHexHost: decodeHexHost,
|
||||
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
|
||||
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
|
||||
FlagRemoveTrailingSlash: removeTrailingSlash,
|
||||
FlagAddTrailingSlash: addTrailingSlash,
|
||||
}
|
||||
|
||||
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
|
||||
// It takes an URL string as input, as well as the normalization flags.
|
||||
func MustNormalizeURLString(u string, f NormalizationFlags) string {
|
||||
result, e := NormalizeURLString(u, f)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
|
||||
// It takes an URL string as input, as well as the normalization flags.
|
||||
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
|
||||
parsed, err := url.Parse(u)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if f&FlagLowercaseHost == FlagLowercaseHost {
|
||||
parsed.Host = strings.ToLower(parsed.Host)
|
||||
}
|
||||
|
||||
// The idna package doesn't fully conform to RFC 5895
|
||||
// (https://tools.ietf.org/html/rfc5895), so we do it here.
|
||||
// Taken from Go 1.8 cycle source, courtesy of bradfitz.
|
||||
// TODO: Remove when (if?) idna package conforms to RFC 5895.
|
||||
parsed.Host = width.Fold.String(parsed.Host)
|
||||
parsed.Host = norm.NFC.String(parsed.Host)
|
||||
if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return NormalizeURL(parsed, f), nil
|
||||
}
|
||||
|
||||
// NormalizeURL returns the normalized string.
|
||||
// It takes a parsed URL object as input, as well as the normalization flags.
|
||||
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
|
||||
for _, k := range flagsOrder {
|
||||
if f&k == k {
|
||||
flags[k](u)
|
||||
}
|
||||
}
|
||||
return urlesc.Escape(u)
|
||||
}
|
||||
|
||||
func lowercaseScheme(u *url.URL) {
|
||||
if len(u.Scheme) > 0 {
|
||||
u.Scheme = strings.ToLower(u.Scheme)
|
||||
}
|
||||
}
|
||||
|
||||
func lowercaseHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
u.Host = strings.ToLower(u.Host)
|
||||
}
|
||||
}
|
||||
|
||||
func removeDefaultPort(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
scheme := strings.ToLower(u.Scheme)
|
||||
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
|
||||
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
|
||||
return ""
|
||||
}
|
||||
return val
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func removeTrailingSlash(u *url.URL) {
|
||||
if l := len(u.Path); l > 0 {
|
||||
if strings.HasSuffix(u.Path, "/") {
|
||||
u.Path = u.Path[:l-1]
|
||||
}
|
||||
} else if l = len(u.Host); l > 0 {
|
||||
if strings.HasSuffix(u.Host, "/") {
|
||||
u.Host = u.Host[:l-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addTrailingSlash(u *url.URL) {
|
||||
if l := len(u.Path); l > 0 {
|
||||
if !strings.HasSuffix(u.Path, "/") {
|
||||
u.Path += "/"
|
||||
}
|
||||
} else if l = len(u.Host); l > 0 {
|
||||
if !strings.HasSuffix(u.Host, "/") {
|
||||
u.Host += "/"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeDotSegments(u *url.URL) {
|
||||
if len(u.Path) > 0 {
|
||||
var dotFree []string
|
||||
var lastIsDot bool
|
||||
|
||||
sections := strings.Split(u.Path, "/")
|
||||
for _, s := range sections {
|
||||
if s == ".." {
|
||||
if len(dotFree) > 0 {
|
||||
dotFree = dotFree[:len(dotFree)-1]
|
||||
}
|
||||
} else if s != "." {
|
||||
dotFree = append(dotFree, s)
|
||||
}
|
||||
lastIsDot = (s == "." || s == "..")
|
||||
}
|
||||
// Special case if host does not end with / and new path does not begin with /
|
||||
u.Path = strings.Join(dotFree, "/")
|
||||
if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
|
||||
u.Path = "/" + u.Path
|
||||
}
|
||||
// Special case if the last segment was a dot, make sure the path ends with a slash
|
||||
if lastIsDot && !strings.HasSuffix(u.Path, "/") {
|
||||
u.Path += "/"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeDirectoryIndex(u *url.URL) {
|
||||
if len(u.Path) > 0 {
|
||||
u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
|
||||
}
|
||||
}
|
||||
|
||||
func removeFragment(u *url.URL) {
|
||||
u.Fragment = ""
|
||||
}
|
||||
|
||||
func forceHTTP(u *url.URL) {
|
||||
if strings.ToLower(u.Scheme) == "https" {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
}
|
||||
|
||||
func removeDuplicateSlashes(u *url.URL) {
|
||||
if len(u.Path) > 0 {
|
||||
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
|
||||
}
|
||||
}
|
||||
|
||||
func removeWWW(u *url.URL) {
|
||||
if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
|
||||
u.Host = u.Host[4:]
|
||||
}
|
||||
}
|
||||
|
||||
func addWWW(u *url.URL) {
|
||||
if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
|
||||
u.Host = "www." + u.Host
|
||||
}
|
||||
}
|
||||
|
||||
func sortQuery(u *url.URL) {
|
||||
q := u.Query()
|
||||
|
||||
if len(q) > 0 {
|
||||
arKeys := make([]string, len(q))
|
||||
i := 0
|
||||
for k := range q {
|
||||
arKeys[i] = k
|
||||
i++
|
||||
}
|
||||
sort.Strings(arKeys)
|
||||
buf := new(bytes.Buffer)
|
||||
for _, k := range arKeys {
|
||||
sort.Strings(q[k])
|
||||
for _, v := range q[k] {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteRune('&')
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
|
||||
}
|
||||
}
|
||||
|
||||
// Rebuild the raw query string
|
||||
u.RawQuery = buf.String()
|
||||
}
|
||||
}
|
||||
|
||||
func decodeDWORDHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
|
||||
var parts [4]int64
|
||||
|
||||
dword, _ := strconv.ParseInt(matches[1], 10, 0)
|
||||
for i, shift := range []uint{24, 16, 8, 0} {
|
||||
parts[i] = dword >> shift & 0xFF
|
||||
}
|
||||
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func decodeOctalHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
|
||||
var parts [4]int64
|
||||
|
||||
for i := 1; i <= 4; i++ {
|
||||
parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
|
||||
}
|
||||
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func decodeHexHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
|
||||
// Conversion is safe because of regex validation
|
||||
parsed, _ := strconv.ParseInt(matches[1], 16, 0)
|
||||
// Set host as DWORD (base 10) encoded host
|
||||
u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
|
||||
// The rest is the same as decoding a DWORD host
|
||||
decodeDWORDHost(u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeUnncessaryHostDots(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
|
||||
// Trim the leading and trailing dots
|
||||
u.Host = strings.Trim(matches[1], ".")
|
||||
if len(matches) > 2 {
|
||||
u.Host += matches[2]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeEmptyPortSeparator(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
|
||||
}
|
||||
}
|
||||
15  vendor/github.com/PuerkitoBio/urlesc/.travis.yml  generated  vendored  Normal file
@@ -0,0 +1,15 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.4.x
|
||||
- 1.5.x
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- tip
|
||||
|
||||
install:
|
||||
- go build .
|
||||
|
||||
script:
|
||||
- go test -v
|
||||
27  vendor/github.com/PuerkitoBio/urlesc/LICENSE  generated  vendored  Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
16  vendor/github.com/PuerkitoBio/urlesc/README.md  generated  vendored  Normal file
@@ -0,0 +1,16 @@
|
||||
urlesc [](https://travis-ci.org/PuerkitoBio/urlesc) [](http://godoc.org/github.com/PuerkitoBio/urlesc)
|
||||
======
|
||||
|
||||
Package urlesc implements query escaping as per RFC 3986.
|
||||
|
||||
It contains some parts of the net/url package, modified so as to allow
|
||||
some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
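A short usage sketch of the two exported helpers (the inputs are only illustrative; the expected outputs follow from the escaping rules in `urlesc.go` below):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/urlesc"
)

func main() {
	// In query components urlesc leaves '/' and '?' unescaped (RFC 3986 §3.4),
	// so only the '=' below is percent-encoded.
	fmt.Println(urlesc.QueryEscape("path=/docs/readme?raw")) // path%3D/docs/readme?raw

	// Escape re-assembles a parsed URL, escaping the path per RFC 3986.
	u, err := url.Parse("http://example.com/a b/c!d")
	if err != nil {
		panic(err)
	}
	fmt.Println(urlesc.Escape(u)) // http://example.com/a%20b/c!d
}
```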
|
||||
|
||||
## Install
|
||||
|
||||
go get github.com/PuerkitoBio/urlesc
|
||||
|
||||
## License
|
||||
|
||||
Go license (BSD-3-Clause)
|
||||
|
||||
180  vendor/github.com/PuerkitoBio/urlesc/urlesc.go  generated  vendored  Normal file
@@ -0,0 +1,180 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package urlesc implements query escaping as per RFC 3986.
|
||||
// It contains some parts of the net/url package, modified so as to allow
|
||||
// some reserved characters incorrectly escaped by net/url.
|
||||
// See https://github.com/golang/go/issues/5684
|
||||
package urlesc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type encoding int
|
||||
|
||||
const (
|
||||
encodePath encoding = 1 + iota
|
||||
encodeUserPassword
|
||||
encodeQueryComponent
|
||||
encodeFragment
|
||||
)
|
||||
|
||||
// Return true if the specified character should be escaped when
|
||||
// appearing in a URL string, according to RFC 3986.
|
||||
func shouldEscape(c byte, mode encoding) bool {
|
||||
// §2.3 Unreserved characters (alphanum)
|
||||
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
|
||||
return false
|
||||
}
|
||||
|
||||
switch c {
|
||||
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
|
||||
return false
|
||||
|
||||
// §2.2 Reserved characters (reserved)
|
||||
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
|
||||
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
|
||||
// Different sections of the URL allow a few of
|
||||
// the reserved characters to appear unescaped.
|
||||
switch mode {
|
||||
case encodePath: // §3.3
|
||||
// The RFC allows sub-delims and : @.
|
||||
// '/', '[' and ']' can be used to assign meaning to individual path
|
||||
// segments. This package only manipulates the path as a whole,
|
||||
// so we allow those as well. That leaves only ? and # to escape.
|
||||
return c == '?' || c == '#'
|
||||
|
||||
case encodeUserPassword: // §3.2.1
|
||||
// The RFC allows : and sub-delims in
|
||||
// userinfo. The parsing of userinfo treats ':' as special so we must escape
|
||||
// all the gen-delims.
|
||||
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
|
||||
|
||||
case encodeQueryComponent: // §3.4
|
||||
// The RFC allows / and ?.
|
||||
return c != '/' && c != '?'
|
||||
|
||||
case encodeFragment: // §4.1
|
||||
// The RFC text is silent but the grammar allows
|
||||
// everything, so escape nothing but #
|
||||
return c == '#'
|
||||
}
|
||||
}
|
||||
|
||||
// Everything else must be escaped.
|
||||
return true
|
||||
}
|
||||
|
||||
// QueryEscape escapes the string so it can be safely placed
|
||||
// inside a URL query.
|
||||
func QueryEscape(s string) string {
|
||||
return escape(s, encodeQueryComponent)
|
||||
}
|
||||
|
||||
func escape(s string, mode encoding) string {
|
||||
spaceCount, hexCount := 0, 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if shouldEscape(c, mode) {
|
||||
if c == ' ' && mode == encodeQueryComponent {
|
||||
spaceCount++
|
||||
} else {
|
||||
hexCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if spaceCount == 0 && hexCount == 0 {
|
||||
return s
|
||||
}
|
||||
|
||||
t := make([]byte, len(s)+2*hexCount)
|
||||
j := 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch c := s[i]; {
|
||||
case c == ' ' && mode == encodeQueryComponent:
|
||||
t[j] = '+'
|
||||
j++
|
||||
case shouldEscape(c, mode):
|
||||
t[j] = '%'
|
||||
t[j+1] = "0123456789ABCDEF"[c>>4]
|
||||
t[j+2] = "0123456789ABCDEF"[c&15]
|
||||
j += 3
|
||||
default:
|
||||
t[j] = s[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return string(t)
|
||||
}
|
||||
|
||||
var uiReplacer = strings.NewReplacer(
|
||||
"%21", "!",
|
||||
"%27", "'",
|
||||
"%28", "(",
|
||||
"%29", ")",
|
||||
"%2A", "*",
|
||||
)
|
||||
|
||||
// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986.
|
||||
func unescapeUserinfo(s string) string {
|
||||
return uiReplacer.Replace(s)
|
||||
}
|
||||
|
||||
// Escape reassembles the URL into a valid URL string.
|
||||
// The general form of the result is one of:
|
||||
//
|
||||
// scheme:opaque
|
||||
// scheme://userinfo@host/path?query#fragment
|
||||
//
|
||||
// If u.Opaque is non-empty, String uses the first form;
|
||||
// otherwise it uses the second form.
|
||||
//
|
||||
// In the second form, the following rules apply:
|
||||
// - if u.Scheme is empty, scheme: is omitted.
|
||||
// - if u.User is nil, userinfo@ is omitted.
|
||||
// - if u.Host is empty, host/ is omitted.
|
||||
// - if u.Scheme and u.Host are empty and u.User is nil,
|
||||
// the entire scheme://userinfo@host/ is omitted.
|
||||
// - if u.Host is non-empty and u.Path begins with a /,
|
||||
// the form host/path does not add its own /.
|
||||
// - if u.RawQuery is empty, ?query is omitted.
|
||||
// - if u.Fragment is empty, #fragment is omitted.
|
||||
func Escape(u *url.URL) string {
|
||||
var buf bytes.Buffer
|
||||
if u.Scheme != "" {
|
||||
buf.WriteString(u.Scheme)
|
||||
buf.WriteByte(':')
|
||||
}
|
||||
if u.Opaque != "" {
|
||||
buf.WriteString(u.Opaque)
|
||||
} else {
|
||||
if u.Scheme != "" || u.Host != "" || u.User != nil {
|
||||
buf.WriteString("//")
|
||||
if ui := u.User; ui != nil {
|
||||
buf.WriteString(unescapeUserinfo(ui.String()))
|
||||
buf.WriteByte('@')
|
||||
}
|
||||
if h := u.Host; h != "" {
|
||||
buf.WriteString(h)
|
||||
}
|
||||
}
|
||||
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
|
||||
buf.WriteByte('/')
|
||||
}
|
||||
buf.WriteString(escape(u.Path, encodePath))
|
||||
}
|
||||
if u.RawQuery != "" {
|
||||
buf.WriteByte('?')
|
||||
buf.WriteString(u.RawQuery)
|
||||
}
|
||||
if u.Fragment != "" {
|
||||
buf.WriteByte('#')
|
||||
buf.WriteString(escape(u.Fragment, encodeFragment))
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
70  vendor/github.com/emicklei/go-restful/.gitignore  generated  vendored  Normal file
@@ -0,0 +1,70 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
restful.html
|
||||
|
||||
*.out
|
||||
|
||||
tmp.prof
|
||||
|
||||
go-restful.test
|
||||
|
||||
examples/restful-basic-authentication
|
||||
|
||||
examples/restful-encoding-filter
|
||||
|
||||
examples/restful-filters
|
||||
|
||||
examples/restful-hello-world
|
||||
|
||||
examples/restful-resource-functions
|
||||
|
||||
examples/restful-serve-static
|
||||
|
||||
examples/restful-user-service
|
||||
|
||||
*.DS_Store
|
||||
examples/restful-user-resource
|
||||
|
||||
examples/restful-multi-containers
|
||||
|
||||
examples/restful-form-handling
|
||||
|
||||
examples/restful-CORS-filter
|
||||
|
||||
examples/restful-options-filter
|
||||
|
||||
examples/restful-curly-router
|
||||
|
||||
examples/restful-cpuprofiler-service
|
||||
|
||||
examples/restful-pre-post-filters
|
||||
|
||||
curly.prof
|
||||
|
||||
examples/restful-NCSA-logging
|
||||
|
||||
examples/restful-html-template
|
||||
|
||||
s.html
|
||||
restful-path-tail
|
||||
6  vendor/github.com/emicklei/go-restful/.travis.yml  generated  vendored  Normal file
@@ -0,0 +1,6 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.x
|
||||
|
||||
script: go test -v
|
||||
273  vendor/github.com/emicklei/go-restful/CHANGES.md  generated  vendored  Normal file
@@ -0,0 +1,273 @@
|
||||
## Change history of go-restful
|
||||
|
||||
|
||||
v2.9.5
|
||||
- fix panic in Response.WriteError if err == nil
|
||||
|
||||
v2.9.4
|
||||
|
||||
- fix issue #400 , parsing mime type quality
|
||||
- Route Builder added option for contentEncodingEnabled (#398)
|
||||
|
||||
v2.9.3
|
||||
|
||||
- Avoid return of 415 Unsupported Media Type when request body is empty (#396)
|
||||
|
||||
v2.9.2
|
||||
|
||||
- Reduce allocations in per-request methods to improve performance (#395)
|
||||
|
||||
v2.9.1
|
||||
|
||||
- Fix issue with default responses and invalid status code 0. (#393)
|
||||
|
||||
v2.9.0
|
||||
|
||||
- add per Route content encoding setting (overrides container setting)
|
||||
|
||||
v2.8.0
|
||||
|
||||
- add Request.QueryParameters()
|
||||
- add json-iterator (via build tag)
|
||||
- disable vgo module (until log is moved)
|
||||
|
||||
v2.7.1
|
||||
|
||||
- add vgo module
|
||||
|
||||
v2.6.1
|
||||
|
||||
- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+)
|
||||
|
||||
v2.6.0
|
||||
|
||||
- Make JSR 311 routing and path param processing consistent
|
||||
- Adding description to RouteBuilder.Reads()
|
||||
- Update example for Swagger12 and OpenAPI
|
||||
|
||||
2017-09-13
|
||||
|
||||
- added route condition functions using `.If(func)` in route building.
|
||||
|
||||
2017-02-16
|
||||
|
||||
- solved issue #304, make operation names unique
|
||||
|
||||
2017-01-30
|
||||
|
||||
[IMPORTANT] For swagger users, change your import statement to:
|
||||
swagger "github.com/emicklei/go-restful-swagger12"
|
||||
|
||||
- moved swagger 1.2 code to go-restful-swagger12
|
||||
- created TAG 2.0.0
|
||||
|
||||
2017-01-27
|
||||
|
||||
- remove defer request body close
|
||||
- expose Dispatch for testing filters and Routefunctions
|
||||
- swagger response model cannot be array
|
||||
- created TAG 1.0.0
|
||||
|
||||
2016-12-22
|
||||
|
||||
- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
|
||||
|
||||
2016-11-26
|
||||
|
||||
- Default change! now use CurlyRouter (was RouterJSR311)
|
||||
- Default change! no more caching of request content
|
||||
- Default change! do not recover from panics
|
||||
|
||||
2016-09-22
|
||||
|
||||
- fix the DefaultRequestContentType feature
|
||||
|
||||
2016-02-14
|
||||
|
||||
- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response
|
||||
- add constructors for custom entity accessors for xml and json
|
||||
|
||||
2015-09-27
|
||||
|
||||
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
|
||||
|
||||
2015-09-25
|
||||
|
||||
- fixed problem with changing Header after WriteHeader (issue 235)
|
||||
|
||||
2015-09-14
|
||||
|
||||
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
|
||||
- added support for custom EntityReaderWriters.
|
||||
|
||||
2015-08-06
|
||||
|
||||
- add support for reading entities from compressed request content
|
||||
- use sync.Pool for compressors of http response and request body
|
||||
- add Description to Parameter for documentation in Swagger UI
|
||||
|
||||
2015-03-20
|
||||
|
||||
- add configurable logging
|
||||
|
||||
2015-03-18
|
||||
|
||||
- if not specified, the Operation is derived from the Route function
|
||||
|
||||
2015-03-17
|
||||
|
||||
- expose Parameter creation functions
|
||||
- make trace logger an interface
|
||||
- fix OPTIONSFilter
|
||||
- customize rendering of ServiceError
|
||||
- JSR311 router now handles wildcards
|
||||
- add Notes to Route
|
||||
|
||||
2014-11-27
|
||||
|
||||
- (api add) PrettyPrint per response. (as proposed in #167)
|
||||
|
||||
2014-11-12
|
||||
|
||||
- (api add) ApiVersion(.) for documentation in Swagger UI
|
||||
|
||||
2014-11-10
|
||||
|
||||
- (api change) struct fields tagged with "description" show up in Swagger UI
|
||||
|
||||
2014-10-31
|
||||
|
||||
- (api change) ReturnsError -> Returns
|
||||
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
|
||||
- fix swagger nested structs
|
||||
- sort Swagger response messages by code
|
||||
|
||||
2014-10-23
|
||||
|
||||
- (api add) ReturnsError allows you to document Http codes in swagger
|
||||
- fixed problem with greedy CurlyRouter
|
||||
- (api add) Access-Control-Max-Age in CORS
|
||||
- add tracing functionality (injectable) for debugging purposes
|
||||
- support JSON parse 64bit int
|
||||
- fix empty parameters for swagger
|
||||
- WebServicesUrl is now optional for swagger
|
||||
- fixed duplicate AccessControlAllowOrigin in CORS
|
||||
- (api change) expose ServeMux in container
|
||||
- (api add) added AllowedDomains in CORS
|
||||
- (api add) ParameterNamed for detailed documentation
|
||||
|
||||
2014-04-16
|
||||
|
||||
- (api add) expose constructor of Request for testing.
|
||||
|
||||
2014-06-27
|
||||
|
||||
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
|
||||
- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
|
||||
|
||||
2014-07-03
|
||||
|
||||
- (api add) CORS can be configured with a list of allowed domains
|
||||
|
||||
2014-03-12
|
||||
|
||||
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
|
||||
|
||||
2014-02-26
|
||||
|
||||
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
|
||||
|
||||
2014-02-17
|
||||
|
||||
- (api change) renamed parameter constants (go-lint checks)
|
||||
|
||||
2014-01-10
|
||||
|
||||
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
|
||||
|
||||
2014-01-07
|
||||
|
||||
- (api change) Write* methods in Response now return the error or nil.
|
||||
- added example of serving HTML from a Go template.
|
||||
- fixed comparing Allowed headers in CORS (is now case-insensitive)
|
||||
|
||||
2013-11-13
|
||||
|
||||
- (api add) Response knows how many bytes are written to the response body.
|
||||
|
||||
2013-10-29
|
||||
|
||||
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes source code information.
|
||||
|
||||
2013-10-04
|
||||
|
||||
- (api add) Response knows what HTTP status has been written
|
||||
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables
|
||||
|
||||
2013-09-12
|
||||
|
||||
- (api change) Router interface simplified
|
||||
- Implemented CurlyRouter, a Router that does not use or allow regular expressions in paths
|
||||
|
||||
2013-08-05
|
||||
- add OPTIONS support
|
||||
- add CORS support
|
||||
|
||||
2013-08-27
|
||||
|
||||
- fixed some reported issues (see github)
|
||||
- (api change) deprecated use of WriteError; use WriteErrorString instead
|
||||
|
||||
2014-04-15
|
||||
|
||||
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
|
||||
|
||||
2013-08-08
|
||||
|
||||
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
|
||||
- (api add) the swagger package has been extended to have a UI per container.
|
||||
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
|
||||
- (api add) WriteErrorString to Response
|
||||
|
||||
Important API changes:
|
||||
|
||||
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
|
||||
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
|
||||
|
||||
|
||||
2013-07-06
|
||||
|
||||
- (api add) Added support for response encoding (gzip and deflate (zlib)). This feature is disabled by default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
|
||||
|
||||
2013-06-19
|
||||
|
||||
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
|
||||
|
||||
2013-06-03
|
||||
|
||||
- (api change) removed Dispatcher interface, hide PathExpression
|
||||
- changed receiver names of type functions to be more idiomatic Go
|
||||
|
||||
2013-06-02
|
||||
|
||||
- (optimize) Cache the RegExp compilation of Paths.
|
||||
|
||||
2013-05-22
|
||||
|
||||
- (api add) Added support for request/response filter functions
|
||||
|
||||
2013-05-18
|
||||
|
||||
|
||||
- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
|
||||
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
|
||||
|
||||
[2012-11-14 .. 2013-05-18>
|
||||
|
||||
- See https://github.com/emicklei/go-restful/commits
|
||||
|
||||
2012-11-14
|
||||
|
||||
- Initial commit
|
||||
|
||||
|
||||
22
vendor/github.com/emicklei/go-restful/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
Copyright (c) 2012,2013 Ernest Micklei
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
7
vendor/github.com/emicklei/go-restful/Makefile
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
all: test
|
||||
|
||||
test:
|
||||
go test -v .
|
||||
|
||||
ex:
|
||||
cd examples && ls *.go | xargs go build -o /tmp/ignore
|
||||
88
vendor/github.com/emicklei/go-restful/README.md
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
go-restful
|
||||
==========
|
||||
package for building REST-style Web Services using Google Go
|
||||
|
||||
[](https://travis-ci.org/emicklei/go-restful)
|
||||
[](https://goreportcard.com/report/github.com/emicklei/go-restful)
|
||||
[](https://godoc.org/github.com/emicklei/go-restful)
|
||||
|
||||
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
|
||||
|
||||
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
|
||||
|
||||
- GET = Retrieve a representation of a resource
|
||||
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
|
||||
- PUT = Create if you are sending the full content of the specified resource (URI).
|
||||
- PUT = Update if you are updating the full content of the specified resource.
|
||||
- DELETE = Delete if you are requesting the server to delete the resource
|
||||
- PATCH = Update partial content of a resource
|
||||
- OPTIONS = Get information about the communication options for the request URI
|
||||
|
||||
### Example
|
||||
|
||||
```Go
|
||||
ws := new(restful.WebService)
|
||||
ws.
|
||||
Path("/users").
|
||||
Consumes(restful.MIME_XML, restful.MIME_JSON).
|
||||
Produces(restful.MIME_JSON, restful.MIME_XML)
|
||||
|
||||
ws.Route(ws.GET("/{user-id}").To(u.findUser).
|
||||
Doc("get a user").
|
||||
Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
|
||||
Writes(User{}))
|
||||
...
|
||||
|
||||
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
|
||||
id := request.PathParameter("user-id")
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
|
||||
|
||||
### Features
|
||||
|
||||
- Routes for request → function mapping with path parameter (e.g. {id}) support
|
||||
- Configurable router:
|
||||
- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*})
|
||||
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
|
||||
- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
|
||||
- Response API for writing structs to JSON/XML and setting headers
|
||||
- Customizable encoding using EntityReaderWriter registration
|
||||
- Filters for intercepting the request → response flow on Service or Route level (see the sketch below this list)
|
||||
- Request-scoped variables using attributes
|
||||
- Containers for WebServices on different HTTP endpoints
|
||||
- Content encoding (gzip,deflate) of request and response payloads
|
||||
- Automatic responses on OPTIONS (using a filter)
|
||||
- Automatic CORS request handling (using a filter)
|
||||
- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
|
||||
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
|
||||
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
|
||||
- Configurable (trace) logging
|
||||
- Customizable gzip/deflate readers and writers using CompressorProvider registration
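A filter (see the Filters item above) is just a function with the restful.FilterFunction signature. The sketch below is illustrative only and not part of this README; it assumes the package is imported as `restful "github.com/emicklei/go-restful"` and that the standard `log` package is imported.

```Go
// logRequest is a hypothetical global filter: it logs the method and path of
// every request, then hands control to the next filter or the route function.
func logRequest(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	log.Printf("%s %s", req.Request.Method, req.Request.URL.Path)
	chain.ProcessFilter(req, resp)
}

func init() {
	// Register the filter on the default container so it runs for every WebService.
	restful.DefaultContainer.Filter(logRequest)
}
```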
|
||||
|
||||
## How to customize
|
||||
There are several hooks to customize the behavior of the go-restful package.
|
||||
|
||||
- Router algorithm
|
||||
- Panic recovery
|
||||
- JSON decoder
|
||||
- Trace logging
|
||||
- Compression
|
||||
- Encoders for other serializers
|
||||
- Use [jsoniter](https://github.com/json-iterator/go) by building this package with a tag, e.g. `go build -tags=jsoniter .`
|
||||
|
||||
TODO: write examples of these.
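In the meantime, a few of these hooks can be wired up on the default container as in the hedged sketch below (the values and the handler body are placeholders, not recommendations; it assumes `net/http` and the restful package are imported):

```Go
// Enable gzip/deflate compression of responses.
restful.DefaultContainer.EnableContentEncoding(true)

// Turn panic recovery on and replace the default stack-trace dump,
// which may leak source code information.
restful.DefaultContainer.DoNotRecover(false)
restful.DefaultContainer.RecoverHandler(func(reason interface{}, w http.ResponseWriter) {
	http.Error(w, "internal server error", http.StatusInternalServerError)
})
```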
|
||||
|
||||
## Resources
|
||||
|
||||
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
|
||||
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
|
||||
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
|
||||
- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
|
||||
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
|
||||
|
||||
Type ```git shortlog -s``` for a full list of contributors.
|
||||
|
||||
© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome.
|
||||
1
vendor/github.com/emicklei/go-restful/Srcfile
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"SkipDirs": ["examples"]}
|
||||
10
vendor/github.com/emicklei/go-restful/bench_test.sh
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
|
||||
|
||||
go test -c
|
||||
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
|
||||
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
|
||||
|
||||
#go tool pprof go-restful.test tmp.prof
|
||||
go tool pprof go-restful.test curly.prof
|
||||
|
||||
|
||||
123
vendor/github.com/emicklei/go-restful/compress.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// OBSOLETE: use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
|
||||
var EnableContentEncoding = false
|
||||
|
||||
// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
|
||||
type CompressingResponseWriter struct {
|
||||
writer http.ResponseWriter
|
||||
compressor io.WriteCloser
|
||||
encoding string
|
||||
}
|
||||
|
||||
// Header is part of http.ResponseWriter interface
|
||||
func (c *CompressingResponseWriter) Header() http.Header {
|
||||
return c.writer.Header()
|
||||
}
|
||||
|
||||
// WriteHeader is part of http.ResponseWriter interface
|
||||
func (c *CompressingResponseWriter) WriteHeader(status int) {
|
||||
c.writer.WriteHeader(status)
|
||||
}
|
||||
|
||||
// Write is part of http.ResponseWriter interface
|
||||
// It is passed through the compressor
|
||||
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
|
||||
if c.isCompressorClosed() {
|
||||
return -1, errors.New("Compressing error: tried to write data using closed compressor")
|
||||
}
|
||||
return c.compressor.Write(bytes)
|
||||
}
|
||||
|
||||
// CloseNotify is part of http.CloseNotifier interface
|
||||
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
|
||||
return c.writer.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
// Close the underlying compressor
|
||||
func (c *CompressingResponseWriter) Close() error {
|
||||
if c.isCompressorClosed() {
|
||||
return errors.New("Compressing error: tried to close already closed compressor")
|
||||
}
|
||||
|
||||
c.compressor.Close()
|
||||
if ENCODING_GZIP == c.encoding {
|
||||
currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
|
||||
}
|
||||
if ENCODING_DEFLATE == c.encoding {
|
||||
currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
|
||||
}
|
||||
// gc hint needed?
|
||||
c.compressor = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CompressingResponseWriter) isCompressorClosed() bool {
|
||||
return nil == c.compressor
|
||||
}
|
||||
|
||||
// Hijack implements the Hijacker interface
|
||||
// This is especially useful when combining Container.EnableContentEncoding
|
||||
// with websockets (for instance gorilla/websocket)
|
||||
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
hijacker, ok := c.writer.(http.Hijacker)
|
||||
if !ok {
|
||||
return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
|
||||
}
|
||||
return hijacker.Hijack()
|
||||
}
|
||||
|
||||
// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
|
||||
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
|
||||
header := httpRequest.Header.Get(HEADER_AcceptEncoding)
|
||||
gi := strings.Index(header, ENCODING_GZIP)
|
||||
zi := strings.Index(header, ENCODING_DEFLATE)
|
||||
// use in order of appearance
|
||||
if gi == -1 {
|
||||
return zi != -1, ENCODING_DEFLATE
|
||||
} else if zi == -1 {
|
||||
return gi != -1, ENCODING_GZIP
|
||||
} else {
|
||||
if gi < zi {
|
||||
return true, ENCODING_GZIP
|
||||
}
|
||||
return true, ENCODING_DEFLATE
|
||||
}
|
||||
}
|
||||
|
||||
// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
|
||||
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
|
||||
httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
|
||||
c := new(CompressingResponseWriter)
|
||||
c.writer = httpWriter
|
||||
var err error
|
||||
if ENCODING_GZIP == encoding {
|
||||
w := currentCompressorProvider.AcquireGzipWriter()
|
||||
w.Reset(httpWriter)
|
||||
c.compressor = w
|
||||
c.encoding = ENCODING_GZIP
|
||||
} else if ENCODING_DEFLATE == encoding {
|
||||
w := currentCompressorProvider.AcquireZlibWriter()
|
||||
w.Reset(httpWriter)
|
||||
c.compressor = w
|
||||
c.encoding = ENCODING_DEFLATE
|
||||
} else {
|
||||
return nil, errors.New("Unknown encoding:" + encoding)
|
||||
}
|
||||
return c, err
|
||||
}
|
||||
103
vendor/github.com/emicklei/go-restful/compressor_cache.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
)
|
||||
|
||||
// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
|
||||
// of writers and readers (resources).
|
||||
// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
|
||||
type BoundedCachedCompressors struct {
|
||||
gzipWriters chan *gzip.Writer
|
||||
gzipReaders chan *gzip.Reader
|
||||
zlibWriters chan *zlib.Writer
|
||||
writersCapacity int
|
||||
readersCapacity int
|
||||
}
|
||||
|
||||
// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a pre-filled cache.
|
||||
func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
|
||||
b := &BoundedCachedCompressors{
|
||||
gzipWriters: make(chan *gzip.Writer, writersCapacity),
|
||||
gzipReaders: make(chan *gzip.Reader, readersCapacity),
|
||||
zlibWriters: make(chan *zlib.Writer, writersCapacity),
|
||||
writersCapacity: writersCapacity,
|
||||
readersCapacity: readersCapacity,
|
||||
}
|
||||
for ix := 0; ix < writersCapacity; ix++ {
|
||||
b.gzipWriters <- newGzipWriter()
|
||||
b.zlibWriters <- newZlibWriter()
|
||||
}
|
||||
for ix := 0; ix < readersCapacity; ix++ {
|
||||
b.gzipReaders <- newGzipReader()
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
|
||||
func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
|
||||
var writer *gzip.Writer
|
||||
select {
|
||||
case writer, _ = <-b.gzipWriters:
|
||||
default:
|
||||
// return a new unmanaged one
|
||||
writer = newGzipWriter()
|
||||
}
|
||||
return writer
|
||||
}
|
||||
|
||||
// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
|
||||
// only when the cache has room for it. It will ignore it otherwise.
|
||||
func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
|
||||
// forget the unmanaged ones
|
||||
if len(b.gzipWriters) < b.writersCapacity {
|
||||
b.gzipWriters <- w
|
||||
}
|
||||
}
|
||||
|
||||
// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
|
||||
func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
|
||||
var reader *gzip.Reader
|
||||
select {
|
||||
case reader, _ = <-b.gzipReaders:
|
||||
default:
|
||||
// return a new unmanaged one
|
||||
reader = newGzipReader()
|
||||
}
|
||||
return reader
|
||||
}
|
||||
|
||||
// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
|
||||
// only when the cache has room for it. It will ignore it otherwise.
|
||||
func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
|
||||
// forget the unmanaged ones
|
||||
if len(b.gzipReaders) < b.readersCapacity {
|
||||
b.gzipReaders <- r
|
||||
}
|
||||
}
|
||||
|
||||
// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
|
||||
func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
|
||||
var writer *zlib.Writer
|
||||
select {
|
||||
case writer, _ = <-b.zlibWriters:
|
||||
default:
|
||||
// return a new unmanaged one
|
||||
writer = newZlibWriter()
|
||||
}
|
||||
return writer
|
||||
}
|
||||
|
||||
// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
|
||||
// only when the cache has room for it. It will ignore it otherwise.
|
||||
func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
|
||||
// forget the unmanaged ones
|
||||
if len(b.zlibWriters) < b.writersCapacity {
|
||||
b.zlibWriters <- w
|
||||
}
|
||||
}
|
||||
91
vendor/github.com/emicklei/go-restful/compressor_pools.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
|
||||
type SyncPoolCompessors struct {
|
||||
GzipWriterPool *sync.Pool
|
||||
GzipReaderPool *sync.Pool
|
||||
ZlibWriterPool *sync.Pool
|
||||
}
|
||||
|
||||
// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
|
||||
func NewSyncPoolCompessors() *SyncPoolCompessors {
|
||||
return &SyncPoolCompessors{
|
||||
GzipWriterPool: &sync.Pool{
|
||||
New: func() interface{} { return newGzipWriter() },
|
||||
},
|
||||
GzipReaderPool: &sync.Pool{
|
||||
New: func() interface{} { return newGzipReader() },
|
||||
},
|
||||
ZlibWriterPool: &sync.Pool{
|
||||
New: func() interface{} { return newZlibWriter() },
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
|
||||
return s.GzipWriterPool.Get().(*gzip.Writer)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
|
||||
s.GzipWriterPool.Put(w)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
|
||||
return s.GzipReaderPool.Get().(*gzip.Reader)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
|
||||
s.GzipReaderPool.Put(r)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
|
||||
return s.ZlibWriterPool.Get().(*zlib.Writer)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
|
||||
s.ZlibWriterPool.Put(w)
|
||||
}
|
||||
|
||||
func newGzipWriter() *gzip.Writer {
|
||||
// create with an empty bytes writer; it will be replaced before using the gzipWriter
|
||||
writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return writer
|
||||
}
|
||||
|
||||
func newGzipReader() *gzip.Reader {
|
||||
// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
|
||||
// we can safely use currentCompressorProvider because it is set on package initialization.
|
||||
w := currentCompressorProvider.AcquireGzipWriter()
|
||||
defer currentCompressorProvider.ReleaseGzipWriter(w)
|
||||
b := new(bytes.Buffer)
|
||||
w.Reset(b)
|
||||
w.Flush()
|
||||
w.Close()
|
||||
reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return reader
|
||||
}
|
||||
|
||||
func newZlibWriter() *zlib.Writer {
|
||||
writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return writer
|
||||
}
|
||||
54
vendor/github.com/emicklei/go-restful/compressors.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
)
|
||||
|
||||
// CompressorProvider describes a component that can provide compressors for the std methods.
|
||||
type CompressorProvider interface {
|
||||
// Returns a *gzip.Writer which needs to be released later.
|
||||
// Before using it, call Reset().
|
||||
AcquireGzipWriter() *gzip.Writer
|
||||
|
||||
// Releases an acquired *gzip.Writer.
|
||||
ReleaseGzipWriter(w *gzip.Writer)
|
||||
|
||||
// Returns a *gzip.Reader which needs to be released later.
|
||||
AcquireGzipReader() *gzip.Reader
|
||||
|
||||
// Releases an acquired *gzip.Reader.
|
||||
ReleaseGzipReader(w *gzip.Reader)
|
||||
|
||||
// Returns a *zlib.Writer which needs to be released later.
|
||||
// Before using it, call Reset().
|
||||
AcquireZlibWriter() *zlib.Writer
|
||||
|
||||
// Releases an acquired *zlib.Writer.
|
||||
ReleaseZlibWriter(w *zlib.Writer)
|
||||
}
|
||||
|
||||
// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
|
||||
var currentCompressorProvider CompressorProvider
|
||||
|
||||
func init() {
|
||||
currentCompressorProvider = NewSyncPoolCompessors()
|
||||
}
|
||||
|
||||
// CurrentCompressorProvider returns the current CompressorProvider.
|
||||
// It is initialized using a SyncPoolCompessors.
|
||||
func CurrentCompressorProvider() CompressorProvider {
|
||||
return currentCompressorProvider
|
||||
}
|
||||
|
||||
// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
|
||||
func SetCompressorProvider(p CompressorProvider) {
|
||||
if p == nil {
|
||||
panic("cannot set compressor provider to nil")
|
||||
}
|
||||
currentCompressorProvider = p
|
||||
}
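As a usage illustration (not part of the vendored file), the provider behind CurrentCompressorProvider can be replaced at program start-up. This hedged sketch installs the bounded-cache variant from compressor_cache.go; the capacities are arbitrary example values:

```Go
package main

import restful "github.com/emicklei/go-restful"

func main() {
	// Swap the default sync.Pool based provider for a bounded cache holding
	// 20 gzip/zlib writers and 20 gzip readers.
	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 20))

	// Compressors are only used when content encoding is enabled on a container.
	restful.DefaultContainer.EnableContentEncoding(true)
}
```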
|
||||
30
vendor/github.com/emicklei/go-restful/constants.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
const (
|
||||
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
|
||||
|
||||
HEADER_Allow = "Allow"
|
||||
HEADER_Accept = "Accept"
|
||||
HEADER_Origin = "Origin"
|
||||
HEADER_ContentType = "Content-Type"
|
||||
HEADER_LastModified = "Last-Modified"
|
||||
HEADER_AcceptEncoding = "Accept-Encoding"
|
||||
HEADER_ContentEncoding = "Content-Encoding"
|
||||
HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers"
|
||||
HEADER_AccessControlRequestMethod = "Access-Control-Request-Method"
|
||||
HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers"
|
||||
HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods"
|
||||
HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin"
|
||||
HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
|
||||
HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers"
|
||||
HEADER_AccessControlMaxAge = "Access-Control-Max-Age"
|
||||
|
||||
ENCODING_GZIP = "gzip"
|
||||
ENCODING_DEFLATE = "deflate"
|
||||
)
|
||||
377
vendor/github.com/emicklei/go-restful/container.go
generated
vendored
Normal file
@@ -0,0 +1,377 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/emicklei/go-restful/log"
|
||||
)
|
||||
|
||||
// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
|
||||
// The requests are further dispatched to routes of WebServices using a RouteSelector
|
||||
type Container struct {
|
||||
webServicesLock sync.RWMutex
|
||||
webServices []*WebService
|
||||
ServeMux *http.ServeMux
|
||||
isRegisteredOnRoot bool
|
||||
containerFilters []FilterFunction
|
||||
doNotRecover bool // default is true
|
||||
recoverHandleFunc RecoverHandleFunction
|
||||
serviceErrorHandleFunc ServiceErrorHandleFunction
|
||||
router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
|
||||
contentEncodingEnabled bool // default is false
|
||||
}
|
||||
|
||||
// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
|
||||
func NewContainer() *Container {
|
||||
return &Container{
|
||||
webServices: []*WebService{},
|
||||
ServeMux: http.NewServeMux(),
|
||||
isRegisteredOnRoot: false,
|
||||
containerFilters: []FilterFunction{},
|
||||
doNotRecover: true,
|
||||
recoverHandleFunc: logStackOnRecover,
|
||||
serviceErrorHandleFunc: writeServiceError,
|
||||
router: CurlyRouter{},
|
||||
contentEncodingEnabled: false}
|
||||
}
|
||||
|
||||
// RecoverHandleFunction declares functions that can be used to handle a panic situation.
|
||||
// The first argument is what recover() returns. The second must be used to communicate an error response.
|
||||
type RecoverHandleFunction func(interface{}, http.ResponseWriter)
|
||||
|
||||
// RecoverHandler changes the default function (logStackOnRecover) to be called
|
||||
// when a panic is detected. DoNotRecover must be set to false for this handler to be called.
|
||||
func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
|
||||
c.recoverHandleFunc = handler
|
||||
}
|
||||
|
||||
// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
|
||||
// The first argument is the service error, the second is the request that resulted in the error and
|
||||
// the third must be used to communicate an error response.
|
||||
type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
|
||||
|
||||
// ServiceErrorHandler changes the default function (writeServiceError) to be called
|
||||
// when a ServiceError is detected.
|
||||
func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
|
||||
c.serviceErrorHandleFunc = handler
|
||||
}
|
||||
|
||||
// DoNotRecover controls whether panics will be caught to return HTTP 500.
|
||||
// If set to true, Route functions are responsible for handling any error situation.
|
||||
// Default value is true.
|
||||
func (c *Container) DoNotRecover(doNot bool) {
|
||||
c.doNotRecover = doNot
|
||||
}
|
||||
|
||||
// Router changes the default Router (currently CurlyRouter)
|
||||
func (c *Container) Router(aRouter RouteSelector) {
|
||||
c.router = aRouter
|
||||
}
|
||||
|
||||
// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
|
||||
func (c *Container) EnableContentEncoding(enabled bool) {
|
||||
c.contentEncodingEnabled = enabled
|
||||
}
|
||||
|
||||
// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
|
||||
func (c *Container) Add(service *WebService) *Container {
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
|
||||
// if rootPath was not set then lazily initialize it
|
||||
if len(service.rootPath) == 0 {
|
||||
service.Path("/")
|
||||
}
|
||||
|
||||
// cannot have duplicate root paths
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
log.Printf("WebService with duplicate root path detected:['%v']", each)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// If not registered on root then add specific mapping
|
||||
if !c.isRegisteredOnRoot {
|
||||
c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
|
||||
}
|
||||
c.webServices = append(c.webServices, service)
|
||||
return c
|
||||
}
|
||||
|
||||
// addHandler may set a new HandleFunc for the serveMux
|
||||
// this function must run inside the critical region protected by the webServicesLock.
|
||||
// returns true if the function was registered on root ("/")
|
||||
func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
|
||||
pattern := fixedPrefixPath(service.RootPath())
|
||||
// check if root path registration is needed
|
||||
if "/" == pattern || "" == pattern {
|
||||
serveMux.HandleFunc("/", c.dispatch)
|
||||
return true
|
||||
}
|
||||
// detect if registration already exists
|
||||
alreadyMapped := false
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
alreadyMapped = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !alreadyMapped {
|
||||
serveMux.HandleFunc(pattern, c.dispatch)
|
||||
if !strings.HasSuffix(pattern, "/") {
|
||||
serveMux.HandleFunc(pattern+"/", c.dispatch)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *Container) Remove(ws *WebService) error {
|
||||
if c.ServeMux == http.DefaultServeMux {
|
||||
errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
|
||||
log.Print(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
// build a new ServeMux and re-register all WebServices
|
||||
newServeMux := http.NewServeMux()
|
||||
newServices := []*WebService{}
|
||||
newIsRegisteredOnRoot := false
|
||||
for _, each := range c.webServices {
|
||||
if each.rootPath != ws.rootPath {
|
||||
// If not registered on root then add specific mapping
|
||||
if !newIsRegisteredOnRoot {
|
||||
newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
|
||||
}
|
||||
newServices = append(newServices, each)
|
||||
}
|
||||
}
|
||||
c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
|
||||
return nil
|
||||
}
|
||||
|
||||
// logStackOnRecover is the default RecoverHandleFunction and is called
|
||||
// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
|
||||
// Default implementation logs the stacktrace and writes the stacktrace on the response.
|
||||
// This may be a security issue as it exposes source code information.
|
||||
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
|
||||
var buffer bytes.Buffer
|
||||
buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason))
|
||||
for i := 2; ; i += 1 {
|
||||
_, file, line, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
|
||||
}
|
||||
log.Print(buffer.String())
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
httpWriter.Write(buffer.Bytes())
|
||||
}
|
||||
|
||||
// writeServiceError is the default ServiceErrorHandleFunction and is called
|
||||
// when a ServiceError is returned during route selection. Default implementation
|
||||
// calls resp.WriteErrorString(err.Code, err.Message)
|
||||
func writeServiceError(err ServiceError, req *Request, resp *Response) {
|
||||
resp.WriteErrorString(err.Code, err.Message)
|
||||
}
|
||||
|
||||
// Dispatch the incoming Http Request to a matching WebService.
|
||||
func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
if httpWriter == nil {
|
||||
panic("httpWriter cannot be nil")
|
||||
}
|
||||
if httpRequest == nil {
|
||||
panic("httpRequest cannot be nil")
|
||||
}
|
||||
c.dispatch(httpWriter, httpRequest)
|
||||
}
|
||||
|
||||
// Dispatch the incoming Http Request to a matching WebService.
|
||||
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
writer := httpWriter
|
||||
|
||||
// CompressingResponseWriter should be closed after all operations are done
|
||||
defer func() {
|
||||
if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
|
||||
compressWriter.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Install panic recovery unless told otherwise
|
||||
if !c.doNotRecover { // catch all for 500 response
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
c.recoverHandleFunc(r, writer)
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Find the best matching Route; err is non-nil if no match was found
|
||||
var webService *WebService
|
||||
var route *Route
|
||||
var err error
|
||||
func() {
|
||||
c.webServicesLock.RLock()
|
||||
defer c.webServicesLock.RUnlock()
|
||||
webService, route, err = c.router.SelectRoute(
|
||||
c.webServices,
|
||||
httpRequest)
|
||||
}()
|
||||
|
||||
// Detect if compression is needed
|
||||
// assume without compression, test for override
|
||||
contentEncodingEnabled := c.contentEncodingEnabled
|
||||
if route != nil && route.contentEncodingEnabled != nil {
|
||||
contentEncodingEnabled = *route.contentEncodingEnabled
|
||||
}
|
||||
if contentEncodingEnabled {
|
||||
doCompress, encoding := wantsCompressedResponse(httpRequest)
|
||||
if doCompress {
|
||||
var err error
|
||||
writer, err = NewCompressingResponseWriter(httpWriter, encoding)
|
||||
if err != nil {
|
||||
log.Print("unable to install compressor: ", err)
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// a non-200 response has already been written
|
||||
// run container filters anyway ; they should not touch the response...
|
||||
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
|
||||
switch err.(type) {
|
||||
case ServiceError:
|
||||
ser := err.(ServiceError)
|
||||
c.serviceErrorHandleFunc(ser, req, resp)
|
||||
}
|
||||
// TODO
|
||||
}}
|
||||
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
|
||||
return
|
||||
}
|
||||
pathProcessor, routerProcessesPath := c.router.(PathProcessor)
|
||||
if !routerProcessesPath {
|
||||
pathProcessor = defaultPathProcessor{}
|
||||
}
|
||||
pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path)
|
||||
wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams)
|
||||
// pass through filters (if any)
|
||||
if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
|
||||
// compose filter chain
|
||||
allFilters := []FilterFunction{}
|
||||
allFilters = append(allFilters, c.containerFilters...)
|
||||
allFilters = append(allFilters, webService.filters...)
|
||||
allFilters = append(allFilters, route.Filters...)
|
||||
chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
|
||||
// handle request by route after passing all filters
|
||||
route.Function(wrappedRequest, wrappedResponse)
|
||||
}}
|
||||
chain.ProcessFilter(wrappedRequest, wrappedResponse)
|
||||
} else {
|
||||
// no filters, handle request by route
|
||||
route.Function(wrappedRequest, wrappedResponse)
|
||||
}
|
||||
}
|
||||
|
||||
// fixedPrefixPath returns the fixed part of the pathspec; it may include template vars {}
|
||||
func fixedPrefixPath(pathspec string) string {
|
||||
varBegin := strings.Index(pathspec, "{")
|
||||
if -1 == varBegin {
|
||||
return pathspec
|
||||
}
|
||||
return pathspec[:varBegin]
|
||||
}
|
||||
|
||||
// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
|
||||
func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
c.ServeMux.ServeHTTP(httpwriter, httpRequest)
|
||||
}
|
||||
|
||||
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
|
||||
func (c *Container) Handle(pattern string, handler http.Handler) {
|
||||
c.ServeMux.Handle(pattern, handler)
|
||||
}
|
||||
|
||||
// HandleWithFilter registers the handler for the given pattern.
|
||||
// Container's filter chain is applied for handler.
|
||||
// If a handler already exists for pattern, HandleWithFilter panics.
|
||||
func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
|
||||
f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
|
||||
if len(c.containerFilters) == 0 {
|
||||
handler.ServeHTTP(httpResponse, httpRequest)
|
||||
return
|
||||
}
|
||||
|
||||
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
|
||||
handler.ServeHTTP(httpResponse, httpRequest)
|
||||
}}
|
||||
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
|
||||
}
|
||||
|
||||
c.Handle(pattern, http.HandlerFunc(f))
|
||||
}
|
||||
|
||||
// Filter appends a container FilterFunction. These are called before dispatching
|
||||
// a http.Request to a WebService from the container
|
||||
func (c *Container) Filter(filter FilterFunction) {
|
||||
c.containerFilters = append(c.containerFilters, filter)
|
||||
}
|
||||
|
||||
// RegisteredWebServices returns the collections of added WebServices
|
||||
func (c *Container) RegisteredWebServices() []*WebService {
|
||||
c.webServicesLock.RLock()
|
||||
defer c.webServicesLock.RUnlock()
|
||||
result := make([]*WebService, len(c.webServices))
|
||||
for ix := range c.webServices {
|
||||
result[ix] = c.webServices[ix]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
|
||||
func (c *Container) computeAllowedMethods(req *Request) []string {
|
||||
// Go through all RegisteredWebServices() and all its Routes to collect the options
|
||||
methods := []string{}
|
||||
requestPath := req.Request.URL.Path
|
||||
for _, ws := range c.RegisteredWebServices() {
|
||||
matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
|
||||
if matches != nil {
|
||||
finalMatch := matches[len(matches)-1]
|
||||
for _, rt := range ws.Routes() {
|
||||
matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
|
||||
if matches != nil {
|
||||
lastMatch := matches[len(matches)-1]
|
||||
if lastMatch == "" || lastMatch == "/" { // include only if the value is empty or "/"
|
||||
methods = append(methods, rt.Method)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// methods = append(methods, "OPTIONS") not sure about this
|
||||
return methods
|
||||
}
|
||||
|
||||
// newBasicRequestResponse creates a pair of Request,Response from its http versions.
|
||||
// It is basic because no parameter or (produces) content-type information is given.
|
||||
func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
|
||||
resp := NewResponse(httpWriter)
|
||||
resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
|
||||
return NewRequest(httpRequest), resp
|
||||
}
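For orientation (an illustrative sketch, not part of the vendored source): a Container is an http.Handler through ServeHTTP above, so a program can serve its own container independently of the DefaultContainer. The /healthz route and port below are made up:

```Go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/healthz")
	ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {
		resp.WriteHeader(http.StatusOK) // minimal handler body
	}))

	// A dedicated Container with its own ServeMux, separate from the DefaultContainer.
	container := restful.NewContainer()
	container.Add(ws)

	log.Fatal(http.ListenAndServe(":8080", container))
}
```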
|
||||
202
vendor/github.com/emicklei/go-restful/cors_filter.go
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
|
||||
// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
|
||||
// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
|
||||
//
|
||||
// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
|
||||
// http://enable-cors.org/server.html
|
||||
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
|
||||
type CrossOriginResourceSharing struct {
|
||||
ExposeHeaders []string // list of Header names
|
||||
AllowedHeaders []string // list of Header names
|
||||
AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
|
||||
AllowedMethods []string
|
||||
MaxAge int // number of seconds before requiring new Options request
|
||||
CookiesAllowed bool
|
||||
Container *Container
|
||||
|
||||
allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
|
||||
}
|
||||
|
||||
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
|
||||
// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
|
||||
func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
|
||||
origin := req.Request.Header.Get(HEADER_Origin)
|
||||
if len(origin) == 0 {
|
||||
if trace {
|
||||
traceLogger.Print("no Http header Origin set")
|
||||
}
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if !c.isOriginAllowed(origin) { // check whether this origin is allowed
|
||||
if trace {
|
||||
traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
|
||||
}
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if req.Request.Method != "OPTIONS" {
|
||||
c.doActualRequest(req, resp)
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
|
||||
c.doPreflightRequest(req, resp)
|
||||
} else {
|
||||
c.doActualRequest(req, resp)
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
|
||||
c.setOptionsHeaders(req, resp)
|
||||
// continue processing the response
|
||||
}
|
||||
|
||||
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
|
||||
if len(c.AllowedMethods) == 0 {
|
||||
if c.Container == nil {
|
||||
c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
|
||||
} else {
|
||||
c.AllowedMethods = c.Container.computeAllowedMethods(req)
|
||||
}
|
||||
}
|
||||
|
||||
acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
|
||||
if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
|
||||
if trace {
|
||||
traceLogger.Printf("Http header %s:%s is not in %v",
|
||||
HEADER_AccessControlRequestMethod,
|
||||
acrm,
|
||||
c.AllowedMethods)
|
||||
}
|
||||
return
|
||||
}
|
||||
acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
|
||||
if len(acrhs) > 0 {
|
||||
for _, each := range strings.Split(acrhs, ",") {
|
||||
if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
|
||||
if trace {
|
||||
traceLogger.Printf("Http header %s:%s is not in %v",
|
||||
HEADER_AccessControlRequestHeaders,
|
||||
acrhs,
|
||||
c.AllowedHeaders)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
|
||||
resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
|
||||
c.setOptionsHeaders(req, resp)
|
||||
|
||||
// return http 200 response, no body
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
|
||||
c.checkAndSetExposeHeaders(resp)
|
||||
c.setAllowOriginHeader(req, resp)
|
||||
c.checkAndSetAllowCredentials(resp)
|
||||
if c.MaxAge > 0 {
|
||||
resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
|
||||
if len(origin) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(c.AllowedDomains) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
allowed := false
|
||||
for _, domain := range c.AllowedDomains {
|
||||
if domain == origin {
|
||||
allowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allowed {
|
||||
if len(c.allowedOriginPatterns) == 0 {
|
||||
// compile allowed domains to allowed origin patterns
|
||||
allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
c.allowedOriginPatterns = allowedOriginRegexps
|
||||
}
|
||||
|
||||
for _, pattern := range c.allowedOriginPatterns {
|
||||
if allowed = pattern.MatchString(origin); allowed {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allowed
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
|
||||
origin := req.Request.Header.Get(HEADER_Origin)
|
||||
if c.isOriginAllowed(origin) {
|
||||
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
|
||||
if len(c.ExposeHeaders) > 0 {
|
||||
resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
|
||||
if c.CookiesAllowed {
|
||||
resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
|
||||
for _, each := range allowedMethods {
|
||||
if each == method {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
|
||||
for _, each := range c.AllowedHeaders {
|
||||
if strings.ToLower(each) == strings.ToLower(header) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Take a list of strings and compile them into a list of regular expressions.
|
||||
func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
|
||||
regexps := []*regexp.Regexp{}
|
||||
for _, regexpStr := range regexpStrings {
|
||||
r, err := regexp.Compile(regexpStr)
|
||||
if err != nil {
|
||||
return regexps, err
|
||||
}
|
||||
regexps = append(regexps, r)
|
||||
}
|
||||
return regexps, nil
|
||||
}
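A typical way to use the filter above (illustrative only; the header names and domain are examples) is to register it on a container:

```Go
cors := restful.CrossOriginResourceSharing{
	ExposeHeaders:  []string{"X-Request-Id"},          // example header name
	AllowedHeaders: []string{"Content-Type", "Accept"},
	AllowedDomains: []string{"https://example.com"},   // exact value or regular expression
	AllowedMethods: []string{"GET", "POST"},
	CookiesAllowed: false,
	Container:      restful.DefaultContainer,
}
restful.DefaultContainer.Filter(cors.Filter)
```

Preflight OPTIONS requests are then answered by the filter itself (doPreflightRequest above); other requests simply gain the appropriate CORS response headers.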
|
||||
2
vendor/github.com/emicklei/go-restful/coverage.sh
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
go test -coverprofile=coverage.out
|
||||
go tool cover -html=coverage.out
|
||||
164
vendor/github.com/emicklei/go-restful/curly.go
generated
vendored
Normal file
@@ -0,0 +1,164 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
|
||||
type CurlyRouter struct{}
|
||||
|
||||
// SelectRoute is part of the Router interface and returns the best match
|
||||
// for the WebService and its Route for the given Request.
|
||||
func (c CurlyRouter) SelectRoute(
|
||||
webServices []*WebService,
|
||||
httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
|
||||
|
||||
requestTokens := tokenizePath(httpRequest.URL.Path)
|
||||
|
||||
detectedService := c.detectWebService(requestTokens, webServices)
|
||||
if detectedService == nil {
|
||||
if trace {
|
||||
traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
|
||||
}
|
||||
return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
|
||||
}
|
||||
candidateRoutes := c.selectRoutes(detectedService, requestTokens)
|
||||
if len(candidateRoutes) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
|
||||
}
|
||||
return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
|
||||
}
|
||||
selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
|
||||
if selectedRoute == nil {
|
||||
return detectedService, nil, err
|
||||
}
|
||||
return detectedService, selectedRoute, nil
|
||||
}
|
||||
|
||||
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
|
||||
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
|
||||
candidates := make(sortableCurlyRoutes, 0, 8)
|
||||
for _, each := range ws.routes {
|
||||
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
|
||||
if matches {
|
||||
candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
|
||||
}
|
||||
}
|
||||
sort.Sort(candidates)
|
||||
return candidates
|
||||
}
|
||||
|
||||
// matchesRouteByPathTokens computes whether the route matches, how many parameters match, and the number of static path elements.
|
||||
func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
|
||||
if len(routeTokens) < len(requestTokens) {
|
||||
// proceed in matching only if last routeToken is wildcard
|
||||
count := len(routeTokens)
|
||||
if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
|
||||
return false, 0, 0
|
||||
}
|
||||
// proceed
|
||||
}
|
||||
for i, routeToken := range routeTokens {
|
||||
if i == len(requestTokens) {
|
||||
// reached end of request path
|
||||
return false, 0, 0
|
||||
}
|
||||
requestToken := requestTokens[i]
|
||||
if strings.HasPrefix(routeToken, "{") {
|
||||
paramCount++
|
||||
if colon := strings.Index(routeToken, ":"); colon != -1 {
|
||||
// match by regex
|
||||
matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
|
||||
if !matchesToken {
|
||||
return false, 0, 0
|
||||
}
|
||||
if matchesRemainder {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else { // no { prefix
|
||||
if requestToken != routeToken {
|
||||
return false, 0, 0
|
||||
}
|
||||
staticCount++
|
||||
}
|
||||
}
|
||||
return true, paramCount, staticCount
|
||||
}
|
||||
|
||||
// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
|
||||
// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
|
||||
func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
|
||||
regPart := routeToken[colon+1 : len(routeToken)-1]
|
||||
if regPart == "*" {
|
||||
if trace {
|
||||
traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
|
||||
}
|
||||
return true, true
|
||||
}
|
||||
matched, err := regexp.MatchString(regPart, requestToken)
|
||||
return (matched && err == nil), false
|
||||
}
|
||||
|
||||
var jsr311Router = RouterJSR311{}
|
||||
|
||||
// detectRoute selects the first match from a list of Routes by inspecting both the Accept and Content-Type
|
||||
// headers of the Request. See also RouterJSR311 in jsr311.go
|
||||
func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
|
||||
// tracing is done inside detectRoute
|
||||
return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
|
||||
}
|
||||
|
||||
// detectWebService returns the best matching webService given the list of path tokens.
|
||||
// see also computeWebserviceScore
|
||||
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
|
||||
var best *WebService
|
||||
score := -1
|
||||
for _, each := range webServices {
|
||||
matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
|
||||
if matches && (eachScore > score) {
|
||||
best = each
|
||||
score = eachScore
|
||||
}
|
||||
}
|
||||
return best
|
||||
}
|
||||
|
||||
// computeWebserviceScore returns whether tokens match and
|
||||
// the weighted score of the longest matching consecutive tokens from the beginning.
|
||||
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
|
||||
if len(tokens) > len(requestTokens) {
|
||||
return false, 0
|
||||
}
|
||||
score := 0
|
||||
for i := 0; i < len(tokens); i++ {
|
||||
each := requestTokens[i]
|
||||
other := tokens[i]
|
||||
if len(each) == 0 && len(other) == 0 {
|
||||
score++
|
||||
continue
|
||||
}
|
||||
if len(other) > 0 && strings.HasPrefix(other, "{") {
|
||||
// no empty match
|
||||
if len(each) == 0 {
|
||||
return false, score
|
||||
}
|
||||
score += 1
|
||||
} else {
|
||||
// not a parameter
|
||||
if each != other {
|
||||
return false, score
|
||||
}
|
||||
score += (len(tokens) - i) * 10 //fuzzy
|
||||
}
|
||||
}
|
||||
return true, score
|
||||
}
|
||||
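To make the selection logic above concrete, here is a minimal, hypothetical sketch (not part of the vendored sources) that installs the CurlyRouter on a container and registers two overlapping routes; per the sortableCurlyRoutes ordering in curly_route.go below, the route with the static "admin" element is preferred over the "{id}" parameter route for GET /users/admin.

    package main

    import (
        "log"
        "net/http"

        restful "github.com/emicklei/go-restful"
    )

    func adminHandler(req *restful.Request, resp *restful.Response) { resp.WriteEntity("admin view") }
    func userHandler(req *restful.Request, resp *restful.Response)  { resp.WriteEntity(req.PathParameter("id")) }

    func main() {
        ws := new(restful.WebService)
        ws.Path("/users").Produces(restful.MIME_JSON)
        ws.Route(ws.GET("/admin").To(adminHandler)) // static element, wins for /users/admin
        ws.Route(ws.GET("/{id}").To(userHandler))   // parameter element

        container := restful.NewContainer()
        container.Router(restful.CurlyRouter{}) // use the curly brace router implemented above
        container.Add(ws)
        log.Fatal(http.ListenAndServe(":8080", container))
    }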
54
vendor/github.com/emicklei/go-restful/curly_route.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
// curlyRoute exists for sorting Routes by the CurlyRouter based on the number of parameters and the number of static path elements.
|
||||
type curlyRoute struct {
|
||||
route Route
|
||||
paramCount int
|
||||
staticCount int
|
||||
}
|
||||
|
||||
// sortableCurlyRoutes orders by most parameters and path elements first.
|
||||
type sortableCurlyRoutes []curlyRoute
|
||||
|
||||
func (s *sortableCurlyRoutes) add(route curlyRoute) {
|
||||
*s = append(*s, route)
|
||||
}
|
||||
|
||||
func (s sortableCurlyRoutes) routes() (routes []Route) {
|
||||
routes = make([]Route, 0, len(s))
|
||||
for _, each := range s {
|
||||
routes = append(routes, each.route) // TODO change return type
|
||||
}
|
||||
return routes
|
||||
}
|
||||
|
||||
func (s sortableCurlyRoutes) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
func (s sortableCurlyRoutes) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
func (s sortableCurlyRoutes) Less(i, j int) bool {
|
||||
a := s[j]
|
||||
b := s[i]
|
||||
|
||||
// primary key
|
||||
if a.staticCount < b.staticCount {
|
||||
return true
|
||||
}
|
||||
if a.staticCount > b.staticCount {
|
||||
return false
|
||||
}
|
||||
// secondary key
|
||||
if a.paramCount < b.paramCount {
|
||||
return true
|
||||
}
|
||||
if a.paramCount > b.paramCount {
|
||||
return false
|
||||
}
|
||||
return a.route.Path < b.route.Path
|
||||
}
|
||||
185
vendor/github.com/emicklei/go-restful/doc.go
generated
vendored
Normal file
@@ -0,0 +1,185 @@
|
||||
/*
|
||||
Package restful, a lean package for creating REST-style WebServices without magic.
|
||||
|
||||
WebServices and Routes
|
||||
|
||||
A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
|
||||
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
|
||||
WebServices must be added to a container (see below) in order to handle Http requests from a server.
|
||||
|
||||
A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
|
||||
This package has the logic to find the best matching Route and if found, call its Function.
|
||||
|
||||
ws := new(restful.WebService)
|
||||
ws.
|
||||
Path("/users").
|
||||
Consumes(restful.MIME_JSON, restful.MIME_XML).
|
||||
Produces(restful.MIME_JSON, restful.MIME_XML)
|
||||
|
||||
ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
|
||||
|
||||
...
|
||||
|
||||
// GET http://localhost:8080/users/1
|
||||
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
|
||||
id := request.PathParameter("user-id")
|
||||
...
|
||||
}
|
||||
|
||||
The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
|
||||
|
||||
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
|
||||
|
||||
Regular expression matching Routes
|
||||
|
||||
A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
|
||||
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
|
||||
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
|
||||
This feature requires the use of a CurlyRouter.
|
||||
|
||||
Containers
|
||||
|
||||
A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
|
||||
Using the statements restful.Add(...) and restful.Filter(...) will register WebServices and Filters with the Default Container.
|
||||
The Default container of go-restful uses the http.DefaultServeMux.
|
||||
You can create your own Container and create a new http.Server for that particular container.
|
||||
|
||||
container := restful.NewContainer()
|
||||
server := &http.Server{Addr: ":8081", Handler: container}
|
||||
|
||||
Filters
|
||||
|
||||
A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
|
||||
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
|
||||
In the restful package there are three hooks into the request,response flow where filters can be added.
|
||||
Each filter must define a FilterFunction:
|
||||
|
||||
func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
|
||||
|
||||
Use the following statement to pass the request,response pair to the next filter or RouteFunction
|
||||
|
||||
chain.ProcessFilter(req, resp)
|
||||
|
||||
Container Filters
|
||||
|
||||
These are processed before any registered WebService.
|
||||
|
||||
// install a (global) filter for the default container (processed before any webservice)
|
||||
restful.Filter(globalLogging)
|
||||
|
||||
WebService Filters
|
||||
|
||||
These are processed before any Route of a WebService.
|
||||
|
||||
// install a webservice filter (processed before any route)
|
||||
ws.Filter(webserviceLogging).Filter(measureTime)
|
||||
|
||||
|
||||
Route Filters
|
||||
|
||||
These are processed before calling the function associated with the Route.
|
||||
|
||||
// install 2 chained route filters (processed before calling findUser)
|
||||
ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
|
||||
|
||||
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
|
||||
|
||||
Response Encoding
|
||||
|
||||
Two encodings are supported: gzip and deflate. To enable this for all responses:
|
||||
|
||||
restful.DefaultContainer.EnableContentEncoding(true)
|
||||
|
||||
If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
|
||||
Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
|
||||
|
||||
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
|
||||
|
||||
OPTIONS support
|
||||
|
||||
By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
|
||||
|
||||
Filter(OPTIONSFilter())
|
||||
|
||||
CORS
|
||||
|
||||
By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
|
||||
|
||||
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
|
||||
Filter(cors.Filter)
|
||||
|
||||
Error Handling
|
||||
|
||||
Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
|
||||
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
|
||||
|
||||
400: Bad Request
|
||||
|
||||
If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
|
||||
|
||||
404: Not Found
|
||||
|
||||
Despite a valid URI, the resource requested may not be available
|
||||
|
||||
500: Internal Server Error
|
||||
|
||||
If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
|
||||
|
||||
405: Method Not Allowed
|
||||
|
||||
The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
|
||||
|
||||
406: Not Acceptable
|
||||
|
||||
The request does not have or has an unknown Accept Header set for this operation.
|
||||
|
||||
415: Unsupported Media Type
|
||||
|
||||
The request does not have or has an unknown Content-Type Header set for this operation.
|
||||
|
||||
ServiceError
|
||||
|
||||
In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
|
||||
|
||||
Performance options
|
||||
|
||||
This package has several options that affect the performance of your service. It is important to understand them and how you can change them.
|
||||
|
||||
restful.DefaultContainer.DoNotRecover(false)
|
||||
|
||||
DoNotRecover controls whether panics will be caught to return HTTP 500.
|
||||
If set to false, the container will recover from panics.
|
||||
Default value is true
|
||||
|
||||
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
|
||||
|
||||
If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
|
||||
Because writers are expensive structures, performance improves further when using a preloaded cache. You can also inject your own implementation.
|
||||
|
||||
Trouble shooting
|
||||
|
||||
This package has the means to produce detailed logging of the complete Http request matching process and filter invocation.
|
||||
Enabling this feature requires you to set a restful.StdLogger implementation (e.g. a log.Logger), such as:
|
||||
|
||||
restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
|
||||
|
||||
Logging
|
||||
|
||||
The restful.SetLogger() method allows you to override the logger used by the package. By default restful
|
||||
uses the standard library `log` package and logs to stderr. Different logging packages are supported as
|
||||
long as they conform to the `StdLogger` interface defined in the `log` sub-package; writing an adapter for your
|
||||
preferred package is simple.
|
||||
|
||||
Resources
|
||||
|
||||
[project]: https://github.com/emicklei/go-restful
|
||||
|
||||
[examples]: https://github.com/emicklei/go-restful/blob/master/examples
|
||||
|
||||
[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
|
||||
|
||||
[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
|
||||
|
||||
(c) 2012-2015, http://ernestmicklei.com. MIT License
|
||||
*/
|
||||
package restful
|
||||
162
vendor/github.com/emicklei/go-restful/entity_accessors.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
|
||||
type EntityReaderWriter interface {
|
||||
// Read a serialized version of the value from the request.
|
||||
// The Request may have a decompressing reader. Depends on Content-Encoding.
|
||||
Read(req *Request, v interface{}) error
|
||||
|
||||
// Write a serialized version of the value on the response.
|
||||
// The Response may have a compressing writer. Depends on Accept-Encoding.
|
||||
// status should be a valid Http Status code
|
||||
Write(resp *Response, status int, v interface{}) error
|
||||
}
|
||||
|
||||
// entityAccessRegistry is a singleton
|
||||
var entityAccessRegistry = &entityReaderWriters{
|
||||
protection: new(sync.RWMutex),
|
||||
accessors: map[string]EntityReaderWriter{},
|
||||
}
|
||||
|
||||
// entityReaderWriters associates MIME to an EntityReaderWriter
|
||||
type entityReaderWriters struct {
|
||||
protection *sync.RWMutex
|
||||
accessors map[string]EntityReaderWriter
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
|
||||
RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
|
||||
}
|
||||
|
||||
// RegisterEntityAccessor adds or overrides the ReaderWriter for encoding content with this MIME type.
|
||||
func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
|
||||
entityAccessRegistry.protection.Lock()
|
||||
defer entityAccessRegistry.protection.Unlock()
|
||||
entityAccessRegistry.accessors[mime] = erw
|
||||
}
|
||||
|
||||
// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
|
||||
// This package is already initialized with such an accessor using the MIME_JSON contentType.
|
||||
func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
|
||||
return entityJSONAccess{ContentType: contentType}
|
||||
}
|
||||
|
||||
// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
|
||||
// This package is already initialized with such an accessor using the MIME_XML contentType.
|
||||
func NewEntityAccessorXML(contentType string) EntityReaderWriter {
|
||||
return entityXMLAccess{ContentType: contentType}
|
||||
}
|
||||
|
||||
// accessorAt returns the registered ReaderWriter for this MIME type.
|
||||
func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
|
||||
r.protection.RLock()
|
||||
defer r.protection.RUnlock()
|
||||
er, ok := r.accessors[mime]
|
||||
if !ok {
|
||||
// retry with reverse lookup
|
||||
// more expensive but we are in an exceptional situation anyway
|
||||
for k, v := range r.accessors {
|
||||
if strings.Contains(mime, k) {
|
||||
return v, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return er, ok
|
||||
}
|
||||
|
||||
// entityXMLAccess is a EntityReaderWriter for XML encoding
|
||||
type entityXMLAccess struct {
|
||||
// This is used for setting the Content-Type header when writing
|
||||
ContentType string
|
||||
}
|
||||
|
||||
// Read unmarshals the value from XML
|
||||
func (e entityXMLAccess) Read(req *Request, v interface{}) error {
|
||||
return xml.NewDecoder(req.Request.Body).Decode(v)
|
||||
}
|
||||
|
||||
// Write marshals the value to XML and sets the Content-Type Header.
|
||||
func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
|
||||
return writeXML(resp, status, e.ContentType, v)
|
||||
}
|
||||
|
||||
// writeXML marshals the value to XML and sets the Content-Type Header.
|
||||
func writeXML(resp *Response, status int, contentType string, v interface{}) error {
|
||||
if v == nil {
|
||||
resp.WriteHeader(status)
|
||||
// do not write a nil representation
|
||||
return nil
|
||||
}
|
||||
if resp.prettyPrint {
|
||||
// pretty output must be created and written explicitly
|
||||
output, err := xml.MarshalIndent(v, " ", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
_, err = resp.Write([]byte(xml.Header))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = resp.Write(output)
|
||||
return err
|
||||
}
|
||||
// not-so-pretty
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
return xml.NewEncoder(resp).Encode(v)
|
||||
}
|
||||
|
||||
// entityJSONAccess is a EntityReaderWriter for JSON encoding
|
||||
type entityJSONAccess struct {
|
||||
// This is used for setting the Content-Type header when writing
|
||||
ContentType string
|
||||
}
|
||||
|
||||
// Read unmarshals the value from JSON
|
||||
func (e entityJSONAccess) Read(req *Request, v interface{}) error {
|
||||
decoder := NewDecoder(req.Request.Body)
|
||||
decoder.UseNumber()
|
||||
return decoder.Decode(v)
|
||||
}
|
||||
|
||||
// Write marshals the value to JSON and sets the Content-Type Header.
|
||||
func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
|
||||
return writeJSON(resp, status, e.ContentType, v)
|
||||
}
|
||||
|
||||
// writeJSON marshals the value to JSON and sets the Content-Type Header.
|
||||
func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
|
||||
if v == nil {
|
||||
resp.WriteHeader(status)
|
||||
// do not write a nil representation
|
||||
return nil
|
||||
}
|
||||
if resp.prettyPrint {
|
||||
// pretty output must be created and written explicitly
|
||||
output, err := MarshalIndent(v, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
_, err = resp.Write(output)
|
||||
return err
|
||||
}
|
||||
// not-so-pretty
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
return NewEncoder(resp).Encode(v)
|
||||
}
|
||||
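As a usage note for the registry above, a small hedged sketch (the vendor media type is hypothetical) that reuses the JSON accessor for an additional MIME type, so ReadEntity and WriteEntity can handle it:

    package main

    import restful "github.com/emicklei/go-restful"

    // hypothetical vendor media type; reuse the JSON accessor defined above
    const mimeVendorJSON = "application/vnd.example.v1+json"

    func main() {
        restful.RegisterEntityAccessor(mimeVendorJSON, restful.NewEntityAccessorJSON(mimeVendorJSON))
        // ... define WebServices whose routes Consume/Produce mimeVendorJSON
    }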
35
vendor/github.com/emicklei/go-restful/filter.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
|
||||
type FilterChain struct {
|
||||
Filters []FilterFunction // ordered list of FilterFunction
|
||||
Index int // index into filters that is currently in progress
|
||||
Target RouteFunction // function to call after passing all filters
|
||||
}
|
||||
|
||||
// ProcessFilter passes the request,response pair to the next Filter in the chain.
|
||||
// Each filter can decide to proceed to the next Filter or handle the Response itself.
|
||||
func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
|
||||
if f.Index < len(f.Filters) {
|
||||
f.Index++
|
||||
f.Filters[f.Index-1](request, response, f)
|
||||
} else {
|
||||
f.Target(request, response)
|
||||
}
|
||||
}
|
||||
|
||||
// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
|
||||
type FilterFunction func(*Request, *Response, *FilterChain)
|
||||
|
||||
// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
|
||||
// See examples/restful-no-cache-filter.go for usage
|
||||
func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
|
||||
resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
|
||||
resp.Header().Set("Pragma", "no-cache") // HTTP 1.0.
|
||||
resp.Header().Set("Expires", "0") // Proxies.
|
||||
chain.ProcessFilter(req, resp)
|
||||
}
|
||||
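To illustrate the FilterChain mechanics above, a minimal global logging filter (a sketch with illustrative names) that times each request and then hands control to the next filter or the RouteFunction:

    package main

    import (
        "log"
        "time"

        restful "github.com/emicklei/go-restful"
    )

    // globalLogging logs method, path and elapsed time for every request.
    func globalLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
        start := time.Now()
        chain.ProcessFilter(req, resp) // without this call the chain stops here
        log.Printf("%s %s took %v", req.Request.Method, req.Request.URL.Path, time.Since(start))
    }

    func main() {
        restful.Filter(globalLogging) // install on the default container
        // register WebServices with restful.Add(...) and start an http.Server as usual
    }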
11
vendor/github.com/emicklei/go-restful/json.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// +build !jsoniter
|
||||
|
||||
package restful
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
var (
|
||||
MarshalIndent = json.MarshalIndent
|
||||
NewDecoder = json.NewDecoder
|
||||
NewEncoder = json.NewEncoder
|
||||
)
|
||||
12
vendor/github.com/emicklei/go-restful/jsoniter.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// +build jsoniter
|
||||
|
||||
package restful
|
||||
|
||||
import "github.com/json-iterator/go"
|
||||
|
||||
var (
|
||||
json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
MarshalIndent = json.MarshalIndent
|
||||
NewDecoder = json.NewDecoder
|
||||
NewEncoder = json.NewEncoder
|
||||
)
|
||||
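Note on the two files above: the `// +build` constraints make them mutually exclusive. Building with the jsoniter tag (for example, `go build -tags jsoniter`) selects the json-iterator implementation; otherwise the encoding/json variant in json.go is compiled.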
297
vendor/github.com/emicklei/go-restful/jsr311.go
generated
vendored
Normal file
@@ -0,0 +1,297 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
|
||||
// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
|
||||
// RouterJSR311 implements the Router interface.
|
||||
// Concept of locators is not implemented.
|
||||
type RouterJSR311 struct{}
|
||||
|
||||
// SelectRoute is part of the Router interface and returns the best match
|
||||
// for the WebService and its Route for the given Request.
|
||||
func (r RouterJSR311) SelectRoute(
|
||||
webServices []*WebService,
|
||||
httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
|
||||
|
||||
// Identify the root resource class (WebService)
|
||||
dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
|
||||
if err != nil {
|
||||
return nil, nil, NewError(http.StatusNotFound, "")
|
||||
}
|
||||
// Obtain the set of candidate methods (Routes)
|
||||
routes := r.selectRoutes(dispatcher, finalMatch)
|
||||
if len(routes) == 0 {
|
||||
return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
|
||||
}
|
||||
|
||||
// Identify the method (Route) that will handle the request
|
||||
route, ok := r.detectRoute(routes, httpRequest)
|
||||
return dispatcher, route, ok
|
||||
}
|
||||
|
||||
// ExtractParameters is used to obtain the path parameters from the route using the same matching
|
||||
// engine as the JSR 311 router.
|
||||
func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string {
|
||||
webServiceExpr := webService.pathExpr
|
||||
webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath)
|
||||
pathParameters := r.extractParams(webServiceExpr, webServiceMatches)
|
||||
routeExpr := route.pathExpr
|
||||
routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1])
|
||||
routeParams := r.extractParams(routeExpr, routeMatches)
|
||||
for key, value := range routeParams {
|
||||
pathParameters[key] = value
|
||||
}
|
||||
return pathParameters
|
||||
}
|
||||
|
||||
func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string {
|
||||
params := map[string]string{}
|
||||
for i := 1; i < len(matches); i++ {
|
||||
if len(pathExpr.VarNames) >= i {
|
||||
params[pathExpr.VarNames[i-1]] = matches[i]
|
||||
}
|
||||
}
|
||||
return params
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
|
||||
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
|
||||
candidates := make([]*Route, 0, 8)
|
||||
for i, each := range routes {
|
||||
ok := true
|
||||
for _, fn := range each.If {
|
||||
if !fn(httpRequest) {
|
||||
ok = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if ok {
|
||||
candidates = append(candidates, &routes[i])
|
||||
}
|
||||
}
|
||||
if len(candidates) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes))
|
||||
}
|
||||
return nil, NewError(http.StatusNotFound, "404: Not Found")
|
||||
}
|
||||
|
||||
// http method
|
||||
previous := candidates
|
||||
candidates = candidates[:0]
|
||||
for _, each := range previous {
|
||||
if httpRequest.Method == each.Method {
|
||||
candidates = append(candidates, each)
|
||||
}
|
||||
}
|
||||
if len(candidates) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method)
|
||||
}
|
||||
return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
|
||||
}
|
||||
|
||||
// content-type
|
||||
contentType := httpRequest.Header.Get(HEADER_ContentType)
|
||||
previous = candidates
|
||||
candidates = candidates[:0]
|
||||
for _, each := range previous {
|
||||
if each.matchesContentType(contentType) {
|
||||
candidates = append(candidates, each)
|
||||
}
|
||||
}
|
||||
if len(candidates) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
|
||||
}
|
||||
if httpRequest.ContentLength > 0 {
|
||||
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
|
||||
}
|
||||
}
|
||||
|
||||
// accept
|
||||
previous = candidates
|
||||
candidates = candidates[:0]
|
||||
accept := httpRequest.Header.Get(HEADER_Accept)
|
||||
if len(accept) == 0 {
|
||||
accept = "*/*"
|
||||
}
|
||||
for _, each := range previous {
|
||||
if each.matchesAccept(accept) {
|
||||
candidates = append(candidates, each)
|
||||
}
|
||||
}
|
||||
if len(candidates) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept)
|
||||
}
|
||||
return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
|
||||
}
|
||||
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
|
||||
return candidates[0], nil
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
|
||||
// n/m > n/* > */*
|
||||
func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
|
||||
// TODO
|
||||
return &routes[0]
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
|
||||
func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
|
||||
filtered := &sortableRouteCandidates{}
|
||||
for _, each := range dispatcher.Routes() {
|
||||
pathExpr := each.pathExpr
|
||||
matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
|
||||
if matches != nil {
|
||||
lastMatch := matches[len(matches)-1]
|
||||
if len(lastMatch) == 0 || lastMatch == "/" { // include only if the remaining match is empty or "/"
|
||||
filtered.candidates = append(filtered.candidates,
|
||||
routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(filtered.candidates) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
|
||||
}
|
||||
return []Route{}
|
||||
}
|
||||
sort.Sort(sort.Reverse(filtered))
|
||||
|
||||
// select other routes from candidates whose expression matches the remaining path
|
||||
matchingRoutes := []Route{filtered.candidates[0].route}
|
||||
for c := 1; c < len(filtered.candidates); c++ {
|
||||
each := filtered.candidates[c]
|
||||
if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
|
||||
matchingRoutes = append(matchingRoutes, each.route)
|
||||
}
|
||||
}
|
||||
return matchingRoutes
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
|
||||
func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
|
||||
filtered := &sortableDispatcherCandidates{}
|
||||
for _, each := range dispatchers {
|
||||
matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
|
||||
if matches != nil {
|
||||
filtered.candidates = append(filtered.candidates,
|
||||
dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
|
||||
}
|
||||
}
|
||||
if len(filtered.candidates) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
|
||||
}
|
||||
return nil, "", errors.New("not found")
|
||||
}
|
||||
sort.Sort(sort.Reverse(filtered))
|
||||
return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
|
||||
}
|
||||
|
||||
// Types and functions to support the sorting of Routes
|
||||
|
||||
type routeCandidate struct {
|
||||
route Route
|
||||
matchesCount int // the number of capturing groups
|
||||
literalCount int // the number of literal characters (i.e. those not resulting from template variable substitution)
|
||||
nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’)
|
||||
}
|
||||
|
||||
func (r routeCandidate) expressionToMatch() string {
|
||||
return r.route.pathExpr.Source
|
||||
}
|
||||
|
||||
func (r routeCandidate) String() string {
|
||||
return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
|
||||
}
|
||||
|
||||
type sortableRouteCandidates struct {
|
||||
candidates []routeCandidate
|
||||
}
|
||||
|
||||
func (rcs *sortableRouteCandidates) Len() int {
|
||||
return len(rcs.candidates)
|
||||
}
|
||||
func (rcs *sortableRouteCandidates) Swap(i, j int) {
|
||||
rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
|
||||
}
|
||||
func (rcs *sortableRouteCandidates) Less(i, j int) bool {
|
||||
ci := rcs.candidates[i]
|
||||
cj := rcs.candidates[j]
|
||||
// primary key
|
||||
if ci.literalCount < cj.literalCount {
|
||||
return true
|
||||
}
|
||||
if ci.literalCount > cj.literalCount {
|
||||
return false
|
||||
}
|
||||
// secondary key
|
||||
if ci.matchesCount < cj.matchesCount {
|
||||
return true
|
||||
}
|
||||
if ci.matchesCount > cj.matchesCount {
|
||||
return false
|
||||
}
|
||||
// tertiary key
|
||||
if ci.nonDefaultCount < cj.nonDefaultCount {
|
||||
return true
|
||||
}
|
||||
if ci.nonDefaultCount > cj.nonDefaultCount {
|
||||
return false
|
||||
}
|
||||
// quaternary key ("source" is interpreted as Path)
|
||||
return ci.route.Path < cj.route.Path
|
||||
}
|
||||
|
||||
// Types and functions to support the sorting of Dispatchers
|
||||
|
||||
type dispatcherCandidate struct {
|
||||
dispatcher *WebService
|
||||
finalMatch string
|
||||
matchesCount int // the number of capturing groups
|
||||
literalCount int // the number of literal characters (i.e. those not resulting from template variable substitution)
|
||||
nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’)
|
||||
}
|
||||
type sortableDispatcherCandidates struct {
|
||||
candidates []dispatcherCandidate
|
||||
}
|
||||
|
||||
func (dc *sortableDispatcherCandidates) Len() int {
|
||||
return len(dc.candidates)
|
||||
}
|
||||
func (dc *sortableDispatcherCandidates) Swap(i, j int) {
|
||||
dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
|
||||
}
|
||||
func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
|
||||
ci := dc.candidates[i]
|
||||
cj := dc.candidates[j]
|
||||
// primary key
|
||||
if ci.matchesCount < cj.matchesCount {
|
||||
return true
|
||||
}
|
||||
if ci.matchesCount > cj.matchesCount {
|
||||
return false
|
||||
}
|
||||
// secondary key
|
||||
if ci.literalCount < cj.literalCount {
|
||||
return true
|
||||
}
|
||||
if ci.literalCount > cj.literalCount {
|
||||
return false
|
||||
}
|
||||
// tertiary key
|
||||
return ci.nonDefaultCount < cj.nonDefaultCount
|
||||
}
|
||||
34
vendor/github.com/emicklei/go-restful/log/log.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
stdlog "log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
|
||||
type StdLogger interface {
|
||||
Print(v ...interface{})
|
||||
Printf(format string, v ...interface{})
|
||||
}
|
||||
|
||||
var Logger StdLogger
|
||||
|
||||
func init() {
|
||||
// default Logger
|
||||
SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
|
||||
}
|
||||
|
||||
// SetLogger sets the logger for this package
|
||||
func SetLogger(customLogger StdLogger) {
|
||||
Logger = customLogger
|
||||
}
|
||||
|
||||
// Print delegates to the Logger
|
||||
func Print(v ...interface{}) {
|
||||
Logger.Print(v...)
|
||||
}
|
||||
|
||||
// Printf delegates to the Logger
|
||||
func Printf(format string, v ...interface{}) {
|
||||
Logger.Printf(format, v...)
|
||||
}
|
||||
32
vendor/github.com/emicklei/go-restful/logger.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2014 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
import (
|
||||
"github.com/emicklei/go-restful/log"
|
||||
)
|
||||
|
||||
var trace bool = false
|
||||
var traceLogger log.StdLogger
|
||||
|
||||
func init() {
|
||||
traceLogger = log.Logger // use the package logger by default
|
||||
}
|
||||
|
||||
// TraceLogger enables detailed logging of Http request matching and filter invocation. By default no logger is set.
|
||||
// You may call EnableTracing() directly to enable trace logging to the package-wide logger.
|
||||
func TraceLogger(logger log.StdLogger) {
|
||||
traceLogger = logger
|
||||
EnableTracing(logger != nil)
|
||||
}
|
||||
|
||||
// SetLogger exposes the setter for the global logger on the top-level package
|
||||
func SetLogger(customLogger log.StdLogger) {
|
||||
log.SetLogger(customLogger)
|
||||
}
|
||||
|
||||
// EnableTracing can be used to turn trace logging on and off.
|
||||
func EnableTracing(enabled bool) {
|
||||
trace = enabled
|
||||
}
|
||||
50
vendor/github.com/emicklei/go-restful/mime.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
package restful
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type mime struct {
|
||||
media string
|
||||
quality float64
|
||||
}
|
||||
|
||||
// insertMime adds a mime to a list and keeps it sorted by quality.
|
||||
func insertMime(l []mime, e mime) []mime {
|
||||
for i, each := range l {
|
||||
// if the current mime has lower quality than the new one, insert the new one before it
|
||||
if e.quality > each.quality {
|
||||
left := append([]mime{}, l[0:i]...)
|
||||
return append(append(left, e), l[i:]...)
|
||||
}
|
||||
}
|
||||
return append(l, e)
|
||||
}
|
||||
|
||||
const qFactorWeightingKey = "q"
|
||||
|
||||
// sortedMimes returns a list of mime sorted (desc) by its specified quality.
|
||||
// e.g. text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
|
||||
func sortedMimes(accept string) (sorted []mime) {
|
||||
for _, each := range strings.Split(accept, ",") {
|
||||
typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
|
||||
if len(typeAndQuality) == 1 {
|
||||
sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
|
||||
} else {
|
||||
// take factor
|
||||
qAndWeight := strings.Split(typeAndQuality[1], "=")
|
||||
if len(qAndWeight) == 2 && strings.Trim(qAndWeight[0], " ") == qFactorWeightingKey {
|
||||
f, err := strconv.ParseFloat(qAndWeight[1], 64)
|
||||
if err != nil {
|
||||
traceLogger.Printf("unable to parse quality in %s, %v", each, err)
|
||||
} else {
|
||||
sorted = insertMime(sorted, mime{typeAndQuality[0], f})
|
||||
}
|
||||
} else {
|
||||
sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
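A hedged, in-package test sketch (not part of the vendored sources) illustrating the q-factor ordering that insertMime and sortedMimes implement:

    package restful

    import "testing"

    func TestSortedMimesOrdersByQuality(t *testing.T) {
        sorted := sortedMimes("text/html,application/xml;q=0.9,*/*;q=0.8")
        // expected order: text/html (q=1.0), application/xml (q=0.9), */* (q=0.8)
        if len(sorted) != 3 || sorted[0].media != "text/html" || sorted[1].media != "application/xml" || sorted[2].media != "*/*" {
            t.Fatalf("unexpected order: %+v", sorted)
        }
    }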
34
vendor/github.com/emicklei/go-restful/options_filter.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
package restful
|
||||
|
||||
import "strings"
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
|
||||
// and provides the response with a set of allowed methods for the request URL Path.
|
||||
// As for any filter, you can also install it for a particular WebService within a Container.
|
||||
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
|
||||
func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
|
||||
if "OPTIONS" != req.Request.Method {
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
|
||||
archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
|
||||
methods := strings.Join(c.computeAllowedMethods(req), ",")
|
||||
origin := req.Request.Header.Get(HEADER_Origin)
|
||||
|
||||
resp.AddHeader(HEADER_Allow, methods)
|
||||
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
|
||||
resp.AddHeader(HEADER_AccessControlAllowHeaders, archs)
|
||||
resp.AddHeader(HEADER_AccessControlAllowMethods, methods)
|
||||
}
|
||||
|
||||
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
|
||||
// and provides the response with a set of allowed methods for the request URL Path.
|
||||
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
|
||||
func OPTIONSFilter() FilterFunction {
|
||||
return DefaultContainer.OPTIONSFilter
|
||||
}
|
||||
143
vendor/github.com/emicklei/go-restful/parameter.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
const (
|
||||
// PathParameterKind = indicator of Request parameter type "path"
|
||||
PathParameterKind = iota
|
||||
|
||||
// QueryParameterKind = indicator of Request parameter type "query"
|
||||
QueryParameterKind
|
||||
|
||||
// BodyParameterKind = indicator of Request parameter type "body"
|
||||
BodyParameterKind
|
||||
|
||||
// HeaderParameterKind = indicator of Request parameter type "header"
|
||||
HeaderParameterKind
|
||||
|
||||
// FormParameterKind = indicator of Request parameter type "form"
|
||||
FormParameterKind
|
||||
|
||||
// CollectionFormatCSV comma separated values `foo,bar`
|
||||
CollectionFormatCSV = CollectionFormat("csv")
|
||||
|
||||
// CollectionFormatSSV space separated values `foo bar`
|
||||
CollectionFormatSSV = CollectionFormat("ssv")
|
||||
|
||||
// CollectionFormatTSV tab separated values `foo\tbar`
|
||||
CollectionFormatTSV = CollectionFormat("tsv")
|
||||
|
||||
// CollectionFormatPipes pipe separated values `foo|bar`
|
||||
CollectionFormatPipes = CollectionFormat("pipes")
|
||||
|
||||
// CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single
|
||||
// instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters
|
||||
CollectionFormatMulti = CollectionFormat("multi")
|
||||
)
|
||||
|
||||
type CollectionFormat string
|
||||
|
||||
func (cf CollectionFormat) String() string {
|
||||
return string(cf)
|
||||
}
|
||||
|
||||
// Parameter is for documenting the parameter used in a Http Request
|
||||
// ParameterData kinds are Path,Query and Body
|
||||
type Parameter struct {
|
||||
data *ParameterData
|
||||
}
|
||||
|
||||
// ParameterData represents the state of a Parameter.
|
||||
// It is made public to make it accessible to e.g. the Swagger package.
|
||||
type ParameterData struct {
|
||||
Name, Description, DataType, DataFormat string
|
||||
Kind int
|
||||
Required bool
|
||||
AllowableValues map[string]string
|
||||
AllowMultiple bool
|
||||
DefaultValue string
|
||||
CollectionFormat string
|
||||
}
|
||||
|
||||
// Data returns the state of the Parameter
|
||||
func (p *Parameter) Data() ParameterData {
|
||||
return *p.data
|
||||
}
|
||||
|
||||
// Kind returns the parameter type indicator (see const for valid values)
|
||||
func (p *Parameter) Kind() int {
|
||||
return p.data.Kind
|
||||
}
|
||||
|
||||
func (p *Parameter) bePath() *Parameter {
|
||||
p.data.Kind = PathParameterKind
|
||||
return p
|
||||
}
|
||||
func (p *Parameter) beQuery() *Parameter {
|
||||
p.data.Kind = QueryParameterKind
|
||||
return p
|
||||
}
|
||||
func (p *Parameter) beBody() *Parameter {
|
||||
p.data.Kind = BodyParameterKind
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *Parameter) beHeader() *Parameter {
|
||||
p.data.Kind = HeaderParameterKind
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *Parameter) beForm() *Parameter {
|
||||
p.data.Kind = FormParameterKind
|
||||
return p
|
||||
}
|
||||
|
||||
// Required sets the required field and returns the receiver
|
||||
func (p *Parameter) Required(required bool) *Parameter {
|
||||
p.data.Required = required
|
||||
return p
|
||||
}
|
||||
|
||||
// AllowMultiple sets the allowMultiple field and returns the receiver
|
||||
func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
|
||||
p.data.AllowMultiple = multiple
|
||||
return p
|
||||
}
|
||||
|
||||
// AllowableValues sets the allowableValues field and returns the receiver
|
||||
func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
|
||||
p.data.AllowableValues = values
|
||||
return p
|
||||
}
|
||||
|
||||
// DataType sets the dataType field and returns the receiver
|
||||
func (p *Parameter) DataType(typeName string) *Parameter {
|
||||
p.data.DataType = typeName
|
||||
return p
|
||||
}
|
||||
|
||||
// DataFormat sets the dataFormat field for Swagger UI
|
||||
func (p *Parameter) DataFormat(formatName string) *Parameter {
|
||||
p.data.DataFormat = formatName
|
||||
return p
|
||||
}
|
||||
|
||||
// DefaultValue sets the default value field and returns the receiver
|
||||
func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
|
||||
p.data.DefaultValue = stringRepresentation
|
||||
return p
|
||||
}
|
||||
|
||||
// Description sets the description value field and returns the receiver
|
||||
func (p *Parameter) Description(doc string) *Parameter {
|
||||
p.data.Description = doc
|
||||
return p
|
||||
}
|
||||
|
||||
// CollectionFormat sets the collection format for an array type
|
||||
func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter {
|
||||
p.data.CollectionFormat = format.String()
|
||||
return p
|
||||
}
|
||||
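A short sketch of how the Parameter builder above is typically used; the WebService.QueryParameter, RouteBuilder.Param and related helpers are assumed from the rest of the package and are not part of this diff:

    package main

    import restful "github.com/emicklei/go-restful"

    func listUsers(req *restful.Request, resp *restful.Response) {
        resp.WriteEntity(req.QueryParameter("count"))
    }

    func newUserService() *restful.WebService {
        ws := new(restful.WebService)
        ws.Path("/users")
        // a documented, optional query parameter with a default value
        count := ws.QueryParameter("count", "maximum number of results").
            DataType("integer").
            DefaultValue("10").
            Required(false)
        ws.Route(ws.GET("").Param(count).To(listUsers))
        return ws
    }

    func main() {
        restful.Add(newUserService())
        // start an http.Server with restful.DefaultContainer as usual
    }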
74
vendor/github.com/emicklei/go-restful/path_expression.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PathExpression holds a compiled path expression (RegExp) needed to match against
|
||||
// Http request paths and to extract path parameter values.
|
||||
type pathExpression struct {
|
||||
LiteralCount int // the number of literal characters (i.e. those not resulting from template variable substitution)
|
||||
VarNames []string // the names of parameters (enclosed by {}) in the path
|
||||
VarCount int // the number of named parameters (enclosed by {}) in the path
|
||||
Matcher *regexp.Regexp
|
||||
Source string // Path as defined by the RouteBuilder
|
||||
tokens []string
|
||||
}
|
||||
|
||||
// NewPathExpression creates a PathExpression from the input URL path.
|
||||
// Returns an error if the path is invalid.
|
||||
func newPathExpression(path string) (*pathExpression, error) {
|
||||
expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path)
|
||||
compiled, err := regexp.Compile(expression)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
|
||||
func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) {
|
||||
var buffer bytes.Buffer
|
||||
buffer.WriteString("^")
|
||||
//tokens = strings.Split(template, "/")
|
||||
tokens = tokenizePath(template)
|
||||
for _, each := range tokens {
|
||||
if each == "" {
|
||||
continue
|
||||
}
|
||||
buffer.WriteString("/")
|
||||
if strings.HasPrefix(each, "{") {
|
||||
// check for regular expression in variable
|
||||
colon := strings.Index(each, ":")
|
||||
var varName string
|
||||
if colon != -1 {
|
||||
// extract expression
|
||||
varName = strings.TrimSpace(each[1:colon])
|
||||
paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
|
||||
if paramExpr == "*" { // special case
|
||||
buffer.WriteString("(.*)")
|
||||
} else {
|
||||
buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
|
||||
}
|
||||
} else {
|
||||
// plain var
|
||||
varName = strings.TrimSpace(each[1 : len(each)-1])
|
||||
buffer.WriteString("([^/]+?)")
|
||||
}
|
||||
varNames = append(varNames, varName)
|
||||
varCount += 1
|
||||
} else {
|
||||
literalCount += len(each)
|
||||
encoded := each // TODO URI encode
|
||||
buffer.WriteString(regexp.QuoteMeta(encoded))
|
||||
}
|
||||
}
|
||||
return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens
|
||||
}
|
||||
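A hedged, in-package test sketch of what templateToRegularExpression produces for a template with both a regex-constrained parameter and a wildcard tail; the expected values are derived from the code above, not verified against upstream tests:

    package restful

    import "testing"

    func TestTemplateToRegularExpressionSketch(t *testing.T) {
        expr, literalCount, varNames, varCount, _ := templateToRegularExpression("/users/{id:[0-9]+}/files/{path:*}")
        // regex parameters become capture groups, the wildcard tail becomes (.*)
        if expr != "^/users/([0-9]+)/files/(.*)(/.*)?$" {
            t.Errorf("unexpected expression: %s", expr)
        }
        // "users" + "files" contribute 10 literal characters; two named variables
        if literalCount != 10 || varCount != 2 {
            t.Errorf("unexpected counts: literals=%d vars=%d", literalCount, varCount)
        }
        if len(varNames) != 2 || varNames[0] != "id" || varNames[1] != "path" {
            t.Errorf("unexpected variable names: %v", varNames)
        }
    }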
63
vendor/github.com/emicklei/go-restful/path_processor.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
package restful
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Copyright 2018 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path.
|
||||
// If a Router does not implement this interface then the default behaviour will be used.
|
||||
type PathProcessor interface {
|
||||
// ExtractParameters gets the path parameters defined in the route and webService from the urlPath
|
||||
ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string
|
||||
}
|
||||
|
||||
type defaultPathProcessor struct{}
|
||||
|
||||
// Extract the parameters from the request url path
|
||||
func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string {
|
||||
urlParts := tokenizePath(urlPath)
|
||||
pathParameters := map[string]string{}
|
||||
for i, key := range r.pathParts {
|
||||
var value string
|
||||
if i >= len(urlParts) {
|
||||
value = ""
|
||||
} else {
|
||||
value = urlParts[i]
|
||||
}
|
||||
if strings.HasPrefix(key, "{") { // path-parameter
|
||||
if colon := strings.Index(key, ":"); colon != -1 {
|
||||
// extract by regex
|
||||
regPart := key[colon+1 : len(key)-1]
|
||||
keyPart := key[1:colon]
|
||||
if regPart == "*" {
|
||||
pathParameters[keyPart] = untokenizePath(i, urlParts)
|
||||
break
|
||||
} else {
|
||||
pathParameters[keyPart] = value
|
||||
}
|
||||
} else {
|
||||
// without enclosing {}
|
||||
pathParameters[key[1:len(key)-1]] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
return pathParameters
|
||||
}
|
||||
|
||||
// untokenizePath joins the remaining parts back into a URL path using the slash separator
|
||||
func untokenizePath(offset int, parts []string) string {
|
||||
var buffer bytes.Buffer
|
||||
for p := offset; p < len(parts); p++ {
|
||||
buffer.WriteString(parts[p])
|
||||
// do not end
|
||||
if p < len(parts)-1 {
|
||||
buffer.WriteString("/")
|
||||
}
|
||||
}
|
||||
return buffer.String()
|
||||
}
|
||||
118
vendor/github.com/emicklei/go-restful/request.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"compress/zlib"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
var defaultRequestContentType string
|
||||
|
||||
// Request is a wrapper for a http Request that provides convenience methods
|
||||
type Request struct {
|
||||
Request *http.Request
|
||||
pathParameters map[string]string
|
||||
attributes map[string]interface{} // for storing request-scoped values
|
||||
selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
|
||||
}
|
||||
|
||||
func NewRequest(httpRequest *http.Request) *Request {
|
||||
return &Request{
|
||||
Request: httpRequest,
|
||||
pathParameters: map[string]string{},
|
||||
attributes: map[string]interface{}{},
|
||||
} // empty parameters, attributes
|
||||
}
|
||||
|
||||
// If ContentType is missing or */* is given then fall back to this type, otherwise
|
||||
// a "Unable to unmarshal content of type:" response is returned.
|
||||
// Valid values are restful.MIME_JSON and restful.MIME_XML
|
||||
// Example:
|
||||
// restful.DefaultRequestContentType(restful.MIME_JSON)
|
||||
func DefaultRequestContentType(mime string) {
|
||||
defaultRequestContentType = mime
|
||||
}
|
||||
|
||||
// PathParameter accesses the Path parameter value by its name
|
||||
func (r *Request) PathParameter(name string) string {
|
||||
return r.pathParameters[name]
|
||||
}
|
||||
|
||||
// PathParameters accesses the Path parameter values
|
||||
func (r *Request) PathParameters() map[string]string {
|
||||
return r.pathParameters
|
||||
}
|
||||
|
||||
// QueryParameter returns the (first) Query parameter value by its name
|
||||
func (r *Request) QueryParameter(name string) string {
|
||||
return r.Request.FormValue(name)
|
||||
}
|
||||
|
||||
// QueryParameters returns all the query parameter values by name
|
||||
func (r *Request) QueryParameters(name string) []string {
|
||||
return r.Request.URL.Query()[name]
|
||||
}
|
||||
|
||||
// BodyParameter parses the body of the request (typically once, for a POST or a PUT) and returns the value of the given name or an error.
|
||||
func (r *Request) BodyParameter(name string) (string, error) {
|
||||
err := r.Request.ParseForm()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return r.Request.PostFormValue(name), nil
|
||||
}
|
||||
|
||||
// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
|
||||
func (r *Request) HeaderParameter(name string) string {
|
||||
return r.Request.Header.Get(name)
|
||||
}
|
||||
|
||||
// ReadEntity checks the Content-Type header and reads the content into the entityPointer.
|
||||
func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
|
||||
contentType := r.Request.Header.Get(HEADER_ContentType)
|
||||
contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
|
||||
|
||||
// check if the request body needs decompression
|
||||
if ENCODING_GZIP == contentEncoding {
|
||||
gzipReader := currentCompressorProvider.AcquireGzipReader()
|
||||
defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
|
||||
gzipReader.Reset(r.Request.Body)
|
||||
r.Request.Body = gzipReader
|
||||
} else if ENCODING_DEFLATE == contentEncoding {
|
||||
zlibReader, err := zlib.NewReader(r.Request.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Request.Body = zlibReader
|
||||
}
|
||||
|
||||
// lookup the EntityReader, use defaultRequestContentType if needed and provided
|
||||
entityReader, ok := entityAccessRegistry.accessorAt(contentType)
|
||||
if !ok {
|
||||
if len(defaultRequestContentType) != 0 {
|
||||
entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
|
||||
}
|
||||
if !ok {
|
||||
return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
|
||||
}
|
||||
}
|
||||
return entityReader.Read(r, entityPointer)
|
||||
}
|
||||
|
||||
// SetAttribute adds or replaces the attribute with the given value.
|
||||
func (r *Request) SetAttribute(name string, value interface{}) {
|
||||
r.attributes[name] = value
|
||||
}
|
||||
|
||||
// Attribute returns the value associated to the given name. Returns nil if absent.
|
||||
func (r Request) Attribute(name string) interface{} {
|
||||
return r.attributes[name]
|
||||
}
|
||||
|
||||
// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
|
||||
func (r Request) SelectedRoutePath() string {
|
||||
return r.selectedRoutePath
|
||||
}
|
||||
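The ReadEntity flow above is typically called from a route function; a hedged sketch follows (the User type and wiring are illustrative, and WriteError/WriteHeaderAndEntity are assumed from response.go below):

    package main

    import (
        "net/http"

        restful "github.com/emicklei/go-restful"
    )

    type User struct {
        Name string `json:"name"`
    }

    // createUser reads a JSON or XML body into a User and echoes it back with 201.
    func createUser(req *restful.Request, resp *restful.Response) {
        usr := new(User)
        if err := req.ReadEntity(usr); err != nil {
            resp.WriteError(http.StatusBadRequest, err)
            return
        }
        resp.WriteHeaderAndEntity(http.StatusCreated, usr)
    }

    func main() {
        ws := new(restful.WebService)
        ws.Path("/users").Consumes(restful.MIME_JSON, restful.MIME_XML).Produces(restful.MIME_JSON)
        ws.Route(ws.POST("").To(createUser))
        restful.Add(ws)
        // start an http.Server with restful.DefaultContainer as usual
    }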
255
vendor/github.com/emicklei/go-restful/response.go
generated
vendored
Normal file
@@ -0,0 +1,255 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
|
||||
var DefaultResponseMimeType string
|
||||
|
||||
// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
|
||||
var PrettyPrintResponses = true
|
||||
|
||||
// Response is a wrapper on the actual http ResponseWriter
|
||||
// It provides several convenience methods to prepare and write response content.
|
||||
type Response struct {
|
||||
http.ResponseWriter
|
||||
requestAccept string // mime-type what the Http Request says it wants to receive
|
||||
routeProduces []string // mime-types what the Route says it can produce
|
||||
statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
|
||||
contentLength int // number of bytes written for the response body
|
||||
prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
|
||||
err error // err property is kept when WriteError is called
|
||||
hijacker http.Hijacker // if underlying ResponseWriter supports it
|
||||
}
|
||||
|
||||
// NewResponse creates a new response based on a http ResponseWriter.
|
||||
func NewResponse(httpWriter http.ResponseWriter) *Response {
|
||||
hijacker, _ := httpWriter.(http.Hijacker)
|
||||
return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker}
|
||||
}
|
||||
|
||||
// DefaultResponseContentType sets a default.
|
||||
// If Accept header matching fails, fall back to this type.
|
||||
// Valid values are restful.MIME_JSON and restful.MIME_XML
|
||||
// Example:
|
||||
// restful.DefaultResponseContentType(restful.MIME_JSON)
|
||||
func DefaultResponseContentType(mime string) {
|
||||
DefaultResponseMimeType = mime
|
||||
}
|
||||
|
||||
// InternalServerError writes the StatusInternalServerError header.
|
||||
// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
|
||||
func (r Response) InternalServerError() Response {
|
||||
r.WriteHeader(http.StatusInternalServerError)
|
||||
return r
|
||||
}
|
||||
|
||||
// Hijack implements the http.Hijacker interface. This expands
|
||||
// the Response to fulfill http.Hijacker if the underlying
|
||||
// http.ResponseWriter supports it.
|
||||
func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
if r.hijacker == nil {
|
||||
return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter")
|
||||
}
|
||||
return r.hijacker.Hijack()
|
||||
}
|
||||
|
||||
// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
|
||||
func (r *Response) PrettyPrint(bePretty bool) {
|
||||
r.prettyPrint = bePretty
|
||||
}
|
||||
|
||||
// AddHeader is a shortcut for .Header().Add(header,value)
|
||||
func (r Response) AddHeader(header string, value string) Response {
|
||||
r.Header().Add(header, value)
|
||||
return r
|
||||
}
|
||||
|
||||
// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
|
||||
func (r *Response) SetRequestAccepts(mime string) {
|
||||
r.requestAccept = mime
|
||||
}
|
||||
|
||||
// EntityWriter returns the registered EntityWriter that the entity (requested resource)
|
||||
// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
|
||||
// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
|
||||
func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
|
||||
sorted := sortedMimes(r.requestAccept)
|
||||
for _, eachAccept := range sorted {
|
||||
for _, eachProduce := range r.routeProduces {
|
||||
if eachProduce == eachAccept.media {
|
||||
if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
|
||||
return w, true
|
||||
}
|
||||
}
|
||||
}
|
||||
if eachAccept.media == "*/*" {
|
||||
for _, each := range r.routeProduces {
|
||||
if w, ok := entityAccessRegistry.accessorAt(each); ok {
|
||||
return w, true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// if requestAccept is empty
|
||||
writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
|
||||
if !ok {
|
||||
// if not registered then fallback to the defaults (if set)
|
||||
if DefaultResponseMimeType == MIME_JSON {
|
||||
return entityAccessRegistry.accessorAt(MIME_JSON)
|
||||
}
|
||||
if DefaultResponseMimeType == MIME_XML {
|
||||
return entityAccessRegistry.accessorAt(MIME_XML)
|
||||
}
|
||||
// Fallback to whatever the route says it can produce.
|
||||
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
for _, each := range r.routeProduces {
|
||||
if w, ok := entityAccessRegistry.accessorAt(each); ok {
|
||||
return w, true
|
||||
}
|
||||
}
|
||||
if trace {
|
||||
traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
|
||||
}
|
||||
}
|
||||
return writer, ok
|
||||
}
|
||||
|
||||
// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
|
||||
func (r *Response) WriteEntity(value interface{}) error {
|
||||
return r.WriteHeaderAndEntity(http.StatusOK, value)
|
||||
}
|
||||
|
||||
// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
|
||||
// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
|
||||
// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
|
||||
// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
|
||||
// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
|
||||
// Current implementation ignores any q-parameters in the Accept Header.
|
||||
// Returns an error if the value could not be written on the response.
|
||||
func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
|
||||
writer, ok := r.EntityWriter()
|
||||
if !ok {
|
||||
r.WriteHeader(http.StatusNotAcceptable)
|
||||
return nil
|
||||
}
|
||||
return writer.Write(r, status, value)
|
||||
}
|
||||
|
||||
// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
|
||||
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
|
||||
func (r *Response) WriteAsXml(value interface{}) error {
|
||||
return writeXML(r, http.StatusOK, MIME_XML, value)
|
||||
}
|
||||
|
||||
// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
|
||||
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
|
||||
func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
|
||||
return writeXML(r, status, MIME_XML, value)
|
||||
}
|
||||
|
||||
// WriteAsJson is a convenience method for writing a value in json.
|
||||
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
|
||||
func (r *Response) WriteAsJson(value interface{}) error {
|
||||
return writeJSON(r, http.StatusOK, MIME_JSON, value)
|
||||
}
|
||||
|
||||
// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
|
||||
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
|
||||
func (r *Response) WriteJson(value interface{}, contentType string) error {
|
||||
return writeJSON(r, http.StatusOK, contentType, value)
|
||||
}
|
||||
|
||||
// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
|
||||
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
|
||||
func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
|
||||
return writeJSON(r, status, contentType, value)
|
||||
}
|
||||
|
||||
// WriteError write the http status and the error string on the response. err can be nil.
|
||||
func (r *Response) WriteError(httpStatus int, err error) error {
|
||||
r.err = err
|
||||
if err == nil {
|
||||
r.WriteErrorString(httpStatus, "")
|
||||
} else {
|
||||
r.WriteErrorString(httpStatus, err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteServiceError is a convenience method for a responding with a status and a ServiceError
|
||||
func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
|
||||
r.err = err
|
||||
return r.WriteHeaderAndEntity(httpStatus, err)
|
||||
}
|
||||
|
||||
// WriteErrorString is a convenience method for an error status with the actual error
|
||||
func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
|
||||
if r.err == nil {
|
||||
// if not called from WriteError
|
||||
r.err = errors.New(errorReason)
|
||||
}
|
||||
r.WriteHeader(httpStatus)
|
||||
if _, err := r.Write([]byte(errorReason)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush implements http.Flusher interface, which sends any buffered data to the client.
|
||||
func (r *Response) Flush() {
|
||||
if f, ok := r.ResponseWriter.(http.Flusher); ok {
|
||||
f.Flush()
|
||||
} else if trace {
|
||||
traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteHeader is overridden to remember the Status Code that has been written.
|
||||
// Changes to the Header of the response have no effect after this.
|
||||
func (r *Response) WriteHeader(httpStatus int) {
|
||||
r.statusCode = httpStatus
|
||||
r.ResponseWriter.WriteHeader(httpStatus)
|
||||
}
|
||||
|
||||
// StatusCode returns the code that has been written using WriteHeader.
|
||||
func (r Response) StatusCode() int {
|
||||
if 0 == r.statusCode {
|
||||
// no status code has been written yet; assume OK
|
||||
return http.StatusOK
|
||||
}
|
||||
return r.statusCode
|
||||
}
|
||||
|
||||
// Write writes the data to the connection as part of an HTTP reply.
|
||||
// Write is part of http.ResponseWriter interface.
|
||||
func (r *Response) Write(bytes []byte) (int, error) {
|
||||
written, err := r.ResponseWriter.Write(bytes)
|
||||
r.contentLength += written
|
||||
return written, err
|
||||
}
|
||||
|
||||
// ContentLength returns the number of bytes written for the response content.
|
||||
// Note that this value is only correct if all data is written through the Response using its Write* methods.
|
||||
// Data written directly using the underlying http.ResponseWriter is not accounted for.
|
||||
func (r Response) ContentLength() int {
|
||||
return r.contentLength
|
||||
}
|
||||
|
||||
// CloseNotify is part of http.CloseNotifier interface
|
||||
func (r Response) CloseNotify() <-chan bool {
|
||||
return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
// Error returns the err created by WriteError
|
||||
func (r Response) Error() error {
|
||||
return r.err
|
||||
}
|
||||
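The Response methods above are normally used from inside a route handler. The following sketch is illustrative only and not part of the vendored diff: it shows a handler shaped like the RouteFunction declared in route.go below, using only WriteError and WriteHeaderAndEntity from this file; the meeting type and the "standup" value are hypothetical placeholders, and PathParameter is assumed to be the go-restful accessor for a path variable.

package main

import (
	"errors"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// meeting is a hypothetical payload type used only for illustration.
type meeting struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// getMeeting is a RouteFunction-shaped handler built from the methods shown above.
func getMeeting(req *restful.Request, resp *restful.Response) {
	id := req.PathParameter("id") // assumed go-restful accessor for the {id} path variable
	if id == "" {
		// WriteError stores the error on the Response and writes the status plus reason.
		resp.WriteError(http.StatusNotFound, errors.New("meeting not found"))
		return
	}
	// WriteHeaderAndEntity selects an EntityWriter from the Accept header and the
	// route's Produces list (falling back to the restful defaults) and marshals the value.
	resp.WriteHeaderAndEntity(http.StatusOK, meeting{ID: id, Name: "standup"})
}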
170
vendor/github.com/emicklei/go-restful/route.go
generated
vendored
Normal file
@@ -0,0 +1,170 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"net/http"
	"strings"
)

// RouteFunction declares the signature of a function that can be bound to a Route.
type RouteFunction func(*Request, *Response)

// RouteSelectionConditionFunction declares the signature of a function that
// can be used to add extra conditional logic when selecting whether the route
// matches the HTTP request.
type RouteSelectionConditionFunction func(httpRequest *http.Request) bool

// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
type Route struct {
	Method   string
	Produces []string
	Consumes []string
	Path     string // webservice root path + described path
	Function RouteFunction
	Filters  []FilterFunction
	If       []RouteSelectionConditionFunction

	// cached values for dispatching
	relativePath string
	pathParts    []string
	pathExpr     *pathExpression // cached compilation of relativePath as RegExp

	// documentation
	Doc                     string
	Notes                   string
	Operation               string
	ParameterDocs           []*Parameter
	ResponseErrors          map[int]ResponseError
	DefaultResponse         *ResponseError
	ReadSample, WriteSample interface{} // structs that model an example request or response payload

	// Extra information used to store custom information about the route.
	Metadata map[string]interface{}

	// marks a route as deprecated
	Deprecated bool

	//Overrides the container.contentEncodingEnabled
	contentEncodingEnabled *bool
}

// Initialize for Route
func (r *Route) postBuild() {
	r.pathParts = tokenizePath(r.Path)
}

// Create Request and Response from their http versions
func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) {
	wrappedRequest := NewRequest(httpRequest)
	wrappedRequest.pathParameters = pathParams
	wrappedRequest.selectedRoutePath = r.Path
	wrappedResponse := NewResponse(httpWriter)
	wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
	wrappedResponse.routeProduces = r.Produces
	return wrappedRequest, wrappedResponse
}

// dispatchWithFilters call the function after passing through its own filters
func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
	if len(r.Filters) > 0 {
		chain := FilterChain{Filters: r.Filters, Target: r.Function}
		chain.ProcessFilter(wrappedRequest, wrappedResponse)
	} else {
		// unfiltered
		r.Function(wrappedRequest, wrappedResponse)
	}
}

func stringTrimSpaceCutset(r rune) bool {
	return r == ' '
}

// Return whether the mimeType matches to what this Route can produce.
func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
	remaining := mimeTypesWithQuality
	for {
		var mimeType string
		if end := strings.Index(remaining, ","); end == -1 {
			mimeType, remaining = remaining, ""
		} else {
			mimeType, remaining = remaining[:end], remaining[end+1:]
		}
		if quality := strings.Index(mimeType, ";"); quality != -1 {
			mimeType = mimeType[:quality]
		}
		mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset)
		if mimeType == "*/*" {
			return true
		}
		for _, producibleType := range r.Produces {
			if producibleType == "*/*" || producibleType == mimeType {
				return true
			}
		}
		if len(remaining) == 0 {
			return false
		}
	}
}

// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
func (r Route) matchesContentType(mimeTypes string) bool {

	if len(r.Consumes) == 0 {
		// did not specify what it can consume ; any media type (“*/*”) is assumed
		return true
	}

	if len(mimeTypes) == 0 {
		// idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
		m := r.Method
		if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
			return true
		}
		// proceed with default
		mimeTypes = MIME_OCTET
	}

	remaining := mimeTypes
	for {
		var mimeType string
		if end := strings.Index(remaining, ","); end == -1 {
			mimeType, remaining = remaining, ""
		} else {
			mimeType, remaining = remaining[:end], remaining[end+1:]
		}
		if quality := strings.Index(mimeType, ";"); quality != -1 {
			mimeType = mimeType[:quality]
		}
		mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset)
		for _, consumeableType := range r.Consumes {
			if consumeableType == "*/*" || consumeableType == mimeType {
				return true
			}
		}
		if len(remaining) == 0 {
			return false
		}
	}
}

// Tokenize an URL path using the slash separator ; the result does not have empty tokens
func tokenizePath(path string) []string {
	if "/" == path {
		return nil
	}
	return strings.Split(strings.Trim(path, "/"), "/")
}

// for debugging
func (r Route) String() string {
	return r.Method + " " + r.Path
}

// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value.
func (r Route) EnableContentEncoding(enabled bool) {
	r.contentEncodingEnabled = &enabled
}
326
vendor/github.com/emicklei/go-restful/route_builder.go
generated
vendored
Normal file
@@ -0,0 +1,326 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"fmt"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync/atomic"

	"github.com/emicklei/go-restful/log"
)

// RouteBuilder is a helper to construct Routes.
type RouteBuilder struct {
	rootPath    string
	currentPath string
	produces    []string
	consumes    []string
	httpMethod  string        // required
	function    RouteFunction // required
	filters     []FilterFunction
	conditions  []RouteSelectionConditionFunction

	typeNameHandleFunc TypeNameHandleFunction // required

	// documentation
	doc                     string
	notes                   string
	operation               string
	readSample, writeSample interface{}
	parameters              []*Parameter
	errorMap                map[int]ResponseError
	defaultResponse         *ResponseError
	metadata                map[string]interface{}
	deprecated              bool
	contentEncodingEnabled  *bool
}

// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
// 	ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
// 	func Returns500(b *RouteBuilder) {
// 		b.Returns(500, "Internal Server Error", restful.ServiceError{})
// 	}
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
	for _, each := range oneArgBlocks {
		each(b)
	}
	return b
}

// To bind the route to a function.
// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
	b.function = function
	return b
}

// Method specifies what HTTP method to match. Required.
func (b *RouteBuilder) Method(method string) *RouteBuilder {
	b.httpMethod = method
	return b
}

// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
	b.produces = mimeTypes
	return b
}

// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these
func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
	b.consumes = mimeTypes
	return b
}

// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
	b.currentPath = subPath
	return b
}

// Doc tells what this route is all about. Optional.
func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
	b.doc = documentation
	return b
}

// Notes is a verbose explanation of the operation behavior. Optional.
func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
	b.notes = notes
	return b
}

// Reads tells what resource type will be read from the request payload. Optional.
// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type.
func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder {
	fn := b.typeNameHandleFunc
	if fn == nil {
		fn = reflectTypeName
	}
	typeAsName := fn(sample)
	description := ""
	if len(optionalDescription) > 0 {
		description = optionalDescription[0]
	}
	b.readSample = sample
	bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}}
	bodyParameter.beBody()
	bodyParameter.Required(true)
	bodyParameter.DataType(typeAsName)
	b.Param(bodyParameter)
	return b
}

// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not.
// Use this to modify or extend information for the Parameter (through its Data()).
func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
	for _, each := range b.parameters {
		if each.Data().Name == name {
			return each
		}
	}
	return p
}

// Writes tells what resource type will be written as the response payload. Optional.
func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
	b.writeSample = sample
	return b
}

// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
	if b.parameters == nil {
		b.parameters = []*Parameter{}
	}
	b.parameters = append(b.parameters, parameter)
	return b
}

// Operation allows you to document what the actual method/function call is of the Route.
// Unless called, the operation name is derived from the RouteFunction set using To(..).
func (b *RouteBuilder) Operation(name string) *RouteBuilder {
	b.operation = name
	return b
}

// ReturnsError is deprecated, use Returns instead.
func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
	log.Print("ReturnsError is deprecated, use Returns instead.")
	return b.Returns(code, message, model)
}

// Returns allows you to document what responses (errors or regular) can be expected.
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
	err := ResponseError{
		Code:      code,
		Message:   message,
		Model:     model,
		IsDefault: false, // this field is deprecated, use default response instead.
	}
	// lazy init because there is no NewRouteBuilder (yet)
	if b.errorMap == nil {
		b.errorMap = map[int]ResponseError{}
	}
	b.errorMap[code] = err
	return b
}

// DefaultReturns is a special Returns call that sets the default of the response.
func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
	b.defaultResponse = &ResponseError{
		Message: message,
		Model:   model,
	}
	return b
}

// Metadata adds or updates a key=value pair to the metadata map.
func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
	if b.metadata == nil {
		b.metadata = map[string]interface{}{}
	}
	b.metadata[key] = value
	return b
}

// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use
func (b *RouteBuilder) Deprecate() *RouteBuilder {
	b.deprecated = true
	return b
}

// ResponseError represents a response; not necessarily an error.
type ResponseError struct {
	Code      int
	Message   string
	Model     interface{}
	IsDefault bool
}

func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
	b.rootPath = path
	return b
}

// Filter appends a FilterFunction to the end of filters for this Route to build.
func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
	b.filters = append(b.filters, filter)
	return b
}

// If sets a condition function that controls matching the Route based on custom logic.
// The condition function is provided the HTTP request and should return true if the route
// should be considered.
//
// Efficiency note: the condition function is called before checking the method, produces, and
// consumes criteria, so that the correct HTTP status code can be returned.
//
// Lifecycle note: no filter functions have been called prior to calling the condition function,
// so the condition function should not depend on any context that might be set up by container
// or route filters.
func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder {
	b.conditions = append(b.conditions, condition)
	return b
}

// ContentEncodingEnabled allows you to override the Containers value for auto-compressing this route response.
func (b *RouteBuilder) ContentEncodingEnabled(enabled bool) *RouteBuilder {
	b.contentEncodingEnabled = &enabled
	return b
}

// If no specific Route path then set to rootPath
// If no specific Produces then set to rootProduces
// If no specific Consumes then set to rootConsumes
func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
	if len(b.produces) == 0 {
		b.produces = rootProduces
	}
	if len(b.consumes) == 0 {
		b.consumes = rootConsumes
	}
}

// typeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions.
func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
	b.typeNameHandleFunc = handler
	return b
}

// Build creates a new Route using the specification details collected by the RouteBuilder
func (b *RouteBuilder) Build() Route {
	pathExpr, err := newPathExpression(b.currentPath)
	if err != nil {
		log.Printf("Invalid path:%s because:%v", b.currentPath, err)
		os.Exit(1)
	}
	if b.function == nil {
		log.Printf("No function specified for route:" + b.currentPath)
		os.Exit(1)
	}
	operationName := b.operation
	if len(operationName) == 0 && b.function != nil {
		// extract from definition
		operationName = nameOfFunction(b.function)
	}
	route := Route{
		Method:                 b.httpMethod,
		Path:                   concatPath(b.rootPath, b.currentPath),
		Produces:               b.produces,
		Consumes:               b.consumes,
		Function:               b.function,
		Filters:                b.filters,
		If:                     b.conditions,
		relativePath:           b.currentPath,
		pathExpr:               pathExpr,
		Doc:                    b.doc,
		Notes:                  b.notes,
		Operation:              operationName,
		ParameterDocs:          b.parameters,
		ResponseErrors:         b.errorMap,
		DefaultResponse:        b.defaultResponse,
		ReadSample:             b.readSample,
		WriteSample:            b.writeSample,
		Metadata:               b.metadata,
		Deprecated:             b.deprecated,
		contentEncodingEnabled: b.contentEncodingEnabled,
	}
	route.postBuild()
	return route
}

func concatPath(path1, path2 string) string {
	return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
}

var anonymousFuncCount int32

// nameOfFunction returns the short name of the function f for documentation.
// It uses a runtime feature for debugging ; its value may change for later Go versions.
func nameOfFunction(f interface{}) string {
	fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
	tokenized := strings.Split(fun.Name(), ".")
	last := tokenized[len(tokenized)-1]
	last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
	last = strings.TrimSuffix(last, ")-fm") // Go 1.5
	last = strings.TrimSuffix(last, "·fm")  // < Go 1.5
	last = strings.TrimSuffix(last, "-fm")  // Go 1.5
	if last == "func1" { // this could mean conflicts in API docs
		val := atomic.AddInt32(&anonymousFuncCount, 1)
		last = "func" + fmt.Sprintf("%d", val)
		atomic.StoreInt32(&anonymousFuncCount, val)
	}
	return last
}
20
vendor/github.com/emicklei/go-restful/router.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import "net/http"

// A RouteSelector finds the best matching Route given the input HTTP Request
// RouteSelectors can optionally also implement the PathProcessor interface to also calculate the
// path parameters after the route has been selected.
type RouteSelector interface {

	// SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
	// It returns a selected Route and its containing WebService or an error indicating
	// a problem.
	SelectRoute(
		webServices []*WebService,
		httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
}
23
vendor/github.com/emicklei/go-restful/service_error.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import "fmt"

// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request.
type ServiceError struct {
	Code    int
	Message string
}

// NewError returns a ServiceError using the code and reason
func NewError(code int, message string) ServiceError {
	return ServiceError{Code: code, Message: message}
}

// Error returns a text representation of the service error
func (s ServiceError) Error() string {
	return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
}
290
vendor/github.com/emicklei/go-restful/web_service.go
generated
vendored
Normal file
@@ -0,0 +1,290 @@
package restful

import (
	"errors"
	"os"
	"reflect"
	"sync"

	"github.com/emicklei/go-restful/log"
)

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
type WebService struct {
	rootPath       string
	pathExpr       *pathExpression // cached compilation of rootPath as RegExp
	routes         []Route
	produces       []string
	consumes       []string
	pathParameters []*Parameter
	filters        []FilterFunction
	documentation  string
	apiVersion     string

	typeNameHandleFunc TypeNameHandleFunction

	dynamicRoutes bool

	// protects 'routes' if dynamic routes are enabled
	routesLock sync.RWMutex
}

func (w *WebService) SetDynamicRoutes(enable bool) {
	w.dynamicRoutes = enable
}

// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
// into the restful documentation for the service.
type TypeNameHandleFunction func(sample interface{}) string

// TypeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions. If not set, the web service will invoke
// reflect.TypeOf(object).String().
func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
	w.typeNameHandleFunc = handler
	return w
}

// reflectTypeName is the default TypeNameHandleFunction and for a given object
// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
// the reflection API.
func reflectTypeName(sample interface{}) string {
	return reflect.TypeOf(sample).String()
}

// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
func (w *WebService) compilePathExpression() {
	compiled, err := newPathExpression(w.rootPath)
	if err != nil {
		log.Printf("invalid path:%s because:%v", w.rootPath, err)
		os.Exit(1)
	}
	w.pathExpr = compiled
}

// ApiVersion sets the API version for documentation purposes.
func (w *WebService) ApiVersion(apiVersion string) *WebService {
	w.apiVersion = apiVersion
	return w
}

// Version returns the API version for documentation purposes.
func (w *WebService) Version() string { return w.apiVersion }

// Path specifies the root URL template path of the WebService.
// All Routes will be relative to this path.
func (w *WebService) Path(root string) *WebService {
	w.rootPath = root
	if len(w.rootPath) == 0 {
		w.rootPath = "/"
	}
	w.compilePathExpression()
	return w
}

// Param adds a PathParameter to document parameters used in the root path.
func (w *WebService) Param(parameter *Parameter) *WebService {
	if w.pathParameters == nil {
		w.pathParameters = []*Parameter{}
	}
	w.pathParameters = append(w.pathParameters, parameter)
	return w
}

// PathParameter creates a new Parameter of kind Path for documentation purposes.
// It is initialized as required with string as its DataType.
func (w *WebService) PathParameter(name, description string) *Parameter {
	return PathParameter(name, description)
}

// PathParameter creates a new Parameter of kind Path for documentation purposes.
// It is initialized as required with string as its DataType.
func PathParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
	p.bePath()
	return p
}

// QueryParameter creates a new Parameter of kind Query for documentation purposes.
// It is initialized as not required with string as its DataType.
func (w *WebService) QueryParameter(name, description string) *Parameter {
	return QueryParameter(name, description)
}

// QueryParameter creates a new Parameter of kind Query for documentation purposes.
// It is initialized as not required with string as its DataType.
func QueryParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}}
	p.beQuery()
	return p
}

// BodyParameter creates a new Parameter of kind Body for documentation purposes.
// It is initialized as required without a DataType.
func (w *WebService) BodyParameter(name, description string) *Parameter {
	return BodyParameter(name, description)
}

// BodyParameter creates a new Parameter of kind Body for documentation purposes.
// It is initialized as required without a DataType.
func BodyParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
	p.beBody()
	return p
}

// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
// It is initialized as not required with string as its DataType.
func (w *WebService) HeaderParameter(name, description string) *Parameter {
	return HeaderParameter(name, description)
}

// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
// It is initialized as not required with string as its DataType.
func HeaderParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
	p.beHeader()
	return p
}

// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
// It is initialized as required with string as its DataType.
func (w *WebService) FormParameter(name, description string) *Parameter {
	return FormParameter(name, description)
}

// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
// It is initialized as required with string as its DataType.
func FormParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
	p.beForm()
	return p
}

// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes.
func (w *WebService) Route(builder *RouteBuilder) *WebService {
	w.routesLock.Lock()
	defer w.routesLock.Unlock()
	builder.copyDefaults(w.produces, w.consumes)
	w.routes = append(w.routes, builder.Build())
	return w
}

// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method'
func (w *WebService) RemoveRoute(path, method string) error {
	if !w.dynamicRoutes {
		return errors.New("dynamic routes are not enabled.")
	}
	w.routesLock.Lock()
	defer w.routesLock.Unlock()
	newRoutes := make([]Route, (len(w.routes) - 1))
	current := 0
	for ix := range w.routes {
		if w.routes[ix].Method == method && w.routes[ix].Path == path {
			continue
		}
		newRoutes[current] = w.routes[ix]
		current = current + 1
	}
	w.routes = newRoutes
	return nil
}

// Method creates a new RouteBuilder and initialize its http method
func (w *WebService) Method(httpMethod string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
}

// Produces specifies that this WebService can produce one or more MIME types.
// Http requests must have one of these values set for the Accept header.
func (w *WebService) Produces(contentTypes ...string) *WebService {
	w.produces = contentTypes
	return w
}

// Consumes specifies that this WebService can consume one or more MIME types.
// Http requests must have one of these values set for the Content-Type header.
func (w *WebService) Consumes(accepts ...string) *WebService {
	w.consumes = accepts
	return w
}

// Routes returns the Routes associated with this WebService
func (w *WebService) Routes() []Route {
	if !w.dynamicRoutes {
		return w.routes
	}
	// Make a copy of the array to prevent concurrency problems
	w.routesLock.RLock()
	defer w.routesLock.RUnlock()
	result := make([]Route, len(w.routes))
	for ix := range w.routes {
		result[ix] = w.routes[ix]
	}
	return result
}

// RootPath returns the RootPath associated with this WebService. Default "/"
func (w *WebService) RootPath() string {
	return w.rootPath
}

// PathParameters return the path parameter names for (shared among its Routes)
func (w *WebService) PathParameters() []*Parameter {
	return w.pathParameters
}

// Filter adds a filter function to the chain of filters applicable to all its Routes
func (w *WebService) Filter(filter FilterFunction) *WebService {
	w.filters = append(w.filters, filter)
	return w
}

// Doc is used to set the documentation of this service.
func (w *WebService) Doc(plainText string) *WebService {
	w.documentation = plainText
	return w
}

// Documentation returns it.
func (w *WebService) Documentation() string {
	return w.documentation
}

/*
	Convenience methods
*/

// HEAD is a shortcut for .Method("HEAD").Path(subPath)
func (w *WebService) HEAD(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
}

// GET is a shortcut for .Method("GET").Path(subPath)
func (w *WebService) GET(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
}

// POST is a shortcut for .Method("POST").Path(subPath)
func (w *WebService) POST(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
}

// PUT is a shortcut for .Method("PUT").Path(subPath)
func (w *WebService) PUT(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
}

// PATCH is a shortcut for .Method("PATCH").Path(subPath)
func (w *WebService) PATCH(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
}

// DELETE is a shortcut for .Method("DELETE").Path(subPath)
func (w *WebService) DELETE(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
}
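Taken together with the RouteBuilder above, the WebService fluent API composes as in the short sketch below. This is an illustrative example, not part of the vendored diff; it continues the hypothetical package from the sketch after response.go (so the meeting type and getMeeting handler are assumed), and it uses only methods that appear in web_service.go and route_builder.go.

// Illustrative wiring of the fluent API shown above; assumes the sketch after response.go.
func newMeetingService() *restful.WebService {
	ws := new(restful.WebService)
	ws.Path("/meetings").
		Produces(restful.MIME_JSON). // MIME_JSON is the constant referenced elsewhere in this package
		Consumes(restful.MIME_JSON)

	// GET /meetings/{id} -> getMeeting; Returns documents the expected responses.
	ws.Route(ws.GET("/{id}").To(getMeeting).
		Param(ws.PathParameter("id", "identifier of the meeting")).
		Returns(http.StatusOK, "OK", meeting{}).
		Returns(http.StatusNotFound, "Not Found", nil))
	return ws
}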
39
vendor/github.com/emicklei/go-restful/web_service_container.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"net/http"
)

// DefaultContainer is a restful.Container that uses http.DefaultServeMux
var DefaultContainer *Container

func init() {
	DefaultContainer = NewContainer()
	DefaultContainer.ServeMux = http.DefaultServeMux
}

// If set the true then panics will not be caught to return HTTP 500.
// In that case, Route functions are responsible for handling any error situation.
// Default value is false = recover from panics. This has performance implications.
// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true)
var DoNotRecover = false

// Add registers a new WebService add it to the DefaultContainer.
func Add(service *WebService) {
	DefaultContainer.Add(service)
}

// Filter appends a container FilterFunction from the DefaultContainer.
// These are called before dispatching a http.Request to a WebService.
func Filter(filter FilterFunction) {
	DefaultContainer.Filter(filter)
}

// RegisteredWebServices returns the collections of WebServices from the DefaultContainer
func RegisteredWebServices() []*WebService {
	return DefaultContainer.RegisteredWebServices()
}
201
vendor/github.com/go-logr/logr/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
36
vendor/github.com/go-logr/logr/README.md
generated
vendored
Normal file
@@ -0,0 +1,36 @@
# A more minimal logging API for Go

Before you consider this package, please read [this blog post by the inimitable
Dave Cheney](http://dave.cheney.net/2015/11/05/lets-talk-about-logging). I
really appreciate what he has to say, and it largely aligns with my own
experiences. Too many choices of levels means inconsistent logs.

This package offers a purely abstract interface, based on these ideas but with
a few twists. Code can depend on just this interface and have the actual
logging implementation be injected from callers. Ideally only `main()` knows
what logging implementation is being used.

# Differences from Dave's ideas

The main differences are:

1) Dave basically proposes doing away with the notion of a logging API in favor
of `fmt.Printf()`. I disagree, especially when you consider things like output
locations, timestamps, file and line decorations, and structured logging. I
restrict the API to just 2 types of logs: info and error.

Info logs are things you want to tell the user which are not errors. Error
logs are, well, errors. If your code receives an `error` from a subordinate
function call and is logging that `error` *and not returning it*, use error
logs.

2) Verbosity-levels on info logs. This gives developers a chance to indicate
arbitrary grades of importance for info logs, without assigning names with
semantic meaning such as "warning", "trace", and "debug". Superficially this
may feel very similar, but the primary difference is the lack of semantics.
Because verbosity is a numerical value, it's safe to assume that an app running
with higher verbosity means more (and less important) logs will be generated.

This is a BETA grade API. I have implemented it for
[glog](https://godoc.org/github.com/golang/glog). Until there is a significant
2nd implementation, I don't really know how it will change.
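The injection pattern the README describes — library code depending only on the abstract interface while `main()` picks the concrete implementation — can be sketched as follows. This is not part of the vendored package: the `SyncObject` and `apply` names are hypothetical, and the sketch assumes the interface-based `logr.Logger` API vendored in this diff (later logr releases changed the API shape).

```go
package reconcile

import (
	"fmt"

	"github.com/go-logr/logr"
)

// SyncObject is a hypothetical library function. It depends only on the
// abstract logr.Logger interface; the caller (ideally main()) decides which
// concrete logging implementation gets injected.
func SyncObject(log logr.Logger, namespace, name, targetValue string) {
	// Attach base context once; every message below carries it.
	log = log.WithValues("object", namespace+"/"+name)

	// Routine progress at the default verbosity (level 0).
	log.Info("setting field foo on object", "value", targetValue)

	// Guard a more verbose message so the detail string is only built when
	// the injected implementation enables level 1.
	if log.V(1).Enabled() {
		log.V(1).Info("computed update", "detail", fmt.Sprintf("foo=%q", targetValue))
	}

	if err := apply(namespace, name, targetValue); err != nil {
		// The error is handled (logged) here rather than returned, which is
		// the case the README says should use error logs instead of info logs.
		log.Error(err, "unable to reconcile object")
	}
}

// apply is a hypothetical stand-in for whatever actually mutates the object.
func apply(namespace, name, value string) error { return nil }
```

The `V(1).Enabled()` guard illustrates the verbosity point above: callers express how unimportant a message is numerically, and the implementation, not the library code, decides the cut-off.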
151
vendor/github.com/go-logr/logr/logr.go
generated
vendored
Normal file
@@ -0,0 +1,151 @@
// Package logr defines abstract interfaces for logging. Packages can depend on
// these interfaces and callers can implement logging in whatever way is
// appropriate.
//
// This design derives from Dave Cheney's blog:
//     http://dave.cheney.net/2015/11/05/lets-talk-about-logging
//
// This is a BETA grade API. Until there is a significant 2nd implementation,
// I don't really know how it will change.
//
// The logging specifically makes it non-trivial to use format strings, to encourage
// attaching structured information instead of unstructured format strings.
//
// Usage
//
// Logging is done using a Logger. Loggers can have name prefixes and named values
// attached, so that all log messages logged with that Logger have some base context
// associated.
//
// The term "key" is used to refer to the name associated with a particular value, to
// disambiguate it from the general Logger name.
//
// For instance, suppose we're trying to reconcile the state of an object, and we want
// to log that we've made some decision.
//
// With the traditional log package, we might write
//   log.Printf(
//       "decided to set field foo to value %q for object %s/%s",
//       targetValue, object.Namespace, object.Name)
//
// With logr's structured logging, we'd write
//   // elsewhere in the file, set up the logger to log with the prefix of "reconcilers",
//   // and the named value target-type=Foo, for extra context.
//   log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo")
//
//   // later on...
//   log.Info("setting field foo on object", "value", targetValue, "object", object)
//
// Depending on our logging implementation, we could then make logging decisions based on field values
// (like only logging such events for objects in a certain namespace), or copy the structured
// information into a structured log store.
//
// For logging errors, Logger has a method called Error. Suppose we wanted to log an
// error while reconciling. With the traditional log package, we might write
//   log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err)
//
// With logr, we'd instead write
//   // assuming the above setup for log
//   log.Error(err, "unable to reconcile object", "object", object)
//
// This functions similarly to:
//   log.Info("unable to reconcile object", "error", err, "object", object)
//
// However, it ensures that a standard key for the error value ("error") is used across all
// error logging. Furthermore, certain implementations may choose to attach additional
// information (such as stack traces) on calls to Error, so it's preferred to use Error
// to log errors.
//
// Parts of a log line
//
// Each log message from a Logger has four types of context:
// logger name, log verbosity, log message, and the named values.
//
// The Logger name consists of a series of name "segments" added by successive calls to WithName.
// These name segments will be joined in some way by the underlying implementation. It is strongly
// recommended that name segments contain simple identifiers (letters, digits, and hyphen), and do
// not contain characters that could muddle the log output or confuse the joining operation (e.g.
// whitespace, commas, periods, slashes, brackets, quotes, etc).
//
// Log verbosity represents how little a log matters. Level zero, the default, matters most.
// Increasing levels matter less and less. Try to avoid lots of different verbosity levels,
// and instead provide useful keys, logger names, and log messages for users to filter on.
// It's illegal to pass a log level below zero.
//
// The log message consists of a constant message attached to the log line. This
// should generally be a simple description of what's occurring, and should never be a format string.
//
// Variable information can then be attached using named values (key/value pairs). Keys are arbitrary
// strings, while values may be any Go value.
//
// Key Naming Conventions
//
// While users are generally free to use key names of their choice, it's generally best to avoid
// using the following keys, as they're frequently used by implementations:
//
// - `"error"`: the underlying error value in the `Error` method.
// - `"stacktrace"`: the stack trace associated with a particular log line or error
//   (often from the `Error` message).
// - `"caller"`: the calling information (file/line) of a particular log line.
// - `"msg"`: the log message.
// - `"level"`: the log level.
// - `"ts"`: the timestamp for a log line.
//
// Implementations are encouraged to make use of these keys to represent the above
// concepts, when necessary (for example, in a pure-JSON output form, it would be
// necessary to represent at least message and timestamp as ordinary named values).
package logr

// TODO: consider adding back in format strings if they're really needed
// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects)
// TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats

// InfoLogger represents the ability to log non-error messages, at a particular verbosity.
type InfoLogger interface {
	// Info logs a non-error message with the given key/value pairs as context.
	//
	// The msg argument should be used to add some constant description to
	// the log line. The key/value pairs can then be used to add additional
	// variable information. The key/value pairs should alternate string
	// keys and arbitrary values.
	Info(msg string, keysAndValues ...interface{})

	// Enabled tests whether this InfoLogger is enabled. For example,
	// commandline flags might be used to set the logging verbosity and disable
	// some info logs.
	Enabled() bool
}

// Logger represents the ability to log messages, both errors and not.
type Logger interface {
	// All Loggers implement InfoLogger. Calling InfoLogger methods directly on
	// a Logger value is equivalent to calling them on a V(0) InfoLogger. For
	// example, logger.Info() produces the same result as logger.V(0).Info.
	InfoLogger

	// Error logs an error, with the given message and key/value pairs as context.
	// It functions similarly to calling Info with the "error" named value, but may
	// have unique behavior, and should be preferred for logging errors (see the
	// package documentation for more information).
	//
	// The msg field should be used to add context to any underlying error,
	// while the err field should be used to attach the actual error that
	// triggered this log line, if present.
	Error(err error, msg string, keysAndValues ...interface{})

	// V returns an InfoLogger value for a specific verbosity level. A higher
	// verbosity level means a log message is less important. It's illegal to
	// pass a log level less than zero.
	V(level int) InfoLogger

	// WithValues adds some key-value pairs of context to a logger.
	// See Info for documentation on how key/value pairs work.
	WithValues(keysAndValues ...interface{}) Logger

	// WithName adds a new element to the logger's name.
	// Successive calls to WithName continue to append
	// suffixes to the logger's name. It's strongly recommended
	// that name segments contain only letters, digits, and hyphens
	// (see the package documentation for more information).
	WithName(name string) Logger
}
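To make the interfaces above concrete, here is a minimal sketch of an implementation that writes to stdout and is wired up in `main()`. The `stdoutLogger` type is illustrative only (it is not part of this repository or of logr), and the sketch assumes the interface-based API vendored above; newer logr releases replaced the `Logger` interface with a concrete struct.

```go
package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/go-logr/logr"
)

// stdoutLogger is a toy logr.Logger that prints to stdout. Info messages
// above maxLevel are suppressed via Enabled().
type stdoutLogger struct {
	name     string
	values   []interface{}
	level    int
	maxLevel int
}

func (l stdoutLogger) Enabled() bool { return l.level <= l.maxLevel }

func (l stdoutLogger) Info(msg string, keysAndValues ...interface{}) {
	if !l.Enabled() {
		return
	}
	fmt.Printf("INFO[%d] %s %q %v %v\n", l.level, l.name, msg, l.values, keysAndValues)
}

func (l stdoutLogger) Error(err error, msg string, keysAndValues ...interface{}) {
	// Errors are emitted regardless of verbosity, with a standard "error" key.
	fmt.Printf("ERROR %s %q error=%v %v %v\n", l.name, msg, err, l.values, keysAndValues)
}

// V, WithValues, and WithName operate on value receivers, so each returns a
// copy and derived loggers never mutate the logger they came from.
func (l stdoutLogger) V(level int) logr.InfoLogger {
	l.level += level
	return l
}

func (l stdoutLogger) WithValues(keysAndValues ...interface{}) logr.Logger {
	l.values = append(append([]interface{}{}, l.values...), keysAndValues...)
	return l
}

func (l stdoutLogger) WithName(name string) logr.Logger {
	l.name = strings.TrimPrefix(l.name+"/"+name, "/")
	return l
}

func main() {
	// Only main() knows the concrete type; everything else sees logr.Logger.
	var log logr.Logger = stdoutLogger{maxLevel: 1}
	log = log.WithName("reconcilers").WithValues("target-type", "Foo")

	log.Info("setting field foo on object", "value", 42)
	log.V(1).Info("debug detail, printed because maxLevel is 1")
	log.V(2).Info("trace detail, suppressed")
	log.Error(errors.New("boom"), "unable to reconcile object")
}
```

The name segments and named values accumulate exactly as the package documentation describes: `WithName` joins segments (here with "/"), and `WithValues` prepends base context to every subsequent message.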
Some files were not shown because too many files have changed in this diff.