
bump(*): kubernetes release-1.16.0 dependencies

Author: Mike Dame
Date:   2019-10-12 11:11:43 -04:00
Parent: 5af668e89a
Commit: 1652ba7976

28121 changed files with 3491095 additions and 2280257 deletions

View File

@@ -79,9 +79,5 @@ func ping(manager challenge.Manager, endpoint, versionHeader string) error {
 	}
 	defer resp.Body.Close()
 
-	if err := manager.AddResponse(resp); err != nil {
-		return err
-	}
-
-	return nil
+	return manager.AddResponse(resp)
 }
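The hunk above collapses a check-then-return-nil tail into a single statement: when the last thing a function does is propagate another call's error, returning the call directly is behaviorally identical. A minimal sketch of the idiom, where addResponse is a hypothetical stand-in for manager.AddResponse:

package main

import (
	"errors"
	"fmt"
)

// addResponse is a hypothetical stand-in for manager.AddResponse.
func addResponse(ok bool) error {
	if !ok {
		return errors.New("challenge parse failed")
	}
	return nil
}

// Before: check the error, propagate it, otherwise return nil.
func pingVerbose(ok bool) error {
	if err := addResponse(ok); err != nil {
		return err
	}
	return nil
}

// After: the error (or nil) is returned directly.
func pingDirect(ok bool) error {
	return addResponse(ok)
}

func main() {
	// Both variants return the same value for any input.
	fmt.Println(pingVerbose(false), pingDirect(false))
}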

View File

@@ -1,6 +1,7 @@
 package proxy
 
 import (
+	"context"
 	"io"
 	"net/http"
 	"strconv"
@@ -8,14 +9,14 @@ import (
 	"time"
 
 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
+	dcontext "github.com/docker/distribution/context"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/proxy/scheduler"
 	"github.com/opencontainers/go-digest"
 )
 
 // todo(richardscothern): from cache control header or config file
-const blobTTL = time.Duration(24 * 7 * time.Hour)
+const blobTTL = 24 * 7 * time.Hour
 
 type proxyBlobStore struct {
 	localStore distribution.BlobStore
@@ -116,7 +117,7 @@ func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) e
 func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
 	served, err := pbs.serveLocal(ctx, w, r, dgst)
 	if err != nil {
-		context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error())
+		dcontext.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error())
 		return err
 	}
 
@@ -140,12 +141,12 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter,
 
 	go func(dgst digest.Digest) {
 		if err := pbs.storeLocal(ctx, dgst); err != nil {
-			context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
+			dcontext.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
 		}
 
 		blobRef, err := reference.WithDigest(pbs.repositoryName, dgst)
 		if err != nil {
-			context.GetLogger(ctx).Errorf("Error creating reference: %s", err)
+			dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err)
 			return
 		}
 
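Both hunks above follow the same migration pattern: the standard library's context package now takes the bare "context" import path, so the docker/distribution package of the same name is aliased to dcontext at its import site and every call site is rewritten accordingly. A minimal, self-contained sketch of the aliasing idiom, using the stdlib's own name clash between math/rand and crypto/rand in place of the context clash:

package main

import (
	crand "crypto/rand" // aliased: would otherwise clash with math/rand below
	"fmt"
	"math/rand"
)

func main() {
	// math/rand keeps the bare name; crypto/rand is reached via the alias,
	// mirroring how the stdlib "context" keeps its name while
	// "github.com/docker/distribution/context" becomes dcontext.
	fmt.Println(rand.Intn(10))
	buf := make([]byte, 4)
	if _, err := crand.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf)
}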

View File

@@ -1,6 +1,7 @@
 package proxy
 
 import (
+	"context"
 	"io/ioutil"
 	"math/rand"
 	"net/http"
@@ -10,7 +11,6 @@ import (
 	"time"
 
 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/proxy/scheduler"
 	"github.com/docker/distribution/registry/storage"
@@ -350,24 +350,30 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) {
 				w := httptest.NewRecorder()
 				r, err := http.NewRequest("GET", "", nil)
 				if err != nil {
-					t.Fatal(err)
+					t.Error(err)
+					return
 				}
 
 				err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest)
 				if err != nil {
-					t.Fatalf(err.Error())
+					t.Errorf(err.Error())
+					return
 				}
 
 				bodyBytes := w.Body.Bytes()
 				localDigest := digest.FromBytes(bodyBytes)
 				if localDigest != remoteBlob.Digest {
-					t.Fatalf("Mismatching blob fetch from proxy")
+					t.Errorf("Mismatching blob fetch from proxy")
+					return
 				}
 			}
 		}()
 	}
 
 	wg.Wait()
+	if t.Failed() {
+		t.FailNow()
+	}
 
 	remoteBlobCount := len(te.inRemote)
 	sbsMu.Lock()
@@ -404,7 +410,6 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) {
 		}
 	}
 
 	localStats = te.LocalStats()
-	remoteStats = te.RemoteStats()
 
 	// Ensure remote unchanged
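The test changes above are a correctness fix rather than cosmetics: t.Fatal and t.Fatalf stop the goroutine they run on via runtime.Goexit and are only valid from the goroutine running the test function, so inside the worker goroutines they become t.Error/t.Errorf plus an explicit return, and the new t.Failed/t.FailNow check after wg.Wait lets the test goroutine itself abort once the workers are done. A minimal sketch of the pattern:

package demo

import (
	"sync"
	"testing"
)

func TestParallelWorkers(t *testing.T) {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			if n == 99 { // stand-in for a real check
				// t.Fatal here would be illegal: it must run on the
				// test goroutine. Record the failure and bail out.
				t.Errorf("worker %d failed", n)
				return
			}
		}(i)
	}
	wg.Wait()
	// Back on the test goroutine, it is safe to stop the test early.
	if t.Failed() {
		t.FailNow()
	}
}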

View File

@@ -1,17 +1,18 @@
 package proxy
 
 import (
+	"context"
 	"time"
 
 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
+	dcontext "github.com/docker/distribution/context"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/proxy/scheduler"
 	"github.com/opencontainers/go-digest"
 )
 
 // todo(richardscothern): from cache control header or config
-const repositoryTTL = time.Duration(24 * 7 * time.Hour)
+const repositoryTTL = 24 * 7 * time.Hour
 
 type proxyManifestStore struct {
 	ctx context.Context
@@ -72,7 +73,7 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio
 	// Schedule the manifest blob for removal
 	repoBlob, err := reference.WithDigest(pms.repositoryName, dgst)
 	if err != nil {
-		context.GetLogger(ctx).Errorf("Error creating reference: %s", err)
+		dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err)
 		return nil, err
 	}
 
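As with blobTTL earlier, the explicit time.Duration conversion around repositoryTTL was redundant: multiplying the untyped constant 24 * 7 by time.Hour already yields a time.Duration, so the shorter form is identical in type and value. A minimal check, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"time"
)

// The untyped constant 24 * 7 times time.Hour is already a Duration.
const repositoryTTL = 24 * 7 * time.Hour

func main() {
	fmt.Println(repositoryTTL)                                  // 168h0m0s
	fmt.Println(repositoryTTL == time.Duration(24*7*time.Hour)) // true: the conversion was a no-op
}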

View File

@@ -1,12 +1,12 @@
 package proxy
 
 import (
+	"context"
 	"io"
 	"sync"
 	"testing"
 
 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/manifest"
 	"github.com/docker/distribution/manifest/schema1"
 	"github.com/docker/distribution/reference"
@@ -95,7 +95,8 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE
 	ctx := context.Background()
 	truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(),
 		storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()),
-		storage.Schema1SigningKey(k))
+		storage.Schema1SigningKey(k),
+		storage.EnableSchema1)
 	if err != nil {
 		t.Fatalf("error creating registry: %v", err)
 	}
@@ -117,7 +118,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE
 		t.Fatalf(err.Error())
 	}
 
-	localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k))
+	localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k), storage.EnableSchema1)
 	if err != nil {
 		t.Fatalf("error creating registry: %v", err)
 	}
@@ -216,7 +217,7 @@ func TestProxyManifests(t *testing.T) {
 		t.Fatalf("Error checking existence")
 	}
 	if !exists {
-		t.Errorf("Unexpected non-existant manifest")
+		t.Errorf("Unexpected non-existent manifest")
 	}
 
 	if (*localStats)["exists"] != 1 && (*remoteStats)["exists"] != 1 {
@@ -251,7 +252,7 @@ func TestProxyManifests(t *testing.T) {
 		t.Fatal(err)
 	}
 	if !exists {
-		t.Errorf("Unexpected non-existant manifest")
+		t.Errorf("Unexpected non-existent manifest")
 	}
 
 	if (*localStats)["exists"] != 2 && (*remoteStats)["exists"] != 1 {
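Both registry constructions in this file gain storage.EnableSchema1, reflecting that schema1 manifest support became opt-in in this version of docker/distribution; since storage.NewRegistry accepts variadic option values, enabling it is a single extra argument. A minimal sketch of that variadic-option construction style, where all names (registry, registryOption, newRegistry, enableSchema1, enableRedirect) are hypothetical stand-ins rather than the real storage API:

package main

import "fmt"

// registry and registryOption are hypothetical stand-ins.
type registry struct {
	schema1Enabled bool
	redirect       bool
}

type registryOption func(*registry)

func enableSchema1(r *registry)  { r.schema1Enabled = true }
func enableRedirect(r *registry) { r.redirect = true }

// newRegistry applies each option to a zero-valued registry,
// so adding a behavior is adding one more argument at the call site.
func newRegistry(opts ...registryOption) *registry {
	r := &registry{}
	for _, opt := range opts {
		opt(r)
	}
	return r
}

func main() {
	r := newRegistry(enableRedirect, enableSchema1)
	fmt.Printf("schema1=%v redirect=%v\n", r.schema1Enabled, r.redirect)
}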

View File

@@ -1,6 +1,7 @@
 package proxy
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"net/url"
@@ -8,7 +9,7 @@ import (
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/configuration"
-	"github.com/docker/distribution/context"
+	dcontext "github.com/docker/distribution/context"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/distribution/registry/client/auth"
 	"github.com/docker/distribution/registry/client/auth/challenge"
@@ -120,8 +121,21 @@ func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, la
 func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) {
 	c := pr.authChallenger
 
+	tkopts := auth.TokenHandlerOptions{
+		Transport:   http.DefaultTransport,
+		Credentials: c.credentialStore(),
+		Scopes: []auth.Scope{
+			auth.RepositoryScope{
+				Repository: name.Name(),
+				Actions:    []string{"pull"},
+			},
+		},
+		Logger: dcontext.GetLogger(ctx),
+	}
+
 	tr := transport.NewTransport(http.DefaultTransport,
-		auth.NewAuthorizer(c.challengeManager(), auth.NewTokenHandler(http.DefaultTransport, c.credentialStore(), name.Name(), "pull")))
+		auth.NewAuthorizer(c.challengeManager(),
+			auth.NewTokenHandlerWithOptions(tkopts)))
 
 	localRepo, err := pr.embedded.Repository(ctx, name)
 	if err != nil {
@@ -132,7 +146,7 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named
 		return nil, err
 	}
 
-	remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL.String(), tr)
+	remoteRepo, err := client.NewRepository(name, pr.remoteURL.String(), tr)
 	if err != nil {
 		return nil, err
 	}
@@ -218,7 +232,7 @@ func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error
 		return err
 	}
 
-	context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm)
+	dcontext.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm)
 
 	return nil
 }
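The change above replaces the positional auth.NewTokenHandler(transport, credentials, repository, action) call with auth.NewTokenHandlerWithOptions and a TokenHandlerOptions struct, which is what lets the proxy attach an explicit pull scope and a logger without the constructor's signature growing. A minimal sketch of that positional-to-options-struct refactor, with hypothetical names standing in for the real auth types:

package main

import "fmt"

// tokenHandler and tokenHandlerOptions are hypothetical stand-ins
// for the auth.TokenHandler / auth.TokenHandlerOptions pair.
type tokenHandler struct {
	repository string
	actions    []string
	logger     func(string)
}

// Positional constructor: adding a parameter breaks every caller.
func newTokenHandler(repository string, actions ...string) *tokenHandler {
	return &tokenHandler{repository: repository, actions: actions}
}

type tokenHandlerOptions struct {
	Repository string
	Actions    []string
	Logger     func(string)
}

// Options-struct constructor: new fields can be added compatibly,
// and callers name what they set.
func newTokenHandlerWithOptions(opts tokenHandlerOptions) *tokenHandler {
	return &tokenHandler{
		repository: opts.Repository,
		actions:    opts.Actions,
		logger:     opts.Logger,
	}
}

func main() {
	old := newTokenHandler("library/ubuntu", "pull")
	cur := newTokenHandlerWithOptions(tokenHandlerOptions{
		Repository: "library/ubuntu",
		Actions:    []string{"pull"},
		Logger:     func(msg string) { fmt.Println(msg) },
	})
	fmt.Println(old.repository, cur.repository)
}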

View File

@@ -1,8 +1,9 @@
 package proxy
 
 import (
+	"context"
+
 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
 )
 
 // proxyTagService supports local and remote lookup of tags.

View File

@@ -1,13 +1,13 @@
 package proxy
 
 import (
+	"context"
 	"reflect"
 	"sort"
 	"sync"
 	"testing"
 
 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
 )
 
 type mockTagStore struct {

View File

@@ -1,12 +1,13 @@
 package scheduler
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"sync"
 	"time"
 
-	"github.com/docker/distribution/context"
+	dcontext "github.com/docker/distribution/context"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/storage/driver"
 )
@@ -120,7 +121,7 @@ func (ttles *TTLExpirationScheduler) Start() error {
 		return fmt.Errorf("Scheduler already started")
 	}
 
-	context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...")
+	dcontext.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...")
 	ttles.stopped = false
 
 	// Start timer for each deserialized entry
@@ -142,7 +143,7 @@ func (ttles *TTLExpirationScheduler) Start() error {
 
 				err := ttles.writeState()
 				if err != nil {
-					context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
+					dcontext.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
 				} else {
 					ttles.indexDirty = false
 				}
@@ -163,7 +164,7 @@ func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duratio
 		Expiry:    time.Now().Add(ttl),
 		EntryType: eType,
 	}
-	context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now()))
+	dcontext.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now()))
 	if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil {
 		oldEntry.timer.Stop()
 	}
@@ -193,10 +194,10 @@ func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.
 		ref, err := reference.Parse(entry.Key)
 		if err == nil {
 			if err := f(ref); err != nil {
-				context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err)
+				dcontext.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err)
 			}
 		} else {
-			context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err)
+			dcontext.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err)
 		}
 
 		delete(ttles.entries, entry.Key)
@@ -210,7 +211,7 @@ func (ttles *TTLExpirationScheduler) Stop() {
 	defer ttles.Unlock()
 
 	if err := ttles.writeState(); err != nil {
-		context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
+		dcontext.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
 	}
 
 	for _, entry := range ttles.entries {
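For context on the file being touched: this scheduler keeps one timer per tracked entry and invokes an expiry callback when an entry's TTL elapses, stopping any previous timer when an entry is re-added (the oldEntry.timer.Stop() above). A minimal sketch of that per-entry timer pattern built on time.AfterFunc, where expiryScheduler and its methods are hypothetical stand-ins for the real TTLExpirationScheduler:

package main

import (
	"fmt"
	"sync"
	"time"
)

// expiryScheduler is a hypothetical, trimmed-down analogue:
// one timer per key, a callback on expiry.
type expiryScheduler struct {
	mu       sync.Mutex
	timers   map[string]*time.Timer
	onExpire func(key string)
}

func newExpiryScheduler(onExpire func(string)) *expiryScheduler {
	return &expiryScheduler{timers: map[string]*time.Timer{}, onExpire: onExpire}
}

// add (re)schedules expiry for key, stopping any previous timer,
// just as the real scheduler stops oldEntry.timer before replacing it.
func (s *expiryScheduler) add(key string, ttl time.Duration) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if t, ok := s.timers[key]; ok {
		t.Stop()
	}
	s.timers[key] = time.AfterFunc(ttl, func() {
		s.mu.Lock()
		delete(s.timers, key)
		s.mu.Unlock()
		s.onExpire(key)
	})
}

func main() {
	done := make(chan struct{})
	s := newExpiryScheduler(func(key string) {
		fmt.Println("expired:", key)
		close(done)
	})
	s.add("registry.example.com/foo@sha256:abc", 10*time.Millisecond)
	<-done
}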