
bump(*): kubernetes release-1.16.0 dependencies

commit 1652ba7976 (parent 5af668e89a)
Author: Mike Dame
Date:   2019-10-12 11:11:43 -04:00

28121 changed files with 3491095 additions and 2280257 deletions

glide.lock  generated  (262 lines changed)

@@ -1,64 +1,57 @@
-hash: ed51a8e643db6e9996ef0ffca671fb31ab5b7fe0d61ecdda828192871f9da366
+hash: f82bf395df6f00004b1794606a9256d5c69b62deb5498b574a08b2b5d15f7a6a
-updated: 2018-05-22T18:05:00.26435-07:00
+updated: 2019-10-12T11:11:23.472932-04:00
 imports:
 - name: cloud.google.com/go
-  version: 3b1ae45394a234c385be014e9a488f2bb6eef821
+  version: 8c41231e01b2085512d98153bcffb847ff9b4b9f
   subpackages:
   - compute/metadata
+  - internal
 - name: github.com/Azure/go-autorest
-  version: e14a70c556c8e0db173358d1a903dca345a8e75e
+  version: a13374e430327789ae5c206fb306a54a7c1591fe
   subpackages:
   - autorest
   - autorest/adal
   - autorest/azure
   - autorest/date
+  - logger
+  - version
 - name: github.com/davecgh/go-spew
-  version: 782f4967f2dc4564575ca782fe2d04090b5faca8
+  version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73
   subpackages:
   - spew
 - name: github.com/dgrijalva/jwt-go
-  version: 01aeca54ebda6e0fbfafd0a524d234159c05ec20
+  version: 06ea1031745cb8b3dab3f6a236daf2b0aa468b7e
 - name: github.com/docker/distribution
-  version: edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
+  version: 16128bbac47f75050e82f7e91b04df33775e0c23
   subpackages:
   - digestset
   - reference
-- name: github.com/emicklei/go-restful
-  version: ff4f55a206334ef123e4f79bbf348980da81ca46
-  subpackages:
-  - log
-- name: github.com/ghodss/yaml
-  version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
-- name: github.com/go-openapi/jsonpointer
-  version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
-- name: github.com/go-openapi/jsonreference
-  version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
-- name: github.com/go-openapi/spec
-  version: 7abd5745472fff5eb3685386d5fb8bf38683154d
-- name: github.com/go-openapi/swag
-  version: f3f9494671f93fcff853e3c6e9e948b3eb71e590
+- name: github.com/evanphx/json-patch
+  version: 5858425f75500d40c52783dce87d085a483ce135
 - name: github.com/gogo/protobuf
-  version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
+  version: 65acae22fc9d1fe290b33faa2bd64cdc20a463a0
   subpackages:
   - proto
   - sortkeys
 - name: github.com/golang/glog
   version: 23def4e6c14b4da8ac2ed8007337bc5eb5007998
 - name: github.com/golang/protobuf
-  version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9
+  version: b5d812f8a3706043e23a9cd5babf2e5423744d30
   subpackages:
+  - jsonpb
   - proto
   - ptypes
   - ptypes/any
   - ptypes/duration
+  - ptypes/struct
   - ptypes/timestamp
-- name: github.com/google/btree
-  version: 7d79101e329e5a3adf994758c578dab82b90c017
+- name: github.com/google/go-cmp
+  version: 6f77996f0c42f7b84e5a2b252227263f93432e9b
+  subpackages:
+  - cmp
+  - cmp/internal/diff
+  - cmp/internal/flags
+  - cmp/internal/function
+  - cmp/internal/value
 - name: github.com/google/gofuzz
-  version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
+  version: f140a6486e521aad38f5917de355cbf147cc0496
 - name: github.com/googleapis/gnostic
   version: 0c5108395e2debce0d731cf0287ddf7242066aba
   subpackages:
@@ -66,7 +59,7 @@ imports:
   - compiler
   - extensions
 - name: github.com/gophercloud/gophercloud
-  version: 8183543f90d1aef267a5ecc209f2e0715b355acb
+  version: c2d73b246b48e239d3f03c455905e06fe26e33c3
   subpackages:
   - openstack
   - openstack/identity/v2/tenants
@@ -74,103 +67,68 @@ imports:
   - openstack/identity/v3/tokens
   - openstack/utils
   - pagination
-- name: github.com/gregjones/httpcache
-  version: 787624de3eb7bd915c329cba748687a3b22666a6
-  subpackages:
-  - diskcache
 - name: github.com/hashicorp/golang-lru
-  version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+  version: 7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c
   subpackages:
   - simplelru
-- name: github.com/howeyc/gopass
-  version: bf9dde6d0d2c004a008c27aaee91170c786f6db8
 - name: github.com/imdario/mergo
-  version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
+  version: 9316a62528ac99aaecb4e47eadd6dc8aa6533d58
 - name: github.com/inconshreveable/mousetrap
   version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
 - name: github.com/json-iterator/go
-  version: 36b14963da70d11297d313183d7e6388c8510e1e
+  version: 27518f6661eba504be5a7a9a9f6d9460d892ade3
-- name: github.com/juju/ratelimit
-  version: 5b9ff866471762aa2ab2dced63c9fb6f53921342
-- name: github.com/kubernetes/repo-infra
-  version: dbcbd7624d5e4eb29f33c48edf1b1651809827a3
-- name: github.com/mailru/easyjson
-  version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d
-  subpackages:
-  - buffer
-  - jlexer
-  - jwriter
+- name: github.com/modern-go/concurrent
+  version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94
+- name: github.com/modern-go/reflect2
+  version: 94122c33edd36123c84d5368cfb2b69df93a0ec8
 - name: github.com/opencontainers/go-digest
-  version: a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
+  version: ac19fd6e7483ff933754af248d80be865e543d22
-- name: github.com/peterbourgon/diskv
-  version: 5f041e8faa004a95c88a202771f4cc3e991971e6
-- name: github.com/PuerkitoBio/purell
-  version: 8a290539e2e8629dbc4e6bad948158f790ec31f4
-- name: github.com/PuerkitoBio/urlesc
-  version: 5bd2802263f21d8788851d5305584c82a5c75d7e
 - name: github.com/spf13/cobra
-  version: f62e98d28ab7ad31d707ba837a966378465c7b57
+  version: 67fc4837d267bc9bfd6e47f77783fcc3dffc68de
 - name: github.com/spf13/pflag
-  version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
+  version: 298182f68c66c05229eb03ac171abe6e309ee79a
 - name: github.com/ugorji/go
-  version: f57d8945648dbfe4c332cff9c50fb57548958e3f
+  version: 42bc974514ff101a54c6b72a0e4dee29d96c0b26
   subpackages:
   - codec
 - name: golang.org/x/crypto
-  version: 81e90905daefcd6fd217b62423c0908922eadb30
+  version: e84da0312774c21d64ee2317962ef669b27ffb41
   subpackages:
-  - bcrypt
-  - blowfish
-  - nacl/secretbox
-  - poly1305
-  - salsa20/salsa
   - ssh/terminal
 - name: golang.org/x/net
-  version: 1c05540f6879653db88113bc4a2b70aec4bd491f
+  version: cdfb69ac37fc6fa907650654115ebebb3aae2087
   subpackages:
   - context
   - context/ctxhttp
-  - html
-  - html/atom
+  - http/httpguts
   - http2
   - http2/hpack
   - idna
-  - internal/timeseries
-  - lex/httplex
-  - trace
-  - websocket
 - name: golang.org/x/oauth2
-  version: a6bd8cefa1811bd24b86f8902872e4e8225f74c4
+  version: 9f3314589c9a9136388751d9adae6b0ed400978a
   subpackages:
   - google
   - internal
   - jws
   - jwt
 - name: golang.org/x/sys
-  version: 95c6576299259db960f6c5b9b69ea52422860fce
+  version: 3b5209105503162ded1863c307ac66fec31120dd
   subpackages:
   - unix
   - windows
 - name: golang.org/x/text
-  version: b19bf474d317b857955b12035d2c5acb57ce8b01
+  version: e6919f6577db79269a6443b9dc46d18f2238fb5d
   subpackages:
-  - cases
-  - internal
-  - internal/tag
-  - language
-  - runes
   - secure/bidirule
-  - secure/precis
   - transform
   - unicode/bidi
   - unicode/norm
-  - width
-- name: golang.org/x/tools
-  version: 8cab8a1319f0be9798e7fe78b15da75e5f94b2e9
+- name: golang.org/x/time
+  version: f51c12702a4d776e4c1fa9b0fabab841babae631
   subpackages:
-  - imports
+  - rate
 - name: google.golang.org/appengine
-  version: b1f26356af11148e710935ed1ac8a7f5702c7612
+  version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610
   subpackages:
   - internal
   - internal/app_identity
@@ -184,57 +142,65 @@ imports:
 - name: gopkg.in/inf.v0
   version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 - name: gopkg.in/yaml.v2
-  version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
+  version: 51d6538a90f86fe93ac480b35f37b2be17fef232
 - name: k8s.io/api
-  version: af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a
+  version: 95b840bb6a1f5f0462af804c8589396d294d4914
   subpackages:
+  - admission/v1
   - admission/v1beta1
-  - admissionregistration/v1alpha1
+  - admissionregistration/v1
   - admissionregistration/v1beta1
   - apps/v1
   - apps/v1beta1
   - apps/v1beta2
+  - auditregistration/v1alpha1
   - authentication/v1
   - authentication/v1beta1
   - authorization/v1
   - authorization/v1beta1
   - autoscaling/v1
   - autoscaling/v2beta1
+  - autoscaling/v2beta2
   - batch/v1
   - batch/v1beta1
   - batch/v2alpha1
   - certificates/v1beta1
+  - coordination/v1
+  - coordination/v1beta1
   - core/v1
+  - discovery/v1alpha1
   - events/v1beta1
   - extensions/v1beta1
   - imagepolicy/v1alpha1
   - networking/v1
+  - networking/v1beta1
+  - node/v1alpha1
+  - node/v1beta1
   - policy/v1beta1
   - rbac/v1
   - rbac/v1alpha1
   - rbac/v1beta1
+  - scheduling/v1
   - scheduling/v1alpha1
+  - scheduling/v1beta1
   - settings/v1alpha1
   - storage/v1
   - storage/v1alpha1
   - storage/v1beta1
 - name: k8s.io/apiextensions-apiserver
-  version: aaccab68c17a51ccdfd3c8559221cce9334d3394
+  version: 8f644eb6e783291c4b8cb8cb25a9983be1a74f5c
   subpackages:
   - pkg/features
 - name: k8s.io/apimachinery
-  version: 180eddb345a5be3a157cea1c624700ad5bd27b8f
+  version: 27d36303b6556f377b4f34e64705fa9024a12b0c
   subpackages:
   - pkg/api/errors
   - pkg/api/meta
   - pkg/api/resource
-  - pkg/apimachinery
-  - pkg/apimachinery/announced
-  - pkg/apimachinery/registered
   - pkg/apis/meta/internalversion
   - pkg/apis/meta/v1
   - pkg/apis/meta/v1/unstructured
-  - pkg/apis/meta/v1alpha1
+  - pkg/apis/meta/v1beta1
   - pkg/conversion
   - pkg/conversion/queryparams
   - pkg/fields
@@ -256,33 +222,35 @@ imports:
   - pkg/util/framer
   - pkg/util/intstr
   - pkg/util/json
+  - pkg/util/mergepatch
+  - pkg/util/naming
   - pkg/util/net
   - pkg/util/runtime
   - pkg/util/sets
+  - pkg/util/strategicpatch
   - pkg/util/validation
   - pkg/util/validation/field
   - pkg/util/wait
   - pkg/util/yaml
   - pkg/version
   - pkg/watch
+  - third_party/forked/golang/json
   - third_party/forked/golang/reflect
 - name: k8s.io/apiserver
-  version: 91e14f394e4796abf5a994a349a222e7081d86b6
+  version: bfa5e2e684ad413c22fd0dab55b2592af1ead049
   subpackages:
   - pkg/features
   - pkg/util/feature
-  - pkg/util/flag
-  - pkg/util/logs
 - name: k8s.io/client-go
-  version: 78700dec6369ba22221b72770783300f143df150
+  version: 1fbdaa4c8d908275eee958429b1cafc2591a2c5d
   subpackages:
   - discovery
   - discovery/fake
   - kubernetes
   - kubernetes/fake
   - kubernetes/scheme
-  - kubernetes/typed/admissionregistration/v1alpha1
-  - kubernetes/typed/admissionregistration/v1alpha1/fake
+  - kubernetes/typed/admissionregistration/v1
+  - kubernetes/typed/admissionregistration/v1/fake
   - kubernetes/typed/admissionregistration/v1beta1
   - kubernetes/typed/admissionregistration/v1beta1/fake
   - kubernetes/typed/apps/v1
@@ -291,6 +259,8 @@ imports:
   - kubernetes/typed/apps/v1beta1/fake
   - kubernetes/typed/apps/v1beta2
   - kubernetes/typed/apps/v1beta2/fake
+  - kubernetes/typed/auditregistration/v1alpha1
+  - kubernetes/typed/auditregistration/v1alpha1/fake
   - kubernetes/typed/authentication/v1
   - kubernetes/typed/authentication/v1/fake
   - kubernetes/typed/authentication/v1beta1
@@ -303,6 +273,8 @@ imports:
   - kubernetes/typed/autoscaling/v1/fake
   - kubernetes/typed/autoscaling/v2beta1
   - kubernetes/typed/autoscaling/v2beta1/fake
+  - kubernetes/typed/autoscaling/v2beta2
+  - kubernetes/typed/autoscaling/v2beta2/fake
   - kubernetes/typed/batch/v1
   - kubernetes/typed/batch/v1/fake
   - kubernetes/typed/batch/v1beta1
@@ -311,14 +283,26 @@ imports:
   - kubernetes/typed/batch/v2alpha1/fake
   - kubernetes/typed/certificates/v1beta1
   - kubernetes/typed/certificates/v1beta1/fake
+  - kubernetes/typed/coordination/v1
+  - kubernetes/typed/coordination/v1/fake
+  - kubernetes/typed/coordination/v1beta1
+  - kubernetes/typed/coordination/v1beta1/fake
   - kubernetes/typed/core/v1
   - kubernetes/typed/core/v1/fake
+  - kubernetes/typed/discovery/v1alpha1
+  - kubernetes/typed/discovery/v1alpha1/fake
   - kubernetes/typed/events/v1beta1
   - kubernetes/typed/events/v1beta1/fake
   - kubernetes/typed/extensions/v1beta1
   - kubernetes/typed/extensions/v1beta1/fake
   - kubernetes/typed/networking/v1
   - kubernetes/typed/networking/v1/fake
+  - kubernetes/typed/networking/v1beta1
+  - kubernetes/typed/networking/v1beta1/fake
+  - kubernetes/typed/node/v1alpha1
+  - kubernetes/typed/node/v1alpha1/fake
+  - kubernetes/typed/node/v1beta1
+  - kubernetes/typed/node/v1beta1/fake
   - kubernetes/typed/policy/v1beta1
   - kubernetes/typed/policy/v1beta1/fake
   - kubernetes/typed/rbac/v1
@@ -327,8 +311,12 @@ imports:
   - kubernetes/typed/rbac/v1alpha1/fake
   - kubernetes/typed/rbac/v1beta1
   - kubernetes/typed/rbac/v1beta1/fake
+  - kubernetes/typed/scheduling/v1
+  - kubernetes/typed/scheduling/v1/fake
   - kubernetes/typed/scheduling/v1alpha1
   - kubernetes/typed/scheduling/v1alpha1/fake
+  - kubernetes/typed/scheduling/v1beta1
+  - kubernetes/typed/scheduling/v1beta1/fake
   - kubernetes/typed/settings/v1alpha1
   - kubernetes/typed/settings/v1alpha1/fake
   - kubernetes/typed/storage/v1
@@ -338,9 +326,13 @@ imports:
   - kubernetes/typed/storage/v1beta1
   - kubernetes/typed/storage/v1beta1/fake
   - listers/core/v1
+  - pkg/apis/clientauthentication
+  - pkg/apis/clientauthentication/v1alpha1
+  - pkg/apis/clientauthentication/v1beta1
   - pkg/version
   - plugin/pkg/client/auth
   - plugin/pkg/client/auth/azure
+  - plugin/pkg/client/auth/exec
   - plugin/pkg/client/auth/gcp
   - plugin/pkg/client/auth/oidc
   - plugin/pkg/client/auth/openstack
@@ -358,42 +350,51 @@ imports:
   - tools/pager
   - tools/reference
   - transport
+  - util/buffer
   - util/cert
+  - util/connrotation
   - util/flowcontrol
   - util/homedir
-  - util/integer
   - util/jsonpath
+  - util/keyutil
+  - util/retry
 - name: k8s.io/code-generator
-  version: fef8bcdbaf36ac6a1a18c9ef7d85200b249fad30
+  version: cd179ad6a2693011d6f2fa5cd64c6680ee99379f
+- name: k8s.io/component-base
+  version: 547f6c5d70902c6683e93ad96f84adc6b943aedf
+  subpackages:
+  - cli/flag
+  - featuregate
+  - logs
 - name: k8s.io/gengo
-  version: 1ef560bbde5195c01629039ad3b337ce63e7b321
+  version: 7fa3014cb28f99ce2d9e803bcd3efa7edd4effa9
+- name: k8s.io/klog
+  version: 3ca30a56d8a775276f9cdae009ba326fdc05af7f
 - name: k8s.io/kube-openapi
-  version: 39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1
+  version: 743ec37842bffe49dd4221d9026f30fb1d5adbc4
   subpackages:
-  - pkg/builder
-  - pkg/common
-  - pkg/handler
-  - pkg/util
   - pkg/util/proto
 - name: k8s.io/kubernetes
-  version: 925c127ec6b946659ad0fd596fa959be43f0cc05
+  version: c97fe5036ef3df2967d086711e6c0c405941e14b
   subpackages:
   - pkg/api/legacyscheme
   - pkg/api/testapi
   - pkg/api/v1/resource
   - pkg/apis/admission
   - pkg/apis/admission/install
+  - pkg/apis/admission/v1
   - pkg/apis/admission/v1beta1
   - pkg/apis/admissionregistration
   - pkg/apis/admissionregistration/install
-  - pkg/apis/admissionregistration/v1alpha1
+  - pkg/apis/admissionregistration/v1
   - pkg/apis/admissionregistration/v1beta1
   - pkg/apis/apps
   - pkg/apis/apps/install
   - pkg/apis/apps/v1
   - pkg/apis/apps/v1beta1
   - pkg/apis/apps/v1beta2
+  - pkg/apis/auditregistration
+  - pkg/apis/auditregistration/install
+  - pkg/apis/auditregistration/v1alpha1
   - pkg/apis/authentication
   - pkg/apis/authentication/install
   - pkg/apis/authentication/v1
@@ -406,6 +407,7 @@ imports:
   - pkg/apis/autoscaling/install
   - pkg/apis/autoscaling/v1
   - pkg/apis/autoscaling/v2beta1
+  - pkg/apis/autoscaling/v2beta2
   - pkg/apis/batch
   - pkg/apis/batch/install
   - pkg/apis/batch/v1
@@ -414,15 +416,19 @@ imports:
   - pkg/apis/certificates
   - pkg/apis/certificates/install
   - pkg/apis/certificates/v1beta1
-  - pkg/apis/componentconfig
-  - pkg/apis/componentconfig/install
-  - pkg/apis/componentconfig/v1alpha1
+  - pkg/apis/coordination
+  - pkg/apis/coordination/install
+  - pkg/apis/coordination/v1
+  - pkg/apis/coordination/v1beta1
   - pkg/apis/core
   - pkg/apis/core/helper
   - pkg/apis/core/install
   - pkg/apis/core/v1
   - pkg/apis/core/v1/helper
   - pkg/apis/core/v1/helper/qos
+  - pkg/apis/discovery
+  - pkg/apis/discovery/install
+  - pkg/apis/discovery/v1alpha1
   - pkg/apis/events
   - pkg/apis/events/install
   - pkg/apis/events/v1beta1
@@ -435,6 +441,11 @@ imports:
   - pkg/apis/networking
   - pkg/apis/networking/install
   - pkg/apis/networking/v1
+  - pkg/apis/networking/v1beta1
+  - pkg/apis/node
+  - pkg/apis/node/install
+  - pkg/apis/node/v1alpha1
+  - pkg/apis/node/v1beta1
   - pkg/apis/policy
   - pkg/apis/policy/install
   - pkg/apis/policy/v1beta1
@@ -445,7 +456,9 @@ imports:
   - pkg/apis/rbac/v1beta1
   - pkg/apis/scheduling
   - pkg/apis/scheduling/install
+  - pkg/apis/scheduling/v1
   - pkg/apis/scheduling/v1alpha1
+  - pkg/apis/scheduling/v1beta1
   - pkg/apis/settings
   - pkg/apis/settings/install
   - pkg/apis/settings/v1alpha1
@@ -455,10 +468,17 @@ imports:
   - pkg/apis/storage/v1alpha1
   - pkg/apis/storage/v1beta1
   - pkg/features
-  - pkg/kubelet/apis
   - pkg/kubelet/types
-  - pkg/master/ports
+  - pkg/scheduler/algorithm/priorities/util
   - pkg/util/parsers
-  - pkg/util/pointer
-  - plugin/pkg/scheduler/algorithm/priorities/util
+- name: k8s.io/utils
+  version: 581e00157fb1a0435d4fac54a52d1ca1e481d60e
+  subpackages:
+  - buffer
+  - integer
+  - net
+  - pointer
+  - trace
+- name: sigs.k8s.io/yaml
+  version: fd68e9863619f6ec2fdd8625fe1f02e7c877e480
 testImports: []
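
Two renames in this lockfile ripple into consuming code: the apimachinery meta API moves from `pkg/apis/meta/v1alpha1` to `pkg/apis/meta/v1beta1`, and the pointer helpers move from `k8s.io/kubernetes/pkg/util/pointer` to `k8s.io/utils/pointer`. A minimal sketch of code built against the revisions pinned here; the kubeconfig path and namespace are placeholders, and this is illustrative rather than code from this commit:

```go
package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	utilpointer "k8s.io/utils/pointer" // previously k8s.io/kubernetes/pkg/util/pointer
)

func main() {
	// Build a client from a kubeconfig; the path is a placeholder.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	// In the client-go release pinned here, List takes the options directly;
	// the context.Context parameter arrives only in later releases.
	pods, err := clientset.CoreV1().Pods("kube-system").List(metav1.ListOptions{Limit: 10})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pods:", len(pods.Items))

	// Pointer helpers now come from k8s.io/utils/pointer.
	replicas := utilpointer.Int32Ptr(2)
	fmt.Println("replicas:", *replicas)
}
```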

vendor/cloud.google.com/go/.travis.yml  generated vendored  (deleted)

@@ -1,11 +0,0 @@
-sudo: false
-language: go
-go:
-- 1.6
-- 1.7
-install:
-- go get -v cloud.google.com/go/...
-script:
-- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
-- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
-  go test -race -v cloud.google.com/go/...

vendor/cloud.google.com/go/AUTHORS  generated vendored  (15 lines changed)

@@ -1,15 +0,0 @@
-# This is the official list of cloud authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS files.
-# See the latter for an explanation.
-# Names should be added to this file as:
-# Name or Organization <email address>
-# The email address is not required for organizations.
-Filippo Valsorda <hi@filippo.io>
-Google Inc.
-Ingo Oeser <nightlyone@googlemail.com>
-Palm Stone Games, Inc.
-Paweł Knap <pawelknap88@gmail.com>
-Péter Szilágyi <peterke@gmail.com>
-Tyler Treat <ttreat31@gmail.com>

vendor/cloud.google.com/go/CHANGES.md  generated vendored  Normal file  (1193 lines changed)

File diff suppressed because it is too large

vendor/cloud.google.com/go/CODE_OF_CONDUCT.md  generated vendored  Normal file  (44 lines changed)

@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+Examples of unacceptable behavior by participants include:
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing other's private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)

vendor/cloud.google.com/go/CONTRIBUTING.md  generated vendored

@@ -1,78 +1,190 @@
 # Contributing
 1. Sign one of the contributor license agreements below.
-1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
+1. `go get golang.org/x/review/git-codereview` to install the code reviewing
+tool.
+1. You will need to ensure that your `GOBIN` directory (by default
+`$GOPATH/bin`) is in your `PATH` so that git can find the command.
+1. If you would like, you may want to set up aliases for git-codereview,
+such that `git codereview change` becomes `git change`. See the
+[godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
+1. Should you run into issues with the git-codereview tool, please note
+that all error messages will assume that you have set up these aliases.
 1. Get the cloud package by running `go get -d cloud.google.com/go`.
-1. If you have already checked out the source, make sure that the remote git
-origin is https://code.googlesource.com/gocloud:
-git remote set-url origin https://code.googlesource.com/gocloud
+1. If you have already checked out the source, make sure that the remote
+git origin is https://code.googlesource.com/gocloud:
+```
+git remote set-url origin https://code.googlesource.com/gocloud
+```
 1. Make sure your auth is configured correctly by visiting
-https://code.googlesource.com, clicking "Generate Password", and following
-the directions.
+https://code.googlesource.com, clicking "Generate Password", and following the
+directions.
 1. Make changes and create a change by running `git codereview change <name>`,
 provide a commit message, and use `git codereview mail` to create a Gerrit CL.
-1. Keep amending to the change and mail as your receive feedback.
+1. Keep amending to the change with `git codereview change` and mail as your
+receive feedback. Each new mailed amendment will create a new patch set for
+your change in Gerrit.
+   - Note: if your change includes a breaking change, our breaking change
+     detector will cause CI/CD to fail. If your breaking change is acceptable
+     in some way, add BREAKING_CHANGE_ACCEPTABLE=<reason> to cause the
+     detector not to be run and to make it clear why that is acceptable.
 ## Integration Tests
-In addition to the unit tests, you may run the integration test suite.
-To run the integrations tests, creating and configuration of a project in the
-Google Developers Console is required.
-After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
-Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project)
-(or **Editor** and **Logs Configuration Writer** roles) are added to the
-service account.
-Once you create a project, set the following environment variables to be able to
-run the against the actual APIs.
+In addition to the unit tests, you may run the integration test suite. These
+directions describe setting up your environment to run integration tests for
+_all_ packages: note that many of these instructions may be redundant if you
+intend only to run integration tests on a single package.
+#### GCP Setup
+To run the integrations tests, creation and configuration of two projects in
+the Google Developers Console is required: one specifically for Firestore
+integration tests, and another for all other integration tests. We'll refer to
+these projects as "general project" and "Firestore project".
+After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
+for each project. Ensure the project-level **Owner**
+[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to
+each service account. During the creation of the service account, you should
+download the JSON credential file for use later.
-- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
-- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
-Install the [gcloud command-line tool][gcloudcli] to your machine and use it
-to create the indexes used in the datastore integration tests with indexes
-found in `datastore/testdata/index.yaml`:
+Next, ensure the following APIs are enabled in the general project:
+- BigQuery API
+- BigQuery Data Transfer API
+- Cloud Dataproc API
+- Cloud Dataproc Control API Private
+- Cloud Datastore API
+- Cloud Firestore API
+- Cloud Key Management Service (KMS) API
+- Cloud Natural Language API
+- Cloud OS Login API
+- Cloud Pub/Sub API
+- Cloud Resource Manager API
+- Cloud Spanner API
+- Cloud Speech API
+- Cloud Translation API
+- Cloud Video Intelligence API
+- Cloud Vision API
+- Compute Engine API
+- Compute Engine Instance Group Manager API
+- Container Registry API
+- Firebase Rules API
+- Google Cloud APIs
+- Google Cloud Deployment Manager V2 API
+- Google Cloud SQL
+- Google Cloud Storage
+- Google Cloud Storage JSON API
+- Google Compute Engine Instance Group Updater API
+- Google Compute Engine Instance Groups API
+- Kubernetes Engine API
+- Stackdriver Error Reporting API
+Next, create a Datastore database in the general project, and a Firestore
+database in the Firestore project.
+Finally, in the general project, create an API key for the translate API:
+- Go to GCP Developer Console.
+- Navigate to APIs & Services > Credentials.
+- Click Create Credentials > API Key.
+- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
+#### Local Setup
+Once the two projects are created and configured, set the following environment
+variables:
+- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
+bamboo-shift-455) for the general project.
+- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
+project's service account.
+- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
+(e.g. doorway-cliff-677) for the Firestore project.
+- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
+Firestore project's service account.
+- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
+in the form
+"projects/P/locations/L/keyRings/R". The creation of this is described below.
+- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API.
+- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
+Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
+create some resources used in integration tests.
 From the project's root directory:
 ``` sh
-# Set the default project in your env
+# Sets the default project in your env.
 $ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
-# Authenticate the gcloud tool with your account
+# Authenticates the gcloud tool with your account.
 $ gcloud auth login
-# Create the indexes
-$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
+# Create the indexes used in the datastore integration tests.
+$ gcloud datastore indexes create datastore/testdata/index.yaml
-```
-The Sink integration tests in preview/logging require a Google Cloud storage
-bucket with the same name as your test project, and with the Stackdriver Logging
-service account as owner:
-``` sh
+# Creates a Google Cloud storage bucket with the same name as your test project,
+# and with the Stackdriver Logging service account as owner, for the sink
+# integration tests in logging.
 $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
 $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
+# Creates a PubSub topic for integration tests of storage notifications.
+$ gcloud beta pubsub topics create go-storage-notification-test
+# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
+# "service-<numberic project id>@gs-project-accounts.iam.gserviceaccount.com"
+# as a publisher to that topic.
+# Creates a Spanner instance for the spanner integration tests.
+$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test'
+# NOTE: Spanner instances are priced by the node-hour, so you may want to
+# delete the instance after testing with 'gcloud beta spanner instances delete'.
+$ export MY_KEYRING=some-keyring-name
+$ export MY_LOCATION=global
+# Creates a KMS keyring, in the same location as the default location for your
+# project's buckets.
+$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
+# Creates two keys in the keyring, named key1 and key2.
+$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
+$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
+# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
+$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
+# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
+gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
 ```
-Once you've set the environment variables, you can run the integration tests by
+#### Running
+Once you've done the necessary setup, you can run the integration tests by
 running:
 ``` sh
 $ go test -v cloud.google.com/go/...
 ```
+#### Replay
+Some packages can record the RPCs during integration tests to a file for
+subsequent replay. To record, pass the `-record` flag to `go test`. The
+recording will be saved to the _package_`.replay` file. To replay integration
+tests from a saved recording, the replay file must be present, the `-short`
+flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
+environment variable must have a non-empty value.
 ## Contributor License Agreements
 Before we can accept your pull requests you'll need to sign a Contributor
 License Agreement (CLA):
 - **If you are an individual writing original source code** and **you own the
-- intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
-- **If you work for a company that wants to allow you to contribute your work**,
-then you'll need to sign a [corporate CLA][corpcla].
+- **If you work for a company that wants to allow you to contribute your
+work**, then you'll need to sign a [corporate CLA][corpcla].
 You can sign these electronically (just scroll to the bottom). After that,
 we'll be able to accept your pull requests.
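
The Replay section added in the diff above is ordinary `go test` flag plumbing combined with an environment variable. As a rough sketch of how a test could gate on that combination (the test name and logic here are hypothetical, not code from the vendored package):

```go
package example

import (
	"os"
	"testing"
)

// TestIntegrationExample illustrates the convention described above:
// -short alone skips the live integration test, while -short plus a
// non-empty GCLOUD_TESTS_GOLANG_ENABLE_REPLAY signals "run against the
// recorded .replay file instead of live RPCs".
func TestIntegrationExample(t *testing.T) {
	replay := os.Getenv("GCLOUD_TESTS_GOLANG_ENABLE_REPLAY") != ""
	if testing.Short() && !replay {
		t.Skip("skipping integration test in -short mode without replay")
	}
	// ... exercise the client against live APIs or the replay recording ...
}
```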

vendor/cloud.google.com/go/CONTRIBUTORS  generated vendored  (deleted)

@@ -1,34 +0,0 @@
-# People who have agreed to one of the CLAs and can contribute patches.
-# The AUTHORS file lists the copyright holders; this file
-# lists people. For example, Google employees are listed here
-# but not in AUTHORS, because Google holds the copyright.
-#
-# https://developers.google.com/open-source/cla/individual
-# https://developers.google.com/open-source/cla/corporate
-#
-# Names should be added to this file as:
-# Name <email address>
-# Keep the list alphabetically sorted.
-Andreas Litt <andreas.litt@gmail.com>
-Andrew Gerrand <adg@golang.org>
-Brad Fitzpatrick <bradfitz@golang.org>
-Burcu Dogan <jbd@google.com>
-Dave Day <djd@golang.org>
-David Sansome <me@davidsansome.com>
-David Symonds <dsymonds@golang.org>
-Filippo Valsorda <hi@filippo.io>
-Glenn Lewis <gmlewis@google.com>
-Ingo Oeser <nightlyone@googlemail.com>
-Johan Euphrosine <proppy@google.com>
-Jonathan Amsterdam <jba@google.com>
-Luna Duclos <luna.duclos@palmstonegames.com>
-Michael McGreevy <mcgreevy@golang.org>
-Omar Jarjur <ojarjur@google.com>
-Paweł Knap <pawelknap88@gmail.com>
-Péter Szilágyi <peterke@gmail.com>
-Sarah Adams <shadams@google.com>
-Toby Burress <kurin@google.com>
-Tuo Shan <shantuo@google.com>
-Tyler Treat <ttreat31@gmail.com>

vendor/cloud.google.com/go/LICENSE  generated vendored  (2 lines changed)

@@ -187,7 +187,7 @@
 same "printed page" as the copyright notice for easier
 identification within third-party archives.
-Copyright 2014 Google Inc.
+Copyright [yyyy] [name of copyright owner]
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

vendor/cloud.google.com/go/README.md  generated vendored  (420 lines changed)

@@ -1,47 +1,92 @@
# Google Cloud for Go # Google Cloud Client Libraries for Go
[![Build Status](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go)
[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go) [![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go)
Go packages for [Google Cloud Platform](https://cloud.google.com) services.
``` go ``` go
import "cloud.google.com/go" import "cloud.google.com/go"
``` ```
Go packages for Google Cloud Platform services. To install the packages on your system, *do not clone the repo*. Instead use
**NOTE:** These packages are under development, and may occasionally make ```
backwards-incompatible changes. $ go get -u cloud.google.com/go/...
```
**NOTE:** Some of these packages are under development, and may occasionally
make backwards-incompatible changes.
**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). **NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
* [News](#news)
* [Supported APIs](#supported-apis)
* [Go Versions Supported](#go-versions-supported)
* [Authorization](#authorization)
* [Cloud Datastore](#cloud-datastore-)
* [Cloud Storage](#cloud-storage-)
* [Cloud Pub/Sub](#cloud-pub-sub-)
* [BigQuery](#cloud-bigquery-)
* [Stackdriver Logging](#stackdriver-logging-)
* [Cloud Spanner](#cloud-spanner-)
## News ## News
_September 8, 2016_ _7 August 2018_
* New clients for some of Google's Machine Learning APIs: Vision, Speech, and As of November 1, the code in the repo will no longer support Go versions 1.8
Natural Language. and earlier. No one other than AppEngine users should be on those old versions,
and AppEngine
[Standard](https://groups.google.com/forum/#!topic/google-appengine-go/e7oPNomd7ak)
and
[Flex](https://groups.google.com/forum/#!topic/google-appengine-go/wHsYtxvEbXI)
will stop supporting new deployments with those versions on that date.
Changes have been moved to [CHANGES](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CHANGES.md).
* Preview version of a new [Stackdriver Logging][cloud-logging] client in
[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
This client uses gRPC as its transport layer, and supports log reading, sinks
and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
## Supported APIs ## Supported APIs
Google API | Status | Package Google API | Status | Package
-------------------------------|--------------|----------------------------------------------------------- ------------------------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref] [Asset][cloud-asset] | alpha | [`cloud.google.com/go/asset/v1beta`][cloud-asset-ref]
[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref] [BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Pub/Sub][cloud-pubsub] | experimental | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] [Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] [Cloudtasks][cloud-tasks] | stable | [`cloud.google.com/go/cloudtasks/apiv2`][cloud-tasks-ref]
[BigQuery][cloud-bigquery] | experimental | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] [Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
[Logging][cloud-logging] | experimental | [`cloud.google.com/go/logging`][cloud-logging-ref] [ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref]
[Vision][cloud-vision] | experimental | [`cloud.google.com/go/vision`][cloud-vision-ref] [Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref]
[Language][cloud-language] | experimental | [`cloud.google.com/go/language/apiv1beta1`][cloud-language-ref] [Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Speech][cloud-speech] | experimental | [`cloud.google.com/go/speech/apiv1beta`][cloud-speech-ref] [Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
[Dialogflow][cloud-dialogflow] | alpha | [`cloud.google.com/go/dialogflow/apiv2`][cloud-dialogflow-ref]
[Data Loss Prevention][cloud-dlp] | alpha | [`cloud.google.com/go/dlp/apiv2`][cloud-dlp-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
[Firestore][cloud-firestore] | stable | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
[IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`][cloud-iam-ref]
[IoT][cloud-iot] | alpha | [`cloud.google.com/iot/apiv1`][cloud-iot-ref]
[KMS][cloud-kms] | stable | [`cloud.google.com/go/kms`][cloud-kms-ref]
[Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-natural-language-ref]
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/go/oslogin/apiv1`][cloud-oslogin-ref]
[Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Phishing Protection][cloud-phishingprotection] | alpha | [`cloud.google.com/go/phishingprotection/apiv1betad1`][cloud-phishingprotection-ref]
[reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1betad1`][cloud-recaptcha-ref]
[Memorystore][cloud-memorystore] | alpha | [`cloud.google.com/go/redis/apiv1`][cloud-memorystore-ref]
[Scheduler][cloud-scheduler] | stable | [`cloud.google.com/go/scheduler/apiv1`][cloud-scheduler-ref]
[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Talent][cloud-talent] | alpha | [`cloud.google.com/go/talent/apiv4beta1`][cloud-talent-ref]
[Text To Speech][cloud-texttospeech] | alpha | [`cloud.google.com/go/texttospeech/apiv1`][cloud-texttospeech-ref]
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace/apiv2`][cloud-trace-ref]
[Translate][cloud-translate] | stable | [`cloud.google.com/go/translate`][cloud-translate-ref]
[Video Intelligence][cloud-video] | alpha | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
> **Alpha status**: the API is still being actively developed. As a
> **Experimental status**: the API is still being actively developed. As a
> result, it might change in backward-incompatible ways and is not recommended > result, it might change in backward-incompatible ways and is not recommended
> for production use. > for production use.
> >
@@ -62,9 +107,7 @@ for updates on these packages.
## Go Versions Supported ## Go Versions Supported
We support the two most recent major versions of Go. If Google App Engine uses We support the two most recent major versions of Go. If Google App Engine uses
an older version, we support that as well. You can see which versions are an older version, we support that as well.
currently supported by looking at the lines following `go:` in
[`.travis.yml`](.travis.yml).
## Authorization ## Authorization
@@ -72,35 +115,56 @@ By default, each API will use [Google Application Default Credentials][default-c
for authorization credentials used in calling the API endpoints. This will allow your for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration. application to run in many environments without requiring explicit configuration.
Manually-configured authorization can be achieved using the [snip]:# (auth)
```go
client, err := storage.NewClient(ctx)
```
To authorize using a
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
pass
[`option.WithCredentialsFile`](https://godoc.org/google.golang.org/api/option#WithCredentialsFile)
to the `NewClient` function of the desired package. For example:
[snip]:# (auth-JSON)
```go
client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
```
You can exert more control over authorization by using the
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to [`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
create an `oauth2.TokenSource`. This token source can be passed to the `NewClient` create an `oauth2.TokenSource`. Then pass
function for the relevant API using a
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource) [`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
option. to the `NewClient` function:
[snip]:# (auth-ts)
```go
tokenSource := ...
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
```
## Google Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) ## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore)
[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully- - [About Cloud Datastore][cloud-datastore]
managed, schemaless database for storing non-relational data. Cloud Datastore - [Activating the API for your project][cloud-datastore-activation]
automatically scales with your users and supports ACID transactions, high availability - [API documentation][cloud-datastore-docs]
of reads and writes, strong consistency for reads and ancestor queries, and eventual - [Go client documentation](https://godoc.org/cloud.google.com/go/datastore)
consistency for all other queries. - [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks)
Follow the [activation instructions][cloud-datastore-activation] to use the Google ### Example Usage
Cloud Datastore API with your project.
First create a `datastore.Client` to use throughout your application: First create a `datastore.Client` to use throughout your application:
[snip]:# (datastore-1)
```go ```go
client, err := datastore.NewClient(ctx, "my-project-id") client, err := datastore.NewClient(ctx, "my-project-id")
if err != nil { if err != nil {
log.Fatalln(err) log.Fatal(err)
} }
``` ```
Then use that client to interact with the API: Then use that client to interact with the API:
[snip]:# (datastore-2)
```go ```go
type Post struct { type Post struct {
Title string Title string
@@ -108,8 +172,8 @@ type Post struct {
PublishedAt time.Time PublishedAt time.Time
} }
keys := []*datastore.Key{ keys := []*datastore.Key{
datastore.NewKey(ctx, "Post", "post1", 0, nil), datastore.NameKey("Post", "post1", nil),
datastore.NewKey(ctx, "Post", "post2", 0, nil), datastore.NameKey("Post", "post2", nil),
} }
posts := []*Post{ posts := []*Post{
{Title: "Post 1", Body: "...", PublishedAt: time.Now()}, {Title: "Post 1", Body: "...", PublishedAt: time.Now()},
@@ -120,16 +184,18 @@ if _, err := client.PutMulti(ctx, keys, posts); err != nil {
} }
``` ```
## Google Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) ## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)
[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store - [About Cloud Storage][cloud-storage]
data on Google infrastructure with very high reliability, performance and availability, - [API documentation][cloud-storage-docs]
and can be used to distribute large data objects to users via direct download. - [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)
https://godoc.org/cloud.google.com/go/storage ### Example Usage
First create a `storage.Client` to use throughout your application: First create a `storage.Client` to use throughout your application:
[snip]:# (storage-1)
```go ```go
client, err := storage.NewClient(ctx) client, err := storage.NewClient(ctx)
if err != nil { if err != nil {
@@ -137,6 +203,7 @@ if err != nil {
} }
``` ```
[snip]:# (storage-2)
```go ```go
// Read the object1 from bucket. // Read the object1 from bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
@@ -150,15 +217,18 @@ if err != nil {
} }
``` ```
## Google Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) ## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)
[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect - [About Cloud Pubsub][cloud-pubsub]
your services with reliable, many-to-many, asynchronous messaging hosted on Google's - [API documentation][cloud-pubsub-docs]
infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation - [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
for building your own robust, global services. - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)
### Example Usage
First create a `pubsub.Client` to use throughout your application: First create a `pubsub.Client` to use throughout your application:
[snip]:# (pubsub-1)
```go ```go
client, err := pubsub.NewClient(ctx, "project-id") client, err := pubsub.NewClient(ctx, "project-id")
if err != nil { if err != nil {
@@ -166,38 +236,155 @@ if err != nil {
} }
``` ```
Then use the client to publish and subscribe:
[snip]:# (pubsub-2)
```go ```go
// Publish "hello world" on topic1. // Publish "hello world" on topic1.
topic := client.Topic("topic1") topic := client.Topic("topic1")
msgIDs, err := topic.Publish(ctx, &pubsub.Message{ res := topic.Publish(ctx, &pubsub.Message{
Data: []byte("hello world"), Data: []byte("hello world"),
}) })
// The publish happens asynchronously.
// Later, you can get the result from res:
...
msgID, err := res.Get(ctx)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
// Create an iterator to pull messages via subscription1. // Use a callback to receive messages via subscription1.
it, err := client.Subscription("subscription1").Pull(ctx) sub := client.Subscription("subscription1")
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
fmt.Println(m.Data)
m.Ack() // Acknowledge that we've consumed the message.
})
if err != nil { if err != nil {
log.Println(err) log.Println(err)
} }
defer it.Stop() ```
// Consume N messages from the iterator. ## BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery)
for i := 0; i < N; i++ {
msg, err := it.Next() - [About BigQuery][cloud-bigquery]
if err == pubsub.Done { - [API documentation][cloud-bigquery-docs]
- [Go client documentation][cloud-bigquery-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery)
### Example Usage
First create a `bigquery.Client` to use throughout your application:
[snip]:# (bq-1)
```go
c, err := bigquery.NewClient(ctx, "my-project-ID")
if err != nil {
// TODO: Handle error.
}
```
Then use that client to interact with the API:
[snip]:# (bq-2)
```go
// Construct a query.
q := c.Query(`
SELECT year, SUM(number)
FROM [bigquery-public-data:usa_names.usa_1910_2013]
WHERE name = "William"
GROUP BY year
ORDER BY year
`)
// Execute the query.
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
// Iterate through the results.
for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break break
} }
if err != nil { if err != nil {
log.Fatalf("Failed to retrieve message: %v", err) // TODO: Handle error.
} }
fmt.Println(values)
fmt.Printf("Message %d: %s\n", i, msg.Data)
msg.Done(true) // Acknowledge that we've consumed the message.
} }
``` ```
## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging)
- [About Stackdriver Logging][cloud-logging]
- [API documentation][cloud-logging-docs]
- [Go client documentation][cloud-logging-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)
### Example Usage
First create a `logging.Client` to use throughout your application:
[snip]:# (logging-1)
```go
ctx := context.Background()
client, err := logging.NewClient(ctx, "my-project")
if err != nil {
// TODO: Handle error.
}
```
Usually, you'll want to add log entries to a buffer to be periodically flushed
(automatically and asynchronously) to the Stackdriver Logging service.
[snip]:# (logging-2)
```go
logger := client.Logger("my-log")
logger.Log(logging.Entry{Payload: "something happened!"})
```
Close your client before your program exits, to flush any buffered log entries.
[snip]:# (logging-3)
```go
err = client.Close()
if err != nil {
// TODO: Handle error.
}
```
## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner)
- [About Cloud Spanner][cloud-spanner]
- [API documentation][cloud-spanner-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner)
### Example Usage
First create a `spanner.Client` to use throughout your application:
[snip]:# (spanner-1)
```go
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
if err != nil {
log.Fatal(err)
}
```
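Then use that client to read and write data: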
[snip]:# (spanner-2)
```go
// Simple Reads And Writes
_, err = client.Apply(ctx, []*spanner.Mutation{
spanner.Insert("Users",
[]string{"name", "email"},
[]interface{}{"alice", "a@example.com"})})
if err != nil {
log.Fatal(err)
}
row, err := client.Single().ReadRow(ctx, "Users",
spanner.Key{"alice"}, []string{"email"})
if err != nil {
log.Fatal(err)
}
```
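For several reads at the same timestamp, the client also provides read-only transactions. A minimal sketch (an illustrative addition, assuming the same `Users` table as above):
```go
ro := client.ReadOnlyTransaction()
defer ro.Close()
iter := ro.Read(ctx, "Users", spanner.AllKeys(), []string{"name", "email"})
// Do iterates over all rows and stops the iterator when it returns.
err = iter.Do(func(r *spanner.Row) error {
	var name, email string
	if err := r.Columns(&name, &email); err != nil {
		return err
	}
	fmt.Println(name, email)
	return nil
})
if err != nil {
	log.Fatal(err)
}
```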
## Contributing
Contributions are welcome. Please see the
@@ -215,31 +402,124 @@ for more information.
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
[cloud-firestore]: https://cloud.google.com/firestore/
[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate
[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
[cloud-storage]: https://cloud.google.com/storage/
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
[cloud-storage-docs]: https://cloud.google.com/storage/docs
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
[cloud-bigtable]: https://cloud.google.com/bigtable/
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable
[cloud-bigquery]: https://cloud.google.com/bigquery/
[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery
[cloud-logging]: https://cloud.google.com/logging/
[cloud-logging-docs]: https://cloud.google.com/logging/docs
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging
[cloud-monitoring]: https://cloud.google.com/monitoring/
[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3
[cloud-vision]: https://cloud.google.com/vision
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1
[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
[cloud-oslogin-ref]: https://cloud.google.com/go/oslogin/apiv1
[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1
[cloud-spanner]: https://cloud.google.com/spanner/
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
[cloud-spanner-docs]: https://cloud.google.com/spanner/docs
[cloud-translate]: https://cloud.google.com/translate
[cloud-translate-ref]: https://godoc.org/cloud.google.com/go/translate
[cloud-video]: https://cloud.google.com/video-intelligence/
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
[cloud-errors]: https://cloud.google.com/error-reporting/
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting
[cloud-container]: https://cloud.google.com/containers/
[cloud-container-ref]: https://godoc.org/cloud.google.com/go/container/apiv1
[cloud-debugger]: https://cloud.google.com/debugger/
[cloud-debugger-ref]: https://godoc.org/cloud.google.com/go/debugger/apiv2
[cloud-dlp]: https://cloud.google.com/dlp/
[cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1
[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
[cloud-dataproc]: https://cloud.google.com/dataproc/
[cloud-dataproc-docs]: https://cloud.google.com/dataproc/docs
[cloud-dataproc-ref]: https://godoc.org/cloud.google.com/go/dataproc/apiv1
[cloud-iam]: https://cloud.google.com/iam/
[cloud-iam-docs]: https://cloud.google.com/iam/docs
[cloud-iam-ref]: https://godoc.org/cloud.google.com/go/iam
[cloud-kms]: https://cloud.google.com/kms/
[cloud-kms-docs]: https://cloud.google.com/kms/docs
[cloud-kms-ref]: https://godoc.org/cloud.google.com/go/kms/apiv1
[cloud-natural-language]: https://cloud.google.com/natural-language/
[cloud-natural-language-docs]: https://cloud.google.com/natural-language/docs
[cloud-natural-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
[cloud-memorystore]: https://cloud.google.com/memorystore/
[cloud-memorystore-docs]: https://cloud.google.com/memorystore/docs
[cloud-memorystore-ref]: https://godoc.org/cloud.google.com/go/redis/apiv1
[cloud-texttospeech]: https://cloud.google.com/texttospeech/
[cloud-texttospeech-docs]: https://cloud.google.com/texttospeech/docs
[cloud-texttospeech-ref]: https://godoc.org/cloud.google.com/go/texttospeech/apiv1
[cloud-trace]: https://cloud.google.com/trace/
[cloud-trace-docs]: https://cloud.google.com/trace/docs
[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace/apiv2
[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
[cloud-dialogflow-docs]: https://cloud.google.com/dialogflow-enterprise/docs/
[cloud-dialogflow-ref]: https://godoc.org/cloud.google.com/go/dialogflow/apiv2
[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
[cloud-containeranalysis-docs]: https://cloud.google.com/container-analysis/api/reference/rest/
[cloud-containeranalysis-ref]: https://godoc.org/cloud.google.com/go/devtools/containeranalysis/apiv1beta1
[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
[cloud-asset-docs]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
[cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1
[cloud-tasks]: https://cloud.google.com/tasks/
[cloud-tasks-ref]: https://godoc.org/cloud.google.com/go/cloudtasks/apiv2
[cloud-scheduler]: https://cloud.google.com/scheduler
[cloud-scheduler-ref]: https://godoc.org/cloud.google.com/go/scheduler/apiv1
[cloud-iot]: https://cloud.google.com/iot-core/
[cloud-iot-ref]: https://godoc.org/cloud.google.com/go/iot/apiv1
[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/
[cloud-phishingprotection-ref]: https://cloud.google.com/go/phishingprotection/apiv1beta1
[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/
[cloud-recaptcha-ref]: https://cloud.google.com/go/recaptchaenterprise/apiv1beta1
[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
[cloud-talent-ref]: https://godoc.org/cloud.google.com/go/talent/apiv4beta1

17
vendor/cloud.google.com/go/RELEASING.md generated vendored Normal file

@@ -0,0 +1,17 @@
# How to Release this Repo
1. Determine the current release version with `git tag -l`. It should look
something like `vX.Y.Z`. We'll call the current version `$CV` and the new
version `$NV`.
1. On master, run `git log $CV..` to list all the changes since the last
release.
1. Edit `CHANGES.md` to include a summary of the changes.
1. Mail the CL containing the `CHANGES.md` changes. When the CL is approved,
submit it.
1. Without submitting any other CLs:
a. Switch to master.
b. `git pull`
c. Tag the repo with the next version: `git tag $NV`.
d. Push the tag: `git push origin $NV`.
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
with the new release, copying the contents of the CHANGES.md.


@@ -1,26 +0,0 @@
# This file configures AppVeyor (http://www.appveyor.com),
# a Windows-based CI service similar to Travis.
# Identifier for this run
version: "{build}"
# Clone the repo into this path, which conforms to the standard
# Go workspace structure.
clone_folder: c:\gopath\src\cloud.google.com\go
environment:
GOPATH: c:\gopath
install:
# Info for debugging.
- echo %PATH%
- go version
- go env
- go get -v -d -t ./...
# Provide a build script, or AppVeyor will call msbuild.
build_script:
- go install -v ./...
test_script:
- go test -short -v ./...

253
vendor/cloud.google.com/go/asset/apiv1/asset_client.go generated vendored Normal file

@@ -0,0 +1,253 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package asset
import (
"context"
"fmt"
"time"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/option"
"google.golang.org/api/transport"
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
ExportAssets []gax.CallOption
BatchGetAssetsHistory []gax.CallOption
}
func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("cloudasset.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
ExportAssets: retry[[2]string{"default", "non_idempotent"}],
BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
}
}
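// Note (illustrative, not part of the generated file): these defaults can be
// overridden per call by passing gax.CallOption values to a method, e.g.
//
//	op, err := c.ExportAssets(ctx, req, gax.WithRetry(func() gax.Retryer {
//		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
//			Initial:    time.Second,
//			Max:        30 * time.Second,
//			Multiplier: 2,
//		})
//	}))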
// Client is a client for interacting with Cloud Asset API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
client assetpb.AssetServiceClient
// LROClient is used internally to handle longrunning operations.
// It is exposed so that its CallOptions can be modified if required.
// Users should not Close this client.
LROClient *lroauto.OperationsClient
// The call options for this service.
CallOptions *CallOptions
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewClient creates a new asset service client.
//
// Asset service definition.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),
client: assetpb.NewAssetServiceClient(conn),
}
c.setGoogleClientInfo()
c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil {
// This error "should not happen", since we are just reusing an old connection
// and never actually need to dial.
// If this does happen, we could leak conn. However, we cannot close conn:
// If the user invoked the function with option.WithGRPCConn,
// we would close a connection that's still in use.
// TODO(pongad): investigate error conditions.
return nil, err
}
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ExportAssets exports assets with time and resource types to a given Cloud Storage
// location. The output format is newline-delimited JSON.
// This API implements the [google.longrunning.Operation][google.longrunning.Operation] API allowing you
// to keep track of the export.
func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &ExportAssetsOperation{
lro: longrunning.InternalNewOperation(c.LROClient, resp),
}, nil
}
// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
// For RESOURCE content, this API outputs history with the asset in both
// non-deleted and deleted states.
// For IAM_POLICY content, this API outputs history when the asset and its
// attached IAM POLICY both exist. This can create gaps in the output history.
// If a specified asset does not exist, this API returns an INVALID_ARGUMENT
// error.
func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
var resp *assetpb.BatchGetAssetsHistoryResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ExportAssetsOperation manages a long-running operation from ExportAssets.
type ExportAssetsOperation struct {
lro *longrunning.Operation
}
// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
return &ExportAssetsOperation{
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
}
}
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
var resp assetpb.ExportAssetsResponse
if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
return nil, err
}
return &resp, nil
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
var resp assetpb.ExportAssetsResponse
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
return nil, err
}
if !op.Done() {
return nil, nil
}
return &resp, nil
}
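// Illustrative usage (not part of the generated file): a caller that prefers
// manual polling over Wait might loop like this, where op was returned by
// ExportAssets:
//
//	for {
//		resp, err := op.Poll(ctx)
//		if err != nil {
//			break // the poll failed, or the operation completed with an error
//		}
//		if op.Done() {
//			_ = resp // the operation completed successfully; use resp
//			break
//		}
//		time.Sleep(5 * time.Second)
//	}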
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
var meta assetpb.ExportAssetsRequest
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}
// Done reports whether the long-running operation has completed.
func (op *ExportAssetsOperation) Done() bool {
return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *ExportAssetsOperation) Name() string {
return op.lro.Name()
}


@@ -0,0 +1,75 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package asset_test
import (
"context"
asset "cloud.google.com/go/asset/apiv1"
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1"
)
func ExampleNewClient() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExampleClient_ExportAssets() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &assetpb.ExportAssetsRequest{
// TODO: Fill request struct fields.
}
op, err := c.ExportAssets(ctx, req)
if err != nil {
// TODO: Handle error.
}
resp, err := op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_BatchGetAssetsHistory() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &assetpb.BatchGetAssetsHistoryRequest{
// TODO: Fill request struct fields.
}
resp, err := c.BatchGetAssetsHistory(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

100
vendor/cloud.google.com/go/asset/apiv1/doc.go generated vendored Normal file

@@ -0,0 +1,100 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
// Package asset is an auto-generated package for the
// Cloud Asset API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// The cloud asset API manages the history and inventory of cloud resources.
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
//
// For information about setting deadlines, reusing contexts, and more
// please visit godoc.org/cloud.google.com/go.
package asset // import "cloud.google.com/go/asset/apiv1"
import (
"context"
"runtime"
"strings"
"unicode"
"google.golang.org/grpc/metadata"
)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, making it suitable for reporting in a header.
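// For example (illustrative): "go1.12.5" yields "1.12.5" and
// "go1.13beta1" yields "1.13.0-beta1".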
func versionGo() string {
const develPrefix = "devel +"
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
const versionClient = "20190404"

266
vendor/cloud.google.com/go/asset/apiv1/mock_test.go generated vendored Normal file

@@ -0,0 +1,266 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package asset
import (
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)
import (
"context"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status
type mockAssetServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
// in the future.
assetpb.AssetServiceServer
reqs []proto.Message
// If set, all calls return this error.
err error
// responses to return if err == nil
resps []proto.Message
}
func (s *mockAssetServer) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest) (*longrunningpb.Operation, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*longrunningpb.Operation), nil
}
func (s *mockAssetServer) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest) (*assetpb.BatchGetAssetsHistoryResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*assetpb.BatchGetAssetsHistoryResponse), nil
}
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption
var (
mockAsset mockAssetServer
)
func TestMain(m *testing.M) {
flag.Parse()
serv := grpc.NewServer()
assetpb.RegisterAssetServiceServer(serv, &mockAsset)
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
log.Fatal(err)
}
go serv.Serve(lis)
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
clientOpt = option.WithGRPCConn(conn)
os.Exit(m.Run())
}
func TestAssetServiceExportAssets(t *testing.T) {
var expectedResponse *assetpb.ExportAssetsResponse = &assetpb.ExportAssetsResponse{}
mockAsset.err = nil
mockAsset.reqs = nil
any, err := ptypes.MarshalAny(expectedResponse)
if err != nil {
t.Fatal(err)
}
mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Response{Response: any},
})
var parent string = "parent-995424086"
var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
var request = &assetpb.ExportAssetsRequest{
Parent: parent,
OutputConfig: outputConfig,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.ExportAssets(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if err != nil {
t.Fatal(err)
}
if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestAssetServiceExportAssetsError(t *testing.T) {
errCode := codes.PermissionDenied
mockAsset.err = nil
mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Error{
Error: &status.Status{
Code: int32(errCode),
Message: "test error",
},
},
})
var parent string = "parent-995424086"
var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
var request = &assetpb.ExportAssetsRequest{
Parent: parent,
OutputConfig: outputConfig,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.ExportAssets(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestAssetServiceBatchGetAssetsHistory(t *testing.T) {
var expectedResponse *assetpb.BatchGetAssetsHistoryResponse = &assetpb.BatchGetAssetsHistoryResponse{}
mockAsset.err = nil
mockAsset.reqs = nil
mockAsset.resps = append(mockAsset.resps[:0], expectedResponse)
var parent string = "parent-995424086"
var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
var request = &assetpb.BatchGetAssetsHistoryRequest{
Parent: parent,
ContentType: contentType,
ReadTimeWindow: readTimeWindow,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BatchGetAssetsHistory(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestAssetServiceBatchGetAssetsHistoryError(t *testing.T) {
errCode := codes.PermissionDenied
mockAsset.err = gstatus.Error(errCode, "test error")
var parent string = "parent-995424086"
var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
var request = &assetpb.BatchGetAssetsHistoryRequest{
Parent: parent,
ContentType: contentType,
ReadTimeWindow: readTimeWindow,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BatchGetAssetsHistory(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}


@@ -0,0 +1,252 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package asset
import (
"context"
"fmt"
"time"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/option"
"google.golang.org/api/transport"
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
ExportAssets []gax.CallOption
BatchGetAssetsHistory []gax.CallOption
}
func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("cloudasset.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
ExportAssets: retry[[2]string{"default", "non_idempotent"}],
BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
}
}
// Client is a client for interacting with Cloud Asset API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
client assetpb.AssetServiceClient
// LROClient is used internally to handle longrunning operations.
// It is exposed so that its CallOptions can be modified if required.
// Users should not Close this client.
LROClient *lroauto.OperationsClient
// The call options for this service.
CallOptions *CallOptions
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewClient creates a new asset service client.
//
// Asset service definition.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),
client: assetpb.NewAssetServiceClient(conn),
}
c.setGoogleClientInfo()
c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil {
// This error "should not happen", since we are just reusing an old connection
// and never actually need to dial.
// If this does happen, we could leak conn. However, we cannot close conn:
// If the user invoked the function with option.WithGRPCConn,
// we would close a connection that's still in use.
// TODO(pongad): investigate error conditions.
return nil, err
}
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ExportAssets exports assets with time and resource types to a given Cloud Storage
// location. The output format is newline-delimited JSON.
// This API implements the
// [google.longrunning.Operation][google.longrunning.Operation] API allowing
// you to keep track of the export.
func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &ExportAssetsOperation{
lro: longrunning.InternalNewOperation(c.LROClient, resp),
}, nil
}
// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
// For RESOURCE content, this API outputs history with the asset in both
// non-deleted and deleted states.
// For IAM_POLICY content, this API outputs history when the asset and its
// attached IAM POLICY both exist. This can create gaps in the output history.
func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
var resp *assetpb.BatchGetAssetsHistoryResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ExportAssetsOperation manages a long-running operation from ExportAssets.
type ExportAssetsOperation struct {
lro *longrunning.Operation
}
// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
return &ExportAssetsOperation{
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
}
}
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
var resp assetpb.ExportAssetsResponse
if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
return nil, err
}
return &resp, nil
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
var resp assetpb.ExportAssetsResponse
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
return nil, err
}
if !op.Done() {
return nil, nil
}
return &resp, nil
}
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
var meta assetpb.ExportAssetsRequest
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}
// Done reports whether the long-running operation has completed.
func (op *ExportAssetsOperation) Done() bool {
return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *ExportAssetsOperation) Name() string {
return op.lro.Name()
}


@@ -0,0 +1,75 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package asset_test
import (
"context"
asset "cloud.google.com/go/asset/apiv1beta1"
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
)
func ExampleNewClient() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExampleClient_ExportAssets() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &assetpb.ExportAssetsRequest{
// TODO: Fill request struct fields.
}
op, err := c.ExportAssets(ctx, req)
if err != nil {
// TODO: Handle error.
}
resp, err := op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_BatchGetAssetsHistory() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &assetpb.BatchGetAssetsHistoryRequest{
// TODO: Fill request struct fields.
}
resp, err := c.BatchGetAssetsHistory(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

100
vendor/cloud.google.com/go/asset/apiv1beta1/doc.go generated vendored Normal file

@@ -0,0 +1,100 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
// Package asset is an auto-generated package for the
// Cloud Asset API.
//
// NOTE: This package is in beta. It is not stable, and may be subject to changes.
//
// The cloud asset API manages the history and inventory of cloud resources.
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
//
// For information about setting deadlines, reusing contexts, and more
// please visit godoc.org/cloud.google.com/go.
package asset // import "cloud.google.com/go/asset/apiv1beta1"
import (
"context"
"runtime"
"strings"
"unicode"
"google.golang.org/grpc/metadata"
)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, making it suitable for reporting in a header.
func versionGo() string {
const develPrefix = "devel +"
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
const versionClient = "20190404"


@@ -0,0 +1,266 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package asset
import (
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)
import (
"context"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status
type mockAssetServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
// in the future.
assetpb.AssetServiceServer
reqs []proto.Message
// If set, all calls return this error.
err error
// responses to return if err == nil
resps []proto.Message
}
func (s *mockAssetServer) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest) (*longrunningpb.Operation, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*longrunningpb.Operation), nil
}
func (s *mockAssetServer) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest) (*assetpb.BatchGetAssetsHistoryResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*assetpb.BatchGetAssetsHistoryResponse), nil
}
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption
var (
mockAsset mockAssetServer
)
func TestMain(m *testing.M) {
flag.Parse()
serv := grpc.NewServer()
assetpb.RegisterAssetServiceServer(serv, &mockAsset)
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
log.Fatal(err)
}
go serv.Serve(lis)
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
clientOpt = option.WithGRPCConn(conn)
os.Exit(m.Run())
}
func TestAssetServiceExportAssets(t *testing.T) {
var expectedResponse *assetpb.ExportAssetsResponse = &assetpb.ExportAssetsResponse{}
mockAsset.err = nil
mockAsset.reqs = nil
any, err := ptypes.MarshalAny(expectedResponse)
if err != nil {
t.Fatal(err)
}
mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Response{Response: any},
})
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
var request = &assetpb.ExportAssetsRequest{
Parent: formattedParent,
OutputConfig: outputConfig,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.ExportAssets(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if err != nil {
t.Fatal(err)
}
if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestAssetServiceExportAssetsError(t *testing.T) {
errCode := codes.PermissionDenied
mockAsset.err = nil
mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Error{
Error: &status.Status{
Code: int32(errCode),
Message: "test error",
},
},
})
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
var request = &assetpb.ExportAssetsRequest{
Parent: formattedParent,
OutputConfig: outputConfig,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.ExportAssets(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestAssetServiceBatchGetAssetsHistory(t *testing.T) {
var expectedResponse *assetpb.BatchGetAssetsHistoryResponse = &assetpb.BatchGetAssetsHistoryResponse{}
mockAsset.err = nil
mockAsset.reqs = nil
mockAsset.resps = append(mockAsset.resps[:0], expectedResponse)
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
var request = &assetpb.BatchGetAssetsHistoryRequest{
Parent: formattedParent,
ContentType: contentType,
ReadTimeWindow: readTimeWindow,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BatchGetAssetsHistory(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestAssetServiceBatchGetAssetsHistoryError(t *testing.T) {
errCode := codes.PermissionDenied
mockAsset.err = gstatus.Error(errCode, "test error")
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
var request = &assetpb.BatchGetAssetsHistoryRequest{
Parent: formattedParent,
ContentType: contentType,
ReadTimeWindow: readTimeWindow,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BatchGetAssetsHistory(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}


@@ -0,0 +1,248 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package asset
import (
"context"
"time"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/option"
"google.golang.org/api/transport"
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
ExportAssets []gax.CallOption
BatchGetAssetsHistory []gax.CallOption
}
func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("cloudasset.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
ExportAssets: retry[[2]string{"default", "non_idempotent"}],
BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
}
}
// Client is a client for interacting with Cloud Asset API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
client assetpb.AssetServiceClient
// LROClient is used internally to handle longrunning operations.
// It is exposed so that its CallOptions can be modified if required.
// Users should not Close this client.
LROClient *lroauto.OperationsClient
// The call options for this service.
CallOptions *CallOptions
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewClient creates a new asset service client.
//
// Asset service definition.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),
client: assetpb.NewAssetServiceClient(conn),
}
c.setGoogleClientInfo()
c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil {
// This error "should not happen", since we are just reusing an old connection
// and never actually need to dial.
// If this does happen, we could leak conn. However, we cannot close conn:
// If the user invoked the function with option.WithGRPCConn,
// we would close a connection that's still in use.
// TODO(pongad): investigate error conditions.
return nil, err
}
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ExportAssets exports assets with time and resource types to a given Cloud Storage
// location. The output format is newline-delimited JSON.
// This API implements the [google.longrunning.Operation][google.longrunning.Operation] API allowing you
// to keep track of the export.
func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &ExportAssetsOperation{
lro: longrunning.InternalNewOperation(c.LROClient, resp),
}, nil
}
// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
// For RESOURCE content, this API outputs history with the asset in both
// non-deleted and deleted states.
// For IAM_POLICY content, this API outputs history when the asset and its
// attached IAM POLICY both exist. This can create gaps in the output history.
func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
var resp *assetpb.BatchGetAssetsHistoryResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ExportAssetsOperation manages a long-running operation from ExportAssets.
type ExportAssetsOperation struct {
lro *longrunning.Operation
}
// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
return &ExportAssetsOperation{
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
}
}
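// A minimal sketch of resuming an export started elsewhere, assuming the
// operation name was persisted by the first process (the name below is a
// hypothetical placeholder):
//
//   op := c.ExportAssetsOperation("projects/my-project/operations/12345")
//   resp, err := op.Wait(ctx)
//   if err != nil {
//   	// TODO: Handle error.
//   }
//   _ = resp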
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
var resp assetpb.ExportAssetsResponse
if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
return nil, err
}
return &resp, nil
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
var resp assetpb.ExportAssetsResponse
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
return nil, err
}
if !op.Done() {
return nil, nil
}
return &resp, nil
}
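// A minimal polling-loop sketch for callers that prefer Poll over the blocking
// Wait (the interval below is an arbitrary illustrative choice):
//
//   for {
//   	resp, err := op.Poll(ctx)
//   	if err != nil {
//   		// TODO: Handle error.
//   	}
//   	if op.Done() {
//   		_ = resp // TODO: Use the response.
//   		break
//   	}
//   	time.Sleep(10 * time.Second)
//   }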
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
var meta assetpb.ExportAssetsRequest
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}
// Done reports whether the long-running operation has completed.
func (op *ExportAssetsOperation) Done() bool {
return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *ExportAssetsOperation) Name() string {
return op.lro.Name()
}

View File

@@ -0,0 +1,75 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package asset_test
import (
"context"
asset "cloud.google.com/go/asset/v1beta1"
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
)
func ExampleNewClient() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExampleClient_ExportAssets() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &assetpb.ExportAssetsRequest{
// TODO: Fill request struct fields.
}
op, err := c.ExportAssets(ctx, req)
if err != nil {
// TODO: Handle error.
}
resp, err := op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_BatchGetAssetsHistory() {
ctx := context.Background()
c, err := asset.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &assetpb.BatchGetAssetsHistoryRequest{
// TODO: Fill request struct fields.
}
resp, err := c.BatchGetAssetsHistory(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

89
vendor/cloud.google.com/go/asset/v1beta1/doc.go generated vendored Normal file
View File

@@ -0,0 +1,89 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package asset is an auto-generated package for the
// Cloud Asset API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// The cloud asset API manages the history and inventory of cloud resources.
package asset // import "cloud.google.com/go/asset/v1beta1"
import (
"context"
"runtime"
"strings"
"unicode"
"google.golang.org/grpc/metadata"
)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, making it suitable for reporting in a header.
func versionGo() string {
const develPrefix = "devel +"
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
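// For illustration, the normalization above maps runtime versions as follows
// (inputs are hypothetical examples):
//
//   "go1.12"                -> "1.12.0"
//   "go1.12.5"              -> "1.12.5"
//   "go1.13beta1"           -> "1.13.0-beta1"
//   "devel +abc123 Tue ..." -> "abc123"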
const versionClient = "20181219"

266
vendor/cloud.google.com/go/asset/v1beta1/mock_test.go generated vendored Normal file
View File

@@ -0,0 +1,266 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package asset
import (
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)
import (
"context"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status
type mockAssetServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
// in the future.
assetpb.AssetServiceServer
reqs []proto.Message
// If set, all calls return this error.
err error
// responses to return if err == nil
resps []proto.Message
}
func (s *mockAssetServer) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest) (*longrunningpb.Operation, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*longrunningpb.Operation), nil
}
func (s *mockAssetServer) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest) (*assetpb.BatchGetAssetsHistoryResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*assetpb.BatchGetAssetsHistoryResponse), nil
}
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption
var (
mockAsset mockAssetServer
)
func TestMain(m *testing.M) {
flag.Parse()
serv := grpc.NewServer()
assetpb.RegisterAssetServiceServer(serv, &mockAsset)
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
log.Fatal(err)
}
go serv.Serve(lis)
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
clientOpt = option.WithGRPCConn(conn)
os.Exit(m.Run())
}
func TestAssetServiceExportAssets(t *testing.T) {
var expectedResponse *assetpb.ExportAssetsResponse = &assetpb.ExportAssetsResponse{}
mockAsset.err = nil
mockAsset.reqs = nil
any, err := ptypes.MarshalAny(expectedResponse)
if err != nil {
t.Fatal(err)
}
mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Response{Response: any},
})
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
var request = &assetpb.ExportAssetsRequest{
Parent: formattedParent,
OutputConfig: outputConfig,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.ExportAssets(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if err != nil {
t.Fatal(err)
}
if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestAssetServiceExportAssetsError(t *testing.T) {
errCode := codes.PermissionDenied
mockAsset.err = nil
mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Error{
Error: &status.Status{
Code: int32(errCode),
Message: "test error",
},
},
})
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
var request = &assetpb.ExportAssetsRequest{
Parent: formattedParent,
OutputConfig: outputConfig,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.ExportAssets(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestAssetServiceBatchGetAssetsHistory(t *testing.T) {
var expectedResponse *assetpb.BatchGetAssetsHistoryResponse = &assetpb.BatchGetAssetsHistoryResponse{}
mockAsset.err = nil
mockAsset.reqs = nil
mockAsset.resps = append(mockAsset.resps[:0], expectedResponse)
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
var request = &assetpb.BatchGetAssetsHistoryRequest{
Parent: formattedParent,
ContentType: contentType,
ReadTimeWindow: readTimeWindow,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BatchGetAssetsHistory(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestAssetServiceBatchGetAssetsHistoryError(t *testing.T) {
errCode := codes.PermissionDenied
mockAsset.err = gstatus.Error(errCode, "test error")
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
var request = &assetpb.BatchGetAssetsHistoryRequest{
Parent: formattedParent,
ContentType: contentType,
ReadTimeWindow: readTimeWindow,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BatchGetAssetsHistory(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}

View File

@@ -1,60 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloud_test
import (
"cloud.google.com/go/datastore"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
func Example_applicationDefaultCredentials() {
ctx := context.Background()
// Use Google Application Default Credentials to authorize and authenticate the client.
// More information about Application Default Credentials and how to enable them is at
// https://developers.google.com/identity/protocols/application-default-credentials.
//
// This is the recommended way of authorizing and authenticating.
//
// Note: The example uses the datastore client, but the same steps apply to
// the other client libraries underneath this package.
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
// TODO: handle error.
}
// Use the client.
_ = client
}
func Example_serviceAccountFile() {
// Warning: The better way to use service accounts is to set GOOGLE_APPLICATION_CREDENTIALS
// and use the Application Default Credentials.
ctx := context.Background()
// Use a JSON key file associated with a Google service account to
// authenticate and authorize.
// Go to https://console.developers.google.com/permissions/serviceaccounts to create
// and download a service account key for your project.
//
// Note: The example uses the datastore client, but the same steps apply to
// the other client libraries underneath this package.
client, err := datastore.NewClient(ctx,
"project-id",
option.WithServiceAccountFile("/path/to/service-account-key.json"))
if err != nil {
// TODO: handle error.
}
// Use the client.
_ = client
}

View File

@@ -0,0 +1,8 @@
# BigQuery Benchmark
This directory contains benchmarks for the BigQuery client.
## Usage
`go run bench.go -- <your project id> queries.json`
The BigQuery service caches requests, so the benchmark should be run
at least twice; disregard the first result.

View File

@@ -0,0 +1,85 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//+build ignore
package main
import (
"context"
"encoding/json"
"flag"
"io/ioutil"
"log"
"time"
"cloud.google.com/go/bigquery"
"google.golang.org/api/iterator"
)
func main() {
flag.Parse()
ctx := context.Background()
c, err := bigquery.NewClient(ctx, flag.Arg(0))
if err != nil {
log.Fatal(err)
}
queriesJSON, err := ioutil.ReadFile(flag.Arg(1))
if err != nil {
log.Fatal(err)
}
var queries []string
if err := json.Unmarshal(queriesJSON, &queries); err != nil {
log.Fatal(err)
}
for _, q := range queries {
doQuery(ctx, c, q)
}
}
func doQuery(ctx context.Context, c *bigquery.Client, qt string) {
startTime := time.Now()
q := c.Query(qt)
it, err := q.Read(ctx)
if err != nil {
log.Fatal(err)
}
numRows, numCols := 0, 0
var firstByte time.Duration
for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
if numRows == 0 {
numCols = len(values)
firstByte = time.Since(startTime)
} else if numCols != len(values) {
log.Fatalf("got %d columns, want %d", len(values), numCols)
}
numRows++
}
log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec",
qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds())
}

View File

@@ -0,0 +1,10 @@
[
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000",
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000",
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000",
"SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000",
"SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id",
"SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId",
"SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000",
"SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000"
]

View File

@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,52 +14,44 @@
package bigquery

-// TODO(mcgreevy): support dry-run mode when creating jobs.

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"

-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	"golang.org/x/net/context"
	"cloud.google.com/go/internal"
	"cloud.google.com/go/internal/version"
	gax "github.com/googleapis/gax-go/v2"
	bq "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/option"
	htransport "google.golang.org/api/transport/http"
)

-const prodAddr = "https://www.googleapis.com/bigquery/v2/"
-
-// A Source is a source of data for the Copy function.
-type Source interface {
-	implementsSource()
-}
-
-// A Destination is a destination of data for the Copy function.
-type Destination interface {
-	implementsDestination()
-}
-
-// An Option is an optional argument to Copy.
-type Option interface {
-	implementsOption()
-}
-
-// A ReadSource is a source of data for the Read function.
-type ReadSource interface {
-	implementsReadSource()
-}
-
-// A ReadOption is an optional argument to Read.
-type ReadOption interface {
-	customizeRead(conf *pagingConf)
-}
-
-const Scope = "https://www.googleapis.com/auth/bigquery"
-const userAgent = "gcloud-golang-bigquery/20160429"
const (
	prodAddr = "https://www.googleapis.com/bigquery/v2/"
	// Scope is the Oauth2 scope for the service.
	Scope     = "https://www.googleapis.com/auth/bigquery"
	userAgent = "gcloud-golang-bigquery/20160429"
)

var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)

func setClientHeader(headers http.Header) {
	headers.Set("x-goog-api-client", xGoogHeader)
}

// Client may be used to perform BigQuery operations.
type Client struct {
-	service   service
	// Location, if set, will be used as the default location for all subsequent
	// dataset creation and job operations. A location specified directly in one of
	// those operations will override this value.
	Location string

	projectID string
	bqs       *bq.Service
}

// NewClient constructs a new Client which can perform BigQuery operations.
@@ -71,105 +63,100 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
		option.WithUserAgent(userAgent),
	}
	o = append(o, opts...)
-	httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
	httpClient, endpoint, err := htransport.NewClient(ctx, o...)
	if err != nil {
-		return nil, fmt.Errorf("dialing: %v", err)
		return nil, fmt.Errorf("bigquery: dialing: %v", err)
	}
-	s, err := newBigqueryService(httpClient, endpoint)
	bqs, err := bq.New(httpClient)
	if err != nil {
-		return nil, fmt.Errorf("constructing bigquery client: %v", err)
		return nil, fmt.Errorf("bigquery: constructing client: %v", err)
	}
	bqs.BasePath = endpoint
	c := &Client{
-		service:   s,
		projectID: projectID,
		bqs:       bqs,
	}
	return c, nil
}

-// initJobProto creates and returns a bigquery Job proto.
-// The proto is customized using any jobOptions in options.
-// The list of Options is returned with the jobOptions removed.
-func initJobProto(projectID string, options []Option) (*bq.Job, []Option) {
-	job := &bq.Job{}
-	var other []Option
-	for _, opt := range options {
-		if o, ok := opt.(jobOption); ok {
-			o.customizeJob(job, projectID)
-		} else {
-			other = append(other, opt)
-		}
-	}
-	return job, other
-}
-
-// Copy starts a BigQuery operation to copy data from a Source to a Destination.
-func (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error) {
-	switch dst := dst.(type) {
-	case *Table:
-		switch src := src.(type) {
-		case *GCSReference:
-			return c.load(ctx, dst, src, options)
-		case *Table:
-			return c.cp(ctx, dst, Tables{src}, options)
-		case Tables:
-			return c.cp(ctx, dst, src, options)
-		case *Query:
-			return c.query(ctx, dst, src, options)
-		}
-	case *GCSReference:
-		if src, ok := src.(*Table); ok {
-			return c.extract(ctx, dst, src, options)
-		}
-	}
-	return nil, fmt.Errorf("no Copy operation matches dst/src pair: dst: %T ; src: %T", dst, src)
-}
-
-// Query creates a query with string q. You may optionally set
-// DefaultProjectID and DefaultDatasetID on the returned query before using it.
-func (c *Client) Query(q string) *Query {
-	return &Query{Q: q, client: c}
-}
-
-// Read submits a query for execution and returns the results via an Iterator.
-//
-// Read uses a temporary table to hold the results of the query job.
-//
-// For more control over how a query is performed, don't use this method but
-// instead pass the Query as a Source to Client.Copy, and call Read on the
-// resulting Job.
-func (q *Query) Read(ctx context.Context, options ...ReadOption) (*Iterator, error) {
-	dest := &Table{}
-	job, err := q.client.Copy(ctx, dest, q, WriteTruncate)
-	if err != nil {
-		return nil, err
-	}
-	return job.Read(ctx, options...)
-}
-
-// executeQuery submits a query for execution and returns the results via an Iterator.
-func (c *Client) executeQuery(ctx context.Context, q *Query, options ...ReadOption) (*Iterator, error) {
-	dest := &Table{}
-	job, err := c.Copy(ctx, dest, q, WriteTruncate)
-	if err != nil {
-		return nil, err
-	}
-	return c.Read(ctx, job, options...)
-}
-
-// Dataset creates a handle to a BigQuery dataset in the client's project.
-func (c *Client) Dataset(id string) *Dataset {
-	return c.DatasetInProject(c.projectID, id)
-}
-
-// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
-func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
-	return &Dataset{
-		projectID: projectID,
-		id:        datasetID,
-		service:   c.service,
-	}
-}
// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
	return nil
}

// Calls the Jobs.Insert RPC and returns a Job.
func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {
	call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)
	setClientHeader(call.Header())
	if media != nil {
		call.Media(media)
	}
	var res *bq.Job
	var err error
	invoke := func() error {
		res, err = call.Do()
		return err
	}
	// A job with a client-generated ID can be retried; the presence of the
	// ID makes the insert operation idempotent.
	// We don't retry if there is media, because it is an io.Reader. We'd
	// have to read the contents and keep it in memory, and that could be expensive.
	// TODO(jba): Look into retrying if media != nil.
	if job.JobReference != nil && media == nil {
		err = runWithRetry(ctx, invoke)
	} else {
		err = invoke()
	}
	if err != nil {
		return nil, err
	}
	return bqToJob(res, c)
}

// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
	if m == 0 {
		return time.Time{}
	}
	return time.Unix(0, m*1e6)
}

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
	// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
	backoff := gax.Backoff{
		Initial:    1 * time.Second,
		Max:        32 * time.Second,
		Multiplier: 2,
	}
	return internal.Retry(ctx, backoff, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil
		}
		return !retryableError(err), err
	})
}

// This is the correct definition of retryable according to the BigQuery team. It
// also considers 502 ("Bad Gateway") and 503 ("Service Unavailable") errors
// retryable; these are returned by systems between the client and the BigQuery
// service.
func retryableError(err error) bool {
	e, ok := err.(*googleapi.Error)
	if !ok {
		return false
	}
	var reason string
	if len(e.Errors) > 0 {
		reason = e.Errors[0].Reason
	}
	return e.Code == http.StatusServiceUnavailable || e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
}
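The retry helper above delegates the backoff loop to internal.Retry with gax.Backoff parameters. A minimal standalone sketch of the same stop/error contract, using only the standard library (the names retry and fn and the backoff constants are illustrative assumptions, not part of the vendored package):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retry mirrors the stop/err contract of internal.Retry above: fn reports
// stop=true to end the loop (with or without an error), stop=false to back
// off and try again.
func retry(ctx context.Context, fn func() (stop bool, err error)) error {
	delay := 1 * time.Second // Initial, as in the gax.Backoff above.
	const max = 32 * time.Second
	for {
		stop, err := fn()
		if stop {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(delay):
		}
		if delay *= 2; delay > max { // Multiplier: 2, capped at Max.
			delay = max
		}
	}
}

func main() {
	attempts := 0
	err := retry(context.Background(), func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, errors.New("transient") // Retryable: keep going.
		}
		return true, nil // Success: stop.
	})
	fmt.Println(attempts, err) // 3 <nil>
}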

26493
vendor/cloud.google.com/go/bigquery/bigquery.replay generated vendored Normal file

File diff suppressed because one or more lines are too long

107
vendor/cloud.google.com/go/bigquery/copy.go generated vendored Normal file
View File

@@ -0,0 +1,107 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
bq "google.golang.org/api/bigquery/v2"
)
// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
// Srcs are the tables from which data will be copied.
Srcs []*Table
// Dst is the table into which the data will be copied.
Dst *Table
// CreateDisposition specifies the circumstances under which the destination table will be created.
// The default is CreateIfNeeded.
CreateDisposition TableCreateDisposition
// WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteEmpty.
WriteDisposition TableWriteDisposition
// The labels associated with this job.
Labels map[string]string
// Custom encryption configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfig *EncryptionConfig
}
func (c *CopyConfig) toBQ() *bq.JobConfiguration {
var ts []*bq.TableReference
for _, t := range c.Srcs {
ts = append(ts, t.toBQ())
}
return &bq.JobConfiguration{
Labels: c.Labels,
Copy: &bq.JobConfigurationTableCopy{
CreateDisposition: string(c.CreateDisposition),
WriteDisposition: string(c.WriteDisposition),
DestinationTable: c.Dst.toBQ(),
DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(),
SourceTables: ts,
},
}
}
func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
cc := &CopyConfig{
Labels: q.Labels,
CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition),
WriteDisposition: TableWriteDisposition(q.Copy.WriteDisposition),
Dst: bqToTable(q.Copy.DestinationTable, c),
DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration),
}
for _, t := range q.Copy.SourceTables {
cc.Srcs = append(cc.Srcs, bqToTable(t, c))
}
return cc
}
// A Copier copies data into a BigQuery table from one or more BigQuery tables.
type Copier struct {
JobIDConfig
CopyConfig
c *Client
}
// CopierFrom returns a Copier which can be used to copy data into a
// BigQuery table from one or more BigQuery tables.
// The returned Copier may optionally be further configured before its Run method is called.
func (t *Table) CopierFrom(srcs ...*Table) *Copier {
return &Copier{
c: t.c,
CopyConfig: CopyConfig{
Srcs: srcs,
Dst: t,
},
}
}
// Run initiates a copy job.
func (c *Copier) Run(ctx context.Context) (*Job, error) {
return c.c.insertJob(ctx, c.newJob(), nil)
}
func (c *Copier) newJob() *bq.Job {
return &bq.Job{
JobReference: c.JobIDConfig.createJobRef(c.c),
Configuration: c.CopyConfig.toBQ(),
}
}
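CopierFrom plus Run is the whole write path here. A minimal usage sketch against the public package, assuming hypothetical project, dataset, and table IDs:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	ctx := context.Background()
	// Project, dataset, and table IDs are hypothetical placeholders.
	client, err := bigquery.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	dst := client.Dataset("mydataset").Table("dst_table")
	src := client.Dataset("mydataset").Table("src_table")

	copier := dst.CopierFrom(src)
	copier.WriteDisposition = bigquery.WriteTruncate // Overwrite existing rows.

	job, err := copier.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	status, err := job.Wait(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if status.Err() != nil {
		log.Fatal(status.Err())
	}
	fmt.Println("copy done")
}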

View File

@@ -1,47 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type copyOption interface {
customizeCopy(conf *bq.JobConfigurationTableCopy)
}
func (c *Client) cp(ctx context.Context, dst *Table, src Tables, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationTableCopy{}
dst.customizeCopyDst(payload)
src.customizeCopySrc(payload)
for _, opt := range options {
o, ok := opt.(copyOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeCopy(payload)
}
job.Configuration = &bq.JobConfiguration{
Copy: payload,
}
return c.service.insertJob(ctx, job, c.projectID)
}

View File

@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,15 +15,16 @@
package bigquery

import (
-	"reflect"
	"testing"

-	"golang.org/x/net/context"
	"cloud.google.com/go/internal/testutil"
	"github.com/google/go-cmp/cmp/cmpopts"
	bq "google.golang.org/api/bigquery/v2"
)

func defaultCopyJob() *bq.Job {
	return &bq.Job{
		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
		Configuration: &bq.JobConfiguration{
			Copy: &bq.JobConfigurationTableCopy{
				DestinationTable: &bq.TableReference{
@@ -44,11 +45,14 @@ func defaultCopyJob() *bq.Job {
}

func TestCopy(t *testing.T) {
	defer fixRandomID("RANDOM")()
	testCases := []struct {
-		dst     *Table
-		src     Tables
-		options []Option
-		want    *bq.Job
		dst      *Table
		srcs     []*Table
		jobID    string
		location string
		config   CopyConfig
		want     *bq.Job
	}{
		{
			dst: &Table{
@@ -56,7 +60,7 @@ func TestCopy(t *testing.T) {
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
-			src: Tables{
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
@@ -71,34 +75,89 @@ func TestCopy(t *testing.T) {
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
-			src: Tables{
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
					TableID:   "s-table-id",
				},
			},
-			options: []Option{CreateNever, WriteTruncate},
			config: CopyConfig{
				CreateDisposition:           CreateNever,
				WriteDisposition:            WriteTruncate,
				DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
				Labels:                      map[string]string{"a": "b"},
			},
			want: func() *bq.Job {
				j := defaultCopyJob()
				j.Configuration.Labels = map[string]string{"a": "b"}
				j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
				j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
				j.Configuration.Copy.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
				return j
			}(),
		},
		{
			dst: &Table{
				ProjectID: "d-project-id",
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
					TableID:   "s-table-id",
				},
			},
			jobID: "job-id",
			want: func() *bq.Job {
				j := defaultCopyJob()
				j.JobReference.JobId = "job-id"
				return j
			}(),
		},
		{
			dst: &Table{
				ProjectID: "d-project-id",
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
					TableID:   "s-table-id",
				},
			},
			location: "asia-northeast1",
			want: func() *bq.Job {
				j := defaultCopyJob()
				j.JobReference.Location = "asia-northeast1"
				return j
			}(),
		},
	}
-	for _, tc := range testCases {
-		s := &testService{}
-		c := &Client{
-			service: s,
-		}
-		if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
-			t.Errorf("err calling cp: %v", err)
-			continue
-		}
-		if !reflect.DeepEqual(s.Job, tc.want) {
-			t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
-		}
-	}
-}
	c := &Client{projectID: "client-project-id"}
	for i, tc := range testCases {
		tc.dst.c = c
		copier := tc.dst.CopierFrom(tc.srcs...)
		copier.JobID = tc.jobID
		copier.Location = tc.location
		tc.config.Srcs = tc.srcs
		tc.config.Dst = tc.dst
		copier.CopyConfig = tc.config
		got := copier.newJob()
		checkJob(t, i, got, tc.want)

		jc, err := bqToJobConfig(got.Configuration, c)
		if err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig,
			cmpopts.IgnoreUnexported(Table{}))
		if diff != "" {
			t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
		}
	}
}

View File

@@ -1,79 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"time"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type createTableRecorder struct {
conf *createTableConf
service
}
func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error {
rec.conf = conf
return nil
}
func TestCreateTableOptions(t *testing.T) {
s := &createTableRecorder{}
c := &Client{
projectID: "p",
service: s,
}
ds := c.Dataset("d")
table := ds.Table("t")
exp := time.Now()
q := "query"
if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q)); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want := createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
expiration: exp,
viewQuery: q,
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want = createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
expiration: exp,
// No need for an elaborate schema, that is tested in schema_test.go.
schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
}

View File

@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,42 +14,523 @@
package bigquery

-import "golang.org/x/net/context"
import (
	"context"
	"errors"
	"fmt"
	"time"

	"cloud.google.com/go/internal/optional"
	"cloud.google.com/go/internal/trace"
	bq "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/iterator"
)

// Dataset is a reference to a BigQuery dataset.
type Dataset struct {
-	projectID string
-	id        string
-	service   service
	ProjectID string
	DatasetID string
	c         *Client
}

-// ListTables returns a list of all the tables contained in the Dataset.
-func (d *Dataset) ListTables(ctx context.Context) ([]*Table, error) {
-	var tables []*Table
-	err := getPages("", func(pageToken string) (string, error) {
-		ts, tok, err := d.service.listTables(ctx, d.projectID, d.id, pageToken)
-		if err == nil {
-			tables = append(tables, ts...)
-		}
-		return tok, err
-	})
// DatasetMetadata contains information about a BigQuery dataset.
type DatasetMetadata struct {
	// These fields can be set when creating a dataset.
	Name                   string            // The user-friendly name for this dataset.
	Description            string            // The user-friendly description of this dataset.
	Location               string            // The geo location of the dataset.
	DefaultTableExpiration time.Duration     // The default expiration time for new tables.
	Labels                 map[string]string // User-provided labels.
	Access                 []*AccessEntry    // Access permissions.

	// These fields are read-only.
	CreationTime     time.Time
	LastModifiedTime time.Time // When the dataset or any of its tables were modified.
	FullID           string    // The full dataset ID in the form projectID:datasetID.

	// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
	// ensure that the metadata hasn't changed since it was read.
	ETag string
}
// DatasetMetadataToUpdate is used when updating a dataset's metadata.
// Only non-nil fields will be updated.
type DatasetMetadataToUpdate struct {
Description optional.String // The user-friendly description of this table.
Name optional.String // The user-friendly name for this dataset.
// DefaultTableExpiration is the default expiration time for new tables.
// If set to time.Duration(0), new tables never expire.
DefaultTableExpiration optional.Duration
// The entire access list. It is not possible to replace individual entries.
Access []*AccessEntry
labelUpdater
}
// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
return c.DatasetInProject(c.projectID, id)
}
// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
return &Dataset{
ProjectID: projectID,
DatasetID: datasetID,
c: c,
}
}
// Create creates a dataset in the BigQuery service. An error will be returned if the
// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Create")
defer func() { trace.EndSpan(ctx, err) }()
ds, err := md.toBQ()
if err != nil {
return err
}
ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
// Use Client.Location as a default.
if ds.Location == "" {
ds.Location = d.c.Location
}
call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
setClientHeader(call.Header())
_, err = call.Do()
return err
}
func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
ds := &bq.Dataset{}
if dm == nil {
return ds, nil
}
ds.FriendlyName = dm.Name
ds.Description = dm.Description
ds.Location = dm.Location
ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
ds.Labels = dm.Labels
var err error
ds.Access, err = accessListToBQ(dm.Access)
-	if err != nil {
-		return nil, err
-	}
-	return tables, nil
-}
	if err != nil {
		return nil, err
	}
	if !dm.CreationTime.IsZero() {
return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
}
if !dm.LastModifiedTime.IsZero() {
return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
}
if dm.FullID != "" {
return nil, errors.New("bigquery: Dataset.FullID is not writable")
}
if dm.ETag != "" {
return nil, errors.New("bigquery: Dataset.ETag is not writable")
}
return ds, nil
} }
-// Create creates a dataset in the BigQuery service. An error will be returned
-// if the dataset already exists.
-func (d *Dataset) Create(ctx context.Context) error {
-	return d.service.insertDataset(ctx, d.id, d.projectID)
-}
func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
	var q []*bq.DatasetAccess
	for _, e := range a {
		a, err := e.toBQ()
		if err != nil {
			return nil, err
		}
		q = append(q, a)
	}
	return q, nil
}
// Delete deletes the dataset. Delete will fail if the dataset is not empty.
func (d *Dataset) Delete(ctx context.Context) (err error) {
return d.deleteInternal(ctx, false)
}
// DeleteWithContents deletes the dataset, as well as contained resources.
func (d *Dataset) DeleteWithContents(ctx context.Context) (err error) {
return d.deleteInternal(ctx, true)
}
func (d *Dataset) deleteInternal(ctx context.Context, deleteContents bool) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Delete")
defer func() { trace.EndSpan(ctx, err) }()
call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx).DeleteContents(deleteContents)
setClientHeader(call.Header())
return call.Do()
}
// Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (md *DatasetMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Metadata")
defer func() { trace.EndSpan(ctx, err) }()
call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
setClientHeader(call.Header())
var ds *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToDatasetMetadata(ds)
}
func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
dm := &DatasetMetadata{
CreationTime: unixMillisToTime(d.CreationTime),
LastModifiedTime: unixMillisToTime(d.LastModifiedTime),
DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
Description: d.Description,
Name: d.FriendlyName,
FullID: d.Id,
Location: d.Location,
Labels: d.Labels,
ETag: d.Etag,
}
for _, a := range d.Access {
e, err := bqToAccessEntry(a, nil)
if err != nil {
return nil, err
}
dm.Access = append(dm.Access, e)
}
return dm, nil
}
// Update modifies specific Dataset metadata fields.
// To perform a read-modify-write that protects against intervening reads,
// set the etag argument to the DatasetMetadata.ETag field from the read.
// Pass the empty string for etag for a "blind write" that will always succeed.
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (md *DatasetMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Update")
defer func() { trace.EndSpan(ctx, err) }()
ds, err := dm.toBQ()
if err != nil {
return nil, err
}
call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var ds2 *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds2, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToDatasetMetadata(ds2)
}
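// A minimal read-modify-write sketch using the ETag (the dataset handle ds and
// the new description are hypothetical placeholders):
//
//   md, err := ds.Metadata(ctx)
//   if err != nil {
//   	// TODO: Handle error.
//   }
//   update := DatasetMetadataToUpdate{Description: "new description"}
//   md2, err := ds.Update(ctx, update, md.ETag)
//   if err != nil {
//   	// TODO: Handle error (e.g. an ETag mismatch from a concurrent write).
//   }
//   _ = md2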
func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
ds := &bq.Dataset{}
forceSend := func(field string) {
ds.ForceSendFields = append(ds.ForceSendFields, field)
}
if dm.Description != nil {
ds.Description = optional.ToString(dm.Description)
forceSend("Description")
}
if dm.Name != nil {
ds.FriendlyName = optional.ToString(dm.Name)
forceSend("FriendlyName")
}
if dm.DefaultTableExpiration != nil {
dur := optional.ToDuration(dm.DefaultTableExpiration)
if dur == 0 {
// Send a null to delete the field.
ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
} else {
ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
}
}
if dm.Access != nil {
var err error
ds.Access, err = accessListToBQ(dm.Access)
if err != nil {
return nil, err
}
if len(ds.Access) == 0 {
ds.NullFields = append(ds.NullFields, "Access")
}
}
labels, forces, nulls := dm.update()
ds.Labels = labels
ds.ForceSendFields = append(ds.ForceSendFields, forces...)
ds.NullFields = append(ds.NullFields, nulls...)
return ds, nil
} }
// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
func (d *Dataset) Table(tableID string) *Table {
-	return &Table{ProjectID: d.projectID, DatasetID: d.id, TableID: tableID, service: d.service}
	return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c}
}
// Tables returns an iterator over the tables in the Dataset.
func (d *Dataset) Tables(ctx context.Context) *TableIterator {
it := &TableIterator{
ctx: ctx,
dataset: d,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.tables) },
func() interface{} { b := it.tables; it.tables = nil; return b })
return it
}
// A TableIterator is an iterator over Tables.
type TableIterator struct {
ctx context.Context
dataset *Dataset
tables []*Table
pageInfo *iterator.PageInfo
nextFunc func() error
}
// Next returns the next result. Its second return value is Done if there are
// no more results. Once Next returns Done, all subsequent calls will return
// Done.
func (it *TableIterator) Next() (*Table, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
t := it.tables[0]
it.tables = it.tables[1:]
return t, nil
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// for testing
var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
PageToken(pageToken).
Context(it.ctx)
setClientHeader(call.Header())
if pageSize > 0 {
call.MaxResults(int64(pageSize))
}
var res *bq.TableList
err := runWithRetry(it.ctx, func() (err error) {
res, err = call.Do()
return err
})
return res, err
}
func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
res, err := listTables(it, pageSize, pageToken)
if err != nil {
return "", err
}
for _, t := range res.Tables {
it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c))
}
return res.NextPageToken, nil
}
func bqToTable(tr *bq.TableReference, c *Client) *Table {
if tr == nil {
return nil
}
return &Table{
ProjectID: tr.ProjectId,
DatasetID: tr.DatasetId,
TableID: tr.TableId,
c: c,
}
}
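// A minimal iteration sketch over a dataset's tables, using the usual
// google.golang.org/api/iterator idiom (ds is a hypothetical *Dataset):
//
//   it := ds.Tables(ctx)
//   for {
//   	t, err := it.Next()
//   	if err == iterator.Done {
//   		break
//   	}
//   	if err != nil {
//   		// TODO: Handle error.
//   	}
//   	fmt.Println(t.TableID)
//   }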
// Datasets returns an iterator over the datasets in a project.
// The Client's project is used by default, but that can be
// changed by setting ProjectID on the returned iterator before calling Next.
func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
return c.DatasetsInProject(ctx, c.projectID)
}
// DatasetsInProject returns an iterator over the datasets in the provided project.
//
// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator.
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
it := &DatasetIterator{
ctx: ctx,
c: c,
ProjectID: projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}
// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
// ListHidden causes hidden datasets to be listed when set to true.
// Set before the first call to Next.
ListHidden bool
// Filter restricts the datasets returned by label. The filter syntax is described in
// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
// Set before the first call to Next.
Filter string
// The project ID of the listed datasets.
// Set before the first call to Next.
ProjectID string
ctx context.Context
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []*Dataset
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// Next returns the next Dataset. Its second return value is iterator.Done if
// there are no more results. Once Next returns Done, all subsequent calls will
// return Done.
func (it *DatasetIterator) Next() (*Dataset, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
item := it.items[0]
it.items = it.items[1:]
return item, nil
}
// for testing
var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
call := it.c.bqs.Datasets.List(it.ProjectID).
Context(it.ctx).
PageToken(pageToken).
All(it.ListHidden)
setClientHeader(call.Header())
if pageSize > 0 {
call.MaxResults(int64(pageSize))
}
if it.Filter != "" {
call.Filter(it.Filter)
}
var res *bq.DatasetList
err := runWithRetry(it.ctx, func() (err error) {
res, err = call.Do()
return err
})
return res, err
}
func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
res, err := listDatasets(it, pageSize, pageToken)
if err != nil {
return "", err
}
for _, d := range res.Datasets {
it.items = append(it.items, &Dataset{
ProjectID: d.DatasetReference.ProjectId,
DatasetID: d.DatasetReference.DatasetId,
c: it.c,
})
}
return res.NextPageToken, nil
}
// An AccessEntry describes the permissions that an entity has on a dataset.
type AccessEntry struct {
Role AccessRole // The role of the entity
EntityType EntityType // The type of entity
Entity string // The entity (individual or group) granted access
View *Table // The view granted access (EntityType must be ViewEntity)
}
// AccessRole is the level of access to grant to a dataset.
type AccessRole string
const (
// OwnerRole is the OWNER AccessRole.
OwnerRole AccessRole = "OWNER"
// ReaderRole is the READER AccessRole.
ReaderRole AccessRole = "READER"
// WriterRole is the WRITER AccessRole.
WriterRole AccessRole = "WRITER"
)
// EntityType is the type of entity in an AccessEntry.
type EntityType int
const (
// DomainEntity is a domain (e.g. "example.com").
DomainEntity EntityType = iota + 1
// GroupEmailEntity is an email address of a Google Group.
GroupEmailEntity
// UserEmailEntity is an email address of an individual user.
UserEmailEntity
// SpecialGroupEntity is a special group: one of projectOwners, projectReaders, projectWriters or
// allAuthenticatedUsers.
SpecialGroupEntity
// ViewEntity is a BigQuery view.
ViewEntity
)
func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) {
q := &bq.DatasetAccess{Role: string(e.Role)}
switch e.EntityType {
case DomainEntity:
q.Domain = e.Entity
case GroupEmailEntity:
q.GroupByEmail = e.Entity
case UserEmailEntity:
q.UserByEmail = e.Entity
case SpecialGroupEntity:
q.SpecialGroup = e.Entity
case ViewEntity:
q.View = e.View.toBQ()
default:
return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType)
}
return q, nil
}
func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) {
e := &AccessEntry{Role: AccessRole(q.Role)}
switch {
case q.Domain != "":
e.Entity = q.Domain
e.EntityType = DomainEntity
case q.GroupByEmail != "":
e.Entity = q.GroupByEmail
e.EntityType = GroupEmailEntity
case q.UserByEmail != "":
e.Entity = q.UserByEmail
e.EntityType = UserEmailEntity
case q.SpecialGroup != "":
e.Entity = q.SpecialGroup
e.EntityType = SpecialGroupEntity
case q.View != nil:
e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId)
e.EntityType = ViewEntity
default:
return nil, errors.New("bigquery: invalid access value")
}
return e, nil
} }
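// A minimal sketch of granting a group read access (ds and the email address
// are hypothetical placeholders):
//
//   md, err := ds.Metadata(ctx)
//   if err != nil {
//   	// TODO: Handle error.
//   }
//   entry := &AccessEntry{
//   	Role:       ReaderRole,
//   	EntityType: GroupEmailEntity,
//   	Entity:     "analysts@example.com",
//   }
//   _, err = ds.Update(ctx, DatasetMetadataToUpdate{
//   	Access: append(md.Access, entry),
//   }, md.ETag)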

View File

@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,91 +15,312 @@
package bigquery

import (
	"context"
	"errors"
-	"reflect"
	"strconv"
	"testing"
	"time"

-	"golang.org/x/net/context"
	"cloud.google.com/go/internal/testutil"
	"github.com/google/go-cmp/cmp"
	bq "google.golang.org/api/bigquery/v2"
	itest "google.golang.org/api/iterator/testing"
)

// readServiceStub services read requests by returning data from an in-memory list of values.
-type listTablesServiceStub struct {
type listTablesStub struct {
	expectedProject, expectedDataset string
-	values     [][]*Table        // contains pages of tables.
-	pageTokens map[string]string // maps incoming page token to returned page token.
-	service
	tables []*bq.TableListTables
}

-func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
-	if projectID != s.expectedProject {
-		return nil, "", errors.New("wrong project id")
-	}
-	if datasetID != s.expectedDataset {
-		return nil, "", errors.New("wrong dataset id")
-	}
-	tables := s.values[0]
-	s.values = s.values[1:]
-	return tables, s.pageTokens[pageToken], nil
-}
-
-func TestListTables(t *testing.T) {
-	t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
-	t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
-	t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
-	testCases := []struct {
-		data       [][]*Table
-		pageTokens map[string]string
-		want       []*Table
-	}{
-		{
-			data:       [][]*Table{{t1, t2}, {t3}},
-			pageTokens: map[string]string{"": "a", "a": ""},
-			want:       []*Table{t1, t2, t3},
-		},
-		{
-			data:       [][]*Table{{t1, t2}, {t3}},
-			pageTokens: map[string]string{"": ""}, // no more pages after first one.
-			want:       []*Table{t1, t2},
-		},
-	}
-	for _, tc := range testCases {
-		c := &Client{
-			service: &listTablesServiceStub{
-				expectedProject: "x",
-				expectedDataset: "y",
-				values:          tc.data,
-				pageTokens:      tc.pageTokens,
-			},
-			projectID: "x",
-		}
-		got, err := c.Dataset("y").ListTables(context.Background())
-		if err != nil {
-			t.Errorf("err calling ListTables: %v", err)
func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
	if it.dataset.ProjectID != s.expectedProject {
		return nil, errors.New("wrong project id")
	}
	if it.dataset.DatasetID != s.expectedDataset {
		return nil, errors.New("wrong dataset id")
	}
	const maxPageSize = 2
	if pageSize <= 0 || pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	start := 0
	if pageToken != "" {
		var err error
		start, err = strconv.Atoi(pageToken)
		if err != nil {
			return nil, err
		}
	}
	end := start + pageSize
	if end > len(s.tables) {
		end = len(s.tables)
	}
	nextPageToken := ""
	if end < len(s.tables) {
		nextPageToken = strconv.Itoa(end)
	}
	return &bq.TableList{
		Tables:        s.tables[start:end],
		NextPageToken: nextPageToken,
	}, nil
}
func TestTables(t *testing.T) {
c := &Client{projectID: "p1"}
inTables := []*bq.TableListTables{
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
}
outTables := []*Table{
{ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
{ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
{ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
}
lts := &listTablesStub{
expectedProject: "p1",
expectedDataset: "d1",
tables: inTables,
}
old := listTables
listTables = lts.listTables // cannot use t.Parallel with this test
defer func() { listTables = old }()
msg, ok := itest.TestIterator(outTables,
func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
if !ok {
t.Error(msg)
}
}
type listDatasetsStub struct {
expectedProject string
datasets []*bq.DatasetListDatasets
hidden map[*bq.DatasetListDatasets]bool
}
func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
const maxPageSize = 2
if pageSize <= 0 || pageSize > maxPageSize {
pageSize = maxPageSize
}
if it.Filter != "" {
return nil, errors.New("filter not supported")
}
if it.ProjectID != s.expectedProject {
return nil, errors.New("bad project ID")
}
start := 0
if pageToken != "" {
var err error
start, err = strconv.Atoi(pageToken)
if err != nil {
return nil, err
}
}
var (
i int
result []*bq.DatasetListDatasets
nextPageToken string
)
for i = start; len(result) < pageSize && i < len(s.datasets); i++ {
if s.hidden[s.datasets[i]] && !it.ListHidden {
continue
}
result = append(result, s.datasets[i])
}
if i < len(s.datasets) {
nextPageToken = strconv.Itoa(i)
}
return &bq.DatasetList{
Datasets: result,
NextPageToken: nextPageToken,
}, nil
}
func TestDatasets(t *testing.T) {
client := &Client{projectID: "p"}
inDatasets := []*bq.DatasetListDatasets{
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}},
}
outDatasets := []*Dataset{
{"p", "a", client},
{"p", "b", client},
{"p", "hidden", client},
{"p", "c", client},
}
lds := &listDatasetsStub{
expectedProject: "p",
datasets: inDatasets,
hidden: map[*bq.DatasetListDatasets]bool{inDatasets[2]: true},
}
old := listDatasets
listDatasets = lds.listDatasets // cannot use t.Parallel with this test
defer func() { listDatasets = old }()
msg, ok := itest.TestIterator(outDatasets,
func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it },
func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
if !ok {
t.Fatalf("ListHidden=true: %s", msg)
}
msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]},
func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it },
func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
if !ok {
t.Fatalf("ListHidden=false: %s", msg)
}
}
func TestDatasetToBQ(t *testing.T) {
for _, test := range []struct {
in *DatasetMetadata
want *bq.Dataset
}{
{nil, &bq.Dataset{}},
{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
{&DatasetMetadata{
Name: "name",
Description: "desc",
DefaultTableExpiration: time.Hour,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}},
}, &bq.Dataset{
FriendlyName: "name",
Description: "desc",
DefaultTableExpirationMs: 60 * 60 * 1000,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}},
}},
} {
got, err := test.in.toBQ()
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
}
}
// Check that non-writeable fields are unset.
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
for _, dm := range []*DatasetMetadata{
{CreationTime: aTime},
{LastModifiedTime: aTime},
{FullID: "x"},
{ETag: "e"},
} {
if _, err := dm.toBQ(); err == nil {
t.Errorf("%+v: got nil, want error", dm)
}
}
}
func TestBQToDatasetMetadata(t *testing.T) {
cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
cMillis := cTime.UnixNano() / 1e6
mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local)
mMillis := mTime.UnixNano() / 1e6
q := &bq.Dataset{
CreationTime: cMillis,
LastModifiedTime: mMillis,
FriendlyName: "name",
Description: "desc",
DefaultTableExpirationMs: 60 * 60 * 1000,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*bq.DatasetAccess{
{Role: "READER", UserByEmail: "joe@example.com"},
{Role: "WRITER", GroupByEmail: "users@example.com"},
},
Etag: "etag",
}
want := &DatasetMetadata{
CreationTime: cTime,
LastModifiedTime: mTime,
Name: "name",
Description: "desc",
DefaultTableExpiration: time.Hour,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*AccessEntry{
{Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity},
{Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity},
},
ETag: "etag",
}
got, err := bqToDatasetMetadata(q)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("-got, +want:\n%s", diff)
}
}
func TestDatasetMetadataToUpdateToBQ(t *testing.T) {
dm := DatasetMetadataToUpdate{
Description: "desc",
Name: "name",
DefaultTableExpiration: time.Hour,
}
dm.SetLabel("label", "value")
dm.DeleteLabel("del")
got, err := dm.toBQ()
if err != nil {
t.Fatal(err)
}
want := &bq.Dataset{
Description: "desc",
FriendlyName: "name",
DefaultTableExpirationMs: 60 * 60 * 1000,
Labels: map[string]string{"label": "value"},
ForceSendFields: []string{"Description", "FriendlyName"},
NullFields: []string{"Labels.del"},
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("-got, +want:\n%s", diff)
}
}
func TestConvertAccessEntry(t *testing.T) {
c := &Client{projectID: "pid"}
for _, e := range []*AccessEntry{
{Role: ReaderRole, Entity: "e", EntityType: DomainEntity},
{Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity},
{Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity},
{Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity},
{Role: ReaderRole, EntityType: ViewEntity,
View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}},
} {
q, err := e.toBQ()
if err != nil {
t.Fatal(err)
}
got, err := bqToAccessEntry(q, c)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" {
t.Errorf("got=-, want=+:\n%s", diff)
}
}
e := &AccessEntry{Role: ReaderRole, Entity: "e"}
if _, err := e.toBQ(); err == nil {
t.Error("got nil, want error")
}
if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil {
t.Error("got nil, want error")
}
}
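A pattern worth noting in these tests: the package routes its list RPCs through package-level function variables (listTables, listDatasets), which a test swaps for an in-memory stub and restores with defer; that shared mutable state is why they cannot run under t.Parallel. A generic sketch of the same seam, with hypothetical names:

	var fetchPage = func(token string) ([]string, string, error) {
		return nil, "", nil // real RPC in production
	}

	func TestWithStub(t *testing.T) {
		old := fetchPage
		fetchPage = func(token string) ([]string, string, error) {
			return []string{"a", "b"}, "", nil // canned page
		}
		defer func() { fetchPage = old }()
		// exercise code that calls fetchPage
	}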

View File

@@ -0,0 +1,67 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package datatransfer
import (
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)
import (
"context"
"fmt"
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
func TestDataTransferServiceSmoke(t *testing.T) {
if testing.Short() {
t.Skip("skipping smoke test in short mode")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
projectId := testutil.ProjID()
_ = projectId
c, err := NewClient(ctx, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}
var formattedParent string = fmt.Sprintf("projects/%s", projectId)
var request = &datatransferpb.ListDataSourcesRequest{
Parent: formattedParent,
}
iter := c.ListDataSources(ctx, request)
if _, err := iter.Next(); err != nil && err != iterator.Done {
t.Error(err)
}
}
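// Editorial note (hedged): testutil.TokenSource returns nil when integration
// credentials are not configured; cloud.google.com/go's internal/testutil
// conventionally reads the GCLOUD_TESTS_GOLANG_PROJECT_ID and
// GCLOUD_TESTS_GOLANG_KEY environment variables, so this smoke test runs only
// when those are set and the -short flag is absent.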

View File

@@ -0,0 +1,625 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package datatransfer
import (
"context"
"fmt"
"math"
"time"
"github.com/golang/protobuf/proto"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
GetDataSource []gax.CallOption
ListDataSources []gax.CallOption
CreateTransferConfig []gax.CallOption
UpdateTransferConfig []gax.CallOption
DeleteTransferConfig []gax.CallOption
GetTransferConfig []gax.CallOption
ListTransferConfigs []gax.CallOption
ScheduleTransferRuns []gax.CallOption
GetTransferRun []gax.CallOption
DeleteTransferRun []gax.CallOption
ListTransferRuns []gax.CallOption
ListTransferLogs []gax.CallOption
CheckValidCreds []gax.CallOption
}
func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
GetDataSource: retry[[2]string{"default", "idempotent"}],
ListDataSources: retry[[2]string{"default", "idempotent"}],
CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
GetTransferConfig: retry[[2]string{"default", "idempotent"}],
ListTransferConfigs: retry[[2]string{"default", "idempotent"}],
ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
GetTransferRun: retry[[2]string{"default", "idempotent"}],
DeleteTransferRun: retry[[2]string{"default", "idempotent"}],
ListTransferRuns: retry[[2]string{"default", "idempotent"}],
ListTransferLogs: retry[[2]string{"default", "idempotent"}],
CheckValidCreds: retry[[2]string{"default", "idempotent"}],
}
}
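// Editorial sketch: callers can override these defaults per call by passing
// gax.CallOption values; the codes and backoff below are illustrative, not
// the library's defaults:
//
//	resp, err := c.GetDataSource(ctx, req, gax.WithRetry(func() gax.Retryer {
//		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
//			Initial:    50 * time.Millisecond,
//			Max:        5 * time.Second,
//			Multiplier: 2,
//		})
//	}))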
// Client is a client for interacting with BigQuery Data Transfer API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
client datatransferpb.DataTransferServiceClient
// The call options for this service.
CallOptions *CallOptions
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewClient creates a new data transfer service client.
//
// The Google BigQuery Data Transfer Service API enables BigQuery users to
// configure the transfer of their data from other Google Products into
// BigQuery. This service contains the methods that are exposed to end users
// and that back the frontend.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),
client: datatransferpb.NewDataTransferServiceClient(conn),
}
c.setGoogleClientInfo()
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// GetDataSource retrieves a supported data source and returns its settings,
// which can be used for UI rendering.
func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
var resp *datatransferpb.DataSource
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListDataSources lists supported data sources and returns their settings,
// which can be used for UI rendering.
func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
it := &DataSourceIterator{}
req = proto.Clone(req).(*datatransferpb.ListDataSourcesRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
var resp *datatransferpb.ListDataSourcesResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.DataSources, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}
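// Editorial note: the wiring above is the standard GAPIC paging pattern.
// InternalFetch performs one RPC and returns a single page plus the next
// token; the fetch closure buffers that page into it.items; and
// iterator.NewPageInfo drives fetch, bufLen and takeBuf so that Next and
// PageInfo-based paging share one buffer. The remaining List methods below
// repeat the same shape.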
// CreateTransferConfig creates a new data transfer configuration.
func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// UpdateTransferConfig updates a data transfer configuration.
// All fields must be set, even if they are not updated.
func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "transfer_config.name", req.GetTransferConfig().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DeleteTransferConfig deletes a data transfer configuration,
// including any associated transfer runs and logs.
func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// GetTransferConfig returns information about a data transfer config.
func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListTransferConfigs returns information about all data transfers in the project.
func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
it := &TransferConfigIterator{}
req = proto.Clone(req).(*datatransferpb.ListTransferConfigsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
var resp *datatransferpb.ListTransferConfigsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferConfigs, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}
// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time].
// For each date - or whatever granularity the data source supports - in the
// range, one transfer run is created.
// Note that runs are created per UTC time in the time range.
func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
var resp *datatransferpb.ScheduleTransferRunsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// GetTransferRun returns information about the particular transfer run.
func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
var resp *datatransferpb.TransferRun
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DeleteTransferRun deletes the specified transfer run.
func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// ListTransferRuns returns information about running and completed jobs.
func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
it := &TransferRunIterator{}
req = proto.Clone(req).(*datatransferpb.ListTransferRunsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
var resp *datatransferpb.ListTransferRunsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferRuns, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}
// ListTransferLogs returns user facing log messages for the data transfer run.
func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
it := &TransferMessageIterator{}
req = proto.Clone(req).(*datatransferpb.ListTransferLogsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
var resp *datatransferpb.ListTransferLogsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferMessages, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}
// CheckValidCreds returns true if valid credentials exist for the given data source and
// requesting user.
// Some data sources don't support service accounts, so we need to talk to
// them on behalf of the end user. This API just checks whether we have an
// OAuth token for the particular user, which is a prerequisite before the
// user can create a transfer config.
func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
var resp *datatransferpb.CheckValidCredsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DataSourceIterator manages a stream of *datatransferpb.DataSource.
type DataSourceIterator struct {
items []*datatransferpb.DataSource
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
var item *datatransferpb.DataSource
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *DataSourceIterator) bufLen() int {
return len(it.items)
}
func (it *DataSourceIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
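// Editorial sketch: besides Next, the iterator supports explicit paging via
// google.golang.org/api/iterator's Pager (c and req are assumed):
//
//	it := c.ListDataSources(ctx, req)
//	p := iterator.NewPager(it, 50, "" /* start at the first page */)
//	var page []*datatransferpb.DataSource
//	nextToken, err := p.NextPage(&page)
//	// Persist nextToken to resume from the following page later.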
// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
type TransferConfigIterator struct {
items []*datatransferpb.TransferConfig
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) {
var item *datatransferpb.TransferConfig
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *TransferConfigIterator) bufLen() int {
return len(it.items)
}
func (it *TransferConfigIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage.
type TransferMessageIterator struct {
items []*datatransferpb.TransferMessage
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) {
var item *datatransferpb.TransferMessage
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *TransferMessageIterator) bufLen() int {
return len(it.items)
}
func (it *TransferMessageIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
// TransferRunIterator manages a stream of *datatransferpb.TransferRun.
type TransferRunIterator struct {
items []*datatransferpb.TransferRun
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferRunIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) {
var item *datatransferpb.TransferRun
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *TransferRunIterator) bufLen() int {
return len(it.items)
}
func (it *TransferRunIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}

View File

@@ -0,0 +1,289 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package datatransfer_test
import (
"context"
datatransfer "cloud.google.com/go/bigquery/datatransfer/apiv1"
"google.golang.org/api/iterator"
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)
func ExampleNewClient() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExampleClient_GetDataSource() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.GetDataSourceRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetDataSource(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_ListDataSources() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ListDataSourcesRequest{
// TODO: Fill request struct fields.
}
it := c.ListDataSources(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleClient_CreateTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.CreateTransferConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CreateTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_UpdateTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.UpdateTransferConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.UpdateTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_DeleteTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.DeleteTransferConfigRequest{
// TODO: Fill request struct fields.
}
err = c.DeleteTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleClient_GetTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.GetTransferConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_ListTransferConfigs() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ListTransferConfigsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTransferConfigs(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleClient_ScheduleTransferRuns() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ScheduleTransferRunsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ScheduleTransferRuns(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_GetTransferRun() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.GetTransferRunRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetTransferRun(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_DeleteTransferRun() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.DeleteTransferRunRequest{
// TODO: Fill request struct fields.
}
err = c.DeleteTransferRun(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleClient_ListTransferRuns() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ListTransferRunsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTransferRuns(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleClient_ListTransferLogs() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ListTransferLogsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTransferLogs(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleClient_CheckValidCreds() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.CheckValidCredsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CheckValidCreds(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

View File

@@ -0,0 +1,101 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
// Package datatransfer is an auto-generated package for the
// BigQuery Data Transfer API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Transfers data from partner SaaS applications to Google BigQuery on a
// scheduled, managed basis.
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
//
// For information about setting deadlines, reusing contexts, and more
// please visit godoc.org/cloud.google.com/go.
package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1"
import (
"context"
"runtime"
"strings"
"unicode"
"google.golang.org/grpc/metadata"
)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
const develPrefix = "devel +"
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
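// Editorial note: worked examples of the normalization above:
//
//	"go1.12.9"          -> "1.12.9"
//	"go1.13"            -> "1.13.0"        (missing patch component added)
//	"go1.13beta1"       -> "1.13.0-beta1"  (prerelease split off, then rejoined)
//	"devel +abc123 ..." -> "abc123"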
const versionClient = "20190404"

File diff suppressed because it is too large.

View File

@@ -0,0 +1,135 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datatransfer
// ProjectPath returns the path for the project resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s", project)
// instead.
func ProjectPath(project string) string {
return "" +
"projects/" +
project +
""
}
// LocationPath returns the path for the location resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s", project, location)
// instead.
func LocationPath(project, location string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
""
}
// LocationDataSourcePath returns the path for the location data source resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", project, location, dataSource)
// instead.
func LocationDataSourcePath(project, location, dataSource string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
"/dataSources/" +
dataSource +
""
}
// LocationTransferConfigPath returns the path for the location transfer config resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", project, location, transferConfig)
// instead.
func LocationTransferConfigPath(project, location, transferConfig string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
"/transferConfigs/" +
transferConfig +
""
}
// LocationRunPath returns the path for the location run resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", project, location, transferConfig, run)
// instead.
func LocationRunPath(project, location, transferConfig, run string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
"/transferConfigs/" +
transferConfig +
"/runs/" +
run +
""
}
// DataSourcePath returns the path for the data source resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/dataSources/%s", project, dataSource)
// instead.
func DataSourcePath(project, dataSource string) string {
return "" +
"projects/" +
project +
"/dataSources/" +
dataSource +
""
}
// TransferConfigPath returns the path for the transfer config resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/transferConfigs/%s", project, transferConfig)
// instead.
func TransferConfigPath(project, transferConfig string) string {
return "" +
"projects/" +
project +
"/transferConfigs/" +
transferConfig +
""
}
// RunPath returns the path for the run resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", project, transferConfig, run)
// instead.
func RunPath(project, transferConfig, run string) string {
return "" +
"projects/" +
project +
"/transferConfigs/" +
transferConfig +
"/runs/" +
run +
""
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,7 +12,299 @@
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package bigquery provides a client for the BigQuery service.
Note: This package is in beta. Some backwards-incompatible changes may occur.
The following assumes a basic familiarity with BigQuery concepts.
See https://cloud.google.com/bigquery/docs.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
Creating a Client
To start working with this package, create a client:
ctx := context.Background()
client, err := bigquery.NewClient(ctx, projectID)
if err != nil {
// TODO: Handle error.
}
Querying
To query existing tables, create a Query and call its Read method:
q := client.Query(`
SELECT year, SUM(number) as num
FROM ` + "`bigquery-public-data.usa_names.usa_1910_2013`" + `
WHERE name = "William"
GROUP BY year
ORDER BY year
`)
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
Then iterate through the resulting rows. You can store a row using
anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value.
A slice is simplest:
for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(values)
}
You can also use a struct whose exported fields match the query:
type Count struct {
Year int
Num int
}
for {
var c Count
err := it.Next(&c)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(c)
}
You can also start the query running and get the results later.
Create the query as above, but call Run instead of Read. This returns a Job,
which represents an asynchronous operation.
job, err := q.Run(ctx)
if err != nil {
// TODO: Handle error.
}
Get the job's ID, a printable string. You can save this string to retrieve
the results at a later time, even in another process.
jobID := job.ID()
fmt.Printf("The job ID is %s\n", jobID)
To retrieve the job's results from the ID, first look up the Job:
job, err = client.JobFromID(ctx, jobID)
if err != nil {
// TODO: Handle error.
}
Use the Job.Read method to obtain an iterator, and loop over the rows.
Query.Read is just a convenience method that combines Query.Run and Job.Read.
it, err = job.Read(ctx)
if err != nil {
// TODO: Handle error.
}
// Proceed with iteration as above.
Datasets and Tables
You can refer to datasets in the client's project with the Dataset method, and
in other projects with the DatasetInProject method:
myDataset := client.Dataset("my_dataset")
yourDataset := client.DatasetInProject("your-project-id", "your_dataset")
These methods create references to datasets, not the datasets themselves. You can have
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
create a dataset from a reference:
if err := myDataset.Create(ctx, nil); err != nil {
// TODO: Handle error.
}
You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference
to an object in BigQuery that may or may not exist.
table := myDataset.Table("my_table")
You can create, delete and update the metadata of tables with methods on Table.
For instance, you could create a temporary table with:
err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{
ExpirationTime: time.Now().Add(1*time.Hour)})
if err != nil {
// TODO: Handle error.
}
We'll see how to create a table with a schema in the next section.
Schemas
There are two ways to construct schemas with this package.
You can build a schema by hand, like so:
schema1 := bigquery.Schema{
{Name: "Name", Required: true, Type: bigquery.StringFieldType},
{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
{Name: "Optional", Required: false, Type: bigquery.IntegerFieldType},
}
Or you can infer the schema from a struct:
type student struct {
Name string
Grades []int
Optional bigquery.NullInt64
}
schema2, err := bigquery.InferSchema(student{})
if err != nil {
// TODO: Handle error.
}
// schema1 and schema2 are identical.
Struct inference supports tags like those of the encoding/json package, so you can
change names, ignore fields, or mark a field as nullable (non-required). Fields
declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool,
NullTimestamp, NullDate, NullTime, NullDateTime, and NullGeography) are
automatically inferred as nullable, so the "nullable" tag is only needed for []byte,
*big.Rat and pointer-to-struct fields.
type student2 struct {
Name string `bigquery:"full_name"`
Grades []int
Secret string `bigquery:"-"`
Optional []byte `bigquery:",nullable"`
}
schema3, err := bigquery.InferSchema(student2{})
if err != nil {
// TODO: Handle error.
}
// schema3 has required fields "full_name" and "Grades", and nullable BYTES field "Optional".
Having constructed a schema, you can create a table with it like so:
if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil {
// TODO: Handle error.
}
Copying
You can copy one or more tables to another table. Begin by constructing a Copier
describing the copy. Then set any desired copy options, and finally call Run to get a Job:
copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src"))
copier.WriteDisposition = bigquery.WriteTruncate
job, err = copier.Run(ctx)
if err != nil {
// TODO: Handle error.
}
You can chain the call to Run if you don't want to set options:
job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx)
if err != nil {
// TODO: Handle error.
}
You can wait for your job to complete:
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
Job.Wait polls with exponential backoff. You can also poll yourself, if you
wish:
for {
status, err := job.Status(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Done() {
if status.Err() != nil {
log.Fatalf("Job failed with error %v", status.Err())
}
break
}
time.Sleep(pollInterval)
}
Loading and Uploading
There are two ways to populate a table with this package: load the data from a Google Cloud Storage
object, or upload rows directly from your program.
For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure
it as well, and call its Run method.
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.AllowJaggedRows = true
loader := myDataset.Table("dest").LoaderFrom(gcsRef)
loader.CreateDisposition = bigquery.CreateNever
job, err = loader.Run(ctx)
// Poll the job for completion if desired, as above.
To upload, first define a type that implements the ValueSaver interface, which has a single method named Save.
Then create an Uploader, and call its Put method with a slice of values.
u := table.Uploader()
// Item implements the ValueSaver interface.
items := []*Item{
{Name: "n1", Size: 32.6, Count: 7},
{Name: "n2", Size: 4, Count: 2},
{Name: "n3", Size: 101.5, Count: 1},
}
if err := u.Put(ctx, items); err != nil {
// TODO: Handle error.
}
You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type
to specify the schema and insert ID by hand, or just supply the struct or struct pointer
directly and the schema will be inferred:
type Item2 struct {
Name string
Size float64
Count int
}
// Item2 does not implement ValueSaver; the schema is inferred from the struct type.
items2 := []*Item2{
{Name: "n1", Size: 32.6, Count: 7},
{Name: "n2", Size: 4, Count: 2},
{Name: "n3", Size: 101.5, Count: 1},
}
if err := u.Put(ctx, items2); err != nil {
// TODO: Handle error.
}
Extracting
If you've been following so far, extracting data from a BigQuery table
into a Google Cloud Storage object will feel familiar. First create an
Extractor, then optionally configure it, and lastly call its Run method.
extractor := table.ExtractorTo(gcsRef)
extractor.DisableHeader = true
job, err = extractor.Run(ctx)
// Poll the job for completion if desired, as above.
Errors
Errors returned by this client are often of the type googleapi.Error (https://godoc.org/google.golang.org/api/googleapi#Error).
These errors can be introspected for more information by type asserting to the richer googleapi.Error type. For example:
if e, ok := err.(*googleapi.Error); ok {
if e.Code == 409 { ... }
}
*/
package bigquery // import "cloud.google.com/go/bigquery"
vendor/cloud.google.com/go/bigquery/error.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@ import (
 )
 // An Error contains detailed information about a failed bigquery operation.
+// Detailed description of possible Reasons can be found here: https://cloud.google.com/bigquery/troubleshooting-errors.
 type Error struct {
 	// Mirrors bq.ErrorProto, but drops DebugInfo
 	Location, Message, Reason string
@@ -30,7 +31,7 @@ func (e Error) Error() string {
 	return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
 }
-func errorFromErrorProto(ep *bq.ErrorProto) *Error {
+func bqToError(ep *bq.ErrorProto) *Error {
 	if ep == nil {
 		return nil
 	}
vendor/cloud.google.com/go/bigquery/error_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,10 +16,10 @@ package bigquery
 import (
 	"errors"
-	"reflect"
 	"strings"
 	"testing"
+	"cloud.google.com/go/internal/testutil"
 	bq "google.golang.org/api/bigquery/v2"
 )
@@ -94,7 +94,7 @@ func TestErrorFromErrorProto(t *testing.T) {
 			want: &Error{Location: "L", Message: "M", Reason: "R"},
 		},
 	} {
-		if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) {
+		if got := bqToError(test.in); !testutil.Equal(got, test.want) {
 			t.Errorf("%v: got %v, want %v", test.in, got, test.want)
 		}
 	}
vendor/cloud.google.com/go/bigquery/examples_test.go (new file, generated, vendored)
@@ -0,0 +1,829 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery_test
import (
"context"
"fmt"
"os"
"time"
"cloud.google.com/go/bigquery"
"google.golang.org/api/iterator"
)
func ExampleNewClient() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
_ = client // TODO: Use client.
}
func ExampleClient_Dataset() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
fmt.Println(ds)
}
func ExampleClient_DatasetInProject() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.DatasetInProject("their-project-id", "their-dataset")
fmt.Println(ds)
}
func ExampleClient_Datasets() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Datasets(ctx)
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleClient_DatasetsInProject() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.DatasetsInProject(ctx, "their-project-id")
_ = it // TODO: iterate using Next or iterator.Pager.
}
func getJobID() string { return "" }
func ExampleClient_JobFromID() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere.
job, err := client.JobFromID(ctx, jobID)
if err != nil {
// TODO: Handle error.
}
fmt.Println(job.LastStatus()) // Display the job's status.
}
func ExampleClient_Jobs() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Jobs(ctx)
it.State = bigquery.Running // list only running jobs.
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleNewGCSReference() {
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
fmt.Println(gcsRef)
}
func ExampleClient_Query() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
q.DefaultProjectID = "project-id"
// TODO: set other options on the Query.
// TODO: Call Query.Run or Query.Read.
}
func ExampleClient_Query_parameters() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select num from t1 where name = @user")
q.Parameters = []bigquery.QueryParameter{
{Name: "user", Value: "Elizabeth"},
}
// TODO: set other options on the Query.
// TODO: Call Query.Run or Query.Read.
}
// This example demonstrates how to run a query job on a table
// with a customer-managed encryption key. The same
// applies to load and copy jobs as well.
func ExampleClient_Query_encryptionKey() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
// TODO: Replace this key with a key you have created in Cloud KMS.
keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{KMSKeyName: keyName}
// TODO: set other options on the Query.
// TODO: Call Query.Run or Query.Read.
}
func ExampleQuery_Read() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleRowIterator_Next() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
for {
var row []bigquery.Value
err := it.Next(&row)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(row)
}
}
func ExampleRowIterator_Next_struct() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
type score struct {
Name string
Num int
}
q := client.Query("select name, num from t1")
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
for {
var s score
err := it.Next(&s)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(s)
}
}
func ExampleJob_Read() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
// Call Query.Run to get a Job, then call Read on the job.
// Note: Query.Read is a shorthand for this.
job, err := q.Run(ctx)
if err != nil {
// TODO: Handle error.
}
it, err := job.Read(ctx)
if err != nil {
// TODO: Handle error.
}
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleJob_Wait() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}
func ExampleJob_Config() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
if err != nil {
// TODO: Handle error.
}
jc, err := job.Config()
if err != nil {
// TODO: Handle error.
}
copyConfig := jc.(*bigquery.CopyConfig)
fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition)
}
func ExampleDataset_Create() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "EU"}); err != nil {
// TODO: Handle error.
}
}
func ExampleDataset_Delete() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
if err := client.Dataset("my_dataset").Delete(ctx); err != nil {
// TODO: Handle error.
}
}
func ExampleDataset_Metadata() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}
// This example illustrates how to perform a read-modify-write sequence on dataset
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleDataset_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
md, err := ds.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := ds.Update(ctx,
bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleDataset_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "")
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}
func ExampleDataset_Table() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
// Table creates a reference to the table. It does not create the actual
// table in BigQuery; to do so, use Table.Create.
t := client.Dataset("my_dataset").Table("my_table")
fmt.Println(t)
}
func ExampleDataset_Tables() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Dataset("my_dataset").Tables(ctx)
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleDatasetIterator_Next() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Datasets(ctx)
for {
ds, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(ds)
}
}
func ExampleInferSchema() {
type Item struct {
Name string
Size float64
Count int
}
schema, err := bigquery.InferSchema(Item{})
if err != nil {
fmt.Println(err)
// TODO: Handle error.
}
for _, fs := range schema {
fmt.Println(fs.Name, fs.Type)
}
// Output:
// Name STRING
// Size FLOAT
// Count INTEGER
}
func ExampleInferSchema_tags() {
type Item struct {
Name string
Size float64
Count int `bigquery:"number"`
Secret []byte `bigquery:"-"`
Optional bigquery.NullBool
OptBytes []byte `bigquery:",nullable"`
}
schema, err := bigquery.InferSchema(Item{})
if err != nil {
fmt.Println(err)
// TODO: Handle error.
}
for _, fs := range schema {
fmt.Println(fs.Name, fs.Type, fs.Required)
}
// Output:
// Name STRING true
// Size FLOAT true
// number INTEGER true
// Optional BOOLEAN false
// OptBytes BYTES false
}
func ExampleTable_Create() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
if err := t.Create(ctx, nil); err != nil {
// TODO: Handle error.
}
}
// Initialize a new table by passing TableMetadata to Table.Create.
func ExampleTable_Create_initialize() {
ctx := context.Background()
// Infer table schema from a Go type.
schema, err := bigquery.InferSchema(Item{})
if err != nil {
// TODO: Handle error.
}
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
if err := t.Create(ctx,
&bigquery.TableMetadata{
Name: "My New Table",
Schema: schema,
ExpirationTime: time.Now().Add(24 * time.Hour),
}); err != nil {
// TODO: Handle error.
}
}
// This example demonstrates how to create a table with
// a customer-managed encryption key.
func ExampleTable_Create_encryptionKey() {
ctx := context.Background()
// Infer table schema from a Go type.
schema, err := bigquery.InferSchema(Item{})
if err != nil {
// TODO: Handle error.
}
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
// TODO: Replace this key with a key you have created in Cloud KMS.
keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
if err := t.Create(ctx,
&bigquery.TableMetadata{
Name: "My New Table",
Schema: schema,
EncryptionConfig: &bigquery.EncryptionConfig{KMSKeyName: keyName},
}); err != nil {
// TODO: Handle error.
}
}
func ExampleTable_Delete() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil {
// TODO: Handle error.
}
}
func ExampleTable_Metadata() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}
func ExampleTable_Inserter() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
_ = ins // TODO: Use ins.
}
func ExampleTable_Inserter_options() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
ins.SkipInvalidRows = true
ins.IgnoreUnknownValues = true
_ = ins // TODO: Use ins.
}
func ExampleTable_CopierFrom() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2"))
c.WriteDisposition = bigquery.WriteTruncate
// TODO: set other options on the Copier.
job, err := c.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}
func ExampleTable_ExtractorTo() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.FieldDelimiter = ":"
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
extractor := ds.Table("my_table").ExtractorTo(gcsRef)
extractor.DisableHeader = true
// TODO: set other options on the Extractor.
job, err := extractor.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}
func ExampleTable_LoaderFrom() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.AllowJaggedRows = true
gcsRef.MaxBadRecords = 5
gcsRef.Schema = schema
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(gcsRef)
loader.CreateDisposition = bigquery.CreateNever
// TODO: set other options on the Loader.
job, err := loader.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}
func ExampleTable_LoaderFrom_reader() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
f, err := os.Open("data.csv")
if err != nil {
// TODO: Handle error.
}
rs := bigquery.NewReaderSource(f)
rs.AllowJaggedRows = true
rs.MaxBadRecords = 5
rs.Schema = schema
// TODO: set other options on the ReaderSource.
ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(rs)
loader.CreateDisposition = bigquery.CreateNever
// TODO: set other options on the Loader.
job, err := loader.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}
func ExampleTable_Read() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Dataset("my_dataset").Table("my_table").Read(ctx)
_ = it // TODO: iterate using Next or iterator.Pager.
}
// This example illustrates how to perform a read-modify-write sequence on table
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleTable_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("my_table")
md, err := t.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := t.Update(ctx,
bigquery.TableMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleTable_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("my_table")
tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
Description: "my favorite table",
}, "")
if err != nil {
// TODO: Handle error.
}
fmt.Println(tm)
}
func ExampleTableIterator_Next() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Dataset("my_dataset").Tables(ctx)
for {
t, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(t)
}
}
type Item struct {
Name string
Size float64
Count int
}
// Save implements the ValueSaver interface.
func (i *Item) Save() (map[string]bigquery.Value, string, error) {
return map[string]bigquery.Value{
"Name": i.Name,
"Size": i.Size,
"Count": i.Count,
}, "", nil
}
func ExampleInserter_Put() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
// Item implements the ValueSaver interface.
items := []*Item{
{Name: "n1", Size: 32.6, Count: 7},
{Name: "n2", Size: 4, Count: 2},
{Name: "n3", Size: 101.5, Count: 1},
}
if err := ins.Put(ctx, items); err != nil {
// TODO: Handle error.
}
}
var schema bigquery.Schema
func ExampleInserter_Put_structSaver() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
type score struct {
Name string
Num int
}
// Assume schema holds the table's schema.
savers := []*bigquery.StructSaver{
{Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"},
{Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
{Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
}
if err := ins.Put(ctx, savers); err != nil {
// TODO: Handle error.
}
}
func ExampleInserter_Put_struct() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
type score struct {
Name string
Num int
}
scores := []score{
{Name: "n1", Num: 12},
{Name: "n2", Num: 31},
{Name: "n3", Num: 7},
}
// Schema is inferred from the score type.
if err := ins.Put(ctx, scores); err != nil {
// TODO: Handle error.
}
}
func ExampleInserter_Put_valuesSaver() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
var vss []*bigquery.ValuesSaver
for i, name := range []string{"n1", "n2", "n3"} {
// Assume schema holds the table's schema.
vss = append(vss, &bigquery.ValuesSaver{
Schema: schema,
InsertID: name,
Row: []bigquery.Value{name, int64(i)},
})
}
if err := ins.Put(ctx, vss); err != nil {
// TODO: Handle error.
}
}
vendor/cloud.google.com/go/bigquery/external.go (new file, generated, vendored)
@@ -0,0 +1,400 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"encoding/base64"
"unicode/utf8"
bq "google.golang.org/api/bigquery/v2"
)
// DataFormat describes the format of BigQuery table data.
type DataFormat string
// Constants describing the format of BigQuery table data.
const (
CSV DataFormat = "CSV"
Avro DataFormat = "AVRO"
JSON DataFormat = "NEWLINE_DELIMITED_JSON"
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
GoogleSheets DataFormat = "GOOGLE_SHEETS"
Bigtable DataFormat = "BIGTABLE"
Parquet DataFormat = "PARQUET"
ORC DataFormat = "ORC"
)
// ExternalData is a table which is stored outside of BigQuery. It is implemented by
// *ExternalDataConfig.
// GCSReference also implements it, for backwards compatibility.
type ExternalData interface {
toBQ() bq.ExternalDataConfiguration
}
// ExternalDataConfig describes data external to BigQuery that can be used
// in queries and to create external tables.
type ExternalDataConfig struct {
// The format of the data. Required.
SourceFormat DataFormat
// The fully-qualified URIs that point to your
// data in Google Cloud. Required.
//
// For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
// and it must come after the 'bucket' name. Size limits related to load jobs
// apply to external data sources.
//
// For Google Cloud Bigtable URIs, exactly one URI can be specified and it has to be
// a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
//
// For Google Cloud Datastore backups, exactly one URI can be specified. Also,
// the '*' wildcard character is not allowed.
SourceURIs []string
// The schema of the data. Required for CSV and JSON; disallowed for the
// other formats.
Schema Schema
// Try to detect schema and format options automatically.
// Any option specified explicitly will be honored.
AutoDetect bool
// The compression type of the data.
Compression Compression
// IgnoreUnknownValues causes values not matching the schema to be
// tolerated. Unknown values are ignored. For CSV this ignores extra values
// at the end of a line. For JSON this ignores named values that do not
// match any column name. If this field is not set, records containing
// unknown values are treated as bad records. The MaxBadRecords field can
// be used to customize how bad records are handled.
IgnoreUnknownValues bool
// MaxBadRecords is the maximum number of bad records that will be ignored
// when reading data.
MaxBadRecords int64
// Additional options for CSV, GoogleSheets and Bigtable formats.
Options ExternalDataConfigOptions
}
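// A brief illustrative sketch of configuring external CSV data (the URI and
// one-field schema here are hypothetical, chosen only for the example):
//
//	cfg := &ExternalDataConfig{
//		SourceFormat: CSV,
//		SourceURIs:   []string{"gs://my-bucket/data-*.csv"},
//		Schema:       Schema{{Name: "name", Type: StringFieldType}},
//		Options:      &CSVOptions{SkipLeadingRows: 1},
//	}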
func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
q := bq.ExternalDataConfiguration{
SourceFormat: string(e.SourceFormat),
SourceUris: e.SourceURIs,
Autodetect: e.AutoDetect,
Compression: string(e.Compression),
IgnoreUnknownValues: e.IgnoreUnknownValues,
MaxBadRecords: e.MaxBadRecords,
}
if e.Schema != nil {
q.Schema = e.Schema.toBQ()
}
if e.Options != nil {
e.Options.populateExternalDataConfig(&q)
}
return q
}
func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
e := &ExternalDataConfig{
SourceFormat: DataFormat(q.SourceFormat),
SourceURIs: q.SourceUris,
AutoDetect: q.Autodetect,
Compression: Compression(q.Compression),
IgnoreUnknownValues: q.IgnoreUnknownValues,
MaxBadRecords: q.MaxBadRecords,
Schema: bqToSchema(q.Schema),
}
switch {
case q.CsvOptions != nil:
e.Options = bqToCSVOptions(q.CsvOptions)
case q.GoogleSheetsOptions != nil:
e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
case q.BigtableOptions != nil:
var err error
e.Options, err = bqToBigtableOptions(q.BigtableOptions)
if err != nil {
return nil, err
}
}
return e, nil
}
// ExternalDataConfigOptions are additional options for external data configurations.
// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
type ExternalDataConfigOptions interface {
populateExternalDataConfig(*bq.ExternalDataConfiguration)
}
// CSVOptions are additional options for CSV external data sources.
type CSVOptions struct {
// AllowJaggedRows causes missing trailing optional columns to be tolerated
// when reading CSV data. Missing values are treated as nulls.
AllowJaggedRows bool
// AllowQuotedNewlines sets whether quoted data sections containing
// newlines are allowed when reading CSV data.
AllowQuotedNewlines bool
// Encoding is the character encoding of data to be read.
Encoding Encoding
// FieldDelimiter is the separator for fields in a CSV file, used when
// reading or exporting data. The default is ",".
FieldDelimiter string
// Quote is the value used to quote data sections in a CSV file. The
// default quotation character is the double quote ("), which is used if
// both Quote and ForceZeroQuote are unset.
// To specify that no character should be interpreted as a quotation
// character, set ForceZeroQuote to true.
// Only used when reading data.
Quote string
ForceZeroQuote bool
// The number of rows at the top of a CSV file that BigQuery will skip when
// reading data.
SkipLeadingRows int64
}
func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
c.CsvOptions = &bq.CsvOptions{
AllowJaggedRows: o.AllowJaggedRows,
AllowQuotedNewlines: o.AllowQuotedNewlines,
Encoding: string(o.Encoding),
FieldDelimiter: o.FieldDelimiter,
Quote: o.quote(),
SkipLeadingRows: o.SkipLeadingRows,
}
}
// quote returns the CSV quote character, or nil if unset.
func (o *CSVOptions) quote() *string {
if o.ForceZeroQuote {
quote := ""
return &quote
}
if o.Quote == "" {
return nil
}
return &o.Quote
}
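// For reference, the mapping implemented by quote, as (Quote, ForceZeroQuote) -> wire value:
//
//	("", false)  -> nil  (server default: the double quote)
//	("", true)   -> ""   (no quote character)
//	("q", false) -> "q"
//	("q", true)  -> ""   (ForceZeroQuote takes precedence)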
func (o *CSVOptions) setQuote(ps *string) {
if ps != nil {
o.Quote = *ps
if o.Quote == "" {
o.ForceZeroQuote = true
}
}
}
func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
o := &CSVOptions{
AllowJaggedRows: q.AllowJaggedRows,
AllowQuotedNewlines: q.AllowQuotedNewlines,
Encoding: Encoding(q.Encoding),
FieldDelimiter: q.FieldDelimiter,
SkipLeadingRows: q.SkipLeadingRows,
}
o.setQuote(q.Quote)
return o
}
// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
type GoogleSheetsOptions struct {
// The number of rows at the top of a sheet that BigQuery will skip when
// reading data.
SkipLeadingRows int64
}
func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
SkipLeadingRows: o.SkipLeadingRows,
}
}
func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
return &GoogleSheetsOptions{
SkipLeadingRows: q.SkipLeadingRows,
}
}
// BigtableOptions are additional options for Bigtable external data sources.
type BigtableOptions struct {
// A list of column families to expose in the table schema along with their
// types. If omitted, all column families are present in the table schema and
// their values are read as BYTES.
ColumnFamilies []*BigtableColumnFamily
// If true, then the column families that are not specified in columnFamilies
// list are not exposed in the table schema. Otherwise, they are read with BYTES
// type values. The default is false.
IgnoreUnspecifiedColumnFamilies bool
// If true, then the rowkey column families will be read and converted to string.
// Otherwise they are read with BYTES type values and users need to manually cast
// them with CAST if necessary. The default is false.
ReadRowkeyAsString bool
}
func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
q := &bq.BigtableOptions{
IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
ReadRowkeyAsString: o.ReadRowkeyAsString,
}
for _, f := range o.ColumnFamilies {
q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
}
c.BigtableOptions = q
}
func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
b := &BigtableOptions{
IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
ReadRowkeyAsString: q.ReadRowkeyAsString,
}
for _, f := range q.ColumnFamilies {
f2, err := bqToBigtableColumnFamily(f)
if err != nil {
return nil, err
}
b.ColumnFamilies = append(b.ColumnFamilies, f2)
}
return b, nil
}
// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
type BigtableColumnFamily struct {
// Identifier of the column family.
FamilyID string
// List of columns that should be exposed as individual fields, as opposed to a
// list of (column name, value) pairs. All columns whose qualifier matches a
// qualifier in this list can be accessed as a field named <family>.<qualifier>;
// other columns can be accessed as a list through the <family>.Column field.
Columns []*BigtableColumn
// The encoding of the values when the type is not STRING. Acceptable encoding values are:
// - TEXT - indicates values are alphanumeric text strings.
// - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
// This can be overridden for a specific column by listing that column in 'columns' and
// specifying an encoding for it.
Encoding string
// If true, only the latest version of values are exposed for all columns in this
// column family. This can be overridden for a specific column by listing that
// column in 'columns' and specifying a different setting for that column.
OnlyReadLatest bool
// The type to convert the value in cells of this
// column family. The values are expected to be encoded using HBase
// Bytes.toBytes function when using the BINARY encoding value.
// Following BigQuery types are allowed (case-sensitive):
// BYTES STRING INTEGER FLOAT BOOLEAN.
// The default type is BYTES. This can be overridden for a specific column by
// listing that column in 'columns' and specifying a type for it.
Type string
}
func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
q := &bq.BigtableColumnFamily{
FamilyId: b.FamilyID,
Encoding: b.Encoding,
OnlyReadLatest: b.OnlyReadLatest,
Type: b.Type,
}
for _, col := range b.Columns {
q.Columns = append(q.Columns, col.toBQ())
}
return q
}
func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
b := &BigtableColumnFamily{
FamilyID: q.FamilyId,
Encoding: q.Encoding,
OnlyReadLatest: q.OnlyReadLatest,
Type: q.Type,
}
for _, col := range q.Columns {
c, err := bqToBigtableColumn(col)
if err != nil {
return nil, err
}
b.Columns = append(b.Columns, c)
}
return b, nil
}
// BigtableColumn describes how BigQuery should access a Bigtable column.
type BigtableColumn struct {
// Qualifier of the column. Columns in the parent column family that have this
// exact qualifier are exposed as the <family>.<qualifier> field. The column
// field name is the same as the column qualifier.
Qualifier string
// If the qualifier is not a valid BigQuery field identifier i.e. does not match
// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
// name and is used as field name in queries.
FieldName string
// If true, only the latest version of values are exposed for this column.
// See BigtableColumnFamily.OnlyReadLatest.
OnlyReadLatest bool
// The encoding of the values when the type is not STRING.
// See BigtableColumnFamily.Encoding
Encoding string
// The type to convert the value in cells of this column.
// See BigtableColumnFamily.Type
Type string
}
func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
q := &bq.BigtableColumn{
FieldName: b.FieldName,
OnlyReadLatest: b.OnlyReadLatest,
Encoding: b.Encoding,
Type: b.Type,
}
if utf8.ValidString(b.Qualifier) {
q.QualifierString = b.Qualifier
} else {
q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
}
return q
}
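// For example, a qualifier like "user" is valid UTF-8 and travels as
// QualifierString, while a raw-byte qualifier such as "\xDF\xFF" is
// base64-encoded into QualifierEncoded; TestQualifier in external_test.go
// exercises both round trips.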
func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
b := &BigtableColumn{
FieldName: q.FieldName,
OnlyReadLatest: q.OnlyReadLatest,
Encoding: q.Encoding,
Type: q.Type,
}
if q.QualifierString != "" {
b.Qualifier = q.QualifierString
} else {
bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
if err != nil {
return nil, err
}
b.Qualifier = string(bytes)
}
return b, nil
}
vendor/cloud.google.com/go/bigquery/external_test.go (new file, generated, vendored)
@@ -0,0 +1,143 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
)
func TestExternalDataConfig(t *testing.T) {
// Round-trip of ExternalDataConfig to underlying representation.
for i, want := range []*ExternalDataConfig{
{
SourceFormat: CSV,
SourceURIs: []string{"uri"},
Schema: Schema{{Name: "n", Type: IntegerFieldType}},
AutoDetect: true,
Compression: Gzip,
IgnoreUnknownValues: true,
MaxBadRecords: 17,
Options: &CSVOptions{
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: UTF_8,
FieldDelimiter: "f",
Quote: "q",
SkipLeadingRows: 3,
},
},
{
SourceFormat: GoogleSheets,
Options: &GoogleSheetsOptions{SkipLeadingRows: 4},
},
{
SourceFormat: Bigtable,
Options: &BigtableOptions{
IgnoreUnspecifiedColumnFamilies: true,
ReadRowkeyAsString: true,
ColumnFamilies: []*BigtableColumnFamily{
{
FamilyID: "f1",
Encoding: "TEXT",
OnlyReadLatest: true,
Type: "FLOAT",
Columns: []*BigtableColumn{
{
Qualifier: "valid-utf-8",
FieldName: "fn",
OnlyReadLatest: true,
Encoding: "BINARY",
Type: "STRING",
},
},
},
},
},
},
} {
q := want.toBQ()
got, err := bqToExternalDataConfig(&q)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
}
}
}
func TestQuote(t *testing.T) {
ptr := func(s string) *string { return &s }
for _, test := range []struct {
quote string
force bool
want *string
}{
{"", false, nil},
{"", true, ptr("")},
{"-", false, ptr("-")},
{"-", true, ptr("")},
} {
o := CSVOptions{
Quote: test.quote,
ForceZeroQuote: test.force,
}
got := o.quote()
if (got == nil) != (test.want == nil) {
t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
}
if got != nil && test.want != nil && *got != *test.want {
t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
}
}
}
func TestQualifier(t *testing.T) {
b := BigtableColumn{Qualifier: "a"}
q := b.toBQ()
if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" {
t.Errorf("got (%q, %q), want (%q, %q)",
q.QualifierString, q.QualifierEncoded, b.Qualifier, "")
}
b2, err := bqToBigtableColumn(q)
if err != nil {
t.Fatal(err)
}
if got, want := b2.Qualifier, b.Qualifier; got != want {
t.Errorf("got %q, want %q", got, want)
}
const (
invalidUTF8 = "\xDF\xFF"
invalidEncoded = "3/8"
)
b = BigtableColumn{Qualifier: invalidUTF8}
q = b.toBQ()
if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded {
t.Errorf("got (%q, %q), want (%q, %q)",
q.QualifierString, "", b.Qualifier, invalidEncoded)
}
b2, err = bqToBigtableColumn(q)
if err != nil {
t.Fatal(err)
}
if got, want := b2.Qualifier, b.Qualifier; got != want {
t.Errorf("got %q, want %q", got, want)
}
}
vendor/cloud.google.com/go/bigquery/extract.go (new file, generated, vendored)
@@ -0,0 +1,110 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)
// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
// Src is the table from which data will be extracted.
Src *Table
// Dst is the destination into which the data will be extracted.
Dst *GCSReference
// DisableHeader disables the printing of a header row in exported data.
DisableHeader bool
// The labels associated with this job.
Labels map[string]string
}
func (e *ExtractConfig) toBQ() *bq.JobConfiguration {
var printHeader *bool
if e.DisableHeader {
f := false
printHeader = &f
}
return &bq.JobConfiguration{
Labels: e.Labels,
Extract: &bq.JobConfigurationExtract{
DestinationUris: append([]string{}, e.Dst.URIs...),
Compression: string(e.Dst.Compression),
DestinationFormat: string(e.Dst.DestinationFormat),
FieldDelimiter: e.Dst.FieldDelimiter,
SourceTable: e.Src.toBQ(),
PrintHeader: printHeader,
},
}
}
func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig {
qe := q.Extract
return &ExtractConfig{
Labels: q.Labels,
Dst: &GCSReference{
URIs: qe.DestinationUris,
Compression: Compression(qe.Compression),
DestinationFormat: DataFormat(qe.DestinationFormat),
FileConfig: FileConfig{
CSVOptions: CSVOptions{
FieldDelimiter: qe.FieldDelimiter,
},
},
},
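// PrintHeader defaults to true on the wire, so only an explicit false
// means the header was disabled.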
DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader,
Src: bqToTable(qe.SourceTable, c),
}
}
// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct {
JobIDConfig
ExtractConfig
c *Client
}
// ExtractorTo returns an Extractor which can be used to extract data from a
// BigQuery table into Google Cloud Storage.
// The returned Extractor may optionally be further configured before its Run method is called.
func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
return &Extractor{
c: t.c,
ExtractConfig: ExtractConfig{
Src: t,
Dst: dst,
},
}
}
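// A minimal usage sketch; client and ctx stand for an existing *Client and
// context.Context, and the bucket, dataset, and table names are hypothetical:
//
//	gcsRef := NewGCSReference("gs://my-bucket/out-*.csv")
//	extractor := client.Dataset("my_dataset").Table("my_table").ExtractorTo(gcsRef)
//	extractor.DisableHeader = true
//	job, err := extractor.Run(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	// Wait on the job and check its status, as in the package examples.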
// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Extractor.Run")
defer func() { trace.EndSpan(ctx, err) }()
return e.c.insertJob(ctx, e.newJob(), nil)
}
func (e *Extractor) newJob() *bq.Job {
return &bq.Job{
JobReference: e.JobIDConfig.createJobRef(e.c),
Configuration: e.ExtractConfig.toBQ(),
}
}
vendor/cloud.google.com/go/bigquery/extract.go (old version, deleted)
@@ -1,59 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type extractOption interface {
customizeExtract(conf *bq.JobConfigurationExtract)
}
// DisableHeader returns an Option that disables the printing of a header row in exported data.
func DisableHeader() Option { return disableHeader{} }
type disableHeader struct{}
func (opt disableHeader) implementsOption() {}
func (opt disableHeader) customizeExtract(conf *bq.JobConfigurationExtract) {
f := false
conf.PrintHeader = &f
}
func (c *Client) extract(ctx context.Context, dst *GCSReference, src *Table, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationExtract{}
dst.customizeExtractDst(payload)
src.customizeExtractSrc(payload)
for _, opt := range options {
o, ok := opt.(extractOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeExtract(payload)
}
job.Configuration = &bq.JobConfiguration{
Extract: payload,
}
return c.service.insertJob(ctx, job, c.projectID)
}
vendor/cloud.google.com/go/bigquery/extract_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,20 +15,20 @@
 package bigquery
 import (
-	"reflect"
 	"testing"
-	"golang.org/x/net/context"
+	"cloud.google.com/go/internal/testutil"
+	"github.com/google/go-cmp/cmp"
 	bq "google.golang.org/api/bigquery/v2"
 )
 func defaultExtractJob() *bq.Job {
 	return &bq.Job{
+		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
 		Configuration: &bq.JobConfiguration{
 			Extract: &bq.JobConfigurationExtract{
 				SourceTable: &bq.TableReference{
-					ProjectId: "project-id",
+					ProjectId: "client-project-id",
 					DatasetId: "dataset-id",
 					TableId:   "table-id",
 				},
@@ -38,39 +38,53 @@ func defaultExtractJob() *bq.Job {
 	}
 }
+func defaultGCS() *GCSReference {
+	return &GCSReference{
+		URIs: []string{"uri"},
+	}
+}
 func TestExtract(t *testing.T) {
+	defer fixRandomID("RANDOM")()
+	c := &Client{
+		projectID: "client-project-id",
+	}
 	testCases := []struct {
 		dst     *GCSReference
 		src     *Table
-		options []Option
+		config  ExtractConfig
 		want    *bq.Job
 	}{
 		{
-			dst:  defaultGCS,
-			src:  defaultTable(nil),
+			dst:  defaultGCS(),
+			src:  c.Dataset("dataset-id").Table("table-id"),
 			want: defaultExtractJob(),
 		},
 		{
-			dst: defaultGCS,
-			src: defaultTable(nil),
-			options: []Option{
-				DisableHeader(),
+			dst: defaultGCS(),
+			src: c.Dataset("dataset-id").Table("table-id"),
+			config: ExtractConfig{
+				DisableHeader: true,
+				Labels:        map[string]string{"a": "b"},
 			},
 			want: func() *bq.Job {
 				j := defaultExtractJob()
+				j.Configuration.Labels = map[string]string{"a": "b"}
 				f := false
 				j.Configuration.Extract.PrintHeader = &f
 				return j
 			}(),
 		},
 		{
-			dst: &GCSReference{
-				uris:              []string{"uri"},
-				Compression:       Gzip,
-				DestinationFormat: JSON,
-				FieldDelimiter:    "\t",
-			},
-			src: defaultTable(nil),
+			dst: func() *GCSReference {
+				g := NewGCSReference("uri")
+				g.Compression = Gzip
+				g.DestinationFormat = JSON
+				g.FieldDelimiter = "\t"
+				return g
+			}(),
+			src: c.Dataset("dataset-id").Table("table-id"),
 			want: func() *bq.Job {
 				j := defaultExtractJob()
 				j.Configuration.Extract.Compression = "GZIP"
@@ -81,17 +95,22 @@ func TestExtract(t *testing.T) {
 		},
 	}
-	for _, tc := range testCases {
-		s := &testService{}
-		c := &Client{
-			service: s,
-		}
-		if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
-			t.Errorf("err calling extract: %v", err)
-			continue
-		}
-		if !reflect.DeepEqual(s.Job, tc.want) {
-			t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want)
-		}
+	for i, tc := range testCases {
+		ext := tc.src.ExtractorTo(tc.dst)
+		tc.config.Src = ext.Src
+		tc.config.Dst = ext.Dst
+		ext.ExtractConfig = tc.config
+		got := ext.newJob()
+		checkJob(t, i, got, tc.want)
+		jc, err := bqToJobConfig(got.Configuration, c)
+		if err != nil {
+			t.Fatalf("#%d: %v", i, err)
+		}
+		diff := testutil.Diff(jc, &ext.ExtractConfig,
+			cmp.AllowUnexported(Table{}, Client{}))
+		if diff != "" {
+			t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
+		}
 	}
 }
vendor/cloud.google.com/go/bigquery/file.go (new file, generated, vendored)
@@ -0,0 +1,137 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"io"
bq "google.golang.org/api/bigquery/v2"
)
// A ReaderSource is a source for a load operation that gets
// data from an io.Reader.
//
// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
// its internal io.Reader will be nil, so it cannot be used for a
// subsequent load operation.
type ReaderSource struct {
r io.Reader
FileConfig
}
// NewReaderSource creates a ReaderSource from an io.Reader. You may
// optionally configure properties on the ReaderSource that describe the
// data being read, before passing it to Table.LoaderFrom.
func NewReaderSource(r io.Reader) *ReaderSource {
return &ReaderSource{r: r}
}
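// A minimal usage sketch; client, ctx, and the file, dataset, and table names
// are hypothetical:
//
//	f, err := os.Open("data.csv")
//	if err != nil {
//		// TODO: Handle error.
//	}
//	rs := NewReaderSource(f)
//	rs.SkipLeadingRows = 1
//	loader := client.Dataset("my_dataset").Table("my_table").LoaderFrom(rs)
//	job, err := loader.Run(ctx)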
func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
r.FileConfig.populateLoadConfig(lc)
return r.r
}
// FileConfig contains configuration options that pertain to files, typically
// text files that require interpretation to be used as a BigQuery table. A
// file may live in Google Cloud Storage (see GCSReference), or it may be
// loaded into a table from an io.Reader via a ReaderSource and Table.LoaderFrom.
type FileConfig struct {
// SourceFormat is the format of the data to be read.
// Allowed values are: Avro, CSV, DatastoreBackup, JSON, ORC, and Parquet. The default is CSV.
SourceFormat DataFormat
// Indicates if we should automatically infer the options and
// schema for CSV and JSON sources.
AutoDetect bool
// MaxBadRecords is the maximum number of bad records that will be ignored
// when reading data.
MaxBadRecords int64
// IgnoreUnknownValues causes values not matching the schema to be
// tolerated. Unknown values are ignored. For CSV this ignores extra values
// at the end of a line. For JSON this ignores named values that do not
// match any column name. If this field is not set, records containing
// unknown values are treated as bad records. The MaxBadRecords field can
// be used to customize how bad records are handled.
IgnoreUnknownValues bool
// Schema describes the data. It is required when reading CSV or JSON data,
// unless the data is being loaded into a table that already exists.
Schema Schema
// Additional options for CSV files.
CSVOptions
}
func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
conf.SkipLeadingRows = fc.SkipLeadingRows
conf.SourceFormat = string(fc.SourceFormat)
conf.Autodetect = fc.AutoDetect
conf.AllowJaggedRows = fc.AllowJaggedRows
conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
conf.Encoding = string(fc.Encoding)
conf.FieldDelimiter = fc.FieldDelimiter
conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
conf.MaxBadRecords = fc.MaxBadRecords
if fc.Schema != nil {
conf.Schema = fc.Schema.toBQ()
}
conf.Quote = fc.quote()
}
func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
fc.SourceFormat = DataFormat(conf.SourceFormat)
fc.AutoDetect = conf.Autodetect
fc.MaxBadRecords = conf.MaxBadRecords
fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
fc.Schema = bqToSchema(conf.Schema)
fc.SkipLeadingRows = conf.SkipLeadingRows
fc.AllowJaggedRows = conf.AllowJaggedRows
fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
fc.Encoding = Encoding(conf.Encoding)
fc.FieldDelimiter = conf.FieldDelimiter
fc.CSVOptions.setQuote(conf.Quote)
}
func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
format := fc.SourceFormat
if format == "" {
// Format must be explicitly set for external data sources.
format = CSV
}
conf.Autodetect = fc.AutoDetect
conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
conf.MaxBadRecords = fc.MaxBadRecords
conf.SourceFormat = string(format)
if fc.Schema != nil {
conf.Schema = fc.Schema.toBQ()
}
if format == CSV {
fc.CSVOptions.populateExternalDataConfig(conf)
}
}
// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string
const (
// UTF_8 specifies the UTF-8 encoding type.
UTF_8 Encoding = "UTF-8"
// ISO_8859_1 specifies the ISO-8859-1 encoding type.
ISO_8859_1 Encoding = "ISO-8859-1"
)
vendor/cloud.google.com/go/bigquery/file_test.go (new file, generated, vendored)
@@ -0,0 +1,98 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
var (
hyphen = "-"
fc = FileConfig{
SourceFormat: CSV,
AutoDetect: true,
MaxBadRecords: 7,
IgnoreUnknownValues: true,
Schema: Schema{
stringFieldSchema(),
nestedFieldSchema(),
},
CSVOptions: CSVOptions{
Quote: hyphen,
FieldDelimiter: "\t",
SkipLeadingRows: 8,
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: UTF_8,
},
}
)
func TestFileConfigPopulateLoadConfig(t *testing.T) {
want := &bq.JobConfigurationLoad{
SourceFormat: "CSV",
FieldDelimiter: "\t",
SkipLeadingRows: 8,
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Autodetect: true,
Encoding: "UTF-8",
MaxBadRecords: 7,
IgnoreUnknownValues: true,
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqStringFieldSchema(),
bqNestedFieldSchema(),
}},
Quote: &hyphen,
}
got := &bq.JobConfigurationLoad{}
fc.populateLoadConfig(got)
if !testutil.Equal(got, want) {
t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
}
}
func TestFileConfigPopulateExternalDataConfig(t *testing.T) {
got := &bq.ExternalDataConfiguration{}
fc.populateExternalDataConfig(got)
want := &bq.ExternalDataConfiguration{
SourceFormat: "CSV",
Autodetect: true,
MaxBadRecords: 7,
IgnoreUnknownValues: true,
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqStringFieldSchema(),
bqNestedFieldSchema(),
}},
CsvOptions: &bq.CsvOptions{
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: "UTF-8",
FieldDelimiter: "\t",
Quote: &hyphen,
SkipLeadingRows: 8,
},
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("got=-, want=+:\n%s", diff)
}
}
vendor/cloud.google.com/go/bigquery/gcs.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,99 +14,62 @@
 package bigquery

-import bq "google.golang.org/api/bigquery/v2"
+import (
+	"io"
+
+	bq "google.golang.org/api/bigquery/v2"
+)

 // GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
 // an input or output to a BigQuery operation.
 type GCSReference struct {
-	uris []string
-
-	// FieldDelimiter is the separator for fields in a CSV file, used when loading or exporting data.
-	// The default is ",".
-	FieldDelimiter string
-
-	// The number of rows at the top of a CSV file that BigQuery will skip when loading the data.
-	SkipLeadingRows int64
-
-	// SourceFormat is the format of the GCS data to be loaded into BigQuery.
-	// Allowed values are: CSV, JSON, DatastoreBackup. The default is CSV.
-	SourceFormat DataFormat
-
-	// Only used when loading data.
-	Encoding Encoding
-
-	// Quote is the value used to quote data sections in a CSV file.
-	// The default quotation character is the double quote ("), which is used if both Quote and ForceZeroQuote are unset.
-	// To specify that no character should be interpreted as a quotation character, set ForceZeroQuote to true.
-	// Only used when loading data.
-	Quote          string
-	ForceZeroQuote bool
+	// URIs refer to Google Cloud Storage objects.
+	URIs []string
+
+	FileConfig

 	// DestinationFormat is the format to use when writing exported files.
 	// Allowed values are: CSV, Avro, JSON. The default is CSV.
 	// CSV is not supported for tables with nested or repeated fields.
 	DestinationFormat DataFormat

-	// Only used when writing data. Default is None.
+	// Compression specifies the type of compression to apply when writing data
+	// to Google Cloud Storage, or using this GCSReference as an ExternalData
+	// source with CSV or JSON SourceFormat. Default is None.
 	Compression Compression
 }

-func (gcs *GCSReference) implementsSource()      {}
-func (gcs *GCSReference) implementsDestination() {}
-
 // NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
 // In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
 // Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
 // Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
 // For more information about the treatment of wildcards and multiple URIs,
 // see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
-func (c *Client) NewGCSReference(uri ...string) *GCSReference {
-	return &GCSReference{uris: uri}
+func NewGCSReference(uri ...string) *GCSReference {
+	return &GCSReference{URIs: uri}
 }

-type DataFormat string
-
-const (
-	CSV             DataFormat = "CSV"
-	Avro            DataFormat = "AVRO"
-	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
-	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
-)
-
-// Encoding specifies the character encoding of data to be loaded into BigQuery.
-// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
-// for more details about how this is used.
-type Encoding string
-
-const (
-	UTF_8      Encoding = "UTF-8"
-	ISO_8859_1 Encoding = "ISO-8859-1"
-)
-
 // Compression is the type of compression to apply when writing data to Google Cloud Storage.
 type Compression string

 const (
+	// None specifies no compression.
 	None Compression = "NONE"
+	// Gzip specifies gzip compression.
 	Gzip Compression = "GZIP"
 )

-func (gcs *GCSReference) customizeLoadSrc(conf *bq.JobConfigurationLoad) {
-	conf.SourceUris = gcs.uris
-	conf.SkipLeadingRows = gcs.SkipLeadingRows
-	conf.SourceFormat = string(gcs.SourceFormat)
-	conf.Encoding = string(gcs.Encoding)
-	conf.FieldDelimiter = gcs.FieldDelimiter
-
-	if gcs.ForceZeroQuote {
-		quote := ""
-		conf.Quote = &quote
-	} else if gcs.Quote != "" {
-		conf.Quote = &gcs.Quote
-	}
+func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
+	lc.SourceUris = gcs.URIs
+	gcs.FileConfig.populateLoadConfig(lc)
+	return nil
 }

-func (gcs *GCSReference) customizeExtractDst(conf *bq.JobConfigurationExtract) {
-	conf.DestinationUris = gcs.uris
-	conf.Compression = string(gcs.Compression)
-	conf.DestinationFormat = string(gcs.DestinationFormat)
-	conf.FieldDelimiter = gcs.FieldDelimiter
+func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
+	conf := bq.ExternalDataConfiguration{
+		Compression: string(gcs.Compression),
+		SourceUris:  append([]string{}, gcs.URIs...),
+	}
+	gcs.FileConfig.populateExternalDataConfig(&conf)
+	return conf
 }

vendor/cloud.google.com/go/bigquery/inserter.go (generated, vendored; new file, 238 lines)

@@ -0,0 +1,238 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
"errors"
"fmt"
"reflect"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)
// An Inserter does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Inserter struct {
t *Table
// SkipInvalidRows causes rows containing invalid data to be silently
// ignored. The default value is false, which causes the entire request to
// fail if there is an attempt to insert an invalid row.
SkipInvalidRows bool
// IgnoreUnknownValues causes values not matching the schema to be ignored.
// The default value is false, which causes records containing such values
// to be treated as invalid records.
IgnoreUnknownValues bool
// A TableTemplateSuffix allows Inserters to create tables automatically.
//
// Experimental: this option is experimental and may be modified or removed in future versions,
// regardless of any other documented package stability guarantees.
//
// When you specify a suffix, the table you upload data to
// will be used as a template for creating a new table, with the same schema,
// called <table> + <suffix>.
//
// More information is available at
// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
TableTemplateSuffix string
}
// Inserter returns an Inserter that can be used to append rows to t.
// The returned Inserter may optionally be further configured before its Put method is called.
//
// To stream rows into a date-partitioned table at a particular date, add the
// $yyyymmdd suffix to the table name when constructing the Table.
func (t *Table) Inserter() *Inserter {
return &Inserter{t: t}
}
// Uploader calls Inserter.
// Deprecated: use Table.Inserter instead.
func (t *Table) Uploader() *Inserter { return t.Inserter() }
// Put uploads one or more rows to the BigQuery service.
//
// If src is ValueSaver, then its Save method is called to produce a row for uploading.
//
// If src is a struct or pointer to a struct, then a schema is inferred from it
// and used to create a StructSaver. The InsertID of the StructSaver will be
// empty.
//
// If src is a slice of ValueSavers, structs, or struct pointers, then each
// element of the slice is treated as above, and multiple rows are uploaded.
//
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
//
// Put will retry on temporary errors (see
// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
// in duplicate rows if you do not use insert IDs. Also, if the error persists,
// the call will run indefinitely. Pass a context with a timeout to prevent
// hanging calls.
func (u *Inserter) Put(ctx context.Context, src interface{}) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Inserter.Put")
defer func() { trace.EndSpan(ctx, err) }()
savers, err := valueSavers(src)
if err != nil {
return err
}
return u.putMulti(ctx, savers)
}
func valueSavers(src interface{}) ([]ValueSaver, error) {
saver, ok, err := toValueSaver(src)
if err != nil {
return nil, err
}
if ok {
return []ValueSaver{saver}, nil
}
srcVal := reflect.ValueOf(src)
if srcVal.Kind() != reflect.Slice {
return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
}
var savers []ValueSaver
for i := 0; i < srcVal.Len(); i++ {
s := srcVal.Index(i).Interface()
saver, ok, err := toValueSaver(s)
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
}
savers = append(savers, saver)
}
return savers, nil
}
// Make a ValueSaver from x, which must implement ValueSaver already
// or be a struct or pointer to struct.
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
if _, ok := x.(StructSaver); ok {
return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
}
var insertID string
// Handle StructSavers specially so we can infer the schema if necessary.
if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
x = ss.Struct
insertID = ss.InsertID
// Fall through so we can infer the schema.
}
if saver, ok := x.(ValueSaver); ok {
return saver, ok, nil
}
v := reflect.ValueOf(x)
// Support Put with []interface{}
if v.Kind() == reflect.Interface {
v = v.Elem()
}
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
return nil, false, nil
}
schema, err := inferSchemaReflectCached(v.Type())
if err != nil {
return nil, false, err
}
return &StructSaver{
Struct: x,
InsertID: insertID,
Schema: schema,
}, true, nil
}
func (u *Inserter) putMulti(ctx context.Context, src []ValueSaver) error {
req, err := u.newInsertRequest(src)
if err != nil {
return err
}
if req == nil {
return nil
}
call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
call = call.Context(ctx)
setClientHeader(call.Header())
var res *bq.TableDataInsertAllResponse
err = runWithRetry(ctx, func() (err error) {
res, err = call.Do()
return err
})
if err != nil {
return err
}
return handleInsertErrors(res.InsertErrors, req.Rows)
}
func (u *Inserter) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
if savers == nil { // If there are no rows, do nothing.
return nil, nil
}
req := &bq.TableDataInsertAllRequest{
TemplateSuffix: u.TableTemplateSuffix,
IgnoreUnknownValues: u.IgnoreUnknownValues,
SkipInvalidRows: u.SkipInvalidRows,
}
for _, saver := range savers {
row, insertID, err := saver.Save()
if err != nil {
return nil, err
}
if insertID == "" {
insertID = randomIDFn()
}
m := make(map[string]bq.JsonValue)
for k, v := range row {
m[k] = bq.JsonValue(v)
}
req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
InsertId: insertID,
Json: m,
})
}
return req, nil
}
func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
if len(ierrs) == 0 {
return nil
}
var errs PutMultiError
for _, e := range ierrs {
if int(e.Index) > len(rows) {
return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
}
rie := RowInsertionError{
InsertID: rows[e.Index].InsertId,
RowIndex: int(e.Index),
}
for _, errp := range e.Errors {
rie.Errors = append(rie.Errors, bqToError(errp))
}
errs = append(errs, rie)
}
return errs
}
// Uploader is an obsolete name for Inserter.
type Uploader = Inserter
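
A usage sketch for the Inserter above. Put retries on temporary errors, so the context carries a timeout to keep a persistent failure from blocking forever, and a PutMultiError is unpacked to reach the failed rows. The client, dataset, table and the Item type are placeholders, and the destination table's schema is assumed to match Item, as described in Put's documentation.

	package main

	import (
		"context"
		"time"

		"cloud.google.com/go/bigquery"
	)

	// Item's schema is inferred by Put (via StructSaver), as described above.
	type Item struct {
		Name  string
		Count int
	}

	func insertItems(client *bigquery.Client) error {
		ins := client.Dataset("mydataset").Table("mytable").Inserter()

		// Bound the call: Put retries on temporary errors and can otherwise
		// run indefinitely if the error persists.
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()

		err := ins.Put(ctx, []Item{{Name: "a", Count: 1}, {Name: "b", Count: 2}})
		if multi, ok := err.(bigquery.PutMultiError); ok {
			for _, rowErr := range multi {
				_ = rowErr.RowIndex // inspect each failed row here
			}
		}
		return err
	}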

vendor/cloud.google.com/go/bigquery/inserter_test.go (generated, vendored; new file, 210 lines)

@@ -0,0 +1,210 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"strconv"
"testing"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)
type testSaver struct {
row map[string]Value
insertID string
err error
}
func (ts testSaver) Save() (map[string]Value, string, error) {
return ts.row, ts.insertID, ts.err
}
func TestNewInsertRequest(t *testing.T) {
prev := randomIDFn
n := 0
randomIDFn = func() string { n++; return strconv.Itoa(n) }
defer func() { randomIDFn = prev }()
tests := []struct {
ul *Uploader
savers []ValueSaver
req *bq.TableDataInsertAllRequest
}{
{
ul: &Uploader{},
savers: nil,
req: nil,
},
{
ul: &Uploader{},
savers: []ValueSaver{
testSaver{row: map[string]Value{"one": 1}},
testSaver{row: map[string]Value{"two": 2}},
},
req: &bq.TableDataInsertAllRequest{
Rows: []*bq.TableDataInsertAllRequestRows{
{InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}},
{InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}},
},
},
},
{
ul: &Uploader{
TableTemplateSuffix: "suffix",
IgnoreUnknownValues: true,
SkipInvalidRows: true,
},
savers: []ValueSaver{
testSaver{insertID: "a", row: map[string]Value{"one": 1}},
testSaver{insertID: "", row: map[string]Value{"two": 2}},
},
req: &bq.TableDataInsertAllRequest{
Rows: []*bq.TableDataInsertAllRequestRows{
{InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}},
{InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}},
},
TemplateSuffix: "suffix",
SkipInvalidRows: true,
IgnoreUnknownValues: true,
},
},
}
for i, tc := range tests {
got, err := tc.ul.newInsertRequest(tc.savers)
if err != nil {
t.Fatal(err)
}
want := tc.req
if !testutil.Equal(got, want) {
t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want)
}
}
}
func TestNewInsertRequestErrors(t *testing.T) {
var u Uploader
_, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("bang")}})
if err == nil {
t.Error("got nil, want error")
}
}
func TestHandleInsertErrors(t *testing.T) {
rows := []*bq.TableDataInsertAllRequestRows{
{InsertId: "a"},
{InsertId: "b"},
}
for _, test := range []struct {
in []*bq.TableDataInsertAllResponseInsertErrors
want error
}{
{
in: nil,
want: nil,
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{
{Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0},
{Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1},
},
want: PutMultiError{
RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}},
RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}},
},
},
} {
got := handleInsertErrors(test.in, rows)
if !testutil.Equal(got, test.want) {
t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want)
}
}
}
func TestValueSavers(t *testing.T) {
ts := &testSaver{}
type T struct{ I int }
schema, err := InferSchema(T{})
if err != nil {
t.Fatal(err)
}
for _, test := range []struct {
in interface{}
want []ValueSaver
}{
{[]interface{}(nil), nil},
{[]interface{}{}, nil},
{ts, []ValueSaver{ts}},
{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
{[]T{{I: 1}, {I: 2}}, []ValueSaver{
&StructSaver{Schema: schema, Struct: T{I: 1}},
&StructSaver{Schema: schema, Struct: T{I: 2}},
}},
{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
&StructSaver{Schema: schema, Struct: T{I: 1}},
&StructSaver{Schema: schema, Struct: &T{I: 2}},
}},
{&StructSaver{Struct: T{I: 3}, InsertID: "foo"},
[]ValueSaver{
&StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"},
}},
} {
got, err := valueSavers(test.in)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) {
t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
}
// Make sure Save is successful.
for i, vs := range got {
_, _, err := vs.Save()
if err != nil {
t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
}
}
}
}
func TestValueSaversErrors(t *testing.T) {
inputs := []interface{}{
nil,
1,
[]int{1, 2},
[]interface{}{
testSaver{row: map[string]Value{"one": 1}, insertID: "a"},
1,
},
StructSaver{},
}
for _, in := range inputs {
if _, err := valueSavers(in); err == nil {
t.Errorf("%#v: got nil, want error", in)
}
}
}
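
testSaver above doubles as a template for user code: any type whose Save method returns a row map, an insert ID and an error satisfies ValueSaver and can be passed to Inserter.Put directly. A minimal sketch along those lines (the event type and its fields are illustrative):

	package main

	import "cloud.google.com/go/bigquery"

	// event implements bigquery.ValueSaver, like testSaver above.
	type event struct {
		kind string
		n    int
	}

	// Save returns the row to insert. An empty insert ID asks the library
	// to generate a random one, as newInsertRequest does above.
	func (e event) Save() (map[string]bigquery.Value, string, error) {
		return map[string]bigquery.Value{
			"kind": e.kind,
			"n":    e.n,
		}, "", nil
	}

	var _ bigquery.ValueSaver = event{}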

(File diff suppressed because it is too large.)


@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,172 +15,208 @@
 package bigquery

 import (
-	"errors"
+	"context"
 	"fmt"
+	"reflect"

-	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
+	"google.golang.org/api/iterator"
 )

-// A pageFetcher returns a page of rows, starting from the row specified by token.
-type pageFetcher interface {
-	fetch(ctx context.Context, s service, token string) (*readDataResult, error)
-}
-
-// Iterator provides access to the result of a BigQuery lookup.
-// Next must be called before the first call to Get.
-type Iterator struct {
-	service service
-
-	err error // contains any error encountered during calls to Next.
-
-	// Once Next has been called at least once, schema has the result schema, rs contains the current
-	// page of data, and nextToken contains the token for fetching the next
-	// page (empty if there is no more data to be fetched).
-	schema    Schema
-	rs        [][]Value
-	nextToken string
-
-	// The remaining fields contain enough information to fetch the current
-	// page of data, and determine which row of data from this page is the
-	// current row.
-	pf        pageFetcher
-	pageToken string
-
-	// The offset from the start of the current page to the current row.
-	// For a new iterator, this is -1.
-	offset int64
-}
-
-func newIterator(s service, pf pageFetcher) *Iterator {
-	return &Iterator{
-		service: s,
-		pf:      pf,
-		offset:  -1,
-	}
-}
-
-// fetchPage loads the current page of data from the server.
-// The contents of rs and nextToken are replaced with the loaded data.
-// If there is an error while fetching, the error is stored in it.err and false is returned.
-func (it *Iterator) fetchPage(ctx context.Context) bool {
-	var res *readDataResult
-	var err error
-	for {
-		res, err = it.pf.fetch(ctx, it.service, it.pageToken)
-		if err != errIncompleteJob {
-			break
-		}
-	}
-	if err != nil {
-		it.err = err
-		return false
-	}
-
-	it.schema = res.schema
-	it.rs = res.rows
-	it.nextToken = res.pageToken
-	return true
-}
-
-// getEnoughData loads new data into rs until offset no longer points beyond the end of rs.
-func (it *Iterator) getEnoughData(ctx context.Context) bool {
-	if len(it.rs) == 0 {
-		// Either we have not yet fetched any pages, or we are iterating over an empty dataset.
-		// In the former case, we should fetch a page of data, so that we can depend on the resultant nextToken.
-		// In the latter case, it is harmless to fetch a page of data.
-		if !it.fetchPage(ctx) {
-			return false
-		}
-	}
-	for it.offset >= int64(len(it.rs)) {
-		// If offset is still outside the bounds of the loaded data,
-		// but there are no more pages of data to fetch, then we have
-		// failed to satisfy the offset.
-		if it.nextToken == "" {
-			return false
-		}
-		// offset cannot be satisfied with the currently loaded data,
-		// so we fetch the next page. We no longer need the existing
-		// cached rows, so we remove them and update the offset to be
-		// relative to the new page that we're about to fetch.
-		// NOTE: we can't just set offset to 0, because after
-		// marshalling/unmarshalling, it's possible for the offset to
-		// point arbitrarily far beyond the end of rs.
-		// This can happen if the server returns a different size
-		// results page before and after marshalling.
-		it.offset -= int64(len(it.rs))
-		it.pageToken = it.nextToken
-		if !it.fetchPage(ctx) {
-			return false
-		}
-	}
-	return true
-}
-
-// Next advances the Iterator to the next row, making that row available
-// via the Get method.
-// Next must be called before the first call to Get or Schema, and blocks until data is available.
-// Next returns false when there are no more rows available, either because
-// the end of the output was reached, or because there was an error (consult
-// the Err method to determine which).
-func (it *Iterator) Next(ctx context.Context) bool {
-	if it.err != nil {
-		return false
-	}
-
-	// Advance offset to where we want it to be for the next call to Get.
-	it.offset++
-
-	// offset may now point beyond the end of rs, so we fetch data
-	// until offset is within its bounds again. If there are no more
-	// results available, offset will be left pointing beyond the bounds
-	// of rs.
-	// At the end of this method, rs will contain at least one element
-	// unless the dataset we are iterating over is empty.
-	return it.getEnoughData(ctx)
-}
-
-// Err returns the last error encountered by Next, or nil for no error.
-func (it *Iterator) Err() error {
-	return it.err
-}
-
-// verifyState checks that the iterator is pointing to a valid row.
-func (it *Iterator) verifyState() error {
-	if it.err != nil {
-		return fmt.Errorf("called on iterator in error state: %v", it.err)
-	}
-
-	// If Next has been called, then offset should always index into a
-	// valid row in rs, as long as there is still data available.
-	if it.offset >= int64(len(it.rs)) || it.offset < 0 {
-		return errors.New("called without preceding successful call to Next")
-	}
-
-	return nil
-}
-
-// Get loads the current row into dst, which must implement ValueLoader.
-func (it *Iterator) Get(dst interface{}) error {
-	if err := it.verifyState(); err != nil {
-		return fmt.Errorf("Get %v", err)
-	}
-
-	if dst, ok := dst.(ValueLoader); ok {
-		return dst.Load(it.rs[it.offset])
-	}
-	return errors.New("Get called with unsupported argument type")
-}
-
-// Schema returns the schema of the result rows.
-func (it *Iterator) Schema() (Schema, error) {
-	if err := it.verifyState(); err != nil {
-		return nil, fmt.Errorf("Schema %v", err)
-	}
-
-	return it.schema, nil
-}
+// Construct a RowIterator.
+// If pf is nil, there are no rows in the result set.
+func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
+	it := &RowIterator{
+		ctx:   ctx,
+		table: t,
+		pf:    pf,
+	}
+	if pf != nil {
+		it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+			it.fetch,
+			func() int { return len(it.rows) },
+			func() interface{} { r := it.rows; it.rows = nil; return r })
+	}
+	return it
+}
+
+// A RowIterator provides access to the result of a BigQuery lookup.
+type RowIterator struct {
+	ctx      context.Context
+	table    *Table
+	pf       pageFetcher
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// StartIndex can be set before the first call to Next. If PageInfo().Token
+	// is also set, StartIndex is ignored.
+	StartIndex uint64
+
+	// The schema of the table. Available after the first call to Next.
+	Schema Schema
+
+	// The total number of rows in the result. Available after the first call to Next.
+	// May be zero just after rows were inserted.
+	TotalRows uint64
+
+	rows         [][]Value
+	structLoader structLoader // used to populate a pointer to a struct
+}
+
+// Next loads the next row into dst. Its return value is iterator.Done if there
+// are no more results. Once Next returns iterator.Done, all subsequent calls
+// will return iterator.Done.
+//
+// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
+//
+// If dst is a *[]Value, it will be set to new []Value whose i'th element
+// will be populated with the i'th column of the row.
+//
+// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
+// for each schema column name, the map key of that name will be set to the column's
+// value. STRUCT types (RECORD types or nested schemas) become nested maps.
+//
+// If dst is pointer to a struct, each column in the schema will be matched
+// with an exported field of the struct that has the same name, ignoring case.
+// Unmatched schema columns and struct fields will be ignored.
+//
+// Each BigQuery column type corresponds to one or more Go types; a matching struct
+// field must be of the correct type. The correspondences are:
+//
+//   STRING      string
+//   BOOL        bool
+//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
+//   FLOAT       float32, float64
+//   BYTES       []byte
+//   TIMESTAMP   time.Time
+//   DATE        civil.Date
+//   TIME        civil.Time
+//   DATETIME    civil.DateTime
+//
+// A repeated field corresponds to a slice or array of the element type. A STRUCT
+// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
+// All calls to Next on the same iterator must use the same struct type.
+//
+// It is an error to attempt to read a BigQuery NULL value into a struct field,
+// unless the field is of type []byte or is one of the special Null types: NullInt64,
+// NullFloat64, NullBool, NullString, NullTimestamp, NullDate, NullTime or
+// NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
+// table with NULLs.
+func (it *RowIterator) Next(dst interface{}) error {
+	if it.pf == nil { // There are no rows in the result set.
+		return iterator.Done
+	}
+	var vl ValueLoader
+	switch dst := dst.(type) {
+	case ValueLoader:
+		vl = dst
+	case *[]Value:
+		vl = (*valueList)(dst)
+	case *map[string]Value:
+		vl = (*valueMap)(dst)
+	default:
+		if !isStructPtr(dst) {
+			return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
+		}
+	}
+	if err := it.nextFunc(); err != nil {
+		return err
+	}
+	row := it.rows[0]
+	it.rows = it.rows[1:]
+
+	if vl == nil {
+		// This can only happen if dst is a pointer to a struct. We couldn't
+		// set vl above because we need the schema.
+		if err := it.structLoader.set(dst, it.Schema); err != nil {
+			return err
+		}
+		vl = &it.structLoader
+	}
+	return vl.Load(row, it.Schema)
+}
+
+func isStructPtr(x interface{}) bool {
+	t := reflect.TypeOf(x)
+	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
+	res, err := it.pf(it.ctx, it.table, it.Schema, it.StartIndex, int64(pageSize), pageToken)
+	if err != nil {
+		return "", err
+	}
+	it.rows = append(it.rows, res.rows...)
+	it.Schema = res.schema
+	it.TotalRows = res.totalRows
+	return res.pageToken, nil
+}
+
+// A pageFetcher returns a page of rows from a destination table.
+type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)
+
+type fetchPageResult struct {
+	pageToken string
+	rows      [][]Value
+	totalRows uint64
+	schema    Schema
+}
+
+// fetchPage gets a page of rows from t.
+func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
+	// Fetch the table schema in the background, if necessary.
+	errc := make(chan error, 1)
+	if schema != nil {
+		errc <- nil
+	} else {
+		go func() {
+			var bqt *bq.Table
+			err := runWithRetry(ctx, func() (err error) {
+				bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
+					Fields("schema").
+					Context(ctx).
+					Do()
+				return err
+			})
+			if err == nil && bqt.Schema != nil {
+				schema = bqToSchema(bqt.Schema)
+			}
+			errc <- err
+		}()
+	}
+	call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
+	setClientHeader(call.Header())
+	if pageToken != "" {
+		call.PageToken(pageToken)
+	} else {
+		call.StartIndex(startIndex)
+	}
+	if pageSize > 0 {
+		call.MaxResults(pageSize)
+	}
+	var res *bq.TableDataList
+	err := runWithRetry(ctx, func() (err error) {
+		res, err = call.Context(ctx).Do()
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	err = <-errc
+	if err != nil {
+		return nil, err
+	}
+	rows, err := convertRows(res.Rows, schema)
+	if err != nil {
+		return nil, err
+	}
+	return &fetchPageResult{
+		pageToken: res.PageToken,
+		rows:      rows,
+		totalRows: uint64(res.TotalRows),
+		schema:    schema,
+	}, nil
+}
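
A usage sketch for RowIterator.Next above, reading rows into a struct pointer until iterator.Done is returned. It assumes the package's Query.Read entry point; the query text and the Row type are placeholders whose fields follow the type correspondences documented on Next.

	package main

	import (
		"context"

		"cloud.google.com/go/bigquery"
		"google.golang.org/api/iterator"
	)

	// Row's fields are matched to schema columns by name, ignoring case.
	type Row struct {
		Name  string
		Count int
	}

	func readAll(ctx context.Context, client *bigquery.Client) ([]Row, error) {
		it, err := client.Query("SELECT name, count FROM mydataset.mytable").Read(ctx)
		if err != nil {
			return nil, err
		}
		var rows []Row
		for {
			var r Row
			err := it.Next(&r)
			if err == iterator.Done {
				break // all rows have been consumed
			}
			if err != nil {
				return nil, err
			}
			rows = append(rows, r)
		}
		return rows, nil
	}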


@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,324 +15,226 @@
 package bigquery

 import (
+	"context"
 	"errors"
 	"fmt"
-	"reflect"
 	"testing"

-	"golang.org/x/net/context"
+	"cloud.google.com/go/internal/testutil"
+	"google.golang.org/api/iterator"
 )

 type fetchResponse struct {
-	result *readDataResult // The result to return.
-	err    error           // The error to return.
+	result *fetchPageResult // The result to return.
+	err    error            // The error to return.
 }

 // pageFetcherStub services fetch requests by returning data from an in-memory list of values.
 type pageFetcherStub struct {
 	fetchResponses map[string]fetchResponse
-
-	err error
+	err            error
 }

-func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
-	call, ok := pf.fetchResponses[token]
+func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) {
+	call, ok := pf.fetchResponses[pageToken]
 	if !ok {
-		pf.err = fmt.Errorf("Unexpected page token: %q", token)
+		pf.err = fmt.Errorf("Unexpected page token: %q", pageToken)
 	}
 	return call.result, call.err
 }

 func TestIterator(t *testing.T) {
+	var (
+		iiSchema = Schema{
+			{Type: IntegerFieldType},
+			{Type: IntegerFieldType},
+		}
+		siSchema = Schema{
+			{Type: StringFieldType},
+			{Type: IntegerFieldType},
+		}
+	)
 	fetchFailure := errors.New("fetch failure")

 	testCases := []struct {
-		desc            string
-		alreadyConsumed int64 // amount to advance offset before commencing reading.
-		fetchResponses  map[string]fetchResponse
-		want            []ValueList
-		wantErr         error
-		wantSchema      Schema
+		desc           string
+		pageToken      string
+		fetchResponses map[string]fetchResponse
+		want           [][]Value
+		wantErr        error
+		wantSchema     Schema
+		wantTotalRows  uint64
 	}{
 		{
 			desc: "Iteration over single empty page",
 			fetchResponses: map[string]fetchResponse{
 				"": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "",
 						rows:      [][]Value{},
 						schema:    Schema{},
 					},
 				},
 			},
-			want:       []ValueList{},
+			want:       [][]Value{},
 			wantSchema: Schema{},
 		},
 		{
 			desc: "Iteration over single page",
 			fetchResponses: map[string]fetchResponse{
 				"": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "",
 						rows:      [][]Value{{1, 2}, {11, 12}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
+						totalRows: 4,
 					},
 				},
 			},
-			want: []ValueList{{1, 2}, {11, 12}},
-			wantSchema: Schema{
-				{Type: IntegerFieldType},
-				{Type: IntegerFieldType},
-			},
+			want:          [][]Value{{1, 2}, {11, 12}},
+			wantSchema:    iiSchema,
+			wantTotalRows: 4,
 		},
 		{
 			desc: "Iteration over single page with different schema",
 			fetchResponses: map[string]fetchResponse{
 				"": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "",
 						rows:      [][]Value{{"1", 2}, {"11", 12}},
-						schema: Schema{
-							{Type: StringFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    siSchema,
 					},
 				},
 			},
-			want: []ValueList{{"1", 2}, {"11", 12}},
-			wantSchema: Schema{
-				{Type: StringFieldType},
-				{Type: IntegerFieldType},
-			},
+			want:       [][]Value{{"1", 2}, {"11", 12}},
+			wantSchema: siSchema,
 		},
 		{
 			desc: "Iteration over two pages",
 			fetchResponses: map[string]fetchResponse{
 				"": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "a",
 						rows:      [][]Value{{1, 2}, {11, 12}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
+						totalRows: 4,
 					},
 				},
 				"a": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "",
 						rows:      [][]Value{{101, 102}, {111, 112}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
+						totalRows: 4,
 					},
 				},
 			},
-			want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
-			wantSchema: Schema{
-				{Type: IntegerFieldType},
-				{Type: IntegerFieldType},
-			},
+			want:          [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
+			wantSchema:    iiSchema,
+			wantTotalRows: 4,
 		},
 		{
 			desc: "Server response includes empty page",
 			fetchResponses: map[string]fetchResponse{
 				"": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "a",
 						rows:      [][]Value{{1, 2}, {11, 12}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
 					},
 				},
 				"a": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "b",
 						rows:      [][]Value{},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
 					},
 				},
 				"b": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "",
 						rows:      [][]Value{{101, 102}, {111, 112}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
 					},
 				},
 			},
-			want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
-			wantSchema: Schema{
-				{Type: IntegerFieldType},
-				{Type: IntegerFieldType},
-			},
+			want:       [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
+			wantSchema: iiSchema,
 		},
 		{
 			desc: "Fetch error",
 			fetchResponses: map[string]fetchResponse{
 				"": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "a",
 						rows:      [][]Value{{1, 2}, {11, 12}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
 					},
 				},
 				"a": {
 					// We return some data from this fetch, but also an error.
 					// So the end result should include only data from the previous fetch.
 					err: fetchFailure,
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "b",
 						rows:      [][]Value{{101, 102}, {111, 112}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
 					},
 				},
 			},
-			want:    []ValueList{{1, 2}, {11, 12}},
+			want:    [][]Value{{1, 2}, {11, 12}},
 			wantErr: fetchFailure,
-			wantSchema: Schema{
-				{Type: IntegerFieldType},
-				{Type: IntegerFieldType},
-			},
+			wantSchema: iiSchema,
 		},
 		{
-			desc:            "Skip over a single element",
-			alreadyConsumed: 1,
-			fetchResponses: map[string]fetchResponse{
-				"": {
-					result: &readDataResult{
-						pageToken: "a",
-						rows:      [][]Value{{1, 2}, {11, 12}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
-					},
-				},
-				"a": {
-					result: &readDataResult{
-						pageToken: "",
-						rows:      [][]Value{{101, 102}, {111, 112}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
-					},
-				},
-			},
-			want: []ValueList{{11, 12}, {101, 102}, {111, 112}},
-			wantSchema: Schema{
-				{Type: IntegerFieldType},
-				{Type: IntegerFieldType},
-			},
-		},
-		{
-			desc:            "Skip over an entire page",
-			alreadyConsumed: 2,
+			desc:      "Skip over an entire page",
+			pageToken: "a",
 			fetchResponses: map[string]fetchResponse{
 				"": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "a",
 						rows:      [][]Value{{1, 2}, {11, 12}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
 					},
 				},
 				"a": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "",
 						rows:      [][]Value{{101, 102}, {111, 112}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
 					},
 				},
 			},
-			want: []ValueList{{101, 102}, {111, 112}},
-			wantSchema: Schema{
-				{Type: IntegerFieldType},
-				{Type: IntegerFieldType},
-			},
-		},
-		{
-			desc:            "Skip beyond start of second page",
-			alreadyConsumed: 3,
-			fetchResponses: map[string]fetchResponse{
-				"": {
-					result: &readDataResult{
-						pageToken: "a",
-						rows:      [][]Value{{1, 2}, {11, 12}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
-					},
-				},
-				"a": {
-					result: &readDataResult{
-						pageToken: "",
-						rows:      [][]Value{{101, 102}, {111, 112}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
-					},
-				},
-			},
-			want: []ValueList{{111, 112}},
-			wantSchema: Schema{
-				{Type: IntegerFieldType},
-				{Type: IntegerFieldType},
-			},
+			want:       [][]Value{{101, 102}, {111, 112}},
+			wantSchema: iiSchema,
 		},
 		{
-			desc:            "Skip beyond all data",
-			alreadyConsumed: 4,
+			desc:      "Skip beyond all data",
+			pageToken: "b",
 			fetchResponses: map[string]fetchResponse{
 				"": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "a",
 						rows:      [][]Value{{1, 2}, {11, 12}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
 					},
 				},
 				"a": {
-					result: &readDataResult{
-						pageToken: "",
+					result: &fetchPageResult{
+						pageToken: "b",
 						rows:      [][]Value{{101, 102}, {111, 112}},
-						schema: Schema{
-							{Type: IntegerFieldType},
-							{Type: IntegerFieldType},
-						},
+						schema:    iiSchema,
 					},
 				},
+				"b": {
+					result: &fetchPageResult{},
+				},
 			},
 			// In this test case, Next will return false on its first call,
 			// so we won't even attempt to call Get.
-			want:       []ValueList{},
+			want:       [][]Value{},
 			wantSchema: Schema{},
 		},
 	}
@@ -341,173 +243,88 @@ func TestIterator(t *testing.T) {
 		pf := &pageFetcherStub{
 			fetchResponses: tc.fetchResponses,
 		}
-		it := newIterator(nil, pf)
-		it.offset += tc.alreadyConsumed
-
-		values, schema, err := consumeIterator(it)
-		if err != nil {
-			t.Fatalf("%s: %v", tc.desc, err)
+		it := newRowIterator(context.Background(), nil, pf.fetchPage)
+		it.PageInfo().Token = tc.pageToken
+		values, schema, totalRows, err := consumeRowIterator(it)
+		if err != tc.wantErr {
+			t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
 		}
-
-		if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
+		if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
 			t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
 		}
-		if it.Err() != tc.wantErr {
-			t.Errorf("%s: iterator.Err:\ngot: %v\nwant: %v", tc.desc, it.Err(), tc.wantErr)
-		}
-		if (len(schema) != 0 || len(tc.wantSchema) != 0) && !reflect.DeepEqual(schema, tc.wantSchema) {
+		if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
 			t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
 		}
-	}
-}
-
-// consumeIterator reads the schema and all values from an iterator and returns them.
-func consumeIterator(it *Iterator) ([]ValueList, Schema, error) {
-	var got []ValueList
-	var schema Schema
-	for it.Next(context.Background()) {
-		var vals ValueList
-		var err error
-		if err = it.Get(&vals); err != nil {
-			return nil, Schema{}, fmt.Errorf("err calling Get: %v", err)
-		}
-		got = append(got, vals)
-		if schema, err = it.Schema(); err != nil {
-			return nil, Schema{}, fmt.Errorf("err calling Schema: %v", err)
+		if totalRows != tc.wantTotalRows {
+			t.Errorf("%s: totalRows: got %d, want %d", tc.desc, totalRows, tc.wantTotalRows)
 		}
 	}
-	return got, schema, nil
 }

-func TestGetBeforeNext(t *testing.T) {
-	// TODO: once mashalling/unmarshalling of iterators is implemented, do a similar test for unmarshalled iterators.
-	pf := &pageFetcherStub{
-		fetchResponses: map[string]fetchResponse{
-			"": {
-				result: &readDataResult{
-					pageToken: "",
-					rows:      [][]Value{{1, 2}, {11, 12}},
-				},
-			},
-		},
-	}
-	it := newIterator(nil, pf)
-	var vals ValueList
-	if err := it.Get(&vals); err == nil {
-		t.Errorf("Expected error calling Get before Next")
+// consumeRowIterator reads the schema and all values from a RowIterator and returns them.
+func consumeRowIterator(it *RowIterator) ([][]Value, Schema, uint64, error) {
+	var (
+		got       [][]Value
+		schema    Schema
+		totalRows uint64
+	)
+	for {
+		var vls []Value
+		err := it.Next(&vls)
+		if err == iterator.Done {
+			return got, schema, totalRows, nil
+		}
+		if err != nil {
+			return got, schema, totalRows, err
+		}
+		got = append(got, vls)
+		schema = it.Schema
+		totalRows = it.TotalRows
 	}
 }

-type delayedPageFetcher struct {
-	pageFetcherStub
-	delayCount int
-}
-
-func (pf *delayedPageFetcher) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
-	if pf.delayCount > 0 {
-		pf.delayCount--
-		return nil, errIncompleteJob
-	}
-	return pf.pageFetcherStub.fetch(ctx, s, token)
-}
-
-func TestIterateIncompleteJob(t *testing.T) {
-	want := []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}}
-	pf := pageFetcherStub{
-		fetchResponses: map[string]fetchResponse{
-			"": {
-				result: &readDataResult{
-					pageToken: "a",
-					rows:      [][]Value{{1, 2}, {11, 12}},
-				},
-			},
-			"a": {
-				result: &readDataResult{
-					pageToken: "",
-					rows:      [][]Value{{101, 102}, {111, 112}},
-				},
-			},
-		},
-	}
-	dpf := &delayedPageFetcher{
-		pageFetcherStub: pf,
-		delayCount:      1,
-	}
-	it := newIterator(nil, dpf)
-
-	values, _, err := consumeIterator(it)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) {
-		t.Errorf("values: got:\n%v\nwant:\n%v", values, want)
-	}
-	if it.Err() != nil {
-		t.Fatalf("iterator.Err: got:\n%v", it.Err())
-	}
-	if dpf.delayCount != 0 {
-		t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount)
-	}
-}
-
-func TestGetDuringErrorState(t *testing.T) {
+func TestNextDuringErrorState(t *testing.T) {
 	pf := &pageFetcherStub{
 		fetchResponses: map[string]fetchResponse{
 			"": {err: errors.New("bang")},
 		},
 	}
-	it := newIterator(nil, pf)
-	var vals ValueList
-	it.Next(context.Background())
-	if it.Err() == nil {
+	it := newRowIterator(context.Background(), nil, pf.fetchPage)
+	var vals []Value
+	if err := it.Next(&vals); err == nil {
 		t.Errorf("Expected error after calling Next")
 	}
-	if err := it.Get(&vals); err == nil {
-		t.Errorf("Expected error calling Get when iterator has a non-nil error.")
+	if err := it.Next(&vals); err == nil {
+		t.Errorf("Expected error calling Next again when iterator has a non-nil error.")
 	}
 }

-func TestGetAfterFinished(t *testing.T) {
+func TestNextAfterFinished(t *testing.T) {
 	testCases := []struct {
-		alreadyConsumed int64 // amount to advance offset before commencing reading.
-		fetchResponses  map[string]fetchResponse
-		want            []ValueList
+		fetchResponses map[string]fetchResponse
+		want           [][]Value
 	}{
 		{
 			fetchResponses: map[string]fetchResponse{
 				"": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "",
 						rows:      [][]Value{{1, 2}, {11, 12}},
 					},
 				},
 			},
-			want: []ValueList{{1, 2}, {11, 12}},
+			want: [][]Value{{1, 2}, {11, 12}},
 		},
 		{
 			fetchResponses: map[string]fetchResponse{
 				"": {
-					result: &readDataResult{
+					result: &fetchPageResult{
 						pageToken: "",
 						rows:      [][]Value{},
 					},
 				},
 			},
-			want: []ValueList{},
-		},
-		{
-			alreadyConsumed: 100,
-			fetchResponses: map[string]fetchResponse{
-				"": {
-					result: &readDataResult{
-						pageToken: "",
-						rows:      [][]Value{{1, 2}, {11, 12}},
-					},
-				},
-			},
-			want: []ValueList{},
+			want: [][]Value{},
 		},
 	}
@@ -515,24 +332,31 @@ func TestGetAfterFinished(t *testing.T) {
 		pf := &pageFetcherStub{
 			fetchResponses: tc.fetchResponses,
 		}
-		it := newIterator(nil, pf)
-		it.offset += tc.alreadyConsumed
+		it := newRowIterator(context.Background(), nil, pf.fetchPage)

-		values, _, err := consumeIterator(it)
+		values, _, _, err := consumeRowIterator(it)
 		if err != nil {
 			t.Fatal(err)
 		}
-
-		if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
+		if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
 			t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
 		}
-		if it.Err() != nil {
-			t.Fatalf("iterator.Err: got:\n%v\nwant:\n:nil", it.Err())
-		}
 		// Try calling Get again.
-		var vals ValueList
-		if err := it.Get(&vals); err == nil {
-			t.Errorf("Expected error calling Get when there are no more values")
+		var vals []Value
+		if err := it.Next(&vals); err != iterator.Done {
+			t.Errorf("Expected Done calling Next when there are no more values")
 		}
 	}
 }
+
+func TestIteratorNextTypes(t *testing.T) {
+	it := newRowIterator(context.Background(), nil, nil)
+	for _, v := range []interface{}{3, "s", []int{}, &[]int{},
+		map[string]Value{}, &map[string]interface{}{},
+		struct{}{},
+	} {
+		if err := it.Next(v); err == nil {
+			t.Errorf("%v: want error, got nil", v)
+		}
+	}
+}


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved. // Copyright 2015 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -15,48 +15,80 @@
package bigquery package bigquery
import ( import (
"context"
"errors" "errors"
"fmt"
"time"
"golang.org/x/net/context" "cloud.google.com/go/internal"
"cloud.google.com/go/internal/trace"
gax "github.com/googleapis/gax-go/v2"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
) )
// A Job represents an operation which has been submitted to BigQuery for processing. // A Job represents an operation which has been submitted to BigQuery for processing.
type Job struct { type Job struct {
service service c *Client
projectID string projectID string
jobID string jobID string
location string
isQuery bool email string
config *bq.JobConfiguration
lastStatus *JobStatus
} }
// JobFromID creates a Job which refers to an existing BigQuery job. The job // JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have // need not have been created by this package. For example, the job may have
// been created in the BigQuery console. // been created in the BigQuery console.
//
// For jobs whose location is other than "US" or "EU", set Client.Location or use
// JobFromIDLocation.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) { func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
jobType, err := c.service.getJobType(ctx, c.projectID, id) return c.JobFromIDLocation(ctx, id, c.Location)
}
// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package (for example, it may have
// been created in the BigQuery console), but it must exist in the specified location.
func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.JobFromIDLocation")
defer func() { trace.EndSpan(ctx, err) }()
bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics")
if err != nil { if err != nil {
return nil, err return nil, err
} }
return bqToJob(bqjob, c)
return &Job{
service: c.service,
projectID: c.projectID,
jobID: id,
isQuery: jobType == queryJobType,
}, nil
} }
// ID returns the job's ID.
func (j *Job) ID() string { func (j *Job) ID() string {
return j.jobID return j.jobID
} }
// Location returns the job's location.
func (j *Job) Location() string {
return j.location
}
// Email returns the email of the job's creator.
func (j *Job) Email() string {
return j.email
}
// State is one of a sequence of states that a Job progresses through as it is processed. // State is one of a sequence of states that a Job progresses through as it is processed.
type State int type State int
const ( const (
Pending State = iota // StateUnspecified is the default JobIterator state.
StateUnspecified State = iota
// Pending is a state that describes that the job is pending.
Pending
// Running is a state that describes that the job is running.
Running Running
// Done is a state that describes that the job is done.
Done Done
) )
@@ -69,63 +101,730 @@ type JobStatus struct {
// All errors encountered during the running of the job. // All errors encountered during the running of the job.
// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful. // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
Errors []*Error Errors []*Error
// Statistics about the job.
Statistics *JobStatistics
} }
// jobOption is an Option which modifies a bq.Job proto. // JobConfig contains configuration information for a job. It is implemented by
// This is used for configuring values that apply to all operations, such as setting a jobReference. // *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
type jobOption interface { type JobConfig interface {
customizeJob(job *bq.Job, projectID string) isJobConfig()
} }
type jobID string func (*CopyConfig) isJobConfig() {}
func (*ExtractConfig) isJobConfig() {}
func (*LoadConfig) isJobConfig() {}
func (*QueryConfig) isJobConfig() {}
// JobID returns an Option that sets the job ID of a BigQuery job. // Config returns the configuration information for j.
// If this Option is not used, a job ID is generated automatically. func (j *Job) Config() (JobConfig, error) {
func JobID(ID string) Option { return bqToJobConfig(j.config, j.c)
return jobID(ID)
} }
func (opt jobID) implementsOption() {} func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) {
switch {
func (opt jobID) customizeJob(job *bq.Job, projectID string) { case q == nil:
job.JobReference = &bq.JobReference{ return nil, nil
JobId: string(opt), case q.Copy != nil:
ProjectId: projectID, return bqToCopyConfig(q, c), nil
case q.Extract != nil:
return bqToExtractConfig(q, c), nil
case q.Load != nil:
return bqToLoadConfig(q, c), nil
case q.Query != nil:
return bqToQueryConfig(q, c)
default:
return nil, nil
} }
} }
// JobIDConfig describes how to create an ID for a job.
type JobIDConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Location is the location for the job.
Location string
}
// createJobRef creates a JobReference.
func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference {
// We don't check whether projectID is empty; the server will return an
// error when it encounters the resulting JobReference.
loc := j.Location
if loc == "" { // Use Client.Location as a default.
loc = c.Location
}
jr := &bq.JobReference{ProjectId: c.projectID, Location: loc}
if j.JobID == "" {
jr.JobId = randomIDFn()
} else if j.AddJobIDSuffix {
jr.JobId = j.JobID + "-" + randomIDFn()
} else {
jr.JobId = j.JobID
}
return jr
}
// Done reports whether the job has completed. // Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccesfully. // After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool { func (s *JobStatus) Done() bool {
return s.State == Done return s.State == Done
} }
// Err returns the error that caused the job to complete unsuccesfully (if any). // Err returns the error that caused the job to complete unsuccessfully (if any).
func (s *JobStatus) Err() error { func (s *JobStatus) Err() error {
return s.err return s.err
} }
// Status returns the current status of the job. It fails if the Status could not be determined. // Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) { func (j *Job) Status(ctx context.Context) (js *JobStatus, err error) {
return j.service.jobStatus(ctx, j.projectID, j.jobID) ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Status")
defer func() { trace.EndSpan(ctx, err) }()
bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics")
if err != nil {
return nil, err
}
if err := j.setStatus(bqjob.Status); err != nil {
return nil, err
}
j.setStatistics(bqjob.Statistics, j.c)
return j.lastStatus, nil
}
// LastStatus returns the most recently retrieved status of the job. The status is
// retrieved when a new job is created, or when JobFromID or Job.Status is called.
// Call Job.Status to get the most up-to-date information about a job.
func (j *Job) LastStatus() *JobStatus {
return j.lastStatus
} }
// Cancel requests that a job be cancelled. This method returns without waiting for // Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status. // cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs. // Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error { func (j *Job) Cancel(ctx context.Context) error {
return j.service.jobCancel(ctx, j.projectID, j.jobID) // Jobs.Cancel returns a job entity, but the only relevant piece of
// data it may contain (the status of the job) is unreliable. From the
// docs: "This call will return immediately, and the client will need
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
Location(j.location).
Fields(). // We don't need any of the response data.
Context(ctx)
setClientHeader(call.Header())
return runWithRetry(ctx, func() error {
_, err := call.Do()
return err
})
} }
func (j *Job) implementsReadSource() {} // Wait blocks until the job or the context is done. It returns the final status
// of the job.
// If an error occurs while retrieving the status, Wait returns that error. But
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (js *JobStatus, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Wait")
defer func() { trace.EndSpan(ctx, err) }()
func (j *Job) customizeReadQuery(cursor *readQueryConf) error { if j.isQuery() {
// There are mulitple kinds of jobs, but only a query job is suitable for reading. // We can avoid polling for query jobs.
if !j.isQuery { if _, _, err := j.waitForQuery(ctx, j.projectID); err != nil {
return errors.New("Cannot read from a non-query job") return nil, err
}
// Note: extra RPC even if you just want to wait for the query to finish.
js, err := j.Status(ctx)
if err != nil {
return nil, err
}
return js, nil
}
// Non-query jobs must poll.
err = internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
js, err = j.Status(ctx)
if err != nil {
return true, err
}
if js.Done() {
return true, nil
}
return false, nil
})
if err != nil {
return nil, err
}
return js, nil
}
// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (ri *RowIterator, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Read")
defer func() { trace.EndSpan(ctx, err) }()
return j.read(ctx, j.waitForQuery, fetchPage)
}
func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, uint64, error), pf pageFetcher) (*RowIterator, error) {
if !j.isQuery() {
return nil, errors.New("bigquery: cannot read from a non-query job")
}
destTable := j.config.Query.DestinationTable
// The destination table should only be nil if there was a query error.
projectID := j.projectID
if destTable != nil && projectID != destTable.ProjectId {
return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId)
}
schema, totalRows, err := waitForQuery(ctx, projectID)
if err != nil {
return nil, err
}
if destTable == nil {
return nil, errors.New("bigquery: query job missing destination table")
}
dt := bqToTable(destTable, j.c)
if totalRows == 0 {
pf = nil
}
it := newRowIterator(ctx, dt, pf)
it.Schema = schema
it.TotalRows = totalRows
return it, nil
}
// waitForQuery waits for the query job to complete and returns its schema. It also
// returns the total number of rows in the result set.
func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, uint64, error) {
// Use GetQueryResults only to wait for completion, not to read results.
call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0)
setClientHeader(call.Header())
backoff := gax.Backoff{
Initial: 1 * time.Second,
Multiplier: 2,
Max: 60 * time.Second,
}
var res *bq.GetQueryResultsResponse
err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
res, err = call.Do()
if err != nil {
return !retryableError(err), err
}
if !res.JobComplete { // GetQueryResults may return early without error; retry.
return false, nil
}
return true, nil
})
if err != nil {
return nil, 0, err
}
return bqToSchema(res.Schema), res.TotalRows, nil
}
// JobStatistics contains statistics about a job.
type JobStatistics struct {
CreationTime time.Time
StartTime time.Time
EndTime time.Time
TotalBytesProcessed int64
Details Statistics
}
// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics.
type Statistics interface {
implementsStatistics()
}
// ExtractStatistics contains statistics about an extract job.
type ExtractStatistics struct {
// The number of files per destination URI or URI pattern specified in the
// extract configuration. These values will be in the same order as the
// URIs specified in the 'destinationUris' field.
DestinationURIFileCounts []int64
}
// LoadStatistics contains statistics about a load job.
type LoadStatistics struct {
// The number of bytes of source data in a load job.
InputFileBytes int64
// The number of source files in a load job.
InputFiles int64
// Size of the loaded data in bytes. Note that while a load job is in the
// running state, this value may change.
OutputBytes int64
// The number of rows imported in a load job. Note that while an import job is
// in the running state, this value may change.
OutputRows int64
}
// QueryStatistics contains statistics about a query job.
type QueryStatistics struct {
// Billing tier for the job.
BillingTier int64
// Whether the query result was fetched from the query cache.
CacheHit bool
// The type of query statement, if valid.
StatementType string
// Total bytes billed for the job.
TotalBytesBilled int64
// Total bytes processed for the job.
TotalBytesProcessed int64
// For dry run queries, indicates how accurate the TotalBytesProcessed value is.
// When indicated, values include:
// UNKNOWN: accuracy of the estimate is unknown.
// PRECISE: estimate is precise.
// LOWER_BOUND: estimate is lower bound of what the query would cost.
// UPPER_BOUND: estimate is upper bound of what the query would cost.
TotalBytesProcessedAccuracy string
// Describes execution plan for the query.
QueryPlan []*ExplainQueryStage
// The number of rows affected by a DML statement. Present only for DML
// statements INSERT, UPDATE or DELETE.
NumDMLAffectedRows int64
// Describes a timeline of job execution.
Timeline []*QueryTimelineSample
// ReferencedTables: [Output-only, Experimental] Referenced tables for
// the job. Queries that reference more than 50 tables will not have a
// complete list.
ReferencedTables []*Table
// The schema of the results. Present only for successful dry run of
// non-legacy SQL queries.
Schema Schema
// Slot-milliseconds consumed by this query job.
SlotMillis int64
// Standard SQL: list of undeclared query parameter names detected during a
// dry run validation.
UndeclaredQueryParameterNames []string
// DDL target table.
DDLTargetTable *Table
// DDL Operation performed on the target table. Used to report how the
// query impacted the DDL target table.
DDLOperationPerformed string
}
// ExplainQueryStage describes one stage of a query.
type ExplainQueryStage struct {
// CompletedParallelInputs: Number of parallel input segments completed.
CompletedParallelInputs int64
// ComputeAvg: Duration the average shard spent on CPU-bound tasks.
ComputeAvg time.Duration
// ComputeMax: Duration the slowest shard spent on CPU-bound tasks.
ComputeMax time.Duration
// Relative amount of the total time the average shard spent on CPU-bound tasks.
ComputeRatioAvg float64
// Relative amount of the total time the slowest shard spent on CPU-bound tasks.
ComputeRatioMax float64
// EndTime: Stage end time.
EndTime time.Time
// Unique ID for stage within plan.
ID int64
// InputStages: IDs for stages that are inputs to this stage.
InputStages []int64
// Human-readable name for stage.
Name string
// ParallelInputs: Number of parallel input segments to be processed.
ParallelInputs int64
// ReadAvg: Duration the average shard spent reading input.
ReadAvg time.Duration
// ReadMax: Duration the slowest shard spent reading input.
ReadMax time.Duration
// Relative amount of the total time the average shard spent reading input.
ReadRatioAvg float64
// Relative amount of the total time the slowest shard spent reading input.
ReadRatioMax float64
// Number of records read into the stage.
RecordsRead int64
// Number of records written by the stage.
RecordsWritten int64
// ShuffleOutputBytes: Total number of bytes written to shuffle.
ShuffleOutputBytes int64
// ShuffleOutputBytesSpilled: Total number of bytes written to shuffle
// and spilled to disk.
ShuffleOutputBytesSpilled int64
// StartTime: Stage start time.
StartTime time.Time
// Current status for the stage.
Status string
// List of operations within the stage in dependency order (approximately
// chronological).
Steps []*ExplainQueryStep
// WaitAvg: Duration the average shard spent waiting to be scheduled.
WaitAvg time.Duration
// WaitMax: Duration the slowest shard spent waiting to be scheduled.
WaitMax time.Duration
// Relative amount of the total time the average shard spent waiting to be scheduled.
WaitRatioAvg float64
// Relative amount of the total time the slowest shard spent waiting to be scheduled.
WaitRatioMax float64
// WriteAvg: Duration the average shard spent on writing output.
WriteAvg time.Duration
// WriteMax: Duration the slowest shard spent on writing output.
WriteMax time.Duration
// Relative amount of the total time the average shard spent on writing output.
WriteRatioAvg float64
// Relative amount of the total time the slowest shard spent on writing output.
WriteRatioMax float64
}
// ExplainQueryStep describes one step of a query stage.
type ExplainQueryStep struct {
// Machine-readable operation type.
Kind string
// Human-readable stage descriptions.
Substeps []string
}
// QueryTimelineSample represents a sample of execution statistics at a point in time.
type QueryTimelineSample struct {
// Total number of units currently being processed by workers, represented as largest value since last sample.
ActiveUnits int64
// Total parallel units of work completed by this query.
CompletedUnits int64
// Time elapsed since start of query execution.
Elapsed time.Duration
// Total parallel units of work remaining for the active stages.
PendingUnits int64
// Cumulative slot-milliseconds consumed by the query.
SlotMillis int64
}
func (*ExtractStatistics) implementsStatistics() {}
func (*LoadStatistics) implementsStatistics() {}
func (*QueryStatistics) implementsStatistics() {}
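// An illustrative sketch (not part of the vendored file): Details holds one
// of *ExtractStatistics, *LoadStatistics or *QueryStatistics, so a type
// assertion selects the variant for the job at hand.
func queryStatsExample(ctx context.Context, job *Job) (int64, error) {
	status, err := job.Wait(ctx)
	if err != nil {
		return 0, err
	}
	if status.Statistics == nil {
		return 0, nil
	}
	if qs, ok := status.Statistics.Details.(*QueryStatistics); ok {
		return qs.TotalBytesProcessed, nil
	}
	return 0, nil
}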
// Jobs lists jobs within a project.
func (c *Client) Jobs(ctx context.Context) *JobIterator {
it := &JobIterator{
ctx: ctx,
c: c,
ProjectID: c.projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}
// JobIterator iterates over jobs in a project.
type JobIterator struct {
ProjectID string // Project ID of the jobs to list. Default is the client's project.
AllUsers bool // Whether to list jobs owned by all users in the project, or just the current caller.
State State // List only jobs in the given state. Defaults to all states.
MinCreationTime time.Time // List only jobs created after this time.
MaxCreationTime time.Time // List only jobs created before this time.
ctx context.Context
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []*Job
}
// PageInfo is a getter for the JobIterator's PageInfo.
func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// Next returns the next Job. Its second return value is iterator.Done if
// there are no more results. Once Next returns Done, all subsequent calls will
// return Done.
func (it *JobIterator) Next() (*Job, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
item := it.items[0]
it.items = it.items[1:]
return item, nil
}
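// An illustrative sketch (not part of the vendored file): listing the
// running jobs in the client's project with the iterator.
func listRunningJobsExample(ctx context.Context, client *Client) ([]*Job, error) {
	var jobs []*Job
	it := client.Jobs(ctx)
	it.State = Running // filter to jobs currently in the RUNNING state
	for {
		job, err := it.Next()
		if err == iterator.Done {
			return jobs, nil
		}
		if err != nil {
			return nil, err
		}
		jobs = append(jobs, job)
	}
}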
func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
var st string
switch it.State {
case StateUnspecified:
st = ""
case Pending:
st = "pending"
case Running:
st = "running"
case Done:
st = "done"
default:
return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
}
req := it.c.bqs.Jobs.List(it.ProjectID).
Context(it.ctx).
PageToken(pageToken).
Projection("full").
AllUsers(it.AllUsers)
if st != "" {
req.StateFilter(st)
}
if !it.MinCreationTime.IsZero() {
req.MinCreationTime(uint64(it.MinCreationTime.UnixNano() / 1e6))
}
if !it.MaxCreationTime.IsZero() {
req.MaxCreationTime(uint64(it.MaxCreationTime.UnixNano() / 1e6))
}
setClientHeader(req.Header())
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
res, err := req.Do()
if err != nil {
return "", err
}
for _, j := range res.Jobs {
job, err := convertListedJob(j, it.c)
if err != nil {
return "", err
}
it.items = append(it.items, job)
}
return res.NextPageToken, nil
}
func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, j.UserEmail, c)
}
func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) {
var job *bq.Job
call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
if location != "" {
call = call.Location(location)
}
if len(fields) > 0 {
call = call.Fields(fields...)
}
setClientHeader(call.Header())
err := runWithRetry(ctx, func() (err error) {
job, err = call.Do()
return err
})
if err != nil {
return nil, err
}
return job, nil
}
func bqToJob(q *bq.Job, c *Client) (*Job, error) {
return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, q.UserEmail, c)
}
func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, email string, c *Client) (*Job, error) {
j := &Job{
projectID: qr.ProjectId,
jobID: qr.JobId,
location: qr.Location,
c: c,
email: email,
}
j.setConfig(qc)
if err := j.setStatus(qs); err != nil {
return nil, err
}
j.setStatistics(qt, c)
return j, nil
}
func (j *Job) setConfig(config *bq.JobConfiguration) {
if config == nil {
return
}
j.config = config
}
func (j *Job) isQuery() bool {
return j.config != nil && j.config.Query != nil
}
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
func (j *Job) setStatus(qs *bq.JobStatus) error {
if qs == nil {
return nil
}
state, ok := stateMap[qs.State]
if !ok {
return fmt.Errorf("unexpected job state: %v", qs.State)
}
j.lastStatus = &JobStatus{
State: state,
err: nil,
}
if err := bqToError(qs.ErrorResult); state == Done && err != nil {
j.lastStatus.err = err
}
for _, ep := range qs.Errors {
j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep))
}
return nil
}
func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
if s == nil || j.lastStatus == nil {
return
}
js := &JobStatistics{
CreationTime: unixMillisToTime(s.CreationTime),
StartTime: unixMillisToTime(s.StartTime),
EndTime: unixMillisToTime(s.EndTime),
TotalBytesProcessed: s.TotalBytesProcessed,
}
switch {
case s.Extract != nil:
js.Details = &ExtractStatistics{
DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
}
case s.Load != nil:
js.Details = &LoadStatistics{
InputFileBytes: s.Load.InputFileBytes,
InputFiles: s.Load.InputFiles,
OutputBytes: s.Load.OutputBytes,
OutputRows: s.Load.OutputRows,
}
case s.Query != nil:
var names []string
for _, qp := range s.Query.UndeclaredQueryParameters {
names = append(names, qp.Name)
}
var tables []*Table
for _, tr := range s.Query.ReferencedTables {
tables = append(tables, bqToTable(tr, c))
}
js.Details = &QueryStatistics{
BillingTier: s.Query.BillingTier,
CacheHit: s.Query.CacheHit,
DDLTargetTable: bqToTable(s.Query.DdlTargetTable, c),
DDLOperationPerformed: s.Query.DdlOperationPerformed,
StatementType: s.Query.StatementType,
TotalBytesBilled: s.Query.TotalBytesBilled,
TotalBytesProcessed: s.Query.TotalBytesProcessed,
TotalBytesProcessedAccuracy: s.Query.TotalBytesProcessedAccuracy,
NumDMLAffectedRows: s.Query.NumDmlAffectedRows,
QueryPlan: queryPlanFromProto(s.Query.QueryPlan),
Schema: bqToSchema(s.Query.Schema),
SlotMillis: s.Query.TotalSlotMs,
Timeline: timelineFromProto(s.Query.Timeline),
ReferencedTables: tables,
UndeclaredQueryParameterNames: names,
}
}
j.lastStatus.Statistics = js
}
func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
var res []*ExplainQueryStage
for _, s := range stages {
var steps []*ExplainQueryStep
for _, p := range s.Steps {
steps = append(steps, &ExplainQueryStep{
Kind: p.Kind,
Substeps: p.Substeps,
})
}
res = append(res, &ExplainQueryStage{
CompletedParallelInputs: s.CompletedParallelInputs,
ComputeAvg: time.Duration(s.ComputeMsAvg) * time.Millisecond,
ComputeMax: time.Duration(s.ComputeMsMax) * time.Millisecond,
ComputeRatioAvg: s.ComputeRatioAvg,
ComputeRatioMax: s.ComputeRatioMax,
EndTime: time.Unix(0, s.EndMs*1e6),
ID: s.Id,
InputStages: s.InputStages,
Name: s.Name,
ParallelInputs: s.ParallelInputs,
ReadAvg: time.Duration(s.ReadMsAvg) * time.Millisecond,
ReadMax: time.Duration(s.ReadMsMax) * time.Millisecond,
ReadRatioAvg: s.ReadRatioAvg,
ReadRatioMax: s.ReadRatioMax,
RecordsRead: s.RecordsRead,
RecordsWritten: s.RecordsWritten,
ShuffleOutputBytes: s.ShuffleOutputBytes,
ShuffleOutputBytesSpilled: s.ShuffleOutputBytesSpilled,
StartTime: time.Unix(0, s.StartMs*1e6),
Status: s.Status,
Steps: steps,
WaitAvg: time.Duration(s.WaitMsAvg) * time.Millisecond,
WaitMax: time.Duration(s.WaitMsMax) * time.Millisecond,
WaitRatioAvg: s.WaitRatioAvg,
WaitRatioMax: s.WaitRatioMax,
WriteAvg: time.Duration(s.WriteMsAvg) * time.Millisecond,
WriteMax: time.Duration(s.WriteMsMax) * time.Millisecond,
WriteRatioAvg: s.WriteRatioAvg,
WriteRatioMax: s.WriteRatioMax,
})
}
return res
}
func timelineFromProto(timeline []*bq.QueryTimelineSample) []*QueryTimelineSample {
var res []*QueryTimelineSample
for _, s := range timeline {
res = append(res, &QueryTimelineSample{
ActiveUnits: s.ActiveUnits,
CompletedUnits: s.CompletedUnits,
Elapsed: time.Duration(s.ElapsedMs) * time.Millisecond,
PendingUnits: s.PendingUnits,
SlotMillis: s.TotalSlotMs,
})
}
return res
}

vendor/cloud.google.com/go/bigquery/job_test.go generated vendored Normal file

@@ -0,0 +1,95 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
func TestCreateJobRef(t *testing.T) {
defer fixRandomID("RANDOM")()
cNoLoc := &Client{projectID: "projectID"}
cLoc := &Client{projectID: "projectID", Location: "defaultLoc"}
for _, test := range []struct {
in JobIDConfig
client *Client
want *bq.JobReference
}{
{
in: JobIDConfig{JobID: "foo"},
want: &bq.JobReference{JobId: "foo"},
},
{
in: JobIDConfig{},
want: &bq.JobReference{JobId: "RANDOM"},
},
{
in: JobIDConfig{AddJobIDSuffix: true},
want: &bq.JobReference{JobId: "RANDOM"},
},
{
in: JobIDConfig{JobID: "foo", AddJobIDSuffix: true},
want: &bq.JobReference{JobId: "foo-RANDOM"},
},
{
in: JobIDConfig{JobID: "foo", Location: "loc"},
want: &bq.JobReference{JobId: "foo", Location: "loc"},
},
{
in: JobIDConfig{JobID: "foo"},
client: cLoc,
want: &bq.JobReference{JobId: "foo", Location: "defaultLoc"},
},
{
in: JobIDConfig{JobID: "foo", Location: "loc"},
client: cLoc,
want: &bq.JobReference{JobId: "foo", Location: "loc"},
},
} {
client := test.client
if client == nil {
client = cNoLoc
}
got := test.in.createJobRef(client)
test.want.ProjectId = "projectID"
if !testutil.Equal(got, test.want) {
t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want)
}
}
}
func fixRandomID(s string) func() {
prev := randomIDFn
randomIDFn = func() string { return s }
return func() { randomIDFn = prev }
}
func checkJob(t *testing.T, i int, got, want *bq.Job) {
if got.JobReference == nil {
t.Errorf("#%d: empty job reference", i)
return
}
if got.JobReference.JobId == "" {
t.Errorf("#%d: empty job ID", i)
return
}
d := testutil.Diff(got, want)
if d != "" {
t.Errorf("#%d: (got=-, want=+) %s", i, d)
}
}


@@ -1,70 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
)
// OpenTable creates a handle to an existing BigQuery table. If the table does
// not already exist, subsequent uses of the *Table will fail.
//
// Deprecated: use Client.DatasetInProject.Table instead.
func (c *Client) OpenTable(projectID, datasetID, tableID string) *Table {
return c.Table(projectID, datasetID, tableID)
}
// Table creates a handle to a BigQuery table.
//
// Use this method to reference a table in a project other than that of the
// Client.
//
// Deprecated: use Client.DatasetInProject.Table instead.
func (c *Client) Table(projectID, datasetID, tableID string) *Table {
return &Table{ProjectID: projectID, DatasetID: datasetID, TableID: tableID, service: c.service}
}
// CreateTable creates a table in the BigQuery service and returns a handle to it.
//
// Deprecated: use Table.Create instead.
func (c *Client) CreateTable(ctx context.Context, projectID, datasetID, tableID string, options ...CreateTableOption) (*Table, error) {
t := c.Table(projectID, datasetID, tableID)
if err := t.Create(ctx, options...); err != nil {
return nil, err
}
return t, nil
}
// Read fetches data from a ReadSource and returns the data via an Iterator.
//
// Deprecated: use Query.Read, Job.Read or Table.Read instead.
func (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error) {
switch src := src.(type) {
case *Job:
return src.Read(ctx, options...)
case *Query:
// For compatibility, support Query values created by literal, rather
// than Client.Query.
if src.client == nil {
src.client = c
}
return src.Read(ctx, options...)
case *Table:
return src.Read(ctx, options...)
}
return nil, fmt.Errorf("src (%T) does not support the Read operation", src)
}

vendor/cloud.google.com/go/bigquery/load.go generated vendored Normal file

@@ -0,0 +1,153 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
"io"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)
// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
// Src is the source from which data will be loaded.
Src LoadSource
// Dst is the table into which the data will be loaded.
Dst *Table
// CreateDisposition specifies the circumstances under which the destination table will be created.
// The default is CreateIfNeeded.
CreateDisposition TableCreateDisposition
// WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteAppend.
WriteDisposition TableWriteDisposition
// The labels associated with this job.
Labels map[string]string
// If non-nil, the destination table is partitioned by time.
TimePartitioning *TimePartitioning
// Clustering specifies the data clustering configuration for the destination table.
Clustering *Clustering
// Custom encryption configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfig *EncryptionConfig
// Allows the schema of the destination table to be updated as a side effect of
// the load job.
SchemaUpdateOptions []string
// For Avro-based loads, controls whether logical type annotations are used.
// See https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro#logical_types
// for additional information.
UseAvroLogicalTypes bool
}
func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
config := &bq.JobConfiguration{
Labels: l.Labels,
Load: &bq.JobConfigurationLoad{
CreateDisposition: string(l.CreateDisposition),
WriteDisposition: string(l.WriteDisposition),
DestinationTable: l.Dst.toBQ(),
TimePartitioning: l.TimePartitioning.toBQ(),
Clustering: l.Clustering.toBQ(),
DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(),
SchemaUpdateOptions: l.SchemaUpdateOptions,
UseAvroLogicalTypes: l.UseAvroLogicalTypes,
},
}
media := l.Src.populateLoadConfig(config.Load)
return config, media
}
func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
lc := &LoadConfig{
Labels: q.Labels,
CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition),
WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition),
Dst: bqToTable(q.Load.DestinationTable, c),
TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning),
Clustering: bqToClustering(q.Load.Clustering),
DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration),
SchemaUpdateOptions: q.Load.SchemaUpdateOptions,
UseAvroLogicalTypes: q.Load.UseAvroLogicalTypes,
}
var fc *FileConfig
if len(q.Load.SourceUris) == 0 {
s := NewReaderSource(nil)
fc = &s.FileConfig
lc.Src = s
} else {
s := NewGCSReference(q.Load.SourceUris...)
fc = &s.FileConfig
lc.Src = s
}
bqPopulateFileConfig(q.Load, fc)
return lc
}
// A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct {
JobIDConfig
LoadConfig
c *Client
}
// A LoadSource represents a source of data that can be loaded into
// a BigQuery table.
//
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface {
// populates config, returns media
populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
}
// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
// The returned Loader may optionally be further configured before its Run method is called.
// See GCSReference and ReaderSource for additional configuration options that
// affect loading.
func (t *Table) LoaderFrom(src LoadSource) *Loader {
return &Loader{
c: t.c,
LoadConfig: LoadConfig{
Src: src,
Dst: t,
},
}
}
// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Load.Run")
defer func() { trace.EndSpan(ctx, err) }()
job, media := l.newJob()
return l.c.insertJob(ctx, job, media)
}
func (l *Loader) newJob() (*bq.Job, io.Reader) {
config, media := l.LoadConfig.toBQ()
return &bq.Job{
JobReference: l.JobIDConfig.createJobRef(l.c),
Configuration: config,
}, media
}


@@ -1,112 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type loadOption interface {
customizeLoad(conf *bq.JobConfigurationLoad)
}
// DestinationSchema returns an Option that specifies the schema to use when loading data into a new table.
// A DestinationSchema Option must be supplied when loading data from Google Cloud Storage into a non-existent table.
// Caveat: DestinationSchema is not required if the data being loaded is a datastore backup.
// schema must not be nil.
func DestinationSchema(schema Schema) Option { return destSchema{Schema: schema} }
type destSchema struct {
Schema
}
func (opt destSchema) implementsOption() {}
func (opt destSchema) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.Schema = opt.asTableSchema()
}
// MaxBadRecords returns an Option that sets the maximum number of bad records that will be ignored.
// If this maximum is exceeded, the operation will be unsuccessful.
func MaxBadRecords(n int64) Option { return maxBadRecords(n) }
type maxBadRecords int64
func (opt maxBadRecords) implementsOption() {}
func (opt maxBadRecords) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.MaxBadRecords = int64(opt)
}
// AllowJaggedRows returns an Option that causes missing trailing optional columns to be tolerated in CSV data. Missing values are treated as nulls.
func AllowJaggedRows() Option { return allowJaggedRows{} }
type allowJaggedRows struct{}
func (opt allowJaggedRows) implementsOption() {}
func (opt allowJaggedRows) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.AllowJaggedRows = true
}
// AllowQuotedNewlines returns an Option that allows quoted data sections containing newlines in CSV data.
func AllowQuotedNewlines() Option { return allowQuotedNewlines{} }
type allowQuotedNewlines struct{}
func (opt allowQuotedNewlines) implementsOption() {}
func (opt allowQuotedNewlines) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.AllowQuotedNewlines = true
}
// IgnoreUnknownValues returns an Option that causes values not matching the schema to be tolerated.
// Unknown values are ignored. For CSV this ignores extra values at the end of a line.
// For JSON this ignores named values that do not match any column name.
// If this Option is not used, records containing unknown values are treated as bad records.
// The MaxBadRecords Option can be used to customize how bad records are handled.
func IgnoreUnknownValues() Option { return ignoreUnknownValues{} }
type ignoreUnknownValues struct{}
func (opt ignoreUnknownValues) implementsOption() {}
func (opt ignoreUnknownValues) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.IgnoreUnknownValues = true
}
func (c *Client) load(ctx context.Context, dst *Table, src *GCSReference, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationLoad{}
dst.customizeLoadDst(payload)
src.customizeLoadSrc(payload)
for _, opt := range options {
o, ok := opt.(loadOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeLoad(payload)
}
job.Configuration = &bq.JobConfiguration{
Load: payload,
}
return c.service.insertJob(ctx, job, c.projectID)
}


@@ -1,4 +1,4 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,20 +15,23 @@
package bigquery
import (
"strings"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
bq "google.golang.org/api/bigquery/v2"
)
func defaultLoadJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Load: &bq.JobConfigurationLoad{
DestinationTable: &bq.TableReference{
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@@ -66,26 +69,76 @@ func bqNestedFieldSchema() *bq.TableFieldSchema {
}
func TestLoad(t *testing.T) {
defer fixRandomID("RANDOM")()
c := &Client{projectID: "client-project-id"}
testCases := []struct {
dst *Table
src LoadSource
jobID string
location string
config LoadConfig
want *bq.Job
}{
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
want: defaultLoadJob(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
location: "loc",
want: func() *bq.Job {
j := defaultLoadJob()
j.JobReference.Location = "loc"
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
jobID: "ajob",
config: LoadConfig{
CreateDisposition: CreateNever,
WriteDisposition: WriteTruncate,
Labels: map[string]string{"a": "b"},
TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond},
Clustering: &Clustering{Fields: []string{"cfield1"}},
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"},
},
src: NewGCSReference("uri"),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 1234,
}
j.Configuration.Load.Clustering = &bq.Clustering{
Fields: []string{"cfield1"},
}
j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
j.JobReference = &bq.JobReference{
JobId: "ajob",
ProjectId: "client-project-id",
}
j.Configuration.Load.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.MaxBadRecords = 1
g.AllowJaggedRows = true
g.AllowQuotedNewlines = true
g.IgnoreUnknownValues = true
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.MaxBadRecords = 1
@@ -96,33 +149,15 @@ func TestLoad(t *testing.T) {
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.Schema = Schema{
stringFieldSchema(),
nestedFieldSchema(),
}
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.Schema = &bq.TableSchema{
@@ -134,15 +169,16 @@ func TestLoad(t *testing.T) {
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.SkipLeadingRows = 1
g.SourceFormat = JSON
g.Encoding = UTF_8
g.FieldDelimiter = "\t"
g.Quote = "-"
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SkipLeadingRows = 1
@@ -155,24 +191,22 @@ func TestLoad(t *testing.T) {
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
want: func() *bq.Job {
j := defaultLoadJob()
// Quote is left unset in GCSReference, so should be nil here.
j.Configuration.Load.Quote = nil
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.ForceZeroQuote = true
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
empty := ""
@@ -180,19 +214,85 @@ func TestLoad(t *testing.T) {
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *ReaderSource {
r := NewReaderSource(strings.NewReader("foo"))
r.SkipLeadingRows = 1
r.SourceFormat = JSON
r.Encoding = UTF_8
r.FieldDelimiter = "\t"
r.Quote = "-"
return r
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SourceUris = nil
j.Configuration.Load.SkipLeadingRows = 1
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
j.Configuration.Load.Encoding = "UTF-8"
j.Configuration.Load.FieldDelimiter = "\t"
hyphen := "-"
j.Configuration.Load.Quote = &hyphen
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.SourceFormat = Avro
return g
}(),
config: LoadConfig{
UseAvroLogicalTypes: true,
},
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SourceFormat = "AVRO"
j.Configuration.Load.UseAvroLogicalTypes = true
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *ReaderSource {
r := NewReaderSource(strings.NewReader("foo"))
r.SourceFormat = Avro
return r
}(),
config: LoadConfig{
UseAvroLogicalTypes: true,
},
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SourceUris = nil
j.Configuration.Load.SourceFormat = "AVRO"
j.Configuration.Load.UseAvroLogicalTypes = true
return j
}(),
},
}
for i, tc := range testCases {
loader := tc.dst.LoaderFrom(tc.src)
loader.JobID = tc.jobID
loader.Location = tc.location
tc.config.Src = tc.src
tc.config.Dst = tc.dst
loader.LoadConfig = tc.config
got, _ := loader.newJob()
checkJob(t, i, got, tc.want)
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig,
cmp.AllowUnexported(Table{}, Client{}),
cmpopts.IgnoreUnexported(ReaderSource{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+):\n%s", i, diff)
}
}
}

vendor/cloud.google.com/go/bigquery/nulls.go generated vendored Normal file

@@ -0,0 +1,348 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strconv"
"time"
"cloud.google.com/go/civil"
)
// NullInt64 represents a BigQuery INT64 that may be NULL.
type NullInt64 struct {
Int64 int64
Valid bool // Valid is true if Int64 is not NULL.
}
func (n NullInt64) String() string { return nullstr(n.Valid, n.Int64) }
// NullString represents a BigQuery STRING that may be NULL.
type NullString struct {
StringVal string
Valid bool // Valid is true if StringVal is not NULL.
}
func (n NullString) String() string { return nullstr(n.Valid, n.StringVal) }
// NullGeography represents a BigQuery GEOGRAPHY string that may be NULL.
type NullGeography struct {
GeographyVal string
Valid bool // Valid is true if GeographyVal is not NULL.
}
func (n NullGeography) String() string { return nullstr(n.Valid, n.GeographyVal) }
// NullFloat64 represents a BigQuery FLOAT64 that may be NULL.
type NullFloat64 struct {
Float64 float64
Valid bool // Valid is true if Float64 is not NULL.
}
func (n NullFloat64) String() string { return nullstr(n.Valid, n.Float64) }
// NullBool represents a BigQuery BOOL that may be NULL.
type NullBool struct {
Bool bool
Valid bool // Valid is true if Bool is not NULL.
}
func (n NullBool) String() string { return nullstr(n.Valid, n.Bool) }
// NullTimestamp represents a BigQuery TIMESTAMP that may be null.
type NullTimestamp struct {
Timestamp time.Time
Valid bool // Valid is true if Time is not NULL.
}
func (n NullTimestamp) String() string { return nullstr(n.Valid, n.Timestamp) }
// NullDate represents a BigQuery DATE that may be null.
type NullDate struct {
Date civil.Date
Valid bool // Valid is true if Date is not NULL.
}
func (n NullDate) String() string { return nullstr(n.Valid, n.Date) }
// NullTime represents a BigQuery TIME that may be null.
type NullTime struct {
Time civil.Time
Valid bool // Valid is true if Time is not NULL.
}
func (n NullTime) String() string {
if !n.Valid {
return "<null>"
}
return CivilTimeString(n.Time)
}
// NullDateTime represents a BigQuery DATETIME that may be null.
type NullDateTime struct {
DateTime civil.DateTime
Valid bool // Valid is true if DateTime is not NULL.
}
func (n NullDateTime) String() string {
if !n.Valid {
return "<null>"
}
return CivilDateTimeString(n.DateTime)
}
// MarshalJSON converts the NullInt64 to JSON.
func (n NullInt64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Int64) }
// MarshalJSON converts the NullFloat64 to JSON.
func (n NullFloat64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Float64) }
// MarshalJSON converts the NullBool to JSON.
func (n NullBool) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Bool) }
// MarshalJSON converts the NullString to JSON.
func (n NullString) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.StringVal) }
// MarshalJSON converts the NullGeography to JSON.
func (n NullGeography) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.GeographyVal) }
// MarshalJSON converts the NullTimestamp to JSON.
func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) }
// MarshalJSON converts the NullDate to JSON.
func (n NullDate) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Date) }
// MarshalJSON converts the NullTime to JSON.
func (n NullTime) MarshalJSON() ([]byte, error) {
if !n.Valid {
return jsonNull, nil
}
return []byte(`"` + CivilTimeString(n.Time) + `"`), nil
}
// MarshalJSON converts the NullDateTime to JSON.
func (n NullDateTime) MarshalJSON() ([]byte, error) {
if !n.Valid {
return jsonNull, nil
}
return []byte(`"` + CivilDateTimeString(n.DateTime) + `"`), nil
}
func nullstr(valid bool, v interface{}) string {
if !valid {
return "NULL"
}
return fmt.Sprint(v)
}
var jsonNull = []byte("null")
func nulljson(valid bool, v interface{}) ([]byte, error) {
if !valid {
return jsonNull, nil
}
return json.Marshal(v)
}
// UnmarshalJSON converts JSON into a NullInt64.
func (n *NullInt64) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Int64 = 0
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.Int64); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullFloat64.
func (n *NullFloat64) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Float64 = 0
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.Float64); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullBool.
func (n *NullBool) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Bool = false
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.Bool); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullString.
func (n *NullString) UnmarshalJSON(b []byte) error {
n.Valid = false
n.StringVal = ""
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.StringVal); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullGeography.
func (n *NullGeography) UnmarshalJSON(b []byte) error {
n.Valid = false
n.GeographyVal = ""
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.GeographyVal); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullTimestamp.
func (n *NullTimestamp) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Timestamp = time.Time{}
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.Timestamp); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullDate.
func (n *NullDate) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Date = civil.Date{}
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.Date); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullTime.
func (n *NullTime) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Time = civil.Time{}
if bytes.Equal(b, jsonNull) {
return nil
}
s, err := strconv.Unquote(string(b))
if err != nil {
return err
}
t, err := civil.ParseTime(s)
if err != nil {
return err
}
n.Time = t
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullDateTime.
func (n *NullDateTime) UnmarshalJSON(b []byte) error {
n.Valid = false
n.DateTime = civil.DateTime{}
if bytes.Equal(b, jsonNull) {
return nil
}
s, err := strconv.Unquote(string(b))
if err != nil {
return err
}
dt, err := parseCivilDateTime(s)
if err != nil {
return err
}
n.DateTime = dt
n.Valid = true
return nil
}
var (
typeOfNullInt64 = reflect.TypeOf(NullInt64{})
typeOfNullFloat64 = reflect.TypeOf(NullFloat64{})
typeOfNullBool = reflect.TypeOf(NullBool{})
typeOfNullString = reflect.TypeOf(NullString{})
typeOfNullGeography = reflect.TypeOf(NullGeography{})
typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{})
typeOfNullDate = reflect.TypeOf(NullDate{})
typeOfNullTime = reflect.TypeOf(NullTime{})
typeOfNullDateTime = reflect.TypeOf(NullDateTime{})
)
func nullableFieldType(t reflect.Type) FieldType {
switch t {
case typeOfNullInt64:
return IntegerFieldType
case typeOfNullFloat64:
return FloatFieldType
case typeOfNullBool:
return BooleanFieldType
case typeOfNullString:
return StringFieldType
case typeOfNullGeography:
return GeographyFieldType
case typeOfNullTimestamp:
return TimestampFieldType
case typeOfNullDate:
return DateFieldType
case typeOfNullTime:
return TimeFieldType
case typeOfNullDateTime:
return DateTimeFieldType
default:
return ""
}
}
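// An illustrative sketch (not part of the vendored file): the null wrappers
// marshal SQL NULL as JSON null and anything else as the plain value.
func nullJSONExample() ([]byte, []byte, error) {
	set := NullString{StringVal: "hello", Valid: true}
	unset := NullString{} // Valid is false, so this is NULL.
	a, err := json.Marshal(set) // produces "hello"
	if err != nil {
		return nil, nil, err
	}
	b, err := json.Marshal(unset) // produces null
	if err != nil {
		return nil, nil, err
	}
	return a, b, nil
}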

vendor/cloud.google.com/go/bigquery/nulls_test.go generated vendored Normal file

@@ -0,0 +1,75 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"encoding/json"
"reflect"
"testing"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/testutil"
)
var (
nullsTestTime = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 1000}
nullsTestDateTime = civil.DateTime{Date: civil.Date{Year: 2016, Month: 11, Day: 5}, Time: nullsTestTime}
)
func TestNullsJSON(t *testing.T) {
for _, test := range []struct {
in interface{}
want string
}{
{&NullInt64{Valid: true, Int64: 3}, `3`},
{&NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
{&NullBool{Valid: true, Bool: true}, `true`},
{&NullString{Valid: true, StringVal: "foo"}, `"foo"`},
{&NullGeography{Valid: true, GeographyVal: "ST_GEOPOINT(47.649154, -122.350220)"}, `"ST_GEOPOINT(47.649154, -122.350220)"`},
{&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
{&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
{&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`},
{&NullDateTime{Valid: true, DateTime: nullsTestDateTime}, `"2016-11-05 07:50:22.000001"`},
{&NullInt64{}, `null`},
{&NullFloat64{}, `null`},
{&NullBool{}, `null`},
{&NullString{}, `null`},
{&NullGeography{}, `null`},
{&NullTimestamp{}, `null`},
{&NullDate{}, `null`},
{&NullTime{}, `null`},
{&NullDateTime{}, `null`},
} {
bytes, err := json.Marshal(test.in)
if err != nil {
t.Fatal(err)
}
if got, want := string(bytes), test.want; got != want {
t.Errorf("%#v: got %s, want %s", test.in, got, want)
}
typ := reflect.Indirect(reflect.ValueOf(test.in)).Type()
value := reflect.New(typ).Interface()
err = json.Unmarshal(bytes, value)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(value, test.in) {
t.Errorf("%#v: got %#v, want %#v", test.in, value, test.in)
}
}
}

vendor/cloud.google.com/go/bigquery/oc_test.go generated vendored Normal file

@@ -0,0 +1,38 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
"testing"
"cloud.google.com/go/internal/testutil"
)
func TestOCTracing(t *testing.T) {
ctx := context.Background()
client := getClient(t)
defer client.Close()
te := testutil.NewTestExporter()
defer te.Unregister()
q := client.Query("select *")
q.Run(ctx) // Doesn't matter if we get an error; span should be created either way
if len(te.Spans) == 0 {
t.Fatalf("Expected some spans to be created, but got %d", 0)
}
}

vendor/cloud.google.com/go/bigquery/params.go generated vendored Normal file

@@ -0,0 +1,370 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"encoding/base64"
"errors"
"fmt"
"math/big"
"reflect"
"regexp"
"time"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/fields"
bq "google.golang.org/api/bigquery/v2"
)
var (
// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
timestampFormat = "2006-01-02 15:04:05.999999-07:00"
// See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)
const nullableTagOption = "nullable"
func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
name, keep, opts, err := fields.ParseStandardTag("bigquery", t)
if err != nil {
return "", false, nil, err
}
if name != "" && !validFieldName.MatchString(name) {
return "", false, nil, invalidFieldNameError(name)
}
for _, opt := range opts {
if opt != nullableTagOption {
return "", false, nil, fmt.Errorf(
"bigquery: invalid tag option %q. The only valid option is %q",
opt, nullableTagOption)
}
}
return name, keep, opts, nil
}
type invalidFieldNameError string
func (e invalidFieldNameError) Error() string {
return fmt.Sprintf("bigquery: invalid name %q of field in struct", string(e))
}
var fieldCache = fields.NewCache(bqTagParser, nil, nil)
var (
int64ParamType = &bq.QueryParameterType{Type: "INT64"}
float64ParamType = &bq.QueryParameterType{Type: "FLOAT64"}
boolParamType = &bq.QueryParameterType{Type: "BOOL"}
stringParamType = &bq.QueryParameterType{Type: "STRING"}
bytesParamType = &bq.QueryParameterType{Type: "BYTES"}
dateParamType = &bq.QueryParameterType{Type: "DATE"}
timeParamType = &bq.QueryParameterType{Type: "TIME"}
dateTimeParamType = &bq.QueryParameterType{Type: "DATETIME"}
timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
numericParamType = &bq.QueryParameterType{Type: "NUMERIC"}
)
var (
typeOfDate = reflect.TypeOf(civil.Date{})
typeOfTime = reflect.TypeOf(civil.Time{})
typeOfDateTime = reflect.TypeOf(civil.DateTime{})
typeOfGoTime = reflect.TypeOf(time.Time{})
typeOfRat = reflect.TypeOf(&big.Rat{})
)
// A QueryParameter is a parameter to a query.
type QueryParameter struct {
// Name is used for named parameter mode.
// It must match the name in the query case-insensitively.
Name string
// Value is the value of the parameter.
//
// When you create a QueryParameter to send to BigQuery, the following Go types
// are supported, with their corresponding BigQuery types:
// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
// Note that uint, uint64 and uintptr are not supported, because
// they may contain values that cannot fit into a 64-bit signed integer.
// float32, float64: FLOAT64
// bool: BOOL
// string: STRING
// []byte: BYTES
// time.Time: TIMESTAMP
// *big.Rat: NUMERIC
// Arrays and slices of the above.
// Structs of the above. Only the exported fields are used.
//
// BigQuery does not support params of type GEOGRAPHY. For users wishing
// to parameterize Geography values, use string parameters and cast in the
// SQL query, e.g. `SELECT ST_GeogFromText(@string_param) as geo`
//
// When a QueryParameter is returned inside a QueryConfig from a call to
// Job.Config:
// Integers are of type int64.
// Floating-point values are of type float64.
// Arrays are of type []interface{}, regardless of the array element type.
// Structs are of type map[string]interface{}.
Value interface{}
}
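// An illustrative sketch (not part of the vendored file): a struct value is
// sent as a STRUCT parameter, one field per exported Go field.
func structParamExample() QueryParameter {
	type wordCount struct {
		Word  string
		Count int64
	}
	return QueryParameter{Name: "wc", Value: wordCount{Word: "hello", Count: 7}}
}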
func (p QueryParameter) toBQ() (*bq.QueryParameter, error) {
pv, err := paramValue(reflect.ValueOf(p.Value))
if err != nil {
return nil, err
}
pt, err := paramType(reflect.TypeOf(p.Value))
if err != nil {
return nil, err
}
return &bq.QueryParameter{
Name: p.Name,
ParameterValue: &pv,
ParameterType: pt,
}, nil
}
func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
if t == nil {
return nil, errors.New("bigquery: nil parameter")
}
switch t {
case typeOfDate:
return dateParamType, nil
case typeOfTime:
return timeParamType, nil
case typeOfDateTime:
return dateTimeParamType, nil
case typeOfGoTime:
return timestampParamType, nil
case typeOfRat:
return numericParamType, nil
}
switch t.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
return int64ParamType, nil
case reflect.Float32, reflect.Float64:
return float64ParamType, nil
case reflect.Bool:
return boolParamType, nil
case reflect.String:
return stringParamType, nil
case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 {
return bytesParamType, nil
}
fallthrough
case reflect.Array:
et, err := paramType(t.Elem())
if err != nil {
return nil, err
}
return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil
case reflect.Ptr:
if t.Elem().Kind() != reflect.Struct {
break
}
t = t.Elem()
fallthrough
case reflect.Struct:
var fts []*bq.QueryParameterTypeStructTypes
fields, err := fieldCache.Fields(t)
if err != nil {
return nil, err
}
for _, f := range fields {
pt, err := paramType(f.Type)
if err != nil {
return nil, err
}
fts = append(fts, &bq.QueryParameterTypeStructTypes{
Name: f.Name,
Type: pt,
})
}
return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
}
return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
}
func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
var res bq.QueryParameterValue
if !v.IsValid() {
return res, errors.New("bigquery: nil parameter")
}
t := v.Type()
switch t {
case typeOfDate:
res.Value = v.Interface().(civil.Date).String()
return res, nil
case typeOfTime:
// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
// (If we send nanoseconds, then when we try to read the result we get "query job
// missing destination table").
res.Value = CivilTimeString(v.Interface().(civil.Time))
return res, nil
case typeOfDateTime:
res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
return res, nil
case typeOfGoTime:
res.Value = v.Interface().(time.Time).Format(timestampFormat)
return res, nil
case typeOfRat:
res.Value = NumericString(v.Interface().(*big.Rat))
return res, nil
}
switch t.Kind() {
case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 {
res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
return res, nil
}
fallthrough
case reflect.Array:
var vals []*bq.QueryParameterValue
for i := 0; i < v.Len(); i++ {
val, err := paramValue(v.Index(i))
if err != nil {
return bq.QueryParameterValue{}, err
}
vals = append(vals, &val)
}
return bq.QueryParameterValue{ArrayValues: vals}, nil
case reflect.Ptr:
if t.Elem().Kind() != reflect.Struct {
return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
}
t = t.Elem()
v = v.Elem()
if !v.IsValid() {
// nil pointer becomes empty value
return res, nil
}
fallthrough
case reflect.Struct:
fields, err := fieldCache.Fields(t)
if err != nil {
return bq.QueryParameterValue{}, err
}
res.StructValues = map[string]bq.QueryParameterValue{}
for _, f := range fields {
fv := v.FieldByIndex(f.Index)
fp, err := paramValue(fv)
if err != nil {
return bq.QueryParameterValue{}, err
}
res.StructValues[f.Name] = fp
}
return res, nil
}
// None of the above: assume a scalar type. (If it's not a valid type,
// paramType will catch the error.)
res.Value = fmt.Sprint(v.Interface())
// Ensure empty string values are sent.
if res.Value == "" {
res.ForceSendFields = append(res.ForceSendFields, "Value")
}
return res, nil
}
func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) {
p := QueryParameter{Name: q.Name}
val, err := convertParamValue(q.ParameterValue, q.ParameterType)
if err != nil {
return QueryParameter{}, err
}
p.Value = val
return p, nil
}
var paramTypeToFieldType = map[string]FieldType{
int64ParamType.Type: IntegerFieldType,
float64ParamType.Type: FloatFieldType,
boolParamType.Type: BooleanFieldType,
stringParamType.Type: StringFieldType,
bytesParamType.Type: BytesFieldType,
dateParamType.Type: DateFieldType,
timeParamType.Type: TimeFieldType,
numericParamType.Type: NumericFieldType,
}
// Convert a parameter value from the service to a Go value. This is similar to, but
// not quite the same as, converting data values.
func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
switch qtype.Type {
case "ARRAY":
if qval == nil {
return []interface{}(nil), nil
}
return convertParamArray(qval.ArrayValues, qtype.ArrayType)
case "STRUCT":
if qval == nil {
return map[string]interface{}(nil), nil
}
return convertParamStruct(qval.StructValues, qtype.StructTypes)
case "TIMESTAMP":
return time.Parse(timestampFormat, qval.Value)
case "DATETIME":
return parseCivilDateTime(qval.Value)
default:
return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
}
}
// convertParamArray converts a query parameter array value to a Go value. It
// always returns a []interface{}.
func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
var vals []interface{}
for _, el := range elVals {
val, err := convertParamValue(el, elType)
if err != nil {
return nil, err
}
vals = append(vals, val)
}
return vals, nil
}
// convertParamStruct converts a query parameter struct value into a Go value. It
// always returns a map[string]interface{}.
func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) {
vals := map[string]interface{}{}
for _, st := range sTypes {
if sv, ok := sVals[st.Name]; ok {
val, err := convertParamValue(&sv, st.Type)
if err != nil {
return nil, err
}
vals[st.Name] = val
} else {
vals[st.Name] = nil
}
}
return vals, nil
}
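// Illustrative sketch, not part of the vendored source: the round trip the
// functions above implement, with a hypothetical struct type. A Go struct
// value becomes a STRUCT parameter, and convertParamValue brings it back as a
// map[string]interface{}, mirroring convertParamStruct.
//
//	type point struct{ X, Y int }
//	pv, _ := paramValue(reflect.ValueOf(point{1, 2})) // StructValues{"X": "1", "Y": "2"}
//	pt, _ := paramType(reflect.TypeOf(point{}))       // {Type: "STRUCT", StructTypes: ...}
//	got, _ := convertParamValue(&pv, pt)              // map[string]interface{}{"X": int64(1), "Y": int64(2)}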

vendor/cloud.google.com/go/bigquery/params_test.go

@@ -0,0 +1,385 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
"errors"
"math"
"math/big"
"reflect"
"testing"
"time"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)
var scalarTests = []struct {
val interface{} // The Go value
wantVal string // paramValue's desired output
wantType *bq.QueryParameterType // paramType's desired output
}{
{int64(0), "0", int64ParamType},
{3.14, "3.14", float64ParamType},
{3.14159e-87, "3.14159e-87", float64ParamType},
{true, "true", boolParamType},
{"string", "string", stringParamType},
{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType},
{math.NaN(), "NaN", float64ParamType},
{[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo"
{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
"2016-03-20 04:22:09.000005-01:02",
timestampParamType},
{civil.Date{Year: 2016, Month: 3, Day: 20}, "2016-03-20", dateParamType},
{civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}, "04:05:06.789000", timeParamType},
{civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}},
"2016-03-20 04:05:06.789000",
dateTimeParamType},
{big.NewRat(12345, 1000), "12.345000000", numericParamType},
}
type (
S1 struct {
A int
B *S2
C bool
}
S2 struct {
D string
}
)
var (
s1 = S1{
A: 1,
B: &S2{D: "s"},
C: true,
}
s1ParamType = &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "A", Type: int64ParamType},
{Name: "B", Type: &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "D", Type: stringParamType},
},
}},
{Name: "C", Type: boolParamType},
},
}
s1ParamValue = bq.QueryParameterValue{
StructValues: map[string]bq.QueryParameterValue{
"A": sval("1"),
"B": {
StructValues: map[string]bq.QueryParameterValue{
"D": sval("s"),
},
},
"C": sval("true"),
},
}
s1ParamReturnValue = map[string]interface{}{
"A": int64(1),
"B": map[string]interface{}{"D": "s"},
"C": true,
}
)
func sval(s string) bq.QueryParameterValue {
return bq.QueryParameterValue{Value: s}
}
func TestParamValueScalar(t *testing.T) {
for _, test := range scalarTests {
got, err := paramValue(reflect.ValueOf(test.val))
if err != nil {
t.Errorf("%v: got %v, want nil", test.val, err)
continue
}
want := sval(test.wantVal)
if !testutil.Equal(got, want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
}
}
}
func TestParamValueArray(t *testing.T) {
qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{
{Value: "1"},
{Value: "2"},
},
}
for _, test := range []struct {
val interface{}
want bq.QueryParameterValue
}{
{[]int(nil), bq.QueryParameterValue{}},
{[]int{}, bq.QueryParameterValue{}},
{[]int{1, 2}, qpv},
{[2]int{1, 2}, qpv},
} {
got, err := paramValue(reflect.ValueOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want)
}
}
}
func TestParamValueStruct(t *testing.T) {
got, err := paramValue(reflect.ValueOf(s1))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, s1ParamValue) {
t.Errorf("got %+v\nwant %+v", got, s1ParamValue)
}
}
func TestParamValueErrors(t *testing.T) {
// paramValue lets a few invalid types through, but paramType catches them.
// Since we never call one without the other, that's fine.
for _, val := range []interface{}{nil, new([]int)} {
_, err := paramValue(reflect.ValueOf(val))
if err == nil {
t.Errorf("%v (%T): got nil, want error", val, val)
}
}
}
func TestParamType(t *testing.T) {
for _, test := range scalarTests {
got, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.wantType) {
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType)
}
}
for _, test := range []struct {
val interface{}
want *bq.QueryParameterType
}{
{uint32(32767), int64ParamType},
{[]byte("foo"), bytesParamType},
{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
{S1{}, s1ParamType},
} {
got, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
}
}
}
func TestParamTypeErrors(t *testing.T) {
for _, val := range []interface{}{
nil, uint(0), new([]int), make(chan int),
} {
_, err := paramType(reflect.TypeOf(val))
if err == nil {
t.Errorf("%v (%T): got nil, want error", val, val)
}
}
}
func TestConvertParamValue(t *testing.T) {
// Scalars.
for _, test := range scalarTests {
pval, err := paramValue(reflect.ValueOf(test.val))
if err != nil {
t.Fatal(err)
}
ptype, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
got, err := convertParamValue(&pval, ptype)
if err != nil {
t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err)
}
if !testutil.Equal(got, test.val) {
t.Errorf("%#v: got %#v", test.val, got)
}
}
// Arrays.
for _, test := range []struct {
pval *bq.QueryParameterValue
want []interface{}
}{
{
&bq.QueryParameterValue{},
nil,
},
{
&bq.QueryParameterValue{
ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}},
},
[]interface{}{int64(1), int64(2)},
},
} {
ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}
got, err := convertParamValue(test.pval, ptype)
if err != nil {
t.Fatalf("%+v: %v", test.pval, err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want)
}
}
// Structs.
got, err := convertParamValue(&s1ParamValue, s1ParamType)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, s1ParamReturnValue) {
t.Errorf("got %+v, want %+v", got, s1ParamReturnValue)
}
}
func TestIntegration_ScalarParam(t *testing.T) {
roundToMicros := cmp.Transformer("RoundToMicros",
func(t time.Time) time.Time { return t.Round(time.Microsecond) })
c := getClient(t)
for _, test := range scalarTests {
gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(gotData, test.val, roundToMicros) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val)
}
if !testutil.Equal(gotParam, test.val, roundToMicros) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val)
}
}
}
func TestIntegration_OtherParam(t *testing.T) {
c := getClient(t)
for _, test := range []struct {
val interface{}
wantData interface{}
wantParam interface{}
}{
{[]int(nil), []Value(nil), []interface{}(nil)},
{[]int{}, []Value(nil), []interface{}(nil)},
{
[]int{1, 2},
[]Value{int64(1), int64(2)},
[]interface{}{int64(1), int64(2)},
},
{
[3]int{1, 2, 3},
[]Value{int64(1), int64(2), int64(3)},
[]interface{}{int64(1), int64(2), int64(3)},
},
{
S1{},
[]Value{int64(0), nil, false},
map[string]interface{}{
"A": int64(0),
"B": nil,
"C": false,
},
},
{
s1,
[]Value{int64(1), []Value{"s"}, true},
s1ParamReturnValue,
},
} {
gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(gotData, test.wantData) {
t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
test.val, gotData, gotData, test.wantData, test.wantData)
}
if !testutil.Equal(gotParam, test.wantParam) {
t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
test.val, gotParam, gotParam, test.wantParam, test.wantParam)
}
}
}
// paramRoundTrip passes x as a query parameter to BigQuery. It returns
// the resulting data value from running the query and the parameter value from
// the returned job configuration.
func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) {
ctx := context.Background()
q := c.Query("select ?")
q.Parameters = []QueryParameter{{Value: x}}
job, err := q.Run(ctx)
if err != nil {
return nil, nil, err
}
it, err := job.Read(ctx)
if err != nil {
return nil, nil, err
}
var val []Value
err = it.Next(&val)
if err != nil {
return nil, nil, err
}
if len(val) != 1 {
return nil, nil, errors.New("wrong number of values")
}
conf, err := job.Config()
if err != nil {
return nil, nil, err
}
return val[0], conf.(*QueryConfig).Parameters[0].Value, nil
}
func TestQueryParameter_toBQ(t *testing.T) {
tests := []struct {
in QueryParameter
want []string
}{
{
in: QueryParameter{Name: "name", Value: ""},
want: []string{"Value"},
},
}
for _, test := range tests {
q, err := test.in.toBQ()
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
got := q.ParameterValue.ForceSendFields
if !cmp.Equal(test.want, got) {
t.Fatalf("want %v, got %v", test.want, got)
}
}
}


@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,10 +14,20 @@
package bigquery

-import bq "google.golang.org/api/bigquery/v2"
+import (
+"context"
+"errors"
+
+"cloud.google.com/go/internal/trace"
+bq "google.golang.org/api/bigquery/v2"
+)
-// Query represents a query to be executed. Use Client.Query to create a query.
-type Query struct {
+// QueryConfig holds the configuration for a query job.
+type QueryConfig struct {
+// Dst is the table into which the results of the query will be written.
+// If this field is nil, a temporary table will be created.
+Dst *Table
+
// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
Q string
@@ -26,19 +36,293 @@ type Query struct {
DefaultProjectID string
DefaultDatasetID string
// TableDefinitions describes data sources outside of BigQuery.
// The map keys may be used as table names in the query string.
//
// When a QueryConfig is returned from Job.Config, the map values
// are always of type *ExternalDataConfig.
TableDefinitions map[string]ExternalData
// CreateDisposition specifies the circumstances under which the destination table will be created.
// The default is CreateIfNeeded.
CreateDisposition TableCreateDisposition
// WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteEmpty.
WriteDisposition TableWriteDisposition
// DisableQueryCache prevents results from being fetched from the query cache.
// If this field is false, results are fetched from the cache if they are available.
// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
// Cached results are only available when TableID is unspecified in the query's destination Table.
// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
DisableQueryCache bool
// DisableFlattenedResults prevents results from being flattened.
// If this field is false, results from nested and repeated fields are flattened.
// DisableFlattenedResults implies AllowLargeResults.
// For more information, see https://cloud.google.com/bigquery/docs/data#nested
DisableFlattenedResults bool
// AllowLargeResults allows the query to produce arbitrarily large result tables.
// The destination must be a table.
// When using this option, queries will take longer to execute, even if the result set is small.
// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
AllowLargeResults bool
// Priority specifies the priority with which to schedule the query.
// The default priority is InteractivePriority.
// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
Priority QueryPriority
// MaxBillingTier sets the maximum billing tier for a Query.
// Queries that have resource usage beyond this tier will fail (without
// incurring a charge). If this field is zero, the project default will be used.
MaxBillingTier int
// MaxBytesBilled limits the number of bytes billed for
// this job. Queries that would exceed this limit will fail (without incurring
// a charge).
// If this field is less than 1, the project default will be
// used.
MaxBytesBilled int64
// UseStandardSQL causes the query to use standard SQL. The default.
// Deprecated: use UseLegacySQL.
UseStandardSQL bool
// UseLegacySQL causes the query to use legacy SQL.
UseLegacySQL bool
// Parameters is a list of query parameters. The presence of parameters
// implies the use of standard SQL.
// If the query uses positional syntax ("?"), then no parameter may have a name.
// If the query uses named syntax ("@p"), then all parameters must have names.
// It is illegal to mix positional and named syntax; both styles are sketched after this struct.
Parameters []QueryParameter
// TimePartitioning specifies time-based partitioning
// for the destination table.
TimePartitioning *TimePartitioning
// Clustering specifies the data clustering configuration for the destination table.
Clustering *Clustering
// The labels associated with this job.
Labels map[string]string
// If true, don't actually run this job. A valid query will return a mostly
// empty response with some processing statistics, while an invalid query will
// return the same error it would if it wasn't a dry run.
//
// Query.Read will fail with dry-run queries. Call Query.Run instead, and then
// call LastStatus on the returned job to get statistics (a sketch follows the
// Run method below). Calling Status on a dry-run job will fail.
DryRun bool
// Custom encryption configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfig *EncryptionConfig
// Allows the schema of the destination table to be updated as a side effect of
// the query job.
SchemaUpdateOptions []string
}
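// Illustrative sketch, not part of the vendored source: the two parameter
// syntaxes described by the Parameters field above, assuming a configured
// *Client c. A single query must use one style or the other, never both.
//
//	q1 := c.Query("SELECT word FROM corpus WHERE count > ?")
//	q1.Parameters = []QueryParameter{{Value: 10}}
//
//	q2 := c.Query("SELECT word FROM corpus WHERE count > @min")
//	q2.Parameters = []QueryParameter{{Name: "min", Value: 10}}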
func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
qconf := &bq.JobConfigurationQuery{
Query: qc.Q,
CreateDisposition: string(qc.CreateDisposition),
WriteDisposition: string(qc.WriteDisposition),
AllowLargeResults: qc.AllowLargeResults,
Priority: string(qc.Priority),
MaximumBytesBilled: qc.MaxBytesBilled,
TimePartitioning: qc.TimePartitioning.toBQ(),
Clustering: qc.Clustering.toBQ(),
DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(),
SchemaUpdateOptions: qc.SchemaUpdateOptions,
}
if len(qc.TableDefinitions) > 0 {
qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
}
for name, data := range qc.TableDefinitions {
qconf.TableDefinitions[name] = data.toBQ()
}
if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" {
qconf.DefaultDataset = &bq.DatasetReference{
DatasetId: qc.DefaultDatasetID,
ProjectId: qc.DefaultProjectID,
}
}
if tier := int64(qc.MaxBillingTier); tier > 0 {
qconf.MaximumBillingTier = &tier
}
f := false
if qc.DisableQueryCache {
qconf.UseQueryCache = &f
}
if qc.DisableFlattenedResults {
qconf.FlattenResults = &f
// DisableFlattenResults implies AllowLargeResults.
qconf.AllowLargeResults = true
}
if qc.UseStandardSQL && qc.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
if len(qc.Parameters) > 0 && qc.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
}
ptrue := true
pfalse := false
if qc.UseLegacySQL {
qconf.UseLegacySql = &ptrue
} else {
qconf.UseLegacySql = &pfalse
}
if qc.Dst != nil && !qc.Dst.implicitTable() {
qconf.DestinationTable = qc.Dst.toBQ()
}
for _, p := range qc.Parameters {
qp, err := p.toBQ()
if err != nil {
return nil, err
}
qconf.QueryParameters = append(qconf.QueryParameters, qp)
}
return &bq.JobConfiguration{
Labels: qc.Labels,
DryRun: qc.DryRun,
Query: qconf,
}, nil
}
func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
qq := q.Query
qc := &QueryConfig{
Labels: q.Labels,
DryRun: q.DryRun,
Q: qq.Query,
CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
WriteDisposition: TableWriteDisposition(qq.WriteDisposition),
AllowLargeResults: qq.AllowLargeResults,
Priority: QueryPriority(qq.Priority),
MaxBytesBilled: qq.MaximumBytesBilled,
UseLegacySQL: qq.UseLegacySql == nil || *qq.UseLegacySql,
TimePartitioning: bqToTimePartitioning(qq.TimePartitioning),
Clustering: bqToClustering(qq.Clustering),
DestinationEncryptionConfig: bqToEncryptionConfig(qq.DestinationEncryptionConfiguration),
SchemaUpdateOptions: qq.SchemaUpdateOptions,
}
qc.UseStandardSQL = !qc.UseLegacySQL
if len(qq.TableDefinitions) > 0 {
qc.TableDefinitions = make(map[string]ExternalData)
}
for name, qedc := range qq.TableDefinitions {
edc, err := bqToExternalDataConfig(&qedc)
if err != nil {
return nil, err
}
qc.TableDefinitions[name] = edc
}
if qq.DefaultDataset != nil {
qc.DefaultProjectID = qq.DefaultDataset.ProjectId
qc.DefaultDatasetID = qq.DefaultDataset.DatasetId
}
if qq.MaximumBillingTier != nil {
qc.MaxBillingTier = int(*qq.MaximumBillingTier)
}
if qq.UseQueryCache != nil && !*qq.UseQueryCache {
qc.DisableQueryCache = true
}
if qq.FlattenResults != nil && !*qq.FlattenResults {
qc.DisableFlattenedResults = true
}
if qq.DestinationTable != nil {
qc.Dst = bqToTable(qq.DestinationTable, c)
}
for _, qp := range qq.QueryParameters {
p, err := bqToQueryParameter(qp)
if err != nil {
return nil, err
}
qc.Parameters = append(qc.Parameters, p)
}
return qc, nil
}
// QueryPriority specifies a priority with which a query is to be executed.
type QueryPriority string
const (
// BatchPriority specifies that the query should be scheduled with the
// batch priority. BigQuery queues each batch query on your behalf, and
// starts the query as soon as idle resources are available, usually within
// a few minutes. If BigQuery hasn't started the query within 24 hours,
// BigQuery changes the job priority to interactive. Batch queries don't
// count towards your concurrent rate limit, which can make it easier to
// start many queries at once.
//
// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#batchqueries.
BatchPriority QueryPriority = "BATCH"
// InteractivePriority specifies that the query should be scheduled with
// interactive priority, which means that the query is executed as soon as
// possible. Interactive queries count towards your concurrent rate limit
// and your daily limit. It is the default priority with which queries get
// executed.
//
// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#queries.
InteractivePriority QueryPriority = "INTERACTIVE"
)
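// Illustrative sketch, not part of the vendored source: scheduling a query
// with batch priority, per the doc above, so it waits for idle resources
// instead of counting against the concurrent rate limit.
//
//	q := c.Query("SELECT COUNT(*) FROM dataset.table")
//	q.Priority = BatchPriority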
// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
JobIDConfig
QueryConfig
client *Client
}

-func (q *Query) implementsSource() {}
-
-func (q *Query) implementsReadSource() {}
-
-func (q *Query) customizeQuerySrc(conf *bq.JobConfigurationQuery) {
-conf.Query = q.Q
-if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
-conf.DefaultDataset = &bq.DatasetReference{
-DatasetId: q.DefaultDatasetID,
-ProjectId: q.DefaultProjectID,
-}
-}
-}
+// Query creates a query with string q.
+// The returned Query may optionally be further configured before its Run method is called.
+func (c *Client) Query(q string) *Query {
+return &Query{
+client:      c,
+QueryConfig: QueryConfig{Q: q},
+}
+}
// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Query.Run")
defer func() { trace.EndSpan(ctx, err) }()
job, err := q.newJob()
if err != nil {
return nil, err
}
j, err = q.client.insertJob(ctx, job, nil)
if err != nil {
return nil, err
}
return j, nil
}
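// Illustrative sketch, not part of the vendored source: the dry-run flow
// described on QueryConfig.DryRun. Run the job and inspect LastStatus; Read
// would fail for a dry-run query. The statistics field shown is an assumption
// about this package's Job API.
//
//	q := c.Query("SELECT * FROM dataset.table")
//	q.DryRun = true
//	job, err := q.Run(ctx)
//	if err != nil {
//		// handle error
//	}
//	bytes := job.LastStatus().Statistics.TotalBytesProcessed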
func (q *Query) newJob() (*bq.Job, error) {
config, err := q.QueryConfig.toBQ()
if err != nil {
return nil, err
}
return &bq.Job{
JobReference: q.JobIDConfig.createJobRef(q.client),
Configuration: config,
}, nil
}
// Read submits a query for execution and returns the results via a RowIterator.
// It is a shorthand for Query.Run followed by Job.Read.
func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
job, err := q.Run(ctx)
if err != nil {
return nil, err
}
return job.Read(ctx)
}
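// Illustrative sketch, not part of the vendored source: draining the
// RowIterator returned by Read, using the iterator.Done sentinel from
// google.golang.org/api/iterator.
//
//	it, err := q.Read(ctx)
//	if err != nil {
//		// handle error
//	}
//	for {
//		var row []Value
//		err := it.Next(&row)
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// handle error
//		}
//		fmt.Println(row)
//	}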


@@ -1,148 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type queryOption interface {
customizeQuery(conf *bq.JobConfigurationQuery)
}
// DisableQueryCache returns an Option that prevents results being fetched from the query cache.
// If this Option is not used, results are fetched from the cache if they are available.
// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
// Cached results are only available when TableID is unspecified in the query's destination Table.
// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
func DisableQueryCache() Option { return disableQueryCache{} }
type disableQueryCache struct{}
func (opt disableQueryCache) implementsOption() {}
func (opt disableQueryCache) customizeQuery(conf *bq.JobConfigurationQuery) {
f := false
conf.UseQueryCache = &f
}
// DisableFlattenedResults returns an Option that prevents results being flattened.
// If this Option is not used, results from nested and repeated fields are flattened.
// DisableFlattenedResults implies AllowLargeResults
// For more information, see https://cloud.google.com/bigquery/docs/data#nested
func DisableFlattenedResults() Option { return disableFlattenedResults{} }
type disableFlattenedResults struct{}
func (opt disableFlattenedResults) implementsOption() {}
func (opt disableFlattenedResults) customizeQuery(conf *bq.JobConfigurationQuery) {
f := false
conf.FlattenResults = &f
// DisableFlattenedResults implies AllowLargeResults
allowLargeResults{}.customizeQuery(conf)
}
// AllowLargeResults returns an Option that allows the query to produce arbitrarily large result tables.
// The destination must be a table.
// When using this option, queries will take longer to execute, even if the result set is small.
// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
func AllowLargeResults() Option { return allowLargeResults{} }
type allowLargeResults struct{}
func (opt allowLargeResults) implementsOption() {}
func (opt allowLargeResults) customizeQuery(conf *bq.JobConfigurationQuery) {
conf.AllowLargeResults = true
}
// JobPriority returns an Option that causes a query to be scheduled with the specified priority.
// The default priority is InteractivePriority.
// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
func JobPriority(priority string) Option { return jobPriority(priority) }
type jobPriority string
func (opt jobPriority) implementsOption() {}
func (opt jobPriority) customizeQuery(conf *bq.JobConfigurationQuery) {
conf.Priority = string(opt)
}
const (
BatchPriority = "BATCH"
InteractivePriority = "INTERACTIVE"
)
// MaxBillingTier returns an Option that sets the maximum billing tier for a Query.
// Queries that have resource usage beyond this tier will fail (without
// incurring a charge). If this Option is not used, the project default will be used.
func MaxBillingTier(tier int) Option { return maxBillingTier(tier) }
type maxBillingTier int
func (opt maxBillingTier) implementsOption() {}
func (opt maxBillingTier) customizeQuery(conf *bq.JobConfigurationQuery) {
tier := int64(opt)
conf.MaximumBillingTier = &tier
}
// MaxBytesBilled returns an Option that limits the number of bytes billed for
// this job. Queries that would exceed this limit will fail (without incurring
// a charge).
// If this Option is not used, or bytes is < 1, the project default will be
// used.
func MaxBytesBilled(bytes int64) Option { return maxBytesBilled(bytes) }
type maxBytesBilled int64
func (opt maxBytesBilled) implementsOption() {}
func (opt maxBytesBilled) customizeQuery(conf *bq.JobConfigurationQuery) {
if opt >= 1 {
conf.MaximumBytesBilled = int64(opt)
}
}
func (c *Client) query(ctx context.Context, dst *Table, src *Query, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationQuery{}
dst.customizeQueryDst(payload)
src.customizeQuerySrc(payload)
for _, opt := range options {
o, ok := opt.(queryOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeQuery(payload)
}
job.Configuration = &bq.JobConfiguration{
Query: payload,
}
j, err := c.service.insertJob(ctx, job, c.projectID)
if err != nil {
return nil, err
}
j.isQuery = true
return j, nil
}


@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,20 +15,22 @@
package bigquery

import (
-"reflect"
"testing"
+"time"

-"golang.org/x/net/context"
+"cloud.google.com/go/internal/testutil"
+"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)
func defaultQueryJob() *bq.Job {
+pfalse := false
return &bq.Job{
+JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
DestinationTable: &bq.TableReference{
-ProjectId: "project-id",
+ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@@ -37,34 +39,60 @@ func defaultQueryJob() *bq.Job {
ProjectId: "def-project-id", ProjectId: "def-project-id",
DatasetId: "def-dataset-id", DatasetId: "def-dataset-id",
}, },
UseLegacySql: &pfalse,
}, },
}, },
} }
} }
var defaultQuery = &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
}
func TestQuery(t *testing.T) {
+defer fixRandomID("RANDOM")()
+c := &Client{
+projectID: "client-project-id",
+}
testCases := []struct {
dst *Table
-src *Query
-options []Option
+src *QueryConfig
+jobIDConfig JobIDConfig
want *bq.Job
}{
{
-dst: defaultTable(nil),
+dst: c.Dataset("dataset-id").Table("table-id"),
src: defaultQuery,
want: defaultQueryJob(),
},
{
-dst: defaultTable(nil),
-src: &Query{
+dst: c.Dataset("dataset-id").Table("table-id"),
+src: &QueryConfig{
Q: "query string",
+Labels: map[string]string{"a": "b"},
+DryRun: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
+j.Configuration.Labels = map[string]string{"a": "b"}
+j.Configuration.DryRun = true
j.Configuration.Query.DefaultDataset = nil
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true},
src: &QueryConfig{Q: "query string"},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DefaultDataset = nil
j.JobReference.JobId = "jobID-RANDOM"
return j
}(),
},
{
dst: &Table{},
src: defaultQuery,
@@ -74,25 +102,85 @@ func TestQuery(t *testing.T) {
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
TableDefinitions: map[string]ExternalData{
"atable": func() *GCSReference {
g := NewGCSReference("uri")
g.AllowJaggedRows = true
g.AllowQuotedNewlines = true
g.Compression = Gzip
g.Encoding = UTF_8
g.FieldDelimiter = ";"
g.IgnoreUnknownValues = true
g.MaxBadRecords = 1
g.Quote = "'"
g.SkipLeadingRows = 2
g.Schema = Schema{{Name: "name", Type: StringFieldType}}
return g
}(),
},
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DefaultDataset = nil
td := make(map[string]bq.ExternalDataConfiguration)
quote := "'"
td["atable"] = bq.ExternalDataConfiguration{
Compression: "GZIP",
IgnoreUnknownValues: true,
MaxBadRecords: 1,
SourceFormat: "CSV", // must be explicitly set.
SourceUris: []string{"uri"},
CsvOptions: &bq.CsvOptions{
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: "UTF-8",
FieldDelimiter: ";",
SkipLeadingRows: 2,
Quote: &quote,
},
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
{Name: "name", Type: "STRING"},
},
},
}
j.Configuration.Query.TableDefinitions = td
return j
}(),
},
{
dst: &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
},
-src: defaultQuery,
-options: []Option{CreateNever, WriteTruncate},
+src: &QueryConfig{
+Q: "query string",
+DefaultProjectID: "def-project-id",
+DefaultDatasetID: "def-dataset-id",
+CreateDisposition: CreateNever,
+WriteDisposition: WriteTruncate,
+},
want: func() *bq.Job {
j := defaultQueryJob()
+j.Configuration.Query.DestinationTable.ProjectId = "project-id"
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
return j
}(),
},
{
-dst: defaultTable(nil),
-src: defaultQuery,
-options: []Option{DisableQueryCache()},
+dst: c.Dataset("dataset-id").Table("table-id"),
+src: &QueryConfig{
+Q: "query string",
+DefaultProjectID: "def-project-id",
+DefaultDatasetID: "def-dataset-id",
+DisableQueryCache: true,
+},
want: func() *bq.Job {
j := defaultQueryJob()
f := false
@@ -101,9 +189,13 @@ func TestQuery(t *testing.T) {
}(),
},
{
-dst: defaultTable(nil),
-src: defaultQuery,
-options: []Option{AllowLargeResults()},
+dst: c.Dataset("dataset-id").Table("table-id"),
+src: &QueryConfig{
+Q: "query string",
+DefaultProjectID: "def-project-id",
+DefaultDatasetID: "def-dataset-id",
+AllowLargeResults: true,
+},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.AllowLargeResults = true
@@ -111,9 +203,13 @@ func TestQuery(t *testing.T) {
}(),
},
{
-dst: defaultTable(nil),
-src: defaultQuery,
-options: []Option{DisableFlattenedResults()},
+dst: c.Dataset("dataset-id").Table("table-id"),
+src: &QueryConfig{
+Q: "query string",
+DefaultProjectID: "def-project-id",
+DefaultDatasetID: "def-dataset-id",
+DisableFlattenedResults: true,
+},
want: func() *bq.Job {
j := defaultQueryJob()
f := false
@@ -123,9 +219,13 @@ func TestQuery(t *testing.T) {
}(),
},
{
-dst: defaultTable(nil),
-src: defaultQuery,
-options: []Option{JobPriority("low")},
+dst: c.Dataset("dataset-id").Table("table-id"),
+src: &QueryConfig{
+Q: "query string",
+DefaultProjectID: "def-project-id",
+DefaultDatasetID: "def-dataset-id",
+Priority: QueryPriority("low"),
+},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.Priority = "low"
@@ -133,9 +233,14 @@ func TestQuery(t *testing.T) {
}(),
},
{
-dst: defaultTable(nil),
-src: defaultQuery,
-options: []Option{MaxBillingTier(3), MaxBytesBilled(5)},
+dst: c.Dataset("dataset-id").Table("table-id"),
+src: &QueryConfig{
+Q: "query string",
+DefaultProjectID: "def-project-id",
+DefaultDatasetID: "def-dataset-id",
+MaxBillingTier: 3,
+MaxBytesBilled: 5,
+},
want: func() *bq.Job {
j := defaultQueryJob()
tier := int64(3)
@@ -145,24 +250,159 @@ func TestQuery(t *testing.T) {
}(),
},
{
-dst: defaultTable(nil),
-src: defaultQuery,
-options: []Option{MaxBytesBilled(-1)},
-want: defaultQueryJob(),
+dst: c.Dataset("dataset-id").Table("table-id"),
+src: &QueryConfig{
+Q: "query string",
+DefaultProjectID: "def-project-id",
+DefaultDatasetID: "def-dataset-id",
+UseStandardSQL: true,
+},
+want: defaultQueryJob(),
+},
+{
+dst: c.Dataset("dataset-id").Table("table-id"),
+src: &QueryConfig{
+Q: "query string",
+DefaultProjectID: "def-project-id",
+DefaultDatasetID: "def-dataset-id",
+UseLegacySQL: true,
+},
+want: func() *bq.Job {
+j := defaultQueryJob()
+ptrue := true
+j.Configuration.Query.UseLegacySql = &ptrue
+j.Configuration.Query.ForceSendFields = nil
+return j
+}(),
},
}
-for _, tc := range testCases {
-s := &testService{}
-c := &Client{
-service: s,
-}
-if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
-t.Errorf("err calling query: %v", err)
+for i, tc := range testCases {
+query := c.Query("")
+query.JobIDConfig = tc.jobIDConfig
+query.QueryConfig = *tc.src
+query.Dst = tc.dst
+got, err := query.newJob()
+if err != nil {
+t.Errorf("#%d: err calling query: %v", i, err)
continue
}
-if !reflect.DeepEqual(s.Job, tc.want) {
-t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want)
-}
+checkJob(t, i, got, tc.want)
// Round-trip.
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
wantConfig := query.QueryConfig
// We set AllowLargeResults to true when DisableFlattenedResults is true.
if wantConfig.DisableFlattenedResults {
wantConfig.AllowLargeResults = true
}
// A QueryConfig with neither UseXXXSQL field set is equivalent
// to one where UseStandardSQL = true.
if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL {
wantConfig.UseStandardSQL = true
}
// Treat nil and empty tables the same, and ignore the client.
tableEqual := func(t1, t2 *Table) bool {
if t1 == nil {
t1 = &Table{}
}
if t2 == nil {
t2 = &Table{}
}
return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID
}
// A table definition that is a GCSReference round-trips as an ExternalDataConfig.
// TODO(jba): see if there is a way to express this with a transformer.
gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig {
q := g.toBQ()
e, _ := bqToExternalDataConfig(&q)
return e
}
externalDataEqual := func(e1, e2 ExternalData) bool {
if r, ok := e1.(*GCSReference); ok {
e1 = gcsRefToEDC(r)
}
if r, ok := e2.(*GCSReference); ok {
e2 = gcsRefToEDC(r)
}
return cmp.Equal(e1, e2)
}
diff := testutil.Diff(jc.(*QueryConfig), &wantConfig,
cmp.Comparer(tableEqual),
cmp.Comparer(externalDataEqual),
)
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
} }
} }
} }
func TestConfiguringQuery(t *testing.T) {
c := &Client{
projectID: "project-id",
}
query := c.Query("q")
query.JobID = "ajob"
query.DefaultProjectID = "def-project-id"
query.DefaultDatasetID = "def-dataset-id"
query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
query.Clustering = &Clustering{
Fields: []string{"cfield1"},
}
query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"}
query.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
// Note: Other configuration fields are tested in other tests above.
// A lot of that can be consolidated once Client.Copy is gone.
pfalse := false
want := &bq.Job{
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
Query: "q",
DefaultDataset: &bq.DatasetReference{
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
UseLegacySql: &pfalse,
TimePartitioning: &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
Clustering: &bq.Clustering{Fields: []string{"cfield1"}},
DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"},
},
},
JobReference: &bq.JobReference{
JobId: "ajob",
ProjectId: "project-id",
},
}
got, err := query.newJob()
if err != nil {
t.Fatalf("err calling Query.newJob: %v", err)
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("querying: -got +want:\n%s", diff)
}
}
func TestQueryLegacySQL(t *testing.T) {
c := &Client{projectID: "project-id"}
q := c.Query("q")
q.UseStandardSQL = true
q.UseLegacySQL = true
_, err := q.newJob()
if err == nil {
t.Error("UseStandardSQL and UseLegacySQL: got nil, want error")
}
q = c.Query("q")
q.Parameters = []QueryParameter{{Name: "p", Value: 3}}
q.UseLegacySQL = true
_, err = q.newJob()
if err == nil {
t.Error("Parameters and UseLegacySQL: got nil, want error")
}
}

vendor/cloud.google.com/go/bigquery/random.go

@@ -0,0 +1,56 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"math/rand"
"os"
"sync"
"time"
)
// Support for random values (typically job IDs and insert IDs).
const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
var (
rngMu sync.Mutex
rng = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
)
// For testing.
var randomIDFn = randomID
// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
// suffixes.
const randomIDLen = 27
func randomID() string {
// This is used for both job IDs and insert IDs.
var b [randomIDLen]byte
rngMu.Lock()
for i := 0; i < len(b); i++ {
b[i] = alphanum[rng.Intn(len(alphanum))]
}
rngMu.Unlock()
return string(b[:])
}
// Seed seeds this package's random number generator, used for generating job and
// insert IDs. Use Seed to obtain repeatable, deterministic behavior from bigquery
// clients. Seed should be called before any clients are created.
func Seed(s int64) {
rng = rand.New(rand.NewSource(s))
}
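// Illustrative sketch, not part of the vendored source: calling Seed before
// any client exists makes generated job and insert IDs reproducible, which is
// useful for record/replay style tests.
//
//	func TestMain(m *testing.M) {
//		bigquery.Seed(42) // must precede the first client
//		os.Exit(m.Run())
//	}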


@@ -1,70 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import "golang.org/x/net/context"
// RecordsPerRequest returns a ReadOption that sets the number of records to fetch per request when streaming data from BigQuery.
func RecordsPerRequest(n int64) ReadOption { return recordsPerRequest(n) }
type recordsPerRequest int64
func (opt recordsPerRequest) customizeRead(conf *pagingConf) {
conf.recordsPerRequest = int64(opt)
conf.setRecordsPerRequest = true
}
// StartIndex returns a ReadOption that sets the zero-based index of the row to start reading from.
func StartIndex(i uint64) ReadOption { return startIndex(i) }
type startIndex uint64
func (opt startIndex) customizeRead(conf *pagingConf) {
conf.startIndex = uint64(opt)
}
func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
return s.readTabledata(ctx, conf, token)
}
// Read fetches the contents of the table.
func (t *Table) Read(_ context.Context, options ...ReadOption) (*Iterator, error) {
conf := &readTableConf{}
t.customizeReadSrc(conf)
for _, o := range options {
o.customizeRead(&conf.paging)
}
return newIterator(t.service, conf), nil
}
func (conf *readQueryConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
return s.readQuery(ctx, conf, token)
}
// Read fetches the results of a query job.
func (j *Job) Read(_ context.Context, options ...ReadOption) (*Iterator, error) {
conf := &readQueryConf{}
if err := j.customizeReadQuery(conf); err != nil {
return nil, err
}
for _, o := range options {
o.customizeRead(&conf.paging)
}
return newIterator(j.service, conf), nil
}


@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,92 +15,102 @@
package bigquery

import (
+"context"
"errors"
-"reflect"
"testing"

-"golang.org/x/net/context"
+"cloud.google.com/go/internal/testutil"
+"github.com/google/go-cmp/cmp"
+bq "google.golang.org/api/bigquery/v2"
+"google.golang.org/api/iterator"
)
-type readTabledataArgs struct {
-conf *readTableConf
-tok string
-}
-
-type readQueryArgs struct {
-conf *readQueryConf
-tok string
-}
-
-// readServiceStub services read requests by returning data from an in-memory list of values.
-type readServiceStub struct {
+type pageFetcherArgs struct {
+table *Table
+schema Schema
+startIndex uint64
+pageSize int64
+pageToken string
+}
+
+// pageFetcherReadStub services read requests by returning data from an in-memory list of values.
+type pageFetcherReadStub struct {
// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
values [][][]Value // contains pages / rows / columns.
pageTokens map[string]string // maps incoming page token to returned page token.

// arguments are recorded for later inspection.
-readTabledataCalls []readTabledataArgs
-readQueryCalls []readQueryArgs
-service
+calls []pageFetcherArgs
}
-func (s *readServiceStub) readValues(tok string) *readDataResult {
-result := &readDataResult{
-pageToken: s.pageTokens[tok],
+func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
+s.calls = append(s.calls,
+pageFetcherArgs{t, schema, startIndex, pageSize, pageToken})
+result := &fetchPageResult{
+pageToken: s.pageTokens[pageToken],
rows: s.values[0],
}
s.values = s.values[1:]
-return result
-}
-
-func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
-s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
-return s.readValues(token), nil
-}
-
-func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
-s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token})
-return s.readValues(token), nil
+return result, nil
+}
+
+func waitForQueryStub(context.Context, string) (Schema, uint64, error) {
+return nil, 1, nil
}
func TestRead(t *testing.T) {
// The data for the service stub to return is populated for each test case in the testCases for loop.
-service := &readServiceStub{}
-c := &Client{
-service: service,
-}
+ctx := context.Background()
+c := &Client{projectID: "project-id"}
+pf := &pageFetcherReadStub{}
queryJob := &Job{
projectID: "project-id",
jobID: "job-id",
-service: service,
-isQuery: true,
+c: c,
+config: &bq.JobConfiguration{
+Query: &bq.JobConfigurationQuery{
+DestinationTable: &bq.TableReference{
+ProjectId: "project-id",
+DatasetId: "dataset-id",
+TableId: "table-id",
+},
+},
+},
}
-for _, src := range []ReadSource{defaultTable(service), queryJob} {
+for _, readFunc := range []func() *RowIterator{
func() *RowIterator {
return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage)
},
func() *RowIterator {
it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage)
if err != nil {
t.Fatal(err)
}
return it
},
} {
testCases := []struct {
data [][][]Value
pageTokens map[string]string
-want []ValueList
+want [][]Value
}{
{
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
pageTokens: map[string]string{"": "a", "a": ""},
-want: []ValueList{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
+want: [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
},
{
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
pageTokens: map[string]string{"": ""}, // no more pages after first one.
-want: []ValueList{{1, 2}, {11, 12}},
+want: [][]Value{{1, 2}, {11, 12}},
},
}
for _, tc := range testCases {
-service.values = tc.data
-service.pageTokens = tc.pageTokens
-if got, ok := doRead(t, c, src); ok {
-if !reflect.DeepEqual(got, tc.want) {
+pf.values = tc.data
+pf.pageTokens = tc.pageTokens
+if got, ok := collectValues(t, readFunc()); ok {
+if !testutil.Equal(got, tc.want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
}
}
@@ -108,201 +118,116 @@ func TestRead(t *testing.T) {
}
}

-// doRead calls Read with a ReadSource. Get is repeatedly called on the Iterator returned by Read and the results are returned.
-func doRead(t *testing.T, c *Client, src ReadSource) ([]ValueList, bool) {
-it, err := c.Read(context.Background(), src)
-if err != nil {
-t.Errorf("err calling Read: %v", err)
-return nil, false
-}
-var got []ValueList
-for it.Next(context.Background()) {
-var vals ValueList
-if err := it.Get(&vals); err != nil {
-t.Errorf("err calling Get: %v", err)
-return nil, false
-} else {
-got = append(got, vals)
-}
+func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) {
+var got [][]Value
+for {
+var vals []Value
+err := it.Next(&vals)
+if err == iterator.Done {
+break
+}
+if err != nil {
+t.Errorf("err calling Next: %v", err)
+return nil, false
+}
+got = append(got, vals)
}
return got, true
}
func TestNoMoreValues(t *testing.T) {
-c := &Client{
-service: &readServiceStub{
-values: [][][]Value{{{1, 2}, {11, 12}}},
-},
+c := &Client{projectID: "project-id"}
+pf := &pageFetcherReadStub{
+values: [][][]Value{{{1, 2}, {11, 12}}},
}
-it, err := c.Read(context.Background(), defaultTable(c.service))
-if err != nil {
-t.Fatalf("err calling Read: %v", err)
-}
-var vals ValueList
+it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage)
+var vals []Value
// We expect to retrieve two values and then fail on the next attempt.
-if !it.Next(context.Background()) {
-t.Fatalf("Next: got: false: want: true")
+if err := it.Next(&vals); err != nil {
+t.Fatalf("Next: got: %v: want: nil", err)
}
-if !it.Next(context.Background()) {
-t.Fatalf("Next: got: false: want: true")
-}
-if err := it.Get(&vals); err != nil {
-t.Fatalf("Get: got: %v: want: nil", err)
-}
-if it.Next(context.Background()) {
-t.Fatalf("Next: got: true: want: false")
-}
-if err := it.Get(&vals); err == nil {
-t.Fatalf("Get: got: %v: want: non-nil", err)
+if err := it.Next(&vals); err != nil {
+t.Fatalf("Next: got: %v: want: nil", err)
+}
+if err := it.Next(&vals); err != iterator.Done {
+t.Fatalf("Next: got: %v: want: iterator.Done", err)
}
}
-// delayedReadStub simulates reading results from a query that has not yet
-// completed. Its readQuery method initially reports that the query job is not
-// yet complete. Subsequently, it proxies the request through to another
-// service stub.
-type delayedReadStub struct {
-numDelays int
-
-readServiceStub
-}
-
-func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
-if s.numDelays > 0 {
-s.numDelays--
-return nil, errIncompleteJob
-}
-return s.readServiceStub.readQuery(ctx, conf, token)
-}
-
-// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete.
-func TestIncompleteJob(t *testing.T) {
-service := &delayedReadStub{
-numDelays: 2,
-readServiceStub: readServiceStub{
-values: [][][]Value{{{1, 2}}},
-},
-}
-c := &Client{service: service}
-queryJob := &Job{
-projectID: "project-id",
-jobID: "job-id",
-service: service,
-isQuery: true,
-}
-it, err := c.Read(context.Background(), queryJob)
-if err != nil {
-t.Fatalf("err calling Read: %v", err)
-}
-var got ValueList
-want := ValueList{1, 2}
-if !it.Next(context.Background()) {
-t.Fatalf("Next: got: false: want: true")
-}
-if err := it.Get(&got); err != nil {
-t.Fatalf("Error calling Get: %v", err)
-}
-if service.numDelays != 0 {
-t.Errorf("remaining numDelays : got: %v want:0", service.numDelays)
-}
-if !reflect.DeepEqual(got, want) {
-t.Errorf("reading: got:\n%v\nwant:\n%v", got, want)
-}
-}
-
-type errorReadService struct {
-service
-}
-
-func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
-return nil, errors.New("bang!")
+var errBang = errors.New("bang")
+
+func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) {
+return nil, errBang
}
func TestReadError(t *testing.T) {
// test that service read errors are propagated back to the caller.
-c := &Client{service: &errorReadService{}}
-it, err := c.Read(context.Background(), defaultTable(c.service))
-if err != nil {
-// Read should not return an error; only Err should.
-t.Fatalf("err calling Read: %v", err)
-}
-if it.Next(context.Background()) {
-t.Fatalf("Next: got: true: want: false")
-}
-if err := it.Err(); err.Error() != "bang!" {
-t.Fatalf("Get: got: %v: want: bang!", err)
+c := &Client{projectID: "project-id"}
+it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage)
+var vals []Value
+if err := it.Next(&vals); err != errBang {
+t.Fatalf("Get: got: %v: want: %v", err, errBang)
}
}
func TestReadTabledataOptions(t *testing.T) {
// test that read options are propagated.
-s := &readServiceStub{
+s := &pageFetcherReadStub{
values: [][][]Value{{{1, 2}}},
}
-c := &Client{service: s}
-it, err := c.Read(context.Background(), defaultTable(s), RecordsPerRequest(5))
-if err != nil {
-t.Fatalf("err calling Read: %v", err)
-}
-if !it.Next(context.Background()) {
-t.Fatalf("Next: got: false: want: true")
-}
-want := []readTabledataArgs{{
-conf: &readTableConf{
-projectID: "project-id",
-datasetID: "dataset-id",
-tableID: "table-id",
-paging: pagingConf{
-recordsPerRequest: 5,
-setRecordsPerRequest: true,
-},
-},
-tok: "",
-}}
-if !reflect.DeepEqual(s.readTabledataCalls, want) {
-t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
+c := &Client{projectID: "project-id"}
+tr := c.Dataset("dataset-id").Table("table-id")
+it := tr.read(context.Background(), s.fetchPage)
+it.PageInfo().MaxSize = 5
+var vals []Value
+if err := it.Next(&vals); err != nil {
+t.Fatal(err)
+}
+want := []pageFetcherArgs{{
+table: tr,
+pageSize: 5,
+pageToken: "",
+}}
+if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" {
+t.Errorf("reading (got=-, want=+):\n%s", diff)
}
}
func TestReadQueryOptions(t *testing.T) {
// test that read options are propagated.
-s := &readServiceStub{
-values: [][][]Value{{{1, 2}}},
-}
-c := &Client{service: s}
+c := &Client{projectID: "project-id"}
+pf := &pageFetcherReadStub{
+values: [][][]Value{{{1, 2}}},
+}
+tr := &bq.TableReference{
+ProjectId: "project-id",
+DatasetId: "dataset-id",
+TableId: "table-id",
+}
queryJob := &Job{
projectID: "project-id",
jobID: "job-id",
-service: s,
-isQuery: true,
+c: c,
+config: &bq.JobConfiguration{
+Query: &bq.JobConfigurationQuery{DestinationTable: tr},
+},
}
-it, err := c.Read(context.Background(), queryJob, RecordsPerRequest(5))
+it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage)
if err != nil {
t.Fatalf("err calling Read: %v", err)
}
-if !it.Next(context.Background()) {
-t.Fatalf("Next: got: false: want: true")
-}
-want := []readQueryArgs{{
-conf: &readQueryConf{
-projectID: "project-id",
-jobID: "job-id",
-paging: pagingConf{
-recordsPerRequest: 5,
-setRecordsPerRequest: true,
-},
-},
-tok: "",
-}}
-if !reflect.DeepEqual(s.readQueryCalls, want) {
-t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want)
+it.PageInfo().MaxSize = 5
+var vals []Value
+if err := it.Next(&vals); err != nil {
+t.Fatalf("Next: got: %v: want: nil", err)
+}
+want := []pageFetcherArgs{{
+table: bqToTable(tr, c),
+pageSize: 5,
+pageToken: "",
+}}
+if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) {
+t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want)
}
}


@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,8 +15,11 @@
package bigquery

import (
	"encoding/json"
	"errors"
	"fmt"
	"reflect"
	"sync"

	bq "google.golang.org/api/bigquery/v2"
)
@@ -24,6 +27,7 @@ import (
// Schema describes the fields in a table or query result.
type Schema []*FieldSchema

// FieldSchema describes a single field.
type FieldSchema struct {
	// The field name.
	// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
@@ -46,7 +50,7 @@ type FieldSchema struct {
	Schema Schema
}

func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
	tfs := &bq.TableFieldSchema{
		Description: fs.Description,
		Name:        fs.Name,
@@ -60,26 +64,21 @@ func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
	} // else leave as default, which is interpreted as NULLABLE.
	for _, f := range fs.Schema {
		tfs.Fields = append(tfs.Fields, f.toBQ())
	}
	return tfs
}

func (s Schema) toBQ() *bq.TableSchema {
	var fields []*bq.TableFieldSchema
	for _, f := range s {
		fields = append(fields, f.toBQ())
	}
	return &bq.TableSchema{Fields: fields}
}

func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
	fs := &FieldSchema{
		Description: tfs.Description,
		Name:        tfs.Name,
@@ -89,145 +88,431 @@ func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
	}
	for _, f := range tfs.Fields {
		fs.Schema = append(fs.Schema, bqToFieldSchema(f))
	}
	return fs
}

func bqToSchema(ts *bq.TableSchema) Schema {
	if ts == nil {
		return nil
	}
	var s Schema
	for _, f := range ts.Fields {
		s = append(s, bqToFieldSchema(f))
	}
	return s
}
// FieldType is the type of field.
type FieldType string

const (
	// StringFieldType is a string field type.
	StringFieldType FieldType = "STRING"
	// BytesFieldType is a bytes field type.
	BytesFieldType FieldType = "BYTES"
	// IntegerFieldType is an integer field type.
	IntegerFieldType FieldType = "INTEGER"
	// FloatFieldType is a float field type.
	FloatFieldType FieldType = "FLOAT"
	// BooleanFieldType is a boolean field type.
	BooleanFieldType FieldType = "BOOLEAN"
	// TimestampFieldType is a timestamp field type.
	TimestampFieldType FieldType = "TIMESTAMP"
	// RecordFieldType is a record field type. It is typically used to create columns with repeated or nested data.
	RecordFieldType FieldType = "RECORD"
	// DateFieldType is a date field type.
	DateFieldType FieldType = "DATE"
	// TimeFieldType is a time field type.
	TimeFieldType FieldType = "TIME"
	// DateTimeFieldType is a datetime field type.
	DateTimeFieldType FieldType = "DATETIME"
	// NumericFieldType is a numeric field type. Numeric types include integer types, floating point types and the
	// NUMERIC data type.
	NumericFieldType FieldType = "NUMERIC"
	// GeographyFieldType is a geography field type. Geography types represent a set of points
	// on the Earth's surface, represented in Well Known Text (WKT) format.
	GeographyFieldType FieldType = "GEOGRAPHY"
)

var (
	errEmptyJSONSchema = errors.New("bigquery: empty JSON schema")
	fieldTypes         = map[FieldType]bool{
		StringFieldType:    true,
		BytesFieldType:     true,
		IntegerFieldType:   true,
		FloatFieldType:     true,
		BooleanFieldType:   true,
		TimestampFieldType: true,
		RecordFieldType:    true,
		DateFieldType:      true,
		TimeFieldType:      true,
		DateTimeFieldType:  true,
		NumericFieldType:   true,
		GeographyFieldType: true,
	}
)
var typeOfByteSlice = reflect.TypeOf([]byte{})
// InferSchema tries to derive a BigQuery schema from the supplied struct value.
// Each exported struct field is mapped to a field in the schema.
//
// The following BigQuery types are inferred from the corresponding Go types.
// (This is the same mapping as that used for RowIterator.Next.) Fields inferred
// from these types are marked required (non-nullable).
//
//	STRING      string
//	BOOL        bool
//	INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
//	FLOAT       float32, float64
//	BYTES       []byte
//	TIMESTAMP   time.Time
//	DATE        civil.Date
//	TIME        civil.Time
//	DATETIME    civil.DateTime
//	NUMERIC     *big.Rat
//
// The big.Rat type supports numbers of arbitrary size and precision. Values
// will be rounded to 9 digits after the decimal point before being transmitted
// to BigQuery. See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
// for more on NUMERIC.
//
// A Go slice or array type is inferred to be a BigQuery repeated field of the
// element type. The element type must be one of the above listed types.
//
// Due to lack of unique native Go type for GEOGRAPHY, there is no schema
// inference to GEOGRAPHY at this time.
//
// Nullable fields are inferred from the NullXXX types, declared in this package:
//
//	STRING      NullString
//	BOOL        NullBool
//	INTEGER     NullInt64
//	FLOAT       NullFloat64
//	TIMESTAMP   NullTimestamp
//	DATE        NullDate
//	TIME        NullTime
//	DATETIME    NullDateTime
//	GEOGRAPHY   NullGeography
//
// For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below).
// For a nullable NUMERIC field, use the type *big.Rat and tag the field "nullable".
//
// A struct field that is of struct type is inferred to be a required field of type
// RECORD with a schema inferred recursively. For backwards compatibility, a field of
// type pointer to struct is also inferred to be required. To get a nullable RECORD
// field, use the "nullable" tag (see below).
//
// InferSchema returns an error if any of the examined fields is of type uint,
// uint64, uintptr, map, interface, complex64, complex128, func, or chan. Future
// versions may handle these cases without error.
//
// Recursively defined structs are also disallowed.
//
// Struct fields may be tagged in a way similar to the encoding/json package.
// A tag of the form
//	bigquery:"name"
// uses "name" instead of the struct field name as the BigQuery field name.
// A tag of the form
//	bigquery:"-"
// omits the field from the inferred schema.
// The "nullable" option marks the field as nullable (not required). It is only
// needed for []byte, *big.Rat and pointer-to-struct fields, and cannot appear on other
// fields. In this example, the Go name of the field is retained:
//	bigquery:",nullable"
func InferSchema(st interface{}) (Schema, error) {
	return inferSchemaReflectCached(reflect.TypeOf(st))
}
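// Illustrative sketch (not part of this file): how the tag options documented
// above combine in practice. The Purchase type and its fields are placeholders.
//
//	type Purchase struct {
//		Item     string   `bigquery:"item"`          // renamed column
//		Amount   *big.Rat `bigquery:",nullable"`     // nullable NUMERIC
//		internal int      // unexported: ignored
//		Debug    string   `bigquery:"-"`             // explicitly omitted
//	}
//
//	schema, err := InferSchema(Purchase{})
//	// schema: item (STRING, required), Amount (NUMERIC, nullable).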
var schemaCache sync.Map

type cacheVal struct {
	schema Schema
	err    error
}

func inferSchemaReflectCached(t reflect.Type) (Schema, error) {
	var cv cacheVal
	v, ok := schemaCache.Load(t)
	if ok {
		cv = v.(cacheVal)
	} else {
		s, err := inferSchemaReflect(t)
		cv = cacheVal{s, err}
		schemaCache.Store(t, cv)
	}
	return cv.schema, cv.err
}

func inferSchemaReflect(t reflect.Type) (Schema, error) {
	rec, err := hasRecursiveType(t, nil)
	if err != nil {
		return nil, err
	}
	if rec {
		return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t)
	}
	return inferStruct(t)
}

func inferStruct(t reflect.Type) (Schema, error) {
	switch t.Kind() {
	case reflect.Ptr:
		if t.Elem().Kind() != reflect.Struct {
			return nil, noStructError{t}
		}
		t = t.Elem()
		fallthrough
	case reflect.Struct:
		return inferFields(t)
	default:
		return nil, noStructError{t}
	}
}
// inferFieldSchema infers the FieldSchema for a Go type
func inferFieldSchema(fieldName string, rt reflect.Type, nullable bool) (*FieldSchema, error) {
	// Only []byte and struct pointers can be tagged nullable.
	if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
		return nil, badNullableError{fieldName, rt}
	}
	switch rt {
	case typeOfByteSlice:
		return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
	case typeOfGoTime:
		return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
	case typeOfDate:
		return &FieldSchema{Required: true, Type: DateFieldType}, nil
	case typeOfTime:
		return &FieldSchema{Required: true, Type: TimeFieldType}, nil
	case typeOfDateTime:
		return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
	case typeOfRat:
		return &FieldSchema{Required: !nullable, Type: NumericFieldType}, nil
	}
	if ft := nullableFieldType(rt); ft != "" {
		return &FieldSchema{Required: false, Type: ft}, nil
	}
	if isSupportedIntType(rt) || isSupportedUintType(rt) {
		return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
	}
	switch rt.Kind() {
	case reflect.Slice, reflect.Array:
		et := rt.Elem()
		if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
			// Multi dimensional slices/arrays are not supported by BigQuery
			return nil, unsupportedFieldTypeError{fieldName, rt}
		}
		if nullableFieldType(et) != "" {
			// Repeated nullable types are not supported by BigQuery.
			return nil, unsupportedFieldTypeError{fieldName, rt}
		}
		f, err := inferFieldSchema(fieldName, et, false)
		if err != nil {
			return nil, err
		}
		f.Repeated = true
		f.Required = false
		return f, nil
	case reflect.Ptr:
		if rt.Elem().Kind() != reflect.Struct {
			return nil, unsupportedFieldTypeError{fieldName, rt}
		}
		fallthrough
	case reflect.Struct:
		nested, err := inferStruct(rt)
		if err != nil {
			return nil, err
		}
		return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
	case reflect.String:
		return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
	case reflect.Bool:
		return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
	case reflect.Float32, reflect.Float64:
		return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
	default:
		return nil, unsupportedFieldTypeError{fieldName, rt}
	}
}
// inferFields extracts all exported field types from struct type.
func inferFields(rt reflect.Type) (Schema, error) {
	var s Schema
	fields, err := fieldCache.Fields(rt)
	if err != nil {
		return nil, err
	}
	for _, field := range fields {
		var nullable bool
		for _, opt := range field.ParsedTag.([]string) {
			if opt == nullableTagOption {
				nullable = true
				break
			}
		}
		f, err := inferFieldSchema(field.Name, field.Type, nullable)
		if err != nil {
			return nil, err
		}
		f.Name = field.Name
		s = append(s, f)
	}
	return s, nil
}
// isSupportedIntType reports whether t is an int type that can be properly
// represented by the BigQuery INTEGER/INT64 type.
func isSupportedIntType(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return true
	default:
		return false
	}
}

// isSupportedUintType reports whether t is a uint type that can be properly
// represented by the BigQuery INTEGER/INT64 type.
func isSupportedUintType(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return true
	default:
		return false
	}
}
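// Note: uint, uint64 and uintptr are deliberately absent from both lists
// above. BigQuery's INTEGER type is a signed 64-bit value, so a uint64 such
// as math.MaxUint64 cannot be represented without overflow; rejecting these
// types at inference time surfaces the problem early instead of corrupting
// values on upload.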
// typeList is a linked list of reflect.Types.
type typeList struct {
t reflect.Type
next *typeList
}
func (l *typeList) has(t reflect.Type) bool {
for l != nil {
if l.t == t {
return true
}
l = l.next
}
return false
}
// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
// via exported fields. (Schema inference ignores unexported fields.)
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return false, nil
}
if seen.has(t) {
return true, nil
}
fields, err := fieldCache.Fields(t)
if err != nil {
return false, err
}
seen = &typeList{t, seen}
// Because seen is a linked list, additions to it from one field's
// recursive call will not affect the value for subsequent fields' calls.
for _, field := range fields {
ok, err := hasRecursiveType(field.Type, seen)
if err != nil {
return false, err
}
if ok {
return true, nil
}
}
return false, nil
}
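// Illustrative note (not part of this file): the persistent linked list above
// matters for correctness. With a shared map as the visited set, a type that
// appears twice as a sibling, e.g.
//
//	type pair struct{ A, B leaf } // leaf is some non-recursive struct
//
// would be marked "seen" while inspecting A and then falsely reported as
// recursive when B is inspected. Because each recursive call prepends to an
// immutable list instead, siblings never observe each other's additions.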
// bigQuerySchemaJSONField is an individual field in a JSON BigQuery table schema definition
// (as generated by https://github.com/GoogleCloudPlatform/protoc-gen-bq-schema).
type bigQueryJSONField struct {
Description string `json:"description"`
Fields []bigQueryJSONField `json:"fields"`
Mode string `json:"mode"`
Name string `json:"name"`
Type string `json:"type"`
}
// convertSchemaFromJSON generates a Schema:
func convertSchemaFromJSON(fs []bigQueryJSONField) (Schema, error) {
convertedSchema := Schema{}
for _, f := range fs {
convertedFieldSchema := &FieldSchema{
Description: f.Description,
Name: f.Name,
Required: f.Mode == "REQUIRED",
Repeated: f.Mode == "REPEATED",
}
if len(f.Fields) > 0 {
convertedNestedFieldSchema, err := convertSchemaFromJSON(f.Fields)
if err != nil {
return nil, err
}
convertedFieldSchema.Schema = convertedNestedFieldSchema
}
// Check that the field-type (string) maps to a known FieldType:
if _, ok := fieldTypes[FieldType(f.Type)]; !ok {
return nil, fmt.Errorf("unknown field type (%v)", f.Type)
}
convertedFieldSchema.Type = FieldType(f.Type)
convertedSchema = append(convertedSchema, convertedFieldSchema)
}
return convertedSchema, nil
}
// SchemaFromJSON takes a JSON BigQuery table schema definition
// (as generated by https://github.com/GoogleCloudPlatform/protoc-gen-bq-schema)
// and returns a fully-populated Schema.
func SchemaFromJSON(schemaJSON []byte) (Schema, error) {
var bigQuerySchema []bigQueryJSONField
// Make sure we actually have some content:
if len(schemaJSON) == 0 {
return nil, errEmptyJSONSchema
}
if err := json.Unmarshal(schemaJSON, &bigQuerySchema); err != nil {
return nil, err
}
return convertSchemaFromJSON(bigQuerySchema)
}
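// Illustrative sketch (not part of this file): feeding SchemaFromJSON the kind
// of JSON definition produced by protoc-gen-bq-schema or similar tooling. The
// field names below are placeholders.
//
//	schema, err := SchemaFromJSON([]byte(`[
//		{"name": "id",   "type": "INTEGER", "mode": "REQUIRED"},
//		{"name": "tags", "type": "STRING",  "mode": "REPEATED"}
//	]`))
//	// schema[0]: id (INTEGER, required); schema[1]: tags (STRING, repeated).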
type noStructError struct {
typ reflect.Type
}
func (e noStructError) Error() string {
return fmt.Sprintf("bigquery: can only infer schema from struct or pointer to struct, not %s", e.typ)
}
type badNullableError struct {
name string
typ reflect.Type
}
func (e badNullableError) Error() string {
return fmt.Sprintf(`bigquery: field %q of type %s: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`, e.name, e.typ)
}
type unsupportedFieldTypeError struct {
name string
typ reflect.Type
}
func (e unsupportedFieldTypeError) Error() string {
return fmt.Sprintf("bigquery: field %q: type %s is not supported", e.name, e.typ)
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,10 +16,14 @@ package bigquery
import (
	"fmt"
	"math/big"
	"reflect"
	"testing"
	"time"

	"cloud.google.com/go/civil"
	"cloud.google.com/go/internal/pretty"
	"cloud.google.com/go/internal/testutil"

	bq "google.golang.org/api/bigquery/v2"
)
@@ -139,6 +143,42 @@ func TestSchemaConversion(t *testing.T) {
fieldSchema("desc", "name", "TIMESTAMP", false, false), fieldSchema("desc", "name", "TIMESTAMP", false, false),
}, },
}, },
{
// civil times
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "f1", "TIME", ""),
bqTableFieldSchema("desc", "f2", "DATE", ""),
bqTableFieldSchema("desc", "f3", "DATETIME", ""),
},
},
schema: Schema{
fieldSchema("desc", "f1", "TIME", false, false),
fieldSchema("desc", "f2", "DATE", false, false),
fieldSchema("desc", "f3", "DATETIME", false, false),
},
},
{
// numeric
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "n", "NUMERIC", ""),
},
},
schema: Schema{
fieldSchema("desc", "n", "NUMERIC", false, false),
},
},
{
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("geo", "g", "GEOGRAPHY", ""),
},
},
schema: Schema{
fieldSchema("geo", "g", "GEOGRAPHY", false, false),
},
},
	{
		// nested
		bqSchema: &bq.TableSchema{
@@ -160,7 +200,7 @@ func TestSchemaConversion(t *testing.T) {
Name: "outer", Name: "outer",
Required: true, Required: true,
Type: "RECORD", Type: "RECORD",
Schema: []*FieldSchema{ Schema: Schema{
{ {
Description: "inner field", Description: "inner field",
Name: "inner", Name: "inner",
@@ -171,14 +211,14 @@ func TestSchemaConversion(t *testing.T) {
			},
		},
	}
	for _, tc := range testCases {
		bqSchema := tc.schema.toBQ()
		if !testutil.Equal(bqSchema, tc.bqSchema) {
			t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
				pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
		}
		schema := bqToSchema(tc.bqSchema)
		if !testutil.Equal(schema, tc.schema) {
			t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
		}
	}
@@ -198,12 +238,9 @@ type allSignedIntegers struct {
}

type allUnsignedIntegers struct {
	Uint32 uint32
	Uint16 uint16
	Uint8  uint8
}

type allFloat struct {
@@ -217,7 +254,30 @@ type allBoolean struct {
}

type allTime struct {
	Timestamp time.Time
	Time      civil.Time
	Date      civil.Date
	DateTime  civil.DateTime
}

type allNumeric struct {
	Numeric *big.Rat
}

func reqField(name, typ string) *FieldSchema {
	return &FieldSchema{
		Name:     name,
		Type:     FieldType(typ),
		Required: true,
	}
}

func optField(name, typ string) *FieldSchema {
	return &FieldSchema{
		Name:     name,
		Type:     FieldType(typ),
		Required: false,
	}
}

func TestSimpleInference(t *testing.T) {
@@ -228,64 +288,76 @@ func TestSimpleInference(t *testing.T) {
		{
			in: allSignedIntegers{},
			want: Schema{
				reqField("Int64", "INTEGER"),
				reqField("Int32", "INTEGER"),
				reqField("Int16", "INTEGER"),
				reqField("Int8", "INTEGER"),
				reqField("Int", "INTEGER"),
			},
		},
		{
			in: allUnsignedIntegers{},
			want: Schema{
				reqField("Uint32", "INTEGER"),
				reqField("Uint16", "INTEGER"),
				reqField("Uint8", "INTEGER"),
			},
		},
		{
			in: allFloat{},
			want: Schema{
				reqField("Float64", "FLOAT"),
				reqField("Float32", "FLOAT"),
			},
		},
		{
			in: allBoolean{},
			want: Schema{
				reqField("Bool", "BOOLEAN"),
			},
		},
		{
			in: &allBoolean{},
			want: Schema{
				reqField("Bool", "BOOLEAN"),
			},
		},
		{
			in: allTime{},
			want: Schema{
				reqField("Timestamp", "TIMESTAMP"),
				reqField("Time", "TIME"),
				reqField("Date", "DATE"),
				reqField("DateTime", "DATETIME"),
			},
		},
		{
			in: &allNumeric{},
			want: Schema{
				reqField("Numeric", "NUMERIC"),
			},
		},
		{
			in: allStrings{},
			want: Schema{
				reqField("String", "STRING"),
				reqField("ByteSlice", "BYTES"),
			},
		},
	}
	for _, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}

type containsNested struct {
	hidden    string
	NotNested int
	Nested    struct {
		Inside int
@@ -301,6 +373,14 @@ type containsDoubleNested struct {
	}
}
type ptrNested struct {
Ptr *struct{ Inside int }
}
type dup struct { // more than one field of the same struct type
A, B allBoolean
}
func TestNestedInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
@@ -309,70 +389,97 @@ func TestNestedInference(t *testing.T) {
		{
			in: containsNested{},
			want: Schema{
				reqField("NotNested", "INTEGER"),
				&FieldSchema{
					Name:     "Nested",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
		{
			in: containsDoubleNested{},
			want: Schema{
				reqField("NotNested", "INTEGER"),
				&FieldSchema{
					Name:     "Nested",
					Required: true,
					Type:     "RECORD",
					Schema: Schema{
						{
							Name:     "InsideNested",
							Required: true,
							Type:     "RECORD",
							Schema:   Schema{reqField("Inside", "INTEGER")},
						},
					},
				},
			},
		},
		{
			in: ptrNested{},
			want: Schema{
				&FieldSchema{
					Name:     "Ptr",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
		{
			in: dup{},
			want: Schema{
				&FieldSchema{
					Name:     "A",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Bool", "BOOLEAN")},
				},
				&FieldSchema{
					Name:     "B",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Bool", "BOOLEAN")},
				},
			},
		},
	}
	for _, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}

type repeated struct {
	NotRepeated       []byte
	RepeatedByteSlice [][]byte
	Slice             []int
	Array             [5]bool
}

type nestedRepeated struct {
	NotRepeated int
	Repeated    []struct {
		Inside int
	}
	RepeatedPtr []*struct{ Inside int }
}

func repField(name, typ string) *FieldSchema {
	return &FieldSchema{
		Name:     name,
		Type:     FieldType(typ),
		Repeated: true,
	}
}

func TestRepeatedInference(t *testing.T) {
@@ -381,28 +488,29 @@ func TestRepeatedInference(t *testing.T) {
		want Schema
	}{
		{
			in: repeated{},
			want: Schema{
				reqField("NotRepeated", "BYTES"),
				repField("RepeatedByteSlice", "BYTES"),
				repField("Slice", "INTEGER"),
				repField("Array", "BOOLEAN"),
			},
		},
		{
			in: nestedRepeated{},
			want: Schema{
				reqField("NotRepeated", "INTEGER"),
				{
					Name:     "Repeated",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
				{
					Name:     "RepeatedPtr",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
@@ -413,83 +521,524 @@ func TestRepeatedInference(t *testing.T) {
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}
} }
type allNulls struct {
A NullInt64
B NullFloat64
C NullBool
D NullString
E NullTimestamp
F NullTime
G NullDate
H NullDateTime
I NullGeography
}
func TestNullInference(t *testing.T) {
got, err := InferSchema(allNulls{})
if err != nil {
t.Fatal(err)
}
want := Schema{
optField("A", "INTEGER"),
optField("B", "FLOAT"),
optField("C", "BOOLEAN"),
optField("D", "STRING"),
optField("E", "TIMESTAMP"),
optField("F", "TIME"),
optField("G", "DATE"),
optField("H", "DATETIME"),
optField("I", "GEOGRAPHY"),
}
if diff := testutil.Diff(got, want); diff != "" {
t.Error(diff)
}
}
type Embedded struct {
	Embedded int
}
type embedded struct {
Embedded2 int
}
type nestedEmbedded struct {
	Embedded
embedded
}
func TestEmbeddedInference(t *testing.T) {
got, err := InferSchema(nestedEmbedded{})
if err != nil {
t.Fatal(err)
}
want := Schema{
reqField("Embedded", "INTEGER"),
reqField("Embedded2", "INTEGER"),
}
if !testutil.Equal(got, want) {
t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want))
}
}
func TestRecursiveInference(t *testing.T) {
type List struct {
Val int
Next *List
}
_, err := InferSchema(List{})
if err == nil {
t.Fatal("got nil, want error")
}
}
type withTags struct {
NoTag int
ExcludeTag int `bigquery:"-"`
SimpleTag int `bigquery:"simple_tag"`
UnderscoreTag int `bigquery:"_id"`
MixedCase int `bigquery:"MIXEDcase"`
Nullable []byte `bigquery:",nullable"`
NullNumeric *big.Rat `bigquery:",nullable"`
}
type withTagsNested struct {
Nested withTags `bigquery:"nested"`
NestedAnonymous struct {
ExcludeTag int `bigquery:"-"`
Inside int `bigquery:"inside"`
} `bigquery:"anon"`
PNested *struct{ X int } // not nullable, for backwards compatibility
PNestedNullable *struct{ X int } `bigquery:",nullable"`
}
type withTagsRepeated struct {
Repeated []withTags `bigquery:"repeated"`
RepeatedAnonymous []struct {
ExcludeTag int `bigquery:"-"`
Inside int `bigquery:"inside"`
} `bigquery:"anon"`
}
type withTagsEmbedded struct {
withTags
}
var withTagsSchema = Schema{
reqField("NoTag", "INTEGER"),
reqField("simple_tag", "INTEGER"),
reqField("_id", "INTEGER"),
reqField("MIXEDcase", "INTEGER"),
optField("Nullable", "BYTES"),
optField("NullNumeric", "NUMERIC"),
}
func TestTagInference(t *testing.T) {
testCases := []struct {
in interface{}
want Schema
}{
{
in: withTags{},
want: withTagsSchema,
},
{
in: withTagsNested{},
want: Schema{
&FieldSchema{
Name: "nested",
Required: true,
Type: "RECORD",
Schema: withTagsSchema,
},
&FieldSchema{
Name: "anon",
Required: true,
Type: "RECORD",
Schema: Schema{reqField("inside", "INTEGER")},
},
&FieldSchema{
Name: "PNested",
Required: true,
Type: "RECORD",
Schema: Schema{reqField("X", "INTEGER")},
},
&FieldSchema{
Name: "PNestedNullable",
Required: false,
Type: "RECORD",
Schema: Schema{reqField("X", "INTEGER")},
},
},
},
{
in: withTagsRepeated{},
want: Schema{
&FieldSchema{
Name: "repeated",
Repeated: true,
Type: "RECORD",
Schema: withTagsSchema,
},
&FieldSchema{
Name: "anon",
Repeated: true,
Type: "RECORD",
Schema: Schema{reqField("inside", "INTEGER")},
},
},
},
{
in: withTagsEmbedded{},
want: withTagsSchema,
},
}
for i, tc := range testCases {
got, err := InferSchema(tc.in)
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
}
if !testutil.Equal(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
pretty.Value(got), pretty.Value(tc.want))
}
}
}
func TestTagInferenceErrors(t *testing.T) {
testCases := []interface{}{
struct {
LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"`
}{},
struct {
UnsupporedStartChar int `bigquery:"øab"`
}{},
struct {
UnsupportedEndChar int `bigquery:"abø"`
}{},
struct {
UnsupportedMiddleChar int `bigquery:"aøb"`
}{},
struct {
StartInt int `bigquery:"1abc"`
}{},
struct {
Hyphens int `bigquery:"a-b"`
}{},
}
for i, tc := range testCases {
_, got := InferSchema(tc)
if _, ok := got.(invalidFieldNameError); !ok {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant invalidFieldNameError", i, got)
}
}
_, err := InferSchema(struct {
X int `bigquery:",optional"`
}{})
if err == nil {
t.Error("got nil, want error")
}
}
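// Note: the failing cases above encode BigQuery's column-name rules: a name
// may contain only letters, digits and underscores, must start with a letter
// or underscore, and is limited to 128 characters (the first case's tag is
// 129 characters long).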
func TestSchemaErrors(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want interface{}
	}{
		{
			in:   []byte{},
			want: noStructError{},
		},
		{
			in:   new(int),
			want: noStructError{},
		},
		{
			in:   struct{ Uint uint }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Uint64 uint64 }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Uintptr uintptr }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Complex complex64 }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Map map[string]int }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Chan chan bool }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Ptr *int }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Interface interface{} }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ MultiDimensional [][]int }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ MultiDimensional [][][]byte }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ SliceOfPointer []*int }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ SliceOfNull []NullInt64 }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ ChanSlice []chan bool }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ NestedChan struct{ Chan []chan bool } }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in: struct {
				X int `bigquery:",nullable"`
			}{},
			want: badNullableError{},
		},
		{
			in: struct {
				X bool `bigquery:",nullable"`
			}{},
			want: badNullableError{},
		},
		{
			in: struct {
				X struct{ N int } `bigquery:",nullable"`
			}{},
			want: badNullableError{},
		},
		{
			in: struct {
				X []int `bigquery:",nullable"`
			}{},
			want: badNullableError{},
		},
		{
			in:   struct{ X *[]byte }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ X *[]int }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ X *int }{},
			want: unsupportedFieldTypeError{},
		},
	}
	for _, tc := range testCases {
		_, got := InferSchema(tc.in)
		if reflect.TypeOf(got) != reflect.TypeOf(tc.want) {
			t.Errorf("%#v: got:\n%#v\nwant type %T", tc.in, got, tc.want)
		}
	}
}
func TestHasRecursiveType(t *testing.T) {
type (
nonStruct int
nonRec struct{ A string }
dup struct{ A, B nonRec }
rec struct {
A int
B *rec
}
recUnexported struct {
A int
}
hasRec struct {
A int
R *rec
}
recSlicePointer struct {
A []*recSlicePointer
}
)
for _, test := range []struct {
in interface{}
want bool
}{
{nonStruct(0), false},
{nonRec{}, false},
{dup{}, false},
{rec{}, true},
{recUnexported{}, false},
{hasRec{}, true},
{&recSlicePointer{}, true},
} {
got, err := hasRecursiveType(reflect.TypeOf(test.in), nil)
if err != nil {
t.Fatal(err)
}
if got != test.want {
t.Errorf("%T: got %t, want %t", test.in, got, test.want)
}
}
}
func TestSchemaFromJSON(t *testing.T) {
testCasesExpectingSuccess := []struct {
bqSchemaJSON []byte
description string
expectedSchema Schema
}{
{
description: "Flat table with a mixture of NULLABLE and REQUIRED fields",
bqSchemaJSON: []byte(`
[
{"name":"flat_string","type":"STRING","mode":"NULLABLE","description":"Flat nullable string"},
{"name":"flat_bytes","type":"BYTES","mode":"REQUIRED","description":"Flat required BYTES"},
{"name":"flat_integer","type":"INTEGER","mode":"NULLABLE","description":"Flat nullable INTEGER"},
{"name":"flat_float","type":"FLOAT","mode":"REQUIRED","description":"Flat required FLOAT"},
{"name":"flat_boolean","type":"BOOLEAN","mode":"NULLABLE","description":"Flat nullable BOOLEAN"},
{"name":"flat_timestamp","type":"TIMESTAMP","mode":"REQUIRED","description":"Flat required TIMESTAMP"},
{"name":"flat_date","type":"DATE","mode":"NULLABLE","description":"Flat required DATE"},
{"name":"flat_time","type":"TIME","mode":"REQUIRED","description":"Flat nullable TIME"},
{"name":"flat_datetime","type":"DATETIME","mode":"NULLABLE","description":"Flat required DATETIME"},
{"name":"flat_numeric","type":"NUMERIC","mode":"REQUIRED","description":"Flat nullable NUMERIC"},
{"name":"flat_geography","type":"GEOGRAPHY","mode":"REQUIRED","description":"Flat required GEOGRAPHY"}
]`),
expectedSchema: Schema{
fieldSchema("Flat nullable string", "flat_string", "STRING", false, false),
fieldSchema("Flat required BYTES", "flat_bytes", "BYTES", false, true),
fieldSchema("Flat nullable INTEGER", "flat_integer", "INTEGER", false, false),
fieldSchema("Flat required FLOAT", "flat_float", "FLOAT", false, true),
fieldSchema("Flat nullable BOOLEAN", "flat_boolean", "BOOLEAN", false, false),
fieldSchema("Flat required TIMESTAMP", "flat_timestamp", "TIMESTAMP", false, true),
fieldSchema("Flat required DATE", "flat_date", "DATE", false, false),
fieldSchema("Flat nullable TIME", "flat_time", "TIME", false, true),
fieldSchema("Flat required DATETIME", "flat_datetime", "DATETIME", false, false),
fieldSchema("Flat nullable NUMERIC", "flat_numeric", "NUMERIC", false, true),
fieldSchema("Flat required GEOGRAPHY", "flat_geography", "GEOGRAPHY", false, true),
},
},
{
description: "Table with a nested RECORD",
bqSchemaJSON: []byte(`
[
{"name":"flat_string","type":"STRING","mode":"NULLABLE","description":"Flat nullable string"},
{"name":"nested_record","type":"RECORD","mode":"NULLABLE","description":"Nested nullable RECORD","fields":[{"name":"record_field_1","type":"STRING","mode":"NULLABLE","description":"First nested record field"},{"name":"record_field_2","type":"INTEGER","mode":"REQUIRED","description":"Second nested record field"}]}
]`),
expectedSchema: Schema{
fieldSchema("Flat nullable string", "flat_string", "STRING", false, false),
&FieldSchema{
Description: "Nested nullable RECORD",
Name: "nested_record",
Required: false,
Type: "RECORD",
Schema: Schema{
{
Description: "First nested record field",
Name: "record_field_1",
Required: false,
Type: "STRING",
},
{
Description: "Second nested record field",
Name: "record_field_2",
Required: true,
Type: "INTEGER",
},
},
},
},
},
{
description: "Table with a repeated RECORD",
bqSchemaJSON: []byte(`
[
{"name":"flat_string","type":"STRING","mode":"NULLABLE","description":"Flat nullable string"},
{"name":"nested_record","type":"RECORD","mode":"REPEATED","description":"Nested nullable RECORD","fields":[{"name":"record_field_1","type":"STRING","mode":"NULLABLE","description":"First nested record field"},{"name":"record_field_2","type":"INTEGER","mode":"REQUIRED","description":"Second nested record field"}]}
]`),
expectedSchema: Schema{
fieldSchema("Flat nullable string", "flat_string", "STRING", false, false),
&FieldSchema{
Description: "Nested nullable RECORD",
Name: "nested_record",
Repeated: true,
Required: false,
Type: "RECORD",
Schema: Schema{
{
Description: "First nested record field",
Name: "record_field_1",
Required: false,
Type: "STRING",
},
{
Description: "Second nested record field",
Name: "record_field_2",
Required: true,
Type: "INTEGER",
},
},
},
},
},
}
for _, tc := range testCasesExpectingSuccess {
convertedSchema, err := SchemaFromJSON(tc.bqSchemaJSON)
if err != nil {
t.Errorf("encountered an error when converting JSON table schema (%s): %v", tc.description, err)
continue
}
if !testutil.Equal(convertedSchema, tc.expectedSchema) {
t.Errorf("generated JSON table schema (%s) differs from the expected schema", tc.description)
}
}
testCasesExpectingFailure := []struct {
bqSchemaJSON []byte
description string
}{
{
description: "Schema with invalid JSON",
bqSchemaJSON: []byte(`This is not JSON`),
},
{
description: "Schema with unknown field type",
bqSchemaJSON: []byte(`[{"name":"strange_type","type":"STRANGE","description":"This type should not exist"}]`),
},
{
description: "Schema with zero length",
bqSchemaJSON: []byte(``),
},
}
for _, tc := range testCasesExpectingFailure {
_, err := SchemaFromJSON(tc.bqSchemaJSON)
if err == nil {
t.Errorf("converting this schema should have returned an error (%s): %v", tc.description, err)
continue
		}
	}
}

View File

@@ -1,484 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"fmt"
"net/http"
"sync"
"time"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
// service provides an internal abstraction to isolate the generated
// BigQuery API; most of this package uses this interface instead.
// The single implementation, *bigqueryService, contains all the knowledge
// of the generated BigQuery API.
type service interface {
// Jobs
insertJob(ctx context.Context, job *bq.Job, projectId string) (*Job, error)
getJobType(ctx context.Context, projectId, jobID string) (jobType, error)
jobCancel(ctx context.Context, projectId, jobID string) error
jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)
// Tables
createTable(ctx context.Context, conf *createTableConf) error
getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
deleteTable(ctx context.Context, projectID, datasetID, tableID string) error
listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error)
patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error)
// Table data
readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error
// Datasets
insertDataset(ctx context.Context, datasetID, projectID string) error
// Misc
// readQuery reads data resulting from a query job. If the job is
// incomplete, an errIncompleteJob is returned. readQuery may be called
// repeatedly to poll for job completion.
readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error)
}
type bigqueryService struct {
s *bq.Service
}
func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) {
s, err := bq.New(client)
if err != nil {
return nil, fmt.Errorf("constructing bigquery client: %v", err)
}
s.BasePath = endpoint
return &bigqueryService{s: s}, nil
}
// getPages calls the supplied getPage function repeatedly until there are no pages left to get.
// token is the token of the initial page to start from. Use an empty string to start from the beginning.
func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
for {
var err error
token, err = getPage(token)
if err != nil {
return err
}
if token == "" {
return nil
}
}
}
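// Illustrative sketch (not part of the vendored file): getPages drives any
// page-token-based listing in this file. A hypothetical caller accumulating
// table IDs via the listTables method defined below might look like:
//
//	var ids []string
//	err := getPages("", func(tok string) (string, error) {
//		tables, next, err := s.listTables(ctx, projectID, datasetID, tok)
//		if err != nil {
//			return "", err
//		}
//		for _, t := range tables {
//			ids = append(ids, t.TableID)
//		}
//		return next, nil
//	})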
func (s *bigqueryService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {
res, err := s.s.Jobs.Insert(projectID, job).Context(ctx).Do()
if err != nil {
return nil, err
}
return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil
}
type pagingConf struct {
recordsPerRequest int64
setRecordsPerRequest bool
startIndex uint64
}
type readTableConf struct {
projectID, datasetID, tableID string
paging pagingConf
schema Schema // lazily initialized when the first page of data is fetched.
}
type readDataResult struct {
pageToken string
rows [][]Value
totalRows uint64
schema Schema
}
type readQueryConf struct {
projectID, jobID string
paging pagingConf
}
func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
// Prepare request to fetch one page of table data.
req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
if pageToken != "" {
req.PageToken(pageToken)
} else {
req.StartIndex(conf.paging.startIndex)
}
if conf.paging.setRecordsPerRequest {
req.MaxResults(conf.paging.recordsPerRequest)
}
// Fetch the table schema in the background, if necessary.
var schemaErr error
var schemaFetch sync.WaitGroup
if conf.schema == nil {
schemaFetch.Add(1)
go func() {
defer schemaFetch.Done()
var t *bq.Table
t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
Fields("schema").
Context(ctx).
Do()
if schemaErr == nil && t.Schema != nil {
conf.schema = convertTableSchema(t.Schema)
}
}()
}
res, err := req.Context(ctx).Do()
if err != nil {
return nil, err
}
schemaFetch.Wait()
if schemaErr != nil {
return nil, schemaErr
}
result := &readDataResult{
pageToken: res.PageToken,
totalRows: uint64(res.TotalRows),
schema: conf.schema,
}
result.rows, err = convertRows(res.Rows, conf.schema)
if err != nil {
return nil, err
}
return result, nil
}
var errIncompleteJob = errors.New("internal error: query results not available because job is not complete")
// getQueryResultsTimeout controls the maximum duration of a request to the
// BigQuery GetQueryResults endpoint. Setting a long timeout here does not
// cause increased overall latency, as results are returned as soon as they are
// available.
const getQueryResultsTimeout = time.Minute
func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) {
req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID).
TimeoutMs(getQueryResultsTimeout.Nanoseconds() / 1e6)
if pageToken != "" {
req.PageToken(pageToken)
} else {
req.StartIndex(conf.paging.startIndex)
}
if conf.paging.setRecordsPerRequest {
req.MaxResults(conf.paging.recordsPerRequest)
}
res, err := req.Context(ctx).Do()
if err != nil {
return nil, err
}
if !res.JobComplete {
return nil, errIncompleteJob
}
schema := convertTableSchema(res.Schema)
result := &readDataResult{
pageToken: res.PageToken,
totalRows: res.TotalRows,
schema: schema,
}
result.rows, err = convertRows(res.Rows, schema)
if err != nil {
return nil, err
}
return result, nil
}
type insertRowsConf struct {
templateSuffix string
ignoreUnknownValues bool
skipInvalidRows bool
}
func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
req := &bq.TableDataInsertAllRequest{
TemplateSuffix: conf.templateSuffix,
IgnoreUnknownValues: conf.ignoreUnknownValues,
SkipInvalidRows: conf.skipInvalidRows,
}
for _, row := range rows {
m := make(map[string]bq.JsonValue)
for k, v := range row.Row {
m[k] = bq.JsonValue(v)
}
req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
InsertId: row.InsertID,
Json: m,
})
}
res, err := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx).Do()
if err != nil {
return err
}
if len(res.InsertErrors) == 0 {
return nil
}
var errs PutMultiError
for _, e := range res.InsertErrors {
if int(e.Index) > len(rows) {
return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
}
rie := RowInsertionError{
InsertID: rows[e.Index].InsertID,
RowIndex: int(e.Index),
}
for _, errp := range e.Errors {
rie.Errors = append(rie.Errors, errorFromErrorProto(errp))
}
errs = append(errs, rie)
}
return errs
}
type jobType int
const (
copyJobType jobType = iota
extractJobType
loadJobType
queryJobType
)
func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) {
res, err := s.s.Jobs.Get(projectID, jobID).
Fields("configuration").
Context(ctx).
Do()
if err != nil {
return 0, err
}
switch {
case res.Configuration.Copy != nil:
return copyJobType, nil
case res.Configuration.Extract != nil:
return extractJobType, nil
case res.Configuration.Load != nil:
return loadJobType, nil
case res.Configuration.Query != nil:
return queryJobType, nil
default:
return 0, errors.New("unknown job type")
}
}
func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
// Jobs.Cancel returns a job entity, but the only relevant piece of
// data it may contain (the status of the job) is unreliable. From the
// docs: "This call will return immediately, and the client will need
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
_, err := s.s.Jobs.Cancel(projectID, jobID).
Fields(). // We don't need any of the response data.
Context(ctx).
Do()
return err
}
func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
res, err := s.s.Jobs.Get(projectID, jobID).
Fields("status"). // Only fetch what we need.
Context(ctx).
Do()
if err != nil {
return nil, err
}
return jobStatusFromProto(res.Status)
}
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
state, ok := stateMap[status.State]
if !ok {
return nil, fmt.Errorf("unexpected job state: %v", status.State)
}
newStatus := &JobStatus{
State: state,
err: nil,
}
if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
newStatus.err = err
}
for _, ep := range status.Errors {
newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))
}
return newStatus, nil
}
// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
var tables []*Table
res, err := s.s.Tables.List(projectID, datasetID).
PageToken(pageToken).
Context(ctx).
Do()
if err != nil {
return nil, "", err
}
for _, t := range res.Tables {
tables = append(tables, s.convertListedTable(t))
}
return tables, res.NextPageToken, nil
}
type createTableConf struct {
projectID, datasetID, tableID string
expiration time.Time
viewQuery string
schema *bq.TableSchema
}
// createTable creates a table in the BigQuery service.
// expiration is an optional time after which the table will be deleted and its storage reclaimed.
// If viewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view.
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
table := &bq.Table{
TableReference: &bq.TableReference{
ProjectId: conf.projectID,
DatasetId: conf.datasetID,
TableId: conf.tableID,
},
}
if !conf.expiration.IsZero() {
table.ExpirationTime = conf.expiration.UnixNano() / 1000
}
// TODO(jba): make it impossible to provide both a view query and a schema.
if conf.viewQuery != "" {
table.View = &bq.ViewDefinition{
Query: conf.viewQuery,
}
}
if conf.schema != nil {
table.Schema = conf.schema
}
_, err := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx).Do()
return err
}
func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
table, err := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do()
if err != nil {
return nil, err
}
return bqTableToMetadata(table), nil
}
func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do()
}
func bqTableToMetadata(t *bq.Table) *TableMetadata {
md := &TableMetadata{
Description: t.Description,
Name: t.FriendlyName,
Type: TableType(t.Type),
ID: t.Id,
NumBytes: t.NumBytes,
NumRows: t.NumRows,
}
if t.ExpirationTime != 0 {
md.ExpirationTime = time.Unix(0, t.ExpirationTime*1e6)
}
if t.CreationTime != 0 {
md.CreationTime = time.Unix(0, t.CreationTime*1e6)
}
if t.LastModifiedTime != 0 {
md.LastModifiedTime = time.Unix(0, int64(t.LastModifiedTime*1e6))
}
if t.Schema != nil {
md.Schema = convertTableSchema(t.Schema)
}
if t.View != nil {
md.View = t.View.Query
}
return md
}
func (s *bigqueryService) convertListedTable(t *bq.TableListTables) *Table {
return &Table{
ProjectID: t.TableReference.ProjectId,
DatasetID: t.TableReference.DatasetId,
TableID: t.TableReference.TableId,
service: s,
}
}
// patchTableConf contains fields to be patched.
type patchTableConf struct {
// These fields are omitted from the patch operation if nil.
Description *string
Name *string
}
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
t := &bq.Table{}
forceSend := func(field string) {
t.ForceSendFields = append(t.ForceSendFields, field)
}
if conf.Description != nil {
t.Description = *conf.Description
forceSend("Description")
}
if conf.Name != nil {
t.FriendlyName = *conf.Name
forceSend("FriendlyName")
}
table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
Context(ctx).
Do()
if err != nil {
return nil, err
}
return bqTableToMetadata(table), nil
}
func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error {
ds := &bq.Dataset{
DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
}
_, err := s.s.Datasets.Insert(projectID, ds).Context(ctx).Do()
return err
}

View File

@@ -0,0 +1,267 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package storage
import (
"context"
"fmt"
"time"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/option"
"google.golang.org/api/transport"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// BigQueryStorageCallOptions contains the retry settings for each method of BigQueryStorageClient.
type BigQueryStorageCallOptions struct {
CreateReadSession []gax.CallOption
ReadRows []gax.CallOption
BatchCreateReadSessionStreams []gax.CallOption
FinalizeStream []gax.CallOption
SplitReadStream []gax.CallOption
}
func defaultBigQueryStorageClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("bigquerystorage.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultBigQueryStorageCallOptions() *BigQueryStorageCallOptions {
retry := map[[2]string][]gax.CallOption{
{"create_read_session", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &BigQueryStorageCallOptions{
CreateReadSession: retry[[2]string{"create_read_session", "idempotent"}],
ReadRows: retry[[2]string{"default", "idempotent"}],
BatchCreateReadSessionStreams: retry[[2]string{"default", "idempotent"}],
FinalizeStream: retry[[2]string{"default", "idempotent"}],
SplitReadStream: retry[[2]string{"default", "idempotent"}],
}
}
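// Illustrative sketch (not part of the generated file): a caller can swap in
// its own retry policy after constructing the client, using the same gax
// primitives used above; the field names match the exported CallOptions.
//
//	c, err := NewBigQueryStorageClient(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	c.CallOptions.CreateReadSession = []gax.CallOption{
//		gax.WithRetry(func() gax.Retryer {
//			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
//				Initial:    50 * time.Millisecond,
//				Max:        30 * time.Second,
//				Multiplier: 2,
//			})
//		}),
//	}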
// BigQueryStorageClient is a client for interacting with BigQuery Storage API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type BigQueryStorageClient struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
bigQueryStorageClient storagepb.BigQueryStorageClient
// The call options for this service.
CallOptions *BigQueryStorageCallOptions
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewBigQueryStorageClient creates a new big query storage client.
//
// BigQuery storage API.
//
// The BigQuery storage API can be used to read data stored in BigQuery.
func NewBigQueryStorageClient(ctx context.Context, opts ...option.ClientOption) (*BigQueryStorageClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultBigQueryStorageClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &BigQueryStorageClient{
conn: conn,
CallOptions: defaultBigQueryStorageCallOptions(),
bigQueryStorageClient: storagepb.NewBigQueryStorageClient(conn),
}
c.setGoogleClientInfo()
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *BigQueryStorageClient) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *BigQueryStorageClient) Close() error {
return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *BigQueryStorageClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// CreateReadSession creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Read sessions automatically expire 24 hours after they are created and do
// not require manual clean-up by the caller.
func (c *BigQueryStorageClient) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest, opts ...gax.CallOption) (*storagepb.ReadSession, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v", "table_reference.project_id", req.GetTableReference().GetProjectId(), "table_reference.dataset_id", req.GetTableReference().GetDatasetId()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CreateReadSession[0:len(c.CallOptions.CreateReadSession):len(c.CallOptions.CreateReadSession)], opts...)
var resp *storagepb.ReadSession
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.CreateReadSession(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ReadRows reads rows from the table in the format prescribed by the read session.
// Each response contains one or more table rows, up to a maximum of 10 MiB
// per response; read requests which attempt to read individual rows larger
// than this will fail.
//
// Each request also returns a set of stream statistics reflecting the
// estimated total number of rows in the read stream. This number is computed
// based on the total table size and the number of active streams in the read
// session, and may change as other streams continue to read data.
func (c *BigQueryStorageClient) ReadRows(ctx context.Context, req *storagepb.ReadRowsRequest, opts ...gax.CallOption) (storagepb.BigQueryStorage_ReadRowsClient, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "read_position.stream.name", req.GetReadPosition().GetStream().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ReadRows[0:len(c.CallOptions.ReadRows):len(c.CallOptions.ReadRows)], opts...)
var resp storagepb.BigQueryStorage_ReadRowsClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.ReadRows(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// BatchCreateReadSessionStreams creates additional streams for a ReadSession. This API can be used to
// dynamically adjust the parallelism of a batch processing task upwards by
// adding additional workers.
func (c *BigQueryStorageClient) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest, opts ...gax.CallOption) (*storagepb.BatchCreateReadSessionStreamsResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "session.name", req.GetSession().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.BatchCreateReadSessionStreams[0:len(c.CallOptions.BatchCreateReadSessionStreams):len(c.CallOptions.BatchCreateReadSessionStreams)], opts...)
var resp *storagepb.BatchCreateReadSessionStreamsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.BatchCreateReadSessionStreams(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// FinalizeStream triggers the graceful termination of a single stream in a ReadSession. This
// API can be used to dynamically adjust the parallelism of a batch processing
// task downwards without losing data.
//
// This API does not delete the stream -- it remains visible in the
// ReadSession, and any data processed by the stream is not released to other
// streams. However, no additional data will be assigned to the stream once
// this call completes. Callers must continue reading data on the stream until
// the end of the stream is reached so that data which has already been
// assigned to the stream will be processed.
//
// This method will return an error if there are no other live streams
// in the Session, or if SplitReadStream() has been called on the given
// Stream.
func (c *BigQueryStorageClient) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "stream.name", req.GetStream().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.FinalizeStream[0:len(c.CallOptions.FinalizeStream):len(c.CallOptions.FinalizeStream)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.bigQueryStorageClient.FinalizeStream(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// SplitReadStream splits a given read stream into two Streams. These streams are referred to
// as the primary and the residual of the split. The original stream can still
// be read from in the same manner as before. Both of the returned streams can
// also be read from, and the total rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back to back in the
// original Stream. Concretely, it is guaranteed that for streams Original,
// Primary, and Residual, that Original[0-j] = Primary[0-j] and
// Original[j-n] = Residual[0-m] once the streams have been read to
// completion.
//
// This method is guaranteed to be idempotent.
func (c *BigQueryStorageClient) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest, opts ...gax.CallOption) (*storagepb.SplitReadStreamResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "original_stream.name", req.GetOriginalStream().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.SplitReadStream[0:len(c.CallOptions.SplitReadStream):len(c.CallOptions.SplitReadStream)], opts...)
var resp *storagepb.SplitReadStreamResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.SplitReadStream(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

View File

@@ -0,0 +1,132 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package storage_test
import (
"context"
"io"
storage "cloud.google.com/go/bigquery/storage/apiv1beta1"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)
func ExampleNewBigQueryStorageClient() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExampleBigQueryStorageClient_CreateReadSession() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &storagepb.CreateReadSessionRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CreateReadSession(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleBigQueryStorageClient_ReadRows() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &storagepb.ReadRowsRequest{
// TODO: Fill request struct fields.
}
stream, err := c.ReadRows(ctx, req)
if err != nil {
// TODO: Handle error.
}
for {
resp, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleBigQueryStorageClient_BatchCreateReadSessionStreams() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &storagepb.BatchCreateReadSessionStreamsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.BatchCreateReadSessionStreams(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleBigQueryStorageClient_FinalizeStream() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &storagepb.FinalizeStreamRequest{
// TODO: Fill request struct fields.
}
err = c.FinalizeStream(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleBigQueryStorageClient_SplitReadStream() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &storagepb.SplitReadStreamRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SplitReadStream(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

View File

@@ -0,0 +1,100 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
// Package storage is an auto-generated package for the
// BigQuery Storage API.
//
// NOTE: This package is in beta. It is not stable, and may be subject to changes.
//
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
//
// For information about setting deadlines, reusing contexts, and more
// please visit godoc.org/cloud.google.com/go.
package storage // import "cloud.google.com/go/bigquery/storage/apiv1beta1"
import (
"context"
"runtime"
"strings"
"unicode"
"google.golang.org/grpc/metadata"
)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
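// For example, if the outgoing context already carries {"a": ["1"]} and one
// of the mds contains {"a": ["2"], "b": ["3"]}, the returned context's
// metadata holds {"a": ["1", "2"], "b": ["3"]}: values for duplicate keys
// are appended rather than replaced.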
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in a header.
func versionGo() string {
const develPrefix = "devel +"
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
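// Tracing the logic above: runtime.Version() == "go1.12.9" yields "1.12.9";
// "go1.13beta1" yields "1.13.0-beta1" (the prerelease is split off and ".0"
// appended); a development build such as "devel +7b90c1b Tue ..." yields
// "7b90c1b".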
const versionClient = "20190404"

View File

@@ -0,0 +1,452 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package storage
import (
emptypb "github.com/golang/protobuf/ptypes/empty"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)
import (
"context"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status
type mockBigQueryStorageServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
// in the future.
storagepb.BigQueryStorageServer
reqs []proto.Message
// If set, all calls return this error.
err error
// responses to return if err == nil
resps []proto.Message
}
func (s *mockBigQueryStorageServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*storagepb.ReadSession), nil
}
func (s *mockBigQueryStorageServer) ReadRows(req *storagepb.ReadRowsRequest, stream storagepb.BigQueryStorage_ReadRowsServer) error {
md, _ := metadata.FromIncomingContext(stream.Context())
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return s.err
}
for _, v := range s.resps {
if err := stream.Send(v.(*storagepb.ReadRowsResponse)); err != nil {
return err
}
}
return nil
}
func (s *mockBigQueryStorageServer) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest) (*storagepb.BatchCreateReadSessionStreamsResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*storagepb.BatchCreateReadSessionStreamsResponse), nil
}
func (s *mockBigQueryStorageServer) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest) (*emptypb.Empty, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*emptypb.Empty), nil
}
func (s *mockBigQueryStorageServer) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest) (*storagepb.SplitReadStreamResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*storagepb.SplitReadStreamResponse), nil
}
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption
var (
mockBigQueryStorage mockBigQueryStorageServer
)
func TestMain(m *testing.M) {
flag.Parse()
serv := grpc.NewServer()
storagepb.RegisterBigQueryStorageServer(serv, &mockBigQueryStorage)
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
log.Fatal(err)
}
go serv.Serve(lis)
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
clientOpt = option.WithGRPCConn(conn)
os.Exit(m.Run())
}
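// The pattern above (an in-process gRPC server bound to a random localhost
// port, handed to clients via option.WithGRPCConn) lets the tests below
// exercise the full wire path without any network access or credentials.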
func TestBigQueryStorageCreateReadSession(t *testing.T) {
var name string = "name3373707"
var expectedResponse = &storagepb.ReadSession{
Name: name,
}
mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil
mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
var tableReference *storagepb.TableReference = &storagepb.TableReference{}
var parent string = "parent-995424086"
var request = &storagepb.CreateReadSessionRequest{
TableReference: tableReference,
Parent: parent,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.CreateReadSession(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestBigQueryStorageCreateReadSessionError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
var tableReference *storagepb.TableReference = &storagepb.TableReference{}
var parent string = "parent-995424086"
var request = &storagepb.CreateReadSessionRequest{
TableReference: tableReference,
Parent: parent,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.CreateReadSession(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestBigQueryStorageReadRows(t *testing.T) {
var expectedResponse *storagepb.ReadRowsResponse = &storagepb.ReadRowsResponse{}
mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil
mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
var readPosition *storagepb.StreamPosition = &storagepb.StreamPosition{}
var request = &storagepb.ReadRowsRequest{
ReadPosition: readPosition,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
stream, err := c.ReadRows(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := stream.Recv()
if err != nil {
t.Fatal(err)
}
if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestBigQueryStorageReadRowsError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
var readPosition *storagepb.StreamPosition = &storagepb.StreamPosition{}
var request = &storagepb.ReadRowsRequest{
ReadPosition: readPosition,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
stream, err := c.ReadRows(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := stream.Recv()
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestBigQueryStorageBatchCreateReadSessionStreams(t *testing.T) {
var expectedResponse *storagepb.BatchCreateReadSessionStreamsResponse = &storagepb.BatchCreateReadSessionStreamsResponse{}
mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil
mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
var session *storagepb.ReadSession = &storagepb.ReadSession{}
var requestedStreams int32 = 1017221410
var request = &storagepb.BatchCreateReadSessionStreamsRequest{
Session: session,
RequestedStreams: requestedStreams,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BatchCreateReadSessionStreams(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestBigQueryStorageBatchCreateReadSessionStreamsError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
var session *storagepb.ReadSession = &storagepb.ReadSession{}
var requestedStreams int32 = 1017221410
var request = &storagepb.BatchCreateReadSessionStreamsRequest{
Session: session,
RequestedStreams: requestedStreams,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BatchCreateReadSessionStreams(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestBigQueryStorageFinalizeStream(t *testing.T) {
var expectedResponse *emptypb.Empty = &emptypb.Empty{}
mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil
mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
var stream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.FinalizeStreamRequest{
Stream: stream,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
err = c.FinalizeStream(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
}
func TestBigQueryStorageFinalizeStreamError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
var stream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.FinalizeStreamRequest{
Stream: stream,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
err = c.FinalizeStream(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
}
func TestBigQueryStorageSplitReadStream(t *testing.T) {
var expectedResponse *storagepb.SplitReadStreamResponse = &storagepb.SplitReadStreamResponse{}
mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil
mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
var originalStream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.SplitReadStreamRequest{
OriginalStream: originalStream,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.SplitReadStream(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestBigQueryStorageSplitReadStreamError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
var originalStream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.SplitReadStreamRequest{
OriginalStream: originalStream,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.SplitReadStream(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,11 +15,13 @@
package bigquery
import (
"context"
"errors"
"fmt"
"time"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)
@@ -33,23 +35,58 @@ type Table struct {
// The maximum length is 1,024 characters.
TableID string
c *Client
}
// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
// The following fields can be set when creating a table.
// The user-friendly name for the table.
Name string
// The user-friendly description of the table.
Description string
// The table schema. If provided on create, ViewQuery must be empty.
Schema Schema
// The query to use for a view. If provided on create, Schema must be nil.
ViewQuery string
// Use Legacy SQL for the view query.
// At most one of UseLegacySQL and UseStandardSQL can be true.
UseLegacySQL bool
// Use Legacy SQL for the view query. The default.
// At most one of UseLegacySQL and UseStandardSQL can be true.
// Deprecated: use UseLegacySQL.
UseStandardSQL bool
// If non-nil, the table is partitioned by time.
TimePartitioning *TimePartitioning
// Clustering specifies the data clustering configuration for the table.
Clustering *Clustering
// The time when this table expires. If set, this table will expire at the
// specified time. Expired tables will be deleted and their storage
// reclaimed. The zero value is ignored.
ExpirationTime time.Time
// User-provided labels.
Labels map[string]string
// Information about a table stored outside of BigQuery.
ExternalDataConfig *ExternalDataConfig
// Custom encryption configuration (e.g., Cloud KMS keys).
EncryptionConfig *EncryptionConfig
// All the fields below are read-only.
FullID string // An opaque ID uniquely identifying the table.
Type TableType
CreationTime time.Time
LastModifiedTime time.Time
@@ -57,89 +94,176 @@ type TableMetadata struct {
// This does not include data that is being buffered during a streaming insert.
NumBytes int64
// The number of bytes in the table considered "long-term storage" for reduced
// billing purposes. See https://cloud.google.com/bigquery/pricing#long-term-storage
// for more information.
NumLongTermBytes int64
// The number of rows of data in this table.
// This does not include data that is being buffered during a streaming insert.
NumRows uint64
// Contains information regarding this table's streaming buffer, if one is
// present. This field will be nil if the table is not being streamed to or if
// there is no data in the streaming buffer.
StreamingBuffer *StreamingBuffer
// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
// ensure that the metadata hasn't changed since it was read.
ETag string
}
// TableCreateDisposition specifies the circumstances under which destination table will be created.
// Default is CreateIfNeeded.
type TableCreateDisposition string
const (
// CreateIfNeeded will create the table if it does not already exist.
// Tables are created atomically on successful completion of a job.
CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"
// CreateNever ensures the table must already exist and will not be
// automatically created.
CreateNever TableCreateDisposition = "CREATE_NEVER"
)
// TableWriteDisposition specifies how existing data in a destination table is treated.
// Default is WriteAppend.
type TableWriteDisposition string
const (
// WriteAppend will append to any existing data in the destination table.
// Data is appended atomically on successful completion of a job.
WriteAppend TableWriteDisposition = "WRITE_APPEND"
// WriteTruncate overrides the existing data in the destination table.
// Data is overwritten atomically on successful completion of a job.
WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"
// WriteEmpty fails writes if the destination table already contains data.
WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
)
// TableType is the type of table.
type TableType string
const (
// RegularTable is a regular table.
RegularTable TableType = "TABLE"
// ViewTable is a table type describing that the table is a view. See more
// information at https://cloud.google.com/bigquery/docs/views.
ViewTable TableType = "VIEW"
// ExternalTable is a table type describing that the table is an external
// table (also known as a federated data source). See more information at
// https://cloud.google.com/bigquery/external-data-sources.
ExternalTable TableType = "EXTERNAL"
)
// TimePartitioning describes the time-based date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables.
type TimePartitioning struct {
// The amount of time to keep the storage for a partition.
// If the duration is empty (0), the data in the partitions do not expire.
Expiration time.Duration
// If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
// table is partitioned by this field. The field must be a top-level TIMESTAMP or
// DATE field. Its mode must be NULLABLE or REQUIRED.
Field string
// If true, queries that reference this table must include a filter (e.g. a WHERE predicate)
// that can be used for partition elimination.
RequirePartitionFilter bool
}
func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
if p == nil {
return nil
}
return &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: int64(p.Expiration / time.Millisecond),
Field: p.Field,
RequirePartitionFilter: p.RequirePartitionFilter,
}
}
func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
if q == nil {
return nil
}
return &TimePartitioning{
Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
Field: q.Field,
RequirePartitionFilter: q.RequirePartitionFilter,
}
}
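// Round-trip example: TimePartitioning{Expiration: 90 * 24 * time.Hour}
// converts to bq.TimePartitioning{Type: "DAY", ExpirationMs: 7776000000},
// and bqToTimePartitioning restores the original 90-day duration.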
// Clustering governs the organization of data within a partitioned table.
// For more information, see https://cloud.google.com/bigquery/docs/clustered-tables
type Clustering struct {
Fields []string
}
func (c *Clustering) toBQ() *bq.Clustering {
if c == nil {
return nil
}
return &bq.Clustering{
Fields: c.Fields,
}
}
func bqToClustering(q *bq.Clustering) *Clustering {
if q == nil {
return nil
}
return &Clustering{
Fields: q.Fields,
}
}
// EncryptionConfig configures customer-managed encryption on tables.
type EncryptionConfig struct {
// Describes the Cloud KMS encryption key that will be used to protect
// destination BigQuery table. The BigQuery Service Account associated with your
// project requires access to this encryption key.
KMSKeyName string
}
func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration {
if e == nil {
return nil
}
return &bq.EncryptionConfiguration{
KmsKeyName: e.KMSKeyName,
}
}
func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig {
if q == nil {
return nil
}
return &EncryptionConfig{
KMSKeyName: q.KmsKeyName,
}
}
// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
// A lower-bound estimate of the number of bytes currently in the streaming
// buffer.
EstimatedBytes uint64
// A lower-bound estimate of the number of rows currently in the streaming
// buffer.
EstimatedRows uint64
// The time of the oldest entry in the streaming buffer.
OldestEntryTime time.Time
}
func (t *Table) toBQ() *bq.TableReference {
return &bq.TableReference{
ProjectId: t.ProjectID,
DatasetId: t.DatasetID,
@@ -157,125 +281,349 @@ func (t *Table) implicitTable() bool {
return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
}
// Create creates a table in the BigQuery service.
// Pass in a TableMetadata value to configure the table.
// If tm.ViewQuery is non-empty, the created table will be of type VIEW.
// If no ExpirationTime is specified, the table will never expire.
// After table creation, a view can be modified only if its table was initially created
// with a view.
func (t *Table) Create(ctx context.Context, tm *TableMetadata) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Create")
defer func() { trace.EndSpan(ctx, err) }()
table, err := tm.toBQ()
if err != nil {
return err
}
table.TableReference = &bq.TableReference{
ProjectId: t.ProjectID,
DatasetId: t.DatasetID,
TableId: t.TableID,
}
req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx)
setClientHeader(req.Header())
_, err = req.Do()
return err
}
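// Illustrative sketch (assuming a *Client named client and a Schema named
// schema): creating a table with a 90-day expiration might look like:
//
//	t := client.Dataset("my_dataset").Table("my_table")
//	err := t.Create(ctx, &TableMetadata{
//		Schema:         schema,
//		ExpirationTime: time.Now().Add(90 * 24 * time.Hour),
//	})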
func (tm *TableMetadata) toBQ() (*bq.Table, error) {
t := &bq.Table{}
if tm == nil {
return t, nil
}
if tm.Schema != nil && tm.ViewQuery != "" {
return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
}
t.FriendlyName = tm.Name
t.Description = tm.Description
t.Labels = tm.Labels
if tm.Schema != nil {
t.Schema = tm.Schema.toBQ()
}
if tm.ViewQuery != "" {
if tm.UseStandardSQL && tm.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
if tm.UseLegacySQL {
t.View.UseLegacySql = true
} else {
t.View.UseLegacySql = false
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
}
} else if tm.UseLegacySQL || tm.UseStandardSQL {
return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
}
t.TimePartitioning = tm.TimePartitioning.toBQ()
t.Clustering = tm.Clustering.toBQ()
if !validExpiration(tm.ExpirationTime) {
return nil, fmt.Errorf("invalid expiration time: %v.\n"+
"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
}
if !tm.ExpirationTime.IsZero() && tm.ExpirationTime != NeverExpire {
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
}
if tm.ExternalDataConfig != nil {
edc := tm.ExternalDataConfig.toBQ()
t.ExternalDataConfiguration = &edc
}
t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
if tm.FullID != "" {
return nil, errors.New("cannot set FullID on create")
}
if tm.Type != "" {
return nil, errors.New("cannot set Type on create")
}
if !tm.CreationTime.IsZero() {
return nil, errors.New("cannot set CreationTime on create")
}
if !tm.LastModifiedTime.IsZero() {
return nil, errors.New("cannot set LastModifiedTime on create")
}
if tm.NumBytes != 0 {
return nil, errors.New("cannot set NumBytes on create")
}
if tm.NumLongTermBytes != 0 {
return nil, errors.New("cannot set NumLongTermBytes on create")
}
if tm.NumRows != 0 {
return nil, errors.New("cannot set NumRows on create")
}
if tm.StreamingBuffer != nil {
return nil, errors.New("cannot set StreamingBuffer on create")
}
if tm.ETag != "" {
return nil, errors.New("cannot set ETag on create")
}
return t, nil
} }
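// Note that the long run of checks above rejects any read-only field set on
// create; for example, a TableMetadata with NumRows: 1 or ETag: "x" returns
// an error rather than being silently dropped.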
// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (md *TableMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Metadata")
defer func() { trace.EndSpan(ctx, err) }()
req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
setClientHeader(req.Header())
var table *bq.Table
err = runWithRetry(ctx, func() (err error) {
table, err = req.Do()
return err
})
if err != nil {
return nil, err
}
return bqToTableMetadata(table)
}
func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
md := &TableMetadata{
Description: t.Description,
Name: t.FriendlyName,
Type: TableType(t.Type),
FullID: t.Id,
Labels: t.Labels,
NumBytes: t.NumBytes,
NumLongTermBytes: t.NumLongTermBytes,
NumRows: t.NumRows,
ExpirationTime: unixMillisToTime(t.ExpirationTime),
CreationTime: unixMillisToTime(t.CreationTime),
LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
ETag: t.Etag,
EncryptionConfig: bqToEncryptionConfig(t.EncryptionConfiguration),
}
if t.Schema != nil {
md.Schema = bqToSchema(t.Schema)
}
if t.View != nil {
md.ViewQuery = t.View.Query
md.UseLegacySQL = t.View.UseLegacySql
}
md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
md.Clustering = bqToClustering(t.Clustering)
if t.StreamingBuffer != nil {
md.StreamingBuffer = &StreamingBuffer{
EstimatedBytes: t.StreamingBuffer.EstimatedBytes,
EstimatedRows: t.StreamingBuffer.EstimatedRows,
OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
}
}
if t.ExternalDataConfiguration != nil {
edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration)
if err != nil {
return nil, err
}
md.ExternalDataConfig = edc
}
return md, nil
} }
// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Delete")
defer func() { trace.EndSpan(ctx, err) }()
req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
setClientHeader(req.Header())
return req.Do()
}
// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
return t.read(ctx, fetchPage)
}
func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
return newRowIterator(ctx, t, pf)
}
// NeverExpire is a sentinel value used to remove a table's expiration time.
var NeverExpire = time.Time{}.Add(-1)
// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (md *TableMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Update")
defer func() { trace.EndSpan(ctx, err) }()
bqt, err := tm.toBQ()
if err != nil {
return nil, err
}
call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var res *bq.Table
if err := runWithRetry(ctx, func() (err error) {
res, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToTableMetadata(res)
} }
func (tm *TableMetadataToUpdate) toBQ() (*bq.Table, error) {
t := &bq.Table{}
forceSend := func(field string) {
t.ForceSendFields = append(t.ForceSendFields, field)
}
if tm.Description != nil {
t.Description = optional.ToString(tm.Description)
forceSend("Description")
}
if tm.Name != nil {
t.FriendlyName = optional.ToString(tm.Name)
forceSend("FriendlyName")
}
if tm.Schema != nil {
t.Schema = tm.Schema.toBQ()
forceSend("Schema")
}
if tm.EncryptionConfig != nil {
t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
}
if !validExpiration(tm.ExpirationTime) {
return nil, fmt.Errorf("invalid expiration time: %v.\n"+
"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
}
if tm.ExpirationTime == NeverExpire {
t.NullFields = append(t.NullFields, "ExpirationTime")
} else if !tm.ExpirationTime.IsZero() {
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
forceSend("ExpirationTime")
}
if tm.TimePartitioning != nil {
t.TimePartitioning = tm.TimePartitioning.toBQ()
t.TimePartitioning.ForceSendFields = []string{"RequirePartitionFilter"}
if tm.TimePartitioning.Expiration == 0 {
t.TimePartitioning.NullFields = []string{"ExpirationMs"}
}
}
if tm.ViewQuery != nil {
t.View = &bq.ViewDefinition{
Query: optional.ToString(tm.ViewQuery),
ForceSendFields: []string{"Query"},
}
}
if tm.UseLegacySQL != nil {
if t.View == nil {
t.View = &bq.ViewDefinition{}
}
t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL)
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
}
labels, forces, nulls := tm.update()
t.Labels = labels
t.ForceSendFields = append(t.ForceSendFields, forces...)
t.NullFields = append(t.NullFields, nulls...)
return t, nil
}
// validExpiration ensures a specified time is either the sentinel NeverExpire,
// the zero value, or within the defined range of UnixNano. Internal
// representations of expiration times are based upon Time.UnixNano. Any time
// before 1678 or after 2262 cannot be represented by an int64 and is therefore
// undefined and invalid. See https://godoc.org/time#Time.UnixNano.
func validExpiration(t time.Time) bool {
return t == NeverExpire || t.IsZero() || time.Unix(0, t.UnixNano()).Equal(t)
}
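// For example, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) round-trips
// through UnixNano and is valid, while a year-1500 time overflows int64
// nanoseconds, fails the Equal check, and is rejected.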
// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
// The user-friendly description of this table.
Description optional.String
// The user-friendly name for this table.
Name optional.String
// The table's schema.
// When updating a schema, you can add columns but not remove them.
Schema Schema
// The table's encryption configuration. When calling Update, ensure that
// all mutable fields of EncryptionConfig are populated.
EncryptionConfig *EncryptionConfig
// The time when this table expires. To remove a table's expiration,
// set ExpirationTime to NeverExpire. The zero value is ignored.
ExpirationTime time.Time
// The query to use for a view.
ViewQuery optional.String
// Use Legacy SQL for the view query.
UseLegacySQL optional.Bool
// TimePartitioning allows modification of certain aspects of partition
// configuration such as partition expiration and whether partition
// filtration is required at query time. When calling Update, ensure
// that all mutable fields of TimePartitioning are populated.
TimePartitioning *TimePartitioning
labelUpdater
}
// labelUpdater contains common code for updating labels.
type labelUpdater struct {
setLabels map[string]string
deleteLabels map[string]bool
}
// SetLabel causes a label to be added or modified on a call to Update.
func (u *labelUpdater) SetLabel(name, value string) {
if u.setLabels == nil {
u.setLabels = map[string]string{}
}
u.setLabels[name] = value
}
// DeleteLabel causes a label to be deleted on a call to Update.
func (u *labelUpdater) DeleteLabel(name string) {
if u.deleteLabels == nil {
u.deleteLabels = map[string]bool{}
}
u.deleteLabels[name] = true
}
func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) {
if u.setLabels == nil && u.deleteLabels == nil {
return nil, nil, nil
}
labels = map[string]string{}
for k, v := range u.setLabels {
labels[k] = v
}
if len(labels) == 0 && len(u.deleteLabels) > 0 {
forces = []string{"Labels"}
}
for l := range u.deleteLabels {
nulls = append(nulls, "Labels."+l)
}
return labels, forces, nulls
} }
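// For example, after u.SetLabel("env", "prod") and u.DeleteLabel("tmp"),
// update returns labels = map[string]string{"env": "prod"}, forces = nil and
// nulls = []string{"Labels.tmp"}, which the toBQ methods above splice into
// the bq.Table's Labels, ForceSendFields and NullFields.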

vendor/cloud.google.com/go/bigquery/table_test.go
View File

@@ -0,0 +1,372 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"time"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
func TestBQToTableMetadata(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
aTimeMillis := aTime.UnixNano() / 1e6
for _, test := range []struct {
in *bq.Table
want *TableMetadata
}{
{&bq.Table{}, &TableMetadata{}}, // test minimal case
{
&bq.Table{
CreationTime: aTimeMillis,
Description: "desc",
Etag: "etag",
ExpirationTime: aTimeMillis,
FriendlyName: "fname",
Id: "id",
LastModifiedTime: uint64(aTimeMillis),
Location: "loc",
NumBytes: 123,
NumLongTermBytes: 23,
NumRows: 7,
StreamingBuffer: &bq.Streamingbuffer{
EstimatedBytes: 11,
EstimatedRows: 3,
OldestEntryTime: uint64(aTimeMillis),
},
TimePartitioning: &bq.TimePartitioning{
ExpirationMs: 7890,
Type: "DAY",
Field: "pfield",
},
Clustering: &bq.Clustering{
Fields: []string{"cfield1", "cfield2"},
},
EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
Type: "EXTERNAL",
View: &bq.ViewDefinition{Query: "view-query"},
Labels: map[string]string{"a": "b"},
ExternalDataConfiguration: &bq.ExternalDataConfiguration{
SourceFormat: "GOOGLE_SHEETS",
},
},
&TableMetadata{
Description: "desc",
Name: "fname",
ViewQuery: "view-query",
FullID: "id",
Type: ExternalTable,
Labels: map[string]string{"a": "b"},
ExternalDataConfig: &ExternalDataConfig{SourceFormat: GoogleSheets},
ExpirationTime: aTime.Truncate(time.Millisecond),
CreationTime: aTime.Truncate(time.Millisecond),
LastModifiedTime: aTime.Truncate(time.Millisecond),
NumBytes: 123,
NumLongTermBytes: 23,
NumRows: 7,
TimePartitioning: &TimePartitioning{
Expiration: 7890 * time.Millisecond,
Field: "pfield",
},
Clustering: &Clustering{
Fields: []string{"cfield1", "cfield2"},
},
StreamingBuffer: &StreamingBuffer{
EstimatedBytes: 11,
EstimatedRows: 3,
OldestEntryTime: aTime,
},
EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
ETag: "etag",
},
},
} {
got, err := bqToTableMetadata(test.in)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, test.want); diff != "" {
t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff)
}
}
}
func TestTableMetadataToBQ(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
aTimeMillis := aTime.UnixNano() / 1e6
sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
for _, test := range []struct {
in *TableMetadata
want *bq.Table
}{
{nil, &bq.Table{}},
{&TableMetadata{}, &bq.Table{}},
{
&TableMetadata{
Name: "n",
Description: "d",
Schema: sc,
ExpirationTime: aTime,
Labels: map[string]string{"a": "b"},
ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable},
EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
},
&bq.Table{
FriendlyName: "n",
Description: "d",
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
ExpirationTime: aTimeMillis,
Labels: map[string]string{"a": "b"},
ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"},
EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
},
},
{
&TableMetadata{ViewQuery: "q"},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
},
{
&TableMetadata{
ViewQuery: "q",
UseLegacySQL: true,
TimePartitioning: &TimePartitioning{},
},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: true,
},
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 0,
},
},
},
{
&TableMetadata{
ViewQuery: "q",
UseStandardSQL: true,
TimePartitioning: &TimePartitioning{
Expiration: time.Second,
Field: "ofDreams",
},
Clustering: &Clustering{
Fields: []string{"cfield1"},
},
},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 1000,
Field: "ofDreams",
},
Clustering: &bq.Clustering{
Fields: []string{"cfield1"},
},
},
},
{
&TableMetadata{ExpirationTime: NeverExpire},
&bq.Table{ExpirationTime: 0},
},
} {
got, err := test.in.toBQ()
if err != nil {
t.Fatalf("%+v: %v", test.in, err)
}
if diff := testutil.Diff(got, test.want); diff != "" {
t.Errorf("%+v:\n-got, +want:\n%s", test.in, diff)
}
}
// Errors
for _, in := range []*TableMetadata{
{Schema: sc, ViewQuery: "q"}, // can't have both schema and query
{UseLegacySQL: true}, // UseLegacySQL without query
{UseStandardSQL: true}, // UseStandardSQL without query
// read-only fields
{FullID: "x"},
{Type: "x"},
{CreationTime: aTime},
{LastModifiedTime: aTime},
{NumBytes: 1},
{NumLongTermBytes: 1},
{NumRows: 1},
{StreamingBuffer: &StreamingBuffer{}},
{ETag: "x"},
// expiration time outside allowable range is invalid
// See https://godoc.org/time#Time.UnixNano
{ExpirationTime: time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC).Add(-1)},
{ExpirationTime: time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC).Add(1)},
} {
_, err := in.toBQ()
if err == nil {
t.Errorf("%+v: got nil, want error", in)
}
}
}
func TestTableMetadataToUpdateToBQ(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
for _, test := range []struct {
tm TableMetadataToUpdate
want *bq.Table
}{
{
tm: TableMetadataToUpdate{},
want: &bq.Table{},
},
{
tm: TableMetadataToUpdate{
Description: "d",
Name: "n",
},
want: &bq.Table{
Description: "d",
FriendlyName: "n",
ForceSendFields: []string{"Description", "FriendlyName"},
},
},
{
tm: TableMetadataToUpdate{
Schema: Schema{fieldSchema("desc", "name", "STRING", false, true)},
ExpirationTime: aTime,
},
want: &bq.Table{
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
ExpirationTime: aTime.UnixNano() / 1e6,
ForceSendFields: []string{"Schema", "ExpirationTime"},
},
},
{
tm: TableMetadataToUpdate{ViewQuery: "q"},
want: &bq.Table{
View: &bq.ViewDefinition{Query: "q", ForceSendFields: []string{"Query"}},
},
},
{
tm: TableMetadataToUpdate{UseLegacySQL: false},
want: &bq.Table{
View: &bq.ViewDefinition{
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
},
{
tm: TableMetadataToUpdate{ViewQuery: "q", UseLegacySQL: true},
want: &bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: true,
ForceSendFields: []string{"Query", "UseLegacySql"},
},
},
},
{
tm: func() (tm TableMetadataToUpdate) {
tm.SetLabel("L", "V")
tm.DeleteLabel("D")
return tm
}(),
want: &bq.Table{
Labels: map[string]string{"L": "V"},
NullFields: []string{"Labels.D"},
},
},
{
tm: TableMetadataToUpdate{ExpirationTime: NeverExpire},
want: &bq.Table{
NullFields: []string{"ExpirationTime"},
},
},
{
tm: TableMetadataToUpdate{TimePartitioning: &TimePartitioning{Expiration: 0}},
want: &bq.Table{
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ForceSendFields: []string{"RequirePartitionFilter"},
NullFields: []string{"ExpirationMs"},
},
},
},
{
tm: TableMetadataToUpdate{TimePartitioning: &TimePartitioning{Expiration: time.Duration(time.Hour)}},
want: &bq.Table{
TimePartitioning: &bq.TimePartitioning{
ExpirationMs: 3600000,
Type: "DAY",
ForceSendFields: []string{"RequirePartitionFilter"},
},
},
},
} {
got, _ := test.tm.toBQ()
if !testutil.Equal(got, test.want) {
t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want)
}
}
}
func TestTableMetadataToUpdateToBQErrors(t *testing.T) {
// See https://godoc.org/time#Time.UnixNano
start := time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC)
end := time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC)
for _, test := range []struct {
desc string
aTime time.Time
wantErr bool
}{
{desc: "ignored zero value", aTime: time.Time{}, wantErr: false},
{desc: "earliest valid time", aTime: start, wantErr: false},
{desc: "latested valid time", aTime: end, wantErr: false},
{desc: "invalid times before 1678", aTime: start.Add(-1), wantErr: true},
{desc: "invalid times after 2262", aTime: end.Add(1), wantErr: true},
{desc: "valid times after 1678", aTime: start.Add(1), wantErr: false},
{desc: "valid times before 2262", aTime: end.Add(-1), wantErr: false},
} {
tm := &TableMetadataToUpdate{ExpirationTime: test.aTime}
_, err := tm.toBQ()
if test.wantErr && err == nil {
t.Errorf("[%s] got no error, want error", test.desc)
}
if !test.wantErr && err != nil {
t.Errorf("[%s] got error, want no error", test.desc)
}
}
}


@@ -1,121 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"reflect"
"golang.org/x/net/context"
)
// An UploadOption is an optional argument to NewUploader.
type UploadOption interface {
customizeInsertRows(conf *insertRowsConf)
}
// An Uploader does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Uploader struct {
conf insertRowsConf
t *Table
}
// SkipInvalidRows returns an UploadOption that causes rows containing invalid data to be silently ignored.
// The default value is false, which causes the entire request to fail if there is an attempt to insert an invalid row.
func SkipInvalidRows() UploadOption { return skipInvalidRows{} }
type skipInvalidRows struct{}
func (opt skipInvalidRows) customizeInsertRows(conf *insertRowsConf) {
conf.skipInvalidRows = true
}
// UploadIgnoreUnknownValues returns an UploadOption that causes values not matching the schema to be ignored.
// If this option is not used, records containing such values are treated as invalid records.
func UploadIgnoreUnknownValues() UploadOption { return uploadIgnoreUnknownValues{} }
type uploadIgnoreUnknownValues struct{}
func (opt uploadIgnoreUnknownValues) customizeInsertRows(conf *insertRowsConf) {
conf.ignoreUnknownValues = true
}
// A TableTemplateSuffix allows Uploaders to create tables automatically.
//
// Experimental: this option is experimental and may be modified or removed in future versions,
// regardless of any other documented package stability guarantees.
//
// When you specify a suffix, the table you upload data to
// will be used as a template for creating a new table, with the same schema,
// called <table> + <suffix>.
//
// More information is available at
// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
func TableTemplateSuffix(suffix string) UploadOption { return tableTemplateSuffix(suffix) }
type tableTemplateSuffix string
func (opt tableTemplateSuffix) customizeInsertRows(conf *insertRowsConf) {
conf.templateSuffix = string(opt)
}
// Put uploads one or more rows to the BigQuery service. src must implement ValueSaver or be a slice of ValueSavers.
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
func (u *Uploader) Put(ctx context.Context, src interface{}) error {
// TODO(mcgreevy): Support structs which do not implement ValueSaver as src, a la Datastore.
if saver, ok := src.(ValueSaver); ok {
return u.putMulti(ctx, []ValueSaver{saver})
}
srcVal := reflect.ValueOf(src)
if srcVal.Kind() != reflect.Slice {
return fmt.Errorf("%T is not a ValueSaver or slice of ValueSavers", src)
}
var savers []ValueSaver
for i := 0; i < srcVal.Len(); i++ {
s := srcVal.Index(i).Interface()
saver, ok := s.(ValueSaver)
if !ok {
return fmt.Errorf("element %d of src is of type %T, which is not a ValueSaver", i, s)
}
savers = append(savers, saver)
}
return u.putMulti(ctx, savers)
}
func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
var rows []*insertionRow
for _, saver := range src {
row, insertID, err := saver.Save()
if err != nil {
return err
}
rows = append(rows, &insertionRow{InsertID: insertID, Row: row})
}
return u.t.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &u.conf)
}
// An insertionRow represents a row of data to be inserted into a table.
type insertionRow struct {
// If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of
// this row on a best-effort basis.
InsertID string
// The data to be inserted, represented as a map from field name to Value.
Row map[string]Value
}
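// End-to-end sketch of the removed Uploader API (names as defined above; the
// *Table handle and schema setup are assumed, not shown in this file):
//
//	u := someTable.NewUploader(SkipInvalidRows())
//	savers := []*ValuesSaver{
//		{Schema: schema, InsertID: "row-1", Row: []Value{"a", 1}},
//	}
//	if err := u.Put(ctx, savers); err != nil {
//		// A PutMultiError carries one RowInsertionError per failed row.
//	}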


@@ -1,234 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
)
type testSaver struct {
ir *insertionRow
err error
}
func (ts testSaver) Save() (map[string]Value, string, error) {
return ts.ir.Row, ts.ir.InsertID, ts.err
}
func TestRejectsNonValueSavers(t *testing.T) {
u := Uploader{t: defaultTable(nil)}
testCases := []struct {
src interface{}
}{
{
src: 1,
},
{
src: []int{1, 2},
},
{
src: []interface{}{
testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
1,
},
},
}
for _, tc := range testCases {
if err := u.Put(context.Background(), tc.src); err == nil {
t.Errorf("put value: %v; got err: %v; want nil", tc.src, err)
}
}
}
type insertRowsRecorder struct {
rowBatches [][]*insertionRow
service
}
func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
irr.rowBatches = append(irr.rowBatches, rows)
return nil
}
func TestInsertsData(t *testing.T) {
table := &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
}
testCases := []struct {
data [][]*insertionRow
}{
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
},
},
},
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
&insertionRow{"b", map[string]Value{"two": 2}},
},
},
},
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
},
{
&insertionRow{"b", map[string]Value{"two": 2}},
},
},
},
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
&insertionRow{"b", map[string]Value{"two": 2}},
},
{
&insertionRow{"c", map[string]Value{"three": 3}},
&insertionRow{"d", map[string]Value{"four": 4}},
},
},
},
}
for _, tc := range testCases {
irr := &insertRowsRecorder{}
table.service = irr
u := Uploader{t: table}
for _, batch := range tc.data {
if len(batch) == 0 {
continue
}
var toUpload interface{}
if len(batch) == 1 {
toUpload = testSaver{ir: batch[0]}
} else {
savers := []testSaver{}
for _, row := range batch {
savers = append(savers, testSaver{ir: row})
}
toUpload = savers
}
err := u.Put(context.Background(), toUpload)
if err != nil {
t.Errorf("expected successful Put of ValueSaver; got: %v", err)
}
}
if got, want := irr.rowBatches, tc.data; !reflect.DeepEqual(got, want) {
t.Errorf("got: %v, want: %v", got, want)
}
}
}
type uploadOptionRecorder struct {
received *insertRowsConf
service
}
func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
u.received = conf
return nil
}
func TestUploadOptionsPropagate(t *testing.T) {
// we don't care about the data in this test case.
dummyData := testSaver{ir: &insertionRow{}}
tests := [...]struct {
opts []UploadOption
conf insertRowsConf
}{
{ // test zero options lead to zero value for insertRowsConf
},
{
opts: []UploadOption{
TableTemplateSuffix("suffix"),
},
conf: insertRowsConf{
templateSuffix: "suffix",
},
},
{
opts: []UploadOption{
UploadIgnoreUnknownValues(),
},
conf: insertRowsConf{
ignoreUnknownValues: true,
},
},
{
opts: []UploadOption{
SkipInvalidRows(),
},
conf: insertRowsConf{
skipInvalidRows: true,
},
},
{ // multiple upload options combine
opts: []UploadOption{
TableTemplateSuffix("suffix"),
SkipInvalidRows(),
UploadIgnoreUnknownValues(),
},
conf: insertRowsConf{
templateSuffix: "suffix",
skipInvalidRows: true,
ignoreUnknownValues: true,
},
},
}
for i, tc := range tests {
recorder := new(uploadOptionRecorder)
table := &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
service: recorder,
}
u := table.NewUploader(tc.opts...)
err := u.Put(context.Background(), dummyData)
if err != nil {
t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err)
}
if recorder.received == nil {
t.Fatalf("%d: received no options at all!", i)
}
want := tc.conf
got := *recorder.received
if got != want {
t.Errorf("%d: got %#v, want %#v, opts=%#v", i, got, want, tc.opts)
}
}
}


@@ -1,54 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
var defaultGCS = &GCSReference{
uris: []string{"uri"},
}
var defaultQuery = &Query{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
}
func defaultTable(s service) *Table {
return &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
service: s,
}
}
type testService struct {
*bq.Job
service
}
func (s *testService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {
s.Job = job
return &Job{}, nil
}
func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
return &JobStatus{State: Done}, nil
}


@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,11 +15,17 @@
package bigquery
import (
"encoding/base64"
"errors"
"fmt"
"math"
"math/big"
"reflect"
"strconv"
"strings"
"time"
"cloud.google.com/go/civil"
bq "google.golang.org/api/bigquery/v2"
)
@@ -27,17 +33,459 @@ import (
type Value interface{}
// ValueLoader stores a slice of Values representing a result row from a Read operation.
-// See Iterator.Get for more information.
// See RowIterator.Next for more information.
type ValueLoader interface {
-Load(v []Value) error
Load(v []Value, s Schema) error
}
-// ValueList converts a []Value to implement ValueLoader.
-type ValueList []Value
-// Load stores a sequence of values in a ValueList.
-func (vs *ValueList) Load(v []Value) error {
-*vs = append(*vs, v...)
// valueList converts a []Value to implement ValueLoader.
type valueList []Value
// Load stores a sequence of values in a valueList.
// It resets the slice length to zero, then appends each value to it.
func (vs *valueList) Load(v []Value, _ Schema) error {
*vs = append((*vs)[:0], v...)
return nil
}
// valueMap converts a map[string]Value to implement ValueLoader.
type valueMap map[string]Value
// Load stores a sequence of values in a valueMap.
func (vm *valueMap) Load(v []Value, s Schema) error {
if *vm == nil {
*vm = map[string]Value{}
}
loadMap(*vm, v, s)
return nil
}
func loadMap(m map[string]Value, vals []Value, s Schema) {
for i, f := range s {
val := vals[i]
var v interface{}
switch {
case val == nil:
v = val
case f.Schema == nil:
v = val
case !f.Repeated:
m2 := map[string]Value{}
loadMap(m2, val.([]Value), f.Schema)
v = m2
default: // repeated and nested
sval := val.([]Value)
vs := make([]Value, len(sval))
for j, e := range sval {
m2 := map[string]Value{}
loadMap(m2, e.([]Value), f.Schema)
vs[j] = m2
}
v = vs
}
m[f.Name] = v
}
}
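// Illustrative example (a sketch, not from the original source): for a schema
// with one repeated RECORD field "addrs" holding a single STRING field "city",
// the row
//	[]Value{[]Value{[]Value{"Paris"}, []Value{"Oslo"}}}
// loads into
//	map[string]Value{"addrs": []Value{
//		map[string]Value{"city": "Paris"},
//		map[string]Value{"city": "Oslo"},
//	}}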
type structLoader struct {
typ reflect.Type // type of struct
err error
ops []structLoaderOp
vstructp reflect.Value // pointer to current struct value; changed by set
}
// A setFunc is a function that sets a struct field or slice/array
// element to a value.
type setFunc func(v reflect.Value, val interface{}) error
// A structLoaderOp instructs the loader to set a struct field to a row value.
type structLoaderOp struct {
fieldIndex []int
valueIndex int
setFunc setFunc
repeated bool
}
var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs")
func setAny(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.Set(reflect.ValueOf(x))
return nil
}
func setInt(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
xx := x.(int64)
if v.OverflowInt(xx) {
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
}
v.SetInt(xx)
return nil
}
func setUint(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
xx := x.(int64)
if xx < 0 || v.OverflowUint(uint64(xx)) {
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
}
v.SetUint(uint64(xx))
return nil
}
func setFloat(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
xx := x.(float64)
if v.OverflowFloat(xx) {
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
}
v.SetFloat(xx)
return nil
}
func setBool(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.SetBool(x.(bool))
return nil
}
func setString(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.SetString(x.(string))
return nil
}
func setGeography(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.SetString(x.(string))
return nil
}
func setBytes(v reflect.Value, x interface{}) error {
if x == nil {
v.SetBytes(nil)
} else {
v.SetBytes(x.([]byte))
}
return nil
}
func setNull(v reflect.Value, x interface{}, build func() interface{}) error {
if x == nil {
v.Set(reflect.Zero(v.Type()))
} else {
n := build()
v.Set(reflect.ValueOf(n))
}
return nil
}
// set remembers a value for the next call to Load. The value must be
// a pointer to a struct. (This is checked in RowIterator.Next.)
func (sl *structLoader) set(structp interface{}, schema Schema) error {
if sl.err != nil {
return sl.err
}
sl.vstructp = reflect.ValueOf(structp)
typ := sl.vstructp.Type().Elem()
if sl.typ == nil {
// First call: remember the type and compile the schema.
sl.typ = typ
ops, err := compileToOps(typ, schema)
if err != nil {
sl.err = err
return err
}
sl.ops = ops
} else if sl.typ != typ {
return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ)
}
return nil
}
// compileToOps produces a sequence of operations that will set the fields of a
// value of structType to the contents of a row with schema.
func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
var ops []structLoaderOp
fields, err := fieldCache.Fields(structType)
if err != nil {
return nil, err
}
for i, schemaField := range schema {
// Look for an exported struct field with the same name as the schema
// field, ignoring case (BigQuery column names are case-insensitive,
// and we want to act like encoding/json anyway).
structField := fields.Match(schemaField.Name)
if structField == nil {
// Ignore schema fields with no corresponding struct field.
continue
}
op := structLoaderOp{
fieldIndex: structField.Index,
valueIndex: i,
}
t := structField.Type
if schemaField.Repeated {
if t.Kind() != reflect.Slice && t.Kind() != reflect.Array {
return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s",
schemaField.Name, structField.Name, t)
}
t = t.Elem()
op.repeated = true
}
if schemaField.Type == RecordFieldType {
// Field can be a struct or a pointer to a struct.
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct",
structField.Name, structField.Type)
}
nested, err := compileToOps(t, schemaField.Schema)
if err != nil {
return nil, err
}
op.setFunc = func(v reflect.Value, val interface{}) error {
return setNested(nested, v, val)
}
} else {
op.setFunc = determineSetFunc(t, schemaField.Type)
if op.setFunc == nil {
return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s",
schemaField.Name, schemaField.Type, structField.Name, t)
}
}
ops = append(ops, op)
}
return ops, nil
}
// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.
func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
switch stype {
case StringFieldType:
if ftype.Kind() == reflect.String {
return setString
}
if ftype == typeOfNullString {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullString{StringVal: x.(string), Valid: true}
})
}
}
case GeographyFieldType:
if ftype.Kind() == reflect.String {
return setGeography
}
if ftype == typeOfNullGeography {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullGeography{GeographyVal: x.(string), Valid: true}
})
}
}
case BytesFieldType:
if ftype == typeOfByteSlice {
return setBytes
}
case IntegerFieldType:
if isSupportedUintType(ftype) {
return setUint
} else if isSupportedIntType(ftype) {
return setInt
}
if ftype == typeOfNullInt64 {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullInt64{Int64: x.(int64), Valid: true}
})
}
}
case FloatFieldType:
switch ftype.Kind() {
case reflect.Float32, reflect.Float64:
return setFloat
}
if ftype == typeOfNullFloat64 {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullFloat64{Float64: x.(float64), Valid: true}
})
}
}
case BooleanFieldType:
if ftype.Kind() == reflect.Bool {
return setBool
}
if ftype == typeOfNullBool {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullBool{Bool: x.(bool), Valid: true}
})
}
}
case TimestampFieldType:
if ftype == typeOfGoTime {
return setAny
}
if ftype == typeOfNullTimestamp {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullTimestamp{Timestamp: x.(time.Time), Valid: true}
})
}
}
case DateFieldType:
if ftype == typeOfDate {
return setAny
}
if ftype == typeOfNullDate {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullDate{Date: x.(civil.Date), Valid: true}
})
}
}
case TimeFieldType:
if ftype == typeOfTime {
return setAny
}
if ftype == typeOfNullTime {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullTime{Time: x.(civil.Time), Valid: true}
})
}
}
case DateTimeFieldType:
if ftype == typeOfDateTime {
return setAny
}
if ftype == typeOfNullDateTime {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullDateTime{DateTime: x.(civil.DateTime), Valid: true}
})
}
}
case NumericFieldType:
if ftype == typeOfRat {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} { return x.(*big.Rat) })
}
}
}
return nil
}
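// For instance (sketch): a nullable STRING column can target a struct field of
// type NullString; a NULL cell then yields NullString{Valid: false} via setNull,
// while a plain string field would instead fail with errNoNulls.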
func (sl *structLoader) Load(values []Value, _ Schema) error {
if sl.err != nil {
return sl.err
}
return runOps(sl.ops, sl.vstructp.Elem(), values)
}
// runOps executes a sequence of ops, setting the fields of vstruct to the
// supplied values.
func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
for _, op := range ops {
field := vstruct.FieldByIndex(op.fieldIndex)
var err error
if op.repeated {
err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc)
} else {
err = op.setFunc(field, values[op.valueIndex])
}
if err != nil {
return err
}
}
return nil
}
func setNested(ops []structLoaderOp, v reflect.Value, val interface{}) error {
// v is either a struct or a pointer to a struct.
if v.Kind() == reflect.Ptr {
// If the value is nil, set the pointer to nil.
if val == nil {
v.Set(reflect.Zero(v.Type()))
return nil
}
// If the pointer is nil, set it to a zero struct value.
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
return runOps(ops, v, val.([]Value))
}
func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
vlen := len(vslice)
var flen int
switch field.Type().Kind() {
case reflect.Slice:
// Make a slice of the right size, avoiding allocation if possible.
switch {
case field.Len() < vlen:
field.Set(reflect.MakeSlice(field.Type(), vlen, vlen))
case field.Len() > vlen:
field.SetLen(vlen)
}
flen = vlen
case reflect.Array:
flen = field.Len()
if flen > vlen {
// Set extra elements to their zero value.
z := reflect.Zero(field.Type().Elem())
for i := vlen; i < flen; i++ {
field.Index(i).Set(z)
}
}
default:
return fmt.Errorf("bigquery: impossible field type %s", field.Type())
}
for i, val := range vslice {
if i < flen { // avoid writing past the end of a short array
if err := setElem(field.Index(i), val); err != nil {
return err
}
}
}
return nil
}
@@ -61,7 +509,7 @@ type ValuesSaver struct {
Row []Value
}
-// Save implements ValueSaver
// Save implements ValueSaver.
func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
m, err := valuesToMap(vls.Row, vls.Schema)
return m, vls.InsertID, err
@@ -69,28 +517,257 @@ func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
if len(vs) != len(schema) {
-return nil, errors.New("Schema does not match length of row to be inserted")
return nil, errors.New("schema does not match length of row to be inserted")
}
m := make(map[string]Value)
for i, fieldSchema := range schema {
-if fieldSchema.Type == RecordFieldType {
-nested, ok := vs[i].([]Value)
-if !ok {
-return nil, errors.New("Nested record is not a []Value")
-}
-value, err := valuesToMap(nested, fieldSchema.Schema)
if vs[i] == nil {
m[fieldSchema.Name] = nil
continue
}
if fieldSchema.Type != RecordFieldType {
m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema)
continue
}
// Nested record, possibly repeated.
vals, ok := vs[i].([]Value)
if !ok {
return nil, errors.New("nested record is not a []Value")
}
if !fieldSchema.Repeated {
value, err := valuesToMap(vals, fieldSchema.Schema)
if err != nil {
return nil, err
}
m[fieldSchema.Name] = value
-} else {
-m[fieldSchema.Name] = vs[i]
continue
}
// A repeated nested field is converted into a slice of maps.
var maps []Value
for _, v := range vals {
sv, ok := v.([]Value)
if !ok {
return nil, errors.New("nested record in slice is not a []Value")
}
value, err := valuesToMap(sv, fieldSchema.Schema)
if err != nil {
return nil, err
}
maps = append(maps, value)
}
m[fieldSchema.Name] = maps
}
return m, nil
}
// StructSaver implements ValueSaver for a struct.
// The struct is converted to a map of values by using the values of struct
// fields corresponding to schema fields. Additional and missing
// fields are ignored, as are nested struct pointers that are nil.
type StructSaver struct {
// Schema determines what fields of the struct are uploaded. It should
// match the table's schema.
// Schema is optional for StructSavers that are passed to Uploader.Put.
Schema Schema
// If non-empty, BigQuery will use InsertID to de-duplicate insertions
// of this row on a best-effort basis.
InsertID string
// Struct should be a struct or a pointer to a struct.
Struct interface{}
}
// Save implements ValueSaver.
func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) {
vstruct := reflect.ValueOf(ss.Struct)
row, err = structToMap(vstruct, ss.Schema)
if err != nil {
return nil, "", err
}
return row, ss.InsertID, nil
}
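// Usage sketch (assumed caller, not part of this file):
//
//	type Person struct {
//		Name string
//		Age  int
//	}
//	saver := &StructSaver{Schema: schema, InsertID: "id-1", Struct: Person{Name: "Ada", Age: 36}}
//	row, insertID, err := saver.Save()
//	// row == map[string]Value{"Name": "Ada", "Age": 36}, given matching schema fields.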
func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) {
if vstruct.Kind() == reflect.Ptr {
vstruct = vstruct.Elem()
}
if !vstruct.IsValid() {
return nil, nil
}
m := map[string]Value{}
if vstruct.Kind() != reflect.Struct {
return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type())
}
fields, err := fieldCache.Fields(vstruct.Type())
if err != nil {
return nil, err
}
for _, schemaField := range schema {
// Look for an exported struct field with the same name as the schema
// field, ignoring case.
structField := fields.Match(schemaField.Name)
if structField == nil {
continue
}
val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField)
if err != nil {
return nil, err
}
// Add the value to the map, unless it is nil.
if val != nil {
m[schemaField.Name] = val
}
}
return m, nil
}
// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using
// the schemaField as a guide.
// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its
// caller can easily identify a nil value.
func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) {
if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) {
return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s",
schemaField.Name, vfield.Type())
}
// A non-nested field can be represented by its Go value, except for some types.
if schemaField.Type != RecordFieldType {
return toUploadValueReflect(vfield, schemaField), nil
}
// A non-repeated nested field is converted into a map[string]Value.
if !schemaField.Repeated {
m, err := structToMap(vfield, schemaField.Schema)
if err != nil {
return nil, err
}
if m == nil {
return nil, nil
}
return m, nil
}
// A repeated nested field is converted into a slice of maps.
// If the field is zero-length (but not nil), we return a zero-length []Value.
if vfield.IsNil() {
return nil, nil
}
vals := []Value{}
for i := 0; i < vfield.Len(); i++ {
m, err := structToMap(vfield.Index(i), schemaField.Schema)
if err != nil {
return nil, err
}
vals = append(vals, m)
}
return vals, nil
}
func toUploadValue(val interface{}, fs *FieldSchema) interface{} {
if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType || fs.Type == NumericFieldType {
return toUploadValueReflect(reflect.ValueOf(val), fs)
}
return val
}
func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
switch fs.Type {
case TimeFieldType:
if v.Type() == typeOfNullTime {
return v.Interface()
}
return formatUploadValue(v, fs, func(v reflect.Value) string {
return CivilTimeString(v.Interface().(civil.Time))
})
case DateTimeFieldType:
if v.Type() == typeOfNullDateTime {
return v.Interface()
}
return formatUploadValue(v, fs, func(v reflect.Value) string {
return CivilDateTimeString(v.Interface().(civil.DateTime))
})
case NumericFieldType:
if r, ok := v.Interface().(*big.Rat); ok && r == nil {
return nil
}
return formatUploadValue(v, fs, func(v reflect.Value) string {
return NumericString(v.Interface().(*big.Rat))
})
default:
if !fs.Repeated || v.Len() > 0 {
return v.Interface()
}
// The service treats a null repeated field as an error. Return
// nil to omit the field entirely.
return nil
}
}
func formatUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} {
if !fs.Repeated {
return cvt(v)
}
if v.Len() == 0 {
return nil
}
s := make([]string, v.Len())
for i := 0; i < v.Len(); i++ {
s[i] = cvt(v.Index(i))
}
return s
}
// CivilTimeString returns a string representing a civil.Time in a format compatible
// with BigQuery SQL. It rounds the time to the nearest microsecond and returns a
// string with six digits of sub-second precision.
//
// Use CivilTimeString when using civil.Time in DML, for example in INSERT
// statements.
func CivilTimeString(t civil.Time) string {
if t.Nanosecond == 0 {
return t.String()
}
micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
t.Nanosecond = 0
return t.String() + fmt.Sprintf(".%06d", micro)
}
// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible
// with BigQuery SQL. It separates the date and time with a space, and formats the time
// with CivilTimeString.
//
// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT
// statements.
func CivilDateTimeString(dt civil.DateTime) string {
return dt.Date.String() + " " + CivilTimeString(dt.Time)
}
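// Example (sketch): a civil.Time of 12:30:00 with Nanosecond == 600 rounds up
// to the nearest microsecond, so CivilTimeString yields "12:30:00.000001", and
// a civil.DateTime on 2017-01-26 yields "2017-01-26 12:30:00.000001".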
// parseCivilDateTime parses a date-time represented in a BigQuery SQL
// compatible format and returns a civil.DateTime.
func parseCivilDateTime(s string) (civil.DateTime, error) {
parts := strings.Fields(s)
if len(parts) != 2 {
return civil.DateTime{}, fmt.Errorf("bigquery: bad DATETIME value %q", s)
}
return civil.ParseDateTime(parts[0] + "T" + parts[1])
}
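// E.g. (sketch): parseCivilDateTime("2017-01-26 12:30:00") is equivalent to
// civil.ParseDateTime("2017-01-26T12:30:00"); any other number of
// space-separated fields is rejected as a bad DATETIME value.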
const (
// NumericPrecisionDigits is the maximum number of digits in a NUMERIC value.
NumericPrecisionDigits = 38
// NumericScaleDigits is the maximum number of digits after the decimal point in a NUMERIC value.
NumericScaleDigits = 9
)
// NumericString returns a string representing a *big.Rat in a format compatible
// with BigQuery SQL. It returns a floating-point literal with 9 digits
// after the decimal point.
func NumericString(r *big.Rat) string {
return r.FloatString(NumericScaleDigits)
}
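// Example (sketch): NumericString(big.NewRat(1, 3)) returns "0.333333333",
// i.e. the value rendered with NumericScaleDigits (9) digits after the point.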
// convertRows converts a series of TableRows into a series of Value slices. // convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the // schema is used to interpret the data from rows; its length must match the
// length of each row. // length of each row.
@@ -164,7 +841,6 @@ func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, erro
for i, cell := range record {
// each cell contains a single entry, keyed by "v"
val := cell.(map[string]interface{})["v"]
fs := schema[i]
v, err := convertValue(val, fs.Type, fs.Schema)
if err != nil {
@@ -180,16 +856,37 @@ func convertBasicType(val string, typ FieldType) (Value, error) {
switch typ {
case StringFieldType:
return val, nil
case BytesFieldType:
return base64.StdEncoding.DecodeString(val)
case IntegerFieldType:
-return strconv.Atoi(val)
return strconv.ParseInt(val, 10, 64)
case FloatFieldType:
return strconv.ParseFloat(val, 64)
case BooleanFieldType:
return strconv.ParseBool(val)
case TimestampFieldType:
f, err := strconv.ParseFloat(val, 64)
-return Value(time.Unix(0, int64(f*1e9))), err
if err != nil {
return nil, err
}
secs := math.Trunc(f)
nanos := (f - secs) * 1e9
return Value(time.Unix(int64(secs), int64(nanos)).UTC()), nil
case DateFieldType:
return civil.ParseDate(val)
case TimeFieldType:
return civil.ParseTime(val)
case DateTimeFieldType:
return civil.ParseDateTime(val)
case NumericFieldType:
r, ok := (&big.Rat{}).SetString(val)
if !ok {
return nil, fmt.Errorf("bigquery: invalid NUMERIC value %q", val)
}
return Value(r), nil
case GeographyFieldType:
return val, nil
default:
-return nil, errors.New("unrecognized type")
return nil, fmt.Errorf("unrecognized type: %s", typ)
}
}
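// E.g. (sketch): val == "1480000000.5" for a TIMESTAMP column parses to
// time.Unix(1480000000, 500000000).UTC(); splitting whole seconds from the
// fractional part avoids the precision loss of multiplying the float by 1e9.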



@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,33 +15,367 @@
package bigtable
import (
-"reflect"
"context"
"fmt"
"math"
"sort"
"strings"
"testing"
"time"
-"cloud.google.com/go/bigtable/bttest"
-"golang.org/x/net/context"
-"google.golang.org/api/option"
-"google.golang.org/grpc"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto"
"google.golang.org/api/iterator"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
)
func TestAdminIntegration(t *testing.T) {
-srv, err := bttest.NewServer("127.0.0.1:0")
testEnv, err := NewIntegrationEnv()
if err != nil {
-t.Fatal(err)
t.Fatalf("IntegrationEnv: %v", err)
}
-defer srv.Close()
-t.Logf("bttest.Server running on %s", srv.Addr)
defer testEnv.Close()
-ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
timeout := 2 * time.Second
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
}
ctx, _ := context.WithTimeout(context.Background(), timeout)
-conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
adminClient, err := testEnv.NewAdminClient()
if err != nil {
-t.Fatalf("grpc.Dial: %v", err)
t.Fatalf("NewAdminClient: %v", err)
}
defer adminClient.Close()
iAdminClient, err := testEnv.NewInstanceAdminClient()
if err != nil {
t.Fatalf("NewInstanceAdminClient: %v", err)
}
if iAdminClient != nil {
defer iAdminClient.Close()
iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
if err != nil {
t.Errorf("InstanceInfo: %v", err)
}
if iInfo.Name != adminClient.instance {
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
}
}
-adminClient, err := NewAdminClient(ctx, "proj", "instance", option.WithGRPCConn(conn))
list := func() []string {
tbls, err := adminClient.Tables(ctx)
if err != nil {
t.Fatalf("Fetching list of tables: %v", err)
}
sort.Strings(tbls)
return tbls
}
containsAll := func(got, want []string) bool {
gotSet := make(map[string]bool)
for _, s := range got {
gotSet[s] = true
}
for _, s := range want {
if !gotSet[s] {
return false
}
}
return true
}
defer adminClient.DeleteTable(ctx, "mytable")
if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
t.Fatalf("Creating table: %v", err)
}
defer adminClient.DeleteTable(ctx, "myothertable")
if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
t.Fatalf("Creating table: %v", err)
}
if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) {
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
}
must(adminClient.WaitForReplication(ctx, "mytable"))
if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
t.Fatalf("Deleting table: %v", err)
}
tables := list()
if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
}
if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) {
t.Errorf("adminClient.Tables return %#v. unwanted %#v", got, unwanted)
}
tblConf := TableConf{
TableID: "conftable",
Families: map[string]GCPolicy{
"fam1": MaxVersionsPolicy(1),
"fam2": MaxVersionsPolicy(2),
},
}
if err := adminClient.CreateTableFromConf(ctx, &tblConf); err != nil {
t.Fatalf("Creating table from TableConf: %v", err)
}
defer adminClient.DeleteTable(ctx, tblConf.TableID)
tblInfo, err := adminClient.TableInfo(ctx, tblConf.TableID)
if err != nil {
t.Fatalf("Getting table info: %v", err)
}
sort.Strings(tblInfo.Families)
wantFams := []string{"fam1", "fam2"}
if !testutil.Equal(tblInfo.Families, wantFams) {
t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams)
}
// Populate mytable and drop row ranges
if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil {
t.Fatalf("Creating column family: %v", err)
}
client, err := testEnv.NewClient()
if err != nil {
t.Fatalf("NewClient: %v", err)
}
defer client.Close()
tbl := client.Open("mytable")
prefixes := []string{"a", "b", "c"}
for _, prefix := range prefixes {
for i := 0; i < 5; i++ {
mut := NewMutation()
mut.Set("cf", "col", 1000, []byte("1"))
if err := tbl.Apply(ctx, fmt.Sprintf("%v-%v", prefix, i), mut); err != nil {
t.Fatalf("Mutating row: %v", err)
}
}
}
if err = adminClient.DropRowRange(ctx, "mytable", "a"); err != nil {
t.Errorf("DropRowRange a: %v", err)
}
if err = adminClient.DropRowRange(ctx, "mytable", "c"); err != nil {
t.Errorf("DropRowRange c: %v", err)
}
if err = adminClient.DropRowRange(ctx, "mytable", "x"); err != nil {
t.Errorf("DropRowRange x: %v", err)
}
var gotRowCount int
must(tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
gotRowCount++
if !strings.HasPrefix(row.Key(), "b") {
t.Errorf("Invalid row after dropping range: %v", row)
}
return true
}))
if gotRowCount != 5 {
t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5)
}
}
func TestInstanceUpdate(t *testing.T) {
testEnv, err := NewIntegrationEnv()
if err != nil {
t.Fatalf("IntegrationEnv: %v", err)
}
defer testEnv.Close()
timeout := 2 * time.Second
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
adminClient, err := testEnv.NewAdminClient()
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}
defer adminClient.Close()
iAdminClient, err := testEnv.NewInstanceAdminClient()
if err != nil {
t.Fatalf("NewInstanceAdminClient: %v", err)
}
if iAdminClient == nil {
return
}
defer iAdminClient.Close()
iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
if err != nil {
t.Errorf("InstanceInfo: %v", err)
}
if iInfo.Name != adminClient.instance {
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
}
if iInfo.DisplayName != adminClient.instance {
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
}
const numNodes = 4
// update cluster nodes
if err := iAdminClient.UpdateCluster(ctx, adminClient.instance, testEnv.Config().Cluster, int32(numNodes)); err != nil {
t.Errorf("UpdateCluster: %v", err)
}
// get cluster after updating
cis, err := iAdminClient.GetCluster(ctx, adminClient.instance, testEnv.Config().Cluster)
if err != nil {
t.Errorf("GetCluster %v", err)
}
if cis.ServeNodes != int(numNodes) {
t.Errorf("ServeNodes returned %d, want %d", cis.ServeNodes, int(numNodes))
}
}
func TestAdminSnapshotIntegration(t *testing.T) {
testEnv, err := NewIntegrationEnv()
if err != nil {
t.Fatalf("IntegrationEnv: %v", err)
}
defer testEnv.Close()
if !testEnv.Config().UseProd {
t.Skip("emulator doesn't support snapshots")
}
timeout := 2 * time.Second
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
}
ctx, _ := context.WithTimeout(context.Background(), timeout)
adminClient, err := testEnv.NewAdminClient()
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}
defer adminClient.Close()
table := testEnv.Config().Table
cluster := testEnv.Config().Cluster
list := func(cluster string) ([]*SnapshotInfo, error) {
infos := []*SnapshotInfo(nil)
it := adminClient.Snapshots(ctx, cluster)
for {
s, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return nil, err
}
infos = append(infos, s)
}
return infos, err
}
// Delete the table at the end of the test. Schedule ahead of time
// in case the client fails
defer adminClient.DeleteTable(ctx, table)
if err := adminClient.CreateTable(ctx, table); err != nil {
t.Fatalf("Creating table: %v", err)
}
// Precondition: no snapshots
snapshots, err := list(cluster)
if err != nil {
t.Fatalf("Initial snapshot list: %v", err)
}
if got, want := len(snapshots), 0; got != want {
t.Fatalf("Initial snapshot list len: %d, want: %d", got, want)
}
// Create snapshot
defer adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot")
if err = adminClient.SnapshotTable(ctx, table, cluster, "mysnapshot", 5*time.Hour); err != nil {
t.Fatalf("Creating snaphot: %v", err)
}
// List snapshot
snapshots, err = list(cluster)
if err != nil {
t.Fatalf("Listing snapshots: %v", err)
}
if got, want := len(snapshots), 1; got != want {
t.Fatalf("Listing snapshot count: %d, want: %d", got, want)
}
if got, want := snapshots[0].Name, "mysnapshot"; got != want {
t.Fatalf("Snapshot name: %s, want: %s", got, want)
}
if got, want := snapshots[0].SourceTable, table; got != want {
t.Fatalf("Snapshot SourceTable: %s, want: %s", got, want)
}
if got, want := snapshots[0].DeleteTime, snapshots[0].CreateTime.Add(5*time.Hour); math.Abs(got.Sub(want).Minutes()) > 1 {
t.Fatalf("Snapshot DeleteTime: %s, want: %s", got, want)
}
// Get snapshot
snapshot, err := adminClient.SnapshotInfo(ctx, cluster, "mysnapshot")
if err != nil {
t.Fatalf("SnapshotInfo: %v", snapshot)
}
if got, want := *snapshot, *snapshots[0]; got != want {
t.Fatalf("SnapshotInfo: %v, want: %v", got, want)
}
// Restore
restoredTable := table + "-restored"
defer adminClient.DeleteTable(ctx, restoredTable)
if err = adminClient.CreateTableFromSnapshot(ctx, restoredTable, cluster, "mysnapshot"); err != nil {
t.Fatalf("CreateTableFromSnapshot: %v", err)
}
if _, err := adminClient.TableInfo(ctx, restoredTable); err != nil {
t.Fatalf("Restored TableInfo: %v", err)
}
// Delete snapshot
if err = adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot"); err != nil {
t.Fatalf("DeleteSnapshot: %v", err)
}
snapshots, err = list(cluster)
if err != nil {
t.Fatalf("List after Delete: %v", err)
}
if got, want := len(snapshots), 0; got != want {
t.Fatalf("List after delete len: %d, want: %d", got, want)
}
}
func TestGranularity(t *testing.T) {
testEnv, err := NewIntegrationEnv()
if err != nil {
t.Fatalf("IntegrationEnv: %v", err)
}
defer testEnv.Close()
timeout := 2 * time.Second
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
}
ctx, _ := context.WithTimeout(context.Background(), timeout)
adminClient, err := testEnv.NewAdminClient()
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}
@@ -55,19 +389,190 @@ func TestAdminIntegration(t *testing.T) {
sort.Strings(tbls)
return tbls
}
containsAll := func(got, want []string) bool {
gotSet := make(map[string]bool)
for _, s := range got {
gotSet[s] = true
}
for _, s := range want {
if !gotSet[s] {
return false
}
}
return true
}
defer adminClient.DeleteTable(ctx, "mytable")
if err := adminClient.CreateTable(ctx, "mytable"); err != nil { if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
t.Fatalf("Creating table: %v", err) t.Fatalf("Creating table: %v", err)
} }
if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
t.Fatalf("Creating table: %v", err) tables := list()
} if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
if got, want := list(), []string{"myothertable", "mytable"}; !reflect.DeepEqual(got, want) {
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
} }
if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
t.Fatalf("Deleting table: %v", err) // calling ModifyColumnFamilies to check the granularity of table
prefix := adminClient.instancePrefix()
req := &btapb.ModifyColumnFamiliesRequest{
Name: prefix + "/tables/" + "mytable",
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
Id: "cf",
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
}},
} }
if got, want := list(), []string{"mytable"}; !reflect.DeepEqual(got, want) { table, err := adminClient.tClient.ModifyColumnFamilies(ctx, req)
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) if err != nil {
t.Fatalf("Creating column family: %v", err)
}
if table.Granularity != btapb.Table_TimestampGranularity(btapb.Table_MILLIS) {
t.Errorf("ModifyColumnFamilies returned granularity %#v, want %#v", table.Granularity, btapb.Table_TimestampGranularity(btapb.Table_MILLIS))
} }
} }
func TestInstanceAdminClient_AppProfile(t *testing.T) {
testEnv, err := NewIntegrationEnv()
if err != nil {
t.Fatalf("IntegrationEnv: %v", err)
}
defer testEnv.Close()
timeout := 2 * time.Second
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
adminClient, err := testEnv.NewAdminClient()
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}
defer adminClient.Close()
iAdminClient, err := testEnv.NewInstanceAdminClient()
if err != nil {
t.Fatalf("NewInstanceAdminClient: %v", err)
}
if iAdminClient == nil {
return
}
defer iAdminClient.Close()
profile := ProfileConf{
ProfileID: "app_profile1",
InstanceID: adminClient.instance,
ClusterID: testEnv.Config().Cluster,
Description: "creating new app profile 1",
RoutingPolicy: SingleClusterRouting,
}
createdProfile, err := iAdminClient.CreateAppProfile(ctx, profile)
if err != nil {
t.Fatalf("Creating app profile: %v", err)
}
gotProfile, err := iAdminClient.GetAppProfile(ctx, adminClient.instance, "app_profile1")
if err != nil {
t.Fatalf("Get app profile: %v", err)
}
if !proto.Equal(createdProfile, gotProfile) {
t.Fatalf("created profile: %s, got profile: %s", createdProfile.Name, gotProfile.Name)
}
list := func(instanceID string) ([]*btapb.AppProfile, error) {
profiles := []*btapb.AppProfile(nil)
it := iAdminClient.ListAppProfiles(ctx, instanceID)
for {
s, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return nil, err
}
profiles = append(profiles, s)
}
return profiles, err
}
profiles, err := list(adminClient.instance)
if err != nil {
t.Fatalf("List app profile: %v", err)
}
if got, want := len(profiles), 1; got != want {
t.Fatalf("Initial app profile list len: %d, want: %d", got, want)
}
for _, test := range []struct {
desc string
uattrs ProfileAttrsToUpdate
want *btapb.AppProfile // nil means error
}{
{
desc: "empty update",
uattrs: ProfileAttrsToUpdate{},
want: nil,
},
{
desc: "empty description update",
uattrs: ProfileAttrsToUpdate{Description: ""},
want: &btapb.AppProfile{
Name: gotProfile.Name,
Description: "",
RoutingPolicy: gotProfile.RoutingPolicy,
Etag: gotProfile.Etag},
},
{
desc: "routing update",
uattrs: ProfileAttrsToUpdate{
RoutingPolicy: SingleClusterRouting,
ClusterID: testEnv.Config().Cluster,
},
want: &btapb.AppProfile{
Name: gotProfile.Name,
Description: "",
Etag: gotProfile.Etag,
RoutingPolicy: &btapb.AppProfile_SingleClusterRouting_{
SingleClusterRouting: &btapb.AppProfile_SingleClusterRouting{
ClusterId: testEnv.Config().Cluster,
}},
},
},
} {
err = iAdminClient.UpdateAppProfile(ctx, adminClient.instance, "app_profile1", test.uattrs)
if err != nil {
if test.want != nil {
t.Errorf("%s: %v", test.desc, err)
}
continue
}
if err == nil && test.want == nil {
t.Errorf("%s: got nil, want error", test.desc)
continue
}
got, _ := iAdminClient.GetAppProfile(ctx, adminClient.instance, "app_profile1")
if !proto.Equal(got, test.want) {
t.Fatalf("%s : got profile : %v, want profile: %v", test.desc, gotProfile, test.want)
}
}
err = iAdminClient.DeleteAppProfile(ctx, adminClient.instance, "app_profile1")
if err != nil {
t.Fatalf("Delete app profile: %v", err)
}
}


@@ -1,5 +1,5 @@
/*
-Copyright 2015 Google Inc. All Rights Reserved.
Copyright 2015 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@ limitations under the License.
package bigtable // import "cloud.google.com/go/bigtable"
import (
"context"
"errors"
"fmt"
"io"
@@ -25,14 +26,15 @@ import (
"cloud.google.com/go/bigtable/internal/gax" "cloud.google.com/go/bigtable/internal/gax"
btopt "cloud.google.com/go/bigtable/internal/option" btopt "cloud.google.com/go/bigtable/internal/option"
"cloud.google.com/go/internal/trace"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/option" "google.golang.org/api/option"
"google.golang.org/api/transport" gtransport "google.golang.org/api/transport/grpc"
btpb "google.golang.org/genproto/googleapis/bigtable/v2" btpb "google.golang.org/genproto/googleapis/bigtable/v2"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
) )
const prodAddr = "bigtable.googleapis.com:443"
@@ -44,24 +46,48 @@ type Client struct {
     conn              *grpc.ClientConn
     client            btpb.BigtableClient
     project, instance string
+    appProfile        string
+}
+
+// ClientConfig has configurations for the client.
+type ClientConfig struct {
+    // The id of the app profile to associate with all data operations sent from this client.
+    // If unspecified, the default app profile for the instance will be used.
+    AppProfile string
 }
 // NewClient creates a new Client for a given project and instance.
+// The default ClientConfig will be used.
 func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
+    return NewClientWithConfig(ctx, project, instance, ClientConfig{}, opts...)
+}
+
+// NewClientWithConfig creates a new client with the given config.
+func NewClientWithConfig(ctx context.Context, project, instance string, config ClientConfig, opts ...option.ClientOption) (*Client, error) {
     o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
     if err != nil {
         return nil, err
     }
+    // Default to a small connection pool that can be overridden.
+    o = append(o,
+        option.WithGRPCConnectionPool(4),
+        // Set the max size to correspond to server-side limits.
+        option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1<<28), grpc.MaxCallRecvMsgSize(1<<28))),
+        // TODO(grpc/grpc-go#1388) using connection pool without WithBlock
+        // can cause RPCs to fail randomly. We can delete this after the issue is fixed.
+        option.WithGRPCDialOption(grpc.WithBlock()))
     o = append(o, opts...)
-    conn, err := transport.DialGRPC(ctx, o...)
+    conn, err := gtransport.Dial(ctx, o...)
     if err != nil {
         return nil, fmt.Errorf("dialing: %v", err)
     }
     return &Client{
         conn:     conn,
         client:   btpb.NewBigtableClient(conn),
         project:  project,
         instance: instance,
+        appProfile: config.AppProfile,
     }, nil
 }
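A minimal sketch (placeholder project, instance, profile and table names) of the new NewClientWithConfig entry point: every request issued through the returned client carries the configured app profile id.

package main

import (
    "context"
    "fmt"
    "log"

    "cloud.google.com/go/bigtable"
)

func main() {
    ctx := context.Background()
    client, err := bigtable.NewClientWithConfig(ctx, "my-project", "my-instance",
        bigtable.ClientConfig{AppProfile: "batch-profile"})
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    tbl := client.Open("my-table")
    row, err := tbl.ReadRow(ctx, "row-1") // sent with AppProfileId "batch-profile"
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(row), "families read")
}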
@@ -117,14 +143,24 @@ func (c *Client) Open(table string) *Table {
 //
 // By default, the yielded rows will contain all values in all cells.
 // Use RowFilter to limit the cells returned.
-func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
-    ctx = metadata.NewContext(ctx, t.md)
+func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) (err error) {
+    ctx = mergeOutgoingMetadata(ctx, t.md)
+    ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigtable.ReadRows")
+    defer func() { trace.EndSpan(ctx, err) }()
     var prevRowKey string
-    err := gax.Invoke(ctx, func(ctx context.Context) error {
+    attrMap := make(map[string]interface{})
+    err = gax.Invoke(ctx, func(ctx context.Context) error {
+        if !arg.valid() {
+            // Empty row set, no need to make an API call.
+            // NOTE: we must return early if arg == RowList{} because reading
+            // an empty RowList from bigtable returns all rows from that table.
+            return nil
+        }
         req := &btpb.ReadRowsRequest{
             TableName: t.c.fullTableName(t.table),
-            Rows:      arg.proto(),
+            AppProfileId: t.c.appProfile,
+            Rows:         arg.proto(),
         }
         for _, opt := range opts {
             opt.set(req)
@@ -132,6 +168,7 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts
         ctx, cancel := context.WithCancel(ctx) // for aborting the stream
         defer cancel()
+        startTime := time.Now()
         stream, err := t.c.client.ReadRows(ctx, req)
         if err != nil {
             return err
@@ -145,8 +182,15 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts
             if err != nil {
                 // Reset arg for next Invoke call.
                 arg = arg.retainRowsAfter(prevRowKey)
+                attrMap["rowKey"] = prevRowKey
+                attrMap["error"] = err.Error()
+                attrMap["time_secs"] = time.Since(startTime).Seconds()
+                trace.TracePrintf(ctx, attrMap, "Retry details in ReadRows")
                 return err
             }
+            attrMap["time_secs"] = time.Since(startTime).Seconds()
+            attrMap["rowCount"] = len(res.Chunks)
+            trace.TracePrintf(ctx, attrMap, "Details in ReadRows")
             for _, cc := range res.Chunks {
                 row, err := cr.Process(cc)
@@ -208,13 +252,17 @@ func decodeFamilyProto(r Row, row string, f *btpb.Family) {
     }
 }
-// RowSet is a set of rows to be read. It is satisfied by RowList and RowRange.
+// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
+// The serialized size of the RowSet must be no larger than 1MiB.
 type RowSet interface {
     proto() *btpb.RowSet
     // retainRowsAfter returns a new RowSet that does not include the
     // given row key or any row key lexicographically less than it.
     retainRowsAfter(lastRowKey string) RowSet
+    // Valid reports whether this set can cover at least one row.
+    valid() bool
 }
 // RowList is a sequence of row keys.
@@ -238,6 +286,10 @@ func (r RowList) retainRowsAfter(lastRowKey string) RowSet {
     return retryKeys
 }
+func (r RowList) valid() bool {
+    return len(r) > 0
+}
 // A RowRange is a half-open interval [Start, Limit) encompassing
 // all the rows with keys at least as large as Start, and less than Limit.
 // (Bigtable string comparison is the same as Go's.)
@@ -275,8 +327,9 @@ func (r RowRange) String() string {
 }
 func (r RowRange) proto() *btpb.RowSet {
-    var rr *btpb.RowRange
-    rr = &btpb.RowRange{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)}}
+    rr := &btpb.RowRange{
+        StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)},
+    }
     if !r.Unbounded() {
         rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)}
     }
@@ -284,6 +337,9 @@ func (r RowRange) proto() *btpb.RowSet {
 }
 func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
+    if lastRowKey == "" || lastRowKey < r.start {
+        return r
+    }
     // Set the beginning of the range to the row after the last scanned.
     start := lastRowKey + "\x00"
     if r.Unbounded() {
@@ -292,12 +348,49 @@ func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
     return NewRange(start, r.limit)
 }
-// SingleRow returns a RowRange for reading a single row.
-func SingleRow(row string) RowRange {
-    return RowRange{
-        start: row,
-        limit: row + "\x00",
+func (r RowRange) valid() bool {
+    return r.Unbounded() || r.start < r.limit
+}
+
+// RowRangeList is a sequence of RowRanges representing the union of the ranges.
+type RowRangeList []RowRange
+
+func (r RowRangeList) proto() *btpb.RowSet {
+    ranges := make([]*btpb.RowRange, len(r))
+    for i, rr := range r {
+        // RowRange.proto() returns a RowSet with a single element RowRange array
+        ranges[i] = rr.proto().RowRanges[0]
     }
+    return &btpb.RowSet{RowRanges: ranges}
+}
+
+func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet {
+    if lastRowKey == "" {
+        return r
+    }
+    // Return a list of any range that has not yet been completely processed
+    var ranges RowRangeList
+    for _, rr := range r {
+        retained := rr.retainRowsAfter(lastRowKey)
+        if retained.valid() {
+            ranges = append(ranges, retained.(RowRange))
+        }
+    }
+    return ranges
+}
+
+func (r RowRangeList) valid() bool {
+    for _, rr := range r {
+        if rr.valid() {
+            return true
+        }
+    }
+    return false
+}
+
+// SingleRow returns a RowSet for reading a single row.
+func SingleRow(row string) RowSet {
+    return RowList{row}
 }
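A minimal sketch (no RPCs issued, placeholder keys) of the RowSet shapes that ReadRows now accepts, including the RowRangeList union type added above:

package main

import (
    "fmt"

    "cloud.google.com/go/bigtable"
)

func main() {
    sets := []bigtable.RowSet{
        bigtable.RowList{"alpha", "beta"}, // explicit keys
        bigtable.NewRange("a", "m"),       // half-open range [a, m)
        bigtable.RowRangeList{ // union of ranges
            bigtable.NewRange("a", "c"),
            bigtable.NewRange("x", "z"),
        },
        bigtable.SingleRow("alpha"), // now a one-element RowList
    }
    fmt.Println(len(sets), "row sets ready to pass to Table.ReadRows")
}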
 // PrefixRange returns a RowRange consisting of all keys starting with the prefix.
@@ -341,6 +434,9 @@ type ReadOption interface {
 }
 // RowFilter returns a ReadOption that applies f to the contents of read rows.
+//
+// If multiple RowFilters are provided, only the last is used. To combine filters,
+// use ChainFilters or InterleaveFilters instead.
 func RowFilter(f Filter) ReadOption { return rowFilter{f} }
 type rowFilter struct{ f Filter }
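A minimal sketch (placeholder project, instance, table, family names) of combining filters with ChainFilters rather than passing multiple RowFilter options, per the clarified doc comment above:

package main

import (
    "context"
    "fmt"
    "log"

    "cloud.google.com/go/bigtable"
)

func main() {
    ctx := context.Background()
    client, err := bigtable.NewClient(ctx, "my-project", "my-instance")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()
    tbl := client.Open("my-table")

    // One combined filter: only the "stats" family, newest cell per column.
    filter := bigtable.ChainFilters(bigtable.FamilyFilter("stats"), bigtable.LatestNFilter(1))
    err = tbl.ReadRows(ctx, bigtable.PrefixRange("user#"), func(r bigtable.Row) bool {
        fmt.Println(r.Key())
        return true // keep scanning
    }, bigtable.RowFilter(filter))
    if err != nil {
        log.Fatal(err)
    }
}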
@@ -368,21 +464,27 @@ func mutationsAreRetryable(muts []*btpb.Mutation) bool {
     return true
 }
-// Apply applies a Mutation to a specific row.
-func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
-    ctx = metadata.NewContext(ctx, t.md)
+const maxMutations = 100000
+
+// Apply mutates a row atomically. A mutation must contain at least one
+// operation and at most 100000 operations.
+func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) (err error) {
+    ctx = mergeOutgoingMetadata(ctx, t.md)
     after := func(res proto.Message) {
         for _, o := range opts {
             o.after(res)
         }
     }
+    ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigtable/Apply")
+    defer func() { trace.EndSpan(ctx, err) }()
     var callOptions []gax.CallOption
     if m.cond == nil {
         req := &btpb.MutateRowRequest{
             TableName: t.c.fullTableName(t.table),
-            RowKey:    []byte(row),
-            Mutations: m.ops,
+            AppProfileId: t.c.appProfile,
+            RowKey:       []byte(row),
+            Mutations:    m.ops,
         }
         if mutationsAreRetryable(m.ops) {
             callOptions = retryOptions
@@ -401,20 +503,27 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl
     req := &btpb.CheckAndMutateRowRequest{
         TableName:       t.c.fullTableName(t.table),
+        AppProfileId:    t.c.appProfile,
         RowKey:          []byte(row),
         PredicateFilter: m.cond.proto(),
     }
     if m.mtrue != nil {
+        if m.mtrue.cond != nil {
+            return errors.New("bigtable: conditional mutations cannot be nested")
+        }
         req.TrueMutations = m.mtrue.ops
     }
     if m.mfalse != nil {
+        if m.mfalse.cond != nil {
+            return errors.New("bigtable: conditional mutations cannot be nested")
+        }
         req.FalseMutations = m.mfalse.ops
     }
     if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
         callOptions = retryOptions
     }
     var cmRes *btpb.CheckAndMutateRowResponse
-    err := gax.Invoke(ctx, func(ctx context.Context) error {
+    err = gax.Invoke(ctx, func(ctx context.Context) error {
         var err error
         cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
         return err
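A minimal sketch (placeholder IDs and names) of a conditional write through Apply, which exercises the CheckAndMutateRow path above; note that the new code rejects nested conditional mutations:

package main

import (
    "context"
    "log"

    "cloud.google.com/go/bigtable"
)

func main() {
    ctx := context.Background()
    client, err := bigtable.NewClient(ctx, "my-project", "my-instance")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()
    tbl := client.Open("my-table")

    onMatch := bigtable.NewMutation()
    onMatch.Set("stats", "visits", bigtable.ServerTime, []byte("1"))
    // Apply onMatch only when the row already has a stats:visits cell.
    cond := bigtable.NewCondMutation(
        bigtable.ChainFilters(bigtable.FamilyFilter("stats"), bigtable.ColumnFilter("visits")),
        onMatch, nil)
    if err := tbl.Apply(ctx, "row-1", cond); err != nil {
        log.Fatal(err)
    }
}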
@@ -468,25 +577,20 @@ func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
 }
 // Set sets a value in a specified column, with the given timestamp.
-// The timestamp will be truncated to millisecond resolution.
+// The timestamp will be truncated to millisecond granularity.
 // A timestamp of ServerTime means to use the server timestamp.
 func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
-    if ts != ServerTime {
-        // Truncate to millisecond resolution, since that's the default table config.
-        // TODO(dsymonds): Provide a way to override this behaviour.
-        ts -= ts % 1000
-    }
-    m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
+    m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
         FamilyName:      family,
         ColumnQualifier: []byte(column),
-        TimestampMicros: int64(ts),
+        TimestampMicros: int64(ts.TruncateToMilliseconds()),
         Value:           value,
     }}})
 }
 // DeleteCellsInColumn will delete all the cells whose columns are family:column.
 func (m *Mutation) DeleteCellsInColumn(family, column string) {
-    m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
+    m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
         FamilyName:      family,
         ColumnQualifier: []byte(column),
     }}})
@@ -495,27 +599,28 @@ func (m *Mutation) DeleteCellsInColumn(family, column string) {
 // DeleteTimestampRange deletes all cells whose columns are family:column
 // and whose timestamps are in the half-open interval [start, end).
 // If end is zero, it will be interpreted as infinity.
+// The timestamps will be truncated to millisecond granularity.
 func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
-    m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
+    m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
         FamilyName:      family,
         ColumnQualifier: []byte(column),
         TimeRange: &btpb.TimestampRange{
-            StartTimestampMicros: int64(start),
-            EndTimestampMicros:   int64(end),
+            StartTimestampMicros: int64(start.TruncateToMilliseconds()),
+            EndTimestampMicros:   int64(end.TruncateToMilliseconds()),
         },
     }}})
 }
 // DeleteCellsInFamily will delete all the cells whose columns are family:*.
 func (m *Mutation) DeleteCellsInFamily(family string) {
-    m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{
+    m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{DeleteFromFamily: &btpb.Mutation_DeleteFromFamily{
         FamilyName: family,
     }}})
 }
 // DeleteRow deletes the entire row.
 func (m *Mutation) DeleteRow() {
-    m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}})
+    m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{DeleteFromRow: &btpb.Mutation_DeleteFromRow{}}})
 }
 // entryErr is a container that combines an entry with the error that was returned for it.
@@ -525,7 +630,7 @@ type entryErr struct {
     Err   error
 }
-// ApplyBulk applies multiple Mutations.
+// ApplyBulk applies multiple Mutations, up to a maximum of 100,000.
 // Each mutation is individually applied atomically,
 // but the set of mutations may be applied in any order.
 //
@@ -535,8 +640,11 @@ type entryErr struct {
 // will correspond to the relevant rowKeys/muts arguments.
 //
 // Conditional mutations cannot be applied in bulk and providing one will result in an error.
-func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
-    ctx = metadata.NewContext(ctx, t.md)
+func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) (errs []error, err error) {
+    ctx = mergeOutgoingMetadata(ctx, t.md)
+    ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigtable/ApplyBulk")
+    defer func() { trace.EndSpan(ctx, err) }()
     if len(rowKeys) != len(muts) {
         return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
     }
@@ -550,31 +658,31 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio
         origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
     }
-    // entries will be reduced after each invocation to just what needs to be retried.
-    entries := make([]*entryErr, len(rowKeys))
-    copy(entries, origEntries)
-    err := gax.Invoke(ctx, func(ctx context.Context) error {
-        err := t.doApplyBulk(ctx, entries, opts...)
+    for _, group := range groupEntries(origEntries, maxMutations) {
+        attrMap := make(map[string]interface{})
+        err = gax.Invoke(ctx, func(ctx context.Context) error {
+            attrMap["rowCount"] = len(group)
+            trace.TracePrintf(ctx, attrMap, "Row count in ApplyBulk")
+            err := t.doApplyBulk(ctx, group, opts...)
+            if err != nil {
+                // We want to retry the entire request with the current group
+                return err
+            }
+            group = t.getApplyBulkRetries(group)
+            if len(group) > 0 && len(idempotentRetryCodes) > 0 {
+                // We have at least one mutation that needs to be retried.
+                // Return an arbitrary error that is retryable according to callOptions.
+                return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
+            }
+            return nil
+        }, retryOptions...)
         if err != nil {
-            // We want to retry the entire request with the current entries
-            return err
+            return nil, err
         }
-        entries = t.getApplyBulkRetries(entries)
-        if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
-            // We have at least one mutation that needs to be retried.
-            // Return an arbitrary error that is retryable according to callOptions.
-            return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
-        }
-        return nil
-    }, retryOptions...)
-    if err != nil {
-        return nil, err
     }
-    // Accumulate all of the errors into an array to return, interspersed with nils for successful
+    // All the errors are accumulated into an array and returned, interspersed with nils for successful
     // entries. The absence of any errors means we should return nil.
-    var errs []error
     var foundErr bool
     for _, entry := range origEntries {
         if entry.Err != nil {
@@ -614,8 +722,9 @@ func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...
         entries[i] = entryErr.Entry
     }
     req := &btpb.MutateRowsRequest{
         TableName: t.c.fullTableName(t.table),
-        Entries:   entries,
+        AppProfileId: t.c.appProfile,
+        Entries:      entries,
     }
     stream, err := t.c.client.MutateRows(ctx, req)
     if err != nil {
@@ -631,11 +740,11 @@ func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...
         }
         for i, entry := range res.Entries {
-            status := entry.Status
-            if status.Code == int32(codes.OK) {
+            s := entry.Status
+            if s.Code == int32(codes.OK) {
                 entryErrs[i].Err = nil
             } else {
-                entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message)
+                entryErrs[i].Err = status.Errorf(codes.Code(s.Code), s.Message)
             }
         }
         after(res)
@@ -643,6 +752,32 @@ func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...
     return nil
 }
+// groupEntries groups entries into groups of a specified size without breaking up
+// individual entries.
+func groupEntries(entries []*entryErr, maxSize int) [][]*entryErr {
+    var (
+        res   [][]*entryErr
+        start int
+        gmuts int
+    )
+    addGroup := func(end int) {
+        if end-start > 0 {
+            res = append(res, entries[start:end])
+            start = end
+            gmuts = 0
+        }
+    }
+    for i, e := range entries {
+        emuts := len(e.Entry.Mutations)
+        if gmuts+emuts > maxSize {
+            addGroup(i)
+        }
+        gmuts += emuts
+    }
+    addGroup(len(entries))
+    return res
+}
 // Timestamp is in units of microseconds since 1 January 1970.
 type Timestamp int64
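With the grouping above, ApplyBulk transparently splits work so no single request exceeds maxMutations. A minimal caller-side sketch (placeholder IDs, table and family names); errs is parallel to rowKeys, and a nil entry means that row succeeded:

package main

import (
    "context"
    "log"

    "cloud.google.com/go/bigtable"
)

func main() {
    ctx := context.Background()
    client, err := bigtable.NewClient(ctx, "my-project", "my-instance")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()
    tbl := client.Open("my-table")

    var rowKeys []string
    var muts []*bigtable.Mutation
    for _, k := range []string{"row-1", "row-2", "row-3"} {
        m := bigtable.NewMutation()
        m.Set("stats", "state", bigtable.Now(), []byte("ok"))
        rowKeys = append(rowKeys, k)
        muts = append(muts, m)
    }
    errs, err := tbl.ApplyBulk(ctx, rowKeys, muts)
    if err != nil {
        log.Fatal(err)
    }
    for i, e := range errs {
        if e != nil {
            log.Printf("row %s failed: %v", rowKeys[i], e)
        }
    }
}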
@@ -659,19 +794,32 @@ func Now() Timestamp { return Time(time.Now()) }
 // Time converts a Timestamp into a time.Time.
 func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }
+// TruncateToMilliseconds truncates a Timestamp to millisecond granularity,
+// which is currently the only granularity supported.
+func (ts Timestamp) TruncateToMilliseconds() Timestamp {
+    if ts == ServerTime {
+        return ts
+    }
+    return ts - ts%1000
+}
 // ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
 // It returns the newly written cells.
 func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
-    ctx = metadata.NewContext(ctx, t.md)
+    ctx = mergeOutgoingMetadata(ctx, t.md)
     req := &btpb.ReadModifyWriteRowRequest{
         TableName: t.c.fullTableName(t.table),
-        RowKey:    []byte(row),
-        Rules:     m.ops,
+        AppProfileId: t.c.appProfile,
+        RowKey:       []byte(row),
+        Rules:        m.ops,
     }
     res, err := t.c.client.ReadModifyWriteRow(ctx, req)
     if err != nil {
         return nil, err
     }
+    if res.Row == nil {
+        return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil")
+    }
     r := make(Row)
     for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family
         decodeFamilyProto(r, row, fam)
@@ -700,7 +848,7 @@ func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
     m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
         FamilyName:      family,
         ColumnQualifier: []byte(column),
-        Rule:            &btpb.ReadModifyWriteRule_AppendValue{v},
+        Rule:            &btpb.ReadModifyWriteRule_AppendValue{AppendValue: v},
     })
 }
@@ -712,6 +860,52 @@ func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
     m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
         FamilyName:      family,
         ColumnQualifier: []byte(column),
-        Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{delta},
+        Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: delta},
     })
 }
+// mergeOutgoingMetadata returns a context populated by the existing outgoing metadata,
+// if any, joined with internal metadata.
+func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context {
+    mdCopy, _ := metadata.FromOutgoingContext(ctx)
+    return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md))
+}
+
+// SampleRowKeys returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of
+// the table of approximately equal size, which can be used to break up the data for distributed tasks like mapreduces.
+func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) {
+    ctx = mergeOutgoingMetadata(ctx, t.md)
+    var sampledRowKeys []string
+    err := gax.Invoke(ctx, func(ctx context.Context) error {
+        sampledRowKeys = nil
+        req := &btpb.SampleRowKeysRequest{
+            TableName:    t.c.fullTableName(t.table),
+            AppProfileId: t.c.appProfile,
+        }
+        ctx, cancel := context.WithCancel(ctx) // for aborting the stream
+        defer cancel()
+        stream, err := t.c.client.SampleRowKeys(ctx, req)
+        if err != nil {
+            return err
+        }
+        for {
+            res, err := stream.Recv()
+            if err == io.EOF {
+                break
+            }
+            if err != nil {
+                return err
+            }
+            key := string(res.RowKey)
+            if key == "" {
+                continue
+            }
+            sampledRowKeys = append(sampledRowKeys, key)
+        }
+        return nil
+    }, retryOptions...)
+    return sampledRowKeys, err
+}
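A minimal sketch (placeholder IDs and table name) of using the new SampleRowKeys to partition a table into shards for parallel workers; consecutive sample keys delimit roughly equal-sized sections:

package main

import (
    "context"
    "fmt"
    "log"

    "cloud.google.com/go/bigtable"
)

func main() {
    ctx := context.Background()
    client, err := bigtable.NewClient(ctx, "my-project", "my-instance")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()
    tbl := client.Open("my-table")

    keys, err := tbl.SampleRowKeys(ctx)
    if err != nil {
        log.Fatal(err)
    }
    // Each [prev, k) pair could be handed to a worker as NewRange(prev, k).
    prev := ""
    for _, k := range keys {
        fmt.Printf("shard [%q, %q)\n", prev, k)
        prev = k
    }
    fmt.Printf("last shard starts at %q\n", prev)
}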

File diff suppressed because it is too large


@@ -1,5 +1,5 @@
 /*
-Copyright 2016 Google Inc. All Rights Reserved.
+Copyright 2016 Google LLC
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,19 +16,19 @@ limitations under the License.
 package bttest_test
 import (
+    "context"
     "fmt"
     "log"
     "cloud.google.com/go/bigtable"
     "cloud.google.com/go/bigtable/bttest"
-    "golang.org/x/net/context"
     "google.golang.org/api/option"
     "google.golang.org/grpc"
 )
 func ExampleNewServer() {
-    srv, err := bttest.NewServer("127.0.0.1:0")
+    srv, err := bttest.NewServer("localhost:0")
     if err != nil {
         log.Fatalln(err)

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,106 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bttest
import (
"context"
"github.com/golang/protobuf/ptypes/empty"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
iampb "google.golang.org/genproto/googleapis/iam/v1"
"google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var _ btapb.BigtableInstanceAdminServer = (*server)(nil)
var errUnimplemented = status.Error(codes.Unimplemented, "unimplemented feature")
func (s *server) CreateInstance(ctx context.Context, req *btapb.CreateInstanceRequest) (*longrunning.Operation, error) {
return nil, errUnimplemented
}
func (s *server) GetInstance(ctx context.Context, req *btapb.GetInstanceRequest) (*btapb.Instance, error) {
return nil, errUnimplemented
}
func (s *server) ListInstances(ctx context.Context, req *btapb.ListInstancesRequest) (*btapb.ListInstancesResponse, error) {
return nil, errUnimplemented
}
func (s *server) UpdateInstance(ctx context.Context, req *btapb.Instance) (*btapb.Instance, error) {
return nil, errUnimplemented
}
func (s *server) PartialUpdateInstance(ctx context.Context, req *btapb.PartialUpdateInstanceRequest) (*longrunning.Operation, error) {
return nil, errUnimplemented
}
func (s *server) DeleteInstance(ctx context.Context, req *btapb.DeleteInstanceRequest) (*empty.Empty, error) {
return nil, errUnimplemented
}
func (s *server) CreateCluster(ctx context.Context, req *btapb.CreateClusterRequest) (*longrunning.Operation, error) {
return nil, errUnimplemented
}
func (s *server) GetCluster(ctx context.Context, req *btapb.GetClusterRequest) (*btapb.Cluster, error) {
return nil, errUnimplemented
}
func (s *server) ListClusters(ctx context.Context, req *btapb.ListClustersRequest) (*btapb.ListClustersResponse, error) {
return nil, errUnimplemented
}
func (s *server) UpdateCluster(ctx context.Context, req *btapb.Cluster) (*longrunning.Operation, error) {
return nil, errUnimplemented
}
func (s *server) DeleteCluster(ctx context.Context, req *btapb.DeleteClusterRequest) (*empty.Empty, error) {
return nil, errUnimplemented
}
func (s *server) CreateAppProfile(ctx context.Context, req *btapb.CreateAppProfileRequest) (*btapb.AppProfile, error) {
return nil, errUnimplemented
}
func (s *server) GetAppProfile(ctx context.Context, req *btapb.GetAppProfileRequest) (*btapb.AppProfile, error) {
return nil, errUnimplemented
}
func (s *server) ListAppProfiles(ctx context.Context, req *btapb.ListAppProfilesRequest) (*btapb.ListAppProfilesResponse, error) {
return nil, errUnimplemented
}
func (s *server) UpdateAppProfile(ctx context.Context, req *btapb.UpdateAppProfileRequest) (*longrunning.Operation, error) {
return nil, errUnimplemented
}
func (s *server) DeleteAppProfile(ctx context.Context, req *btapb.DeleteAppProfileRequest) (*empty.Empty, error) {
return nil, errUnimplemented
}
func (s *server) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
return nil, errUnimplemented
}
func (s *server) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
return nil, errUnimplemented
}
func (s *server) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
return nil, errUnimplemented
}

File diff suppressed because it is too large


@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -17,6 +17,10 @@ package main
 import (
     "testing"
     "time"
+
+    "cloud.google.com/go/bigtable"
+    "cloud.google.com/go/internal/testutil"
+    "github.com/google/go-cmp/cmp"
 )
 func TestParseDuration(t *testing.T) {
@@ -57,3 +61,113 @@
         }
     }
 }
func TestParseArgs(t *testing.T) {
got, err := parseArgs([]string{"a=1", "b=2"}, []string{"a", "b"})
if err != nil {
t.Fatal(err)
}
want := map[string]string{"a": "1", "b": "2"}
if !testutil.Equal(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
if _, err := parseArgs([]string{"a1"}, []string{"a1"}); err == nil {
t.Error("malformed: got nil, want error")
}
if _, err := parseArgs([]string{"a=1"}, []string{"b"}); err == nil {
t.Error("invalid: got nil, want error")
}
}
func TestParseColumnsFilter(t *testing.T) {
tests := []struct {
in string
out bigtable.Filter
fail bool
}{
{
in: "columnA",
out: bigtable.ColumnFilter("columnA"),
},
{
in: "familyA:columnA",
out: bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")),
},
{
in: "columnA,columnB",
out: bigtable.InterleaveFilters(bigtable.ColumnFilter("columnA"), bigtable.ColumnFilter("columnB")),
},
{
in: "familyA:columnA,columnB",
out: bigtable.InterleaveFilters(
bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")),
bigtable.ColumnFilter("columnB"),
),
},
{
in: "columnA,familyB:columnB",
out: bigtable.InterleaveFilters(
bigtable.ColumnFilter("columnA"),
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")),
),
},
{
in: "familyA:columnA,familyB:columnB",
out: bigtable.InterleaveFilters(
bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")),
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")),
),
},
{
in: "familyA:",
out: bigtable.FamilyFilter("familyA"),
},
{
in: ":columnA",
out: bigtable.ColumnFilter("columnA"),
},
{
in: ",:columnA,,familyB:columnB,",
out: bigtable.InterleaveFilters(
bigtable.ColumnFilter("columnA"),
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")),
),
},
{
in: "familyA:columnA:cellA",
fail: true,
},
{
in: "familyA::columnA",
fail: true,
},
}
for _, tc := range tests {
got, err := parseColumnsFilter(tc.in)
if !tc.fail && err != nil {
t.Errorf("parseColumnsFilter(%q) unexpectedly failed: %v", tc.in, err)
continue
}
if tc.fail && err == nil {
t.Errorf("parseColumnsFilter(%q) did not fail", tc.in)
continue
}
if tc.fail {
continue
}
var cmpOpts cmp.Options
cmpOpts = append(cmpOpts,
    cmp.AllowUnexported(bigtable.ChainFilters([]bigtable.Filter{}...)),
    cmp.AllowUnexported(bigtable.InterleaveFilters([]bigtable.Filter{}...)))
if !cmp.Equal(got, tc.out, cmpOpts) {
t.Errorf("parseColumnsFilter(%q) = %v, want %v", tc.in, got, tc.out)
}
}
}


@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,10 +14,12 @@
 // DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
 // Run "go generate" to regenerate.
-//go:generate go run cbt.go -o cbtdoc.go doc
+//go:generate go run cbt.go gcpolicy.go -o cbtdoc.go doc
 /*
-Cbt is a tool for doing basic interactions with Cloud Bigtable.
+Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
+install the cbt tool, see the
+[cbt overview](https://cloud.google.com/bigtable/docs/cbt-overview).
 Usage:
@@ -26,33 +28,74 @@ Usage:
 The commands are:
     count                     Count rows in a table
+    createinstance            Create an instance with an initial cluster
+    createcluster             Create a cluster in the configured instance
     createfamily              Create a column family
     createtable               Create a table
+    updatecluster             Update a cluster in the configured instance
+    deleteinstance            Delete an instance
+    deletecluster             Delete a cluster from the configured instance
+    deletecolumn              Delete all cells in a column
     deletefamily              Delete a column family
     deleterow                 Delete a row
     deletetable               Delete a table
     doc                       Print godoc-suitable documentation for cbt
     help                      Print help text
     listinstances             List instances in a project
+    listclusters              List clusters in an instance
     lookup                    Read from a single row
     ls                        List tables and column families
     mddoc                     Print documentation for cbt in Markdown format
     read                      Read rows
     set                       Set value of a cell
     setgcpolicy               Set the GC policy for a column family
+    waitforreplication        Block until all the completed writes have been replicated to all the clusters
+    createtablefromsnapshot   Create a table from a snapshot (snapshots alpha)
+    createsnapshot            Create a snapshot from a source table (snapshots alpha)
+    listsnapshots             List snapshots in a cluster (snapshots alpha)
+    getsnapshot               Get snapshot info (snapshots alpha)
+    deletesnapshot            Delete snapshot in a cluster (snapshots alpha)
+    version                   Print the current cbt version
+    createappprofile          Creates app profile for an instance
+    getappprofile             Reads app profile for an instance
+    listappprofile            Lists app profile for an instance
+    updateappprofile          Updates app profile for an instance
+    deleteappprofile          Deletes app profile for an instance
 Use "cbt help <command>" for more information about a command.
 The options are:
     -project string
-        project ID
+        project ID, if unset uses gcloud configured project
     -instance string
         Cloud Bigtable instance
     -creds string
         if set, use application credentials in this file
+Alpha features are not currently available to most Cloud Bigtable customers. The
+features might be changed in backward-incompatible ways and are not recommended
+for production use. They are not subject to any SLA or deprecation policy.
+Note: cbt does not support specifying arbitrary bytes on the command line for
+any value that Bigtable otherwise supports (e.g., row key, column qualifier,
+etc.).
+For convenience, values of the -project, -instance, -creds,
+-admin-endpoint and -data-endpoint flags may be specified in
+~/.cbtrc in this format:
+    project = my-project-123
+    instance = my-instance
+    creds = path-to-account-key.json
+    admin-endpoint = hostname:port
+    data-endpoint = hostname:port
+All values are optional, and all will be overridden by flags.
 Count rows in a table
 Usage:
@@ -61,6 +104,34 @@ Usage:
Create an instance with an initial cluster
Usage:
cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage type>
instance-id Permanent, unique id for the instance
display-name Description of the instance
cluster-id Permanent, unique id for the cluster in the instance
zone The zone in which to create the cluster
num-nodes The number of nodes to create
storage-type SSD or HDD
Create a cluster in the configured instance
Usage:
cbt createcluster <cluster-id> <zone> <num-nodes> <storage type>
cluster-id Permanent, unique id for the cluster in the instance
zone The zone in which to create the cluster
num-nodes The number of nodes to create
storage-type SSD or HDD
 Create a column family
 Usage:
@@ -72,7 +143,47 @@ Usage:
 Create a table
 Usage:
-    cbt createtable <table>
+    cbt createtable <table> [families=family[:gcpolicy],...] [splits=split,...]
+      families: Column families and their associated GC policies. For gcpolicy,
+      see "setgcpolicy".
+      Example: families=family1:maxage=1w,family2:maxversions=1
+      splits:   Row key to be used to initially split the table
Update a cluster in the configured instance
Usage:
cbt updatecluster <cluster-id> [num-nodes=num-nodes]
cluster-id Permanent, unique id for the cluster in the instance
num-nodes The number of nodes to update to
Delete an instance
Usage:
cbt deleteinstance <instance>
Delete a cluster from the configured instance
Usage:
cbt deletecluster <cluster>
Delete all cells in a column
Usage:
cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>]
app-profile=<app profile id> The app profile id to use for the request
@@ -88,7 +199,9 @@ Usage:
 Delete a row
 Usage:
-    cbt deleterow <table> <row>
+    cbt deleterow <table> <row> [app-profile=<app profile id>]
+      app-profile=<app profile id>       The app profile id to use for the request
@@ -125,10 +238,22 @@ Usage:
+List clusters in an instance
+Usage:
+    cbt listclusters
 Read from a single row
 Usage:
-    cbt lookup <table> <row>
+    cbt lookup <table> <row> [columns=[family]:[qualifier],...] [cells-per-column=<n>] [app-profile=<app profile id>]
+      columns=[family]:[qualifier],...   Read only these columns, comma-separated
+      cells-per-column=<n>               Read only this many cells per column
+      app-profile=<app profile id>       The app profile id to use for the request
@@ -153,11 +278,15 @@ Usage:
 Read rows
 Usage:
-    cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]
+    cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [columns=[family]:[qualifier],...] [count=<n>] [cells-per-column=<n>] [app-profile=<app profile id>]
      start=<row>                        Start reading at this row
      end=<row>                          Stop reading before this row
      prefix=<prefix>                    Read rows with this prefix
-     count=<n>                          Read only this many rows
+     regex=<regex>                      Read rows with keys matching this regex
+     columns=[family]:[qualifier],...   Read only these columns, comma-separated
+     count=<n>                          Read only this many rows
+     cells-per-column=<n>               Read only this many cells per column
+     app-profile=<app profile id>       The app profile id to use for the request
@@ -166,7 +295,8 @@ Usage:
 Set value of a cell
 Usage:
-    cbt set <table> <row> family:column=val[@ts] ...
+    cbt set <table> <row> [app-profile=<app profile id>] family:column=val[@ts] ...
+      app-profile=<app profile id>       The app profile id to use for the request
      family:column=val[@ts] may be repeated to set multiple cells.
      ts is an optional integer timestamp.
@@ -179,7 +309,7 @@ Usage:
 Set the GC policy for a column family
 Usage:
-    cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )
+    cbt setgcpolicy <table> <family> ((maxage=<d> | maxversions=<n>) [(and|or) (maxage=<d> | maxversions=<n>),...] | never)
      maxage=<d>         Maximum timestamp age to preserve (e.g. "1h", "4d")
      maxversions=<n>    Maximum number of versions to preserve
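For instance, the extended grammar accepts composite policies and an explicit "never" (table and family names here are placeholders):
    cbt setgcpolicy my-table my-family maxage=7d and maxversions=3
    cbt setgcpolicy my-table my-family never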
@@ -187,5 +317,109 @@ Usage:
Block until all the completed writes have been replicated to all the clusters
Usage:
cbt waitforreplication <table>
Create a table from a snapshot (snapshots alpha)
Usage:
cbt createtablefromsnapshot <table> <cluster> <snapshot>
table The name of the table to create
cluster The cluster where the snapshot is located
snapshot The snapshot to restore
Create a snapshot from a source table (snapshots alpha)
Usage:
cbt createsnapshot <cluster> <snapshot> <table> [ttl=<d>]
[ttl=<d>] Lifespan of the snapshot (e.g. "1h", "4d")
List snapshots in a cluster (snapshots alpha)
Usage:
cbt listsnapshots [<cluster>]
Get snapshot info (snapshots alpha)
Usage:
cbt getsnapshot <cluster> <snapshot>
Delete snapshot in a cluster (snapshots alpha)
Usage:
cbt deletesnapshot <cluster> <snapshot>
Print the current cbt version
Usage:
cbt version
Creates app profile for an instance
Usage:
usage: cbt createappprofile <instance-id> <profile-id> <description> (route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag]
optional flags may be `force`
Reads app profile for an instance
Usage:
cbt getappprofile <instance-id> <profile-id>
Lists app profile for an instance
Usage:
cbt listappprofile <instance-id>
Updates app profile for an instance
Usage:
usage: cbt updateappprofile <instance-id> <profile-id> <description>(route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag]
optional flags may be `force`
Deletes app profile for an instance
Usage:
cbt deleteappprofile <instance-id> <profile-id>
 */
 package main

Some files were not shown because too many files have changed in this diff