Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)

Merge pull request #95 from betabrand/auth-providers

Allow other auth providers (GCP, OIDC, Azure, OpenStack)
Avesh Agarwal, 2018-05-23 13:01:27 -04:00 (committed by GitHub)
1483 changed files with 199399 additions and 6 deletions

glide.lock (generated)

@@ -1,10 +1,24 @@
-hash: 374e925e75f6b711fd89523308d62c744e1a1846bc5923b36b864c19809eef8b
-updated: 2018-01-09T00:39:38.149357059-05:00
+hash: ed51a8e643db6e9996ef0ffca671fb31ab5b7fe0d61ecdda828192871f9da366
+updated: 2018-05-22T18:05:00.26435-07:00
 imports:
+- name: cloud.google.com/go
+  version: 3b1ae45394a234c385be014e9a488f2bb6eef821
+  subpackages:
+  - compute/metadata
+  - internal
+- name: github.com/Azure/go-autorest
+  version: e14a70c556c8e0db173358d1a903dca345a8e75e
+  subpackages:
+  - autorest
+  - autorest/adal
+  - autorest/azure
+  - autorest/date
 - name: github.com/davecgh/go-spew
   version: 782f4967f2dc4564575ca782fe2d04090b5faca8
   subpackages:
   - spew
+- name: github.com/dgrijalva/jwt-go
+  version: 01aeca54ebda6e0fbfafd0a524d234159c05ec20
 - name: github.com/docker/distribution
   version: edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
   subpackages:
@@ -51,6 +65,15 @@ imports:
   - OpenAPIv2
   - compiler
   - extensions
+- name: github.com/gophercloud/gophercloud
+  version: 8183543f90d1aef267a5ecc209f2e0715b355acb
+  subpackages:
+  - openstack
+  - openstack/identity/v2/tenants
+  - openstack/identity/v2/tokens
+  - openstack/identity/v3/tokens
+  - openstack/utils
+  - pagination
 - name: github.com/gregjones/httpcache
   version: 787624de3eb7bd915c329cba748687a3b22666a6
   subpackages:
@@ -106,6 +129,7 @@ imports:
   version: 1c05540f6879653db88113bc4a2b70aec4bd491f
   subpackages:
   - context
+  - context/ctxhttp
   - html
   - html/atom
   - http2
@@ -115,6 +139,13 @@ imports:
   - lex/httplex
   - trace
   - websocket
+- name: golang.org/x/oauth2
+  version: a6bd8cefa1811bd24b86f8902872e4e8225f74c4
+  subpackages:
+  - google
+  - internal
+  - jws
+  - jwt
 - name: golang.org/x/sys
   version: 95c6576299259db960f6c5b9b69ea52422860fce
   subpackages:
@@ -138,6 +169,18 @@ imports:
   version: 8cab8a1319f0be9798e7fe78b15da75e5f94b2e9
   subpackages:
   - imports
+- name: google.golang.org/appengine
+  version: b1f26356af11148e710935ed1ac8a7f5702c7612
+  subpackages:
+  - internal
+  - internal/app_identity
+  - internal/base
+  - internal/datastore
+  - internal/log
+  - internal/modules
+  - internal/remote_api
+  - internal/urlfetch
+  - urlfetch
 - name: gopkg.in/inf.v0
   version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 - name: gopkg.in/yaml.v2
@@ -145,6 +188,7 @@ imports:
 - name: k8s.io/api
   version: af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a
   subpackages:
+  - admission/v1beta1
   - admissionregistration/v1alpha1
   - admissionregistration/v1beta1
   - apps/v1
@@ -163,6 +207,7 @@ imports:
   - core/v1
   - events/v1beta1
   - extensions/v1beta1
+  - imagepolicy/v1alpha1
   - networking/v1
   - policy/v1beta1
   - rbac/v1
@@ -294,9 +339,15 @@ imports:
   - kubernetes/typed/storage/v1beta1/fake
   - listers/core/v1
   - pkg/version
+  - plugin/pkg/client/auth
+  - plugin/pkg/client/auth/azure
+  - plugin/pkg/client/auth/gcp
+  - plugin/pkg/client/auth/oidc
+  - plugin/pkg/client/auth/openstack
   - rest
   - rest/watch
   - testing
+  - third_party/forked/golang/template
   - tools/auth
   - tools/cache
   - tools/clientcmd
@@ -312,6 +363,7 @@ imports:
   - util/flowcontrol
   - util/homedir
   - util/integer
+  - util/jsonpath
 - name: k8s.io/code-generator
   version: fef8bcdbaf36ac6a1a18c9ef7d85200b249fad30
 - name: k8s.io/gengo
@@ -328,25 +380,85 @@ imports:
version: 925c127ec6b946659ad0fd596fa959be43f0cc05
subpackages:
- pkg/api/legacyscheme
- pkg/api/v1
- pkg/api/testapi
- pkg/api/v1/resource
- pkg/apis/admission
- pkg/apis/admission/install
- pkg/apis/admission/v1beta1
- pkg/apis/admissionregistration
- pkg/apis/admissionregistration/install
- pkg/apis/admissionregistration/v1alpha1
- pkg/apis/admissionregistration/v1beta1
- pkg/apis/apps
- pkg/apis/apps/install
- pkg/apis/apps/v1
- pkg/apis/apps/v1beta1
- pkg/apis/apps/v1beta2
- pkg/apis/authentication
- pkg/apis/authentication/install
- pkg/apis/authentication/v1
- pkg/apis/authentication/v1beta1
- pkg/apis/authorization
- pkg/apis/authorization/install
- pkg/apis/authorization/v1
- pkg/apis/authorization/v1beta1
- pkg/apis/autoscaling
- pkg/apis/autoscaling/install
- pkg/apis/autoscaling/v1
- pkg/apis/autoscaling/v2beta1
- pkg/apis/batch
- pkg/apis/batch/install
- pkg/apis/batch/v1
- pkg/apis/batch/v1beta1
- pkg/apis/batch/v2alpha1
- pkg/apis/certificates
- pkg/apis/certificates/install
- pkg/apis/certificates/v1beta1
- pkg/apis/componentconfig
- pkg/apis/componentconfig/install
- pkg/apis/componentconfig/v1alpha1
- pkg/apis/core
- pkg/apis/core/helper
- pkg/apis/core/helper/qos
- pkg/apis/core/install
- pkg/apis/core/v1
- pkg/apis/core/v1/helper
- pkg/apis/core/v1/helper/qos
- pkg/apis/events
- pkg/apis/events/install
- pkg/apis/events/v1beta1
- pkg/apis/extensions
- pkg/apis/extensions/install
- pkg/apis/extensions/v1beta1
- pkg/apis/imagepolicy
- pkg/apis/imagepolicy/install
- pkg/apis/imagepolicy/v1alpha1
- pkg/apis/networking
- pkg/apis/networking/install
- pkg/apis/networking/v1
- pkg/apis/policy
- pkg/apis/policy/install
- pkg/apis/policy/v1beta1
- pkg/apis/rbac
- pkg/apis/rbac/install
- pkg/apis/rbac/v1
- pkg/apis/rbac/v1alpha1
- pkg/apis/rbac/v1beta1
- pkg/apis/scheduling
- pkg/apis/scheduling/install
- pkg/apis/scheduling/v1alpha1
- pkg/apis/settings
- pkg/apis/settings/install
- pkg/apis/settings/v1alpha1
- pkg/apis/storage
- pkg/apis/storage/install
- pkg/apis/storage/v1
- pkg/apis/storage/v1alpha1
- pkg/apis/storage/v1beta1
- pkg/features
- pkg/kubelet/apis
- pkg/kubelet/types
- pkg/master/ports
- pkg/util/parsers
- pkg/util/pointer
- plugin/pkg/scheduler/algorithm/priorities/util
- plugin/pkg/scheduler/schedulercache
- plugin/pkg/scheduler/util
testImports: []

glide.yaml

@@ -18,6 +18,8 @@ import:
 - package: k8s.io/gengo
 - package: github.com/ugorji/go
   version: v.1.1-beta
+- package: github.com/Azure/go-autorest
+  version: e14a70c556c8e0db173358d1a903dca345a8e75e
 - package: golang.org/x/tools
   subpackages:
   - imports


@@ -20,6 +20,7 @@ import (
 	"fmt"
 	clientset "k8s.io/client-go/kubernetes"
+	_ "k8s.io/client-go/plugin/pkg/client/auth"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 )
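
This blank import is the substance of the PR: each plugin package under `k8s.io/client-go/plugin/pkg/client/auth` (gcp, oidc, azure, openstack) registers its auth provider with client-go's `rest` package in an `init` function, so a single side-effect import of the parent package is enough to make kubeconfigs that use those `auth-provider` stanzas work. A minimal sketch of a client wired up this way (the kubeconfig path is a placeholder and the node listing is illustrative, not code from this repo):

``` go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	// Side-effect import: registers the GCP, OIDC, Azure, and OpenStack
	// auth provider plugins with client-go.
	_ "k8s.io/client-go/plugin/pkg/client/auth"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder path; real code would read a flag or $KUBECONFIG.
	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// With the plugins registered, this works even when the kubeconfig's
	// user entry specifies auth-provider: gcp, oidc, azure, or openstack.
	nodes, err := client.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("cluster has %d nodes\n", len(nodes.Items))
}
```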

vendor/cloud.google.com/go/.travis.yml (generated, vendored)

@@ -0,0 +1,11 @@
sudo: false
language: go
go:
- 1.6
- 1.7
install:
- go get -v cloud.google.com/go/...
script:
- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
go test -race -v cloud.google.com/go/...

vendor/cloud.google.com/go/AUTHORS (generated, vendored)

@@ -0,0 +1,15 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>

vendor/cloud.google.com/go/CONTRIBUTING.md (generated, vendored)

@@ -0,0 +1,126 @@
# Contributing
1. Sign one of the contributor license agreements below.
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
1. Get the cloud package by running `go get -d cloud.google.com/go`.
1. If you have already checked out the source, make sure that the remote git
origin is https://code.googlesource.com/gocloud:
git remote set-url origin https://code.googlesource.com/gocloud
1. Make sure your auth is configured correctly by visiting
https://code.googlesource.com, clicking "Generate Password", and following
the directions.
1. Make changes and create a change by running `git codereview change <name>`,
provide a commit message, and use `git codereview mail` to create a Gerrit CL.
1. Keep amending the change and mail it as you receive feedback.
## Integration Tests
In addition to the unit tests, you may run the integration test suite.
To run the integration tests, you must create and configure a project in the
Google Developers Console.
After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project)
(or the **Editor** and **Logs Configuration Writer** roles) is added to the
service account.
Once you create a project, set the following environment variables to be able to
run the tests against the actual APIs.
- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
Install the [gcloud command-line tool][gcloudcli] on your machine and use it
to create the indexes used in the datastore integration tests, which are
defined in `datastore/testdata/index.yaml`:
From the project's root directory:
``` sh
# Set the default project in your env
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
# Authenticate the gcloud tool with your account
$ gcloud auth login
# Create the indexes
$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
```
The Sink integration tests in preview/logging require a Google Cloud storage
bucket with the same name as your test project, and with the Stackdriver Logging
service account as owner:
``` sh
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
```
Once you've set the environment variables, you can run the integration tests by
running:
``` sh
$ go test -v cloud.google.com/go/...
```
## Contributor License Agreements
Before we can accept your pull requests you'll need to sign a Contributor
License Agreement (CLA):
- **If you are an individual writing original source code** and **you own the
intellectual property**, then you'll need to sign an [individual CLA][indvcla].
- **If you work for a company that wants to allow you to contribute your work**,
then you'll need to sign a [corporate CLA][corpcla].
You can sign these electronically (just scroll to the bottom). After that,
we'll be able to accept your pull requests.
## Contributor Code of Conduct
As contributors and maintainers of this project,
and in the interest of fostering an open and welcoming community,
we pledge to respect all people who contribute through reporting issues,
posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.
We are committed to making participation in this project
a harassment-free experience for everyone,
regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information,
such as physical or electronic
addresses, without explicit permission
* Other unethical or unprofessional conduct.
Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct.
By adopting this Code of Conduct,
project maintainers commit themselves to fairly and consistently
applying these principles to every aspect of managing this project.
Project maintainers who do not follow or enforce the Code of Conduct
may be permanently removed from the project team.
This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by opening an issue
or contacting one or more of the project maintainers.
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
[indvcla]: https://developers.google.com/open-source/cla/individual
[corpcla]: https://developers.google.com/open-source/cla/corporate

vendor/cloud.google.com/go/CONTRIBUTORS (generated, vendored)

@@ -0,0 +1,34 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
# Keep the list alphabetically sorted.
Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>

vendor/cloud.google.com/go/LICENSE (generated, vendored)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/cloud.google.com/go/README.md (generated, vendored)

@@ -0,0 +1,245 @@
# Google Cloud for Go
[![Build Status](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go)
[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go)
``` go
import "cloud.google.com/go"
```
Go packages for Google Cloud Platform services.
**NOTE:** These packages are under development, and may occasionally make
backwards-incompatible changes.
**NOTE:** This GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
## News
_September 8, 2016_
* New clients for some of Google's Machine Learning APIs: Vision, Speech, and
Natural Language.
* Preview version of a new [Stackdriver Logging][cloud-logging] client in
[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
This client uses gRPC as its transport layer, and supports log reading, sinks
and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
## Supported APIs
Google API | Status | Package
-------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Pub/Sub][cloud-pubsub] | experimental | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | experimental | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Logging][cloud-logging] | experimental | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Vision][cloud-vision] | experimental | [`cloud.google.com/go/vision`][cloud-vision-ref]
[Language][cloud-language] | experimental | [`cloud.google.com/go/language/apiv1beta1`][cloud-language-ref]
[Speech][cloud-speech] | experimental | [`cloud.google.com/go/speech/apiv1beta1`][cloud-speech-ref]
> **Experimental status**: the API is still being actively developed. As a
> result, it might change in backward-incompatible ways and is not recommended
> for production use.
>
> **Beta status**: the API is largely complete, but still has outstanding
> features and bugs to be addressed. There may be minor backwards-incompatible
> changes where necessary.
>
> **Stable status**: the API is mature and ready for production use. We will
> continue addressing bugs and feature requests.
Documentation and examples are available at
https://godoc.org/cloud.google.com/go
Visit or join the
[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce)
for updates on these packages.
## Go Versions Supported
We support the two most recent major versions of Go. If Google App Engine uses
an older version, we support that as well. You can see which versions are
currently supported by looking at the lines following `go:` in
[`.travis.yml`](.travis.yml).
## Authorization
By default, each API will use [Google Application Default Credentials][default-creds]
for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration.
Manually-configured authorization can be achieved using the
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
create an `oauth2.TokenSource`. This token source can be passed to the `NewClient`
function for the relevant API using a
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
option.
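
As a sketch of that manual path (the key file path and project ID are placeholders, and the token source is built with `golang.org/x/oauth2/google`, which this README does not itself demonstrate):

```go
package main

import (
	"io/ioutil"
	"log"

	"cloud.google.com/go/datastore"
	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Load a service account key and build a TokenSource scoped for Datastore.
	data, err := ioutil.ReadFile("/path/to/service-account-key.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.JWTConfigFromJSON(data, datastore.ScopeDatastore)
	if err != nil {
		log.Fatal(err)
	}
	// Pass the manually configured token source to the client constructor.
	client, err := datastore.NewClient(ctx, "my-project-id",
		option.WithTokenSource(conf.TokenSource(ctx)))
	if err != nil {
		log.Fatal(err)
	}
	_ = client // Use the client.
}
```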
## Google Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore)
[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully-managed, schemaless database for storing non-relational data. Cloud Datastore
automatically scales with your users and supports ACID transactions, high availability
of reads and writes, strong consistency for reads and ancestor queries, and eventual
consistency for all other queries.
Follow the [activation instructions][cloud-datastore-activation] to use the Google
Cloud Datastore API with your project.
First create a `datastore.Client` to use throughout your application:
```go
client, err := datastore.NewClient(ctx, "my-project-id")
if err != nil {
log.Fatalln(err)
}
```
Then use that client to interact with the API:
```go
type Post struct {
Title string
Body string `datastore:",noindex"`
PublishedAt time.Time
}
keys := []*datastore.Key{
datastore.NewKey(ctx, "Post", "post1", 0, nil),
datastore.NewKey(ctx, "Post", "post2", 0, nil),
}
posts := []*Post{
{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
}
if _, err := client.PutMulti(ctx, keys, posts); err != nil {
log.Fatal(err)
}
```
## Google Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)
[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store
data on Google infrastructure with very high reliability, performance and availability,
and can be used to distribute large data objects to users via direct download.
https://godoc.org/cloud.google.com/go/storage
First create a `storage.Client` to use throughout your application:
```go
client, err := storage.NewClient(ctx)
if err != nil {
log.Fatal(err)
}
```
```go
// Read object1 from the bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
if err != nil {
log.Fatal(err)
}
defer rc.Close()
body, err := ioutil.ReadAll(rc)
if err != nil {
log.Fatal(err)
}
```
## Google Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)
[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect
your services with reliable, many-to-many, asynchronous messaging hosted on Google's
infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation
for building your own robust, global services.
First create a `pubsub.Client` to use throughout your application:
```go
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
```
```go
// Publish "hello world" on topic1.
topic := client.Topic("topic1")
msgIDs, err := topic.Publish(ctx, &pubsub.Message{
Data: []byte("hello world"),
})
if err != nil {
log.Fatal(err)
}
// Create an iterator to pull messages via subscription1.
it, err := client.Subscription("subscription1").Pull(ctx)
if err != nil {
log.Println(err)
}
defer it.Stop()
// Consume N messages from the iterator.
for i := 0; i < N; i++ {
msg, err := it.Next()
if err == pubsub.Done {
break
}
if err != nil {
log.Fatalf("Failed to retrieve message: %v", err)
}
fmt.Printf("Message %d: %s\n", i, msg.Data)
msg.Done(true) // Acknowledge that we've consumed the message.
}
```
## Contributing
Contributions are welcome. Please, see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
document for details. We're using Gerrit for our code reviews. Please don't open pull
requests against this repo; new pull requests will be automatically closed.
Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms.
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.
[cloud-datastore]: https://cloud.google.com/datastore/
[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
[cloud-storage]: https://cloud.google.com/storage/
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
[cloud-bigtable]: https://cloud.google.com/bigtable/
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable
[cloud-bigquery]: https://cloud.google.com/bigquery/
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery
[cloud-logging]: https://cloud.google.com/logging/
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging
[cloud-vision]: https://cloud.google.com/vision/
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision
[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1beta1
[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1
[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials

vendor/cloud.google.com/go/appveyor.yml (generated, vendored)

@@ -0,0 +1,26 @@
# This file configures AppVeyor (http://www.appveyor.com),
# a Windows-based CI service similar to Travis.
# Identifier for this run
version: "{build}"
# Clone the repo into this path, which conforms to the standard
# Go workspace structure.
clone_folder: c:\gopath\src\cloud.google.com\go
environment:
GOPATH: c:\gopath
install:
# Info for debugging.
- echo %PATH%
- go version
- go env
- go get -v -d -t ./...
# Provide a build script, or AppVeyor will call msbuild.
build_script:
- go install -v ./...
test_script:
- go test -short -v ./...

vendor/cloud.google.com/go/authexample_test.go (generated, vendored)

@@ -0,0 +1,60 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloud_test
import (
"cloud.google.com/go/datastore"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
func Example_applicationDefaultCredentials() {
ctx := context.Background()
// Use Google Application Default Credentials to authorize and authenticate the client.
// More information about Application Default Credentials and how to enable is at
// https://developers.google.com/identity/protocols/application-default-credentials.
//
// This is the recommended way of authorizing and authenticating.
//
// Note: The example uses the datastore client, but the same steps apply to
// the other client libraries underneath this package.
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
// TODO: handle error.
}
// Use the client.
_ = client
}
func Example_serviceAccountFile() {
// Warning: The better way to use service accounts is to set GOOGLE_APPLICATION_CREDENTIALS
// and use the Application Default Credentials.
ctx := context.Background()
// Use a JSON key file associated with a Google service account to
// authenticate and authorize.
// Go to https://console.developers.google.com/permissions/serviceaccounts to create
// and download a service account key for your project.
//
// Note: The example uses the datastore client, but the same steps apply to
// the other client libraries underneath this package.
client, err := datastore.NewClient(ctx,
"project-id",
option.WithServiceAccountFile("/path/to/service-account-key.json"))
if err != nil {
// TODO: handle error.
}
// Use the client.
_ = client
}

vendor/cloud.google.com/go/bigquery/bigquery.go (generated, vendored)

@@ -0,0 +1,175 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
// TODO(mcgreevy): support dry-run mode when creating jobs.
import (
"fmt"
"google.golang.org/api/option"
"google.golang.org/api/transport"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
const prodAddr = "https://www.googleapis.com/bigquery/v2/"
// A Source is a source of data for the Copy function.
type Source interface {
implementsSource()
}
// A Destination is a destination of data for the Copy function.
type Destination interface {
implementsDestination()
}
// An Option is an optional argument to Copy.
type Option interface {
implementsOption()
}
// A ReadSource is a source of data for the Read function.
type ReadSource interface {
implementsReadSource()
}
// A ReadOption is an optional argument to Read.
type ReadOption interface {
customizeRead(conf *pagingConf)
}
const Scope = "https://www.googleapis.com/auth/bigquery"
const userAgent = "gcloud-golang-bigquery/20160429"
// Client may be used to perform BigQuery operations.
type Client struct {
service service
projectID string
}
// NewClient constructs a new Client which can perform BigQuery operations.
// Operations performed via the client are billed to the specified GCP project.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
o := []option.ClientOption{
option.WithEndpoint(prodAddr),
option.WithScopes(Scope),
option.WithUserAgent(userAgent),
}
o = append(o, opts...)
httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
s, err := newBigqueryService(httpClient, endpoint)
if err != nil {
return nil, fmt.Errorf("constructing bigquery client: %v", err)
}
c := &Client{
service: s,
projectID: projectID,
}
return c, nil
}
// initJobProto creates and returns a bigquery Job proto.
// The proto is customized using any jobOptions in options.
// The list of Options is returned with the jobOptions removed.
func initJobProto(projectID string, options []Option) (*bq.Job, []Option) {
job := &bq.Job{}
var other []Option
for _, opt := range options {
if o, ok := opt.(jobOption); ok {
o.customizeJob(job, projectID)
} else {
other = append(other, opt)
}
}
return job, other
}
// Copy starts a BigQuery operation to copy data from a Source to a Destination.
func (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error) {
switch dst := dst.(type) {
case *Table:
switch src := src.(type) {
case *GCSReference:
return c.load(ctx, dst, src, options)
case *Table:
return c.cp(ctx, dst, Tables{src}, options)
case Tables:
return c.cp(ctx, dst, src, options)
case *Query:
return c.query(ctx, dst, src, options)
}
case *GCSReference:
if src, ok := src.(*Table); ok {
return c.extract(ctx, dst, src, options)
}
}
return nil, fmt.Errorf("no Copy operation matches dst/src pair: dst: %T ; src: %T", dst, src)
}
// Query creates a query with string q. You may optionally set
// DefaultProjectID and DefaultDatasetID on the returned query before using it.
func (c *Client) Query(q string) *Query {
return &Query{Q: q, client: c}
}
// Read submits a query for execution and returns the results via an Iterator.
//
// Read uses a temporary table to hold the results of the query job.
//
// For more control over how a query is performed, don't use this method but
// instead pass the Query as a Source to Client.Copy, and call Read on the
// resulting Job.
func (q *Query) Read(ctx context.Context, options ...ReadOption) (*Iterator, error) {
dest := &Table{}
job, err := q.client.Copy(ctx, dest, q, WriteTruncate)
if err != nil {
return nil, err
}
return job.Read(ctx, options...)
}
// executeQuery submits a query for execution and returns the results via an Iterator.
func (c *Client) executeQuery(ctx context.Context, q *Query, options ...ReadOption) (*Iterator, error) {
dest := &Table{}
job, err := c.Copy(ctx, dest, q, WriteTruncate)
if err != nil {
return nil, err
}
return c.Read(ctx, job, options...)
}
// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
return c.DatasetInProject(c.projectID, id)
}
// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
return &Dataset{
projectID: projectID,
id: datasetID,
service: c.service,
}
}

vendor/cloud.google.com/go/bigquery/copy_op.go (generated, vendored)

@@ -0,0 +1,47 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type copyOption interface {
customizeCopy(conf *bq.JobConfigurationTableCopy)
}
func (c *Client) cp(ctx context.Context, dst *Table, src Tables, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationTableCopy{}
dst.customizeCopyDst(payload)
src.customizeCopySrc(payload)
for _, opt := range options {
o, ok := opt.(copyOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeCopy(payload)
}
job.Configuration = &bq.JobConfiguration{
Copy: payload,
}
return c.service.insertJob(ctx, job, c.projectID)
}

vendor/cloud.google.com/go/bigquery/copy_test.go (generated, vendored)

@@ -0,0 +1,104 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
func defaultCopyJob() *bq.Job {
return &bq.Job{
Configuration: &bq.JobConfiguration{
Copy: &bq.JobConfigurationTableCopy{
DestinationTable: &bq.TableReference{
ProjectId: "d-project-id",
DatasetId: "d-dataset-id",
TableId: "d-table-id",
},
SourceTables: []*bq.TableReference{
{
ProjectId: "s-project-id",
DatasetId: "s-dataset-id",
TableId: "s-table-id",
},
},
},
},
}
}
func TestCopy(t *testing.T) {
testCases := []struct {
dst *Table
src Tables
options []Option
want *bq.Job
}{
{
dst: &Table{
ProjectID: "d-project-id",
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
src: Tables{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
TableID: "s-table-id",
},
},
want: defaultCopyJob(),
},
{
dst: &Table{
ProjectID: "d-project-id",
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
src: Tables{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
TableID: "s-table-id",
},
},
options: []Option{CreateNever, WriteTruncate},
want: func() *bq.Job {
j := defaultCopyJob()
j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
return j
}(),
},
}
for _, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
}
if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
t.Errorf("err calling cp: %v", err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
}
}


@@ -0,0 +1,79 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"time"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type createTableRecorder struct {
conf *createTableConf
service
}
func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error {
rec.conf = conf
return nil
}
func TestCreateTableOptions(t *testing.T) {
s := &createTableRecorder{}
c := &Client{
projectID: "p",
service: s,
}
ds := c.Dataset("d")
table := ds.Table("t")
exp := time.Now()
q := "query"
if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q)); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want := createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
expiration: exp,
viewQuery: q,
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want = createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
expiration: exp,
// No need for an elaborate schema, that is tested in schema_test.go.
schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
}

vendor/cloud.google.com/go/bigquery/dataset.go (generated, vendored)

@@ -0,0 +1,55 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import "golang.org/x/net/context"
// Dataset is a reference to a BigQuery dataset.
type Dataset struct {
projectID string
id string
service service
}
// ListTables returns a list of all the tables contained in the Dataset.
func (d *Dataset) ListTables(ctx context.Context) ([]*Table, error) {
var tables []*Table
err := getPages("", func(pageToken string) (string, error) {
ts, tok, err := d.service.listTables(ctx, d.projectID, d.id, pageToken)
if err == nil {
tables = append(tables, ts...)
}
return tok, err
})
if err != nil {
return nil, err
}
return tables, nil
}
// Create creates a dataset in the BigQuery service. An error will be returned
// if the dataset already exists.
func (d *Dataset) Create(ctx context.Context) error {
return d.service.insertDataset(ctx, d.id, d.projectID)
}
// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
func (d *Dataset) Table(tableID string) *Table {
return &Table{ProjectID: d.projectID, DatasetID: d.id, TableID: tableID, service: d.service}
}

vendor/cloud.google.com/go/bigquery/dataset_test.go (generated, vendored)

@@ -0,0 +1,105 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"reflect"
"testing"
"golang.org/x/net/context"
)
// readServiceStub services read requests by returning data from an in-memory list of values.
type listTablesServiceStub struct {
expectedProject, expectedDataset string
values [][]*Table // contains pages of tables.
pageTokens map[string]string // maps incoming page token to returned page token.
service
}
func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
if projectID != s.expectedProject {
return nil, "", errors.New("wrong project id")
}
if datasetID != s.expectedDataset {
return nil, "", errors.New("wrong dataset id")
}
tables := s.values[0]
s.values = s.values[1:]
return tables, s.pageTokens[pageToken], nil
}
func TestListTables(t *testing.T) {
t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
testCases := []struct {
data [][]*Table
pageTokens map[string]string
want []*Table
}{
{
data: [][]*Table{{t1, t2}, {t3}},
pageTokens: map[string]string{"": "a", "a": ""},
want: []*Table{t1, t2, t3},
},
{
data: [][]*Table{{t1, t2}, {t3}},
pageTokens: map[string]string{"": ""}, // no more pages after first one.
want: []*Table{t1, t2},
},
}
for _, tc := range testCases {
c := &Client{
service: &listTablesServiceStub{
expectedProject: "x",
expectedDataset: "y",
values: tc.data,
pageTokens: tc.pageTokens,
},
projectID: "x",
}
got, err := c.Dataset("y").ListTables(context.Background())
if err != nil {
t.Errorf("err calling ListTables: %v", err)
continue
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
}
}
}
func TestListTablesError(t *testing.T) {
c := &Client{
service: &listTablesServiceStub{
expectedProject: "x",
expectedDataset: "y",
},
projectID: "x",
}
// Test that service read errors are propagated back to the caller.
// Passing "not y" as the dataset id will cause the service to return an error.
_, err := c.Dataset("not y").ListTables(context.Background())
if err == nil {
// ListTables should return a non-nil error here.
t.Errorf("ListTables expected: non-nil err, got: nil")
}
}

vendor/cloud.google.com/go/bigquery/doc.go (generated, vendored)

@@ -0,0 +1,18 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package bigquery provides a client for the BigQuery service.
//
// Note: This package is a work-in-progress. Backwards-incompatible changes should be expected.
package bigquery // import "cloud.google.com/go/bigquery"

vendor/cloud.google.com/go/bigquery/error.go (generated, vendored)

@@ -0,0 +1,82 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
bq "google.golang.org/api/bigquery/v2"
)
// An Error contains detailed information about a failed bigquery operation.
type Error struct {
// Mirrors bq.ErrorProto, but drops DebugInfo
Location, Message, Reason string
}
func (e Error) Error() string {
return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
}
func errorFromErrorProto(ep *bq.ErrorProto) *Error {
if ep == nil {
return nil
}
return &Error{
Location: ep.Location,
Message: ep.Message,
Reason: ep.Reason,
}
}
// A MultiError contains multiple related errors.
type MultiError []error
func (m MultiError) Error() string {
switch len(m) {
case 0:
return "(0 errors)"
case 1:
return m[0].Error()
case 2:
return m[0].Error() + " (and 1 other error)"
}
return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1)
}
// RowInsertionError contains all errors that occurred when attempting to insert a row.
type RowInsertionError struct {
InsertID string // The InsertID associated with the affected row.
RowIndex int // The 0-based index of the affected row in the batch of rows being inserted.
Errors MultiError
}
func (e *RowInsertionError) Error() string {
errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s"
return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error())
}
// PutMultiError contains an error for each row which was not successfully inserted
// into a BigQuery table.
type PutMultiError []RowInsertionError
func (pme PutMultiError) Error() string {
plural := "s"
if len(pme) == 1 {
plural = ""
}
return fmt.Sprintf("%v row insertion%s failed", len(pme), plural)
}

vendor/cloud.google.com/go/bigquery/error_test.go (generated, vendored)

@@ -0,0 +1,109 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"reflect"
"strings"
"testing"
bq "google.golang.org/api/bigquery/v2"
)
func rowInsertionError(msg string) RowInsertionError {
return RowInsertionError{Errors: []error{errors.New(msg)}}
}
func TestPutMultiErrorString(t *testing.T) {
testCases := []struct {
errs PutMultiError
want string
}{
{
errs: PutMultiError{},
want: "0 row insertions failed",
},
{
errs: PutMultiError{rowInsertionError("a")},
want: "1 row insertion failed",
},
{
errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")},
want: "2 row insertions failed",
},
}
for _, tc := range testCases {
if tc.errs.Error() != tc.want {
t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
}
}
}
func TestMultiErrorString(t *testing.T) {
testCases := []struct {
errs MultiError
want string
}{
{
errs: MultiError{},
want: "(0 errors)",
},
{
errs: MultiError{errors.New("a")},
want: "a",
},
{
errs: MultiError{errors.New("a"), errors.New("b")},
want: "a (and 1 other error)",
},
{
errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")},
want: "a (and 2 other errors)",
},
}
for _, tc := range testCases {
if tc.errs.Error() != tc.want {
t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
}
}
}
func TestErrorFromErrorProto(t *testing.T) {
for _, test := range []struct {
in *bq.ErrorProto
want *Error
}{
{nil, nil},
{
in: &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"},
want: &Error{Location: "L", Message: "M", Reason: "R"},
},
} {
if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) {
t.Errorf("%v: got %v, want %v", test.in, got, test.want)
}
}
}
func TestErrorString(t *testing.T) {
e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"}
got := e.Error()
if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") {
t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got)
}
}

vendor/cloud.google.com/go/bigquery/extract_op.go generated vendored Normal file

@@ -0,0 +1,59 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type extractOption interface {
customizeExtract(conf *bq.JobConfigurationExtract)
}
// DisableHeader returns an Option that disables the printing of a header row in exported data.
func DisableHeader() Option { return disableHeader{} }
type disableHeader struct{}
func (opt disableHeader) implementsOption() {}
func (opt disableHeader) customizeExtract(conf *bq.JobConfigurationExtract) {
f := false
conf.PrintHeader = &f
}
func (c *Client) extract(ctx context.Context, dst *GCSReference, src *Table, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationExtract{}
dst.customizeExtractDst(payload)
src.customizeExtractSrc(payload)
for _, opt := range options {
o, ok := opt.(extractOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeExtract(payload)
}
job.Configuration = &bq.JobConfiguration{
Extract: payload,
}
return c.service.insertJob(ctx, job, c.projectID)
}
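As the extract tests later in this diff show, an extract job is started with Client.Copy using a *GCSReference destination and a *Table source. A minimal sketch; the bucket and object names are placeholders:
package main
import (
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)
// exportTable extracts a table to Cloud Storage without a CSV header row.
func exportTable(ctx context.Context, c *bigquery.Client, t *bigquery.Table) (*bigquery.Job, error) {
	// A single '*' wildcard lets BigQuery shard large exports across objects.
	dst := c.NewGCSReference("gs://my-bucket/export-*.csv")
	return c.Copy(ctx, dst, t, bigquery.DisableHeader())
}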

vendor/cloud.google.com/go/bigquery/extract_test.go generated vendored Normal file

@@ -0,0 +1,97 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
func defaultExtractJob() *bq.Job {
return &bq.Job{
Configuration: &bq.JobConfiguration{
Extract: &bq.JobConfigurationExtract{
SourceTable: &bq.TableReference{
ProjectId: "project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
DestinationUris: []string{"uri"},
},
},
}
}
func TestExtract(t *testing.T) {
testCases := []struct {
dst *GCSReference
src *Table
options []Option
want *bq.Job
}{
{
dst: defaultGCS,
src: defaultTable(nil),
want: defaultExtractJob(),
},
{
dst: defaultGCS,
src: defaultTable(nil),
options: []Option{
DisableHeader(),
},
want: func() *bq.Job {
j := defaultExtractJob()
f := false
j.Configuration.Extract.PrintHeader = &f
return j
}(),
},
{
dst: &GCSReference{
uris: []string{"uri"},
Compression: Gzip,
DestinationFormat: JSON,
FieldDelimiter: "\t",
},
src: defaultTable(nil),
want: func() *bq.Job {
j := defaultExtractJob()
j.Configuration.Extract.Compression = "GZIP"
j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON"
j.Configuration.Extract.FieldDelimiter = "\t"
return j
}(),
},
}
for _, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
}
if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
t.Errorf("err calling extract: %v", err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
}
}

vendor/cloud.google.com/go/bigquery/gcs.go generated vendored Normal file

@@ -0,0 +1,112 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import bq "google.golang.org/api/bigquery/v2"
// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
uris []string
// FieldDelimiter is the separator for fields in a CSV file, used when loading or exporting data.
// The default is ",".
FieldDelimiter string
// The number of rows at the top of a CSV file that BigQuery will skip when loading the data.
SkipLeadingRows int64
// SourceFormat is the format of the GCS data to be loaded into BigQuery.
// Allowed values are: CSV, JSON, DatastoreBackup. The default is CSV.
SourceFormat DataFormat
// Only used when loading data.
Encoding Encoding
// Quote is the value used to quote data sections in a CSV file.
// The default quotation character is the double quote ("), which is used if both Quote and ForceZeroQuote are unset.
// To specify that no character should be interpreted as a quotation character, set ForceZeroQuote to true.
// Only used when loading data.
Quote string
ForceZeroQuote bool
// DestinationFormat is the format to use when writing exported files.
// Allowed values are: CSV, Avro, JSON. The default is CSV.
// CSV is not supported for tables with nested or repeated fields.
DestinationFormat DataFormat
// Only used when writing data. Default is None.
Compression Compression
}
func (gcs *GCSReference) implementsSource() {}
func (gcs *GCSReference) implementsDestination() {}
// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
// For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func (c *Client) NewGCSReference(uri ...string) *GCSReference {
return &GCSReference{uris: uri}
}
type DataFormat string
const (
CSV DataFormat = "CSV"
Avro DataFormat = "AVRO"
JSON DataFormat = "NEWLINE_DELIMITED_JSON"
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
)
// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string
const (
UTF_8 Encoding = "UTF-8"
ISO_8859_1 Encoding = "ISO-8859-1"
)
// Compression is the type of compression to apply when writing data to Google Cloud Storage.
type Compression string
const (
None Compression = "NONE"
Gzip Compression = "GZIP"
)
func (gcs *GCSReference) customizeLoadSrc(conf *bq.JobConfigurationLoad) {
conf.SourceUris = gcs.uris
conf.SkipLeadingRows = gcs.SkipLeadingRows
conf.SourceFormat = string(gcs.SourceFormat)
conf.Encoding = string(gcs.Encoding)
conf.FieldDelimiter = gcs.FieldDelimiter
if gcs.ForceZeroQuote {
quote := ""
conf.Quote = &quote
} else if gcs.Quote != "" {
conf.Quote = &gcs.Quote
}
}
func (gcs *GCSReference) customizeExtractDst(conf *bq.JobConfigurationExtract) {
conf.DestinationUris = gcs.uris
conf.Compression = string(gcs.Compression)
conf.DestinationFormat = string(gcs.DestinationFormat)
conf.FieldDelimiter = gcs.FieldDelimiter
}
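The Quote/ForceZeroQuote interplay documented above, restated as code; a sketch with placeholder URIs:
package main
import "cloud.google.com/go/bigquery"
func quoteModes(c *bigquery.Client) {
	// Default: data sections are quoted with the double quote (").
	plain := c.NewGCSReference("gs://my-bucket/a.csv")
	// Custom quotation character.
	single := c.NewGCSReference("gs://my-bucket/b.csv")
	single.Quote = "'"
	// No quotation character at all; Quote is then ignored.
	none := c.NewGCSReference("gs://my-bucket/c.csv")
	none.ForceZeroQuote = true
	_, _, _ = plain, single, none
}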

vendor/cloud.google.com/go/bigquery/integration_test.go generated vendored Normal file

@@ -0,0 +1,154 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"net/http"
"reflect"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
)
func TestIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, Scope)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
projID := testutil.ProjID()
c, err := NewClient(ctx, projID, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}
ds := c.Dataset("bigquery_integration_test")
if err := ds.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
t.Fatal(err)
}
schema := Schema([]*FieldSchema{
{Name: "name", Type: StringFieldType},
{Name: "num", Type: IntegerFieldType},
})
table := ds.Table("t1")
// Delete the table in case it already exists. (Ignore errors.)
table.Delete(ctx)
// Create the table.
err = table.Create(ctx, schema, TableExpiration(time.Now().Add(5*time.Minute)))
if err != nil {
t.Fatal(err)
}
// Check table metadata.
md, err := table.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
// TODO(jba): check md more thoroughly.
if got, want := md.ID, fmt.Sprintf("%s:%s.%s", projID, ds.id, table.TableID); got != want {
t.Errorf("metadata.ID: got %q, want %q", got, want)
}
if got, want := md.Type, RegularTable; got != want {
t.Errorf("metadata.Type: got %v, want %v", got, want)
}
// List tables in the dataset.
tables, err := ds.ListTables(ctx)
if err != nil {
t.Fatal(err)
}
if got, want := len(tables), 1; got != want {
t.Fatalf("ListTables: got %d, want %d", got, want)
}
want := *table
if got := tables[0]; !reflect.DeepEqual(got, &want) {
t.Errorf("ListTables: got %v, want %v", got, &want)
}
// Populate the table.
upl := table.NewUploader()
var rows []*ValuesSaver
for i, name := range []string{"a", "b", "c"} {
rows = append(rows, &ValuesSaver{
Schema: schema,
InsertID: name,
Row: []Value{name, i},
})
}
if err := upl.Put(ctx, rows); err != nil {
t.Fatal(err)
}
checkRead := func(src ReadSource) {
it, err := c.Read(ctx, src)
if err != nil {
t.Fatal(err)
}
for i := 0; it.Next(ctx); i++ {
var vals ValueList
if err := it.Get(&vals); err != nil {
t.Fatal(err)
}
if got, want := vals, rows[i].Row; !reflect.DeepEqual([]Value(got), want) {
t.Errorf("got %v, want %v", got, want)
}
}
}
// Read the table.
checkRead(table)
// Query the table.
q := &Query{
Q: "select name, num from t1",
DefaultProjectID: projID,
DefaultDatasetID: ds.id,
}
checkRead(q)
// Query the long way.
dest := &Table{}
job1, err := c.Copy(ctx, dest, q, WriteTruncate)
if err != nil {
t.Fatal(err)
}
job2, err := c.JobFromID(ctx, job1.ID())
if err != nil {
t.Fatal(err)
}
// TODO(jba): poll status until job is done
_, err = job2.Status(ctx)
if err != nil {
t.Fatal(err)
}
checkRead(job2)
// TODO(jba): patch the table
}
func hasStatusCode(err error, code int) bool {
if e, ok := err.(*googleapi.Error); ok && e.Code == code {
return true
}
return false
}

vendor/cloud.google.com/go/bigquery/iterator.go generated vendored Normal file

@@ -0,0 +1,186 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"fmt"
"golang.org/x/net/context"
)
// A pageFetcher returns a page of rows, starting from the row specified by token.
type pageFetcher interface {
fetch(ctx context.Context, s service, token string) (*readDataResult, error)
}
// Iterator provides access to the result of a BigQuery lookup.
// Next must be called before the first call to Get.
type Iterator struct {
service service
err error // contains any error encountered during calls to Next.
// Once Next has been called at least once, schema has the result schema, rs contains the current
// page of data, and nextToken contains the token for fetching the next
// page (empty if there is no more data to be fetched).
schema Schema
rs [][]Value
nextToken string
// The remaining fields contain enough information to fetch the current
// page of data, and determine which row of data from this page is the
// current row.
pf pageFetcher
pageToken string
// The offset from the start of the current page to the current row.
// For a new iterator, this is -1.
offset int64
}
func newIterator(s service, pf pageFetcher) *Iterator {
return &Iterator{
service: s,
pf: pf,
offset: -1,
}
}
// fetchPage loads the current page of data from the server.
// The contents of rs and nextToken are replaced with the loaded data.
// If there is an error while fetching, the error is stored in it.err and false is returned.
func (it *Iterator) fetchPage(ctx context.Context) bool {
var res *readDataResult
var err error
for {
res, err = it.pf.fetch(ctx, it.service, it.pageToken)
if err != errIncompleteJob {
break
}
}
if err != nil {
it.err = err
return false
}
it.schema = res.schema
it.rs = res.rows
it.nextToken = res.pageToken
return true
}
// getEnoughData loads new data into rs until offset no longer points beyond the end of rs.
func (it *Iterator) getEnoughData(ctx context.Context) bool {
if len(it.rs) == 0 {
// Either we have not yet fetched any pages, or we are iterating over an empty dataset.
// In the former case, we should fetch a page of data, so that we can depend on the resultant nextToken.
// In the latter case, it is harmless to fetch a page of data.
if !it.fetchPage(ctx) {
return false
}
}
for it.offset >= int64(len(it.rs)) {
// If offset is still outside the bounds of the loaded data,
// but there are no more pages of data to fetch, then we have
// failed to satisfy the offset.
if it.nextToken == "" {
return false
}
// offset cannot be satisfied with the currently loaded data,
// so we fetch the next page. We no longer need the existing
// cached rows, so we remove them and update the offset to be
// relative to the new page that we're about to fetch.
// NOTE: we can't just set offset to 0, because after
// marshalling/unmarshalling, it's possible for the offset to
// point arbitrarily far beyond the end of rs.
// This can happen if the server returns a different size
// results page before and after marshalling.
it.offset -= int64(len(it.rs))
it.pageToken = it.nextToken
if !it.fetchPage(ctx) {
return false
}
}
return true
}
// Next advances the Iterator to the next row, making that row available
// via the Get method.
// Next must be called before the first call to Get or Schema, and blocks until data is available.
// Next returns false when there are no more rows available, either because
// the end of the output was reached, or because there was an error (consult
// the Err method to determine which).
func (it *Iterator) Next(ctx context.Context) bool {
if it.err != nil {
return false
}
// Advance offset to where we want it to be for the next call to Get.
it.offset++
// offset may now point beyond the end of rs, so we fetch data
// until offset is within its bounds again. If there are no more
// results available, offset will be left pointing beyond the bounds
// of rs.
// At the end of this method, rs will contain at least one element
// unless the dataset we are iterating over is empty.
return it.getEnoughData(ctx)
}
// Err returns the last error encountered by Next, or nil for no error.
func (it *Iterator) Err() error {
return it.err
}
// verifyState checks that the iterator is pointing to a valid row.
func (it *Iterator) verifyState() error {
if it.err != nil {
return fmt.Errorf("called on iterator in error state: %v", it.err)
}
// If Next has been called, then offset should always index into a
// valid row in rs, as long as there is still data available.
if it.offset >= int64(len(it.rs)) || it.offset < 0 {
return errors.New("called without preceding successful call to Next")
}
return nil
}
// Get loads the current row into dst, which must implement ValueLoader.
func (it *Iterator) Get(dst interface{}) error {
if err := it.verifyState(); err != nil {
return fmt.Errorf("Get %v", err)
}
if dst, ok := dst.(ValueLoader); ok {
return dst.Load(it.rs[it.offset])
}
return errors.New("Get called with unsupported argument type")
}
// Schema returns the schema of the result rows.
func (it *Iterator) Schema() (Schema, error) {
if err := it.verifyState(); err != nil {
return nil, fmt.Errorf("Schema %v", err)
}
return it.schema, nil
}
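The Next/Get/Err contract above in loop form, using ValueList as the ValueLoader just as the tests in this diff do:
package main
import (
	"fmt"
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)
// printAll drains an Iterator, honoring the "Next before Get" contract.
func printAll(ctx context.Context, it *bigquery.Iterator) error {
	for it.Next(ctx) {
		var vals bigquery.ValueList
		if err := it.Get(&vals); err != nil {
			return err
		}
		fmt.Println(vals)
	}
	// Next returns false on both end-of-data and error; Err distinguishes.
	return it.Err()
}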

vendor/cloud.google.com/go/bigquery/iterator_test.go generated vendored Normal file

@@ -0,0 +1,538 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"fmt"
"reflect"
"testing"
"golang.org/x/net/context"
)
type fetchResponse struct {
result *readDataResult // The result to return.
err error // The error to return.
}
// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
fetchResponses map[string]fetchResponse
err error
}
func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
call, ok := pf.fetchResponses[token]
if !ok {
pf.err = fmt.Errorf("Unexpected page token: %q", token)
}
return call.result, call.err
}
func TestIterator(t *testing.T) {
fetchFailure := errors.New("fetch failure")
testCases := []struct {
desc string
alreadyConsumed int64 // amount to advance offset before commencing reading.
fetchResponses map[string]fetchResponse
want []ValueList
wantErr error
wantSchema Schema
}{
{
desc: "Iteration over single empty page",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{},
schema: Schema{},
},
},
},
want: []ValueList{},
wantSchema: Schema{},
},
{
desc: "Iteration over single page",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
},
want: []ValueList{{1, 2}, {11, 12}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
{
desc: "Iteration over single page with different schema",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{"1", 2}, {"11", 12}},
schema: Schema{
{Type: StringFieldType},
{Type: IntegerFieldType},
},
},
},
},
want: []ValueList{{"1", 2}, {"11", 12}},
wantSchema: Schema{
{Type: StringFieldType},
{Type: IntegerFieldType},
},
},
{
desc: "Iteration over two pages",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
"a": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
},
want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
{
desc: "Server response includes empty page",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
"a": {
result: &readDataResult{
pageToken: "b",
rows: [][]Value{},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
"b": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
},
want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
{
desc: "Fetch error",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
"a": {
// We return some data from this fetch, but also an error.
// So the end result should include only data from the previous fetch.
err: fetchFailure,
result: &readDataResult{
pageToken: "b",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
},
want: []ValueList{{1, 2}, {11, 12}},
wantErr: fetchFailure,
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
{
desc: "Skip over a single element",
alreadyConsumed: 1,
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
"a": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
},
want: []ValueList{{11, 12}, {101, 102}, {111, 112}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
{
desc: "Skip over an entire page",
alreadyConsumed: 2,
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
"a": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
},
want: []ValueList{{101, 102}, {111, 112}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
{
desc: "Skip beyond start of second page",
alreadyConsumed: 3,
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
"a": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
},
want: []ValueList{{111, 112}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
{
desc: "Skip beyond all data",
alreadyConsumed: 4,
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
"a": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
},
// In this test case, Next will return false on its first call,
// so we won't even attempt to call Get.
want: []ValueList{},
wantSchema: Schema{},
},
}
for _, tc := range testCases {
pf := &pageFetcherStub{
fetchResponses: tc.fetchResponses,
}
it := newIterator(nil, pf)
it.offset += tc.alreadyConsumed
values, schema, err := consumeIterator(it)
if err != nil {
t.Fatalf("%s: %v", tc.desc, err)
}
if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
}
if it.Err() != tc.wantErr {
t.Errorf("%s: iterator.Err:\ngot: %v\nwant: %v", tc.desc, it.Err(), tc.wantErr)
}
if (len(schema) != 0 || len(tc.wantSchema) != 0) && !reflect.DeepEqual(schema, tc.wantSchema) {
t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
}
}
}
// consumeIterator reads the schema and all values from an iterator and returns them.
func consumeIterator(it *Iterator) ([]ValueList, Schema, error) {
var got []ValueList
var schema Schema
for it.Next(context.Background()) {
var vals ValueList
var err error
if err = it.Get(&vals); err != nil {
return nil, Schema{}, fmt.Errorf("err calling Get: %v", err)
}
got = append(got, vals)
if schema, err = it.Schema(); err != nil {
return nil, Schema{}, fmt.Errorf("err calling Schema: %v", err)
}
}
return got, schema, nil
}
func TestGetBeforeNext(t *testing.T) {
// TODO: once marshalling/unmarshalling of iterators is implemented, do a similar test for unmarshalled iterators.
pf := &pageFetcherStub{
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
},
},
},
}
it := newIterator(nil, pf)
var vals ValueList
if err := it.Get(&vals); err == nil {
t.Errorf("Expected error calling Get before Next")
}
}
type delayedPageFetcher struct {
pageFetcherStub
delayCount int
}
func (pf *delayedPageFetcher) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
if pf.delayCount > 0 {
pf.delayCount--
return nil, errIncompleteJob
}
return pf.pageFetcherStub.fetch(ctx, s, token)
}
func TestIterateIncompleteJob(t *testing.T) {
want := []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}}
pf := pageFetcherStub{
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
},
},
"a": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
},
},
},
}
dpf := &delayedPageFetcher{
pageFetcherStub: pf,
delayCount: 1,
}
it := newIterator(nil, dpf)
values, _, err := consumeIterator(it)
if err != nil {
t.Fatal(err)
}
if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) {
t.Errorf("values: got:\n%v\nwant:\n%v", values, want)
}
if it.Err() != nil {
t.Fatalf("iterator.Err: got:\n%v", it.Err())
}
if dpf.delayCount != 0 {
t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount)
}
}
func TestGetDuringErrorState(t *testing.T) {
pf := &pageFetcherStub{
fetchResponses: map[string]fetchResponse{
"": {err: errors.New("bang")},
},
}
it := newIterator(nil, pf)
var vals ValueList
it.Next(context.Background())
if it.Err() == nil {
t.Errorf("Expected error after calling Next")
}
if err := it.Get(&vals); err == nil {
t.Errorf("Expected error calling Get when iterator has a non-nil error.")
}
}
func TestGetAfterFinished(t *testing.T) {
testCases := []struct {
alreadyConsumed int64 // amount to advance offset before commencing reading.
fetchResponses map[string]fetchResponse
want []ValueList
}{
{
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
},
},
},
want: []ValueList{{1, 2}, {11, 12}},
},
{
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{},
},
},
},
want: []ValueList{},
},
{
alreadyConsumed: 100,
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
},
},
},
want: []ValueList{},
},
}
for _, tc := range testCases {
pf := &pageFetcherStub{
fetchResponses: tc.fetchResponses,
}
it := newIterator(nil, pf)
it.offset += tc.alreadyConsumed
values, _, err := consumeIterator(it)
if err != nil {
t.Fatal(err)
}
if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
}
if it.Err() != nil {
t.Fatalf("iterator.Err: got:\n%v\nwant:\n:nil", it.Err())
}
// Try calling Get again.
var vals ValueList
if err := it.Get(&vals); err == nil {
t.Errorf("Expected error calling Get when there are no more values")
}
}
}

vendor/cloud.google.com/go/bigquery/job.go generated vendored Normal file

@@ -0,0 +1,131 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
// A Job represents an operation which has been submitted to BigQuery for processing.
type Job struct {
service service
projectID string
jobID string
isQuery bool
}
// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
jobType, err := c.service.getJobType(ctx, c.projectID, id)
if err != nil {
return nil, err
}
return &Job{
service: c.service,
projectID: c.projectID,
jobID: id,
isQuery: jobType == queryJobType,
}, nil
}
func (j *Job) ID() string {
return j.jobID
}
// State is one of a sequence of states that a Job progresses through as it is processed.
type State int
const (
Pending State = iota
Running
Done
)
// JobStatus contains the current State of a job, and errors encountered while processing that job.
type JobStatus struct {
State State
err error
// All errors encountered during the running of the job.
// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
Errors []*Error
}
// jobOption is an Option which modifies a bq.Job proto.
// This is used for configuring values that apply to all operations, such as setting a jobReference.
type jobOption interface {
customizeJob(job *bq.Job, projectID string)
}
type jobID string
// JobID returns an Option that sets the job ID of a BigQuery job.
// If this Option is not used, a job ID is generated automatically.
func JobID(ID string) Option {
return jobID(ID)
}
func (opt jobID) implementsOption() {}
func (opt jobID) customizeJob(job *bq.Job, projectID string) {
job.JobReference = &bq.JobReference{
JobId: string(opt),
ProjectId: projectID,
}
}
// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool {
return s.State == Done
}
// Err returns the error that caused the job to complete unsuccessfully (if any).
func (s *JobStatus) Err() error {
return s.err
}
// Status returns the current status of the job. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
return j.service.jobStatus(ctx, j.projectID, j.jobID)
}
// Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error {
return j.service.jobCancel(ctx, j.projectID, j.jobID)
}
func (j *Job) implementsReadSource() {}
func (j *Job) customizeReadQuery(cursor *readQueryConf) error {
// There are multiple kinds of jobs, but only a query job is suitable for reading.
if !j.isQuery {
return errors.New("Cannot read from a non-query job")
}
cursor.projectID = j.projectID
cursor.jobID = j.jobID
return nil
}
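A sketch of re-attaching to a job by ID and polling it to completion with the API above; the poll interval is arbitrary (the integration test in this diff carries a TODO noting that built-in polling does not exist yet):
package main
import (
	"time"
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)
// waitForJob re-attaches to an existing job and polls until it finishes.
func waitForJob(ctx context.Context, c *bigquery.Client, id string) error {
	job, err := c.JobFromID(ctx, id)
	if err != nil {
		return err
	}
	for {
		status, err := job.Status(ctx)
		if err != nil {
			return err
		}
		if status.Done() {
			return status.Err() // nil if the job succeeded
		}
		time.Sleep(time.Second)
	}
}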

vendor/cloud.google.com/go/bigquery/legacy.go generated vendored Normal file

@@ -0,0 +1,70 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
)
// OpenTable creates a handle to an existing BigQuery table. If the table does
// not already exist, subsequent uses of the *Table will fail.
//
// Deprecated: use Client.DatasetInProject.Table instead.
func (c *Client) OpenTable(projectID, datasetID, tableID string) *Table {
return c.Table(projectID, datasetID, tableID)
}
// Table creates a handle to a BigQuery table.
//
// Use this method to reference a table in a project other than that of the
// Client.
//
// Deprecated: use Client.DatasetInProject.Table instead.
func (c *Client) Table(projectID, datasetID, tableID string) *Table {
return &Table{ProjectID: projectID, DatasetID: datasetID, TableID: tableID, service: c.service}
}
// CreateTable creates a table in the BigQuery service and returns a handle to it.
//
// Deprecated: use Table.Create instead.
func (c *Client) CreateTable(ctx context.Context, projectID, datasetID, tableID string, options ...CreateTableOption) (*Table, error) {
t := c.Table(projectID, datasetID, tableID)
if err := t.Create(ctx, options...); err != nil {
return nil, err
}
return t, nil
}
// Read fetches data from a ReadSource and returns the data via an Iterator.
//
// Deprecated: use Query.Read, Job.Read or Table.Read instead.
func (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error) {
switch src := src.(type) {
case *Job:
return src.Read(ctx, options...)
case *Query:
// For compatibility, support Query values created by literal, rather
// than Client.Query.
if src.client == nil {
src.client = c
}
return src.Read(ctx, options...)
case *Table:
return src.Read(ctx, options...)
}
return nil, fmt.Errorf("src (%T) does not support the Read operation", src)
}
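The deprecation notes above amount to a mechanical migration; a before/after sketch:
package main
import (
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)
func readBothWays(ctx context.Context, c *bigquery.Client, t *bigquery.Table) error {
	// Deprecated: dispatch through Client.Read.
	if _, err := c.Read(ctx, t); err != nil {
		return err
	}
	// Preferred: call Read on the source itself.
	_, err := t.Read(ctx)
	return err
}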

vendor/cloud.google.com/go/bigquery/load_op.go generated vendored Normal file

@@ -0,0 +1,112 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type loadOption interface {
customizeLoad(conf *bq.JobConfigurationLoad)
}
// DestinationSchema returns an Option that specifies the schema to use when loading data into a new table.
// A DestinationSchema Option must be supplied when loading data from Google Cloud Storage into a non-existent table.
// Caveat: DestinationSchema is not required if the data being loaded is a datastore backup.
// schema must not be nil.
func DestinationSchema(schema Schema) Option { return destSchema{Schema: schema} }
type destSchema struct {
Schema
}
func (opt destSchema) implementsOption() {}
func (opt destSchema) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.Schema = opt.asTableSchema()
}
// MaxBadRecords returns an Option that sets the maximum number of bad records that will be ignored.
// If this maximum is exceeded, the operation will be unsuccessful.
func MaxBadRecords(n int64) Option { return maxBadRecords(n) }
type maxBadRecords int64
func (opt maxBadRecords) implementsOption() {}
func (opt maxBadRecords) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.MaxBadRecords = int64(opt)
}
// AllowJaggedRows returns an Option that causes missing trailing optional columns to be tolerated in CSV data. Missing values are treated as nulls.
func AllowJaggedRows() Option { return allowJaggedRows{} }
type allowJaggedRows struct{}
func (opt allowJaggedRows) implementsOption() {}
func (opt allowJaggedRows) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.AllowJaggedRows = true
}
// AllowQuotedNewlines returns an Option that allows quoted data sections containing newlines in CSV data.
func AllowQuotedNewlines() Option { return allowQuotedNewlines{} }
type allowQuotedNewlines struct{}
func (opt allowQuotedNewlines) implementsOption() {}
func (opt allowQuotedNewlines) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.AllowQuotedNewlines = true
}
// IgnoreUnknownValues returns an Option that causes values not matching the schema to be tolerated.
// Unknown values are ignored. For CSV this ignores extra values at the end of a line.
// For JSON this ignores named values that do not match any column name.
// If this Option is not used, records containing unknown values are treated as bad records.
// The MaxBadRecords Option can be used to customize how bad records are handled.
func IgnoreUnknownValues() Option { return ignoreUnknownValues{} }
type ignoreUnknownValues struct{}
func (opt ignoreUnknownValues) implementsOption() {}
func (opt ignoreUnknownValues) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.IgnoreUnknownValues = true
}
func (c *Client) load(ctx context.Context, dst *Table, src *GCSReference, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationLoad{}
dst.customizeLoadDst(payload)
src.customizeLoadSrc(payload)
for _, opt := range options {
o, ok := opt.(loadOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeLoad(payload)
}
job.Configuration = &bq.JobConfiguration{
Load: payload,
}
return c.service.insertJob(ctx, job, c.projectID)
}
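Mirroring the load tests later in this diff, a load job is started with Client.Copy using a *Table destination and a *GCSReference source; a sketch with placeholder names that combines the options defined above:
package main
import (
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)
// loadCSV loads a headered CSV object into dst, creating it if needed.
func loadCSV(ctx context.Context, c *bigquery.Client, dst *bigquery.Table, schema bigquery.Schema) (*bigquery.Job, error) {
	src := c.NewGCSReference("gs://my-bucket/data.csv")
	src.SkipLeadingRows = 1 // skip the header row
	return c.Copy(ctx, dst, src,
		bigquery.DestinationSchema(schema), // required if dst does not yet exist
		bigquery.MaxBadRecords(10),
		bigquery.AllowQuotedNewlines(),
	)
}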

vendor/cloud.google.com/go/bigquery/load_test.go generated vendored Normal file

@@ -0,0 +1,198 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
func defaultLoadJob() *bq.Job {
return &bq.Job{
Configuration: &bq.JobConfiguration{
Load: &bq.JobConfigurationLoad{
DestinationTable: &bq.TableReference{
ProjectId: "project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
SourceUris: []string{"uri"},
},
},
}
}
func stringFieldSchema() *FieldSchema {
return &FieldSchema{Name: "fieldname", Type: StringFieldType}
}
func nestedFieldSchema() *FieldSchema {
return &FieldSchema{
Name: "nested",
Type: RecordFieldType,
Schema: Schema{stringFieldSchema()},
}
}
func bqStringFieldSchema() *bq.TableFieldSchema {
return &bq.TableFieldSchema{
Name: "fieldname",
Type: "STRING",
}
}
func bqNestedFieldSchema() *bq.TableFieldSchema {
return &bq.TableFieldSchema{
Name: "nested",
Type: "RECORD",
Fields: []*bq.TableFieldSchema{bqStringFieldSchema()},
}
}
func TestLoad(t *testing.T) {
testCases := []struct {
dst *Table
src *GCSReference
options []Option
want *bq.Job
}{
{
dst: defaultTable(nil),
src: defaultGCS,
want: defaultLoadJob(),
},
{
dst: defaultTable(nil),
src: defaultGCS,
options: []Option{
MaxBadRecords(1),
AllowJaggedRows(),
AllowQuotedNewlines(),
IgnoreUnknownValues(),
},
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.MaxBadRecords = 1
j.Configuration.Load.AllowJaggedRows = true
j.Configuration.Load.AllowQuotedNewlines = true
j.Configuration.Load.IgnoreUnknownValues = true
return j
}(),
},
{
dst: &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
},
options: []Option{CreateNever, WriteTruncate},
src: defaultGCS,
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
return j
}(),
},
{
dst: &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
},
src: defaultGCS,
options: []Option{
DestinationSchema(Schema{
stringFieldSchema(),
nestedFieldSchema(),
}),
},
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.Schema = &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqStringFieldSchema(),
bqNestedFieldSchema(),
}}
return j
}(),
},
{
dst: defaultTable(nil),
src: &GCSReference{
uris: []string{"uri"},
SkipLeadingRows: 1,
SourceFormat: JSON,
Encoding: UTF_8,
FieldDelimiter: "\t",
Quote: "-",
},
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SkipLeadingRows = 1
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
j.Configuration.Load.Encoding = "UTF-8"
j.Configuration.Load.FieldDelimiter = "\t"
hyphen := "-"
j.Configuration.Load.Quote = &hyphen
return j
}(),
},
{
dst: defaultTable(nil),
src: &GCSReference{
uris: []string{"uri"},
Quote: "",
},
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.Quote = nil
return j
}(),
},
{
dst: defaultTable(nil),
src: &GCSReference{
uris: []string{"uri"},
Quote: "",
ForceZeroQuote: true,
},
want: func() *bq.Job {
j := defaultLoadJob()
empty := ""
j.Configuration.Load.Quote = &empty
return j
}(),
},
}
for _, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
}
if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
t.Errorf("err calling load: %v", err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("loading: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
}
}

vendor/cloud.google.com/go/bigquery/query.go generated vendored Normal file

@@ -0,0 +1,44 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import bq "google.golang.org/api/bigquery/v2"
// Query represents a query to be executed. Use Client.Query to create a query.
type Query struct {
// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
Q string
// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.
// If DefaultProjectID is set, DefaultDatasetID must also be set.
DefaultProjectID string
DefaultDatasetID string
client *Client
}
func (q *Query) implementsSource() {}
func (q *Query) implementsReadSource() {}
func (q *Query) customizeQuerySrc(conf *bq.JobConfigurationQuery) {
conf.Query = q.Q
if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
conf.DefaultDataset = &bq.DatasetReference{
DatasetId: q.DefaultDatasetID,
ProjectId: q.DefaultProjectID,
}
}
}
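A sketch of reading from a Query literal; the integration test in this diff does the same, going through Client.Read so that the unexported client field gets wired up (project and dataset IDs are placeholders):
package main
import (
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)
func runQuery(ctx context.Context, c *bigquery.Client) (*bigquery.Iterator, error) {
	q := &bigquery.Query{
		Q:                "select name, num from t1",
		DefaultProjectID: "my-project",
		DefaultDatasetID: "my_dataset",
	}
	// Client.Read attaches the client to a literal Query before reading.
	return c.Read(ctx, q)
}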

vendor/cloud.google.com/go/bigquery/query_op.go generated vendored Normal file

@@ -0,0 +1,148 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type queryOption interface {
customizeQuery(conf *bq.JobConfigurationQuery)
}
// DisableQueryCache returns an Option that prevents results being fetched from the query cache.
// If this Option is not used, results are fetched from the cache if they are available.
// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
// Cached results are only available when TableID is unspecified in the query's destination Table.
// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
func DisableQueryCache() Option { return disableQueryCache{} }
type disableQueryCache struct{}
func (opt disableQueryCache) implementsOption() {}
func (opt disableQueryCache) customizeQuery(conf *bq.JobConfigurationQuery) {
f := false
conf.UseQueryCache = &f
}
// DisableFlattenedResults returns an Option that prevents results being flattened.
// If this Option is not used, results from nested and repeated fields are flattened.
// DisableFlattenedResults implies AllowLargeResults.
// For more information, see https://cloud.google.com/bigquery/docs/data#nested
func DisableFlattenedResults() Option { return disableFlattenedResults{} }
type disableFlattenedResults struct{}
func (opt disableFlattenedResults) implementsOption() {}
func (opt disableFlattenedResults) customizeQuery(conf *bq.JobConfigurationQuery) {
f := false
conf.FlattenResults = &f
// DisableFlattenedResults implies AllowLargeResults
allowLargeResults{}.customizeQuery(conf)
}
// AllowLargeResults returns an Option that allows the query to produce arbitrarily large result tables.
// The destination must be a table.
// When using this option, queries will take longer to execute, even if the result set is small.
// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
func AllowLargeResults() Option { return allowLargeResults{} }
type allowLargeResults struct{}
func (opt allowLargeResults) implementsOption() {}
func (opt allowLargeResults) customizeQuery(conf *bq.JobConfigurationQuery) {
conf.AllowLargeResults = true
}
// JobPriority returns an Option that causes a query to be scheduled with the specified priority.
// The default priority is InteractivePriority.
// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
func JobPriority(priority string) Option { return jobPriority(priority) }
type jobPriority string
func (opt jobPriority) implementsOption() {}
func (opt jobPriority) customizeQuery(conf *bq.JobConfigurationQuery) {
conf.Priority = string(opt)
}
const (
BatchPriority = "BATCH"
InteractivePriority = "INTERACTIVE"
)
// MaxBillingTier returns an Option that sets the maximum billing tier for a Query.
// Queries that have resource usage beyond this tier will fail (without
// incurring a charge). If this Option is not used, the project default will be used.
func MaxBillingTier(tier int) Option { return maxBillingTier(tier) }
type maxBillingTier int
func (opt maxBillingTier) implementsOption() {}
func (opt maxBillingTier) customizeQuery(conf *bq.JobConfigurationQuery) {
tier := int64(opt)
conf.MaximumBillingTier = &tier
}
// MaxBytesBilled returns an Option that limits the number of bytes billed for
// this job. Queries that would exceed this limit will fail (without incurring
// a charge).
// If this Option is not used, or bytes is < 1, the project default will be
// used.
func MaxBytesBilled(bytes int64) Option { return maxBytesBilled(bytes) }
type maxBytesBilled int64
func (opt maxBytesBilled) implementsOption() {}
func (opt maxBytesBilled) customizeQuery(conf *bq.JobConfigurationQuery) {
if opt >= 1 {
conf.MaximumBytesBilled = int64(opt)
}
}
func (c *Client) query(ctx context.Context, dst *Table, src *Query, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationQuery{}
dst.customizeQueryDst(payload)
src.customizeQuerySrc(payload)
for _, opt := range options {
o, ok := opt.(queryOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeQuery(payload)
}
job.Configuration = &bq.JobConfiguration{
Query: payload,
}
j, err := c.service.insertJob(ctx, job, c.projectID)
if err != nil {
return nil, err
}
j.isQuery = true
return j, nil
}
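Combining the query options above: a sketch that materializes query results into a destination table, using the Copy(dst *Table, src *Query) shape exercised by the tests that follow; the identifiers are placeholders:
package main
import (
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)
func queryIntoTable(ctx context.Context, c *bigquery.Client, q *bigquery.Query) (*bigquery.Job, error) {
	dest := &bigquery.Table{
		ProjectID: "my-project",
		DatasetID: "my_dataset",
		TableID:   "results",
	}
	return c.Copy(ctx, dest, q,
		bigquery.AllowLargeResults(), // required for arbitrarily large result tables
		bigquery.DisableQueryCache(),
		bigquery.JobPriority(bigquery.BatchPriority),
	)
}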

vendor/cloud.google.com/go/bigquery/query_test.go generated vendored Normal file

@@ -0,0 +1,168 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
func defaultQueryJob() *bq.Job {
return &bq.Job{
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
DestinationTable: &bq.TableReference{
ProjectId: "project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
Query: "query string",
DefaultDataset: &bq.DatasetReference{
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
},
},
}
}
func TestQuery(t *testing.T) {
testCases := []struct {
dst *Table
src *Query
options []Option
want *bq.Job
}{
{
dst: defaultTable(nil),
src: defaultQuery,
want: defaultQueryJob(),
},
{
dst: defaultTable(nil),
src: &Query{
Q: "query string",
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DefaultDataset = nil
return j
}(),
},
{
dst: &Table{},
src: defaultQuery,
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DestinationTable = nil
return j
}(),
},
{
dst: &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
},
src: defaultQuery,
options: []Option{CreateNever, WriteTruncate},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
return j
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{DisableQueryCache()},
want: func() *bq.Job {
j := defaultQueryJob()
f := false
j.Configuration.Query.UseQueryCache = &f
return j
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{AllowLargeResults()},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.AllowLargeResults = true
return j
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{DisableFlattenedResults()},
want: func() *bq.Job {
j := defaultQueryJob()
f := false
j.Configuration.Query.FlattenResults = &f
j.Configuration.Query.AllowLargeResults = true
return j
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{JobPriority("low")},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.Priority = "low"
return j
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{MaxBillingTier(3), MaxBytesBilled(5)},
want: func() *bq.Job {
j := defaultQueryJob()
tier := int64(3)
j.Configuration.Query.MaximumBillingTier = &tier
j.Configuration.Query.MaximumBytesBilled = 5
return j
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{MaxBytesBilled(-1)},
want: defaultQueryJob(),
},
}
for _, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
}
if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
t.Errorf("err calling query: %v", err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
}
}

vendor/cloud.google.com/go/bigquery/read_op.go generated vendored Normal file

@@ -0,0 +1,70 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import "golang.org/x/net/context"
// RecordsPerRequest returns a ReadOption that sets the number of records to fetch per request when streaming data from BigQuery.
func RecordsPerRequest(n int64) ReadOption { return recordsPerRequest(n) }
type recordsPerRequest int64
func (opt recordsPerRequest) customizeRead(conf *pagingConf) {
conf.recordsPerRequest = int64(opt)
conf.setRecordsPerRequest = true
}
// StartIndex returns a ReadOption that sets the zero-based index of the row to start reading from.
func StartIndex(i uint64) ReadOption { return startIndex(i) }
type startIndex uint64
func (opt startIndex) customizeRead(conf *pagingConf) {
conf.startIndex = uint64(opt)
}
func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
return s.readTabledata(ctx, conf, token)
}
// Read fetches the contents of the table.
func (t *Table) Read(_ context.Context, options ...ReadOption) (*Iterator, error) {
conf := &readTableConf{}
t.customizeReadSrc(conf)
for _, o := range options {
o.customizeRead(&conf.paging)
}
return newIterator(t.service, conf), nil
}
func (conf *readQueryConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
return s.readQuery(ctx, conf, token)
}
// Read fetches the results of a query job.
func (j *Job) Read(_ context.Context, options ...ReadOption) (*Iterator, error) {
conf := &readQueryConf{}
if err := j.customizeReadQuery(conf); err != nil {
return nil, err
}
for _, o := range options {
o.customizeRead(&conf.paging)
}
return newIterator(j.service, conf), nil
}
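// Editor's sketch, not part of the vendored file: the options above in use,
// assuming t is a *Table wired to a live service. Iterator (defined in
// iterator.go, not shown in this diff) exposes Next, Get and Err as used in
// the tests below.
func readAll(ctx context.Context, t *Table) ([]ValueList, error) {
	it, err := t.Read(ctx, RecordsPerRequest(500), StartIndex(10))
	if err != nil {
		return nil, err
	}
	var rows []ValueList
	for it.Next(ctx) {
		var row ValueList
		if err := it.Get(&row); err != nil {
			return nil, err
		}
		rows = append(rows, row)
	}
	return rows, it.Err()
}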

308
vendor/cloud.google.com/go/bigquery/read_test.go generated vendored Normal file

@@ -0,0 +1,308 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"reflect"
"testing"
"golang.org/x/net/context"
)
type readTabledataArgs struct {
conf *readTableConf
tok string
}
type readQueryArgs struct {
conf *readQueryConf
tok string
}
// readServiceStub services read requests by returning data from an in-memory list of values.
type readServiceStub struct {
// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
values [][][]Value // contains pages / rows / columns.
pageTokens map[string]string // maps incoming page token to returned page token.
// arguments are recorded for later inspection.
readTabledataCalls []readTabledataArgs
readQueryCalls []readQueryArgs
service
}
func (s *readServiceStub) readValues(tok string) *readDataResult {
result := &readDataResult{
pageToken: s.pageTokens[tok],
rows: s.values[0],
}
s.values = s.values[1:]
return result
}
func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
return s.readValues(token), nil
}
func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token})
return s.readValues(token), nil
}
func TestRead(t *testing.T) {
// The data for the service stub to return is populated for each test case in the testCases for loop.
service := &readServiceStub{}
c := &Client{
service: service,
}
queryJob := &Job{
projectID: "project-id",
jobID: "job-id",
service: service,
isQuery: true,
}
for _, src := range []ReadSource{defaultTable(service), queryJob} {
testCases := []struct {
data [][][]Value
pageTokens map[string]string
want []ValueList
}{
{
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
pageTokens: map[string]string{"": "a", "a": ""},
want: []ValueList{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
},
{
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
pageTokens: map[string]string{"": ""}, // no more pages after first one.
want: []ValueList{{1, 2}, {11, 12}},
},
}
for _, tc := range testCases {
service.values = tc.data
service.pageTokens = tc.pageTokens
if got, ok := doRead(t, c, src); ok {
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
}
}
}
}
}
// doRead calls Read with a ReadSource. Get is repeatedly called on the Iterator returned by Read and the results are returned.
func doRead(t *testing.T, c *Client, src ReadSource) ([]ValueList, bool) {
it, err := c.Read(context.Background(), src)
if err != nil {
t.Errorf("err calling Read: %v", err)
return nil, false
}
var got []ValueList
for it.Next(context.Background()) {
var vals ValueList
if err := it.Get(&vals); err != nil {
t.Errorf("err calling Get: %v", err)
return nil, false
}
got = append(got, vals)
}
return got, true
}
func TestNoMoreValues(t *testing.T) {
c := &Client{
service: &readServiceStub{
values: [][][]Value{{{1, 2}, {11, 12}}},
},
}
it, err := c.Read(context.Background(), defaultTable(c.service))
if err != nil {
t.Fatalf("err calling Read: %v", err)
}
var vals ValueList
// We expect to retrieve two values and then fail on the next attempt.
if !it.Next(context.Background()) {
t.Fatalf("Next: got: false: want: true")
}
if !it.Next(context.Background()) {
t.Fatalf("Next: got: false: want: true")
}
if err := it.Get(&vals); err != nil {
t.Fatalf("Get: got: %v: want: nil", err)
}
if it.Next(context.Background()) {
t.Fatalf("Next: got: true: want: false")
}
if err := it.Get(&vals); err == nil {
t.Fatalf("Get: got: %v: want: non-nil", err)
}
}
// delayedReadStub simulates reading results from a query that has not yet
// completed. Its readQuery method initially reports that the query job is not
// yet complete. Subsequently, it proxies the request through to another
// service stub.
type delayedReadStub struct {
numDelays int
readServiceStub
}
func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
if s.numDelays > 0 {
s.numDelays--
return nil, errIncompleteJob
}
return s.readServiceStub.readQuery(ctx, conf, token)
}
// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete.
func TestIncompleteJob(t *testing.T) {
service := &delayedReadStub{
numDelays: 2,
readServiceStub: readServiceStub{
values: [][][]Value{{{1, 2}}},
},
}
c := &Client{service: service}
queryJob := &Job{
projectID: "project-id",
jobID: "job-id",
service: service,
isQuery: true,
}
it, err := c.Read(context.Background(), queryJob)
if err != nil {
t.Fatalf("err calling Read: %v", err)
}
var got ValueList
want := ValueList{1, 2}
if !it.Next(context.Background()) {
t.Fatalf("Next: got: false: want: true")
}
if err := it.Get(&got); err != nil {
t.Fatalf("Error calling Get: %v", err)
}
if service.numDelays != 0 {
t.Errorf("remaining numDelays : got: %v want:0", service.numDelays)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, want)
}
}
type errorReadService struct {
service
}
func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
return nil, errors.New("bang!")
}
func TestReadError(t *testing.T) {
// test that service read errors are propagated back to the caller.
c := &Client{service: &errorReadService{}}
it, err := c.Read(context.Background(), defaultTable(c.service))
if err != nil {
// Read should not return an error; only Err should.
t.Fatalf("err calling Read: %v", err)
}
if it.Next(context.Background()) {
t.Fatalf("Next: got: true: want: false")
}
if err := it.Err(); err.Error() != "bang!" {
t.Fatalf("Get: got: %v: want: bang!", err)
}
}
func TestReadTabledataOptions(t *testing.T) {
// test that read options are propagated.
s := &readServiceStub{
values: [][][]Value{{{1, 2}}},
}
c := &Client{service: s}
it, err := c.Read(context.Background(), defaultTable(s), RecordsPerRequest(5))
if err != nil {
t.Fatalf("err calling Read: %v", err)
}
if !it.Next(context.Background()) {
t.Fatalf("Next: got: false: want: true")
}
want := []readTabledataArgs{{
conf: &readTableConf{
projectID: "project-id",
datasetID: "dataset-id",
tableID: "table-id",
paging: pagingConf{
recordsPerRequest: 5,
setRecordsPerRequest: true,
},
},
tok: "",
}}
if !reflect.DeepEqual(s.readTabledataCalls, want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
}
}
func TestReadQueryOptions(t *testing.T) {
// test that read options are propagated.
s := &readServiceStub{
values: [][][]Value{{{1, 2}}},
}
c := &Client{service: s}
queryJob := &Job{
projectID: "project-id",
jobID: "job-id",
service: s,
isQuery: true,
}
it, err := c.Read(context.Background(), queryJob, RecordsPerRequest(5))
if err != nil {
t.Fatalf("err calling Read: %v", err)
}
if !it.Next(context.Background()) {
t.Fatalf("Next: got: false: want: true")
}
want := []readQueryArgs{{
conf: &readQueryConf{
projectID: "project-id",
jobID: "job-id",
paging: pagingConf{
recordsPerRequest: 5,
setRecordsPerRequest: true,
},
},
tok: "",
}}
if !reflect.DeepEqual(s.readQueryCalls, want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want)
}
}

233
vendor/cloud.google.com/go/bigquery/schema.go generated vendored Normal file

@@ -0,0 +1,233 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"reflect"
bq "google.golang.org/api/bigquery/v2"
)
// Schema describes the fields in a table or query result.
type Schema []*FieldSchema
// A FieldSchema describes a single field in a table schema.
type FieldSchema struct {
// The field name.
// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
// and must start with a letter or underscore.
// The maximum length is 128 characters.
Name string
// A description of the field. The maximum length is 16,384 characters.
Description string
// Whether the field may contain multiple values.
Repeated bool
// Whether the field is required. Ignored if Repeated is true.
Required bool
// The field data type. If Type is Record, then this field contains a nested schema,
// which is described by Schema.
Type FieldType
// Describes the nested schema if Type is set to Record.
Schema Schema
}
func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
tfs := &bq.TableFieldSchema{
Description: fs.Description,
Name: fs.Name,
Type: string(fs.Type),
}
if fs.Repeated {
tfs.Mode = "REPEATED"
} else if fs.Required {
tfs.Mode = "REQUIRED"
} // else leave as default, which is interpreted as NULLABLE.
for _, f := range fs.Schema {
tfs.Fields = append(tfs.Fields, f.asTableFieldSchema())
}
return tfs
}
func (s Schema) asTableSchema() *bq.TableSchema {
var fields []*bq.TableFieldSchema
for _, f := range s {
fields = append(fields, f.asTableFieldSchema())
}
return &bq.TableSchema{Fields: fields}
}
// customizeCreateTable allows a Schema to be used directly as an option to CreateTable.
func (s Schema) customizeCreateTable(conf *createTableConf) {
conf.schema = s.asTableSchema()
}
func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
fs := &FieldSchema{
Description: tfs.Description,
Name: tfs.Name,
Repeated: tfs.Mode == "REPEATED",
Required: tfs.Mode == "REQUIRED",
Type: FieldType(tfs.Type),
}
for _, f := range tfs.Fields {
fs.Schema = append(fs.Schema, convertTableFieldSchema(f))
}
return fs
}
func convertTableSchema(ts *bq.TableSchema) Schema {
var s Schema
for _, f := range ts.Fields {
s = append(s, convertTableFieldSchema(f))
}
return s
}
// FieldType is the type of a field.
type FieldType string
const (
StringFieldType FieldType = "STRING"
IntegerFieldType FieldType = "INTEGER"
FloatFieldType FieldType = "FLOAT"
BooleanFieldType FieldType = "BOOLEAN"
TimestampFieldType FieldType = "TIMESTAMP"
RecordFieldType FieldType = "RECORD"
)
var errNoStruct = errors.New("bigquery: can only infer schema from struct or pointer to struct")
var errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
// InferSchema tries to derive a BigQuery schema from the supplied struct value.
// NOTE: All fields in the returned Schema are configured to be required,
// unless the corresponding field in the supplied struct is a slice or array.
// It is considered an error if the struct (including nested structs) contains
// any exported fields that are pointers or one of the following types:
// map, interface, complex64, complex128, func, chan.
// In these cases, an error will be returned.
// Future versions may handle these cases without error.
func InferSchema(st interface{}) (Schema, error) {
return inferStruct(reflect.TypeOf(st))
}
func inferStruct(rt reflect.Type) (Schema, error) {
switch rt.Kind() {
case reflect.Struct:
return inferFields(rt)
default:
return nil, errNoStruct
}
}
// inferFieldSchema infers the FieldSchema for a Go type
func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
switch {
case isByteSlice(rt):
return &FieldSchema{Required: true, Type: StringFieldType}, nil
case isTimeTime(rt):
return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
case isRepeated(rt):
et := rt.Elem()
if isRepeated(et) && !isByteSlice(et) {
// Multi dimensional slices/arrays are not supported by BigQuery
return nil, errUnsupportedFieldType
}
f, err := inferFieldSchema(et)
if err != nil {
return nil, err
}
f.Repeated = true
f.Required = false
return f, nil
case isStruct(rt):
nested, err := inferFields(rt)
if err != nil {
return nil, err
}
return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil
}
switch rt.Kind() {
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int,
reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
case reflect.String:
return &FieldSchema{Required: true, Type: StringFieldType}, nil
case reflect.Bool:
return &FieldSchema{Required: true, Type: BooleanFieldType}, nil
case reflect.Float32, reflect.Float64:
return &FieldSchema{Required: true, Type: FloatFieldType}, nil
default:
return nil, errUnsupportedFieldType
}
}
// inferFields extracts all exported field types from a struct type.
func inferFields(rt reflect.Type) (Schema, error) {
var s Schema
for i := 0; i < rt.NumField(); i++ {
field := rt.Field(i)
if field.PkgPath != "" {
// field is unexported.
continue
}
if field.Anonymous {
// TODO(nightlyone) support embedded (see https://github.com/GoogleCloudPlatform/google-cloud-go/issues/238)
return nil, errUnsupportedFieldType
}
f, err := inferFieldSchema(field.Type)
if err != nil {
return nil, err
}
f.Name = field.Name
s = append(s, f)
}
return s, nil
}
func isByteSlice(rt reflect.Type) bool {
return rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8
}
func isTimeTime(rt reflect.Type) bool {
return rt.PkgPath() == "time" && rt.Name() == "Time"
}
func isStruct(rt reflect.Type) bool {
return rt.Kind() == reflect.Struct
}
func isRepeated(rt reflect.Type) bool {
switch rt.Kind() {
case reflect.Slice, reflect.Array:
return true
default:
return false
}
}
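// Editor's sketch, not part of the vendored file: what InferSchema yields for
// a hypothetical struct, following the rules documented above (assumes a time
// import in addition to this file's own imports).
type exampleEvent struct {
	At    time.Time
	Name  string
	Count int
	Tags  []string
}
// InferSchema(exampleEvent{}) would produce, in field order:
//   At    TIMESTAMP REQUIRED
//   Name  STRING    REQUIRED
//   Count INTEGER   REQUIRED
//   Tags  STRING    REPEATED  (Required is cleared for slices and arrays)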

495
vendor/cloud.google.com/go/bigquery/schema_test.go generated vendored Normal file

@@ -0,0 +1,495 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"reflect"
"testing"
"time"
bq "google.golang.org/api/bigquery/v2"
)
func (fs *FieldSchema) GoString() string {
if fs == nil {
return "<nil>"
}
return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}",
fs.Name,
fs.Description,
fs.Repeated,
fs.Required,
fs.Type,
fmt.Sprintf("%#v", fs.Schema),
)
}
func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema {
return &bq.TableFieldSchema{
Description: desc,
Name: name,
Mode: mode,
Type: typ,
}
}
func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema {
return &FieldSchema{
Description: desc,
Name: name,
Repeated: repeated,
Required: required,
Type: FieldType(typ),
}
}
func TestSchemaConversion(t *testing.T) {
testCases := []struct {
schema Schema
bqSchema *bq.TableSchema
}{
{
// required
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
schema: Schema{
fieldSchema("desc", "name", "STRING", false, true),
},
},
{
// repeated
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REPEATED"),
},
},
schema: Schema{
fieldSchema("desc", "name", "STRING", true, false),
},
},
{
// nullable, string
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", ""),
},
},
schema: Schema{
fieldSchema("desc", "name", "STRING", false, false),
},
},
{
// integer
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "INTEGER", ""),
},
},
schema: Schema{
fieldSchema("desc", "name", "INTEGER", false, false),
},
},
{
// float
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "FLOAT", ""),
},
},
schema: Schema{
fieldSchema("desc", "name", "FLOAT", false, false),
},
},
{
// boolean
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "BOOLEAN", ""),
},
},
schema: Schema{
fieldSchema("desc", "name", "BOOLEAN", false, false),
},
},
{
// timestamp
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "TIMESTAMP", ""),
},
},
schema: Schema{
fieldSchema("desc", "name", "TIMESTAMP", false, false),
},
},
{
// nested
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
{
Description: "An outer schema wrapping a nested schema",
Name: "outer",
Mode: "REQUIRED",
Type: "RECORD",
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("inner field", "inner", "STRING", ""),
},
},
},
},
schema: Schema{
&FieldSchema{
Description: "An outer schema wrapping a nested schema",
Name: "outer",
Required: true,
Type: "RECORD",
Schema: []*FieldSchema{
{
Description: "inner field",
Name: "inner",
Type: "STRING",
},
},
},
},
},
}
for _, tc := range testCases {
bqSchema := tc.schema.asTableSchema()
if !reflect.DeepEqual(bqSchema, tc.bqSchema) {
t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", bqSchema, tc.bqSchema)
}
schema := convertTableSchema(tc.bqSchema)
if !reflect.DeepEqual(schema, tc.schema) {
t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
}
}
}
type allStrings struct {
String string
ByteSlice []byte
}
type allSignedIntegers struct {
Int64 int64
Int32 int32
Int16 int16
Int8 int8
Int int
}
type allUnsignedIntegers struct {
Uint64 uint64
Uint32 uint32
Uint16 uint16
Uint8 uint8
Uintptr uintptr
Uint uint
}
type allFloat struct {
Float64 float64
Float32 float32
// NOTE: complex64 and complex128 are unsupported by BigQuery
}
type allBoolean struct {
Bool bool
}
type allTime struct {
Time time.Time
}
func TestSimpleInference(t *testing.T) {
testCases := []struct {
in interface{}
want Schema
}{
{
in: allSignedIntegers{},
want: Schema{
fieldSchema("", "Int64", "INTEGER", false, true),
fieldSchema("", "Int32", "INTEGER", false, true),
fieldSchema("", "Int16", "INTEGER", false, true),
fieldSchema("", "Int8", "INTEGER", false, true),
fieldSchema("", "Int", "INTEGER", false, true),
},
},
{
in: allUnsignedIntegers{},
want: Schema{
fieldSchema("", "Uint64", "INTEGER", false, true),
fieldSchema("", "Uint32", "INTEGER", false, true),
fieldSchema("", "Uint16", "INTEGER", false, true),
fieldSchema("", "Uint8", "INTEGER", false, true),
fieldSchema("", "Uintptr", "INTEGER", false, true),
fieldSchema("", "Uint", "INTEGER", false, true),
},
},
{
in: allFloat{},
want: Schema{
fieldSchema("", "Float64", "FLOAT", false, true),
fieldSchema("", "Float32", "FLOAT", false, true),
},
},
{
in: allBoolean{},
want: Schema{
fieldSchema("", "Bool", "BOOLEAN", false, true),
},
},
{
in: allTime{},
want: Schema{
fieldSchema("", "Time", "TIMESTAMP", false, true),
},
},
{
in: allStrings{},
want: Schema{
fieldSchema("", "String", "STRING", false, true),
fieldSchema("", "ByteSlice", "STRING", false, true),
},
},
}
for i, tc := range testCases {
got, err := InferSchema(tc.in)
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, tc.want)
}
}
}
type containsNested struct {
hidden string
NotNested int
Nested struct {
Inside int
}
}
type containsDoubleNested struct {
NotNested int
Nested struct {
InsideNested struct {
Inside int
}
}
}
func TestNestedInference(t *testing.T) {
testCases := []struct {
in interface{}
want Schema
}{
{
in: containsNested{},
want: Schema{
fieldSchema("", "NotNested", "INTEGER", false, true),
&FieldSchema{
Name: "Nested",
Required: true,
Type: "RECORD",
Schema: []*FieldSchema{
{
Name: "Inside",
Type: "INTEGER",
Required: true,
},
},
},
},
},
{
in: containsDoubleNested{},
want: Schema{
fieldSchema("", "NotNested", "INTEGER", false, true),
&FieldSchema{
Name: "Nested",
Required: true,
Type: "RECORD",
Schema: []*FieldSchema{
{
Name: "InsideNested",
Required: true,
Type: "RECORD",
Schema: []*FieldSchema{
{
Name: "Inside",
Type: "INTEGER",
Required: true,
},
},
},
},
},
},
},
}
for i, tc := range testCases {
got, err := InferSchema(tc.in)
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, tc.want)
}
}
}
type simpleRepeated struct {
NotRepeated []byte
RepeatedByteSlice [][]byte
Repeated []int
}
type simpleNestedRepeated struct {
NotRepeated int
Repeated []struct {
Inside int
}
}
func TestRepeatedInference(t *testing.T) {
testCases := []struct {
in interface{}
want Schema
}{
{
in: simpleRepeated{},
want: Schema{
fieldSchema("", "NotRepeated", "STRING", false, true),
fieldSchema("", "RepeatedByteSlice", "STRING", true, false),
fieldSchema("", "Repeated", "INTEGER", true, false),
},
},
{
in: simpleNestedRepeated{},
want: Schema{
fieldSchema("", "NotRepeated", "INTEGER", false, true),
&FieldSchema{
Name: "Repeated",
Repeated: true,
Type: "RECORD",
Schema: []*FieldSchema{
{
Name: "Inside",
Type: "INTEGER",
Required: true,
},
},
},
},
},
}
for i, tc := range testCases {
got, err := InferSchema(tc.in)
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, tc.want)
}
}
}
type Embedded struct {
Embedded int
}
type nestedEmbedded struct {
Embedded
}
func TestSchemaErrors(t *testing.T) {
testCases := []struct {
in interface{}
err error
}{
{
in: []byte{},
err: errNoStruct,
},
{
in: new(int),
err: errNoStruct,
},
{
in: new(allStrings),
err: errNoStruct,
},
{
in: struct{ Complex complex64 }{},
err: errUnsupportedFieldType,
},
{
in: struct{ Map map[string]int }{},
err: errUnsupportedFieldType,
},
{
in: struct{ Chan chan bool }{},
err: errUnsupportedFieldType,
},
{
in: struct{ Ptr *int }{},
err: errUnsupportedFieldType,
},
{
in: struct{ Interface interface{} }{},
err: errUnsupportedFieldType,
},
{
in: struct{ MultiDimensional [][]int }{},
err: errUnsupportedFieldType,
},
{
in: struct{ MultiDimensional [][][]byte }{},
err: errUnsupportedFieldType,
},
{
in: struct{ ChanSlice []chan bool }{},
err: errUnsupportedFieldType,
},
{
in: struct{ NestedChan struct{ Chan []chan bool } }{},
err: errUnsupportedFieldType,
},
{
in: nestedEmbedded{},
err: errUnsupportedFieldType,
},
}
for i, tc := range testCases {
want := tc.err
_, got := InferSchema(tc.in)
if !reflect.DeepEqual(got, want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
}
}
}

484
vendor/cloud.google.com/go/bigquery/service.go generated vendored Normal file

@@ -0,0 +1,484 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"fmt"
"net/http"
"sync"
"time"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
// service provides an internal abstraction to isolate the generated
// BigQuery API; most of this package uses this interface instead.
// The single implementation, *bigqueryService, contains all the knowledge
// of the generated BigQuery API.
type service interface {
// Jobs
insertJob(ctx context.Context, job *bq.Job, projectId string) (*Job, error)
getJobType(ctx context.Context, projectId, jobID string) (jobType, error)
jobCancel(ctx context.Context, projectId, jobID string) error
jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)
// Tables
createTable(ctx context.Context, conf *createTableConf) error
getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
deleteTable(ctx context.Context, projectID, datasetID, tableID string) error
listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error)
patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error)
// Table data
readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error
// Datasets
insertDataset(ctx context.Context, datasetID, projectID string) error
// Misc
// readQuery reads data resulting from a query job. If the job is
// incomplete, an errIncompleteJob is returned. readQuery may be called
// repeatedly to poll for job completion.
readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error)
}
type bigqueryService struct {
s *bq.Service
}
func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) {
s, err := bq.New(client)
if err != nil {
return nil, fmt.Errorf("constructing bigquery client: %v", err)
}
s.BasePath = endpoint
return &bigqueryService{s: s}, nil
}
// getPages calls the supplied getPage function repeatedly until there are no pages left to get.
// token is the token of the initial page to start from. Use an empty string to start from the beginning.
func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
for {
var err error
token, err = getPage(token)
if err != nil {
return err
}
if token == "" {
return nil
}
}
}
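// Editor's note, not part of the original file: getPages drives the paging of
// the list calls below. A hedged usage sketch, with s, ctx, projectID and
// datasetID assumed from the caller's scope:
//
//	var all []*Table
//	err := getPages("", func(token string) (string, error) {
//		tables, next, err := s.listTables(ctx, projectID, datasetID, token)
//		if err != nil {
//			return "", err
//		}
//		all = append(all, tables...)
//		return next, nil
//	})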
func (s *bigqueryService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {
res, err := s.s.Jobs.Insert(projectID, job).Context(ctx).Do()
if err != nil {
return nil, err
}
return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil
}
type pagingConf struct {
recordsPerRequest int64
setRecordsPerRequest bool
startIndex uint64
}
type readTableConf struct {
projectID, datasetID, tableID string
paging pagingConf
schema Schema // lazily initialized when the first page of data is fetched.
}
type readDataResult struct {
pageToken string
rows [][]Value
totalRows uint64
schema Schema
}
type readQueryConf struct {
projectID, jobID string
paging pagingConf
}
func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
// Prepare request to fetch one page of table data.
req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
if pageToken != "" {
req.PageToken(pageToken)
} else {
req.StartIndex(conf.paging.startIndex)
}
if conf.paging.setRecordsPerRequest {
req.MaxResults(conf.paging.recordsPerRequest)
}
// Fetch the table schema in the background, if necessary.
var schemaErr error
var schemaFetch sync.WaitGroup
if conf.schema == nil {
schemaFetch.Add(1)
go func() {
defer schemaFetch.Done()
var t *bq.Table
t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
Fields("schema").
Context(ctx).
Do()
if schemaErr == nil && t.Schema != nil {
conf.schema = convertTableSchema(t.Schema)
}
}()
}
res, err := req.Context(ctx).Do()
if err != nil {
return nil, err
}
schemaFetch.Wait()
if schemaErr != nil {
return nil, schemaErr
}
result := &readDataResult{
pageToken: res.PageToken,
totalRows: uint64(res.TotalRows),
schema: conf.schema,
}
result.rows, err = convertRows(res.Rows, conf.schema)
if err != nil {
return nil, err
}
return result, nil
}
var errIncompleteJob = errors.New("internal error: query results not available because job is not complete")
// getQueryResultsTimeout controls the maximum duration of a request to the
// BigQuery GetQueryResults endpoint. Setting a long timeout here does not
// cause increased overall latency, as results are returned as soon as they are
// available.
const getQueryResultsTimeout = time.Minute
func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) {
req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID).
TimeoutMs(getQueryResultsTimeout.Nanoseconds() / 1e6)
if pageToken != "" {
req.PageToken(pageToken)
} else {
req.StartIndex(conf.paging.startIndex)
}
if conf.paging.setRecordsPerRequest {
req.MaxResults(conf.paging.recordsPerRequest)
}
res, err := req.Context(ctx).Do()
if err != nil {
return nil, err
}
if !res.JobComplete {
return nil, errIncompleteJob
}
schema := convertTableSchema(res.Schema)
result := &readDataResult{
pageToken: res.PageToken,
totalRows: res.TotalRows,
schema: schema,
}
result.rows, err = convertRows(res.Rows, schema)
if err != nil {
return nil, err
}
return result, nil
}
type insertRowsConf struct {
templateSuffix string
ignoreUnknownValues bool
skipInvalidRows bool
}
func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
req := &bq.TableDataInsertAllRequest{
TemplateSuffix: conf.templateSuffix,
IgnoreUnknownValues: conf.ignoreUnknownValues,
SkipInvalidRows: conf.skipInvalidRows,
}
for _, row := range rows {
m := make(map[string]bq.JsonValue)
for k, v := range row.Row {
m[k] = bq.JsonValue(v)
}
req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
InsertId: row.InsertID,
Json: m,
})
}
res, err := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx).Do()
if err != nil {
return err
}
if len(res.InsertErrors) == 0 {
return nil
}
var errs PutMultiError
for _, e := range res.InsertErrors {
if int(e.Index) >= len(rows) {
return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
}
rie := RowInsertionError{
InsertID: rows[e.Index].InsertID,
RowIndex: int(e.Index),
}
for _, errp := range e.Errors {
rie.Errors = append(rie.Errors, errorFromErrorProto(errp))
}
errs = append(errs, rie)
}
return errs
}
type jobType int
const (
copyJobType jobType = iota
extractJobType
loadJobType
queryJobType
)
func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) {
res, err := s.s.Jobs.Get(projectID, jobID).
Fields("configuration").
Context(ctx).
Do()
if err != nil {
return 0, err
}
switch {
case res.Configuration.Copy != nil:
return copyJobType, nil
case res.Configuration.Extract != nil:
return extractJobType, nil
case res.Configuration.Load != nil:
return loadJobType, nil
case res.Configuration.Query != nil:
return queryJobType, nil
default:
return 0, errors.New("unknown job type")
}
}
func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
// Jobs.Cancel returns a job entity, but the only relevant piece of
// data it may contain (the status of the job) is unreliable. From the
// docs: "This call will return immediately, and the client will need
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
_, err := s.s.Jobs.Cancel(projectID, jobID).
Fields(). // We don't need any of the response data.
Context(ctx).
Do()
return err
}
func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
res, err := s.s.Jobs.Get(projectID, jobID).
Fields("status"). // Only fetch what we need.
Context(ctx).
Do()
if err != nil {
return nil, err
}
return jobStatusFromProto(res.Status)
}
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
state, ok := stateMap[status.State]
if !ok {
return nil, fmt.Errorf("unexpected job state: %v", status.State)
}
newStatus := &JobStatus{
State: state,
err: nil,
}
if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
newStatus.err = err
}
for _, ep := range status.Errors {
newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))
}
return newStatus, nil
}
// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
var tables []*Table
res, err := s.s.Tables.List(projectID, datasetID).
PageToken(pageToken).
Context(ctx).
Do()
if err != nil {
return nil, "", err
}
for _, t := range res.Tables {
tables = append(tables, s.convertListedTable(t))
}
return tables, res.NextPageToken, nil
}
type createTableConf struct {
projectID, datasetID, tableID string
expiration time.Time
viewQuery string
schema *bq.TableSchema
}
// createTable creates a table in the BigQuery service.
// expiration is an optional time after which the table will be deleted and its storage reclaimed.
// If viewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after creation, the view query can be modified only if the table was created as a view.
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
table := &bq.Table{
TableReference: &bq.TableReference{
ProjectId: conf.projectID,
DatasetId: conf.datasetID,
TableId: conf.tableID,
},
}
if !conf.expiration.IsZero() {
// bq.Table.ExpirationTime is in milliseconds since the epoch, matching the
// conversion in bqTableToMetadata below.
table.ExpirationTime = conf.expiration.UnixNano() / 1e6
}
// TODO(jba): make it impossible to provide both a view query and a schema.
if conf.viewQuery != "" {
table.View = &bq.ViewDefinition{
Query: conf.viewQuery,
}
}
if conf.schema != nil {
table.Schema = conf.schema
}
_, err := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx).Do()
return err
}
func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
table, err := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do()
if err != nil {
return nil, err
}
return bqTableToMetadata(table), nil
}
func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do()
}
func bqTableToMetadata(t *bq.Table) *TableMetadata {
md := &TableMetadata{
Description: t.Description,
Name: t.FriendlyName,
Type: TableType(t.Type),
ID: t.Id,
NumBytes: t.NumBytes,
NumRows: t.NumRows,
}
if t.ExpirationTime != 0 {
md.ExpirationTime = time.Unix(0, t.ExpirationTime*1e6)
}
if t.CreationTime != 0 {
md.CreationTime = time.Unix(0, t.CreationTime*1e6)
}
if t.LastModifiedTime != 0 {
md.LastModifiedTime = time.Unix(0, int64(t.LastModifiedTime*1e6))
}
if t.Schema != nil {
md.Schema = convertTableSchema(t.Schema)
}
if t.View != nil {
md.View = t.View.Query
}
return md
}
func (s *bigqueryService) convertListedTable(t *bq.TableListTables) *Table {
return &Table{
ProjectID: t.TableReference.ProjectId,
DatasetID: t.TableReference.DatasetId,
TableID: t.TableReference.TableId,
service: s,
}
}
// patchTableConf contains fields to be patched.
type patchTableConf struct {
// These fields are omitted from the patch operation if nil.
Description *string
Name *string
}
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
t := &bq.Table{}
forceSend := func(field string) {
t.ForceSendFields = append(t.ForceSendFields, field)
}
if conf.Description != nil {
t.Description = *conf.Description
forceSend("Description")
}
if conf.Name != nil {
t.FriendlyName = *conf.Name
forceSend("FriendlyName")
}
table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
Context(ctx).
Do()
if err != nil {
return nil, err
}
return bqTableToMetadata(table), nil
}
func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error {
ds := &bq.Dataset{
DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
}
_, err := s.s.Datasets.Insert(projectID, ds).Context(ctx).Do()
return err
}

281
vendor/cloud.google.com/go/bigquery/table.go generated vendored Normal file

@@ -0,0 +1,281 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"time"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
// A Table is a reference to a BigQuery table.
type Table struct {
// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
// In this case the result will be stored in an ephemeral table.
ProjectID string
DatasetID string
// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
// The maximum length is 1,024 characters.
TableID string
service service
}
// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
Description string // The user-friendly description of this table.
Name string // The user-friendly name for this table.
Schema Schema
View string
ID string // An opaque ID uniquely identifying the table.
Type TableType
// The time when this table expires. If not set, the table will persist
// indefinitely. Expired tables will be deleted and their storage reclaimed.
ExpirationTime time.Time
CreationTime time.Time
LastModifiedTime time.Time
// The size of the table in bytes.
// This does not include data that is being buffered during a streaming insert.
NumBytes int64
// The number of rows of data in this table.
// This does not include data that is being buffered during a streaming insert.
NumRows uint64
}
// Tables is a group of tables. The tables may belong to differing projects or datasets.
type Tables []*Table
// TableCreateDisposition specifies the circumstances under which the destination table will be created.
// The default is CreateIfNeeded.
type TableCreateDisposition string
const (
// The table will be created if it does not already exist. Tables are created atomically on successful completion of a job.
CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"
// The table must already exist and will not be automatically created.
CreateNever TableCreateDisposition = "CREATE_NEVER"
)
// CreateDisposition returns an Option that specifies the create disposition for a load, copy or query job.
func CreateDisposition(disp TableCreateDisposition) Option { return disp }
func (opt TableCreateDisposition) implementsOption() {}
func (opt TableCreateDisposition) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.CreateDisposition = string(opt)
}
func (opt TableCreateDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy) {
conf.CreateDisposition = string(opt)
}
func (opt TableCreateDisposition) customizeQuery(conf *bq.JobConfigurationQuery) {
conf.CreateDisposition = string(opt)
}
// TableWriteDisposition specifies how existing data in a destination table is treated.
// Default is WriteAppend.
type TableWriteDisposition string
const (
// Data will be appended to any existing data in the destination table.
// Data is appended atomically on successful completion of a job.
WriteAppend TableWriteDisposition = "WRITE_APPEND"
// Existing data in the destination table will be overwritten.
// Data is overwritten atomically on successful completion of a job.
WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"
// Writes will fail if the destination table already contains data.
WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
)
// WriteDisposition returns an Option that specifies the write disposition for a load, copy or query job.
func WriteDisposition(disp TableWriteDisposition) Option { return disp }
func (opt TableWriteDisposition) implementsOption() {}
func (opt TableWriteDisposition) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.WriteDisposition = string(opt)
}
func (opt TableWriteDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy) {
conf.WriteDisposition = string(opt)
}
func (opt TableWriteDisposition) customizeQuery(conf *bq.JobConfigurationQuery) {
conf.WriteDisposition = string(opt)
}
// TableType is the type of table.
type TableType string
const (
RegularTable TableType = "TABLE"
ViewTable TableType = "VIEW"
)
func (t *Table) implementsSource() {}
func (t *Table) implementsReadSource() {}
func (t *Table) implementsDestination() {}
func (ts Tables) implementsSource() {}
func (t *Table) tableRefProto() *bq.TableReference {
return &bq.TableReference{
ProjectId: t.ProjectID,
DatasetId: t.DatasetID,
TableId: t.TableID,
}
}
// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
func (t *Table) FullyQualifiedName() string {
return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID)
}
// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.
func (t *Table) implicitTable() bool {
return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
}
func (t *Table) customizeLoadDst(conf *bq.JobConfigurationLoad) {
conf.DestinationTable = t.tableRefProto()
}
func (t *Table) customizeExtractSrc(conf *bq.JobConfigurationExtract) {
conf.SourceTable = t.tableRefProto()
}
func (t *Table) customizeCopyDst(conf *bq.JobConfigurationTableCopy) {
conf.DestinationTable = t.tableRefProto()
}
func (ts Tables) customizeCopySrc(conf *bq.JobConfigurationTableCopy) {
for _, t := range ts {
conf.SourceTables = append(conf.SourceTables, t.tableRefProto())
}
}
func (t *Table) customizeQueryDst(conf *bq.JobConfigurationQuery) {
if !t.implicitTable() {
conf.DestinationTable = t.tableRefProto()
}
}
func (t *Table) customizeReadSrc(cursor *readTableConf) {
cursor.projectID = t.ProjectID
cursor.datasetID = t.DatasetID
cursor.tableID = t.TableID
}
// Create creates a table in the BigQuery service.
func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error {
conf := &createTableConf{
projectID: t.ProjectID,
datasetID: t.DatasetID,
tableID: t.TableID,
}
for _, o := range options {
o.customizeCreateTable(conf)
}
return t.service.createTable(ctx, conf)
}
// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
return t.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID)
}
// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) error {
return t.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID)
}
// A CreateTableOption is an optional argument to CreateTable.
type CreateTableOption interface {
customizeCreateTable(*createTableConf)
}
type tableExpiration time.Time
// TableExpiration returns a CreateTableOption that will cause the created table to be deleted after the expiration time.
func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) }
func (opt tableExpiration) customizeCreateTable(conf *createTableConf) {
conf.expiration = time.Time(opt)
}
type viewQuery string
// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query.
// For more information see: https://cloud.google.com/bigquery/querying-data#views
func ViewQuery(query string) CreateTableOption { return viewQuery(query) }
func (opt viewQuery) customizeCreateTable(conf *createTableConf) {
conf.viewQuery = string(opt)
}
// TableMetadataPatch represents a set of changes to a table's metadata.
type TableMetadataPatch struct {
s service
projectID, datasetID, tableID string
conf patchTableConf
}
// Patch returns a *TableMetadataPatch, which can be used to modify specific Table metadata fields.
// In order to apply the changes, the TableMetadataPatch's Apply method must be called.
func (t *Table) Patch() *TableMetadataPatch {
return &TableMetadataPatch{
s: t.service,
projectID: t.ProjectID,
datasetID: t.DatasetID,
tableID: t.TableID,
}
}
// Description sets the table description.
func (p *TableMetadataPatch) Description(desc string) {
p.conf.Description = &desc
}
// Name sets the table name.
func (p *TableMetadataPatch) Name(name string) {
p.conf.Name = &name
}
// TODO(mcgreevy): support patching the schema.
// Apply applies the patch operation.
func (p *TableMetadataPatch) Apply(ctx context.Context) (*TableMetadata, error) {
return p.s.patchTable(ctx, p.projectID, p.datasetID, p.tableID, &p.conf)
}
// NewUploader returns an *Uploader that can be used to append rows to t.
func (t *Table) NewUploader(opts ...UploadOption) *Uploader {
uploader := &Uploader{t: t}
for _, o := range opts {
o.customizeInsertRows(&uploader.conf)
}
return uploader
}
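// Editor's sketch, not part of the vendored file: the create-and-patch flow
// above, assuming a live service behind t. Schema doubles as a
// CreateTableOption via customizeCreateTable in schema.go; the 24h expiry is
// an arbitrary example value.
func createWithSchema(ctx context.Context, t *Table, schema Schema) error {
	if err := t.Create(ctx, schema, TableExpiration(time.Now().Add(24*time.Hour))); err != nil {
		return err
	}
	p := t.Patch()
	p.Description("example table")
	_, err := p.Apply(ctx)
	return err
}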

121
vendor/cloud.google.com/go/bigquery/uploader.go generated vendored Normal file

@@ -0,0 +1,121 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"reflect"
"golang.org/x/net/context"
)
// An UploadOption is an optional argument to NewUploader.
type UploadOption interface {
customizeInsertRows(conf *insertRowsConf)
}
// An Uploader does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Uploader struct {
conf insertRowsConf
t *Table
}
// SkipInvalidRows returns an UploadOption that causes rows containing invalid data to be silently ignored.
// The default value is false, which causes the entire request to fail if any row is invalid.
func SkipInvalidRows() UploadOption { return skipInvalidRows{} }
type skipInvalidRows struct{}
func (opt skipInvalidRows) customizeInsertRows(conf *insertRowsConf) {
conf.skipInvalidRows = true
}
// UploadIgnoreUnknownValues returns an UploadOption that causes values not matching the schema to be ignored.
// If this option is not used, records containing such values are treated as invalid records.
func UploadIgnoreUnknownValues() UploadOption { return uploadIgnoreUnknownValues{} }
type uploadIgnoreUnknownValues struct{}
func (opt uploadIgnoreUnknownValues) customizeInsertRows(conf *insertRowsConf) {
conf.ignoreUnknownValues = true
}
// A TableTemplateSuffix allows Uploaders to create tables automatically.
//
// Experimental: this option is experimental and may be modified or removed in future versions,
// regardless of any other documented package stability guarantees.
//
// When you specify a suffix, the table you upload data to
// will be used as a template for creating a new table, with the same schema,
// called <table> + <suffix>.
//
// More information is available at
// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
func TableTemplateSuffix(suffix string) UploadOption { return tableTemplateSuffix(suffix) }
type tableTemplateSuffix string
func (opt tableTemplateSuffix) customizeInsertRows(conf *insertRowsConf) {
conf.templateSuffix = string(opt)
}
// Put uploads one or more rows to the BigQuery service. src must implement ValueSaver or be a slice of ValueSavers.
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
func (u *Uploader) Put(ctx context.Context, src interface{}) error {
// TODO(mcgreevy): Support structs which do not implement ValueSaver as src, a la Datastore.
if saver, ok := src.(ValueSaver); ok {
return u.putMulti(ctx, []ValueSaver{saver})
}
srcVal := reflect.ValueOf(src)
if srcVal.Kind() != reflect.Slice {
return fmt.Errorf("%T is not a ValueSaver or slice of ValueSavers", src)
}
var savers []ValueSaver
for i := 0; i < srcVal.Len(); i++ {
s := srcVal.Index(i).Interface()
saver, ok := s.(ValueSaver)
if !ok {
return fmt.Errorf("element %d of src is of type %T, which is not a ValueSaver", i, s)
}
savers = append(savers, saver)
}
return u.putMulti(ctx, savers)
}
func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
var rows []*insertionRow
for _, saver := range src {
row, insertID, err := saver.Save()
if err != nil {
return err
}
rows = append(rows, &insertionRow{InsertID: insertID, Row: row})
}
return u.t.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &u.conf)
}
// An insertionRow represents a row of data to be inserted into a table.
type insertionRow struct {
// If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of
// this row on a best-effort basis.
InsertID string
// The data to be inserted, represented as a map from field name to Value.
Row map[string]Value
}
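// Editor's sketch, not part of the vendored file: the streaming-insert flow
// above. PutMultiError and RowInsertionError are declared in this package's
// error.go, which this diff does not include; their use here is inferred from
// insertRows in service.go.
func streamRows(ctx context.Context, t *Table, savers []ValueSaver) error {
	u := t.NewUploader(SkipInvalidRows(), UploadIgnoreUnknownValues())
	err := u.Put(ctx, savers)
	if multi, ok := err.(PutMultiError); ok {
		for _, rowErr := range multi {
			// rowErr.RowIndex and rowErr.InsertID identify the failed row.
			_ = rowErr
		}
	}
	return err
}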

234
vendor/cloud.google.com/go/bigquery/uploader_test.go generated vendored Normal file

@@ -0,0 +1,234 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
)
type testSaver struct {
ir *insertionRow
err error
}
func (ts testSaver) Save() (map[string]Value, string, error) {
return ts.ir.Row, ts.ir.InsertID, ts.err
}
func TestRejectsNonValueSavers(t *testing.T) {
u := Uploader{t: defaultTable(nil)}
testCases := []struct {
src interface{}
}{
{
src: 1,
},
{
src: []int{1, 2},
},
{
src: []interface{}{
testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
1,
},
},
}
for _, tc := range testCases {
if err := u.Put(context.Background(), tc.src); err == nil {
t.Errorf("put value: %v; got err: %v; want nil", tc.src, err)
}
}
}
type insertRowsRecorder struct {
rowBatches [][]*insertionRow
service
}
func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
irr.rowBatches = append(irr.rowBatches, rows)
return nil
}
func TestInsertsData(t *testing.T) {
table := &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
}
testCases := []struct {
data [][]*insertionRow
}{
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
},
},
},
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
&insertionRow{"b", map[string]Value{"two": 2}},
},
},
},
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
},
{
&insertionRow{"b", map[string]Value{"two": 2}},
},
},
},
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
&insertionRow{"b", map[string]Value{"two": 2}},
},
{
&insertionRow{"c", map[string]Value{"three": 3}},
&insertionRow{"d", map[string]Value{"four": 4}},
},
},
},
}
for _, tc := range testCases {
irr := &insertRowsRecorder{}
table.service = irr
u := Uploader{t: table}
for _, batch := range tc.data {
if len(batch) == 0 {
continue
}
var toUpload interface{}
if len(batch) == 1 {
toUpload = testSaver{ir: batch[0]}
} else {
savers := []testSaver{}
for _, row := range batch {
savers = append(savers, testSaver{ir: row})
}
toUpload = savers
}
err := u.Put(context.Background(), toUpload)
if err != nil {
t.Errorf("expected successful Put of ValueSaver; got: %v", err)
}
}
if got, want := irr.rowBatches, tc.data; !reflect.DeepEqual(got, want) {
t.Errorf("got: %v, want: %v", got, want)
}
}
}
type uploadOptionRecorder struct {
received *insertRowsConf
service
}
func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
u.received = conf
return nil
}
func TestUploadOptionsPropagate(t *testing.T) {
// We don't care about the data in this test case.
dummyData := testSaver{ir: &insertionRow{}}
tests := [...]struct {
opts []UploadOption
conf insertRowsConf
}{
{ // test zero options lead to zero value for insertRowsConf
},
{
opts: []UploadOption{
TableTemplateSuffix("suffix"),
},
conf: insertRowsConf{
templateSuffix: "suffix",
},
},
{
opts: []UploadOption{
UploadIgnoreUnknownValues(),
},
conf: insertRowsConf{
ignoreUnknownValues: true,
},
},
{
opts: []UploadOption{
SkipInvalidRows(),
},
conf: insertRowsConf{
skipInvalidRows: true,
},
},
{ // multiple upload options combine
opts: []UploadOption{
TableTemplateSuffix("suffix"),
SkipInvalidRows(),
UploadIgnoreUnknownValues(),
},
conf: insertRowsConf{
templateSuffix: "suffix",
skipInvalidRows: true,
ignoreUnknownValues: true,
},
},
}
for i, tc := range tests {
recorder := new(uploadOptionRecorder)
table := &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
service: recorder,
}
u := table.NewUploader(tc.opts...)
err := u.Put(context.Background(), dummyData)
if err != nil {
t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err)
}
if recorder.received == nil {
t.Fatalf("%d: received no options at all!", i)
}
want := tc.conf
got := *recorder.received
if got != want {
t.Errorf("%d: got %#v, want %#v, opts=%#v", i, got, want, tc.opts)
}
}
}

54
vendor/cloud.google.com/go/bigquery/utils_test.go generated vendored Normal file

@@ -0,0 +1,54 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
var defaultGCS = &GCSReference{
uris: []string{"uri"},
}
var defaultQuery = &Query{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
}
func defaultTable(s service) *Table {
return &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
service: s,
}
}
type testService struct {
*bq.Job
service
}
func (s *testService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {
s.Job = job
return &Job{}, nil
}
func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
return &JobStatus{State: Done}, nil
}

195
vendor/cloud.google.com/go/bigquery/value.go generated vendored Normal file

@@ -0,0 +1,195 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"fmt"
"strconv"
"time"
bq "google.golang.org/api/bigquery/v2"
)
// Value stores the contents of a single cell from a BigQuery result.
type Value interface{}
// ValueLoader stores a slice of Values representing a result row from a Read operation.
// See Iterator.Get for more information.
type ValueLoader interface {
Load(v []Value) error
}
// ValueList converts a []Value to implement ValueLoader.
type ValueList []Value
// Load stores a sequence of values in a ValueList.
func (vs *ValueList) Load(v []Value) error {
*vs = append(*vs, v...)
return nil
}
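// A minimal usage sketch (illustrative, not from the vendored source): a
// ValueList can be passed wherever a ValueLoader is expected, and repeated
// Load calls accumulate rows into one flat slice.
//
//	var vl ValueList
//	_ = vl.Load([]Value{"a", 1})
//	_ = vl.Load([]Value{"b", 2})
//	// vl is now ValueList{"a", 1, "b", 2}.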
// A ValueSaver returns a row of data to be inserted into a table.
type ValueSaver interface {
// Save returns a row to be inserted into a BigQuery table, represented
// as a map from field name to Value.
// If insertID is non-empty, BigQuery will use it to de-duplicate
// insertions of this row on a best-effort basis.
Save() (row map[string]Value, insertID string, err error)
}
// ValuesSaver implements ValueSaver for a slice of Values.
type ValuesSaver struct {
Schema Schema
// If non-empty, BigQuery will use InsertID to de-duplicate insertions
// of this row on a best-effort basis.
InsertID string
Row []Value
}
// Save implements ValueSaver
func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
m, err := valuesToMap(vls.Row, vls.Schema)
return m, vls.InsertID, err
}
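// A minimal sketch of calling Save directly; the field names "name" and
// "count" are hypothetical, but the shapes follow valuesToMap below.
//
//	saver := &ValuesSaver{
//		Schema: Schema{
//			{Name: "name", Type: StringFieldType},
//			{Name: "count", Type: IntegerFieldType},
//		},
//		InsertID: "row-1",
//		Row:      []Value{"a", 1},
//	}
//	row, insertID, err := saver.Save()
//	// row == map[string]Value{"name": "a", "count": 1}, insertID == "row-1", err == nil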
func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
if len(vs) != len(schema) {
return nil, errors.New("Schema does not match length of row to be inserted")
}
m := make(map[string]Value)
for i, fieldSchema := range schema {
if fieldSchema.Type == RecordFieldType {
nested, ok := vs[i].([]Value)
if !ok {
return nil, errors.New("Nested record is not a []Value")
}
value, err := valuesToMap(nested, fieldSchema.Schema)
if err != nil {
return nil, err
}
m[fieldSchema.Name] = value
} else {
m[fieldSchema.Name] = vs[i]
}
}
return m, nil
}
// convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the
// length of each row.
func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) {
var rs [][]Value
for _, r := range rows {
row, err := convertRow(r, schema)
if err != nil {
return nil, err
}
rs = append(rs, row)
}
return rs, nil
}
func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) {
if len(schema) != len(r.F) {
return nil, errors.New("schema length does not match row length")
}
var values []Value
for i, cell := range r.F {
fs := schema[i]
v, err := convertValue(cell.V, fs.Type, fs.Schema)
if err != nil {
return nil, err
}
values = append(values, v)
}
return values, nil
}
func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) {
switch val := val.(type) {
case nil:
return nil, nil
case []interface{}:
return convertRepeatedRecord(val, typ, schema)
case map[string]interface{}:
return convertNestedRecord(val, schema)
case string:
return convertBasicType(val, typ)
default:
return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ)
}
}
func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) {
var values []Value
for _, cell := range vals {
// each cell contains a single entry, keyed by "v"
val := cell.(map[string]interface{})["v"]
v, err := convertValue(val, typ, schema)
if err != nil {
return nil, err
}
values = append(values, v)
}
return values, nil
}
func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) {
// convertNestedRecord is similar to convertRow, as a record has the same structure as a row.
// Nested records are wrapped in a map with a single key, "f".
record := val["f"].([]interface{})
if len(record) != len(schema) {
return nil, errors.New("schema length does not match record length")
}
var values []Value
for i, cell := range record {
// each cell contains a single entry, keyed by "v"
val := cell.(map[string]interface{})["v"]
fs := schema[i]
v, err := convertValue(val, fs.Type, fs.Schema)
if err != nil {
return nil, err
}
values = append(values, v)
}
return values, nil
}
// convertBasicType returns val as an interface with a concrete type specified by typ.
func convertBasicType(val string, typ FieldType) (Value, error) {
switch typ {
case StringFieldType:
return val, nil
case IntegerFieldType:
return strconv.Atoi(val)
case FloatFieldType:
return strconv.ParseFloat(val, 64)
case BooleanFieldType:
return strconv.ParseBool(val)
case TimestampFieldType:
f, err := strconv.ParseFloat(val, 64)
return Value(time.Unix(0, int64(f*1e9))), err
default:
return nil, errors.New("unrecognized type")
}
}

vendor/cloud.google.com/go/bigquery/value_test.go generated vendored Normal file

@@ -0,0 +1,416 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"reflect"
"testing"
"time"
bq "google.golang.org/api/bigquery/v2"
)
func TestConvertBasicValues(t *testing.T) {
schema := []*FieldSchema{
{Type: StringFieldType},
{Type: IntegerFieldType},
{Type: FloatFieldType},
{Type: BooleanFieldType},
}
row := &bq.TableRow{
F: []*bq.TableCell{
{V: "a"},
{V: "1"},
{V: "1.2"},
{V: "true"},
},
}
got, err := convertRow(row, schema)
if err != nil {
t.Fatalf("error converting: %v", err)
}
want := []Value{"a", 1, 1.2, true}
if !reflect.DeepEqual(got, want) {
t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want)
}
}
func TestConvertTime(t *testing.T) {
schema := []*FieldSchema{
{Type: TimestampFieldType},
}
thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC)
row := &bq.TableRow{
F: []*bq.TableCell{
{V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)},
},
}
got, err := convertRow(row, schema)
if err != nil {
t.Fatalf("error converting: %v", err)
}
if !got[0].(time.Time).Equal(thyme) {
t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme)
}
}
func TestConvertNullValues(t *testing.T) {
schema := []*FieldSchema{
{Type: StringFieldType},
}
row := &bq.TableRow{
F: []*bq.TableCell{
{V: nil},
},
}
got, err := convertRow(row, schema)
if err != nil {
t.Fatalf("error converting: %v", err)
}
want := []Value{nil}
if !reflect.DeepEqual(got, want) {
t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want)
}
}
func TestBasicRepetition(t *testing.T) {
schema := []*FieldSchema{
{Type: IntegerFieldType, Repeated: true},
}
row := &bq.TableRow{
F: []*bq.TableCell{
{
V: []interface{}{
map[string]interface{}{
"v": "1",
},
map[string]interface{}{
"v": "2",
},
map[string]interface{}{
"v": "3",
},
},
},
},
}
got, err := convertRow(row, schema)
if err != nil {
t.Fatalf("error converting: %v", err)
}
want := []Value{[]Value{1, 2, 3}}
if !reflect.DeepEqual(got, want) {
t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
}
}
func TestNestedRecordContainingRepetition(t *testing.T) {
schema := []*FieldSchema{
{
Type: RecordFieldType,
Schema: Schema{
{Type: IntegerFieldType, Repeated: true},
},
},
}
row := &bq.TableRow{
F: []*bq.TableCell{
{
V: map[string]interface{}{
"f": []interface{}{
map[string]interface{}{
"v": []interface{}{
map[string]interface{}{"v": "1"},
map[string]interface{}{"v": "2"},
map[string]interface{}{"v": "3"},
},
},
},
},
},
},
}
got, err := convertRow(row, schema)
if err != nil {
t.Fatalf("error converting: %v", err)
}
want := []Value{[]Value{[]Value{1, 2, 3}}}
if !reflect.DeepEqual(got, want) {
t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
}
}
func TestRepeatedRecordContainingRepetition(t *testing.T) {
schema := []*FieldSchema{
{
Type: RecordFieldType,
Repeated: true,
Schema: Schema{
{Type: IntegerFieldType, Repeated: true},
},
},
}
row := &bq.TableRow{F: []*bq.TableCell{
{
V: []interface{}{ // repeated records.
map[string]interface{}{ // first record.
"v": map[string]interface{}{ // pointless single-key-map wrapper.
"f": []interface{}{ // list of record fields.
map[string]interface{}{ // only record (repeated ints)
"v": []interface{}{ // pointless wrapper.
map[string]interface{}{
"v": "1",
},
map[string]interface{}{
"v": "2",
},
map[string]interface{}{
"v": "3",
},
},
},
},
},
},
map[string]interface{}{ // second record.
"v": map[string]interface{}{
"f": []interface{}{
map[string]interface{}{
"v": []interface{}{
map[string]interface{}{
"v": "4",
},
map[string]interface{}{
"v": "5",
},
map[string]interface{}{
"v": "6",
},
},
},
},
},
},
},
},
}}
got, err := convertRow(row, schema)
if err != nil {
t.Fatalf("error converting: %v", err)
}
want := []Value{ // the row is a list of length 1, containing an entry for the repeated record.
[]Value{ // the repeated record is a list of length 2, containing an entry for each repetition.
[]Value{ // the record is a list of length 1, containing an entry for the repeated integer field.
[]Value{1, 2, 3}, // the repeated integer field is a list of length 3.
},
[]Value{ // second record
[]Value{4, 5, 6},
},
},
}
if !reflect.DeepEqual(got, want) {
t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want)
}
}
func TestRepeatedRecordContainingRecord(t *testing.T) {
schema := []*FieldSchema{
{
Type: RecordFieldType,
Repeated: true,
Schema: Schema{
{
Type: StringFieldType,
},
{
Type: RecordFieldType,
Schema: Schema{
{Type: IntegerFieldType},
{Type: StringFieldType},
},
},
},
},
}
row := &bq.TableRow{F: []*bq.TableCell{
{
V: []interface{}{ // repeated records.
map[string]interface{}{ // first record.
"v": map[string]interface{}{ // pointless single-key-map wrapper.
"f": []interface{}{ // list of record fields.
map[string]interface{}{ // first record field (name)
"v": "first repeated record",
},
map[string]interface{}{ // second record field (nested record).
"v": map[string]interface{}{ // pointless single-key-map wrapper.
"f": []interface{}{ // nested record fields
map[string]interface{}{
"v": "1",
},
map[string]interface{}{
"v": "two",
},
},
},
},
},
},
},
map[string]interface{}{ // second record.
"v": map[string]interface{}{
"f": []interface{}{
map[string]interface{}{
"v": "second repeated record",
},
map[string]interface{}{
"v": map[string]interface{}{
"f": []interface{}{
map[string]interface{}{
"v": "3",
},
map[string]interface{}{
"v": "four",
},
},
},
},
},
},
},
},
},
}}
got, err := convertRow(row, schema)
if err != nil {
t.Fatalf("error converting: %v", err)
}
// TODO: test with flattenresults.
want := []Value{ // the row is a list of length 1, containing an entry for the repeated record.
[]Value{ // the repeated record is a list of length 2, containing an entry for each repetition.
[]Value{ // record contains a string followed by a nested record.
"first repeated record",
[]Value{
1,
"two",
},
},
[]Value{ // second record.
"second repeated record",
[]Value{
3,
"four",
},
},
},
}
if !reflect.DeepEqual(got, want) {
t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want)
}
}
func TestValuesSaverConvertsToMap(t *testing.T) {
testCases := []struct {
vs ValuesSaver
want *insertionRow
}{
{
vs: ValuesSaver{
Schema: []*FieldSchema{
{Name: "intField", Type: IntegerFieldType},
{Name: "strField", Type: StringFieldType},
},
InsertID: "iid",
Row: []Value{1, "a"},
},
want: &insertionRow{
InsertID: "iid",
Row: map[string]Value{"intField": 1, "strField": "a"},
},
},
{
vs: ValuesSaver{
Schema: []*FieldSchema{
{Name: "intField", Type: IntegerFieldType},
{
Name: "recordField",
Type: RecordFieldType,
Schema: []*FieldSchema{
{Name: "nestedInt", Type: IntegerFieldType, Repeated: true},
},
},
},
InsertID: "iid",
Row: []Value{1, []Value{[]Value{2, 3}}},
},
want: &insertionRow{
InsertID: "iid",
Row: map[string]Value{
"intField": 1,
"recordField": map[string]Value{
"nestedInt": []Value{2, 3},
},
},
},
},
}
for _, tc := range testCases {
data, insertID, err := tc.vs.Save()
if err != nil {
t.Errorf("Expected successful save; got: %v", err)
}
got := &insertionRow{insertID, data}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("saving ValuesSaver: got:\n%v\nwant:\n%v", got, tc.want)
}
}
}
func TestConvertRows(t *testing.T) {
schema := []*FieldSchema{
{Type: StringFieldType},
{Type: IntegerFieldType},
{Type: FloatFieldType},
{Type: BooleanFieldType},
}
rows := []*bq.TableRow{
{F: []*bq.TableCell{
{V: "a"},
{V: "1"},
{V: "1.2"},
{V: "true"},
}},
{F: []*bq.TableCell{
{V: "b"},
{V: "2"},
{V: "2.2"},
{V: "false"},
}},
}
want := [][]Value{
{"a", 1, 1.2, true},
{"b", 2, 2.2, false},
}
got, err := convertRows(rows, schema)
if err != nil {
t.Fatalf("got %v, want nil", err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("\ngot %v\nwant %v", got, want)
}
}

vendor/cloud.google.com/go/bigtable/admin.go generated vendored Normal file

@@ -0,0 +1,267 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"fmt"
"regexp"
"strings"
btopt "cloud.google.com/go/bigtable/internal/option"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const adminAddr = "bigtableadmin.googleapis.com:443"
// AdminClient is a client type for performing admin operations within a specific instance.
type AdminClient struct {
conn *grpc.ClientConn
tClient btapb.BigtableTableAdminClient
project, instance string
// Metadata to be sent with each request.
md metadata.MD
}
// NewAdminClient creates a new AdminClient for a given project and instance.
func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) {
o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent)
if err != nil {
return nil, err
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
return &AdminClient{
conn: conn,
tClient: btapb.NewBigtableTableAdminClient(conn),
project: project,
instance: instance,
md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)),
}, nil
}
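// A minimal usage sketch, assuming default Google credentials are available
// in the environment; "proj" and "instance" are placeholders (admin_test.go
// below shows a variant that dials a local bttest server instead):
//
//	ac, err := NewAdminClient(ctx, "proj", "instance")
//	if err != nil {
//		// handle error
//	}
//	defer ac.Close()
//	tables, err := ac.Tables(ctx)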
// Close closes the AdminClient.
func (ac *AdminClient) Close() error {
return ac.conn.Close()
}
func (ac *AdminClient) instancePrefix() string {
return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance)
}
// Tables returns a list of the tables in the instance.
func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
ctx = metadata.NewContext(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.ListTablesRequest{
Parent: prefix,
}
res, err := ac.tClient.ListTables(ctx, req)
if err != nil {
return nil, err
}
names := make([]string, 0, len(res.Tables))
for _, tbl := range res.Tables {
names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/"))
}
return names, nil
}
// CreateTable creates a new table in the instance.
// This method may return before the table's creation is complete.
func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
ctx = metadata.NewContext(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.CreateTableRequest{
Parent: prefix,
TableId: table,
}
_, err := ac.tClient.CreateTable(ctx, req)
if err != nil {
return err
}
return nil
}
// CreateColumnFamily creates a new column family in a table.
func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {
// TODO(dsymonds): Permit specifying gcexpr and any other family settings.
ctx = metadata.NewContext(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.ModifyColumnFamiliesRequest{
Name: prefix + "/tables/" + table,
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{
{
Id: family,
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
},
},
}
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
return err
}
// DeleteTable deletes a table and all of its data.
func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {
ctx = metadata.NewContext(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.DeleteTableRequest{
Name: prefix + "/tables/" + table,
}
_, err := ac.tClient.DeleteTable(ctx, req)
return err
}
// DeleteColumnFamily deletes a column family in a table and all of its data.
func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {
ctx = metadata.NewContext(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.ModifyColumnFamiliesRequest{
Name: prefix + "/tables/" + table,
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{
{
Id: family,
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{Drop: true},
},
},
}
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
return err
}
// TableInfo represents information about a table.
type TableInfo struct {
Families []string
}
// TableInfo retrieves information about a table.
func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) {
ctx = metadata.NewContext(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.GetTableRequest{
Name: prefix + "/tables/" + table,
}
res, err := ac.tClient.GetTable(ctx, req)
if err != nil {
return nil, err
}
ti := &TableInfo{}
for fam := range res.ColumnFamilies {
ti.Families = append(ti.Families, fam)
}
return ti, nil
}
// SetGCPolicy specifies which cells in a column family should be garbage collected.
// GC executes opportunistically in the background, so reads may still return
// data that matches the GC policy, i.e. cells that are due for collection.
func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
ctx = metadata.NewContext(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.ModifyColumnFamiliesRequest{
Name: prefix + "/tables/" + table,
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{
{
Id: family,
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{GcRule: policy.proto()}},
},
},
}
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
return err
}
const instanceAdminAddr = "bigtableadmin.googleapis.com:443"
// InstanceAdminClient is a client type for performing admin operations on instances.
// These operations can be substantially more dangerous than those provided by AdminClient.
type InstanceAdminClient struct {
conn *grpc.ClientConn
iClient btapb.BigtableInstanceAdminClient
project string
// Metadata to be sent with each request.
md metadata.MD
}
// NewInstanceAdminClient creates a new InstanceAdminClient for a given project.
func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) {
o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent)
if err != nil {
return nil, err
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
return &InstanceAdminClient{
conn: conn,
iClient: btapb.NewBigtableInstanceAdminClient(conn),
project: project,
md: metadata.Pairs(resourcePrefixHeader, "projects/"+project),
}, nil
}
// Close closes the InstanceAdminClient.
func (iac *InstanceAdminClient) Close() error {
return iac.conn.Close()
}
// InstanceInfo represents information about an instance
type InstanceInfo struct {
Name string // name of the instance
DisplayName string // display name for UIs
}
var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`)
// Instances returns a list of instances in the project.
func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) {
	ctx = metadata.NewContext(ctx, iac.md)
	req := &btapb.ListInstancesRequest{
		Parent: "projects/" + iac.project,
	}
	res, err := iac.iClient.ListInstances(ctx, req)
if err != nil {
return nil, err
}
var is []*InstanceInfo
for _, i := range res.Instances {
m := instanceNameRegexp.FindStringSubmatch(i.Name)
if m == nil {
return nil, fmt.Errorf("malformed instance name %q", i.Name)
}
is = append(is, &InstanceInfo{
Name: m[2],
DisplayName: i.DisplayName,
})
}
return is, nil
}

vendor/cloud.google.com/go/bigtable/admin_test.go generated vendored Normal file

@@ -0,0 +1,73 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigtable
import (
"reflect"
"sort"
"testing"
"time"
"cloud.google.com/go/bigtable/bttest"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
func TestAdminIntegration(t *testing.T) {
srv, err := bttest.NewServer("127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer srv.Close()
t.Logf("bttest.Server running on %s", srv.Addr)
ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
if err != nil {
t.Fatalf("grpc.Dial: %v", err)
}
adminClient, err := NewAdminClient(ctx, "proj", "instance", option.WithGRPCConn(conn))
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}
defer adminClient.Close()
list := func() []string {
tbls, err := adminClient.Tables(ctx)
if err != nil {
t.Fatalf("Fetching list of tables: %v", err)
}
sort.Strings(tbls)
return tbls
}
if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
t.Fatalf("Creating table: %v", err)
}
if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
t.Fatalf("Creating table: %v", err)
}
if got, want := list(), []string{"myothertable", "mytable"}; !reflect.DeepEqual(got, want) {
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
}
if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
t.Fatalf("Deleting table: %v", err)
}
if got, want := list(), []string{"mytable"}; !reflect.DeepEqual(got, want) {
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
}
}

vendor/cloud.google.com/go/bigtable/bigtable.go generated vendored Normal file

@@ -0,0 +1,717 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable // import "cloud.google.com/go/bigtable"
import (
"errors"
"fmt"
"io"
"strconv"
"time"
"cloud.google.com/go/bigtable/internal/gax"
btopt "cloud.google.com/go/bigtable/internal/option"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
const prodAddr = "bigtable.googleapis.com:443"
// Client is a client for reading and writing data to tables in an instance.
//
// A Client is safe to use concurrently, except for its Close method.
type Client struct {
conn *grpc.ClientConn
client btpb.BigtableClient
project, instance string
}
// NewClient creates a new Client for a given project and instance.
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
if err != nil {
return nil, err
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
return &Client{
conn: conn,
client: btpb.NewBigtableClient(conn),
project: project,
instance: instance,
}, nil
}
// Close closes the Client.
func (c *Client) Close() error {
return c.conn.Close()
}
var (
idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted}
isIdempotentRetryCode = make(map[codes.Code]bool)
retryOptions = []gax.CallOption{
gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
gax.WithRetryCodes(idempotentRetryCodes),
}
)
func init() {
for _, code := range idempotentRetryCodes {
isIdempotentRetryCode[code] = true
}
}
func (c *Client) fullTableName(table string) string {
return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table)
}
// A Table refers to a table.
//
// A Table is safe to use concurrently.
type Table struct {
c *Client
table string
// Metadata to be sent with each request.
md metadata.MD
}
// Open opens a table.
func (c *Client) Open(table string) *Table {
return &Table{
c: c,
table: table,
md: metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)),
}
}
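// A minimal usage sketch: dial a Client, open a table by name, and read one
// row; "proj", "instance" and "mytable" are placeholders (ReadRow is defined
// below).
//
//	client, err := NewClient(ctx, "proj", "instance")
//	if err != nil {
//		// handle error
//	}
//	defer client.Close()
//	tbl := client.Open("mytable")
//	row, err := tbl.ReadRow(ctx, "some-row-key")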
// TODO(dsymonds): Read method that returns a sequence of ReadItems.
// ReadRows reads rows from a table. f is called for each row.
// If f returns false, the stream is shut down and ReadRows returns.
// f owns its argument, and f is called serially in order by row key.
//
// By default, the yielded rows will contain all values in all cells.
// Use RowFilter to limit the cells returned.
func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
ctx = metadata.NewContext(ctx, t.md)
var prevRowKey string
err := gax.Invoke(ctx, func(ctx context.Context) error {
req := &btpb.ReadRowsRequest{
TableName: t.c.fullTableName(t.table),
Rows: arg.proto(),
}
for _, opt := range opts {
opt.set(req)
}
ctx, cancel := context.WithCancel(ctx) // for aborting the stream
defer cancel()
stream, err := t.c.client.ReadRows(ctx, req)
if err != nil {
return err
}
cr := newChunkReader()
for {
res, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
// Reset arg for next Invoke call.
arg = arg.retainRowsAfter(prevRowKey)
return err
}
for _, cc := range res.Chunks {
row, err := cr.Process(cc)
if err != nil {
// No need to prepare for a retry, this is an unretryable error.
return err
}
if row == nil {
continue
}
prevRowKey = row.Key()
if !f(row) {
// Cancel and drain stream.
cancel()
for {
if _, err := stream.Recv(); err != nil {
// The stream has ended. We don't return an error
// because the caller has intentionally interrupted the scan.
return nil
}
}
}
}
if err := cr.Close(); err != nil {
// No need to prepare for a retry, this is an unretryable error.
return err
}
}
return err
}, retryOptions...)
return err
}
// ReadRow is a convenience implementation of a single-row reader.
// A missing row will return a zero-length map and a nil error.
func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) {
var r Row
err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool {
r = rr
return true
}, opts...)
return r, err
}
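// A sketch of the early-stop contract described above: returning false from f
// cancels and drains the stream, and ReadRows reports a nil error.
//
//	n := 0
//	err := tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool {
//		n++
//		return n < 10 // stop after ten rows
//	})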
// decodeFamilyProto adds the cell data from f to the given row.
func decodeFamilyProto(r Row, row string, f *btpb.Family) {
fam := f.Name // does not have colon
for _, col := range f.Columns {
for _, cell := range col.Cells {
ri := ReadItem{
Row: row,
Column: fam + ":" + string(col.Qualifier),
Timestamp: Timestamp(cell.TimestampMicros),
Value: cell.Value,
}
r[fam] = append(r[fam], ri)
}
}
}
// RowSet is a set of rows to be read. It is satisfied by RowList and RowRange.
type RowSet interface {
proto() *btpb.RowSet
// retainRowsAfter returns a new RowSet that does not include the
// given row key or any row key lexicographically less than it.
retainRowsAfter(lastRowKey string) RowSet
}
// RowList is a sequence of row keys.
type RowList []string
func (r RowList) proto() *btpb.RowSet {
keys := make([][]byte, len(r))
for i, row := range r {
keys[i] = []byte(row)
}
return &btpb.RowSet{RowKeys: keys}
}
func (r RowList) retainRowsAfter(lastRowKey string) RowSet {
var retryKeys RowList
for _, key := range r {
if key > lastRowKey {
retryKeys = append(retryKeys, key)
}
}
return retryKeys
}
// A RowRange is a half-open interval [Start, Limit) encompassing
// all the rows with keys at least as large as Start, and less than Limit.
// (Bigtable string comparison is the same as Go's.)
// A RowRange can be unbounded, encompassing all keys at least as large as Start.
type RowRange struct {
start string
limit string
}
// NewRange returns the new RowRange [begin, end).
func NewRange(begin, end string) RowRange {
return RowRange{
start: begin,
limit: end,
}
}
// Unbounded tests whether a RowRange is unbounded.
func (r RowRange) Unbounded() bool {
return r.limit == ""
}
// Contains says whether the RowRange contains the key.
func (r RowRange) Contains(row string) bool {
return r.start <= row && (r.limit == "" || r.limit > row)
}
// String provides a printable description of a RowRange.
func (r RowRange) String() string {
a := strconv.Quote(r.start)
if r.Unbounded() {
return fmt.Sprintf("[%s,∞)", a)
}
return fmt.Sprintf("[%s,%q)", a, r.limit)
}
func (r RowRange) proto() *btpb.RowSet {
var rr *btpb.RowRange
rr = &btpb.RowRange{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)}}
if !r.Unbounded() {
rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)}
}
return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}}
}
func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
// Set the beginning of the range to the row after the last scanned.
start := lastRowKey + "\x00"
if r.Unbounded() {
return InfiniteRange(start)
}
return NewRange(start, r.limit)
}
// SingleRow returns a RowRange for reading a single row.
func SingleRow(row string) RowRange {
return RowRange{
start: row,
limit: row + "\x00",
}
}
// PrefixRange returns a RowRange consisting of all keys starting with the prefix.
func PrefixRange(prefix string) RowRange {
return RowRange{
start: prefix,
limit: prefixSuccessor(prefix),
}
}
// InfiniteRange returns the RowRange consisting of all keys at least as
// large as start.
func InfiniteRange(start string) RowRange {
return RowRange{
start: start,
limit: "",
}
}
// prefixSuccessor returns the lexically smallest string greater than the
// prefix, if it exists, or "" otherwise. In either case, it is the string
// needed for the Limit of a RowRange.
func prefixSuccessor(prefix string) string {
if prefix == "" {
return "" // infinite range
}
n := len(prefix)
for n--; n >= 0 && prefix[n] == '\xff'; n-- {
}
if n == -1 {
return ""
}
ans := []byte(prefix[:n])
ans = append(ans, prefix[n]+1)
return string(ans)
}
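// For example (these cases are exercised by TestPrefix in bigtable_test.go):
//
//	prefixSuccessor("")      == ""   // no prefix; the range is infinite
//	prefixSuccessor("\xff")  == ""   // all-0xff prefixes have no successor
//	prefixSuccessor("x\xff") == "y"  // trailing 0xff bytes drop, last byte increments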
// A ReadOption is an optional argument to ReadRows.
type ReadOption interface {
set(req *btpb.ReadRowsRequest)
}
// RowFilter returns a ReadOption that applies f to the contents of read rows.
func RowFilter(f Filter) ReadOption { return rowFilter{f} }
type rowFilter struct{ f Filter }
func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() }
// LimitRows returns a ReadOption that will limit the number of rows to be read.
func LimitRows(limit int64) ReadOption { return limitRows{limit} }
type limitRows struct{ limit int64 }
func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit }
// mutationsAreRetryable returns true if all mutations are idempotent
// and therefore retryable. A mutation is idempotent iff every cell it sets
// has an explicit timestamp and does not rely on the server to assign one.
func mutationsAreRetryable(muts []*btpb.Mutation) bool {
serverTime := int64(ServerTime)
for _, mut := range muts {
setCell := mut.GetSetCell()
if setCell != nil && setCell.TimestampMicros == serverTime {
return false
}
}
return true
}
// Apply applies a Mutation to a specific row.
func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
ctx = metadata.NewContext(ctx, t.md)
after := func(res proto.Message) {
for _, o := range opts {
o.after(res)
}
}
var callOptions []gax.CallOption
if m.cond == nil {
req := &btpb.MutateRowRequest{
TableName: t.c.fullTableName(t.table),
RowKey: []byte(row),
Mutations: m.ops,
}
if mutationsAreRetryable(m.ops) {
callOptions = retryOptions
}
var res *btpb.MutateRowResponse
err := gax.Invoke(ctx, func(ctx context.Context) error {
var err error
res, err = t.c.client.MutateRow(ctx, req)
return err
}, callOptions...)
if err == nil {
after(res)
}
return err
}
req := &btpb.CheckAndMutateRowRequest{
TableName: t.c.fullTableName(t.table),
RowKey: []byte(row),
PredicateFilter: m.cond.proto(),
}
if m.mtrue != nil {
req.TrueMutations = m.mtrue.ops
}
if m.mfalse != nil {
req.FalseMutations = m.mfalse.ops
}
if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
callOptions = retryOptions
}
var cmRes *btpb.CheckAndMutateRowResponse
err := gax.Invoke(ctx, func(ctx context.Context) error {
var err error
cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
return err
}, callOptions...)
if err == nil {
after(cmRes)
}
return err
}
// An ApplyOption is an optional argument to Apply.
type ApplyOption interface {
after(res proto.Message)
}
type applyAfterFunc func(res proto.Message)
func (a applyAfterFunc) after(res proto.Message) { a(res) }
// GetCondMutationResult returns an ApplyOption that reports whether the conditional
// mutation's condition matched.
func GetCondMutationResult(matched *bool) ApplyOption {
return applyAfterFunc(func(res proto.Message) {
if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok {
*matched = res.PredicateMatched
}
})
}
// Mutation represents a set of changes for a single row of a table.
type Mutation struct {
ops []*btpb.Mutation
// for conditional mutations
cond Filter
mtrue, mfalse *Mutation
}
// NewMutation returns a new mutation.
func NewMutation() *Mutation {
return new(Mutation)
}
// NewCondMutation returns a conditional mutation.
// The given row filter determines which mutation is applied:
// If the filter matches any cell in the row, mtrue is applied;
// otherwise, mfalse is applied.
// Either given mutation may be nil.
func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}
}
// Set sets a value in a specified column, with the given timestamp.
// The timestamp will be truncated to millisecond resolution.
// A timestamp of ServerTime means to use the server timestamp.
func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
if ts != ServerTime {
// Truncate to millisecond resolution, since that's the default table config.
// TODO(dsymonds): Provide a way to override this behaviour.
ts -= ts % 1000
}
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
FamilyName: family,
ColumnQualifier: []byte(column),
TimestampMicros: int64(ts),
Value: value,
}}})
}
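// A minimal sketch of building and applying a mutation; the "follows" family
// mirrors the one used in the tests below.
//
//	mut := NewMutation()
//	mut.Set("follows", "tjefferson", Now(), []byte("1"))
//	err := tbl.Apply(ctx, "gwashington", mut)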
// DeleteCellsInColumn will delete all the cells whose columns are family:column.
func (m *Mutation) DeleteCellsInColumn(family, column string) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
FamilyName: family,
ColumnQualifier: []byte(column),
}}})
}
// DeleteTimestampRange deletes all cells whose columns are family:column
// and whose timestamps are in the half-open interval [start, end).
// If end is zero, it will be interpreted as infinity.
func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
FamilyName: family,
ColumnQualifier: []byte(column),
TimeRange: &btpb.TimestampRange{
StartTimestampMicros: int64(start),
EndTimestampMicros: int64(end),
},
}}})
}
// DeleteCellsInFamily will delete all the cells whose columns are family:*.
func (m *Mutation) DeleteCellsInFamily(family string) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{
FamilyName: family,
}}})
}
// DeleteRow deletes the entire row.
func (m *Mutation) DeleteRow() {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}})
}
// entryErr is a container that combines an entry with the error that was returned for it.
// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed.
type entryErr struct {
Entry *btpb.MutateRowsRequest_Entry
Err error
}
// ApplyBulk applies multiple Mutations.
// Each mutation is individually applied atomically,
// but the set of mutations may be applied in any order.
//
// Two types of failures may occur. If the entire process
// fails, (nil, err) will be returned. If specific mutations
// fail to apply, ([]err, nil) will be returned, and the errors
// will correspond to the relevant rowKeys/muts arguments.
//
// Conditional mutations cannot be applied in bulk and providing one will result in an error.
func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
ctx = metadata.NewContext(ctx, t.md)
if len(rowKeys) != len(muts) {
return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
}
origEntries := make([]*entryErr, len(rowKeys))
for i, key := range rowKeys {
mut := muts[i]
if mut.cond != nil {
return nil, errors.New("conditional mutations cannot be applied in bulk")
}
origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
}
// entries will be reduced after each invocation to just what needs to be retried.
entries := make([]*entryErr, len(rowKeys))
copy(entries, origEntries)
err := gax.Invoke(ctx, func(ctx context.Context) error {
err := t.doApplyBulk(ctx, entries, opts...)
if err != nil {
// We want to retry the entire request with the current entries
return err
}
entries = t.getApplyBulkRetries(entries)
if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
// We have at least one mutation that needs to be retried.
// Return an arbitrary error that is retryable according to callOptions.
return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
}
return nil
}, retryOptions...)
if err != nil {
return nil, err
}
// Accumulate all of the errors into an array to return, interspersed with nils for successful
// entries. The absence of any errors means we should return nil.
var errs []error
var foundErr bool
for _, entry := range origEntries {
if entry.Err != nil {
foundErr = true
}
errs = append(errs, entry.Err)
}
if foundErr {
return errs, nil
}
return nil, nil
}
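// A minimal sketch of the two-level error contract described above; m1 and m2
// are assumed to be non-conditional Mutations.
//
//	errs, err := tbl.ApplyBulk(ctx, []string{"r1", "r2"}, []*Mutation{m1, m2})
//	if err != nil {
//		// the entire request failed
//	} else if errs != nil {
//		// errs[i] is the per-row error for the i'th row key, or nil on success
//	}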
// getApplyBulkRetries returns the entries that need to be retried
func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr {
var retryEntries []*entryErr
for _, entry := range entries {
err := entry.Err
if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) {
// There was an error and the entry is retryable.
retryEntries = append(retryEntries, entry)
}
}
return retryEntries
}
// doApplyBulk does the work of a single ApplyBulk invocation
func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error {
after := func(res proto.Message) {
for _, o := range opts {
o.after(res)
}
}
entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs))
for i, entryErr := range entryErrs {
entries[i] = entryErr.Entry
}
req := &btpb.MutateRowsRequest{
TableName: t.c.fullTableName(t.table),
Entries: entries,
}
stream, err := t.c.client.MutateRows(ctx, req)
if err != nil {
return err
}
for {
res, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return err
}
for i, entry := range res.Entries {
status := entry.Status
if status.Code == int32(codes.OK) {
entryErrs[i].Err = nil
} else {
entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message)
}
}
after(res)
}
return nil
}
// Timestamp is in units of microseconds since 1 January 1970.
type Timestamp int64
// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.
// It indicates that the server's timestamp should be used.
const ServerTime Timestamp = -1
// Time converts a time.Time into a Timestamp.
func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) }
// Now returns the Timestamp representation of the current time on the client.
func Now() Timestamp { return Time(time.Now()) }
// Time converts a Timestamp into a time.Time.
func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }
// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
// It returns the newly written cells.
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
ctx = metadata.NewContext(ctx, t.md)
req := &btpb.ReadModifyWriteRowRequest{
TableName: t.c.fullTableName(t.table),
RowKey: []byte(row),
Rules: m.ops,
}
res, err := t.c.client.ReadModifyWriteRow(ctx, req)
if err != nil {
return nil, err
}
r := make(Row)
	for _, fam := range res.Row.Families { // res.Row is *btpb.Row, fam is *btpb.Family
decodeFamilyProto(r, row, fam)
}
return r, nil
}
// ReadModifyWrite represents a set of operations on a single row of a table.
// It is like Mutation but for non-idempotent changes.
// When applied, these operations operate on the latest values of the row's cells,
// and result in a new value being written to the relevant cell with a timestamp
// that is max(existing timestamp, current server time).
//
// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will
// be executed serially by the server.
type ReadModifyWrite struct {
ops []*btpb.ReadModifyWriteRule
}
// NewReadModifyWrite returns a new ReadModifyWrite.
func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }
// AppendValue appends a value to a specific cell's value.
// If the cell is unset, it will be treated as an empty value.
func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
FamilyName: family,
ColumnQualifier: []byte(column),
Rule: &btpb.ReadModifyWriteRule_AppendValue{v},
})
}
// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer,
// and adds a value to it. If the cell is unset, it will be treated as zero.
// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite
// operation will fail.
func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
FamilyName: family,
ColumnQualifier: []byte(column),
Rule: &btpb.ReadModifyWriteRule_IncrementAmount{delta},
})
}
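// A minimal sketch combining both rules; the "counter" family mirrors the one
// used in the tests below.
//
//	rmw := NewReadModifyWrite()
//	rmw.Increment("counter", "likes", 1)
//	rmw.AppendValue("counter", "log", []byte("+1"))
//	row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", rmw)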

vendor/cloud.google.com/go/bigtable/bigtable_test.go generated vendored Normal file

@@ -0,0 +1,596 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"flag"
"fmt"
"math/rand"
"reflect"
"sort"
"strings"
"sync"
"testing"
"time"
"cloud.google.com/go/bigtable/bttest"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
func TestPrefix(t *testing.T) {
tests := []struct {
prefix, succ string
}{
{"", ""},
{"\xff", ""}, // when used, "" means Infinity
{"x\xff", "y"},
{"\xfe", "\xff"},
}
for _, tc := range tests {
got := prefixSuccessor(tc.prefix)
if got != tc.succ {
t.Errorf("prefixSuccessor(%q) = %q, want %s", tc.prefix, got, tc.succ)
continue
}
r := PrefixRange(tc.prefix)
if tc.succ == "" && r.limit != "" {
t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit)
}
if tc.succ != "" && r.limit != tc.succ {
t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ)
}
}
}
var useProd = flag.String("use_prod", "", `if set to "proj,instance,table", run integration test against production`)
func TestClientIntegration(t *testing.T) {
start := time.Now()
lastCheckpoint := start
checkpoint := func(s string) {
n := time.Now()
t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint))
lastCheckpoint = n
}
proj, instance, table := "proj", "instance", "mytable"
var clientOpts []option.ClientOption
timeout := 20 * time.Second
if *useProd == "" {
srv, err := bttest.NewServer("127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer srv.Close()
t.Logf("bttest.Server running on %s", srv.Addr)
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
if err != nil {
t.Fatalf("grpc.Dial: %v", err)
}
clientOpts = []option.ClientOption{option.WithGRPCConn(conn)}
} else {
t.Logf("Running test against production")
a := strings.SplitN(*useProd, ",", 3)
proj, instance, table = a[0], a[1], a[2]
timeout = 5 * time.Minute
}
ctx, _ := context.WithTimeout(context.Background(), timeout)
client, err := NewClient(ctx, proj, instance, clientOpts...)
if err != nil {
t.Fatalf("NewClient: %v", err)
}
defer client.Close()
checkpoint("dialed Client")
adminClient, err := NewAdminClient(ctx, proj, instance, clientOpts...)
if err != nil {
t.Fatalf("NewAdminClient: %v", err)
}
defer adminClient.Close()
checkpoint("dialed AdminClient")
// Delete the table at the end of the test.
// Do this even before creating the table so that if this is running
// against production and CreateTable fails there's a chance of cleaning it up.
defer adminClient.DeleteTable(ctx, table)
if err := adminClient.CreateTable(ctx, table); err != nil {
t.Fatalf("Creating table: %v", err)
}
checkpoint("created table")
if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil {
t.Fatalf("Creating column family: %v", err)
}
checkpoint(`created "follows" column family`)
tbl := client.Open(table)
// Insert some data.
initialData := map[string][]string{
"wmckinley": {"tjefferson"},
"gwashington": {"jadams"},
"tjefferson": {"gwashington", "jadams"}, // wmckinley set conditionally below
"jadams": {"gwashington", "tjefferson"},
}
for row, ss := range initialData {
mut := NewMutation()
for _, name := range ss {
mut.Set("follows", name, 0, []byte("1"))
}
if err := tbl.Apply(ctx, row, mut); err != nil {
t.Errorf("Mutating row %q: %v", row, err)
}
}
checkpoint("inserted initial data")
// Do a conditional mutation with a complex filter.
mutTrue := NewMutation()
mutTrue.Set("follows", "wmckinley", 0, []byte("1"))
filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter("."))
mut := NewCondMutation(filter, mutTrue, nil)
if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
t.Errorf("Conditionally mutating row: %v", err)
}
// Do a second condition mutation with a filter that does not match,
// and thus no changes should be made.
mutTrue = NewMutation()
mutTrue.DeleteRow()
filter = ColumnFilter("snoop.dogg")
mut = NewCondMutation(filter, mutTrue, nil)
if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
t.Errorf("Conditionally mutating row: %v", err)
}
checkpoint("did two conditional mutations")
// Fetch a row.
row, err := tbl.ReadRow(ctx, "jadams")
if err != nil {
t.Fatalf("Reading a row: %v", err)
}
wantRow := Row{
"follows": []ReadItem{
{Row: "jadams", Column: "follows:gwashington", Value: []byte("1")},
{Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")},
},
}
for _, ris := range row {
sort.Sort(byColumn(ris))
}
if !reflect.DeepEqual(row, wantRow) {
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
}
checkpoint("tested ReadRow")
// Do a bunch of reads with filters.
readTests := []struct {
desc string
rr RowRange
filter Filter // may be nil
// We do the read, grab all the cells, turn them into "<row>-<col>-<val>",
// sort that list, and join with a comma.
want string
}{
{
desc: "read all, unfiltered",
rr: RowRange{},
want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
},
{
desc: "read with InfiniteRange, unfiltered",
rr: InfiniteRange("tjefferson"),
want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
},
{
desc: "read with NewRange, unfiltered",
rr: NewRange("gargamel", "hubbard"),
want: "gwashington-jadams-1",
},
{
desc: "read with PrefixRange, unfiltered",
rr: PrefixRange("jad"),
want: "jadams-gwashington-1,jadams-tjefferson-1",
},
{
desc: "read with SingleRow, unfiltered",
rr: SingleRow("wmckinley"),
want: "wmckinley-tjefferson-1",
},
{
desc: "read all, with ColumnFilter",
rr: RowRange{},
filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson"
want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
},
}
for _, tc := range readTests {
var opts []ReadOption
if tc.filter != nil {
opts = append(opts, RowFilter(tc.filter))
}
var elt []string
err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool {
for _, ris := range r {
for _, ri := range ris {
elt = append(elt, formatReadItem(ri))
}
}
return true
}, opts...)
if err != nil {
t.Errorf("%s: %v", tc.desc, err)
continue
}
sort.Strings(elt)
if got := strings.Join(elt, ","); got != tc.want {
t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want)
}
}
// Read a RowList
var elt []string
keys := RowList{"wmckinley", "gwashington", "jadams"}
want := "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,wmckinley-tjefferson-1"
err = tbl.ReadRows(ctx, keys, func(r Row) bool {
for _, ris := range r {
for _, ri := range ris {
elt = append(elt, formatReadItem(ri))
}
}
return true
})
if err != nil {
t.Errorf("read RowList: %v", err)
}
sort.Strings(elt)
if got := strings.Join(elt, ","); got != want {
t.Errorf("bulk read: wrong reads.\n got %q\nwant %q", got, want)
}
checkpoint("tested ReadRows in a few ways")
// Do a scan and stop part way through.
// Verify that the ReadRows callback doesn't keep running.
stopped := false
err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool {
if r.Key() < "h" {
return true
}
if !stopped {
stopped = true
return false
}
t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key())
return false
})
if err != nil {
t.Errorf("Partial ReadRows: %v", err)
}
checkpoint("did partial ReadRows test")
// Delete a row and check it goes away.
mut = NewMutation()
mut.DeleteRow()
if err := tbl.Apply(ctx, "wmckinley", mut); err != nil {
t.Errorf("Apply DeleteRow: %v", err)
}
row, err = tbl.ReadRow(ctx, "wmckinley")
if err != nil {
t.Fatalf("Reading a row after DeleteRow: %v", err)
}
if len(row) != 0 {
t.Fatalf("Read non-zero row after DeleteRow: %v", row)
}
checkpoint("exercised DeleteRow")
// Check ReadModifyWrite.
if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil {
t.Fatalf("Creating column family: %v", err)
}
appendRMW := func(b []byte) *ReadModifyWrite {
rmw := NewReadModifyWrite()
rmw.AppendValue("counter", "likes", b)
return rmw
}
incRMW := func(n int64) *ReadModifyWrite {
rmw := NewReadModifyWrite()
rmw.Increment("counter", "likes", n)
return rmw
}
rmwSeq := []struct {
desc string
rmw *ReadModifyWrite
want []byte
}{
{
desc: "append #1",
rmw: appendRMW([]byte{0, 0, 0}),
want: []byte{0, 0, 0},
},
{
desc: "append #2",
rmw: appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17
want: []byte{0, 0, 0, 0, 0, 0, 0, 17},
},
{
desc: "increment",
rmw: incRMW(8),
want: []byte{0, 0, 0, 0, 0, 0, 0, 25},
},
}
for _, step := range rmwSeq {
row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw)
if err != nil {
t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err)
}
clearTimestamps(row)
wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}}
if !reflect.DeepEqual(row, wantRow) {
t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow)
}
}
checkpoint("tested ReadModifyWrite")
// Test arbitrary timestamps more thoroughly.
if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil {
t.Fatalf("Creating column family: %v", err)
}
const numVersions = 4
mut = NewMutation()
for i := 0; i < numVersions; i++ {
		// Timestamps are multiples of 1000 (microseconds) because the
		// server only permits millisecond granularity.
mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i)))
}
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
t.Fatalf("Mutating row: %v", err)
}
r, err := tbl.ReadRow(ctx, "testrow")
if err != nil {
t.Fatalf("Reading row: %v", err)
}
wantRow = Row{"ts": []ReadItem{
// These should be returned in descending timestamp order.
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow)
}
// Do the same read, but filter to the latest two versions.
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
if err != nil {
t.Fatalf("Reading row: %v", err)
}
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow)
}
// Delete the cell with timestamp 2000 and repeat the last read,
// checking that we get ts 3000 and ts 1000.
mut = NewMutation()
mut.DeleteTimestampRange("ts", "col", 2000, 3000) // half-open interval
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
t.Fatalf("Mutating row: %v", err)
}
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
if err != nil {
t.Fatalf("Reading row: %v", err)
}
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
}
checkpoint("tested multiple versions in a cell")
// Do highly concurrent reads/writes.
// TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved.
const maxConcurrency = 100
var wg sync.WaitGroup
for i := 0; i < maxConcurrency; i++ {
wg.Add(1)
go func() {
defer wg.Done()
switch r := rand.Intn(100); { // r ∈ [0,100)
case 0 <= r && r < 30:
// Do a read.
_, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1)))
if err != nil {
t.Errorf("Concurrent read: %v", err)
}
case 30 <= r && r < 100:
// Do a write.
mut := NewMutation()
mut.Set("ts", "col", 0, []byte("data"))
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
t.Errorf("Concurrent write: %v", err)
}
}
}()
}
wg.Wait()
checkpoint("tested high concurrency")
// Large reads, writes and scans.
bigBytes := make([]byte, 3<<20) // 3 MB is large, but less than current gRPC max of 4 MB.
nonsense := []byte("lorem ipsum dolor sit amet, ")
fill(bigBytes, nonsense)
mut = NewMutation()
mut.Set("ts", "col", 0, bigBytes)
if err := tbl.Apply(ctx, "bigrow", mut); err != nil {
t.Errorf("Big write: %v", err)
}
r, err = tbl.ReadRow(ctx, "bigrow")
if err != nil {
t.Errorf("Big read: %v", err)
}
wantRow = Row{"ts": []ReadItem{
{Row: "bigrow", Column: "ts:col", Value: bigBytes},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Big read returned incorrect bytes: %v", r)
}
// Now write 1000 rows, each with 82 KB values, then scan them all.
medBytes := make([]byte, 82<<10)
fill(medBytes, nonsense)
sem := make(chan int, 50) // do up to 50 mutations at a time.
for i := 0; i < 1000; i++ {
mut := NewMutation()
mut.Set("ts", "big-scan", 0, medBytes)
row := fmt.Sprintf("row-%d", i)
wg.Add(1)
go func() {
defer wg.Done()
defer func() { <-sem }()
sem <- 1
if err := tbl.Apply(ctx, row, mut); err != nil {
t.Errorf("Preparing large scan: %v", err)
}
}()
}
wg.Wait()
n := 0
err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool {
for _, ris := range r {
for _, ri := range ris {
n += len(ri.Value)
}
}
return true
}, RowFilter(ColumnFilter("big-scan")))
if err != nil {
t.Errorf("Doing large scan: %v", err)
}
if want := 1000 * len(medBytes); n != want {
t.Errorf("Large scan returned %d bytes, want %d", n, want)
}
// Scan a subset of the 1000 rows that we just created, using a LimitRows ReadOption.
rc := 0
wantRc := 3
err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool {
rc++
return true
}, LimitRows(int64(wantRc)))
if err != nil {
t.Errorf("Scan with row limit: %v", err)
}
if rc != wantRc {
t.Errorf("Scan with row limit returned %d rows, want %d", rc, wantRc)
}
checkpoint("tested big read/write/scan")
// Test bulk mutations
if err := adminClient.CreateColumnFamily(ctx, table, "bulk"); err != nil {
t.Fatalf("Creating column family: %v", err)
}
bulkData := map[string][]string{
"red sox": {"2004", "2007", "2013"},
"patriots": {"2001", "2003", "2004", "2014"},
"celtics": {"1981", "1984", "1986", "2008"},
}
var rowKeys []string
var muts []*Mutation
for row, ss := range bulkData {
mut := NewMutation()
for _, name := range ss {
mut.Set("bulk", name, 0, []byte("1"))
}
rowKeys = append(rowKeys, row)
muts = append(muts, mut)
}
status, err := tbl.ApplyBulk(ctx, rowKeys, muts)
if err != nil {
t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err)
}
if status != nil {
t.Errorf("non-nil status from ApplyBulk: %v", status)
}
checkpoint("inserted bulk data")
// Read each row back
for rowKey, ss := range bulkData {
row, err := tbl.ReadRow(ctx, rowKey)
if err != nil {
t.Fatalf("Reading a bulk row: %v", err)
}
for _, ris := range row {
sort.Sort(byColumn(ris))
}
var wantItems []ReadItem
for _, val := range ss {
wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")})
}
wantRow := Row{"bulk": wantItems}
if !reflect.DeepEqual(row, wantRow) {
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
}
}
checkpoint("tested reading from bulk insert")
// Test bulk write errors.
// Note: Setting timestamps as ServerTime makes sure the mutations are not retried on error.
badMut := NewMutation()
badMut.Set("badfamily", "col", ServerTime, nil)
badMut2 := NewMutation()
badMut2.Set("badfamily2", "goodcol", ServerTime, []byte("1"))
status, err = tbl.ApplyBulk(ctx, []string{"badrow", "badrow2"}, []*Mutation{badMut, badMut2})
if err != nil {
t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err)
}
if status == nil {
t.Errorf("No errors for bad bulk mutation")
} else if status[0] == nil || status[1] == nil {
t.Errorf("No error for bad bulk mutation")
}
}
func formatReadItem(ri ReadItem) string {
// Use the column qualifier only to make the test data briefer.
col := ri.Column[strings.Index(ri.Column, ":")+1:]
return fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value)
}
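// fill repeatedly copies sub into b; the final len(sub) or fewer bytes are
// left zeroed, which is fine for these size-focused tests.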
func fill(b, sub []byte) {
for len(b) > len(sub) {
n := copy(b, sub)
b = b[n:]
}
}
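// byColumn sorts ReadItems by column name.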
type byColumn []ReadItem
func (b byColumn) Len() int { return len(b) }
func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column }
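// clearTimestamps zeroes all timestamps in r so tests can compare rows
// without depending on server-assigned times.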
func clearTimestamps(r Row) {
for _, ris := range r {
for i := range ris {
ris[i].Timestamp = 0
}
}
}


@@ -0,0 +1,83 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bttest_test
import (
"fmt"
"log"
"cloud.google.com/go/bigtable"
"cloud.google.com/go/bigtable/bttest"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
func ExampleNewServer() {
srv, err := bttest.NewServer("127.0.0.1:0")
if err != nil {
log.Fatalln(err)
}
ctx := context.Background()
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
if err != nil {
log.Fatalln(err)
}
proj, instance := "proj", "instance"
adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn))
if err != nil {
log.Fatalln(err)
}
if err = adminClient.CreateTable(ctx, "example"); err != nil {
log.Fatalln(err)
}
if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil {
log.Fatalln(err)
}
client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn))
if err != nil {
log.Fatalln(err)
}
tbl := client.Open("example")
mut := bigtable.NewMutation()
mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!"))
if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil {
log.Fatalln(err)
}
if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil {
log.Fatalln(err)
} else {
for _, column := range row["links"] {
fmt.Println(column.Column)
fmt.Println(string(column.Value))
}
}
// Output:
// links:golang.org
// Gophers!
}

vendor/cloud.google.com/go/bigtable/bttest/inmem.go generated vendored Normal file

@@ -0,0 +1,947 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package bttest contains test helpers for working with the bigtable package.
To use a Server, create it, and then connect to it with no security:
(The project/instance values are ignored.)
srv, err := bttest.NewServer("127.0.0.1:0")
...
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
...
client, err := bigtable.NewClient(ctx, proj, instance,
option.WithGRPCConn(conn))
...
*/
package bttest // import "cloud.google.com/go/bigtable/bttest"
import (
"encoding/binary"
"fmt"
"log"
"math/rand"
"net"
"regexp"
"sort"
"strings"
"sync"
"time"
emptypb "github.com/golang/protobuf/ptypes/empty"
"github.com/golang/protobuf/ptypes/wrappers"
"golang.org/x/net/context"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
statpb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// Server is an in-memory Cloud Bigtable fake.
// It is unauthenticated, and only a rough approximation.
type Server struct {
Addr string
l net.Listener
srv *grpc.Server
s *server
}
// server is the real implementation of the fake.
// It is a separate and unexported type so the API won't be cluttered with
// methods that are only relevant to the fake's implementation.
type server struct {
mu sync.Mutex
tables map[string]*table // keyed by fully qualified name
gcc chan int // set when gcloop starts, closed when server shuts down
// Any unimplemented methods will cause a panic.
btapb.BigtableTableAdminServer
btpb.BigtableServer
}
// NewServer creates a new Server.
// The Server will be listening for gRPC connections, without TLS,
// on the provided address. The resolved address is named by the Addr field.
func NewServer(laddr string, opt ...grpc.ServerOption) (*Server, error) {
l, err := net.Listen("tcp", laddr)
if err != nil {
return nil, err
}
s := &Server{
Addr: l.Addr().String(),
l: l,
srv: grpc.NewServer(opt...),
s: &server{
tables: make(map[string]*table),
},
}
btapb.RegisterBigtableTableAdminServer(s.srv, s.s)
btpb.RegisterBigtableServer(s.srv, s.s)
go s.srv.Serve(s.l)
return s, nil
}
// Close shuts down the server.
func (s *Server) Close() {
s.s.mu.Lock()
if s.s.gcc != nil {
close(s.s.gcc)
}
s.s.mu.Unlock()
s.srv.Stop()
s.l.Close()
}
func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest) (*btapb.Table, error) {
tbl := req.Parent + "/tables/" + req.TableId
s.mu.Lock()
if _, ok := s.tables[tbl]; ok {
s.mu.Unlock()
return nil, fmt.Errorf("table %q already exists", tbl)
}
s.tables[tbl] = newTable(req)
s.mu.Unlock()
return &btapb.Table{Name: tbl}, nil
}
func (s *server) ListTables(ctx context.Context, req *btapb.ListTablesRequest) (*btapb.ListTablesResponse, error) {
res := &btapb.ListTablesResponse{}
prefix := req.Parent + "/tables/"
s.mu.Lock()
for tbl := range s.tables {
if strings.HasPrefix(tbl, prefix) {
res.Tables = append(res.Tables, &btapb.Table{Name: tbl})
}
}
s.mu.Unlock()
return res, nil
}
func (s *server) GetTable(ctx context.Context, req *btapb.GetTableRequest) (*btapb.Table, error) {
tbl := req.Name
s.mu.Lock()
tblIns, ok := s.tables[tbl]
s.mu.Unlock()
if !ok {
return nil, fmt.Errorf("table %q not found", tbl)
}
return &btapb.Table{
Name: tbl,
ColumnFamilies: toColumnFamilies(tblIns.columnFamilies()),
}, nil
}
func (s *server) DeleteTable(ctx context.Context, req *btapb.DeleteTableRequest) (*emptypb.Empty, error) {
s.mu.Lock()
defer s.mu.Unlock()
if _, ok := s.tables[req.Name]; !ok {
return nil, fmt.Errorf("no such table %q", req.Name)
}
delete(s.tables, req.Name)
return &emptypb.Empty{}, nil
}
func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColumnFamiliesRequest) (*btapb.Table, error) {
tblName := req.Name[strings.LastIndex(req.Name, "/")+1:]
s.mu.Lock()
tbl, ok := s.tables[req.Name]
s.mu.Unlock()
if !ok {
return nil, fmt.Errorf("no such table %q", req.Name)
}
tbl.mu.Lock()
defer tbl.mu.Unlock()
for _, mod := range req.Modifications {
if create := mod.GetCreate(); create != nil {
if _, ok := tbl.families[mod.Id]; ok {
return nil, fmt.Errorf("family %q already exists", mod.Id)
}
newcf := &columnFamily{
name: req.Name + "/columnFamilies/" + mod.Id,
gcRule: create.GcRule,
}
tbl.families[mod.Id] = newcf
} else if mod.GetDrop() {
if _, ok := tbl.families[mod.Id]; !ok {
return nil, fmt.Errorf("can't delete unknown family %q", mod.Id)
}
delete(tbl.families, mod.Id)
} else if modify := mod.GetUpdate(); modify != nil {
if _, ok := tbl.families[mod.Id]; !ok {
return nil, fmt.Errorf("no such family %q", mod.Id)
}
newcf := &columnFamily{
name: req.Name + "/columnFamilies/" + mod.Id,
gcRule: modify.GcRule,
}
// Assume that we ALWAYS want to replace with the new setting.
// We may need to support partial updates later, though.
tbl.families[mod.Id] = newcf
}
}
s.needGC()
return &btapb.Table{
Name: tblName,
ColumnFamilies: toColumnFamilies(tbl.families),
}, nil
}
func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error {
s.mu.Lock()
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return fmt.Errorf("no such table %q", req.TableName)
}
// Rows to read can be specified by a set of row keys and/or a set of row ranges.
// Output is a stream of sorted, de-duped rows.
tbl.mu.RLock()
rowSet := make(map[string]*row)
if req.Rows != nil {
// Add the explicitly given keys
for _, key := range req.Rows.RowKeys {
start := string(key)
addRows(start, start+"\x00", tbl, rowSet)
}
// Add keys from row ranges
for _, rr := range req.Rows.RowRanges {
var start, end string
switch sk := rr.StartKey.(type) {
case *btpb.RowRange_StartKeyClosed:
start = string(sk.StartKeyClosed)
case *btpb.RowRange_StartKeyOpen:
start = string(sk.StartKeyOpen) + "\x00"
}
switch ek := rr.EndKey.(type) {
case *btpb.RowRange_EndKeyClosed:
end = string(ek.EndKeyClosed) + "\x00"
case *btpb.RowRange_EndKeyOpen:
end = string(ek.EndKeyOpen)
}
addRows(start, end, tbl, rowSet)
}
} else {
// Read all rows
addRows("", "", tbl, rowSet)
}
tbl.mu.RUnlock()
rows := make([]*row, 0, len(rowSet))
for _, r := range rowSet {
rows = append(rows, r)
}
sort.Sort(byRowKey(rows))
limit := int(req.RowsLimit)
for i, r := range rows {
if limit > 0 && i >= limit {
return nil
}
if err := streamRow(stream, r, req.Filter); err != nil {
return err
}
}
return nil
}
func addRows(start, end string, tbl *table, rowSet map[string]*row) {
si, ei := 0, len(tbl.rows) // half-open interval
if start != "" {
si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start })
}
if end != "" {
ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end })
}
if si < ei {
for _, row := range tbl.rows[si:ei] {
rowSet[row.key] = row
}
}
}
func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) error {
r.mu.Lock()
nr := r.copy()
r.mu.Unlock()
r = nr
filterRow(f, r)
rrr := &btpb.ReadRowsResponse{}
for col, cells := range r.cells {
i := strings.Index(col, ":") // guaranteed to exist
fam, col := col[:i], col[i+1:]
if len(cells) == 0 {
continue
}
// TODO(dsymonds): Apply transformers.
for _, cell := range cells {
rrr.Chunks = append(rrr.Chunks, &btpb.ReadRowsResponse_CellChunk{
RowKey: []byte(r.key),
FamilyName: &wrappers.StringValue{Value: fam},
Qualifier: &wrappers.BytesValue{Value: []byte(col)},
TimestampMicros: cell.ts,
Value: cell.value,
})
}
}
// We can't have a cell with just COMMIT set, which would imply a new empty cell.
// So modify the last cell to have the COMMIT flag set.
if len(rrr.Chunks) > 0 {
rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true}
}
return stream.Send(rrr)
}
// filterRow modifies a row with the given filter.
func filterRow(f *btpb.RowFilter, r *row) {
if f == nil {
return
}
// Handle filters that apply beyond just including/excluding cells.
switch f := f.Filter.(type) {
case *btpb.RowFilter_Chain_:
for _, sub := range f.Chain.Filters {
filterRow(sub, r)
}
return
case *btpb.RowFilter_Interleave_:
srs := make([]*row, 0, len(f.Interleave.Filters))
for _, sub := range f.Interleave.Filters {
sr := r.copy()
filterRow(sub, sr)
srs = append(srs, sr)
}
// merge
// TODO(dsymonds): is this correct?
r.cells = make(map[string][]cell)
for _, sr := range srs {
for col, cs := range sr.cells {
r.cells[col] = append(r.cells[col], cs...)
}
}
for _, cs := range r.cells {
sort.Sort(byDescTS(cs))
}
return
case *btpb.RowFilter_CellsPerColumnLimitFilter:
lim := int(f.CellsPerColumnLimitFilter)
for col, cs := range r.cells {
if len(cs) > lim {
r.cells[col] = cs[:lim]
}
}
return
}
// Any other case, operate on a per-cell basis.
for key, cs := range r.cells {
i := strings.Index(key, ":") // guaranteed to exist
fam, col := key[:i], key[i+1:]
r.cells[key] = filterCells(f, fam, col, cs)
}
}
func filterCells(f *btpb.RowFilter, fam, col string, cs []cell) []cell {
var ret []cell
for _, cell := range cs {
if includeCell(f, fam, col, cell) {
ret = append(ret, cell)
}
}
return ret
}
func includeCell(f *btpb.RowFilter, fam, col string, cell cell) bool {
if f == nil {
return true
}
// TODO(dsymonds): Implement many more filters.
switch f := f.Filter.(type) {
default:
log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f)
return true
case *btpb.RowFilter_FamilyNameRegexFilter:
pat := string(f.FamilyNameRegexFilter)
rx, err := regexp.Compile(pat)
if err != nil {
log.Printf("Bad family_name_regex_filter pattern %q: %v", pat, err)
return false
}
return rx.MatchString(fam)
case *btpb.RowFilter_ColumnQualifierRegexFilter:
pat := string(f.ColumnQualifierRegexFilter)
rx, err := regexp.Compile(pat)
if err != nil {
log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err)
return false
}
return rx.MatchString(col)
case *btpb.RowFilter_ValueRegexFilter:
pat := string(f.ValueRegexFilter)
rx, err := regexp.Compile(pat)
if err != nil {
log.Printf("Bad value_regex_filter pattern %q: %v", pat, err)
return false
}
return rx.Match(cell.value)
}
}
func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*btpb.MutateRowResponse, error) {
s.mu.Lock()
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return nil, fmt.Errorf("no such table %q", req.TableName)
}
fs := tbl.columnFamiliesSet()
r := tbl.mutableRow(string(req.RowKey))
r.mu.Lock()
defer r.mu.Unlock()
if err := applyMutations(tbl, r, req.Mutations, fs); err != nil {
return nil, err
}
return &btpb.MutateRowResponse{}, nil
}
func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_MutateRowsServer) error {
s.mu.Lock()
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return fmt.Errorf("no such table %q", req.TableName)
}
res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))}
fs := tbl.columnFamiliesSet()
for i, entry := range req.Entries {
r := tbl.mutableRow(string(entry.RowKey))
r.mu.Lock()
code, msg := int32(codes.OK), ""
if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil {
code = int32(codes.Internal)
msg = err.Error()
}
res.Entries[i] = &btpb.MutateRowsResponse_Entry{
Index: int64(i),
Status: &statpb.Status{Code: code, Message: msg},
}
r.mu.Unlock()
}
return stream.Send(res)
}
func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) {
s.mu.Lock()
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return nil, fmt.Errorf("no such table %q", req.TableName)
}
res := &btpb.CheckAndMutateRowResponse{}
fs := tbl.columnFamiliesSet()
r := tbl.mutableRow(string(req.RowKey))
r.mu.Lock()
defer r.mu.Unlock()
// Figure out which mutation to apply.
whichMut := false
if req.PredicateFilter == nil {
// Use true_mutations iff row contains any cells.
whichMut = len(r.cells) > 0
} else {
// Use true_mutations iff any cells in the row match the filter.
// TODO(dsymonds): This could be cheaper.
nr := r.copy()
filterRow(req.PredicateFilter, nr)
for _, cs := range nr.cells {
if len(cs) > 0 {
whichMut = true
break
}
}
// TODO(dsymonds): Figure out if this is supposed to be set
// even when there's no predicate filter.
res.PredicateMatched = whichMut
}
muts := req.FalseMutations
if whichMut {
muts = req.TrueMutations
}
if err := applyMutations(tbl, r, muts, fs); err != nil {
return nil, err
}
return res, nil
}
// applyMutations applies a sequence of mutations to a row.
// fs should be a snapshot of the keys of tbl.families.
// It assumes r.mu is locked.
func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]bool) error {
for _, mut := range muts {
switch mut := mut.Mutation.(type) {
default:
return fmt.Errorf("can't handle mutation type %T", mut)
case *btpb.Mutation_SetCell_:
set := mut.SetCell
if !fs[set.FamilyName] {
return fmt.Errorf("unknown family %q", set.FamilyName)
}
ts := set.TimestampMicros
if ts == -1 { // bigtable.ServerTime
ts = newTimestamp()
}
if !tbl.validTimestamp(ts) {
return fmt.Errorf("invalid timestamp %d", ts)
}
col := fmt.Sprintf("%s:%s", set.FamilyName, set.ColumnQualifier)
newCell := cell{ts: ts, value: set.Value}
r.cells[col] = appendOrReplaceCell(r.cells[col], newCell)
case *btpb.Mutation_DeleteFromColumn_:
del := mut.DeleteFromColumn
col := fmt.Sprintf("%s:%s", del.FamilyName, del.ColumnQualifier)
cs := r.cells[col]
if del.TimeRange != nil {
tsr := del.TimeRange
if !tbl.validTimestamp(tsr.StartTimestampMicros) {
return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros)
}
if !tbl.validTimestamp(tsr.EndTimestampMicros) {
return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros)
}
// Find half-open interval to remove.
// Cells are in descending timestamp order,
// so the predicates to sort.Search are inverted.
si, ei := 0, len(cs)
if tsr.StartTimestampMicros > 0 {
ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros })
}
if tsr.EndTimestampMicros > 0 {
si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros })
}
if si < ei {
copy(cs[si:], cs[ei:])
cs = cs[:len(cs)-(ei-si)]
}
} else {
cs = nil
}
if len(cs) == 0 {
delete(r.cells, col)
} else {
r.cells[col] = cs
}
case *btpb.Mutation_DeleteFromRow_:
r.cells = make(map[string][]cell)
}
}
return nil
}
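// maxTimestamp returns the larger of two microsecond timestamps.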
func maxTimestamp(x, y int64) int64 {
if x > y {
return x
}
return y
}
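// newTimestamp returns the current time in microseconds,
// rounded down to the millisecond granularity the fake accepts.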
func newTimestamp() int64 {
ts := time.Now().UnixNano() / 1e3
ts -= ts % 1000 // round to millisecond granularity
return ts
}
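// appendOrReplaceCell inserts newCell into cs, replacing any existing cell
// with the same timestamp, and returns the slice in descending timestamp order.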
func appendOrReplaceCell(cs []cell, newCell cell) []cell {
replaced := false
for i, cell := range cs {
if cell.ts == newCell.ts {
cs[i] = newCell
replaced = true
break
}
}
if !replaced {
cs = append(cs, newCell)
}
sort.Sort(byDescTS(cs))
return cs
}
func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWriteRowRequest) (*btpb.ReadModifyWriteRowResponse, error) {
s.mu.Lock()
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return nil, fmt.Errorf("no such table %q", req.TableName)
}
updates := make(map[string]cell) // copy of updated cells; keyed by full column name
fs := tbl.columnFamiliesSet()
r := tbl.mutableRow(string(req.RowKey))
r.mu.Lock()
defer r.mu.Unlock()
// Assume all mutations apply to the most recent version of the cell.
// TODO(dsymonds): Verify this assumption and document it in the proto.
for _, rule := range req.Rules {
if !fs[rule.FamilyName] {
return nil, fmt.Errorf("unknown family %q", rule.FamilyName)
}
key := fmt.Sprintf("%s:%s", rule.FamilyName, rule.ColumnQualifier)
cells := r.cells[key]
ts := newTimestamp()
var newCell, prevCell cell
isEmpty := len(cells) == 0
if !isEmpty {
prevCell = cells[0]
// ts is the max of now or the prev cell's timestamp in case the
// prev cell is in the future
ts = maxTimestamp(ts, prevCell.ts)
}
switch rule := rule.Rule.(type) {
default:
return nil, fmt.Errorf("unknown RMW rule oneof %T", rule)
case *btpb.ReadModifyWriteRule_AppendValue:
newCell = cell{ts: ts, value: append(prevCell.value, rule.AppendValue...)}
case *btpb.ReadModifyWriteRule_IncrementAmount:
var v int64
if !isEmpty {
prevVal := prevCell.value
if len(prevVal) != 8 {
return nil, fmt.Errorf("increment on non-64-bit value")
}
v = int64(binary.BigEndian.Uint64(prevVal))
}
v += rule.IncrementAmount
var val [8]byte
binary.BigEndian.PutUint64(val[:], uint64(v))
newCell = cell{ts: ts, value: val[:]}
}
updates[key] = newCell
r.cells[key] = appendOrReplaceCell(r.cells[key], newCell)
}
res := &btpb.Row{
Key: req.RowKey,
}
for col, cell := range updates {
i := strings.Index(col, ":")
fam, qual := col[:i], col[i+1:]
var f *btpb.Family
for _, ff := range res.Families {
if ff.Name == fam {
f = ff
break
}
}
if f == nil {
f = &btpb.Family{Name: fam}
res.Families = append(res.Families, f)
}
f.Columns = append(f.Columns, &btpb.Column{
Qualifier: []byte(qual),
Cells: []*btpb.Cell{{
Value: cell.value,
}},
})
}
return &btpb.ReadModifyWriteRowResponse{Row: res}, nil
}
// needGC is invoked whenever the server needs gcloop running.
func (s *server) needGC() {
s.mu.Lock()
if s.gcc == nil {
s.gcc = make(chan int)
go s.gcloop(s.gcc)
}
s.mu.Unlock()
}
func (s *server) gcloop(done <-chan int) {
const (
minWait = 500 // ms
maxWait = 1500 // ms
)
for {
// Wait for a random time interval.
d := time.Duration(minWait+rand.Intn(maxWait-minWait)) * time.Millisecond
select {
case <-time.After(d):
case <-done:
return // server has been closed
}
// Do a GC pass over all tables.
var tables []*table
s.mu.Lock()
for _, tbl := range s.tables {
tables = append(tables, tbl)
}
s.mu.Unlock()
for _, tbl := range tables {
tbl.gc()
}
}
}
type table struct {
mu sync.RWMutex
families map[string]*columnFamily // keyed by plain family name
rows []*row // sorted by row key
rowIndex map[string]*row // indexed by row key
}
func newTable(ctr *btapb.CreateTableRequest) *table {
fams := make(map[string]*columnFamily)
if ctr.Table != nil {
for id, cf := range ctr.Table.ColumnFamilies {
fams[id] = &columnFamily{
name: ctr.Parent + "/columnFamilies/" + id,
gcRule: cf.GcRule,
}
}
}
return &table{
families: fams,
rowIndex: make(map[string]*row),
}
}
func (t *table) validTimestamp(ts int64) bool {
// Assume millisecond granularity is required.
return ts%1000 == 0
}
func (t *table) columnFamilies() map[string]*columnFamily {
cp := make(map[string]*columnFamily)
t.mu.RLock()
for fam, cf := range t.families {
cp[fam] = cf
}
t.mu.RUnlock()
return cp
}
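// columnFamiliesSet returns the current family names as a set.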
func (t *table) columnFamiliesSet() map[string]bool {
fs := make(map[string]bool)
for fam := range t.columnFamilies() {
fs[fam] = true
}
return fs
}
func (t *table) mutableRow(row string) *row {
// Try fast path first.
t.mu.RLock()
r := t.rowIndex[row]
t.mu.RUnlock()
if r != nil {
return r
}
// We probably need to create the row.
t.mu.Lock()
r = t.rowIndex[row]
if r == nil {
r = newRow(row)
t.rowIndex[row] = r
t.rows = append(t.rows, r)
sort.Sort(byRowKey(t.rows)) // yay, inefficient!
}
t.mu.Unlock()
return r
}
func (t *table) gc() {
// This method doesn't add or remove rows, so we only need a read lock for the table.
t.mu.RLock()
defer t.mu.RUnlock()
// Gather GC rules we'll apply.
rules := make(map[string]*btapb.GcRule) // keyed by "fam"
for fam, cf := range t.families {
if cf.gcRule != nil {
rules[fam] = cf.gcRule
}
}
if len(rules) == 0 {
return
}
for _, r := range t.rows {
r.mu.Lock()
r.gc(rules)
r.mu.Unlock()
}
}
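// byRowKey sorts rows by row key.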
type byRowKey []*row
func (b byRowKey) Len() int { return len(b) }
func (b byRowKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key }
type row struct {
key string
mu sync.Mutex
cells map[string][]cell // keyed by full column name; cells are in descending timestamp order
}
func newRow(key string) *row {
return &row{
key: key,
cells: make(map[string][]cell),
}
}
// copy returns a copy of the row.
// Cell values are aliased.
// r.mu should be held.
func (r *row) copy() *row {
nr := &row{
key: r.key,
cells: make(map[string][]cell, len(r.cells)),
}
for col, cs := range r.cells {
// Copy the []cell slice, but not the []byte inside each cell.
nr.cells[col] = append([]cell(nil), cs...)
}
return nr
}
// gc applies the given GC rules to the row.
// r.mu should be held.
func (r *row) gc(rules map[string]*btapb.GcRule) {
for col, cs := range r.cells {
fam := col[:strings.Index(col, ":")]
rule, ok := rules[fam]
if !ok {
continue
}
r.cells[col] = applyGC(cs, rule)
}
}
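// gcTypeWarn ensures the warning about unsupported GC rule types is logged at most once.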
var gcTypeWarn sync.Once
// applyGC applies the given GC rule to the cells.
func applyGC(cells []cell, rule *btapb.GcRule) []cell {
switch rule := rule.Rule.(type) {
default:
// TODO(dsymonds): Support GcRule_Intersection_
gcTypeWarn.Do(func() {
log.Printf("Unsupported GC rule type %T", rule)
})
case *btapb.GcRule_Union_:
for _, sub := range rule.Union.Rules {
cells = applyGC(cells, sub)
}
return cells
case *btapb.GcRule_MaxAge:
// Timestamps are in microseconds.
cutoff := time.Now().UnixNano() / 1e3
cutoff -= rule.MaxAge.Seconds * 1e6
cutoff -= int64(rule.MaxAge.Nanos) / 1e3
// The slice of cells is in descending timestamp order.
// This sort.Search will return the index of the first cell whose timestamp is chronologically before the cutoff.
si := sort.Search(len(cells), func(i int) bool { return cells[i].ts < cutoff })
if si < len(cells) {
log.Printf("bttest: GC MaxAge(%v) deleted %d cells.", rule.MaxAge, len(cells)-si)
}
return cells[:si]
case *btapb.GcRule_MaxNumVersions:
n := int(rule.MaxNumVersions)
if len(cells) > n {
cells = cells[:n]
}
return cells
}
return cells
}
type cell struct {
ts int64
value []byte
}
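// byDescTS sorts cells by timestamp, newest first.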
type byDescTS []cell
func (b byDescTS) Len() int { return len(b) }
func (b byDescTS) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts }
type columnFamily struct {
name string
gcRule *btapb.GcRule
}
func (c *columnFamily) proto() *btapb.ColumnFamily {
return &btapb.ColumnFamily{
GcRule: c.gcRule,
}
}
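// toColumnFamilies converts the fake's column families to their protobuf representation.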
func toColumnFamilies(families map[string]*columnFamily) map[string]*btapb.ColumnFamily {
fs := make(map[string]*btapb.ColumnFamily)
for k, v := range families {
fs[k] = v.proto()
}
return fs
}


@@ -0,0 +1,175 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bttest
import (
"fmt"
"math/rand"
"sync"
"sync/atomic"
"testing"
"time"
"golang.org/x/net/context"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
)
func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
s := &server{
tables: make(map[string]*table),
}
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
defer cancel()
if _, err := s.CreateTable(
ctx,
&btapb.CreateTableRequest{Parent: "cluster", TableId: "t"}); err != nil {
t.Fatal(err)
}
const name = `cluster/tables/t`
tbl := s.tables[name]
req := &btapb.ModifyColumnFamiliesRequest{
Name: name,
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{
{
Id: "cf",
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
},
},
}
_, err := s.ModifyColumnFamilies(ctx, req)
if err != nil {
t.Fatal(err)
}
req = &btapb.ModifyColumnFamiliesRequest{
Name: name,
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{
{
Id: "cf",
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{
Update: &btapb.ColumnFamily{GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}},
},
},
}
if _, err := s.ModifyColumnFamilies(ctx, req); err != nil {
t.Fatal(err)
}
var wg sync.WaitGroup
var ts int64
ms := func() []*btpb.Mutation {
return []*btpb.Mutation{
{
Mutation: &btpb.Mutation_SetCell_{
SetCell: &btpb.Mutation_SetCell{
FamilyName: "cf",
ColumnQualifier: []byte(`col`),
TimestampMicros: atomic.AddInt64(&ts, 1000),
},
},
},
}
}
rmw := func() *btpb.ReadModifyWriteRowRequest {
return &btpb.ReadModifyWriteRowRequest{
TableName: name,
RowKey: []byte(fmt.Sprint(rand.Intn(100))),
Rules: []*btpb.ReadModifyWriteRule{
{
FamilyName: "cf",
ColumnQualifier: []byte("col"),
Rule: &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1},
},
},
}
}
for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for ctx.Err() == nil {
req := &btpb.MutateRowRequest{
TableName: name,
RowKey: []byte(fmt.Sprint(rand.Intn(100))),
Mutations: ms(),
}
s.MutateRow(ctx, req)
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for ctx.Err() == nil {
_, _ = s.ReadModifyWriteRow(ctx, rmw())
}
}()
wg.Add(1)
go func() {
defer wg.Done()
tbl.gc()
}()
}
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case <-done:
case <-time.After(100 * time.Millisecond):
t.Error("Concurrent mutations and GCs haven't completed after 100ms")
}
}
func TestCreateTableWithFamily(t *testing.T) {
// The Go client currently doesn't support creating a table with column families
// in one operation but it is allowed by the API. This must still be supported by the
// fake server so this test lives here instead of in the main bigtable
// integration test.
s := &server{
tables: make(map[string]*table),
}
ctx := context.Background()
newTbl := btapb.Table{
ColumnFamilies: map[string]*btapb.ColumnFamily{
"cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 123}}},
"cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 456}}},
},
}
cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
if err != nil {
t.Fatalf("Creating table: %v", err)
}
tbl, err := s.GetTable(ctx, &btapb.GetTableRequest{Name: cTbl.Name})
if err != nil {
t.Fatalf("Getting table: %v", err)
}
cf := tbl.ColumnFamilies["cf1"]
if cf == nil {
t.Fatalf("Missing col family cf1")
}
if got, want := cf.GcRule.GetMaxNumVersions(), int32(123); got != want {
t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got)
}
cf = tbl.ColumnFamilies["cf2"]
if cf == nil {
t.Fatalf("Missing col family cf2")
}
if got, want := cf.GcRule.GetMaxNumVersions(), int32(456); got != want {
t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got)
}
}

vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go generated vendored Normal file

@@ -0,0 +1,738 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// Command docs are in cbtdoc.go.
import (
"bytes"
"flag"
"fmt"
"go/format"
"io"
"log"
"os"
"regexp"
"sort"
"strconv"
"strings"
"text/tabwriter"
"text/template"
"time"
"cloud.google.com/go/bigtable"
"cloud.google.com/go/bigtable/internal/cbtrc"
"golang.org/x/net/context"
)
var (
oFlag = flag.String("o", "", "if set, redirect stdout to this file")
config *cbtrc.Config
client *bigtable.Client
adminClient *bigtable.AdminClient
instanceAdminClient *bigtable.InstanceAdminClient
)
func getClient() *bigtable.Client {
if client == nil {
var err error
client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
if err != nil {
log.Fatalf("Making bigtable.Client: %v", err)
}
}
return client
}
func getAdminClient() *bigtable.AdminClient {
if adminClient == nil {
var err error
adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
if err != nil {
log.Fatalf("Making bigtable.AdminClient: %v", err)
}
}
return adminClient
}
func getInstanceAdminClient() *bigtable.InstanceAdminClient {
if instanceAdminClient == nil {
var err error
instanceAdminClient, err = bigtable.NewInstanceAdminClient(context.Background(), config.Project)
if err != nil {
log.Fatalf("Making bigtable.InstanceAdminClient: %v", err)
}
}
return instanceAdminClient
}
func main() {
var err error
config, err = cbtrc.Load()
if err != nil {
log.Fatal(err)
}
config.RegisterFlags()
flag.Usage = func() { usage(os.Stderr) }
flag.Parse()
if err := config.CheckFlags(); err != nil {
log.Fatal(err)
}
if config.Creds != "" {
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
}
if flag.NArg() == 0 {
usage(os.Stderr)
os.Exit(1)
}
if *oFlag != "" {
f, err := os.Create(*oFlag)
if err != nil {
log.Fatal(err)
}
defer func() {
if err := f.Close(); err != nil {
log.Fatal(err)
}
}()
os.Stdout = f
}
ctx := context.Background()
for _, cmd := range commands {
if cmd.Name == flag.Arg(0) {
cmd.do(ctx, flag.Args()[1:]...)
return
}
}
log.Fatalf("Unknown command %q", flag.Arg(0))
}
func usage(w io.Writer) {
fmt.Fprintf(w, "Usage: %s [flags] <command> ...\n", os.Args[0])
flag.CommandLine.SetOutput(w)
flag.CommandLine.PrintDefaults()
fmt.Fprintf(w, "\n%s", cmdSummary)
}
var cmdSummary string // generated in init, below
func init() {
var buf bytes.Buffer
tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0)
for _, cmd := range commands {
fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc)
}
tw.Flush()
buf.WriteString(configHelp)
cmdSummary = buf.String()
}
var configHelp = `
For convenience, values of the -project, -instance and -creds flags
may be specified in ` + cbtrc.Filename() + ` in this format:
project = my-project-123
instance = my-instance
creds = path-to-account-key.json
All values are optional, and all will be overridden by flags.
`
var commands = []struct {
Name, Desc string
do func(context.Context, ...string)
Usage string
}{
{
Name: "count",
Desc: "Count rows in a table",
do: doCount,
Usage: "cbt count <table>",
},
{
Name: "createfamily",
Desc: "Create a column family",
do: doCreateFamily,
Usage: "cbt createfamily <table> <family>",
},
{
Name: "createtable",
Desc: "Create a table",
do: doCreateTable,
Usage: "cbt createtable <table>",
},
{
Name: "deletefamily",
Desc: "Delete a column family",
do: doDeleteFamily,
Usage: "cbt deletefamily <table> <family>",
},
{
Name: "deleterow",
Desc: "Delete a row",
do: doDeleteRow,
Usage: "cbt deleterow <table> <row>",
},
{
Name: "deletetable",
Desc: "Delete a table",
do: doDeleteTable,
Usage: "cbt deletetable <table>",
},
{
Name: "doc",
Desc: "Print godoc-suitable documentation for cbt",
do: doDoc,
Usage: "cbt doc",
},
{
Name: "help",
Desc: "Print help text",
do: doHelp,
Usage: "cbt help [command]",
},
{
Name: "listinstances",
Desc: "List instances in a project",
do: doListInstances,
Usage: "cbt listinstances",
},
{
Name: "lookup",
Desc: "Read from a single row",
do: doLookup,
Usage: "cbt lookup <table> <row>",
},
{
Name: "ls",
Desc: "List tables and column families",
do: doLS,
Usage: "cbt ls List tables\n" +
"cbt ls <table> List column families in <table>",
},
{
Name: "mddoc",
Desc: "Print documentation for cbt in Markdown format",
do: doMDDoc,
Usage: "cbt mddoc",
},
{
Name: "read",
Desc: "Read rows",
do: doRead,
Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]\n" +
" start=<row> Start reading at this row\n" +
" end=<row> Stop reading before this row\n" +
" prefix=<prefix> Read rows with this prefix\n" +
" count=<n> Read only this many rows\n",
},
{
Name: "set",
Desc: "Set value of a cell",
do: doSet,
Usage: "cbt set <table> <row> family:column=val[@ts] ...\n" +
" family:column=val[@ts] may be repeated to set multiple cells.\n" +
"\n" +
" ts is an optional integer timestamp.\n" +
" If it cannot be parsed, the `@ts` part will be\n" +
" interpreted as part of the value.",
},
{
Name: "setgcpolicy",
Desc: "Set the GC policy for a column family",
do: doSetGCPolicy,
Usage: "cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )\n" +
"\n" +
` maxage=<d> Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" +
" maxversions=<n> Maximum number of versions to preserve",
},
}
func doCount(ctx context.Context, args ...string) {
if len(args) != 1 {
log.Fatal("usage: cbt count <table>")
}
tbl := getClient().Open(args[0])
n := 0
err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool {
n++
return true
}, bigtable.RowFilter(bigtable.StripValueFilter()))
if err != nil {
log.Fatalf("Reading rows: %v", err)
}
fmt.Println(n)
}
func doCreateFamily(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatal("usage: cbt createfamily <table> <family>")
}
err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1])
if err != nil {
log.Fatalf("Creating column family: %v", err)
}
}
func doCreateTable(ctx context.Context, args ...string) {
if len(args) != 1 {
log.Fatal("usage: cbt createtable <table>")
}
err := getAdminClient().CreateTable(ctx, args[0])
if err != nil {
log.Fatalf("Creating table: %v", err)
}
}
func doDeleteFamily(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatal("usage: cbt deletefamily <table> <family>")
}
err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1])
if err != nil {
log.Fatalf("Deleting column family: %v", err)
}
}
func doDeleteRow(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatal("usage: cbt deleterow <table> <row>")
}
tbl := getClient().Open(args[0])
mut := bigtable.NewMutation()
mut.DeleteRow()
if err := tbl.Apply(ctx, args[1], mut); err != nil {
log.Fatalf("Deleting row: %v", err)
}
}
func doDeleteTable(ctx context.Context, args ...string) {
if len(args) != 1 {
log.Fatalf("Can't do `cbt deletetable %s`", args)
}
err := getAdminClient().DeleteTable(ctx, args[0])
if err != nil {
log.Fatalf("Deleting table: %v", err)
}
}
// to break circular dependencies
var (
doDocFn func(ctx context.Context, args ...string)
doHelpFn func(ctx context.Context, args ...string)
doMDDocFn func(ctx context.Context, args ...string)
)
func init() {
doDocFn = doDocReal
doHelpFn = doHelpReal
doMDDocFn = doMDDocReal
}
func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) }
func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) }
func doMDDoc(ctx context.Context, args ...string) { doMDDocFn(ctx, args...) }
func docFlags() []*flag.Flag {
// Only include specific flags, in a specific order.
var flags []*flag.Flag
for _, name := range []string{"project", "instance", "creds"} {
f := flag.Lookup(name)
if f == nil {
log.Fatalf("Flag not linked: -%s", name)
}
flags = append(flags, f)
}
return flags
}
func doDocReal(ctx context.Context, args ...string) {
data := map[string]interface{}{
"Commands": commands,
"Flags": docFlags(),
}
var buf bytes.Buffer
if err := docTemplate.Execute(&buf, data); err != nil {
log.Fatalf("Bad doc template: %v", err)
}
out, err := format.Source(buf.Bytes())
if err != nil {
log.Fatalf("Bad doc output: %v", err)
}
os.Stdout.Write(out)
}
func indentLines(s, ind string) string {
ss := strings.Split(s, "\n")
for i, p := range ss {
ss[i] = ind + p
}
return strings.Join(ss, "\n")
}
var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{
"indent": indentLines,
}).
Parse(`
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
// Run "go generate" to regenerate.
//go:generate go run cbt.go -o cbtdoc.go doc
/*
Cbt is a tool for doing basic interactions with Cloud Bigtable.
Usage:
cbt [options] command [arguments]
The commands are:
{{range .Commands}}
{{printf "%-25s %s" .Name .Desc}}{{end}}
Use "cbt help <command>" for more information about a command.
The options are:
{{range .Flags}}
-{{.Name}} string
{{.Usage}}{{end}}
{{range .Commands}}
{{.Desc}}
Usage:
{{indent .Usage "\t"}}
{{end}}
*/
package main
`))
func doHelpReal(ctx context.Context, args ...string) {
if len(args) == 0 {
usage(os.Stdout)
return
}
for _, cmd := range commands {
if cmd.Name == args[0] {
fmt.Println(cmd.Usage)
return
}
}
log.Fatalf("Don't know command %q", args[0])
}
func doListInstances(ctx context.Context, args ...string) {
if len(args) != 0 {
log.Fatalf("usage: cbt listinstances")
}
is, err := getInstanceAdminClient().Instances(ctx)
if err != nil {
log.Fatalf("Getting list of instances: %v", err)
}
tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
fmt.Fprintf(tw, "Instance Name\tInfo\n")
fmt.Fprintf(tw, "-------------\t----\n")
for _, i := range is {
fmt.Fprintf(tw, "%s\t%s\n", i.Name, i.DisplayName)
}
tw.Flush()
}
func doLookup(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatalf("usage: cbt lookup <table> <row>")
}
table, row := args[0], args[1]
tbl := getClient().Open(table)
r, err := tbl.ReadRow(ctx, row)
if err != nil {
log.Fatalf("Reading row: %v", err)
}
printRow(r)
}
func printRow(r bigtable.Row) {
fmt.Println(strings.Repeat("-", 40))
fmt.Println(r.Key())
var fams []string
for fam := range r {
fams = append(fams, fam)
}
sort.Strings(fams)
for _, fam := range fams {
ris := r[fam]
sort.Sort(byColumn(ris))
for _, ri := range ris {
ts := time.Unix(0, int64(ri.Timestamp)*1e3)
fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000"))
fmt.Printf(" %q\n", ri.Value)
}
}
}
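// byColumn sorts ReadItems by column name.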
type byColumn []bigtable.ReadItem
func (b byColumn) Len() int { return len(b) }
func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column }
func doLS(ctx context.Context, args ...string) {
switch len(args) {
default:
log.Fatalf("Can't do `cbt ls %s`", args)
case 0:
tables, err := getAdminClient().Tables(ctx)
if err != nil {
log.Fatalf("Getting list of tables: %v", err)
}
sort.Strings(tables)
for _, table := range tables {
fmt.Println(table)
}
case 1:
table := args[0]
ti, err := getAdminClient().TableInfo(ctx, table)
if err != nil {
log.Fatalf("Getting table info: %v", err)
}
sort.Strings(ti.Families)
for _, fam := range ti.Families {
fmt.Println(fam)
}
}
}
func doMDDocReal(ctx context.Context, args ...string) {
data := map[string]interface{}{
"Commands": commands,
"Flags": docFlags(),
}
var buf bytes.Buffer
if err := mddocTemplate.Execute(&buf, data); err != nil {
log.Fatalf("Bad mddoc template: %v", err)
}
io.Copy(os.Stdout, &buf)
}
var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{
"indent": indentLines,
}).
Parse(`
Cbt is a tool for doing basic interactions with Cloud Bigtable.
Usage:
cbt [options] command [arguments]
The commands are:
{{range .Commands}}
{{printf "%-25s %s" .Name .Desc}}{{end}}
Use "cbt help <command>" for more information about a command.
The options are:
{{range .Flags}}
-{{.Name}} string
{{.Usage}}{{end}}
{{range .Commands}}
## {{.Desc}}
{{indent .Usage "\t"}}
{{end}}
`))
func doRead(ctx context.Context, args ...string) {
if len(args) < 1 {
log.Fatalf("usage: cbt read <table> [args ...]")
}
tbl := getClient().Open(args[0])
parsed := make(map[string]string)
for _, arg := range args[1:] {
i := strings.Index(arg, "=")
if i < 0 {
log.Fatalf("Bad arg %q", arg)
}
key, val := arg[:i], arg[i+1:]
switch key {
default:
log.Fatalf("Unknown arg key %q", key)
case "limit":
// Be nicer; we used to support this, but renamed it to "end".
log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
case "start", "end", "prefix", "count":
parsed[key] = val
}
}
if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" {
log.Fatal(`"start"/"end" may not be mixed with "prefix"`)
}
var rr bigtable.RowRange
if start, end := parsed["start"], parsed["end"]; end != "" {
rr = bigtable.NewRange(start, end)
} else if start != "" {
rr = bigtable.InfiniteRange(start)
}
if prefix := parsed["prefix"]; prefix != "" {
rr = bigtable.PrefixRange(prefix)
}
var opts []bigtable.ReadOption
if count := parsed["count"]; count != "" {
n, err := strconv.ParseInt(count, 0, 64)
if err != nil {
log.Fatalf("Bad count %q: %v", count, err)
}
opts = append(opts, bigtable.LimitRows(n))
}
// TODO(dsymonds): Support filters.
err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
printRow(r)
return true
}, opts...)
if err != nil {
log.Fatalf("Reading rows: %v", err)
}
}
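// setArg matches a "family:column=value" argument for the set command.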
var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)
func doSet(ctx context.Context, args ...string) {
if len(args) < 3 {
log.Fatalf("usage: cbt set <table> <row> family:[column]=val[@ts] ...")
}
tbl := getClient().Open(args[0])
row := args[1]
mut := bigtable.NewMutation()
for _, arg := range args[2:] {
m := setArg.FindStringSubmatch(arg)
if m == nil {
log.Fatalf("Bad set arg %q", arg)
}
val := m[3]
ts := bigtable.Now()
if i := strings.LastIndex(val, "@"); i >= 0 {
// Try parsing a timestamp.
n, err := strconv.ParseInt(val[i+1:], 0, 64)
if err == nil {
val = val[:i]
ts = bigtable.Timestamp(n)
}
}
mut.Set(m[1], m[2], ts, []byte(val))
}
if err := tbl.Apply(ctx, row, mut); err != nil {
log.Fatalf("Applying mutation: %v", err)
}
}
func doSetGCPolicy(ctx context.Context, args ...string) {
if len(args) < 3 {
log.Fatalf("usage: cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )")
}
table := args[0]
fam := args[1]
var pol bigtable.GCPolicy
switch p := args[2]; {
case strings.HasPrefix(p, "maxage="):
d, err := parseDuration(p[7:])
if err != nil {
log.Fatal(err)
}
pol = bigtable.MaxAgePolicy(d)
case strings.HasPrefix(p, "maxversions="):
n, err := strconv.ParseUint(p[12:], 10, 16)
if err != nil {
log.Fatal(err)
}
pol = bigtable.MaxVersionsPolicy(int(n))
default:
log.Fatalf("Bad GC policy %q", p)
}
if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil {
log.Fatalf("Setting GC policy: %v", err)
}
}
// parseDuration parses a duration string.
// It is similar to Go's time.ParseDuration, except with a different set of supported units,
// and only simple formats supported.
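// For example, parseDuration("4d") returns 96 * time.Hour.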
func parseDuration(s string) (time.Duration, error) {
// [0-9]+[a-z]+
// Split [0-9]+ from [a-z]+.
i := 0
for ; i < len(s); i++ {
c := s[i]
if c < '0' || c > '9' {
break
}
}
ds, u := s[:i], s[i:]
if ds == "" || u == "" {
return 0, fmt.Errorf("invalid duration %q", s)
}
// Parse them.
d, err := strconv.ParseUint(ds, 10, 32)
if err != nil {
return 0, fmt.Errorf("invalid duration %q: %v", s, err)
}
unit, ok := unitMap[u]
if !ok {
return 0, fmt.Errorf("unknown unit %q in duration %q", u, s)
}
if d > uint64((1<<63-1)/unit) {
// overflow
return 0, fmt.Errorf("invalid duration %q overflows", s)
}
return time.Duration(d) * unit, nil
}
var unitMap = map[string]time.Duration{
"ms": time.Millisecond,
"s": time.Second,
"m": time.Minute,
"h": time.Hour,
"d": 24 * time.Hour,
}


@@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"testing"
"time"
)
func TestParseDuration(t *testing.T) {
tests := []struct {
in string
// out or fail are mutually exclusive
out time.Duration
fail bool
}{
{in: "10ms", out: 10 * time.Millisecond},
{in: "3s", out: 3 * time.Second},
{in: "60m", out: 60 * time.Minute},
{in: "12h", out: 12 * time.Hour},
{in: "7d", out: 168 * time.Hour},
{in: "", fail: true},
{in: "0", fail: true},
{in: "7ns", fail: true},
{in: "14mo", fail: true},
{in: "3.5h", fail: true},
{in: "106752d", fail: true}, // overflow
}
for _, tc := range tests {
got, err := parseDuration(tc.in)
if !tc.fail && err != nil {
t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err)
continue
}
if tc.fail && err == nil {
t.Errorf("parseDuration(%q) did not fail", tc.in)
continue
}
if tc.fail {
continue
}
if got != tc.out {
t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out)
}
}
}

vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go generated vendored Normal file

@@ -0,0 +1,191 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
// Run "go generate" to regenerate.
//go:generate go run cbt.go -o cbtdoc.go doc
/*
Cbt is a tool for doing basic interactions with Cloud Bigtable.
Usage:
cbt [options] command [arguments]
The commands are:
count Count rows in a table
createfamily Create a column family
createtable Create a table
deletefamily Delete a column family
deleterow Delete a row
deletetable Delete a table
doc Print godoc-suitable documentation for cbt
help Print help text
listinstances List instances in a project
lookup Read from a single row
ls List tables and column families
mddoc Print documentation for cbt in Markdown format
read Read rows
set Set value of a cell
setgcpolicy Set the GC policy for a column family
Use "cbt help <command>" for more information about a command.
The options are:
-project string
project ID
-instance string
Cloud Bigtable instance
-creds string
if set, use application credentials in this file
Count rows in a table
Usage:
cbt count <table>
Create a column family
Usage:
cbt createfamily <table> <family>
Create a table
Usage:
cbt createtable <table>
Delete a column family
Usage:
cbt deletefamily <table> <family>
Delete a row
Usage:
cbt deleterow <table> <row>
Delete a table
Usage:
cbt deletetable <table>
Print godoc-suitable documentation for cbt
Usage:
cbt doc
Print help text
Usage:
cbt help [command]
List instances in a project
Usage:
cbt listinstances
Read from a single row
Usage:
cbt lookup <table> <row>
List tables and column families
Usage:
cbt ls List tables
cbt ls <table> List column families in <table>
Print documentation for cbt in Markdown format
Usage:
cbt mddoc
Read rows
Usage:
cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]
start=<row> Start reading at this row
end=<row> Stop reading before this row
prefix=<prefix> Read rows with this prefix
count=<n> Read only this many rows
Set value of a cell
Usage:
cbt set <table> <row> family:column=val[@ts] ...
family:column=val[@ts] may be repeated to set multiple cells.
ts is an optional integer timestamp.
If it cannot be parsed, the `@ts` part will be
interpreted as part of the value.
Set the GC policy for a column family
Usage:
cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )
maxage=<d> Maximum timestamp age to preserve (e.g. "1h", "4d")
maxversions=<n> Maximum number of versions to preserve
*/
package main


@@ -0,0 +1,42 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
cbtemulator launches the in-memory Cloud Bigtable server on the given address.
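For example:
cbtemulator -host=localhost -port=9000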
*/
package main
import (
"flag"
"fmt"
"log"
"cloud.google.com/go/bigtable/bttest"
)
var (
host = flag.String("host", "localhost", "the address to bind to on the local machine")
port = flag.Int("port", 9000, "the port number to bind to on the local machine")
)
func main() {
flag.Parse()
srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port))
if err != nil {
log.Fatalf("failed to start emulator: %v", err)
}
fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr)
select {}
}


@@ -0,0 +1,186 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Loadtest does some load testing through the Go client library for Cloud Bigtable.
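A typical invocation (binary name and flag values here are illustrative):
loadtest -run_for=30s -req_count=100 -csv_output=stats.csv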
*/
package main
import (
"bytes"
"flag"
"fmt"
"log"
"math/rand"
"os"
"sync"
"sync/atomic"
"time"
"cloud.google.com/go/bigtable"
"cloud.google.com/go/bigtable/internal/cbtrc"
"cloud.google.com/go/bigtable/internal/stat"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
var (
runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist")
csvOutput = flag.String("csv_output", "",
"output path for statistics in .csv format. If this file already exists it will be overwritten.")
poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client")
reqCount = flag.Int("req_count", 100, "number of concurrent requests")
config *cbtrc.Config
client *bigtable.Client
adminClient *bigtable.AdminClient
)
func main() {
var err error
config, err = cbtrc.Load()
if err != nil {
log.Fatal(err)
}
config.RegisterFlags()
flag.Parse()
if err := config.CheckFlags(); err != nil {
log.Fatal(err)
}
if config.Creds != "" {
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
}
if flag.NArg() != 0 {
flag.Usage()
os.Exit(1)
}
var options []option.ClientOption
if *poolSize > 1 {
options = append(options, option.WithGRPCConnectionPool(*poolSize))
}
var csvFile *os.File
if *csvOutput != "" {
csvFile, err = os.Create(*csvOutput)
if err != nil {
log.Fatalf("creating csv output file: %v", err)
}
defer csvFile.Close()
log.Printf("Writing statistics to %q ...", *csvOutput)
}
log.Printf("Dialing connections...")
client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...)
if err != nil {
log.Fatalf("Making bigtable.Client: %v", err)
}
defer client.Close()
adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
if err != nil {
log.Fatalf("Making bigtable.AdminClient: %v", err)
}
defer adminClient.Close()
// Create a scratch table.
log.Printf("Setting up scratch table...")
if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil {
log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
}
if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil {
log.Fatalf("Making scratch table column family: %v", err)
}
// Upon a successful run, delete the table. Don't bother checking for errors.
defer adminClient.DeleteTable(context.Background(), *scratchTable)
log.Printf("Starting load test... (run for %v)", *runFor)
tbl := client.Open(*scratchTable)
sem := make(chan int, *reqCount) // limit the number of requests happening at once
var reads, writes stats
stopTime := time.Now().Add(*runFor)
var wg sync.WaitGroup
for time.Now().Before(stopTime) {
sem <- 1
wg.Add(1)
go func() {
defer wg.Done()
defer func() { <-sem }()
ok := true
opStart := time.Now()
var stats *stats
defer func() {
stats.Record(ok, time.Since(opStart))
}()
row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows
switch rand.Intn(10) {
default:
// read
stats = &reads
_, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
if err != nil {
log.Printf("Error doing read: %v", err)
ok = false
}
case 0, 1, 2, 3, 4:
// write
stats = &writes
mut := bigtable.NewMutation()
mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write
if err := tbl.Apply(context.Background(), row, mut); err != nil {
log.Printf("Error doing mutation: %v", err)
ok = false
}
}
}()
}
wg.Wait()
readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok)
writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok)
log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg)
log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg)
if csvFile != nil {
stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile)
}
}
var allStats int64 // atomic
type stats struct {
mu sync.Mutex
tries, ok int
ds []time.Duration
}
func (s *stats) Record(ok bool, d time.Duration) {
s.mu.Lock()
s.tries++
if ok {
s.ok++
}
s.ds = append(s.ds, d)
s.mu.Unlock()
if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
log.Printf("Progress: done %d ops", n)
}
}


@@ -0,0 +1,155 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Scantest does scan-related load testing against Cloud Bigtable. The logic here
mimics a similar test written using the Java client.
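An illustrative invocation (flag values are arbitrary):
    scantest -concurrent_scans=4 -row_limit=10000 mytable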
*/
package main
import (
"bytes"
"flag"
"fmt"
"log"
"math/rand"
"os"
"sync"
"sync/atomic"
"text/tabwriter"
"time"
"cloud.google.com/go/bigtable"
"cloud.google.com/go/bigtable/internal/cbtrc"
"cloud.google.com/go/bigtable/internal/stat"
"golang.org/x/net/context"
)
var (
runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans")
rowLimit = flag.Int("row_limit", 10000, "max number of records per scan")
config *cbtrc.Config
client *bigtable.Client
)
func main() {
flag.Usage = func() {
fmt.Printf("Usage: scantest [options] <table_name>\n\n")
flag.PrintDefaults()
}
var err error
config, err = cbtrc.Load()
if err != nil {
log.Fatal(err)
}
config.RegisterFlags()
flag.Parse()
if err := config.CheckFlags(); err != nil {
log.Fatal(err)
}
if config.Creds != "" {
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
}
if flag.NArg() != 1 {
flag.Usage()
os.Exit(1)
}
table := flag.Arg(0)
log.Printf("Dialing connections...")
client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
if err != nil {
log.Fatalf("Making bigtable.Client: %v", err)
}
defer client.Close()
log.Printf("Starting scan test... (run for %v)", *runFor)
tbl := client.Open(table)
sem := make(chan int, *numScans) // limit the number of requests happening at once
var scans stats
stopTime := time.Now().Add(*runFor)
var wg sync.WaitGroup
for time.Now().Before(stopTime) {
sem <- 1
wg.Add(1)
go func() {
defer wg.Done()
defer func() { <-sem }()
ok := true
opStart := time.Now()
defer func() {
scans.Record(ok, time.Since(opStart))
}()
// Start at a random row key
key := fmt.Sprintf("user%d", rand.Int63())
limit := bigtable.LimitRows(int64(*rowLimit))
noop := func(bigtable.Row) bool { return true }
if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil {
log.Printf("Error during scan: %v", err)
ok = false
}
}()
}
wg.Wait()
agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok)
log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v",
scans.ok, scans.tries, agg, throughputString(agg))
}
func throughputString(agg *stat.Aggregate) string {
var buf bytes.Buffer
tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
rowLimitF := float64(*rowLimit)
fmt.Fprintf(
tw,
"min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n",
rowLimitF/agg.Max.Seconds(),
rowLimitF/agg.Median.Seconds(),
rowLimitF/agg.Min.Seconds())
tw.Flush()
return buf.String()
}
var allStats int64 // atomic
type stats struct {
mu sync.Mutex
tries, ok int
ds []time.Duration
}
func (s *stats) Record(ok bool, d time.Duration) {
s.mu.Lock()
s.tries++
if ok {
s.ok++
}
s.ds = append(s.ds, d)
s.mu.Unlock()
if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
log.Printf("Progress: done %d ops", n)
}
}

vendor/cloud.google.com/go/bigtable/doc.go generated vendored Normal file

@@ -0,0 +1,119 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package bigtable is an API to Google Cloud Bigtable.
See https://cloud.google.com/bigtable/docs/ for general product documentation.
Setup and Credentials
Use NewClient or NewAdminClient to create a client that can be used to access
the data or admin APIs respectively. Both require credentials that have permission
to access the Cloud Bigtable API.
If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials
(https://developers.google.com/accounts/docs/application-default-credentials)
is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called.
To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource.
For instance, you can use service account credentials by visiting
https://cloud.google.com/console/project/MYPROJECT/apiui/credential,
creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing
jsonKey, err := ioutil.ReadFile(pathToKeyFile)
...
config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc.
...
client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx)))
...
Here, `google` means the golang.org/x/oauth2/google package
and `option` means the google.golang.org/api/option package.
Reading
The principal way to read from a Bigtable is to use the ReadRows method on *Table.
A RowRange specifies a contiguous portion of a table. A Filter may be provided through
RowFilter to limit or transform the data that is returned.
tbl := client.Open("mytable")
...
// Read all the rows starting with "com.google.",
// but only fetch the columns in the "links" family.
rr := bigtable.PrefixRange("com.google.")
err := tbl.ReadRows(ctx, rr, func(r Row) bool {
// do something with r
return true // keep going
}, bigtable.RowFilter(bigtable.FamilyFilter("links")))
...
To read a single row, use the ReadRow helper method.
r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key
...
Writing
This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite.
The former expresses idempotent operations.
The latter expresses non-idempotent operations and returns the new values of updated cells.
These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite),
building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite
methods on a Table.
For instance, to set a couple of cells in a table,
tbl := client.Open("mytable")
mut := bigtable.NewMutation()
mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1"))
mut.Set("links", "golang.org", bigtable.Now(), []byte("1"))
err := tbl.Apply(ctx, "com.google.cloud", mut)
...
To increment an encoded value in one cell,
tbl := client.Open("mytable")
rmw := bigtable.NewReadModifyWrite()
rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org"
r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw)
...
Retries
If a read or write operation encounters a transient error it will be retried until a successful
response, an unretryable error or the context deadline is reached. Non-idempotent writes (where
the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls
will not re-scan rows that have already been processed.
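For example, a sketch of bounding retries with a context deadline (the
duration is illustrative):
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	err := tbl.Apply(ctx, "com.google.cloud", mut)
	...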
*/
package bigtable // import "cloud.google.com/go/bigtable"
// Scope constants for authentication credentials.
// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile.
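// For example (a sketch; jsonKey and the google package are as in the package
// documentation above), a read-only credential scope can be requested with:
//
//	config, err := google.JWTConfigFromJSON(jsonKey, ReadonlyScope)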
const (
// Scope is the OAuth scope for Cloud Bigtable data operations.
Scope = "https://www.googleapis.com/auth/bigtable.data"
// ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations.
ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly"
// AdminScope is the OAuth scope for Cloud Bigtable table admin operations.
AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table"
// InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations.
InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster"
)
// clientUserAgent identifies the version of this package.
// It should be bumped upon significant changes only.
const clientUserAgent = "cbt-go/20160628"
// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"

vendor/cloud.google.com/go/bigtable/filter.go generated vendored Normal file

@@ -0,0 +1,156 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"fmt"
"strings"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
)
// A Filter represents a row filter.
type Filter interface {
String() string
proto() *btpb.RowFilter
}
// ChainFilters returns a filter that applies a sequence of filters.
func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} }
type chainFilter struct {
sub []Filter
}
func (cf chainFilter) String() string {
var ss []string
for _, sf := range cf.sub {
ss = append(ss, sf.String())
}
return "(" + strings.Join(ss, " | ") + ")"
}
func (cf chainFilter) proto() *btpb.RowFilter {
chain := &btpb.RowFilter_Chain{}
for _, sf := range cf.sub {
chain.Filters = append(chain.Filters, sf.proto())
}
return &btpb.RowFilter{
Filter: &btpb.RowFilter_Chain_{chain},
}
}
// InterleaveFilters returns a filter that applies a set of filters in parallel
// and interleaves the results.
func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} }
type interleaveFilter struct {
sub []Filter
}
func (ilf interleaveFilter) String() string {
var ss []string
for _, sf := range ilf.sub {
ss = append(ss, sf.String())
}
return "(" + strings.Join(ss, " + ") + ")"
}
func (ilf interleaveFilter) proto() *btpb.RowFilter {
inter := &btpb.RowFilter_Interleave{}
for _, sf := range ilf.sub {
inter.Filters = append(inter.Filters, sf.proto())
}
return &btpb.RowFilter{
Filter: &btpb.RowFilter_Interleave_{inter},
}
}
// RowKeyFilter returns a filter that matches cells from rows whose
// key matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) }
type rowKeyFilter string
func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) }
func (rkf rowKeyFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}}
}
// FamilyFilter returns a filter that matches cells whose family name
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func FamilyFilter(pattern string) Filter { return familyFilter(pattern) }
type familyFilter string
func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) }
func (ff familyFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}}
}
// ColumnFilter returns a filter that matches cells whose column name
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func ColumnFilter(pattern string) Filter { return columnFilter(pattern) }
type columnFilter string
func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) }
func (cf columnFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}}
}
// ValueFilter returns a filter that matches cells whose value
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func ValueFilter(pattern string) Filter { return valueFilter(pattern) }
type valueFilter string
func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) }
func (vf valueFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}}
}
// LatestNFilter returns a filter that matches the most recent N cells in each column.
func LatestNFilter(n int) Filter { return latestNFilter(n) }
type latestNFilter int32
func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) }
func (lnf latestNFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}}
}
// StripValueFilter returns a filter that replaces each value with the empty string.
func StripValueFilter() Filter { return stripValueFilter{} }
type stripValueFilter struct{}
func (stripValueFilter) String() string { return "strip_value()" }
func (stripValueFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}}
}
// TODO(dsymonds): More filters: cond, col/ts/value range, sampling
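// Example composition (a sketch; the table, range, and handler names are
// illustrative): fetch only the most recent cell in each column of the
// "links" family:
//
//	f := ChainFilters(FamilyFilter("links"), LatestNFilter(1))
//	err := tbl.ReadRows(ctx, rr, handleRow, RowFilter(f))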

vendor/cloud.google.com/go/bigtable/gc.go generated vendored Normal file

@@ -0,0 +1,131 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"fmt"
"strings"
"time"
durpb "github.com/golang/protobuf/ptypes/duration"
bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
)
// A GCPolicy represents a rule that determines which cells are eligible for garbage collection.
type GCPolicy interface {
String() string
proto() *bttdpb.GcRule
}
// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply.
func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} }
type intersectionPolicy struct {
sub []GCPolicy
}
func (ip intersectionPolicy) String() string {
var ss []string
for _, sp := range ip.sub {
ss = append(ss, sp.String())
}
return "(" + strings.Join(ss, " && ") + ")"
}
func (ip intersectionPolicy) proto() *bttdpb.GcRule {
inter := &bttdpb.GcRule_Intersection{}
for _, sp := range ip.sub {
inter.Rules = append(inter.Rules, sp.proto())
}
return &bttdpb.GcRule{
Rule: &bttdpb.GcRule_Intersection_{inter},
}
}
// UnionPolicy returns a GC policy that applies when any of its sub-policies apply.
func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} }
type unionPolicy struct {
sub []GCPolicy
}
func (up unionPolicy) String() string {
var ss []string
for _, sp := range up.sub {
ss = append(ss, sp.String())
}
return "(" + strings.Join(ss, " || ") + ")"
}
func (up unionPolicy) proto() *bttdpb.GcRule {
union := &bttdpb.GcRule_Union{}
for _, sp := range up.sub {
union.Rules = append(union.Rules, sp.proto())
}
return &bttdpb.GcRule{
Rule: &bttdpb.GcRule_Union_{union},
}
}
// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell
// except for the most recent n.
func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) }
type maxVersionsPolicy int
func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) }
func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule {
return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}}
}
// MaxAgePolicy returns a GC policy that applies to all cells
// older than the given age.
func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) }
type maxAgePolicy time.Duration
var units = []struct {
d time.Duration
suffix string
}{
{24 * time.Hour, "d"},
{time.Hour, "h"},
{time.Minute, "m"},
}
func (ma maxAgePolicy) String() string {
d := time.Duration(ma)
for _, u := range units {
if d%u.d == 0 {
return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix)
}
}
return fmt.Sprintf("age() > %d", d/time.Microsecond)
}
func (ma maxAgePolicy) proto() *bttdpb.GcRule {
// This doesn't handle overflows, etc.
// Fix this if people care about GC policies over 290 years.
ns := time.Duration(ma).Nanoseconds()
return &bttdpb.GcRule{
Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{
Seconds: ns / 1e9,
Nanos: int32(ns % 1e9),
}},
}
}
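// Example composition (a sketch): a cell becomes eligible for collection once
// it falls outside the five most recent versions or is older than a week,
// i.e. the family keeps at most five versions and nothing beyond seven days:
//
//	policy := UnionPolicy(MaxVersionsPolicy(5), MaxAgePolicy(7*24*time.Hour))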


@@ -0,0 +1,99 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cbtrc encapsulates common code for reading .cbtrc files.
package cbtrc
import (
"bufio"
"bytes"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
// Config represents a configuration.
type Config struct {
Project, Instance string // required
Creds string // optional
}
// RegisterFlags registers a set of standard flags for this config.
// It should be called before flag.Parse.
func (c *Config) RegisterFlags() {
flag.StringVar(&c.Project, "project", c.Project, "project ID")
flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance")
flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file")
}
// CheckFlags checks that the required config values are set.
func (c *Config) CheckFlags() error {
var missing []string
if c.Project == "" {
missing = append(missing, "-project")
}
if c.Instance == "" {
missing = append(missing, "-instance")
}
if len(missing) > 0 {
return fmt.Errorf("Missing %s", strings.Join(missing, " and "))
}
return nil
}
// Filename returns the filename consulted for standard configuration.
func Filename() string {
// TODO(dsymonds): Might need tweaking for Windows.
return filepath.Join(os.Getenv("HOME"), ".cbtrc")
}
// Load loads a .cbtrc file.
// If the file is not present, an empty config is returned.
func Load() (*Config, error) {
filename := Filename()
data, err := ioutil.ReadFile(filename)
if err != nil {
// silent fail if the file isn't there
if os.IsNotExist(err) {
return &Config{}, nil
}
return nil, fmt.Errorf("Reading %s: %v", filename, err)
}
c := new(Config)
s := bufio.NewScanner(bytes.NewReader(data))
for s.Scan() {
line := s.Text()
i := strings.Index(line, "=")
if i < 0 {
return nil, fmt.Errorf("Bad line in %s: %q", filename, line)
}
key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:])
switch key {
default:
return nil, fmt.Errorf("Unknown key in %s: %q", filename, key)
case "project":
c.Project = val
case "instance":
c.Instance = val
case "creds":
c.Creds = val
}
}
return c, s.Err()
}
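// A .cbtrc file is a sequence of key=value lines matching the switch above;
// for example (values are illustrative):
//
//	project = my-project
//	instance = my-instance
//	creds = /path/to/service-account.json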


@@ -0,0 +1,106 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
package gax
import (
"time"
"google.golang.org/grpc/codes"
)
type CallOption interface {
Resolve(*CallSettings)
}
type callOptions []CallOption
func (opts callOptions) Resolve(s *CallSettings) *CallSettings {
for _, opt := range opts {
opt.Resolve(s)
}
return s
}
// Encapsulates the call settings for a particular API call.
type CallSettings struct {
Timeout time.Duration
RetrySettings RetrySettings
}
// Per-call configurable settings for retrying upon transient failure.
type RetrySettings struct {
RetryCodes map[codes.Code]bool
BackoffSettings BackoffSettings
}
// Parameters to the exponential backoff algorithm for retrying.
type BackoffSettings struct {
DelayTimeoutSettings MultipliableDuration
RPCTimeoutSettings MultipliableDuration
}
type MultipliableDuration struct {
Initial time.Duration
Max time.Duration
Multiplier float64
}
func (w CallSettings) Resolve(s *CallSettings) {
s.Timeout = w.Timeout
s.RetrySettings = w.RetrySettings
s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes))
for key, value := range w.RetrySettings.RetryCodes {
s.RetrySettings.RetryCodes[key] = value
}
}
type withRetryCodes []codes.Code
func (w withRetryCodes) Resolve(s *CallSettings) {
s.RetrySettings.RetryCodes = make(map[codes.Code]bool)
for _, code := range w {
s.RetrySettings.RetryCodes[code] = true
}
}
// WithRetryCodes sets a list of Google API canonical error codes upon which a
// retry should be attempted.
func WithRetryCodes(retryCodes []codes.Code) CallOption {
return withRetryCodes(retryCodes)
}
type withDelayTimeoutSettings MultipliableDuration
func (w withDelayTimeoutSettings) Resolve(s *CallSettings) {
s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w)
}
// WithDelayTimeoutSettings specifies:
// - The initial delay time, in milliseconds, between the completion of
// the first failed request and the initiation of the first retrying
// request.
// - The multiplier by which to increase the delay time between the
// completion of failed requests, and the initiation of the subsequent
// retrying request.
// - The maximum delay time, in milliseconds, between requests. When this
// value is reached, `RetryDelayMultiplier` will no longer be used to
// increase delay time.
func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption {
return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier})
}
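// Illustrative use (a sketch mirroring this package's test): retry Unavailable
// and DeadlineExceeded with delays growing from 10ms toward a 200ms cap at a
// 1.5 multiplier:
//
//	opts := []CallOption{
//		WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}),
//		WithDelayTimeoutSettings(10*time.Millisecond, 200*time.Millisecond, 1.5),
//	}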


@@ -0,0 +1,84 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
package gax
import (
"math/rand"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"log"
"os"
)
var logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags)
// APICall is a user-defined call stub.
type APICall func(context.Context) error
// scaleDuration returns the product of a and mult.
func scaleDuration(a time.Duration, mult float64) time.Duration {
ns := float64(a) * mult
return time.Duration(ns)
}
// invokeWithRetry calls stub using an exponential backoff retry mechanism
// based on the values provided in callSettings.
func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error {
retrySettings := callSettings.RetrySettings
backoffSettings := callSettings.RetrySettings.BackoffSettings
delay := backoffSettings.DelayTimeoutSettings.Initial
for {
// If the deadline is exceeded...
if ctx.Err() != nil {
return ctx.Err()
}
err := stub(ctx)
code := grpc.Code(err)
if code == codes.OK {
return nil
}
if !retrySettings.RetryCodes[code] {
return err
}
// Sleep a random amount up to the current delay
d := time.Duration(rand.Int63n(int64(delay)))
delayCtx, cancel := context.WithTimeout(ctx, d)
logger.Printf("Retryable error: %v, retrying in %v", err, d)
<-delayCtx.Done()
cancel()
delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier)
if delay > backoffSettings.DelayTimeoutSettings.Max {
delay = backoffSettings.DelayTimeoutSettings.Max
}
}
}
// Invoke calls stub with a child of context modified by the specified options.
func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error {
settings := &CallSettings{}
callOptions(opts).Resolve(settings)
if len(settings.RetrySettings.RetryCodes) > 0 {
return invokeWithRetry(ctx, stub, *settings)
}
return stub(ctx)
}
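// Minimal sketch of wrapping a call in Invoke (someRPC is hypothetical): codes
// listed via WithRetryCodes are retried with the backoff above; any other
// error is returned immediately:
//
//	err := Invoke(ctx, func(ctx context.Context) error {
//		return someRPC(ctx)
//	}, opts...)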


@@ -0,0 +1,49 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gax
import (
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func TestRandomizedDelays(t *testing.T) {
max := 200 * time.Millisecond
settings := []CallOption{
WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}),
WithDelayTimeoutSettings(10*time.Millisecond, max, 1.5),
}
deadline := time.Now().Add(1 * time.Second)
ctx, _ := context.WithDeadline(context.Background(), deadline)
var invokeTime time.Time
Invoke(ctx, func(childCtx context.Context) error {
// Keep failing, make sure we never slept more than max (plus a fudge factor)
if !invokeTime.IsZero() {
if time.Since(invokeTime) > (max + 20*time.Millisecond) {
t.Fatalf("Slept too long: %v", max)
}
}
invokeTime = time.Now()
// Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90
errf := grpc.Errorf
return errf(codes.Unavailable, "")
}, settings...)
}


@@ -0,0 +1,48 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package option contains common code for dealing with client options.
package option
import (
"fmt"
"os"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
// DefaultClientOptions returns the default client options to use for the
// client's gRPC connection.
func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) {
var o []option.ClientOption
// Check the environment variables for the bigtable emulator.
// Dial it directly and don't pass any credentials.
if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, fmt.Errorf("emulator grpc.Dial: %v", err)
}
o = []option.ClientOption{option.WithGRPCConn(conn)}
} else {
o = []option.ClientOption{
option.WithEndpoint(endpoint),
option.WithScopes(scope),
option.WithUserAgent(userAgent),
}
}
return o, nil
}


@@ -0,0 +1,144 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stat
import (
"bytes"
"encoding/csv"
"fmt"
"io"
"math"
"sort"
"strconv"
"text/tabwriter"
"time"
)
type byDuration []time.Duration
func (data byDuration) Len() int { return len(data) }
func (data byDuration) Swap(i, j int) { data[i], data[j] = data[j], data[i] }
func (data byDuration) Less(i, j int) bool { return data[i] < data[j] }
// quantile returns a value representing the kth of q quantiles.
// May alter the order of data.
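// For instance, quantile(ds, 1, 2) yields the median and quantile(ds, 95, 100)
// the 95th percentile; NewAggregate below derives its summary statistics
// exactly this way.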
func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) {
if len(data) < 1 {
return 0, false
}
if k > q {
return 0, false
}
if k < 0 || q < 1 {
return 0, false
}
sort.Sort(byDuration(data))
if k == 0 {
return data[0], true
}
if k == q {
return data[len(data)-1], true
}
bucketSize := float64(len(data)-1) / float64(q)
i := float64(k) * bucketSize
lower := int(math.Trunc(i))
var upper int
if i > float64(lower) && lower+1 < len(data) {
// If the quantile lies between two elements
upper = lower + 1
} else {
upper = lower
}
weightUpper := i - float64(lower)
weightLower := 1 - weightUpper
return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true
}
type Aggregate struct {
Name string
Count, Errors int
Min, Median, Max time.Duration
P75, P90, P95, P99 time.Duration // percentiles
}
// NewAggregate constructs an aggregate from latencies. Returns nil if latencies contains no aggregatable data.
func NewAggregate(name string, latencies []time.Duration, errorCount int) *Aggregate {
agg := Aggregate{Name: name, Count: len(latencies), Errors: errorCount}
if len(latencies) == 0 {
return nil
}
var ok bool
if agg.Min, ok = quantile(latencies, 0, 2); !ok {
return nil
}
if agg.Median, ok = quantile(latencies, 1, 2); !ok {
return nil
}
if agg.Max, ok = quantile(latencies, 2, 2); !ok {
return nil
}
if agg.P75, ok = quantile(latencies, 75, 100); !ok {
return nil
}
if agg.P90, ok = quantile(latencies, 90, 100); !ok {
return nil
}
if agg.P95, ok = quantile(latencies, 95, 100); !ok {
return nil
}
if agg.P99, ok = quantile(latencies, 99, 100); !ok {
return nil
}
return &agg
}
func (agg *Aggregate) String() string {
if agg == nil {
return "no data"
}
var buf bytes.Buffer
tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n",
agg.Min, agg.Median, agg.Max, agg.P95, agg.P99)
tw.Flush()
return buf.String()
}
// WriteCSV writes a csv file to the given Writer,
// with a header row and one row per aggregate.
func WriteCSV(aggs []*Aggregate, iow io.Writer) error {
w := csv.NewWriter(iow)
defer w.Flush()
err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"})
if err != nil {
return err
}
for _, agg := range aggs {
err = w.Write([]string{
agg.Name, strconv.Itoa(agg.Count), strconv.Itoa(agg.Errors),
agg.Min.String(), agg.Median.String(), agg.Max.String(),
agg.P75.String(), agg.P90.String(), agg.P95.String(), agg.P99.String(),
})
if err != nil {
return err
}
}
return nil
}

vendor/cloud.google.com/go/bigtable/reader.go generated vendored Normal file

@@ -0,0 +1,250 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"bytes"
"fmt"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
)
// A Row is returned by ReadRows. The map is keyed by column family (the prefix
// of the column name before the colon). The values are the returned ReadItems
// for that column family in the order returned by Read.
type Row map[string][]ReadItem
// Key returns the row's key, or "" if the row is empty.
func (r Row) Key() string {
for _, items := range r {
if len(items) > 0 {
return items[0].Row
}
}
return ""
}
// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column.
type ReadItem struct {
Row, Column string
Timestamp Timestamp
Value []byte
}
// The current state of the read rows state machine.
type rrState int64
const (
newRow rrState = iota
rowInProgress
cellInProgress
)
// chunkReader handles cell chunks from the read rows response and combines
// them into full Rows.
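// A typical use (a sketch; chunks is an illustrative slice of cell chunks
// from a ReadRows response) feeds each chunk through Process, delivers any
// completed row, and then calls Close to verify the stream ended on a row
// boundary:
//
//	cr := newChunkReader()
//	for _, cc := range chunks {
//		row, err := cr.Process(cc)
//		...
//		if row != nil {
//			// deliver row
//		}
//	}
//	if err := cr.Close(); err != nil {
//		// stream ended mid-row
//	}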
type chunkReader struct {
state rrState
curKey []byte
curFam string
curQual []byte
curTS int64
curVal []byte
curRow Row
lastKey string
}
// newChunkReader returns a new chunkReader for handling read rows responses.
func newChunkReader() *chunkReader {
return &chunkReader{state: newRow}
}
// Process takes a cell chunk and returns a new Row if the given chunk
// completes a Row, or nil otherwise.
func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) {
var row Row
switch cr.state {
case newRow:
if err := cr.validateNewRow(cc); err != nil {
return nil, err
}
cr.curRow = make(Row)
cr.curKey = cc.RowKey
cr.curFam = cc.FamilyName.Value
cr.curQual = cc.Qualifier.Value
cr.curTS = cc.TimestampMicros
row = cr.handleCellValue(cc)
case rowInProgress:
if err := cr.validateRowInProgress(cc); err != nil {
return nil, err
}
if cc.GetResetRow() {
cr.resetToNewRow()
return nil, nil
}
if cc.FamilyName != nil {
cr.curFam = cc.FamilyName.Value
}
if cc.Qualifier != nil {
cr.curQual = cc.Qualifier.Value
}
cr.curTS = cc.TimestampMicros
row = cr.handleCellValue(cc)
case cellInProgress:
if err := cr.validateCellInProgress(cc); err != nil {
return nil, err
}
if cc.GetResetRow() {
cr.resetToNewRow()
return nil, nil
}
row = cr.handleCellValue(cc)
}
return row, nil
}
// Close must be called after all cell chunks from the response
// have been processed. An error will be returned if the reader is
// in an invalid state, in which case the error should be propagated to the caller.
func (cr *chunkReader) Close() error {
if cr.state != newRow {
return fmt.Errorf("invalid state for end of stream %q", cr.state)
}
return nil
}
// handleCellValue returns a Row if the cell value includes a commit, otherwise nil.
func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row {
if cc.ValueSize > 0 {
// ValueSize is specified so expect a split value of ValueSize bytes
if cr.curVal == nil {
cr.curVal = make([]byte, 0, cc.ValueSize)
}
cr.curVal = append(cr.curVal, cc.Value...)
cr.state = cellInProgress
} else {
// This cell is either the complete value or the last chunk of a split
if cr.curVal == nil {
cr.curVal = cc.Value
} else {
cr.curVal = append(cr.curVal, cc.Value...)
}
cr.finishCell()
if cc.GetCommitRow() {
return cr.commitRow()
} else {
cr.state = rowInProgress
}
}
return nil
}
func (cr *chunkReader) finishCell() {
ri := ReadItem{
Row: string(cr.curKey),
Column: fmt.Sprintf("%s:%s", cr.curFam, cr.curQual),
Timestamp: Timestamp(cr.curTS),
Value: cr.curVal,
}
cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri)
cr.curVal = nil
}
func (cr *chunkReader) commitRow() Row {
row := cr.curRow
cr.lastKey = cr.curRow.Key()
cr.resetToNewRow()
return row
}
func (cr *chunkReader) resetToNewRow() {
cr.curKey = nil
cr.curFam = ""
cr.curQual = nil
cr.curVal = nil
cr.curRow = nil
cr.curTS = 0
cr.state = newRow
}
func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error {
if cc.GetResetRow() {
return fmt.Errorf("reset_row not allowed between rows")
}
if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil {
return fmt.Errorf("missing key field for new row %v", cc)
}
if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) {
return fmt.Errorf("out of order row key: %q, %q", cr.lastKey, string(cc.RowKey))
}
return nil
}
func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error {
if err := cr.validateRowStatus(cc); err != nil {
return err
}
if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) {
return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey)
}
if cc.FamilyName != nil && cc.Qualifier == nil {
return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName)
}
return nil
}
func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error {
if err := cr.validateRowStatus(cc); err != nil {
return err
}
if cr.curVal == nil {
return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc)
}
if !cc.GetResetRow() && cr.isAnyKeyPresent(cc) {
return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc)
}
return nil
}
func (cr *chunkReader) isAnyKeyPresent(cc *btpb.ReadRowsResponse_CellChunk) bool {
return cc.RowKey != nil ||
cc.FamilyName != nil ||
cc.Qualifier != nil ||
cc.TimestampMicros != 0
}
// Validate a RowStatus, commit or reset, if present.
func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error {
// Resets can't be specified with any other part of a cell
if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) ||
cc.Value != nil ||
cc.ValueSize != 0 ||
cc.Labels != nil) {
return fmt.Errorf("reset must not be specified with other fields %v", cc)
}
if cc.GetCommitRow() && cc.ValueSize > 0 {
return fmt.Errorf("commit row found in between chunks in a cell")
}
return nil
}

vendor/cloud.google.com/go/bigtable/reader_test.go generated vendored Normal file

@@ -0,0 +1,343 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"encoding/json"
"fmt"
"io/ioutil"
"reflect"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/wrappers"
btspb "google.golang.org/genproto/googleapis/bigtable/v2"
)
// Indicates that a field in the proto should be omitted, rather than included
// as a wrapped empty string.
const nilStr = "<>"
func TestSingleCell(t *testing.T) {
cr := newChunkReader()
// All in one cell
row, err := cr.Process(cc("rk", "fm", "col", 1, "value", 0, true))
if err != nil {
t.Fatalf("Processing chunk: %v", err)
}
if row == nil {
t.Fatalf("Missing row")
}
if len(row["fm"]) != 1 {
t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"]))
}
want := []ReadItem{ri("rk", "fm", "col", 1, "value")}
if !reflect.DeepEqual(row["fm"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want)
}
if err := cr.Close(); err != nil {
t.Fatalf("Close: %v", err)
}
}
func TestMultipleCells(t *testing.T) {
cr := newChunkReader()
cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false))
cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false))
cr.Process(cc("rs", "fm2", "col1", 0, "val4", 0, false))
row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true))
if err != nil {
t.Fatalf("Processing chunk: %v", err)
}
if row == nil {
t.Fatalf("Missing row")
}
want := []ReadItem{
ri("rs", "fm1", "col1", 0, "val1"),
ri("rs", "fm1", "col1", 1, "val2"),
ri("rs", "fm1", "col2", 0, "val3"),
}
if !reflect.DeepEqual(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}
want = []ReadItem{
ri("rs", "fm2", "col1", 0, "val4"),
ri("rs", "fm2", "col2", 1, "extralongval5"),
}
if !reflect.DeepEqual(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
}
if err := cr.Close(); err != nil {
t.Fatalf("Close: %v", err)
}
}
func TestSplitCells(t *testing.T) {
cr := newChunkReader()
cr.Process(cc("rs", "fm1", "col1", 0, "hello ", 11, false))
cr.Process(ccData("world", 0, false))
row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true))
if err != nil {
t.Fatalf("Processing chunk: %v", err)
}
if row == nil {
t.Fatalf("Missing row")
}
want := []ReadItem{
ri("rs", "fm1", "col1", 0, "hello world"),
ri("rs", "fm1", "col2", 0, "val2"),
}
if !reflect.DeepEqual(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}
if err := cr.Close(); err != nil {
t.Fatalf("Close: %v", err)
}
}
func TestMultipleRows(t *testing.T) {
cr := newChunkReader()
row, err := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
if err != nil {
t.Fatalf("Processing chunk: %v", err)
}
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}
row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true))
if err != nil {
t.Fatalf("Processing chunk: %v", err)
}
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
if !reflect.DeepEqual(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
}
if err := cr.Close(); err != nil {
t.Fatalf("Close: %v", err)
}
}
func TestBlankQualifier(t *testing.T) {
cr := newChunkReader()
row, err := cr.Process(cc("rs1", "fm1", "", 1, "val1", 0, true))
if err != nil {
t.Fatalf("Processing chunk: %v", err)
}
want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}
row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true))
if err != nil {
t.Fatalf("Processing chunk: %v", err)
}
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
if !reflect.DeepEqual(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
}
if err := cr.Close(); err != nil {
t.Fatalf("Close: %v", err)
}
}
func TestReset(t *testing.T) {
cr := newChunkReader()
cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false))
cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false))
cr.Process(ccReset())
row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) {
t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want)
}
if err := cr.Close(); err != nil {
t.Fatalf("Close: %v", err)
}
}
func TestNewFamEmptyQualifier(t *testing.T) {
cr := newChunkReader()
cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
_, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true))
if err == nil {
t.Fatalf("Expected error on second chunk with no qualifier set")
}
}
// The read rows acceptance test reads a json file specifying a number of tests,
// each consisting of one or more cell chunk text protos and one or more resulting
// cells or errors.
type AcceptanceTest struct {
Tests []TestCase `json:"tests"`
}
type TestCase struct {
Name string `json:"name"`
Chunks []string `json:"chunks"`
Results []TestResult `json:"results"`
}
type TestResult struct {
RK string `json:"rk"`
FM string `json:"fm"`
Qual string `json:"qual"`
TS int64 `json:"ts"`
Value string `json:"value"`
Error bool `json:"error"` // If true, expect an error. Ignore any other field.
}
func TestAcceptance(t *testing.T) {
testJson, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json")
if err != nil {
t.Fatalf("could not open acceptance test file %v", err)
}
var accTest AcceptanceTest
err = json.Unmarshal(testJson, &accTest)
if err != nil {
t.Fatalf("could not parse acceptance test file: %v", err)
}
for _, test := range accTest.Tests {
runTestCase(t, test)
}
}
func runTestCase(t *testing.T, test TestCase) {
// Collect the results of processing each chunk
cr := newChunkReader()
var results []TestResult
var seenErr bool
for _, chunkText := range test.Chunks {
// Parse and pass each cell chunk to the ChunkReader
cc := &btspb.ReadRowsResponse_CellChunk{}
err := proto.UnmarshalText(chunkText, cc)
if err != nil {
t.Errorf("[%s] failed to unmarshal text proto: %s\n%s", test.Name, chunkText, err)
return
}
row, err := cr.Process(cc)
if err != nil {
results = append(results, TestResult{Error: true})
seenErr = true
break
} else {
// Turn the Row into TestResults
for fm, ris := range row {
for _, ri := range ris {
tr := TestResult{
RK: ri.Row,
FM: fm,
Qual: strings.Split(ri.Column, ":")[1],
TS: int64(ri.Timestamp),
Value: string(ri.Value),
}
results = append(results, tr)
}
}
}
}
// Only Close if we haven't seen an error yet; otherwise an error from Close is expected.
if !seenErr {
err := cr.Close()
if err != nil {
results = append(results, TestResult{Error: true})
}
}
got := toSet(results)
want := toSet(test.Results)
if !reflect.DeepEqual(got, want) {
t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want)
}
}
func toSet(res []TestResult) map[TestResult]bool {
set := make(map[TestResult]bool)
for _, tr := range res {
set[tr] = true
}
return set
}
// ri returns a ReadItem for the given components
func ri(rk string, fm string, qual string, ts int64, val string) ReadItem {
return ReadItem{Row: rk, Column: fmt.Sprintf("%s:%s", fm, qual), Value: []byte(val), Timestamp: Timestamp(ts)}
}
// cc returns a CellChunk proto
func cc(rk string, fm string, qual string, ts int64, val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
// The components of the cell key are wrapped and can be null or empty
var rkWrapper []byte
if rk == nilStr {
rkWrapper = nil
} else {
rkWrapper = []byte(rk)
}
var fmWrapper *wrappers.StringValue
if fm != nilStr {
fmWrapper = &wrappers.StringValue{Value: fm}
} else {
fmWrapper = nil
}
var qualWrapper *wrappers.BytesValue
if qual != nilStr {
qualWrapper = &wrappers.BytesValue{Value: []byte(qual)}
} else {
qualWrapper = nil
}
return &btspb.ReadRowsResponse_CellChunk{
RowKey: rkWrapper,
FamilyName: fmWrapper,
Qualifier: qualWrapper,
TimestampMicros: ts,
Value: []byte(val),
ValueSize: size,
RowStatus: &btspb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: commit}}
}
// ccData returns a CellChunk with only a value and size
func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
return cc(nilStr, nilStr, nilStr, 0, val, size, commit)
}
// ccReset returns a CellChunk with ResetRow set to true
func ccReset() *btspb.ReadRowsResponse_CellChunk {
return &btspb.ReadRowsResponse_CellChunk{
RowStatus: &btspb.ReadRowsResponse_CellChunk_ResetRow{ResetRow: true}}
}

vendor/cloud.google.com/go/bigtable/retry_test.go generated vendored Normal file

@@ -0,0 +1,359 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"reflect"
"strings"
"testing"
"time"
"cloud.google.com/go/bigtable/bttest"
"github.com/golang/protobuf/ptypes/wrappers"
"golang.org/x/net/context"
"google.golang.org/api/option"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
rpcpb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) {
srv, err := bttest.NewServer("127.0.0.1:0", opt...)
if err != nil {
return nil, nil, err
}
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
if err != nil {
return nil, nil, err
}
client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
if err != nil {
return nil, nil, err
}
adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
if err != nil {
return nil, nil, err
}
if err := adminClient.CreateTable(context.Background(), "table"); err != nil {
return nil, nil, err
}
if err := adminClient.CreateColumnFamily(context.Background(), "table", "cf"); err != nil {
return nil, nil, err
}
t := client.Open("table")
cleanupFunc := func() {
adminClient.Close()
client.Close()
srv.Close()
}
return t, cleanupFunc, nil
}
func TestRetryApply(t *testing.T) {
ctx := context.Background()
errCount := 0
code := codes.Unavailable // Will be retried
// Intercept requests and return an error or defer to the underlying handler
errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 {
errCount++
return nil, grpc.Errorf(code, "")
}
return handler(ctx, req)
}
tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector))
defer cleanup()
if err != nil {
t.Fatalf("fake server setup: %v", err)
}
mut := NewMutation()
mut.Set("cf", "col", 1, []byte("val"))
if err := tbl.Apply(ctx, "row1", mut); err != nil {
t.Errorf("applying single mutation with retries: %v", err)
}
row, err := tbl.ReadRow(ctx, "row1")
if err != nil {
t.Errorf("reading single value with retries: %v", err)
}
if row == nil {
t.Errorf("applying single mutation with retries: could not read back row")
}
code = codes.FailedPrecondition // Won't be retried
errCount = 0
if err := tbl.Apply(ctx, "row", mut); err == nil {
t.Errorf("applying single mutation with no retries: no error")
}
// Check and mutate
mutTrue := NewMutation()
mutTrue.DeleteRow()
mutFalse := NewMutation()
mutFalse.Set("cf", "col", 1, []byte("val"))
condMut := NewCondMutation(ValueFilter("."), mutTrue, mutFalse)
errCount = 0
code = codes.Unavailable // Will be retried
if err := tbl.Apply(ctx, "row1", condMut); err != nil {
t.Errorf("conditionally mutating row with retries: %v", err)
}
row, err = tbl.ReadRow(ctx, "row1") // row1 already in the table
if err != nil {
t.Errorf("reading single value after conditional mutation: %v", err)
}
if row != nil {
t.Errorf("reading single value after conditional mutation: row not deleted")
}
errCount = 0
code = codes.FailedPrecondition // Won't be retried
if err := tbl.Apply(ctx, "row", condMut); err == nil {
t.Errorf("conditionally mutating row with no retries: no error")
}
}
func TestRetryApplyBulk(t *testing.T) {
ctx := context.Background()
// Intercept requests and delegate to an interceptor defined by the test case
errCount := 0
var f func(grpc.ServerStream) error
errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
if strings.HasSuffix(info.FullMethod, "MutateRows") {
return f(ss)
}
return handler(srv, ss)
}
tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector))
defer cleanup()
if err != nil {
t.Fatalf("fake server setup: %v", err)
}
errCount = 0
// Test overall request failure and retries
f = func(ss grpc.ServerStream) error {
if errCount < 3 {
errCount++
return grpc.Errorf(codes.Aborted, "")
}
return nil
}
mut := NewMutation()
mut.Set("cf", "col", 1, []byte{})
errors, err := tbl.ApplyBulk(ctx, []string{"row2"}, []*Mutation{mut})
if errors != nil || err != nil {
t.Errorf("bulk with request failure: got: %v, %v, want: nil", errors, err)
}
// Test failures and retries in one request
errCount = 0
m1 := NewMutation()
m1.Set("cf", "col", 1, []byte{})
m2 := NewMutation()
m2.Set("cf", "col2", 1, []byte{})
m3 := NewMutation()
m3.Set("cf", "col3", 1, []byte{})
f = func(ss grpc.ServerStream) error {
var err error
req := new(btpb.MutateRowsRequest)
ss.RecvMsg(req)
switch errCount {
case 0:
// Retryable request failure
err = grpc.Errorf(codes.Unavailable, "")
case 1:
// Two mutations fail
writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted)
err = nil
case 2:
// Two failures were retried. One will succeed.
if want, got := 2, len(req.Entries); want != got {
t.Errorf("2 bulk retries, got: %d, want %d", got, want)
}
writeMutateRowsResponse(ss, codes.OK, codes.Aborted)
err = nil
case 3:
// One failure was retried and will succeed.
if want, got := 1, len(req.Entries); want != got {
t.Errorf("1 bulk retry, got: %d, want %d", got, want)
}
writeMutateRowsResponse(ss, codes.OK)
err = nil
}
errCount++
return err
}
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3})
if errors != nil || err != nil {
t.Errorf("bulk with retries: got: %v, %v, want: nil", errors, err)
}
// Test unretryable errors
niMut := NewMutation()
niMut.Set("cf", "col", ServerTime, []byte{}) // Non-idempotent
errCount = 0
f = func(ss grpc.ServerStream) error {
var err error
req := new(btpb.MutateRowsRequest)
ss.RecvMsg(req)
switch errCount {
case 0:
// Give non-idempotent mutation a retryable error code.
// Nothing should be retried.
writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted)
err = nil
case 1:
t.Errorf("unretryable errors: got one retry, want no retries")
}
errCount++
return err
}
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut})
if err != nil {
t.Errorf("unretryable errors: request failed %v")
}
want := []error{
grpc.Errorf(codes.FailedPrecondition, ""),
grpc.Errorf(codes.Aborted, ""),
}
if !reflect.DeepEqual(want, errors) {
t.Errorf("unretryable errors: got: %v, want: %v", errors, want)
}
// Test individual errors and a deadline exceeded
f = func(ss grpc.ServerStream) error {
writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted)
return nil
}
ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
defer cancel()
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3})
wantErr := context.DeadlineExceeded
if wantErr != err {
t.Errorf("deadline exceeded error: got: %v, want: %v", err, wantErr)
}
if errors != nil {
t.Errorf("deadline exceeded errors: got: %v, want: nil", err)
}
}
func writeMutateRowsResponse(ss grpc.ServerStream, codes ...codes.Code) error {
res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(codes))}
for i, code := range codes {
res.Entries[i] = &btpb.MutateRowsResponse_Entry{
Index: int64(i),
Status: &rpcpb.Status{Code: int32(code), Message: ""},
}
}
return ss.SendMsg(res)
}
func TestRetainRowsAfter(t *testing.T) {
prevRowRange := NewRange("a", "z")
prevRowKey := "m"
want := NewRange("m\x00", "z")
got := prevRowRange.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(want, got) {
t.Errorf("range retry: got %v, want %v", got, want)
}
prevRowList := RowList{"a", "b", "c", "d", "e", "f"}
prevRowKey = "b"
wantList := RowList{"c", "d", "e", "f"}
got = prevRowList.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(wantList, got) {
t.Errorf("list retry: got %v, want %v", got, wantList)
}
}
func TestRetryReadRows(t *testing.T) {
ctx := context.Background()
// Intercept requests and delegate to an interceptor defined by the test case
errCount := 0
var f func(grpc.ServerStream) error
errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
if strings.HasSuffix(info.FullMethod, "ReadRows") {
return f(ss)
}
return handler(ctx, ss)
}
tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector))
if err != nil {
t.Fatalf("fake server setup: %v", err)
}
defer cleanup()
errCount = 0
// Test overall request failure and retries
f = func(ss grpc.ServerStream) error {
var err error
req := new(btpb.ReadRowsRequest)
ss.RecvMsg(req)
switch errCount {
case 0:
// Retryable request failure
err = grpc.Errorf(codes.Unavailable, "")
case 1:
// Write two rows then error
writeReadRowsResponse(ss, "a", "b")
err = grpc.Errorf(codes.Unavailable, "")
case 2:
// Retryable request failure
if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
t.Errorf("2 range retries: got %q, want %q", got, want)
}
err = grpc.Errorf(codes.Unavailable, "")
case 3:
// Write two more rows
writeReadRowsResponse(ss, "c", "d")
err = nil
}
errCount++
return err
}
var got []string
if err := tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool {
got = append(got, r.Key())
return true
}); err != nil {
t.Errorf("reading rows with retries: %v", err)
}
want := []string{"a", "b", "c", "d"}
if !reflect.DeepEqual(got, want) {
t.Errorf("retry range integration: got %v, want %v", got, want)
}
}
func writeReadRowsResponse(ss grpc.ServerStream, rowKeys ...string) error {
var chunks []*btpb.ReadRowsResponse_CellChunk
for _, key := range rowKeys {
chunks = append(chunks, &btpb.ReadRowsResponse_CellChunk{
RowKey: []byte(key),
FamilyName: &wrappers.StringValue{Value: "fm"},
Qualifier: &wrappers.BytesValue{Value: []byte("col")},
RowStatus: &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true},
})
}
return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks})
}
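// Illustrative note (not part of the vendored file): the tests above drive
// retry behavior by registering error-injecting gRPC interceptors on a fake
// in-process server. A minimal sketch of that wiring, assuming only the
// google.golang.org/grpc package; the service registration is elided and
// hypothetical:
//
// errCount := 0
// errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
// if errCount < 3 { // fail the first three calls, then defer to the real handler
// errCount++
// return nil, grpc.Errorf(codes.Unavailable, "")
// }
// return handler(ctx, req)
// }
// srv := grpc.NewServer(grpc.UnaryInterceptor(errInjector))
// // register the fake Bigtable service on srv and serve on a local listener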

File diff suppressed because it is too large

20
vendor/cloud.google.com/go/cloud.go generated vendored Normal file

@@ -0,0 +1,20 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cloud is the root of the packages used to access Google Cloud
// Services. See https://godoc.org/cloud.google.com/go for a full list
// of sub-packages.
//
// This package documents how to authorize and authenticate the sub packages.
package cloud // import "cloud.google.com/go"

438
vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored Normal file

@@ -0,0 +1,438 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
"cloud.google.com/go/internal"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
metaClient = &http.Client{
Transport: &internal.Transport{
Base: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
},
}
subscribeClient = &http.Client{
Transport: &internal.Transport{
Base: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
},
}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
val, _, err := getETag(metaClient, suffix)
return val, err
}
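// Illustrative usage sketch (not part of the vendored file); the suffix is
// one documented by the metadata service:
//
// val, err := metadata.Get("instance/hostname")
// if _, ok := err.(metadata.NotDefinedError); ok {
// // The entry does not exist; treat the value as unset.
// } else if err != nil {
// // Transport or server error.
// }
// _ = val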
// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
res, err := client.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
func getTrimmed(suffix string) (s string, err error) {
s, err = Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *cachedValue) get() (v string, err error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = getTrimmed(c.k)
} else {
v, err = Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
go func() {
res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// metaClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := getETag(subscribeClient, suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
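// Illustrative usage sketch (the attribute name is hypothetical): block
// watching a value until it is deleted or the callback reports an error.
//
// err := metadata.Subscribe("instance/attributes/my-attr", func(v string, ok bool) error {
// if !ok {
// log.Print("attribute deleted")
// return nil
// }
// log.Printf("attribute is now %q", v)
// return nil
// })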
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
return getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceName returns the current VM's instance name, i.e. the first
// component of its hostname.
func InstanceName() (string, error) {
host, err := Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
zone, err := getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}


@@ -0,0 +1,48 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metadata
import (
"os"
"sync"
"testing"
)
func TestOnGCE_Stress(t *testing.T) {
if testing.Short() {
t.Skip("skipping in -short mode")
}
var last bool
for i := 0; i < 100; i++ {
onGCEOnce = sync.Once{}
now := OnGCE()
if i > 0 && now != last {
t.Errorf("%d. changed from %v to %v", i, last, now)
}
last = now
}
t.Logf("OnGCE() = %v", last)
}
func TestOnGCE_Force(t *testing.T) {
onGCEOnce = sync.Once{}
old := os.Getenv(metadataHostEnv)
defer os.Setenv(metadataHostEnv, old)
os.Setenv(metadataHostEnv, "127.0.0.1")
if !OnGCE() {
t.Error("OnGCE() = false; want true")
}
}

273
vendor/cloud.google.com/go/container/container.go generated vendored Normal file

@@ -0,0 +1,273 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package container contains a Google Container Engine client.
//
// For more information about the API,
// see https://cloud.google.com/container-engine/docs
package container // import "cloud.google.com/go/container"
import (
"errors"
"fmt"
"time"
"golang.org/x/net/context"
raw "google.golang.org/api/container/v1"
"google.golang.org/api/option"
"google.golang.org/api/transport"
)
type Type string
const (
TypeCreate = Type("createCluster")
TypeDelete = Type("deleteCluster")
)
type Status string
const (
StatusDone = Status("done")
StatusPending = Status("pending")
StatusRunning = Status("running")
StatusError = Status("error")
StatusProvisioning = Status("provisioning")
StatusStopping = Status("stopping")
)
const prodAddr = "https://container.googleapis.com/"
const userAgent = "gcloud-golang-container/20151008"
// Client is a Google Container Engine client, which may be used to manage
// clusters with a project. It must be constructed via NewClient.
type Client struct {
projectID string
svc *raw.Service
}
// NewClient creates a new Google Container Engine client.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
o := []option.ClientOption{
option.WithEndpoint(prodAddr),
option.WithScopes(raw.CloudPlatformScope),
option.WithUserAgent(userAgent),
}
o = append(o, opts...)
httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
svc, err := raw.New(httpClient)
if err != nil {
return nil, fmt.Errorf("constructing container client: %v", err)
}
svc.BasePath = endpoint
c := &Client{
projectID: projectID,
svc: svc,
}
return c, nil
}
// Resource is a Google Container Engine cluster resource.
type Resource struct {
// Name is the name of this cluster. The name must be unique
// within this project and zone, and can be up to 40 characters.
Name string
// Description is the description of the cluster. Optional.
Description string
// Zone is the Google Compute Engine zone in which the cluster resides.
Zone string
// Status is the current status of the cluster. It can be
// StatusError, StatusProvisioning, StatusRunning or StatusStopping.
Status Status
// Num is the number of the nodes in this cluster resource.
Num int64
// APIVersion is the version of the Kubernetes master and kubelets running
// in this cluster. Allowed value is 0.4.2, or leave blank to
// pick up the latest stable release.
APIVersion string
// Endpoint is the IP address of this cluster's Kubernetes master.
// The endpoint can be accessed at https://username:password@endpoint/.
// See Username and Password fields for the username and password information.
Endpoint string
// Username is the username to use when accessing the Kubernetes master endpoint.
Username string
// Password is the password to use when accessing the Kubernetes master endpoint.
Password string
// ContainerIPv4CIDR is the IP addresses of the container pods in
// this cluster, in CIDR notation (e.g. 1.2.3.4/29).
ContainerIPv4CIDR string
// ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this
// cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are
// always in the 10.0.0.0/16 range.
ServicesIPv4CIDR string
// MachineType is a Google Compute Engine machine type (e.g. n1-standard-1).
// If none set, the default type is used while creating a new cluster.
MachineType string
// This field is ignored. It was removed from the underlying container API in v1.
SourceImage string
// Created is the creation time of this cluster.
Created time.Time
}
func resourceFromRaw(c *raw.Cluster) *Resource {
if c == nil {
return nil
}
r := &Resource{
Name: c.Name,
Description: c.Description,
Zone: c.Zone,
Status: Status(c.Status),
Num: c.InitialNodeCount,
APIVersion: c.InitialClusterVersion,
Endpoint: c.Endpoint,
Username: c.MasterAuth.Username,
Password: c.MasterAuth.Password,
ContainerIPv4CIDR: c.ClusterIpv4Cidr,
ServicesIPv4CIDR: c.ServicesIpv4Cidr,
MachineType: c.NodeConfig.MachineType,
}
r.Created, _ = time.Parse(time.RFC3339, c.CreateTime)
return r
}
func resourcesFromRaw(c []*raw.Cluster) []*Resource {
r := make([]*Resource, len(c))
for i, val := range c {
r[i] = resourceFromRaw(val)
}
return r
}
// Op represents a Google Container Engine API operation.
type Op struct {
// Name is the name of the operation.
Name string
// Zone is the Google Compute Engine zone.
Zone string
// This field is ignored. It was removed from the underlying container API in v1.
TargetURL string
// Type is the operation type. It can be either TypeCreate or TypeDelete.
Type Type
// Status is the current status of this operation. It can be either
// StatusDone or StatusPending.
Status Status
}
func opFromRaw(o *raw.Operation) *Op {
if o == nil {
return nil
}
return &Op{
Name: o.Name,
Zone: o.Zone,
Type: Type(o.OperationType),
Status: Status(o.Status),
}
}
func opsFromRaw(o []*raw.Operation) []*Op {
ops := make([]*Op, len(o))
for i, val := range o {
ops[i] = opFromRaw(val)
}
return ops
}
// Clusters returns a list of cluster resources from the specified zone.
// If no zone is specified, it returns all clusters under the user project.
func (c *Client) Clusters(ctx context.Context, zone string) ([]*Resource, error) {
if zone == "" {
zone = "-"
}
resp, err := c.svc.Projects.Zones.Clusters.List(c.projectID, zone).Do()
if err != nil {
return nil, err
}
return resourcesFromRaw(resp.Clusters), nil
}
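// Illustrative usage sketch (the project ID and zone are hypothetical):
//
// c, err := container.NewClient(ctx, "my-project")
// if err != nil {
// // Handle error.
// }
// clusters, err := c.Clusters(ctx, "us-central1-b")
// if err != nil {
// // Handle error.
// }
// for _, cl := range clusters {
// fmt.Println(cl.Name, cl.Status)
// }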
// Cluster returns metadata about the specified cluster.
func (c *Client) Cluster(ctx context.Context, zone, name string) (*Resource, error) {
resp, err := c.svc.Projects.Zones.Clusters.Get(c.projectID, zone, name).Do()
if err != nil {
return nil, err
}
return resourceFromRaw(resp), nil
}
// CreateCluster creates a new cluster with the provided metadata
// in the specified zone.
func (c *Client) CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) {
panic("not implemented")
}
// DeleteCluster deletes a cluster.
func (c *Client) DeleteCluster(ctx context.Context, zone, name string) error {
_, err := c.svc.Projects.Zones.Clusters.Delete(c.projectID, zone, name).Do()
return err
}
// Operations returns a list of operations from the specified zone.
// If no zone is specified, it lists all operations running under
// the user's project.
func (c *Client) Operations(ctx context.Context, zone string) ([]*Op, error) {
if zone == "" {
resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, "-").Do()
if err != nil {
return nil, err
}
return opsFromRaw(resp.Operations), nil
}
resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, zone).Do()
if err != nil {
return nil, err
}
return opsFromRaw(resp.Operations), nil
}
// Operation returns an operation.
func (c *Client) Operation(ctx context.Context, zone, name string) (*Op, error) {
resp, err := c.svc.Projects.Zones.Operations.Get(c.projectID, zone, name).Do()
if err != nil {
return nil, err
}
if resp.StatusMessage != "" {
return nil, errors.New(resp.StatusMessage)
}
return opFromRaw(resp), nil
}

588
vendor/cloud.google.com/go/datastore/datastore.go generated vendored Normal file

@@ -0,0 +1,588 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"errors"
"fmt"
"log"
"os"
"reflect"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const (
prodAddr = "datastore.googleapis.com:443"
userAgent = "gcloud-golang-datastore/20160401"
)
// ScopeDatastore grants permissions to view and/or manage datastore entities
const ScopeDatastore = "https://www.googleapis.com/auth/datastore"
// protoClient is an interface for *transport.ProtoClient to support injecting
// fake clients in tests.
type protoClient interface {
Call(context.Context, string, proto.Message, proto.Message) error
}
// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
// metadata to be sent in each request for server-side traffic management.
type datastoreClient struct {
c pb.DatastoreClient
md metadata.MD
}
func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
return &datastoreClient{
c: pb.NewDatastoreClient(conn),
md: metadata.Pairs(resourcePrefixHeader, "projects/"+projectID),
}
}
func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) {
return dc.c.Lookup(metadata.NewContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) {
return dc.c.RunQuery(metadata.NewContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) {
return dc.c.BeginTransaction(metadata.NewContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) {
return dc.c.Commit(metadata.NewContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) {
return dc.c.Rollback(metadata.NewContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) {
return dc.c.AllocateIds(metadata.NewContext(ctx, dc.md), in, opts...)
}
// Client is a client for reading and writing data in a datastore dataset.
type Client struct {
conn *grpc.ClientConn
client pb.DatastoreClient
endpoint string
dataset string // Called dataset by the datastore API, synonym for project ID.
}
// NewClient creates a new Client for a given dataset.
// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable.
// If the DATASTORE_EMULATOR_HOST environment variable is set, client will use its value
// to connect to a locally-running datastore emulator.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
var o []option.ClientOption
// Environment variables for gcd emulator:
// https://cloud.google.com/datastore/docs/tools/datastore-emulator
// If the emulator is available, dial it directly (and don't pass any credentials).
if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, fmt.Errorf("grpc.Dial: %v", err)
}
o = []option.ClientOption{option.WithGRPCConn(conn)}
} else {
o = []option.ClientOption{
option.WithEndpoint(prodAddr),
option.WithScopes(ScopeDatastore),
option.WithUserAgent(userAgent),
}
}
// Warn if we see the legacy emulator environment variables.
if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" {
log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.")
}
if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" {
log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.")
}
if projectID == "" {
projectID = os.Getenv("DATASTORE_PROJECT_ID")
}
if projectID == "" {
return nil, errors.New("datastore: missing project/dataset id")
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
return &Client{
conn: conn,
client: newDatastoreClient(conn, projectID),
dataset: projectID,
}, nil
}
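// Illustrative sketch (the emulator address is hypothetical): with
// DATASTORE_EMULATOR_HOST set, NewClient dials the emulator directly and
// skips credentials.
//
// os.Setenv("DATASTORE_EMULATOR_HOST", "localhost:8081")
// client, err := datastore.NewClient(context.Background(), "my-project")
// if err != nil {
// // Handle error.
// }
// defer client.Close()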
var (
// ErrInvalidEntityType is returned when functions like Get or Next are
// passed a dst or src argument of invalid type.
ErrInvalidEntityType = errors.New("datastore: invalid entity type")
// ErrInvalidKey is returned when an invalid key is presented.
ErrInvalidKey = errors.New("datastore: invalid key")
// ErrNoSuchEntity is returned when no entity was found for a given key.
ErrNoSuchEntity = errors.New("datastore: no such entity")
)
type multiArgType int
const (
multiArgTypeInvalid multiArgType = iota
multiArgTypePropertyLoadSaver
multiArgTypeStruct
multiArgTypeStructPtr
multiArgTypeInterface
)
// nsKey is the type of the context.Context key to store the datastore
// namespace.
type nsKey struct{}
// WithNamespace returns a new context that limits the scope of its
// parent context with a Datastore namespace.
func WithNamespace(parent context.Context, namespace string) context.Context {
return context.WithValue(parent, nsKey{}, namespace)
}
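// Illustrative sketch (the namespace name is hypothetical): keys created
// from, and operations issued under, this context are scoped to the given
// namespace.
//
// ctx = datastore.WithNamespace(ctx, "staging")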
// ctxNamespace returns the active namespace for a context.
// It defaults to "" if no namespace was specified.
func ctxNamespace(ctx context.Context) string {
v, _ := ctx.Value(nsKey{}).(string)
return v
}
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct.
// StructType is the type of the struct pointed to by the destination argument
// passed to Get or to Iterator.Next.
type ErrFieldMismatch struct {
StructType reflect.Type
FieldName string
Reason string
}
func (e *ErrFieldMismatch) Error() string {
return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
e.FieldName, e.StructType, e.Reason)
}
// GeoPoint represents a location as latitude/longitude in degrees.
type GeoPoint struct {
Lat, Lng float64
}
// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
func (g GeoPoint) Valid() bool {
return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
}
func keyToProto(k *Key) *pb.Key {
if k == nil {
return nil
}
// TODO(jbd): Eliminate unrequired allocations.
var path []*pb.Key_PathElement
for {
el := &pb.Key_PathElement{Kind: k.kind}
if k.id != 0 {
el.IdType = &pb.Key_PathElement_Id{k.id}
} else if k.name != "" {
el.IdType = &pb.Key_PathElement_Name{k.name}
}
path = append([]*pb.Key_PathElement{el}, path...)
if k.parent == nil {
break
}
k = k.parent
}
key := &pb.Key{Path: path}
if k.namespace != "" {
key.PartitionId = &pb.PartitionId{
NamespaceId: k.namespace,
}
}
return key
}
// protoToKey decodes a protocol buffer representation of a key into an
// equivalent *Key object. If the key is invalid, protoToKey will return the
// invalid key along with ErrInvalidKey.
func protoToKey(p *pb.Key) (*Key, error) {
var key *Key
var namespace string
if partition := p.PartitionId; partition != nil {
namespace = partition.NamespaceId
}
for _, el := range p.Path {
key = &Key{
namespace: namespace,
kind: el.Kind,
id: el.GetId(),
name: el.GetName(),
parent: key,
}
}
if !key.valid() { // Also detects key == nil.
return key, ErrInvalidKey
}
return key, nil
}
// multiKeyToProto is a batch version of keyToProto.
func multiKeyToProto(keys []*Key) []*pb.Key {
ret := make([]*pb.Key, len(keys))
for i, k := range keys {
ret[i] = keyToProto(k)
}
return ret
}
// multiProtoToKey is a batch version of protoToKey.
func multiProtoToKey(keys []*pb.Key) ([]*Key, error) {
hasErr := false
ret := make([]*Key, len(keys))
err := make(MultiError, len(keys))
for i, k := range keys {
ret[i], err[i] = protoToKey(k)
if err[i] != nil {
hasErr = true
}
}
if hasErr {
return nil, err
}
return ret, nil
}
// multiValid is a batch version of Key.valid. It returns an error, not a
// []bool.
func multiValid(key []*Key) error {
invalid := false
for _, k := range key {
if !k.valid() {
invalid = true
break
}
}
if !invalid {
return nil
}
err := make(MultiError, len(key))
for i, k := range key {
if !k.valid() {
err[i] = ErrInvalidKey
}
}
return err
}
// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
// type S, for some interface type I, or some non-interface non-pointer type P
// such that P or *P implements PropertyLoadSaver.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S, I or P.
//
// As a special case, PropertyList is an invalid type for v.
//
// TODO(djd): multiArg is very confusing. Fold this logic into the
// relevant Put/Get methods to make the logic less opaque.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
if v.Kind() != reflect.Slice {
return multiArgTypeInvalid, nil
}
if v.Type() == typeOfPropertyList {
return multiArgTypeInvalid, nil
}
elemType = v.Type().Elem()
if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
return multiArgTypePropertyLoadSaver, elemType
}
switch elemType.Kind() {
case reflect.Struct:
return multiArgTypeStruct, elemType
case reflect.Interface:
return multiArgTypeInterface, elemType
case reflect.Ptr:
elemType = elemType.Elem()
if elemType.Kind() == reflect.Struct {
return multiArgTypeStructPtr, elemType
}
}
return multiArgTypeInvalid, nil
}
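// Illustrative examples of the categories above, assuming a struct type S,
// an interface type I, and a type P whose pointer implements
// PropertyLoadSaver:
//
// var a []S // multiArgTypeStruct
// var b []*S // multiArgTypeStructPtr
// var c []I // multiArgTypeInterface
// var d []P // multiArgTypePropertyLoadSaver
// var e PropertyList // rejected: invalid as a special case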
// Close closes the Client.
func (c *Client) Close() {
c.conn.Close()
}
// Get loads the entity stored for key into dst, which must be a struct pointer
// or implement PropertyLoadSaver. If there is no such entity for the key, Get
// returns ErrNoSuchEntity.
//
// The values of dst's unmatched struct fields are not modified, and matching
// slice-typed fields are not reset before appending to them. In particular, it
// is recommended to pass a pointer to a zero valued struct on each Get call.
//
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct. ErrFieldMismatch is only returned if
// dst is a struct pointer.
func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
if dst == nil { // get catches nil interfaces; we need to catch nil ptr here
return ErrInvalidEntityType
}
err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// GetMulti is a batch version of Get.
//
// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
// type I, or some non-interface non-pointer type P such that P or *P
// implements PropertyLoadSaver. If an []I, each element must be a valid dst
// for Get: it must be a struct pointer or implement PropertyLoadSaver.
//
// As a special case, PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when []PropertyList was intended.
func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error {
return c.get(ctx, keys, dst, nil)
}
func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {
v := reflect.ValueOf(dst)
multiArgType, _ := checkMultiArg(v)
// Sanity checks
if multiArgType == multiArgTypeInvalid {
return errors.New("datastore: dst has invalid type")
}
if len(keys) != v.Len() {
return errors.New("datastore: keys and dst slices have different length")
}
if len(keys) == 0 {
return nil
}
// Go through keys, validate them, serialize them, and build a map from each key to its index
multiErr, any := make(MultiError, len(keys)), false
keyMap := make(map[string]int)
pbKeys := make([]*pb.Key, len(keys))
for i, k := range keys {
if !k.valid() {
multiErr[i] = ErrInvalidKey
any = true
} else {
keyMap[k.String()] = i
pbKeys[i] = keyToProto(k)
}
}
if any {
return multiErr
}
req := &pb.LookupRequest{
ProjectId: c.dataset,
Keys: pbKeys,
ReadOptions: opts,
}
resp, err := c.client.Lookup(ctx, req)
if err != nil {
return err
}
if len(resp.Deferred) > 0 {
// TODO(jbd): Assess whether we should retry the deferred keys.
return errors.New("datastore: some entities temporarily unavailable")
}
if len(keys) != len(resp.Found)+len(resp.Missing) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
for _, e := range resp.Found {
k, err := protoToKey(e.Entity.Key)
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
index := keyMap[k.String()]
elem := v.Index(index)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
elem.Set(reflect.New(elem.Type().Elem()))
}
if err := loadEntity(elem.Interface(), e.Entity); err != nil {
multiErr[index] = err
any = true
}
}
for _, e := range resp.Missing {
k, err := protoToKey(e.Entity.Key)
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
multiErr[keyMap[k.String()]] = ErrNoSuchEntity
any = true
}
if any {
return multiErr
}
return nil
}
// Put saves the entity src into the datastore with key k. src must be a struct
// pointer or implement PropertyLoadSaver; if a struct pointer then any
// unexported fields of that struct will be skipped. If k is an incomplete key,
// the returned key will be a unique key generated by the datastore.
func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) {
k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src})
if err != nil {
if me, ok := err.(MultiError); ok {
return nil, me[0]
}
return nil, err
}
return k[0], nil
}
// PutMulti is a batch version of Put.
//
// src must satisfy the same conditions as the dst argument to GetMulti.
func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) {
mutations, err := putMutations(keys, src)
if err != nil {
return nil, err
}
// Make the request.
req := &pb.CommitRequest{
ProjectId: c.dataset,
Mutations: mutations,
Mode: pb.CommitRequest_NON_TRANSACTIONAL,
}
resp, err := c.client.Commit(ctx, req)
if err != nil {
return nil, err
}
// Copy any newly minted keys into the returned keys.
ret := make([]*Key, len(keys))
for i, key := range keys {
if key.Incomplete() {
// This key is in the mutation results.
ret[i], err = protoToKey(resp.MutationResults[i].Key)
if err != nil {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
} else {
ret[i] = key
}
}
return ret, nil
}
func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) {
v := reflect.ValueOf(src)
multiArgType, _ := checkMultiArg(v)
if multiArgType == multiArgTypeInvalid {
return nil, errors.New("datastore: src has invalid type")
}
if len(keys) != v.Len() {
return nil, errors.New("datastore: key and src slices have different length")
}
if len(keys) == 0 {
return nil, nil
}
if err := multiValid(keys); err != nil {
return nil, err
}
mutations := make([]*pb.Mutation, 0, len(keys))
for i, k := range keys {
elem := v.Index(i)
// Two cases where we need to take the address:
// 1) multiArgTypePropertyLoadSaver => &elem implements PLS
// 2) multiArgTypeStruct => saveEntity needs *struct
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
p, err := saveEntity(k, elem.Interface())
if err != nil {
return nil, fmt.Errorf("datastore: Error while saving %v: %v", k.String(), err)
}
var mut *pb.Mutation
if k.Incomplete() {
mut = &pb.Mutation{Operation: &pb.Mutation_Insert{p}}
} else {
mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{p}}
}
mutations = append(mutations, mut)
}
return mutations, nil
}
// Delete deletes the entity for the given key.
func (c *Client) Delete(ctx context.Context, key *Key) error {
err := c.DeleteMulti(ctx, []*Key{key})
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// DeleteMulti is a batch version of Delete.
func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error {
mutations, err := deleteMutations(keys)
if err != nil {
return err
}
req := &pb.CommitRequest{
ProjectId: c.dataset,
Mutations: mutations,
Mode: pb.CommitRequest_NON_TRANSACTIONAL,
}
_, err = c.client.Commit(ctx, req)
return err
}
func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
mutations := make([]*pb.Mutation, 0, len(keys))
for _, k := range keys {
if k.Incomplete() {
return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k)
}
mutations = append(mutations, &pb.Mutation{
Operation: &pb.Mutation_Delete{keyToProto(k)},
})
}
return mutations, nil
}

1688
vendor/cloud.google.com/go/datastore/datastore_test.go generated vendored Normal file

File diff suppressed because it is too large

320
vendor/cloud.google.com/go/datastore/doc.go generated vendored Normal file

@@ -0,0 +1,320 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package datastore provides a client for Google Cloud Datastore.
Basic Operations
Entities are the unit of storage and are associated with a key. A key
consists of an optional parent key, a string application ID, a string kind
(also known as an entity type), and either a StringID or an IntID. A
StringID is also known as an entity name or key name.
It is valid to create a key with a zero StringID and a zero IntID; this is
called an incomplete key, and does not refer to any saved entity. Putting an
entity into the datastore under an incomplete key will cause a unique key
to be generated for that entity, with a non-zero IntID.
An entity's contents are a mapping from case-sensitive field names to values.
Valid value types are:
- signed integers (int, int8, int16, int32 and int64),
- bool,
- string,
- float32 and float64,
- []byte (up to 1 megabyte in length),
- any type whose underlying type is one of the above predeclared types,
- *Key,
- GeoPoint,
- time.Time (stored with microsecond precision),
- structs whose fields are all valid value types,
- slices of any of the above.
Slices of structs are valid, as are structs that contain slices. However, if
one struct contains another, then at most one of those can be repeated. This
disqualifies recursively defined struct types: any struct T that (directly or
indirectly) contains a []T.
The Get and Put functions load and save an entity's contents. An entity's
contents are typically represented by a struct pointer.
Example code:
type Entity struct {
Value string
}
func main() {
ctx := context.Background()
// Create a datastore client. In a typical application, you would create
// a single client which is reused for every datastore operation.
dsClient, err := datastore.NewClient(ctx, "my-project")
if err != nil {
// Handle error.
}
k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil)
e := new(Entity)
if err := dsClient.Get(ctx, k, e); err != nil {
// Handle error.
}
old := e.Value
e.Value = "Hello World!"
if _, err := dsClient.Put(ctx, k, e); err != nil {
// Handle error.
}
fmt.Printf("Updated value from %q to %q\n", old, e.Value)
}
GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
Delete functions. They take a []*Key instead of a *Key, and may return a
datastore.MultiError when encountering partial failure.
Properties
An entity's contents can be represented by a variety of types. These are
typically struct pointers, but can also be any type that implements the
PropertyLoadSaver interface. If using a struct pointer, you do not have to
explicitly implement the PropertyLoadSaver interface; the datastore will
automatically convert via reflection. If a struct pointer does implement that
interface then those methods will be used in preference to the default
behavior for struct pointers. Struct pointers are more strongly typed and are
easier to use; PropertyLoadSavers are more flexible.
The actual types passed do not have to match between Get and Put calls or even
across different calls to datastore. It is valid to put a *PropertyList and
get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
Conceptually, any entity is saved as a sequence of properties, and is loaded
into the destination value on a property-by-property basis. When loading into
a struct pointer, an entity that cannot be completely represented (such as a
missing field) will result in an ErrFieldMismatch error but it is up to the
caller whether this error is fatal, recoverable or ignorable.
By default, for struct pointers, all properties are potentially indexed, and
the property name is the same as the field name (and hence must start with an
upper case letter). Fields may have a `datastore:"name,options"` tag. The tag
name is the property name, which must be one or more valid Go identifiers
joined by ".", but may start with a lower case letter. An empty tag name means
to just use the field name. A "-" tag name means that the datastore will
ignore that field. If options is "noindex" then the field will not be indexed.
If the options is "" then the comma may be omitted. There are no other
recognized options.
All fields are indexed by default. Strings or byte slices longer than 1500
bytes cannot be indexed; fields used to store long strings and byte slices must
be tagged with "noindex" or they will cause Put operations to fail.
Example code:
// A and B are renamed to a and b.
// A, C and J are not indexed.
// D's tag is equivalent to having no tag at all (E).
// I is ignored entirely by the datastore.
// J has tag information for both the datastore and json packages.
type TaggedStruct struct {
A int `datastore:"a,noindex"`
B int `datastore:"b"`
C int `datastore:",noindex"`
D int `datastore:""`
E int
I int `datastore:"-"`
J int `datastore:",noindex" json:"j"`
}
Structured Properties
If the struct pointed to contains other structs, then the nested or embedded
structs are flattened. For example, given these definitions:
type Inner1 struct {
W int32
X string
}
type Inner2 struct {
Y float64
}
type Inner3 struct {
Z bool
}
type Outer struct {
A int16
I []Inner1
J Inner2
Inner3
}
then an Outer's properties would be equivalent to those of:
type OuterEquivalent struct {
A int16
IDotW []int32 `datastore:"I.W"`
IDotX []string `datastore:"I.X"`
JDotY float64 `datastore:"J.Y"`
Z bool
}
If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
If an outer struct is tagged "noindex" then all of its implicit flattened
fields are effectively "noindex".
The PropertyLoadSaver Interface
An entity's contents can also be represented by any type that implements the
PropertyLoadSaver interface. This type may be a struct pointer, but it does
not have to be. The datastore package will call Load when getting the entity's
contents, and Save when putting the entity's contents.
Possible uses include deriving non-stored fields, verifying fields, or indexing
a field only if its value is positive.
Example code:
type CustomPropsExample struct {
I, J int
// Sum is not stored, but should always be equal to I + J.
Sum int `datastore:"-"`
}
func (x *CustomPropsExample) Load(ps []datastore.Property) error {
// Load I and J as usual.
if err := datastore.LoadStruct(x, ps); err != nil {
return err
}
// Derive the Sum field.
x.Sum = x.I + x.J
return nil
}
func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
// Validate the Sum field.
if x.Sum != x.I + x.J {
return errors.New("CustomPropsExample has inconsistent sum")
}
// Save I and J as usual. The code below is equivalent to calling
// "return datastore.SaveStruct(x)", but is done manually for
// demonstration purposes.
return []datastore.Property{
{
Name: "I",
Value: int64(x.I),
},
{
Name: "J",
Value: int64(x.J),
},
}
}
The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
arbitrary entity's contents.
Queries
Queries retrieve entities based on their properties or key's ancestry. Running
a query yields an iterator of results: either keys or (key, entity) pairs.
Queries are re-usable and it is safe to call Query.Run from concurrent
goroutines. Iterators are not safe for concurrent use.
Queries are immutable, and are either created by calling NewQuery, or derived
from an existing query by calling a method like Filter or Order that returns a
new query value. A query is typically constructed by calling NewQuery followed
by a chain of zero or more such methods. These methods are:
- Ancestor and Filter constrain the entities returned by running a query.
- Order affects the order in which they are returned.
- Project constrains the fields returned.
- Distinct de-duplicates projected entities.
- KeysOnly makes the iterator return only keys, not (key, entity) pairs.
- Start, End, Offset and Limit define which sub-sequence of matching entities
to return. Start and End take cursors, Offset and Limit take integers. Start
and Offset affect the first result, End and Limit affect the last result.
If both Start and Offset are set, then the offset is relative to Start.
If both End and Limit are set, then the earliest constraint wins. Limit is
relative to Start+Offset, not relative to End. As a special case, a
negative limit means unlimited.
Example code:
type Widget struct {
Description string
Price int
}
func printWidgets(ctx context.Context, client *datastore.Client) {
q := datastore.NewQuery("Widget").
Filter("Price <", 1000).
Order("-Price")
for t := client.Run(ctx, q); ; {
var x Widget
key, err := t.Next(&x)
if err == datastore.Done {
break
}
if err != nil {
// Handle error.
}
fmt.Printf("Key=%v\nWidget=%#v\n\n", key, x)
}
}
Transactions
Client.RunInTransaction runs a function in a transaction.
Example code:
type Counter struct {
Count int
}
func incCount(ctx context.Context, client *datastore.Client) {
var count int
key := datastore.NewKey(ctx, "Counter", "singleton", 0, nil)
err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
var x Counter
if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity {
return err
}
x.Count++
if _, err := tx.Put(key, &x); err != nil {
return err
}
count = x.Count
}, nil)
if err != nil {
// Handle error.
}
// The value of count is only valid once the transaction is successful
// (RunInTransaction has returned nil).
fmt.Printf("Count=%d\n", count)
}
*/
package datastore // import "cloud.google.com/go/datastore"
// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"

47
vendor/cloud.google.com/go/datastore/errors.go generated vendored Normal file

@@ -0,0 +1,47 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file provides error functions for common API failure modes.
package datastore
import (
"fmt"
)
// MultiError is returned by batch operations when there are errors with
// particular elements. Errors will be in a one-to-one correspondence with
// the input elements; successful elements will have a nil entry.
type MultiError []error
func (m MultiError) Error() string {
s, n := "", 0
for _, e := range m {
if e != nil {
if n == 0 {
s = e.Error()
}
n++
}
}
switch n {
case 0:
return "(0 errors)"
case 1:
return s
case 2:
return s + " (and 1 other error)"
}
return fmt.Sprintf("%s (and %d other errors)", s, n-1)
}
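A short usage sketch, not from the vendored source, of unpacking a MultiError returned by a batch get. It relies on the one-to-one correspondence documented above; the Post type and keys are placeholders:

package datastoredemo

import (
	"log"

	"cloud.google.com/go/datastore"
	"golang.org/x/net/context"
)

type Post struct {
	Title string
}

// getExisting returns the posts that exist, skipping keys with no stored
// entity. MultiError entry i corresponds to keys[i]; a nil entry is success.
func getExisting(ctx context.Context, client *datastore.Client, keys []*datastore.Key) ([]*Post, error) {
	posts := make([]Post, len(keys))
	err := client.GetMulti(ctx, keys, posts)
	merr, ok := err.(datastore.MultiError)
	if err != nil && !ok {
		return nil, err // not element-wise, e.g. a transport failure
	}
	var found []*Post
	for i := range posts {
		if merr != nil && merr[i] != nil {
			if merr[i] != datastore.ErrNoSuchEntity {
				log.Printf("get %v: %v", keys[i], merr[i])
			}
			continue
		}
		found = append(found, &posts[i])
	}
	return found, nil
}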

235
vendor/cloud.google.com/go/datastore/example_test.go generated vendored Normal file
View File

@@ -0,0 +1,235 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore_test
import (
"log"
"time"
"cloud.google.com/go/datastore"
"golang.org/x/net/context"
)
// TODO(jbd): Document other authorization methods and refer to them here.
func Example_auth() *datastore.Client {
ctx := context.Background()
// Use Google Application Default Credentials to authorize and authenticate the client.
// More information about Application Default Credentials and how to enable is at
// https://developers.google.com/identity/protocols/application-default-credentials.
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
// Use the client (see other examples).
return client
}
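The TODO above leaves other authorization methods undocumented. One that this package's own integration tests use is an explicit token source passed through google.golang.org/api/option; a hedged sketch, with the token source construction left to the caller:

package datastoredemo

import (
	"cloud.google.com/go/datastore"
	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"google.golang.org/api/option"
)

// newClientWithTokenSource bypasses Application Default Credentials and
// authenticates with the supplied token source instead.
func newClientWithTokenSource(ctx context.Context, ts oauth2.TokenSource) (*datastore.Client, error) {
	return datastore.NewClient(ctx, "project-id", option.WithTokenSource(ts))
}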
func ExampleGet() {
ctx := context.Background()
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
type Article struct {
Title string
Description string
Body string `datastore:",noindex"`
Author *datastore.Key
PublishedAt time.Time
}
key := datastore.NewKey(ctx, "Article", "articled1", 0, nil)
article := &Article{}
if err := client.Get(ctx, key, article); err != nil {
log.Fatal(err)
}
}
func ExamplePut() {
ctx := context.Background()
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
type Article struct {
Title string
Description string
Body string `datastore:",noindex"`
Author *datastore.Key
PublishedAt time.Time
}
newKey := datastore.NewIncompleteKey(ctx, "Article", nil)
_, err = client.Put(ctx, newKey, &Article{
Title: "The title of the article",
Description: "The description of the article...",
Body: "...",
Author: datastore.NewKey(ctx, "Author", "jbd", 0, nil),
PublishedAt: time.Now(),
})
if err != nil {
log.Fatal(err)
}
}
func ExampleDelete() {
ctx := context.Background()
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
key := datastore.NewKey(ctx, "Article", "articled1", 0, nil)
if err := client.Delete(ctx, key); err != nil {
log.Fatal(err)
}
}
type Post struct {
Title string
PublishedAt time.Time
Comments int
}
func ExampleGetMulti() {
ctx := context.Background()
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
keys := []*datastore.Key{
datastore.NewKey(ctx, "Post", "post1", 0, nil),
datastore.NewKey(ctx, "Post", "post2", 0, nil),
datastore.NewKey(ctx, "Post", "post3", 0, nil),
}
posts := make([]Post, 3)
if err := client.GetMulti(ctx, keys, posts); err != nil {
log.Println(err)
}
}
func ExamplePutMulti_slice() {
ctx := context.Background()
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
keys := []*datastore.Key{
datastore.NewKey(ctx, "Post", "post1", 0, nil),
datastore.NewKey(ctx, "Post", "post2", 0, nil),
}
// PutMulti with a Post slice.
posts := []*Post{
{Title: "Post 1", PublishedAt: time.Now()},
{Title: "Post 2", PublishedAt: time.Now()},
}
if _, err := client.PutMulti(ctx, keys, posts); err != nil {
log.Fatal(err)
}
}
func ExamplePutMulti_interfaceSlice() {
ctx := context.Background()
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
keys := []*datastore.Key{
datastore.NewKey(ctx, "Post", "post1", 0, nil),
datastore.NewKey(ctx, "Post", "post2", 0, nil),
}
// PutMulti with an empty interface slice.
posts := []interface{}{
&Post{Title: "Post 1", PublishedAt: time.Now()},
&Post{Title: "Post 2", PublishedAt: time.Now()},
}
if _, err := client.PutMulti(ctx, keys, posts); err != nil {
log.Fatal(err)
}
}
func ExampleQuery() {
ctx := context.Background()
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
	// Count the number of Post entities.
q := datastore.NewQuery("Post")
n, err := client.Count(ctx, q)
if err != nil {
log.Fatal(err)
}
log.Printf("There are %d posts.", n)
// List the posts published since yesterday.
yesterday := time.Now().Add(-24 * time.Hour)
q = datastore.NewQuery("Post").Filter("PublishedAt >", yesterday)
it := client.Run(ctx, q)
// Use the iterator.
_ = it
	// Order the posts by the number of comments they have received.
datastore.NewQuery("Post").Order("-Comments")
// Start listing from an offset and limit the results.
datastore.NewQuery("Post").Offset(20).Limit(10)
}
func ExampleTransaction() {
ctx := context.Background()
client, err := datastore.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
const retries = 3
// Increment a counter.
// See https://cloud.google.com/appengine/articles/sharding_counters for
// a more scalable solution.
type Counter struct {
Count int
}
key := datastore.NewKey(ctx, "counter", "CounterA", 0, nil)
for i := 0; i < retries; i++ {
tx, err := client.NewTransaction(ctx)
if err != nil {
break
}
var c Counter
if err := tx.Get(key, &c); err != nil && err != datastore.ErrNoSuchEntity {
break
}
c.Count++
if _, err := tx.Put(key, &c); err != nil {
break
}
// Attempt to commit the transaction. If there's a conflict, try again.
if _, err := tx.Commit(); err != datastore.ErrConcurrentTransaction {
break
}
}
}
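For contrast, a sketch (not part of this file) of the same counter increment via Client.RunInTransaction, which runs the commit-and-retry loop above automatically; MaxAttempts, also used by the integration tests below, bounds the retries:

package datastoredemo

import (
	"cloud.google.com/go/datastore"
	"golang.org/x/net/context"
)

type Counter struct {
	Count int
}

// incrementCounter delegates the conflict handling to RunInTransaction,
// which retries the function when the commit hits a concurrent transaction.
func incrementCounter(ctx context.Context, client *datastore.Client, key *datastore.Key) error {
	_, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
		var c Counter
		if err := tx.Get(key, &c); err != nil && err != datastore.ErrNoSuchEntity {
			return err
		}
		c.Count++
		_, err := tx.Put(key, &c)
		return err
	}, datastore.MaxAttempts(3))
	return err
}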

731
vendor/cloud.google.com/go/datastore/examples_test.go generated vendored Normal file
View File

@@ -0,0 +1,731 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore_test
import (
"fmt"
"log"
"time"
"cloud.google.com/go/datastore"
"golang.org/x/net/context"
)
type Task struct {
Category string
Done bool
Priority int
Description string `datastore:",noindex"`
PercentComplete float64
Created time.Time
Tags []string
Collaborators []string
}
func ExampleNewIncompleteKey() {
ctx := context.Background()
// [START incomplete_key]
taskKey := datastore.NewIncompleteKey(ctx, "Task", nil)
// [END incomplete_key]
_ = taskKey // Use the task key for datastore operations.
}
func ExampleNewKey() {
ctx := context.Background()
// [START named_key]
taskKey := datastore.NewKey(ctx, "Task", "sampletask", 0, nil)
// [END named_key]
_ = taskKey // Use the task key for datastore operations.
}
func ExampleNewKey_withParent() {
ctx := context.Background()
// [START key_with_parent]
parentKey := datastore.NewKey(ctx, "TaskList", "default", 0, nil)
taskKey := datastore.NewKey(ctx, "Task", "sampleTask", 0, parentKey)
// [END key_with_parent]
_ = taskKey // Use the task key for datastore operations.
}
func ExampleNewKey_withMultipleParents() {
ctx := context.Background()
// [START key_with_multilevel_parent]
userKey := datastore.NewKey(ctx, "User", "alice", 0, nil)
parentKey := datastore.NewKey(ctx, "TaskList", "default", 0, userKey)
taskKey := datastore.NewKey(ctx, "Task", "sampleTask", 0, parentKey)
// [END key_with_multilevel_parent]
_ = taskKey // Use the task key for datastore operations.
}
func ExampleClient_Put() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
// [START entity_with_parent]
parentKey := datastore.NewKey(ctx, "TaskList", "default", 0, nil)
key := datastore.NewIncompleteKey(ctx, "Task", parentKey)
task := Task{
Category: "Personal",
Done: false,
Priority: 4,
Description: "Learn Cloud Datastore",
}
// A complete key is assigned to the entity when it is Put.
var err error
key, err = client.Put(ctx, key, &task)
// [END entity_with_parent]
_ = err // Make sure you check err.
}
func Example_properties() {
// [START properties]
type Task struct {
Category string
Done bool
Priority int
Description string `datastore:",noindex"`
PercentComplete float64
Created time.Time
}
task := &Task{
Category: "Personal",
Done: false,
Priority: 4,
Description: "Learn Cloud Datastore",
PercentComplete: 10.0,
Created: time.Now(),
}
// [END properties]
_ = task // Use the task in a datastore Put operation.
}
func Example_sliceProperties() {
// [START array_value]
type Task struct {
Tags []string
Collaborators []string
}
task := &Task{
Tags: []string{"fun", "programming"},
Collaborators: []string{"alice", "bob"},
}
// [END array_value]
_ = task // Use the task in a datastore Put operation.
}
func Example_basicEntity() {
// [START basic_entity]
type Task struct {
Category string
Done bool
Priority float64
Description string `datastore:",noindex"`
PercentComplete float64
Created time.Time
}
task := &Task{
Category: "Personal",
Done: false,
Priority: 4,
Description: "Learn Cloud Datastore",
PercentComplete: 10.0,
Created: time.Now(),
}
// [END basic_entity]
_ = task // Use the task in a datastore Put operation.
}
func ExampleClient_Put_upsert() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
task := &Task{} // Populated with appropriate data.
key := datastore.NewIncompleteKey(ctx, "Task", nil)
// [START upsert]
key, err := client.Put(ctx, key, task)
// [END upsert]
_ = err // Make sure you check err.
_ = key // key is the complete key for the newly stored task
}
func ExampleTransaction_insert() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
task := Task{} // Populated with appropriate data.
taskKey := datastore.NewKey(ctx, "Task", "sampleTask", 0, nil)
// [START insert]
_, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
// We first check that there is no entity stored with the given key.
var empty Task
if err := tx.Get(taskKey, &empty); err != datastore.ErrNoSuchEntity {
return err
}
// If there was no matching entity, store it now.
_, err := tx.Put(taskKey, &task)
return err
})
// [END insert]
_ = err // Make sure you check err.
}
func ExampleClient_Get() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
taskKey := datastore.NewKey(ctx, "Task", "sampleTask", 0, nil)
// [START lookup]
var task Task
err := client.Get(ctx, taskKey, &task)
// [END lookup]
_ = err // Make sure you check err.
}
func ExampleTransaction_update() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
taskKey := datastore.NewKey(ctx, "Task", "sampleTask", 0, nil)
// [START update]
tx, err := client.NewTransaction(ctx)
if err != nil {
log.Fatalf("client.NewTransaction: %v", err)
}
var task Task
if err := tx.Get(taskKey, &task); err != nil {
log.Fatalf("tx.Get: %v", err)
}
task.Priority = 5
if _, err := tx.Put(taskKey, task); err != nil {
log.Fatalf("tx.Put: %v", err)
}
if _, err := tx.Commit(); err != nil {
log.Fatalf("tx.Commit: %v", err)
}
// [END update]
}
func ExampleClient_Delete() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
key := datastore.NewKey(ctx, "Task", "sampletask", 0, nil)
// [START delete]
err := client.Delete(ctx, key)
// [END delete]
_ = err // Make sure you check err.
}
func ExampleClient_PutMulti() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
// [START batch_upsert]
tasks := []*Task{
{
Category: "Personal",
Done: false,
Priority: 4,
Description: "Learn Cloud Datastore",
},
{
Category: "Personal",
Done: false,
Priority: 5,
Description: "Integrate Cloud Datastore",
},
}
keys := []*datastore.Key{
datastore.NewIncompleteKey(ctx, "Task", nil),
datastore.NewIncompleteKey(ctx, "Task", nil),
}
keys, err := client.PutMulti(ctx, keys, tasks)
// [END batch_upsert]
_ = err // Make sure you check err.
_ = keys // keys now has the complete keys for the newly stored tasks.
}
func ExampleClient_GetMulti() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
	var taskKeys []*datastore.Key // Populated with complete keys.
	// [START batch_lookup]
	tasks := make([]*Task, len(taskKeys))
	err := client.GetMulti(ctx, taskKeys, tasks)
// [END batch_lookup]
_ = err // Make sure you check err.
}
func ExampleClient_DeleteMulti() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
	var taskKeys []*datastore.Key // Populated with complete keys.
// [START batch_delete]
err := client.DeleteMulti(ctx, taskKeys)
// [END batch_delete]
_ = err // Make sure you check err.
}
func ExampleQuery_basic() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
// [START basic_query]
query := datastore.NewQuery("Task").
Filter("Done =", false).
Filter("Priority >=", 4).
Order("-Priority")
// [END basic_query]
// [START run_query]
it := client.Run(ctx, query)
for {
var task Task
_, err := it.Next(&task)
if err == datastore.Done {
break
}
if err != nil {
log.Fatalf("Error fetching next task: %v", err)
}
fmt.Printf("Task %q, Priority %d\n", task.Description, task.Priority)
}
// [END run_query]
}
func ExampleQuery_propertyFilter() {
// [START property_filter]
query := datastore.NewQuery("Task").Filter("Done =", false)
// [END property_filter]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_compositeFilter() {
// [START composite_filter]
query := datastore.NewQuery("Task").Filter("Done =", false).Filter("Priority =", 4)
// [END composite_filter]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_keyFilter() {
ctx := context.Background()
// [START key_filter]
key := datastore.NewKey(ctx, "Task", "someTask", 0, nil)
query := datastore.NewQuery("Task").Filter("__key__ >", key)
// [END key_filter]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_sortAscending() {
// [START ascending_sort]
query := datastore.NewQuery("Task").Order("created")
// [END ascending_sort]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_sortDescending() {
// [START descending_sort]
query := datastore.NewQuery("Task").Order("-created")
// [END descending_sort]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_sortMulti() {
// [START multi_sort]
query := datastore.NewQuery("Task").Order("-priority").Order("created")
// [END multi_sort]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_kindless() {
var lastSeenKey *datastore.Key
// [START kindless_query]
query := datastore.NewQuery("").Filter("__key__ >", lastSeenKey)
// [END kindless_query]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_Ancestor() {
ctx := context.Background()
// [START ancestor_query]
ancestor := datastore.NewKey(ctx, "TaskList", "default", 0, nil)
query := datastore.NewQuery("Task").Ancestor(ancestor)
// [END ancestor_query]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_Project() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
// [START projection_query]
query := datastore.NewQuery("Task").Project("Priority", "PercentComplete")
// [END projection_query]
// [START run_query_projection]
var priorities []int
var percents []float64
it := client.Run(ctx, query)
for {
var task Task
if _, err := it.Next(&task); err == datastore.Done {
break
} else if err != nil {
log.Fatal(err)
}
priorities = append(priorities, task.Priority)
percents = append(percents, task.PercentComplete)
}
// [END run_query_projection]
}
func ExampleQuery_KeysOnly() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
// [START keys_only_query]
query := datastore.NewQuery("Task").KeysOnly()
// [END keys_only_query]
// [START run_keys_only_query]
keys, err := client.GetAll(ctx, query, nil)
// [END run_keys_only_query]
_ = err // Make sure you check err.
_ = keys // Keys contains keys for all stored tasks.
}
func ExampleQuery_Distinct() {
// [START distinct_query]
query := datastore.NewQuery("Task").
Project("Priority", "PercentComplete").
Distinct().
Order("Category").Order("Priority")
// [END distinct_query]
_ = query // Use client.Run or client.GetAll to execute the query.
// [START distinct_on_query]
// DISTINCT ON not supported in Go API
// [END distinct_on_query]
}
func ExampleQuery_Filter_arrayInequality() {
// [START array_value_inequality_range]
query := datastore.NewQuery("Task").
Filter("Tag >", "learn").
Filter("Tag <", "math")
// [END array_value_inequality_range]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_Filter_arrayEquality() {
// [START array_value_equality]
query := datastore.NewQuery("Task").
Filter("Tag =", "fun").
Filter("Tag =", "programming")
// [END array_value_equality]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_Filter_inequality() {
// [START inequality_range]
query := datastore.NewQuery("Task").
Filter("Created >", time.Date(1990, 1, 1, 0, 0, 0, 0, time.UTC)).
Filter("Created <", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))
// [END inequality_range]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_Filter_invalidInequality() {
// [START inequality_invalid]
query := datastore.NewQuery("Task").
Filter("Created >", time.Date(1990, 1, 1, 0, 0, 0, 0, time.UTC)).
Filter("Priority >", 3)
// [END inequality_invalid]
_ = query // The query is invalid.
}
func ExampleQuery_Filter_mixed() {
// [START equal_and_inequality_range]
query := datastore.NewQuery("Task").
Filter("Priority =", 4).
Filter("Done =", false).
Filter("Created >", time.Date(1990, 1, 1, 0, 0, 0, 0, time.UTC)).
Filter("Created <", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))
// [END equal_and_inequality_range]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_inequalitySort() {
// [START inequality_sort]
query := datastore.NewQuery("Task").
Filter("Priority >", 3).
Order("Priority").
Order("Created")
// [END inequality_sort]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_invalidInequalitySortA() {
// [START inequality_sort_invalid_not_same]
query := datastore.NewQuery("Task").
Filter("Priority >", 3).
Order("Created")
// [END inequality_sort_invalid_not_same]
_ = query // The query is invalid.
}
func ExampleQuery_invalidInequalitySortB() {
// [START inequality_sort_invalid_not_first]
query := datastore.NewQuery("Task").
Filter("Priority >", 3).
Order("Created").
Order("Priority")
// [END inequality_sort_invalid_not_first]
_ = query // The query is invalid.
}
func ExampleQuery_Limit() {
// [START limit]
query := datastore.NewQuery("Task").Limit(5)
// [END limit]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleIterator_Cursor() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
cursorStr := ""
// [START cursor_paging]
const pageSize = 5
query := datastore.NewQuery("Tasks").Limit(pageSize)
if cursorStr != "" {
cursor, err := datastore.DecodeCursor(cursorStr)
if err != nil {
log.Fatalf("Bad cursor %q: %v", cursorStr, err)
}
query = query.Start(cursor)
}
// Read the tasks.
var tasks []Task
var task Task
it := client.Run(ctx, query)
_, err := it.Next(&task)
for err == nil {
tasks = append(tasks, task)
_, err = it.Next(&task)
}
if err != datastore.Done {
log.Fatalf("Failed fetching results: %v", err)
}
// Get the cursor for the next page of results.
nextCursor, err := it.Cursor()
// [END cursor_paging]
_ = err // Check the error.
_ = nextCursor // Use nextCursor.String as the next page's token.
}
func ExampleQuery_EventualConsistency() {
ctx := context.Background()
// [START eventual_consistent_query]
ancestor := datastore.NewKey(ctx, "TaskList", "default", 0, nil)
query := datastore.NewQuery("Task").Ancestor(ancestor).EventualConsistency()
// [END eventual_consistent_query]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func ExampleQuery_unindexed() {
// [START unindexed_property_query]
query := datastore.NewQuery("Tasks").Filter("Description =", "A task description")
// [END unindexed_property_query]
_ = query // Use client.Run or client.GetAll to execute the query.
}
func Example_explodingProperties() {
// [START exploding_properties]
task := &Task{
Tags: []string{"fun", "programming", "learn"},
Collaborators: []string{"alice", "bob", "charlie"},
Created: time.Now(),
}
// [END exploding_properties]
_ = task // Use the task in a datastore Put operation.
}
func Example_Transaction() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
var to, from *datastore.Key
// [START transactional_update]
type BankAccount struct {
Balance int
}
const amount = 50
keys := []*datastore.Key{to, from}
tx, err := client.NewTransaction(ctx)
if err != nil {
log.Fatalf("client.NewTransaction: %v", err)
}
accs := make([]BankAccount, 2)
if err := tx.GetMulti(keys, accs); err != nil {
tx.Rollback()
log.Fatalf("tx.GetMulti: %v", err)
}
accs[0].Balance += amount
accs[1].Balance -= amount
if _, err := tx.PutMulti(keys, accs); err != nil {
tx.Rollback()
log.Fatalf("tx.PutMulti: %v", err)
}
if _, err = tx.Commit(); err != nil {
log.Fatalf("tx.Commit: %v", err)
}
// [END transactional_update]
}
func Example_Client_RunInTransaction() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
var to, from *datastore.Key
// [START transactional_retry]
type BankAccount struct {
Balance int
}
const amount = 50
_, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
keys := []*datastore.Key{to, from}
accs := make([]BankAccount, 2)
if err := tx.GetMulti(keys, accs); err != nil {
return err
}
accs[0].Balance += amount
accs[1].Balance -= amount
_, err := tx.PutMulti(keys, accs)
return err
})
// [END transactional_retry]
_ = err // Check error.
}
func ExampleTransaction_getOrCreate() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
key := datastore.NewKey(ctx, "Task", "sampletask", 0, nil)
// [START transactional_get_or_create]
_, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
var task Task
if err := tx.Get(key, &task); err != datastore.ErrNoSuchEntity {
return err
}
_, err := tx.Put(key, &Task{
Category: "Personal",
Done: false,
Priority: 4,
Description: "Learn Cloud Datastore",
})
return err
})
// [END transactional_get_or_create]
_ = err // Check error.
}
func ExampleTransaction_runQuery() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
// [START transactional_single_entity_group_read_only]
tx, err := client.NewTransaction(ctx)
if err != nil {
log.Fatalf("client.NewTransaction: %v", err)
}
defer tx.Rollback() // Transaction only used for read.
ancestor := datastore.NewKey(ctx, "TaskList", "default", 0, nil)
query := datastore.NewQuery("Task").Ancestor(ancestor).Transaction(tx)
var tasks []Task
_, err = client.GetAll(ctx, query, &tasks)
// [END transactional_single_entity_group_read_only]
_ = err // Check error.
}
func Example_metadataNamespaces() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
// [START namespace_run_query]
const (
startNamespace = "g"
endNamespace = "h"
)
query := datastore.NewQuery("__namespace__").
Filter("__key__ >=", startNamespace).
Filter("__key__ <", endNamespace).
KeysOnly()
keys, err := client.GetAll(ctx, query, nil)
if err != nil {
log.Fatalf("client.GetAll: %v", err)
}
namespaces := make([]string, 0, len(keys))
for _, k := range keys {
namespaces = append(namespaces, k.Name())
}
// [END namespace_run_query]
}
func Example_metadataKinds() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
// [START kind_run_query]
query := datastore.NewQuery("__kind__").KeysOnly()
keys, err := client.GetAll(ctx, query, nil)
if err != nil {
log.Fatalf("client.GetAll: %v", err)
}
kinds := make([]string, 0, len(keys))
for _, k := range keys {
kinds = append(kinds, k.Name())
}
// [END kind_run_query]
}
func Example_metadataProperties() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
// [START property_run_query]
query := datastore.NewQuery("__property__").KeysOnly()
keys, err := client.GetAll(ctx, query, nil)
if err != nil {
log.Fatalf("client.GetAll: %v", err)
}
props := make(map[string][]string) // Map from kind to slice of properties.
for _, k := range keys {
prop := k.Name()
kind := k.Parent().Name()
props[kind] = append(props[kind], prop)
}
// [END property_run_query]
}
func Example_metadataPropertiesForKind() {
ctx := context.Background()
client, _ := datastore.NewClient(ctx, "my-proj")
// [START property_by_kind_run_query]
kindKey := datastore.NewKey(ctx, "__kind__", "Task", 0, nil)
query := datastore.NewQuery("__property__").Ancestor(kindKey)
type Prop struct {
Repr []string `datastore:"property_representation"`
}
var props []Prop
keys, err := client.GetAll(ctx, query, &props)
// [END property_by_kind_run_query]
_ = err // Check error.
_ = keys // Use keys to find property names, and props for their representations.
}
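Tying off the last example, a sketch (not from the vendored file) that pairs the returned keys with props: a __property__ key's name is the property, and its parent __kind__ key names the kind:

package datastoredemo

import (
	"fmt"

	"cloud.google.com/go/datastore"
)

// Prop mirrors the projection struct in the example above.
type Prop struct {
	Repr []string `datastore:"property_representation"`
}

// printRepresentations pairs each __property__ key with its representations.
func printRepresentations(keys []*datastore.Key, props []Prop) {
	for i, k := range keys {
		fmt.Printf("%s.%s: %v\n", k.Parent().Name(), k.Name(), props[i].Repr)
	}
}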

993
vendor/cloud.google.com/go/datastore/integration_test.go generated vendored Normal file
View File

@@ -0,0 +1,993 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"errors"
"fmt"
"reflect"
"sort"
"strings"
"sync"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
// TODO(djd): Make test entity clean up more robust: some test entities may
// be left behind if tests are aborted, the transport fails, etc.
// suffix is a timestamp-based suffix which is appended to key names,
// particularly for the root keys of entity groups. This reduces flakiness
// when the tests are run in parallel.
var suffix = fmt.Sprintf("-t%d", time.Now().UnixNano())
func newClient(ctx context.Context, t *testing.T) *Client {
ts := testutil.TokenSource(ctx, ScopeDatastore)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
if err != nil {
t.Fatalf("NewClient: %v", err)
}
return client
}
func TestBasics(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
	defer cancel()
client := newClient(ctx, t)
defer client.Close()
type X struct {
I int
S string
T time.Time
}
x0 := X{66, "99", time.Now().Truncate(time.Millisecond)}
k, err := client.Put(ctx, NewIncompleteKey(ctx, "BasicsX", nil), &x0)
if err != nil {
t.Fatalf("client.Put: %v", err)
}
x1 := X{}
err = client.Get(ctx, k, &x1)
if err != nil {
t.Errorf("client.Get: %v", err)
}
err = client.Delete(ctx, k)
if err != nil {
t.Errorf("client.Delete: %v", err)
}
if !reflect.DeepEqual(x0, x1) {
t.Errorf("compare: x0=%v, x1=%v", x0, x1)
}
}
func TestListValues(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
p0 := PropertyList{
{Name: "L", Value: []interface{}{int64(12), "string", true}},
}
k, err := client.Put(ctx, NewIncompleteKey(ctx, "ListValue", nil), &p0)
if err != nil {
t.Fatalf("client.Put: %v", err)
}
var p1 PropertyList
if err := client.Get(ctx, k, &p1); err != nil {
t.Errorf("client.Get: %v", err)
}
if !reflect.DeepEqual(p0, p1) {
t.Errorf("compare:\np0=%v\np1=%#v", p0, p1)
}
if err = client.Delete(ctx, k); err != nil {
t.Errorf("client.Delete: %v", err)
}
}
func TestGetMulti(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
type X struct {
I int
}
p := NewKey(ctx, "X", "x"+suffix, 0, nil)
cases := []struct {
key *Key
put bool
}{
{key: NewKey(ctx, "X", "item1", 0, p), put: true},
{key: NewKey(ctx, "X", "item2", 0, p), put: false},
{key: NewKey(ctx, "X", "item3", 0, p), put: false},
{key: NewKey(ctx, "X", "item4", 0, p), put: true},
}
var src, dst []*X
var srcKeys, dstKeys []*Key
for _, c := range cases {
dst = append(dst, &X{})
dstKeys = append(dstKeys, c.key)
if c.put {
src = append(src, &X{})
srcKeys = append(srcKeys, c.key)
}
}
if _, err := client.PutMulti(ctx, srcKeys, src); err != nil {
t.Error(err)
}
err := client.GetMulti(ctx, dstKeys, dst)
if err == nil {
t.Errorf("client.GetMulti got %v, expected error", err)
}
e, ok := err.(MultiError)
if !ok {
t.Errorf("client.GetMulti got %T, expected MultiError", err)
}
for i, err := range e {
got, want := err, (error)(nil)
if !cases[i].put {
got, want = err, ErrNoSuchEntity
}
if got != want {
t.Errorf("MultiError[%d] == %v, want %v", i, got, want)
}
}
}
type Z struct {
S string
T string `datastore:",noindex"`
P []byte
K []byte `datastore:",noindex"`
}
func (z Z) String() string {
var lens []string
v := reflect.ValueOf(z)
for i := 0; i < v.NumField(); i++ {
if l := v.Field(i).Len(); l > 0 {
lens = append(lens, fmt.Sprintf("len(%s)=%d", v.Type().Field(i).Name, l))
}
}
return fmt.Sprintf("Z{ %s }", strings.Join(lens, ","))
}
func TestUnindexableValues(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
x1500 := strings.Repeat("x", 1500)
x1501 := strings.Repeat("x", 1501)
testCases := []struct {
in Z
wantErr bool
}{
{in: Z{S: x1500}, wantErr: false},
{in: Z{S: x1501}, wantErr: true},
{in: Z{T: x1500}, wantErr: false},
{in: Z{T: x1501}, wantErr: false},
{in: Z{P: []byte(x1500)}, wantErr: false},
{in: Z{P: []byte(x1501)}, wantErr: true},
{in: Z{K: []byte(x1500)}, wantErr: false},
{in: Z{K: []byte(x1501)}, wantErr: false},
}
for _, tt := range testCases {
_, err := client.Put(ctx, NewIncompleteKey(ctx, "BasicsZ", nil), &tt.in)
if (err != nil) != tt.wantErr {
t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr)
}
}
}
func TestNilKey(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
testCases := []struct {
in K0
wantErr bool
}{
{in: K0{K: testKey0}, wantErr: false},
{in: K0{}, wantErr: false},
}
for _, tt := range testCases {
_, err := client.Put(ctx, NewIncompleteKey(ctx, "NilKey", nil), &tt.in)
if (err != nil) != tt.wantErr {
t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr)
}
}
}
type SQChild struct {
I, J int
T, U int64
}
type SQTestCase struct {
desc string
q *Query
wantCount int
wantSum int
}
func testSmallQueries(t *testing.T, ctx context.Context, client *Client, parent *Key, children []*SQChild,
testCases []SQTestCase, extraTests ...func()) {
keys := make([]*Key, len(children))
for i := range keys {
keys[i] = NewIncompleteKey(ctx, "SQChild", parent)
}
keys, err := client.PutMulti(ctx, keys, children)
if err != nil {
t.Fatalf("client.PutMulti: %v", err)
}
defer func() {
err := client.DeleteMulti(ctx, keys)
if err != nil {
t.Errorf("client.DeleteMulti: %v", err)
}
}()
for _, tc := range testCases {
count, err := client.Count(ctx, tc.q)
if err != nil {
t.Errorf("Count %q: %v", tc.desc, err)
continue
}
if count != tc.wantCount {
t.Errorf("Count %q: got %d want %d", tc.desc, count, tc.wantCount)
continue
}
}
for _, tc := range testCases {
var got []SQChild
_, err := client.GetAll(ctx, tc.q, &got)
if err != nil {
t.Errorf("client.GetAll %q: %v", tc.desc, err)
continue
}
sum := 0
for _, c := range got {
sum += c.I + c.J
}
if sum != tc.wantSum {
t.Errorf("sum %q: got %d want %d", tc.desc, sum, tc.wantSum)
continue
}
}
for _, x := range extraTests {
x()
}
}
func TestFilters(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
parent := NewKey(ctx, "SQParent", "TestFilters"+suffix, 0, nil)
now := time.Now().Truncate(time.Millisecond).Unix()
children := []*SQChild{
{I: 0, T: now, U: now},
{I: 1, T: now, U: now},
{I: 2, T: now, U: now},
{I: 3, T: now, U: now},
{I: 4, T: now, U: now},
{I: 5, T: now, U: now},
{I: 6, T: now, U: now},
{I: 7, T: now, U: now},
}
baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now)
testSmallQueries(t, ctx, client, parent, children, []SQTestCase{
{
"I>1",
baseQuery.Filter("I>", 1),
6,
2 + 3 + 4 + 5 + 6 + 7,
},
{
"I>2 AND I<=5",
baseQuery.Filter("I>", 2).Filter("I<=", 5),
3,
3 + 4 + 5,
},
{
"I>=3 AND I<3",
baseQuery.Filter("I>=", 3).Filter("I<", 3),
0,
0,
},
{
"I=4",
baseQuery.Filter("I=", 4),
1,
4,
},
}, func() {
got := []*SQChild{}
want := []*SQChild{
{I: 0, T: now, U: now},
{I: 1, T: now, U: now},
{I: 2, T: now, U: now},
{I: 3, T: now, U: now},
{I: 4, T: now, U: now},
{I: 5, T: now, U: now},
{I: 6, T: now, U: now},
{I: 7, T: now, U: now},
}
_, err := client.GetAll(ctx, baseQuery.Order("I"), &got)
if err != nil {
t.Errorf("client.GetAll: %v", err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("compare: got=%v, want=%v", got, want)
}
}, func() {
got := []*SQChild{}
want := []*SQChild{
{I: 7, T: now, U: now},
{I: 6, T: now, U: now},
{I: 5, T: now, U: now},
{I: 4, T: now, U: now},
{I: 3, T: now, U: now},
{I: 2, T: now, U: now},
{I: 1, T: now, U: now},
{I: 0, T: now, U: now},
}
_, err := client.GetAll(ctx, baseQuery.Order("-I"), &got)
if err != nil {
t.Errorf("client.GetAll: %v", err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("compare: got=%v, want=%v", got, want)
}
})
}
func TestLargeQuery(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
parent := NewKey(ctx, "LQParent", "TestFilters"+suffix, 0, nil)
now := time.Now().Truncate(time.Millisecond).Unix()
// Make a large number of children entities.
const n = 800
children := make([]*SQChild, 0, n)
keys := make([]*Key, 0, n)
for i := 0; i < n; i++ {
children = append(children, &SQChild{I: i, T: now, U: now})
keys = append(keys, NewIncompleteKey(ctx, "SQChild", parent))
}
// Store using PutMulti in batches.
const batchSize = 500
	for i := 0; i < n; i += batchSize {
j := i + batchSize
if j > n {
j = n
}
fullKeys, err := client.PutMulti(ctx, keys[i:j], children[i:j])
if err != nil {
t.Fatalf("PutMulti(%d, %d): %v", i, j, err)
}
defer func() {
err := client.DeleteMulti(ctx, fullKeys)
if err != nil {
t.Errorf("client.DeleteMulti: %v", err)
}
}()
}
q := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Order("I")
// Wait group to allow us to run query tests in parallel below.
var wg sync.WaitGroup
// Check we get the expected count and results for various limits/offsets.
queryTests := []struct {
limit, offset, want int
}{
// Just limit.
{limit: 0, want: 0},
{limit: 100, want: 100},
{limit: 501, want: 501},
{limit: n, want: n},
{limit: n * 2, want: n},
{limit: -1, want: n},
// Just offset.
{limit: -1, offset: 100, want: n - 100},
{limit: -1, offset: 500, want: n - 500},
{limit: -1, offset: n, want: 0},
// Limit and offset.
{limit: 100, offset: 100, want: 100},
{limit: 1000, offset: 100, want: n - 100},
{limit: 500, offset: 500, want: n - 500},
}
for _, tt := range queryTests {
q := q.Limit(tt.limit).Offset(tt.offset)
wg.Add(1)
go func(limit, offset, want int) {
defer wg.Done()
// Check Count returns the expected number of results.
count, err := client.Count(ctx, q)
if err != nil {
t.Errorf("client.Count(limit=%d offset=%d): %v", limit, offset, err)
return
}
if count != want {
t.Errorf("Count(limit=%d offset=%d) returned %d, want %d", limit, offset, count, want)
}
var got []SQChild
_, err = client.GetAll(ctx, q, &got)
if err != nil {
t.Errorf("client.GetAll(limit=%d offset=%d): %v", limit, offset, err)
return
}
if len(got) != want {
t.Errorf("GetAll(limit=%d offset=%d) returned %d, want %d", limit, offset, len(got), want)
}
for i, child := range got {
if got, want := child.I, i+offset; got != want {
t.Errorf("GetAll(limit=%d offset=%d) got[%d].I == %d; want %d", limit, offset, i, got, want)
break
}
}
}(tt.limit, tt.offset, tt.want)
}
// Also check iterator cursor behaviour.
cursorTests := []struct {
limit, offset int // Query limit and offset.
count int // The number of times to call "next"
want int // The I value of the desired element, -1 for "Done".
}{
// No limits.
{count: 0, limit: -1, want: 0},
{count: 5, limit: -1, want: 5},
{count: 500, limit: -1, want: 500},
{count: 1000, limit: -1, want: -1}, // No more results.
// Limits.
{count: 5, limit: 5, want: 5},
{count: 500, limit: 5, want: 5},
{count: 1000, limit: 1000, want: -1}, // No more results.
// Offsets.
{count: 0, offset: 5, limit: -1, want: 5},
{count: 5, offset: 5, limit: -1, want: 10},
{count: 200, offset: 500, limit: -1, want: 700},
{count: 200, offset: 1000, limit: -1, want: -1}, // No more results.
}
for _, tt := range cursorTests {
wg.Add(1)
go func(count, limit, offset, want int) {
defer wg.Done()
// Run iterator through count calls to Next.
it := client.Run(ctx, q.Limit(limit).Offset(offset).KeysOnly())
for i := 0; i < count; i++ {
_, err := it.Next(nil)
if err == Done {
break
}
if err != nil {
t.Errorf("count=%d, limit=%d, offset=%d: it.Next failed at i=%d", count, limit, offset, i)
return
}
}
// Grab the cursor.
cursor, err := it.Cursor()
if err != nil {
t.Errorf("count=%d, limit=%d, offset=%d: it.Cursor: %v", count, limit, offset, err)
return
}
// Make a request for the next element.
it = client.Run(ctx, q.Limit(1).Start(cursor))
var entity SQChild
_, err = it.Next(&entity)
switch {
case want == -1:
if err != Done {
t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor %v, want Done", count, limit, offset, err)
}
case err != nil:
t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor: %v, want nil", count, limit, offset, err)
case entity.I != want:
t.Errorf("count=%d, limit=%d, offset=%d: got.I = %d, want %d", count, limit, offset, entity.I, want)
}
}(tt.count, tt.limit, tt.offset, tt.want)
}
wg.Wait()
}
func TestEventualConsistency(t *testing.T) {
// TODO(jba): either make this actually test eventual consistency, or
// delete it. Currently it behaves the same with or without the
// EventualConsistency call.
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
parent := NewKey(ctx, "SQParent", "TestEventualConsistency"+suffix, 0, nil)
now := time.Now().Truncate(time.Millisecond).Unix()
children := []*SQChild{
{I: 0, T: now, U: now},
{I: 1, T: now, U: now},
{I: 2, T: now, U: now},
}
query := NewQuery("SQChild").Ancestor(parent).Filter("T =", now).EventualConsistency()
testSmallQueries(t, ctx, client, parent, children, nil, func() {
got, err := client.Count(ctx, query)
if err != nil {
t.Fatalf("Count: %v", err)
}
if got < 0 || 3 < got {
t.Errorf("Count: got %d, want [0,3]", got)
}
})
}
func TestProjection(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
parent := NewKey(ctx, "SQParent", "TestProjection"+suffix, 0, nil)
now := time.Now().Truncate(time.Millisecond).Unix()
children := []*SQChild{
{I: 1 << 0, J: 100, T: now, U: now},
{I: 1 << 1, J: 100, T: now, U: now},
{I: 1 << 2, J: 200, T: now, U: now},
{I: 1 << 3, J: 300, T: now, U: now},
{I: 1 << 4, J: 300, T: now, U: now},
}
baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Filter("J>", 150)
testSmallQueries(t, ctx, client, parent, children, []SQTestCase{
{
"project",
baseQuery.Project("J"),
3,
200 + 300 + 300,
},
{
"distinct",
baseQuery.Project("J").Distinct(),
2,
200 + 300,
},
{
"project on meaningful (GD_WHEN) field",
baseQuery.Project("U"),
3,
0,
},
})
}
func TestAllocateIDs(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
keys := make([]*Key, 5)
for i := range keys {
keys[i] = NewIncompleteKey(ctx, "AllocID", nil)
}
keys, err := client.AllocateIDs(ctx, keys)
if err != nil {
t.Errorf("AllocID #0 failed: %v", err)
}
	if got := len(keys); got != 5 {
		t.Errorf("Expected to allocate 5 keys, got %d", got)
	}
for _, k := range keys {
if k.Incomplete() {
t.Errorf("Unexpeceted incomplete key found: %v", k)
}
}
}
func TestGetAllWithFieldMismatch(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
type Fat struct {
X, Y int
}
type Thin struct {
X int
}
// Ancestor queries (those within an entity group) are strongly consistent
// by default, which prevents a test from being flaky.
// See https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Data_consistency
// for more information.
parent := NewKey(ctx, "SQParent", "TestGetAllWithFieldMismatch"+suffix, 0, nil)
putKeys := make([]*Key, 3)
for i := range putKeys {
putKeys[i] = NewKey(ctx, "GetAllThing", "", int64(10+i), parent)
_, err := client.Put(ctx, putKeys[i], &Fat{X: 20 + i, Y: 30 + i})
if err != nil {
t.Fatalf("client.Put: %v", err)
}
}
var got []Thin
want := []Thin{
{X: 20},
{X: 21},
{X: 22},
}
getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got)
	if len(getKeys) != 3 || !reflect.DeepEqual(getKeys, putKeys) {
t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want)
}
if _, ok := err.(*ErrFieldMismatch); !ok {
t.Errorf("client.GetAll: got err=%v, want ErrFieldMismatch", err)
}
}
func TestKindlessQueries(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
type Dee struct {
I int
Why string
}
type Dum struct {
I int
Pling string
}
parent := NewKey(ctx, "Tweedle", "tweedle"+suffix, 0, nil)
keys := []*Key{
NewKey(ctx, "Dee", "dee0", 0, parent),
NewKey(ctx, "Dum", "dum1", 0, parent),
NewKey(ctx, "Dum", "dum2", 0, parent),
NewKey(ctx, "Dum", "dum3", 0, parent),
}
src := []interface{}{
&Dee{1, "binary0001"},
&Dum{2, "binary0010"},
&Dum{4, "binary0100"},
&Dum{8, "binary1000"},
}
keys, err := client.PutMulti(ctx, keys, src)
if err != nil {
t.Fatalf("put: %v", err)
}
testCases := []struct {
desc string
query *Query
want []int
wantErr string
}{
{
desc: "Dee",
query: NewQuery("Dee"),
want: []int{1},
},
{
desc: "Doh",
query: NewQuery("Doh"),
want: nil},
{
desc: "Dum",
query: NewQuery("Dum"),
want: []int{2, 4, 8},
},
{
desc: "",
query: NewQuery(""),
want: []int{1, 2, 4, 8},
},
{
desc: "Kindless filter",
query: NewQuery("").Filter("__key__ =", keys[2]),
want: []int{4},
},
{
desc: "Kindless order",
query: NewQuery("").Order("__key__"),
want: []int{1, 2, 4, 8},
},
{
desc: "Kindless bad filter",
query: NewQuery("").Filter("I =", 4),
wantErr: "kind is required",
},
{
desc: "Kindless bad order",
query: NewQuery("").Order("-__key__"),
wantErr: "kind is required for all orders except __key__ ascending",
},
}
loop:
for _, tc := range testCases {
q := tc.query.Ancestor(parent)
gotCount, err := client.Count(ctx, q)
if err != nil {
if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) {
t.Errorf("count %q: err %v, want err %q", tc.desc, err, tc.wantErr)
}
continue
}
if tc.wantErr != "" {
t.Errorf("count %q: want err %q", tc.desc, tc.wantErr)
continue
}
if gotCount != len(tc.want) {
t.Errorf("count %q: got %d want %d", tc.desc, gotCount, len(tc.want))
continue
}
var got []int
for iter := client.Run(ctx, q); ; {
var dst struct {
I int
Why, Pling string
}
_, err := iter.Next(&dst)
if err == Done {
break
}
if err != nil {
t.Errorf("iter.Next %q: %v", tc.desc, err)
continue loop
}
got = append(got, dst.I)
}
sort.Ints(got)
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want)
continue
}
}
}
func TestTransaction(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
type Counter struct {
N int
T time.Time
}
bangErr := errors.New("bang")
tests := []struct {
desc string
causeConflict []bool
retErr []error
want int
wantErr error
}{
{
desc: "3 attempts, no conflicts",
causeConflict: []bool{false},
retErr: []error{nil},
want: 11,
},
{
desc: "1 attempt, user error",
causeConflict: []bool{false},
retErr: []error{bangErr},
wantErr: bangErr,
},
{
desc: "2 attempts, 1 conflict",
causeConflict: []bool{true, false},
retErr: []error{nil, nil},
want: 13, // Each conflict increments by 2.
},
{
desc: "3 attempts, 3 conflicts",
causeConflict: []bool{true, true, true},
retErr: []error{nil, nil, nil},
wantErr: ErrConcurrentTransaction,
},
}
for i, tt := range tests {
// Put a new counter.
c := &Counter{N: 10, T: time.Now()}
key, err := client.Put(ctx, NewIncompleteKey(ctx, "TransCounter", nil), c)
if err != nil {
t.Errorf("%s: client.Put: %v", tt.desc, err)
continue
}
defer client.Delete(ctx, key)
// Increment the counter in a transaction.
// The test case can manually cause a conflict or return an
// error at each attempt.
var attempts int
_, err = client.RunInTransaction(ctx, func(tx *Transaction) error {
attempts++
if attempts > len(tt.causeConflict) {
return fmt.Errorf("too many attempts. Got %d, max %d", attempts, len(tt.causeConflict))
}
var c Counter
if err := tx.Get(key, &c); err != nil {
return err
}
c.N++
if _, err := tx.Put(key, &c); err != nil {
return err
}
if tt.causeConflict[attempts-1] {
c.N += 1
if _, err := client.Put(ctx, key, &c); err != nil {
return err
}
}
return tt.retErr[attempts-1]
}, MaxAttempts(i))
// Check the error returned by RunInTransaction.
if err != tt.wantErr {
t.Errorf("%s: got err %v, want %v", tt.desc, err, tt.wantErr)
continue
}
if err != nil {
continue
}
// Check the final value of the counter.
if err := client.Get(ctx, key, c); err != nil {
t.Errorf("%s: client.Get: %v", tt.desc, err)
continue
}
if c.N != tt.want {
t.Errorf("%s: counter N=%d, want N=%d", tt.desc, c.N, tt.want)
}
}
}
func TestNilPointers(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
type X struct {
S string
}
src := []*X{{"zero"}, {"one"}}
keys := []*Key{NewIncompleteKey(ctx, "NilX", nil), NewIncompleteKey(ctx, "NilX", nil)}
keys, err := client.PutMulti(ctx, keys, src)
if err != nil {
t.Fatalf("PutMulti: %v", err)
}
// It's okay to store into a slice of nil *X.
xs := make([]*X, 2)
if err := client.GetMulti(ctx, keys, xs); err != nil {
t.Errorf("GetMulti: %v", err)
} else if !reflect.DeepEqual(xs, src) {
t.Errorf("GetMulti fetched %v, want %v", xs, src)
}
// It isn't okay to store into a single nil *X.
var x0 *X
if err, want := client.Get(ctx, keys[0], x0), ErrInvalidEntityType; err != want {
t.Errorf("Get: err %v; want %v", err, want)
}
if err := client.DeleteMulti(ctx, keys); err != nil {
t.Errorf("Delete: %v", err)
}
}
func TestNestedRepeatedElementNoIndex(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
defer client.Close()
type Inner struct {
Name string
Value string `datastore:",noindex"`
}
type Outer struct {
Config []Inner
}
m := &Outer{
Config: []Inner{
{Name: "short", Value: "a"},
{Name: "long", Value: strings.Repeat("a", 2000)},
},
}
key := NewKey(ctx, "Nested", "Nested"+suffix, 0, nil)
if _, err := client.Put(ctx, key, m); err != nil {
t.Fatalf("client.Put: %v", err)
}
if err := client.Delete(ctx, key); err != nil {
t.Fatalf("client.Delete: %v", err)
}
}

279
vendor/cloud.google.com/go/datastore/key.go generated vendored Normal file
View File

@@ -0,0 +1,279 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"bytes"
"encoding/base64"
"encoding/gob"
"errors"
"strconv"
"strings"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
// Key represents the datastore key for a stored entity, and is immutable.
type Key struct {
kind string
id int64
name string
parent *Key
namespace string
}
func (k *Key) Kind() string {
return k.kind
}
func (k *Key) ID() int64 {
return k.id
}
func (k *Key) Name() string {
return k.name
}
func (k *Key) Parent() *Key {
return k.parent
}
func (k *Key) SetParent(v *Key) {
if v.Incomplete() {
panic("can't set an incomplete key as parent")
}
k.parent = v
}
func (k *Key) Namespace() string {
return k.namespace
}
// Incomplete returns whether the key does not refer to a stored entity.
func (k *Key) Incomplete() bool {
return k.name == "" && k.id == 0
}
// valid returns whether the key is valid.
func (k *Key) valid() bool {
if k == nil {
return false
}
for ; k != nil; k = k.parent {
if k.kind == "" {
return false
}
if k.name != "" && k.id != 0 {
return false
}
if k.parent != nil {
if k.parent.Incomplete() {
return false
}
if k.parent.namespace != k.namespace {
return false
}
}
}
return true
}
func (k *Key) Equal(o *Key) bool {
for {
if k == nil || o == nil {
return k == o // if either is nil, both must be nil
}
if k.namespace != o.namespace || k.name != o.name || k.id != o.id || k.kind != o.kind {
return false
}
if k.parent == nil && o.parent == nil {
return true
}
k = k.parent
o = o.parent
}
}
// marshal marshals the key's string representation to the buffer.
func (k *Key) marshal(b *bytes.Buffer) {
if k.parent != nil {
k.parent.marshal(b)
}
b.WriteByte('/')
b.WriteString(k.kind)
b.WriteByte(',')
if k.name != "" {
b.WriteString(k.name)
} else {
b.WriteString(strconv.FormatInt(k.id, 10))
}
}
// String returns a string representation of the key.
func (k *Key) String() string {
if k == nil {
return ""
}
b := bytes.NewBuffer(make([]byte, 0, 512))
k.marshal(b)
return b.String()
}
// Note: fields are not renamed compared to the appengine gobKey struct.
// This ensures gobs created by appengine can be read here, and vice versa.
type gobKey struct {
Kind string
StringID string
IntID int64
Parent *gobKey
AppID string
Namespace string
}
func keyToGobKey(k *Key) *gobKey {
if k == nil {
return nil
}
return &gobKey{
Kind: k.kind,
StringID: k.name,
IntID: k.id,
Parent: keyToGobKey(k.parent),
Namespace: k.namespace,
}
}
func gobKeyToKey(gk *gobKey) *Key {
if gk == nil {
return nil
}
return &Key{
kind: gk.Kind,
name: gk.StringID,
id: gk.IntID,
parent: gobKeyToKey(gk.Parent),
namespace: gk.Namespace,
}
}
func (k *Key) GobEncode() ([]byte, error) {
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func (k *Key) GobDecode(buf []byte) error {
gk := new(gobKey)
if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
return err
}
*k = *gobKeyToKey(gk)
return nil
}
func (k *Key) MarshalJSON() ([]byte, error) {
return []byte(`"` + k.Encode() + `"`), nil
}
func (k *Key) UnmarshalJSON(buf []byte) error {
if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
return errors.New("datastore: bad JSON key")
}
k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
if err != nil {
return err
}
*k = *k2
return nil
}
// Encode returns an opaque representation of the key
// suitable for use in HTML and URLs.
// This is compatible with the Python and Java runtimes.
func (k *Key) Encode() string {
pKey := keyToProto(k)
b, err := proto.Marshal(pKey)
if err != nil {
panic(err)
}
// Trailing padding is stripped.
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// DecodeKey decodes a key from the opaque representation returned by Encode.
func DecodeKey(encoded string) (*Key, error) {
// Re-add padding.
if m := len(encoded) % 4; m != 0 {
encoded += strings.Repeat("=", 4-m)
}
b, err := base64.URLEncoding.DecodeString(encoded)
if err != nil {
return nil, err
}
pKey := new(pb.Key)
if err := proto.Unmarshal(b, pKey); err != nil {
return nil, err
}
return protoToKey(pKey)
}
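A quick round-trip sketch, not part of this file, showing Encode and DecodeKey together as used for passing keys through URLs; ctx stands in for any context carrying the desired namespace:

package datastoredemo

import (
	"errors"

	"cloud.google.com/go/datastore"
	"golang.org/x/net/context"
)

// keyRoundTrip encodes a key to its URL-safe form and decodes it back.
func keyRoundTrip(ctx context.Context) error {
	k := datastore.NewKey(ctx, "Task", "sampletask", 0, nil)
	enc := k.Encode() // URL-safe base64 with the padding stripped
	k2, err := datastore.DecodeKey(enc)
	if err != nil {
		return err
	}
	if !k.Equal(k2) {
		return errors.New("round trip changed the key")
	}
	return nil
}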
// NewIncompleteKey creates a new incomplete key.
// kind cannot be empty.
func NewIncompleteKey(ctx context.Context, kind string, parent *Key) *Key {
return NewKey(ctx, kind, "", 0, parent)
}
// NewKey creates a new key.
// kind cannot be empty.
// At least one of name and id must be zero. If both are zero, the key returned
// is incomplete.
// parent must either be a complete key or nil.
func NewKey(ctx context.Context, kind, name string, id int64, parent *Key) *Key {
return &Key{
kind: kind,
name: name,
id: id,
parent: parent,
namespace: ctxNamespace(ctx),
}
}
// AllocateIDs accepts a slice of incomplete keys and returns a
// slice of complete keys that are guaranteed to be valid in the datastore.
func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) {
if keys == nil {
return nil, nil
}
req := &pb.AllocateIdsRequest{
ProjectId: c.dataset,
Keys: multiKeyToProto(keys),
}
resp, err := c.client.AllocateIds(ctx, req)
if err != nil {
return nil, err
}
return multiProtoToKey(resp.Keys)
}
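A hedged usage sketch, not part of the vendored source: reserve complete keys with AllocateIDs so their IDs can be referenced before anything is written, then store with PutMulti. The Task type here is a placeholder:

package datastoredemo

import (
	"cloud.google.com/go/datastore"
	"golang.org/x/net/context"
)

type Task struct {
	Description string
}

// createTasks reserves complete keys for the tasks without writing anything,
// then stores all of them in one batch.
func createTasks(ctx context.Context, client *datastore.Client, tasks []*Task) ([]*datastore.Key, error) {
	keys := make([]*datastore.Key, len(tasks))
	for i := range keys {
		keys[i] = datastore.NewIncompleteKey(ctx, "Task", nil)
	}
	keys, err := client.AllocateIDs(ctx, keys)
	if err != nil {
		return nil, err
	}
	return client.PutMulti(ctx, keys, tasks)
}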

242
vendor/cloud.google.com/go/datastore/key_test.go generated vendored Normal file
View File

@@ -0,0 +1,242 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"bytes"
"encoding/gob"
"encoding/json"
"testing"
"golang.org/x/net/context"
)
func TestNamespace(t *testing.T) {
c := context.Background()
k := NewIncompleteKey(c, "foo", nil)
if got, want := k.Namespace(), ""; got != want {
t.Errorf("No namespace, k.Namespace() = %q, want %q", got, want)
}
c = WithNamespace(c, "gopherspace")
k = NewIncompleteKey(c, "foo", nil)
if got, want := k.Namespace(), "gopherspace"; got != want {
t.Errorf("No namespace, k.Namespace() = %q, want %q", got, want)
}
}
func TestParent(t *testing.T) {
c := context.Background()
k := NewIncompleteKey(c, "foo", nil)
par := NewKey(c, "foomum", "", 1248, nil)
k.SetParent(par)
if got := k.Parent(); got != par {
t.Errorf("k.Parent() = %v; want %v", got, par)
}
}
func TestEqual(t *testing.T) {
c := context.Background()
cN := WithNamespace(c, "gopherspace")
testCases := []struct {
x, y *Key
equal bool
}{
{
x: nil,
y: nil,
equal: true,
},
{
x: NewKey(c, "kindA", "", 0, nil),
y: NewIncompleteKey(c, "kindA", nil),
equal: true,
},
{
x: NewKey(c, "kindA", "nameA", 0, nil),
y: NewKey(c, "kindA", "nameA", 0, nil),
equal: true,
},
{
x: NewKey(cN, "kindA", "nameA", 0, nil),
y: NewKey(cN, "kindA", "nameA", 0, nil),
equal: true,
},
{
x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)),
y: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)),
equal: true,
},
{
x: NewKey(c, "kindA", "nameA", 0, nil),
y: NewKey(c, "kindB", "nameA", 0, nil),
equal: false,
},
{
x: NewKey(c, "kindA", "nameA", 0, nil),
y: NewKey(c, "kindA", "nameB", 0, nil),
equal: false,
},
{
x: NewKey(c, "kindA", "nameA", 0, nil),
y: NewKey(c, "kindA", "", 1337, nil),
equal: false,
},
{
x: NewKey(c, "kindA", "nameA", 0, nil),
y: NewKey(cN, "kindA", "nameA", 0, nil),
equal: false,
},
{
x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)),
y: NewKey(c, "kindA", "", 1337, NewKey(c, "kindY", "nameX", 0, nil)),
equal: false,
},
{
x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)),
y: NewKey(c, "kindA", "", 1337, nil),
equal: false,
},
}
for _, tt := range testCases {
if got := tt.x.Equal(tt.y); got != tt.equal {
t.Errorf("Equal(%v, %v) = %t; want %t", tt.x, tt.y, got, tt.equal)
}
if got := tt.y.Equal(tt.x); got != tt.equal {
t.Errorf("Equal(%v, %v) = %t; want %t", tt.y, tt.x, got, tt.equal)
}
}
}
func TestEncoding(t *testing.T) {
c := context.Background()
cN := WithNamespace(c, "gopherspace")
testCases := []struct {
k *Key
valid bool
}{
{
k: nil,
valid: false,
},
{
k: NewKey(c, "", "", 0, nil),
valid: false,
},
{
k: NewKey(c, "kindA", "", 0, nil),
valid: true,
},
{
k: NewKey(cN, "kindA", "", 0, nil),
valid: true,
},
{
k: NewKey(c, "kindA", "nameA", 0, nil),
valid: true,
},
{
k: NewKey(c, "kindA", "", 1337, nil),
valid: true,
},
{
k: NewKey(c, "kindA", "nameA", 1337, nil),
valid: false,
},
{
k: NewKey(c, "kindA", "", 0, NewKey(c, "kindB", "nameB", 0, nil)),
valid: true,
},
{
k: NewKey(c, "kindA", "", 0, NewKey(c, "kindB", "", 0, nil)),
valid: false,
},
{
k: NewKey(c, "kindA", "", 0, NewKey(cN, "kindB", "nameB", 0, nil)),
valid: false,
},
}
for _, tt := range testCases {
if got := tt.k.valid(); got != tt.valid {
t.Errorf("valid(%v) = %t; want %t", tt.k, got, tt.valid)
}
// Check encoding/decoding for valid keys.
if !tt.valid {
continue
}
enc := tt.k.Encode()
dec, err := DecodeKey(enc)
if err != nil {
t.Errorf("DecodeKey(%q) from %v: %v", enc, tt.k, err)
continue
}
if !tt.k.Equal(dec) {
t.Logf("Proto: %s", keyToProto(tt.k))
t.Errorf("Decoded key %v not equal to %v", dec, tt.k)
}
b, err := json.Marshal(tt.k)
if err != nil {
t.Errorf("json.Marshal(%v): %v", tt.k, err)
continue
}
key := &Key{}
if err := json.Unmarshal(b, key); err != nil {
t.Errorf("json.Unmarshal(%s) for key %v: %v", b, tt.k, err)
continue
}
if !tt.k.Equal(key) {
t.Errorf("JSON decoded key %v not equal to %v", dec, tt.k)
}
buf := &bytes.Buffer{}
gobEnc := gob.NewEncoder(buf)
if err := gobEnc.Encode(tt.k); err != nil {
t.Errorf("gobEnc.Encode(%v): %v", tt.k, err)
continue
}
gobDec := gob.NewDecoder(buf)
key = &Key{}
if err := gobDec.Decode(key); err != nil {
t.Errorf("gobDec.Decode() for key %v: %v", tt.k, err)
}
if !tt.k.Equal(key) {
t.Errorf("gob decoded key %v not equal to %v", dec, tt.k)
}
}
}
func TestInvalidKeyDecode(t *testing.T) {
// Check that decoding an invalid key returns an err and doesn't panic.
enc := NewKey(context.Background(), "Kind", "Foo", 0, nil).Encode()
invalid := []string{
"",
"Laboratorio",
enc + "Junk",
enc[:len(enc)-4],
}
for _, enc := range invalid {
key, err := DecodeKey(enc)
if err == nil || key != nil {
t.Errorf("DecodeKey(%q) = %v, %v; want nil, error", enc, key, err)
}
}
}

vendor/cloud.google.com/go/datastore/load.go generated vendored Normal file

@@ -0,0 +1,383 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"fmt"
"reflect"
"strings"
"time"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
var (
typeOfByteSlice = reflect.TypeOf([]byte(nil))
typeOfTime = reflect.TypeOf(time.Time{})
typeOfGeoPoint = reflect.TypeOf(GeoPoint{})
typeOfKeyPtr = reflect.TypeOf(&Key{})
typeOfEntityPtr = reflect.TypeOf(&Entity{})
)
// typeMismatchReason returns a string explaining why the property p could not
// be stored in an entity field of type v.Type().
func typeMismatchReason(p Property, v reflect.Value) string {
entityType := "empty"
switch p.Value.(type) {
case int64:
entityType = "int"
case bool:
entityType = "bool"
case string:
entityType = "string"
case float64:
entityType = "float"
case *Key:
entityType = "*datastore.Key"
case GeoPoint:
entityType = "GeoPoint"
case time.Time:
entityType = "time.Time"
case []byte:
entityType = "[]byte"
}
return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
}
type propertyLoader struct {
// m holds the number of times a substruct field like "Foo.Bar.Baz" has
// been seen so far. The map is constructed lazily.
m map[string]int
}
func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, prev map[string]struct{}) string {
sl, ok := p.Value.([]interface{})
if !ok {
return l.loadOneElement(codec, structValue, p, prev)
}
for _, val := range sl {
p.Value = val
if errStr := l.loadOneElement(codec, structValue, p, prev); errStr != "" {
return errStr
}
}
return ""
}
// loadOneElement loads the value of Property p into structValue based on the provided
// codec. codec is used to find the field in structValue into which p should be loaded.
// prev is the set of property names already seen for structValue.
func (l *propertyLoader) loadOneElement(codec *structCodec, structValue reflect.Value, p Property, prev map[string]struct{}) string {
var sliceOk bool
var sliceIndex int
var v reflect.Value
name := p.Name
for name != "" {
// First we try to find a field with name matching
// the value of 'name' exactly.
decoder, ok := codec.fields[name]
if ok {
name = ""
} else {
// Now try for legacy flattened nested field (named eg. "A.B.C.D").
parent := name
child := ""
// Cut off the last field (delimited by ".") and find its parent
// in the codec.
// eg. for name "A.B.C.D", split off "A.B.C" and try to
// find a field in the codec with this name.
// Loop again with "A.B", etc.
for !ok {
i := strings.LastIndex(parent, ".")
if i < 0 {
return "no such struct field"
}
if i == len(name)-1 {
return "field name cannot end with '.'"
}
parent, child = name[:i], name[i+1:]
decoder, ok = codec.fields[parent]
}
name = child
}
v = initField(structValue, decoder.path)
if !v.IsValid() {
return "no such struct field"
}
if !v.CanSet() {
return "cannot set struct field"
}
if decoder.structCodec != nil {
codec = decoder.structCodec
structValue = v
}
// If the element is a slice, we need to accommodate it.
if v.Kind() == reflect.Slice {
if l.m == nil {
l.m = make(map[string]int)
}
sliceIndex = l.m[p.Name]
l.m[p.Name] = sliceIndex + 1
for v.Len() <= sliceIndex {
v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
}
structValue = v.Index(sliceIndex)
sliceOk = true
}
}
var slice reflect.Value
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
slice = v
v = reflect.New(v.Type().Elem()).Elem()
} else if _, ok := prev[p.Name]; ok && !sliceOk {
// Zero out the field that was set previously; it turns out
// it's a slice and we don't know what to do with it.
v.Set(reflect.Zero(v.Type()))
return "multiple-valued property requires a slice field type"
}
prev[p.Name] = struct{}{}
if errReason := setVal(v, p); errReason != "" {
// Set the slice back to its zero value.
if slice.IsValid() {
slice.Set(reflect.Zero(slice.Type()))
}
return errReason
}
if slice.IsValid() {
slice.Index(sliceIndex).Set(v)
}
return ""
}
// setVal sets 'v' to the value of the Property 'p'.
func setVal(v reflect.Value, p Property) string {
pValue := p.Value
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x, ok := pValue.(int64)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if v.OverflowInt(x) {
return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
}
v.SetInt(x)
case reflect.Bool:
x, ok := pValue.(bool)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.SetBool(x)
case reflect.String:
x, ok := pValue.(string)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.SetString(x)
case reflect.Float32, reflect.Float64:
x, ok := pValue.(float64)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if v.OverflowFloat(x) {
return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
}
v.SetFloat(x)
case reflect.Ptr:
x, ok := pValue.(*Key)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if _, ok := v.Interface().(*Key); !ok {
return typeMismatchReason(p, v)
}
v.Set(reflect.ValueOf(x))
case reflect.Struct:
switch v.Type() {
case typeOfTime:
x, ok := pValue.(time.Time)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.Set(reflect.ValueOf(x))
case typeOfGeoPoint:
x, ok := pValue.(GeoPoint)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.Set(reflect.ValueOf(x))
default:
ent, ok := pValue.(*Entity)
if !ok {
return typeMismatchReason(p, v)
}
// Recursively load nested struct
pls, err := newStructPLS(v.Addr().Interface())
if err != nil {
return err.Error()
}
// if ent has a Key value and our struct has a Key field,
// load the Entity's Key value into the Key field on the struct.
if ent.Key != nil && pls.codec.keyField != -1 {
pls.v.Field(pls.codec.keyField).Set(reflect.ValueOf(ent.Key))
}
err = pls.Load(ent.Properties)
if err != nil {
return err.Error()
}
}
case reflect.Slice:
x, ok := pValue.([]byte)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if v.Type().Elem().Kind() != reflect.Uint8 {
return typeMismatchReason(p, v)
}
v.SetBytes(x)
default:
return typeMismatchReason(p, v)
}
return ""
}
// initField is similar to reflect's Value.FieldByIndex, in that it
// returns the nested struct field corresponding to index, but it
// initialises any nil pointers encountered when traversing the structure.
func initField(val reflect.Value, index []int) reflect.Value {
for _, i := range index[:len(index)-1] {
val = val.Field(i)
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
}
return val.Field(index[len(index)-1])
}
// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer.
func loadEntity(dst interface{}, src *pb.Entity) (err error) {
ent, err := protoToEntity(src)
if err != nil {
return err
}
if e, ok := dst.(PropertyLoadSaver); ok {
return e.Load(ent.Properties)
}
return LoadStruct(dst, ent.Properties)
}
func (s structPLS) Load(props []Property) error {
var fieldName, errReason string
var l propertyLoader
prev := make(map[string]struct{})
for _, p := range props {
if errStr := l.load(s.codec, s.v, p, prev); errStr != "" {
// We don't return early, as we try to load as many properties as possible.
// It is valid to load an entity into a struct that cannot fully represent it.
// That case returns an error, but the caller is free to ignore it.
fieldName, errReason = p.Name, errStr
}
}
if errReason != "" {
return &ErrFieldMismatch{
StructType: s.v.Type(),
FieldName: fieldName,
Reason: errReason,
}
}
return nil
}
func protoToEntity(src *pb.Entity) (*Entity, error) {
props := make([]Property, 0, len(src.Properties))
for name, val := range src.Properties {
v, err := propToValue(val)
if err != nil {
return nil, err
}
props = append(props, Property{
Name: name,
Value: v,
NoIndex: val.ExcludeFromIndexes,
})
}
var key *Key
if src.Key != nil {
// Ignore any error, since nested entity values
// are allowed to have an invalid key.
key, _ = protoToKey(src.Key)
}
return &Entity{key, props}, nil
}
// propToValue returns a Go value that represents the PropertyValue. For
// example, a TimestampValue becomes a time.Time.
func propToValue(v *pb.Value) (interface{}, error) {
switch v := v.ValueType.(type) {
case *pb.Value_NullValue:
return nil, nil
case *pb.Value_BooleanValue:
return v.BooleanValue, nil
case *pb.Value_IntegerValue:
return v.IntegerValue, nil
case *pb.Value_DoubleValue:
return v.DoubleValue, nil
case *pb.Value_TimestampValue:
return time.Unix(v.TimestampValue.Seconds, int64(v.TimestampValue.Nanos)), nil
case *pb.Value_KeyValue:
return protoToKey(v.KeyValue)
case *pb.Value_StringValue:
return v.StringValue, nil
case *pb.Value_BlobValue:
return []byte(v.BlobValue), nil
case *pb.Value_GeoPointValue:
return GeoPoint{Lat: v.GeoPointValue.Latitude, Lng: v.GeoPointValue.Longitude}, nil
case *pb.Value_EntityValue:
return protoToEntity(v.EntityValue)
case *pb.Value_ArrayValue:
arr := make([]interface{}, 0, len(v.ArrayValue.Values))
for _, v := range v.ArrayValue.Values {
vv, err := propToValue(v)
if err != nil {
return nil, err
}
arr = append(arr, vv)
}
return arr, nil
default:
return nil, nil
}
}

vendor/cloud.google.com/go/datastore/load_test.go generated vendored Normal file

@@ -0,0 +1,414 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"reflect"
"testing"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
type Simple struct {
I int64
}
type SimpleWithTag struct {
I int64 `datastore:"II"`
}
type NestedSimpleWithTag struct {
A SimpleWithTag `datastore:"AA"`
}
type NestedSliceOfSimple struct {
A []Simple
}
type SimpleTwoFields struct {
S string
SS string
}
type NestedSimpleAnonymous struct {
Simple
X string
}
type NestedSimple struct {
A Simple
I int
}
type NestedSimple1 struct {
A Simple
X string
}
type NestedSimple2X struct {
AA NestedSimple
A SimpleTwoFields
S string
}
type BDotB struct {
B string `datastore:"B.B"`
}
type ABDotB struct {
A BDotB
}
type MultiAnonymous struct {
Simple
SimpleTwoFields
X string
}
func TestLoadEntityNestedLegacy(t *testing.T) {
testCases := []struct {
desc string
src *pb.Entity
want interface{}
}{
{
"nested",
&pb.Entity{
Key: keyToProto(testKey0),
Properties: map[string]*pb.Value{
"X": {ValueType: &pb.Value_StringValue{"two"}},
"A.I": {ValueType: &pb.Value_IntegerValue{2}},
},
},
&NestedSimple1{
A: Simple{I: 2},
X: "two",
},
},
{
"nested with tag",
&pb.Entity{
Key: keyToProto(testKey0),
Properties: map[string]*pb.Value{
"AA.II": {ValueType: &pb.Value_IntegerValue{2}},
},
},
&NestedSimpleWithTag{
A: SimpleWithTag{I: 2},
},
},
{
"nested with anonymous struct field",
&pb.Entity{
Key: keyToProto(testKey0),
Properties: map[string]*pb.Value{
"X": {ValueType: &pb.Value_StringValue{"two"}},
"I": {ValueType: &pb.Value_IntegerValue{2}},
},
},
&NestedSimpleAnonymous{
Simple: Simple{I: 2},
X: "two",
},
},
{
"nested with dotted field tag",
&pb.Entity{
Key: keyToProto(testKey0),
Properties: map[string]*pb.Value{
"A.B.B": {ValueType: &pb.Value_StringValue{"bb"}},
},
},
&ABDotB{
A: BDotB{
B: "bb",
},
},
},
{
"nested with multiple anonymous fields",
&pb.Entity{
Key: keyToProto(testKey0),
Properties: map[string]*pb.Value{
"I": {ValueType: &pb.Value_IntegerValue{3}},
"S": {ValueType: &pb.Value_StringValue{"S"}},
"SS": {ValueType: &pb.Value_StringValue{"s"}},
"X": {ValueType: &pb.Value_StringValue{"s"}},
},
},
&MultiAnonymous{
Simple: Simple{I: 3},
SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"},
X: "s",
},
},
}
for _, tc := range testCases {
dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
err := loadEntity(dst, tc.src)
if err != nil {
t.Errorf("loadEntity: %s: %v", tc.desc, err)
continue
}
if !reflect.DeepEqual(tc.want, dst) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
}
}
}
type WithKey struct {
X string
I int
K *Key `datastore:"__key__"`
}
type NestedWithKey struct {
Y string
N WithKey
}
var (
incompleteKey = newKey("", nil)
invalidKey = newKey("s", incompleteKey)
)
func TestLoadEntityNested(t *testing.T) {
testCases := []struct {
desc string
src *pb.Entity
want interface{}
}{
{
"nested basic",
&pb.Entity{
Properties: map[string]*pb.Value{
"A": {ValueType: &pb.Value_EntityValue{
&pb.Entity{
Properties: map[string]*pb.Value{
"I": {ValueType: &pb.Value_IntegerValue{3}},
},
},
}},
"I": {ValueType: &pb.Value_IntegerValue{10}},
},
},
&NestedSimple{
A: Simple{I: 3},
I: 10,
},
},
{
"nested with struct tags",
&pb.Entity{
Properties: map[string]*pb.Value{
"AA": {ValueType: &pb.Value_EntityValue{
&pb.Entity{
Properties: map[string]*pb.Value{
"II": {ValueType: &pb.Value_IntegerValue{1}},
},
},
}},
},
},
&NestedSimpleWithTag{
A: SimpleWithTag{I: 1},
},
},
{
"nested 2x",
&pb.Entity{
Properties: map[string]*pb.Value{
"AA": {ValueType: &pb.Value_EntityValue{
&pb.Entity{
Properties: map[string]*pb.Value{
"A": {ValueType: &pb.Value_EntityValue{
&pb.Entity{
Properties: map[string]*pb.Value{
"I": {ValueType: &pb.Value_IntegerValue{3}},
},
},
}},
"I": {ValueType: &pb.Value_IntegerValue{1}},
},
},
}},
"A": {ValueType: &pb.Value_EntityValue{
&pb.Entity{
Properties: map[string]*pb.Value{
"S": {ValueType: &pb.Value_StringValue{"S"}},
"SS": {ValueType: &pb.Value_StringValue{"s"}},
},
},
}},
"S": {ValueType: &pb.Value_StringValue{"SS"}},
},
},
&NestedSimple2X{
AA: NestedSimple{
A: Simple{I: 3},
I: 1,
},
A: SimpleTwoFields{S: "S", SS: "s"},
S: "SS",
},
},
{
"nested anonymous",
&pb.Entity{
Properties: map[string]*pb.Value{
"I": {ValueType: &pb.Value_IntegerValue{3}},
"X": {ValueType: &pb.Value_StringValue{"SomeX"}},
},
},
&NestedSimpleAnonymous{
Simple: Simple{I: 3},
X: "SomeX",
},
},
{
"nested simple with slice",
&pb.Entity{
Properties: map[string]*pb.Value{
"A": {ValueType: &pb.Value_ArrayValue{
&pb.ArrayValue{
[]*pb.Value{
{ValueType: &pb.Value_EntityValue{
&pb.Entity{
Properties: map[string]*pb.Value{
"I": {ValueType: &pb.Value_IntegerValue{3}},
},
},
}},
{ValueType: &pb.Value_EntityValue{
&pb.Entity{
Properties: map[string]*pb.Value{
"I": {ValueType: &pb.Value_IntegerValue{4}},
},
},
}},
},
},
}},
},
},
&NestedSliceOfSimple{
A: []Simple{Simple{I: 3}, Simple{I: 4}},
},
},
{
"nested with multiple anonymous fields",
&pb.Entity{
Properties: map[string]*pb.Value{
"I": {ValueType: &pb.Value_IntegerValue{3}},
"S": {ValueType: &pb.Value_StringValue{"S"}},
"SS": {ValueType: &pb.Value_StringValue{"s"}},
"X": {ValueType: &pb.Value_StringValue{"ss"}},
},
},
&MultiAnonymous{
Simple: Simple{I: 3},
SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"},
X: "ss",
},
},
{
"nested with dotted field tag",
&pb.Entity{
Properties: map[string]*pb.Value{
"A": {ValueType: &pb.Value_EntityValue{
&pb.Entity{
Properties: map[string]*pb.Value{
"B.B": {ValueType: &pb.Value_StringValue{"bb"}},
},
},
}},
},
},
&ABDotB{
A: BDotB{
B: "bb",
},
},
},
{
"nested entity with key",
&pb.Entity{
Key: keyToProto(testKey0),
Properties: map[string]*pb.Value{
"Y": {ValueType: &pb.Value_StringValue{"yyy"}},
"N": {ValueType: &pb.Value_EntityValue{
&pb.Entity{
Key: keyToProto(testKey1a),
Properties: map[string]*pb.Value{
"X": {ValueType: &pb.Value_StringValue{"two"}},
"I": {ValueType: &pb.Value_IntegerValue{2}},
},
},
}},
},
},
&NestedWithKey{
Y: "yyy",
N: WithKey{
X: "two",
I: 2,
K: testKey1a,
},
},
},
{
"nested entity with invalid key",
&pb.Entity{
Key: keyToProto(testKey0),
Properties: map[string]*pb.Value{
"Y": {ValueType: &pb.Value_StringValue{"yyy"}},
"N": {ValueType: &pb.Value_EntityValue{
&pb.Entity{
Key: keyToProto(invalidKey),
Properties: map[string]*pb.Value{
"X": {ValueType: &pb.Value_StringValue{"two"}},
"I": {ValueType: &pb.Value_IntegerValue{2}},
},
},
}},
},
},
&NestedWithKey{
Y: "yyy",
N: WithKey{
X: "two",
I: 2,
K: invalidKey,
},
},
},
}
for _, tc := range testCases {
dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
err := loadEntity(dst, tc.src)
if err != nil {
t.Errorf("loadEntity: %s: %v", tc.desc, err)
continue
}
if !reflect.DeepEqual(tc.want, dst) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
}
}
}

vendor/cloud.google.com/go/datastore/prop.go generated vendored Normal file

@@ -0,0 +1,324 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"fmt"
"reflect"
"strings"
"sync"
"unicode"
)
// Entities with more than this many indexed properties will not be saved.
const maxIndexedProperties = 20000
// []byte fields more than 1 megabyte long will not be loaded or saved.
const maxBlobLen = 1 << 20
// Property is a name/value pair plus some metadata. A datastore entity's
// contents are loaded and saved as a sequence of Properties. Each property
// name must be unique within an entity.
type Property struct {
// Name is the property name.
Name string
// Value is the property value. The valid types are:
// - int64
// - bool
// - string
// - float64
// - *Key
// - time.Time
// - GeoPoint
// - []byte (up to 1 megabyte in length)
// - *Entity (representing a nested struct)
// Value can also be:
// - []interface{} where each element is one of the above types
// This set is smaller than the set of valid struct field types that the
// datastore can load and save. A Value's type must be explicitly on
// the list above; it is not sufficient for the underlying type to be
// on that list. For example, a Value of "type myInt64 int64" is
// invalid. Smaller-width integers and floats are also invalid. Again,
// this is more restrictive than the set of valid struct field types.
//
// A Value will have an opaque type when loading entities from an index,
// such as via a projection query. Load entities into a struct instead
// of a PropertyLoadSaver when using a projection query.
//
// A Value may also be the nil interface value; this is equivalent to
// Python's None but not directly representable by a Go struct. Loading
// a nil-valued property into a struct will set that field to the zero
// value.
Value interface{}
// NoIndex is whether the datastore cannot index this property.
// If NoIndex is set to false, []byte and string values are limited to
// 1500 bytes.
NoIndex bool
}
// An Entity is the value type for a nested struct.
// This type is only used for a Property's Value.
type Entity struct {
Key *Key
Properties []Property
}
// PropertyLoadSaver can be converted from and to a slice of Properties.
type PropertyLoadSaver interface {
Load([]Property) error
Save() ([]Property, error)
}
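// Illustrative sketch (not part of the upstream file): a hypothetical type
// customizes its stored form by implementing PropertyLoadSaver, saving a
// derived property alongside its fields and ignoring it again on load:
//
//	type Post struct {
//		Title string
//	}
//
//	func (p *Post) Load(props []Property) error {
//		err := LoadStruct(p, props)
//		if _, ok := err.(*ErrFieldMismatch); ok {
//			return nil // the derived property has no struct field
//		}
//		return err
//	}
//
//	func (p *Post) Save() ([]Property, error) {
//		props, err := SaveStruct(p)
//		if err != nil {
//			return nil, err
//		}
//		return append(props, Property{Name: "TitleLen", Value: int64(len(p.Title))}), nil
//	}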
// PropertyList converts a []Property to implement PropertyLoadSaver.
type PropertyList []Property
var (
typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
typeOfPropertyList = reflect.TypeOf(PropertyList(nil))
)
// Load loads all of the provided properties into l.
// It does not first reset *l to an empty slice.
func (l *PropertyList) Load(p []Property) error {
*l = append(*l, p...)
return nil
}
// Save saves all of l's properties as a slice of Properties.
func (l *PropertyList) Save() ([]Property, error) {
return *l, nil
}
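// Illustrative sketch (not part of the upstream file): PropertyList is useful
// for inspecting an entity whose schema is unknown; the client value and its
// Get method are assumed:
//
//	var pl PropertyList
//	if err := client.Get(ctx, key, &pl); err == nil {
//		for _, p := range pl {
//			fmt.Printf("%s = %v (noindex=%t)\n", p.Name, p.Value, p.NoIndex)
//		}
//	}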
// validPropertyName returns whether name consists of one or more valid Go
// identifiers joined by ".".
func validPropertyName(name string) bool {
if name == "" {
return false
}
for _, s := range strings.Split(name, ".") {
if s == "" {
return false
}
first := true
for _, c := range s {
if first {
first = false
if c != '_' && !unicode.IsLetter(c) {
return false
}
} else {
if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
}
return true
}
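// For example (illustrative, not part of the upstream file):
//
//	validPropertyName("A")     // true
//	validPropertyName("A.B.C") // true
//	validPropertyName("_a1")   // true
//	validPropertyName("1a")    // false: identifiers cannot start with a digit
//	validPropertyName("A..B")  // false: empty component between dots
//	validPropertyName("")      // false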
// structCodec describes how to convert a struct to and from a sequence of
// properties.
type structCodec struct {
// fields gives the field codec for the structTag with the given name.
fields map[string]fieldCodec
// hasSlice is whether a struct or any of its nested or embedded structs
// has a slice-typed field (other than []byte).
hasSlice bool
// keyField is the index of a *Key field with structTag __key__.
// This field is not relevant for the top level struct, only for
// nested structs.
keyField int
// complete is whether the structCodec is complete. An incomplete
// structCodec may be encountered when walking a recursive struct.
complete bool
}
// fieldCodec is a struct field's index and, if that struct field's type is
// itself a struct, that substruct's structCodec.
type fieldCodec struct {
// path is the index path to the field
path []int
noIndex bool
// structCodec is the codec for the struct field at index 'path',
// or nil if the field is not a struct.
structCodec *structCodec
}
// structCodecs collects the structCodecs that have already been calculated.
var (
structCodecsMutex sync.Mutex
structCodecs = make(map[reflect.Type]*structCodec)
)
// getStructCodec returns the structCodec for the given struct type.
func getStructCodec(t reflect.Type) (*structCodec, error) {
structCodecsMutex.Lock()
defer structCodecsMutex.Unlock()
return getStructCodecLocked(t)
}
// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
// be held when calling this function.
func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
c, ok := structCodecs[t]
if ok {
return c, nil
}
c = &structCodec{
fields: make(map[string]fieldCodec),
// We initialize keyField to -1 so that the zero-value is not
// misinterpreted as index 0.
keyField: -1,
}
// Add c to the structCodecs map before we are sure it is good. If t is
// a recursive type, it needs to find the incomplete entry for itself in
// the map.
structCodecs[t] = c
defer func() {
if retErr != nil {
delete(structCodecs, t)
}
}()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
// Skip unexported fields.
// Note that if f is an anonymous, unexported struct field,
// we will not promote its fields. We will skip f entirely.
if f.PkgPath != "" {
continue
}
name, opts := f.Tag.Get("datastore"), ""
if i := strings.Index(name, ","); i != -1 {
name, opts = name[:i], name[i+1:]
}
switch {
case name == "":
if !f.Anonymous {
name = f.Name
}
case name == "-":
continue
case name == "__key__":
if f.Type != typeOfKeyPtr {
return nil, fmt.Errorf("datastore: __key__ field on struct %v is not a *datastore.Key", t)
}
c.keyField = i
continue
case !validPropertyName(name):
return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
}
substructType, fIsSlice := reflect.Type(nil), false
switch f.Type.Kind() {
case reflect.Struct:
substructType = f.Type
case reflect.Slice:
if f.Type.Elem().Kind() == reflect.Struct {
substructType = f.Type.Elem()
}
fIsSlice = f.Type != typeOfByteSlice
c.hasSlice = c.hasSlice || fIsSlice
}
var sub *structCodec
if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
var err error
sub, err = getStructCodecLocked(substructType)
if err != nil {
return nil, err
}
if !sub.complete {
return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
}
if fIsSlice && sub.hasSlice {
return nil, fmt.Errorf(
"datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
}
c.hasSlice = c.hasSlice || sub.hasSlice
// If name is empty at this point, f is an anonymous struct field.
// In this case, we promote the substruct's fields up to this level
// in the linked list of struct codecs.
if name == "" {
for subname, subfield := range sub.fields {
if _, ok := c.fields[subname]; ok {
return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", subname)
}
c.fields[subname] = fieldCodec{
path: append([]int{i}, subfield.path...),
noIndex: subfield.noIndex || opts == "noindex",
structCodec: subfield.structCodec,
}
}
continue
}
}
if _, ok := c.fields[name]; ok {
return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
}
c.fields[name] = fieldCodec{
path: []int{i},
noIndex: opts == "noindex",
structCodec: sub,
}
}
c.complete = true
return c, nil
}
// structPLS adapts a struct to be a PropertyLoadSaver.
type structPLS struct {
v reflect.Value
codec *structCodec
}
// newStructPLS returns a structPLS, which implements the
// PropertyLoadSaver interface, for the struct pointer p.
func newStructPLS(p interface{}) (*structPLS, error) {
v := reflect.ValueOf(p)
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
return nil, ErrInvalidEntityType
}
v = v.Elem()
codec, err := getStructCodec(v.Type())
if err != nil {
return nil, err
}
return &structPLS{v, codec}, nil
}
// LoadStruct loads the properties from p to dst.
// dst must be a struct pointer.
func LoadStruct(dst interface{}, p []Property) error {
x, err := newStructPLS(dst)
if err != nil {
return err
}
return x.Load(p)
}
// SaveStruct returns the properties from src as a slice of Properties.
// src must be a struct pointer.
func SaveStruct(src interface{}) ([]Property, error) {
x, err := newStructPLS(src)
if err != nil {
return nil, err
}
return x.Save()
}
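// Illustrative sketch (not part of the upstream file): round-tripping a
// struct through its property representation; the Task type is an assumption:
//
//	type Task struct {
//		Name string
//		Done bool
//	}
//
//	props, err := SaveStruct(&Task{Name: "demo", Done: true})
//	if err != nil {
//		// handle err
//	}
//	var t Task
//	err = LoadStruct(&t, props) // t now equals the saved value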

vendor/cloud.google.com/go/datastore/query.go generated vendored Normal file

@@ -0,0 +1,743 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"encoding/base64"
"errors"
"fmt"
"math"
"reflect"
"strconv"
"strings"
wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
type operator int
const (
lessThan operator = iota + 1
lessEq
equal
greaterEq
greaterThan
keyFieldName = "__key__"
)
var operatorToProto = map[operator]pb.PropertyFilter_Operator{
lessThan: pb.PropertyFilter_LESS_THAN,
lessEq: pb.PropertyFilter_LESS_THAN_OR_EQUAL,
equal: pb.PropertyFilter_EQUAL,
greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL,
greaterThan: pb.PropertyFilter_GREATER_THAN,
}
// filter is a conditional filter on query results.
type filter struct {
FieldName string
Op operator
Value interface{}
}
type sortDirection bool
const (
ascending sortDirection = false
descending sortDirection = true
)
var sortDirectionToProto = map[sortDirection]pb.PropertyOrder_Direction{
ascending: pb.PropertyOrder_ASCENDING,
descending: pb.PropertyOrder_DESCENDING,
}
// order is a sort order on query results.
type order struct {
FieldName string
Direction sortDirection
}
// NewQuery creates a new Query for a specific entity kind.
//
// An empty kind means to return all entities, including entities created and
// managed by other App Engine features, and is called a kindless query.
// Kindless queries cannot include filters or sort orders on property values.
func NewQuery(kind string) *Query {
return &Query{
kind: kind,
limit: -1,
}
}
// Query represents a datastore query.
type Query struct {
kind string
ancestor *Key
filter []filter
order []order
projection []string
distinct bool
keysOnly bool
eventual bool
limit int32
offset int32
start []byte
end []byte
trans *Transaction
err error
}
func (q *Query) clone() *Query {
x := *q
// Copy the contents of the slice-typed fields to a new backing store.
if len(q.filter) > 0 {
x.filter = make([]filter, len(q.filter))
copy(x.filter, q.filter)
}
if len(q.order) > 0 {
x.order = make([]order, len(q.order))
copy(x.order, q.order)
}
return &x
}
// Ancestor returns a derivative query with an ancestor filter.
// The ancestor should not be nil.
func (q *Query) Ancestor(ancestor *Key) *Query {
q = q.clone()
if ancestor == nil {
q.err = errors.New("datastore: nil query ancestor")
return q
}
q.ancestor = ancestor
return q
}
// EventualConsistency returns a derivative query that returns eventually
// consistent results.
// It only has an effect on ancestor queries.
func (q *Query) EventualConsistency() *Query {
q = q.clone()
q.eventual = true
return q
}
// Transaction returns a derivative query that is associated with the given
// transaction.
//
// All reads performed as part of the transaction will come from a single
// consistent snapshot. Furthermore, if the transaction is set to a
// serializable isolation level, another transaction cannot concurrently modify
// the data that is read or modified by this transaction.
func (q *Query) Transaction(t *Transaction) *Query {
q = q.clone()
q.trans = t
return q
}
// Filter returns a derivative query with a field-based filter.
// The filterStr argument must be a field name followed by optional space,
// followed by an operator, one of ">", "<", ">=", "<=", or "=".
// Fields are compared against the provided value using the operator.
// Multiple filters are AND'ed together.
// Field names which contain spaces, quote marks, or operator characters
// should be passed as quoted Go string literals as returned by strconv.Quote
// or the fmt package's %q verb.
func (q *Query) Filter(filterStr string, value interface{}) *Query {
q = q.clone()
filterStr = strings.TrimSpace(filterStr)
if filterStr == "" {
q.err = fmt.Errorf("datastore: invalid filter %q", filterStr)
return q
}
f := filter{
FieldName: strings.TrimRight(filterStr, " ><=!"),
Value: value,
}
switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
case "<=":
f.Op = lessEq
case ">=":
f.Op = greaterEq
case "<":
f.Op = lessThan
case ">":
f.Op = greaterThan
case "=":
f.Op = equal
default:
q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
return q
}
var err error
f.FieldName, err = unquote(f.FieldName)
if err != nil {
q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName)
return q
}
q.filter = append(q.filter, f)
return q
}
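// Illustrative sketch (not part of the upstream file): because filters are
// AND'ed together, chained calls narrow the result set; the kind, field
// names, and values are assumptions:
//
//	q := NewQuery("Task").
//		Filter("Done =", false).
//		Filter("Priority >=", 4)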
// Order returns a derivative query with a field-based sort order. Orders are
// applied in the order they are added. The default order is ascending; to sort
// in descending order prefix the fieldName with a minus sign (-).
// Field names which contain spaces, quote marks, or the minus sign
// should be passed as quoted Go string literals as returned by strconv.Quote
// or the fmt package's %q verb.
func (q *Query) Order(fieldName string) *Query {
q = q.clone()
fieldName, dir := strings.TrimSpace(fieldName), ascending
if strings.HasPrefix(fieldName, "-") {
fieldName, dir = strings.TrimSpace(fieldName[1:]), descending
} else if strings.HasPrefix(fieldName, "+") {
q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
return q
}
fieldName, err := unquote(fieldName)
if err != nil {
q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName)
return q
}
if fieldName == "" {
q.err = errors.New("datastore: empty order")
return q
}
q.order = append(q.order, order{
Direction: dir,
FieldName: fieldName,
})
return q
}
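// Illustrative sketch (not part of the upstream file): orders apply in the
// order they are added, and a leading minus sign selects descending order:
//
//	q := NewQuery("Task").
//		Order("Priority").
//		Order("-Created") // ties broken by newest first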
// unquote optionally interprets s as a double-quoted or backquoted Go
// string literal if it begins with the relevant character.
func unquote(s string) (string, error) {
if s == "" || (s[0] != '`' && s[0] != '"') {
return s, nil
}
return strconv.Unquote(s)
}
// Project returns a derivative query that yields only the given fields. It
// cannot be used with KeysOnly.
func (q *Query) Project(fieldNames ...string) *Query {
q = q.clone()
q.projection = append([]string(nil), fieldNames...)
return q
}
// Distinct returns a derivative query that yields de-duplicated entities with
// respect to the set of projected fields. It is only used for projection
// queries.
func (q *Query) Distinct() *Query {
q = q.clone()
q.distinct = true
return q
}
// KeysOnly returns a derivative query that yields only keys, not keys and
// entities. It cannot be used with projection queries.
func (q *Query) KeysOnly() *Query {
q = q.clone()
q.keysOnly = true
return q
}
// Limit returns a derivative query that has a limit on the number of results
// returned. A negative value means unlimited.
func (q *Query) Limit(limit int) *Query {
q = q.clone()
if limit < math.MinInt32 || limit > math.MaxInt32 {
q.err = errors.New("datastore: query limit overflow")
return q
}
q.limit = int32(limit)
return q
}
// Offset returns a derivative query that has an offset of how many keys to
// skip over before returning results. A negative value is invalid.
func (q *Query) Offset(offset int) *Query {
q = q.clone()
if offset < 0 {
q.err = errors.New("datastore: negative query offset")
return q
}
if offset > math.MaxInt32 {
q.err = errors.New("datastore: query offset overflow")
return q
}
q.offset = int32(offset)
return q
}
// Start returns a derivative query with the given start point.
func (q *Query) Start(c Cursor) *Query {
q = q.clone()
q.start = c.cc
return q
}
// End returns a derivative query with the given end point.
func (q *Query) End(c Cursor) *Query {
q = q.clone()
q.end = c.cc
return q
}
// toProto converts the query to a protocol buffer.
func (q *Query) toProto(req *pb.RunQueryRequest) error {
if len(q.projection) != 0 && q.keysOnly {
return errors.New("datastore: query cannot both project and be keys-only")
}
dst := &pb.Query{}
if q.kind != "" {
dst.Kind = []*pb.KindExpression{{Name: q.kind}}
}
if q.projection != nil {
for _, propertyName := range q.projection {
dst.Projection = append(dst.Projection, &pb.Projection{Property: &pb.PropertyReference{Name: propertyName}})
}
if q.distinct {
for _, propertyName := range q.projection {
dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName})
}
}
}
if q.keysOnly {
dst.Projection = []*pb.Projection{{Property: &pb.PropertyReference{Name: keyFieldName}}}
}
var filters []*pb.Filter
for _, qf := range q.filter {
if qf.FieldName == "" {
return errors.New("datastore: empty query filter field name")
}
v, err := interfaceToProto(reflect.ValueOf(qf.Value).Interface(), false)
if err != nil {
return fmt.Errorf("datastore: bad query filter value type: %v", err)
}
op, ok := operatorToProto[qf.Op]
if !ok {
return errors.New("datastore: unknown query filter operator")
}
xf := &pb.PropertyFilter{
Op: op,
Property: &pb.PropertyReference{Name: qf.FieldName},
Value: v,
}
filters = append(filters, &pb.Filter{
FilterType: &pb.Filter_PropertyFilter{xf},
})
}
if q.ancestor != nil {
filters = append(filters, &pb.Filter{
FilterType: &pb.Filter_PropertyFilter{&pb.PropertyFilter{
Property: &pb.PropertyReference{Name: "__key__"},
Op: pb.PropertyFilter_HAS_ANCESTOR,
Value: &pb.Value{ValueType: &pb.Value_KeyValue{keyToProto(q.ancestor)}},
}}})
}
if len(filters) == 1 {
dst.Filter = filters[0]
} else if len(filters) > 1 {
dst.Filter = &pb.Filter{FilterType: &pb.Filter_CompositeFilter{&pb.CompositeFilter{
Op: pb.CompositeFilter_AND,
Filters: filters,
}}}
}
for _, qo := range q.order {
if qo.FieldName == "" {
return errors.New("datastore: empty query order field name")
}
xo := &pb.PropertyOrder{
Property: &pb.PropertyReference{Name: qo.FieldName},
Direction: sortDirectionToProto[qo.Direction],
}
dst.Order = append(dst.Order, xo)
}
if q.limit >= 0 {
dst.Limit = &wrapperspb.Int32Value{q.limit}
}
dst.Offset = q.offset
dst.StartCursor = q.start
dst.EndCursor = q.end
if t := q.trans; t != nil {
if t.id == nil {
return errExpiredTransaction
}
if q.eventual {
return errors.New("datastore: cannot use EventualConsistency query in a transaction")
}
req.ReadOptions = &pb.ReadOptions{
ConsistencyType: &pb.ReadOptions_Transaction{t.id},
}
}
if q.eventual {
req.ReadOptions = &pb.ReadOptions{&pb.ReadOptions_ReadConsistency_{pb.ReadOptions_EVENTUAL}}
}
req.QueryType = &pb.RunQueryRequest_Query{dst}
return nil
}
// Count returns the number of results for the given query.
//
// The running time and number of API calls made by Count scale linearly
// with the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise Count will
// continue until it finishes counting or the provided context expires.
func (c *Client) Count(ctx context.Context, q *Query) (int, error) {
// Check that the query is well-formed.
if q.err != nil {
return 0, q.err
}
// Create a copy of the query, with keysOnly true (if we're not a projection,
// since the two are incompatible).
newQ := q.clone()
newQ.keysOnly = len(newQ.projection) == 0
// Create an iterator and use it to walk through the batches of results
// directly.
it := c.Run(ctx, newQ)
n := 0
for {
err := it.nextBatch()
if err == Done {
return n, nil
}
if err != nil {
return 0, err
}
n += len(it.results)
}
}
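// Illustrative sketch (not part of the upstream file): bounding the work done
// by Count with an explicit limit; the client value is assumed:
//
//	n, err := client.Count(ctx, NewQuery("Task").Filter("Done =", false).Limit(100))
//	if err != nil {
//		// handle err
//	}
//	// n is at most 100, regardless of how many entities match.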
// GetAll runs the provided query in the given context and returns all keys
// that match that query, as well as appending the values to dst.
//
// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
//
// As a special case, *PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when *[]PropertyList was intended.
//
// The keys returned by GetAll will be in a 1-1 correspondence with the entities
// added to dst.
//
// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
//
// The running time and number of API calls made by GetAll scale linearly
// with the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise GetAll will
// continue until it finishes collecting results or the provided context
// expires.
func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) {
var (
dv reflect.Value
mat multiArgType
elemType reflect.Type
errFieldMismatch error
)
if !q.keysOnly {
dv = reflect.ValueOf(dst)
if dv.Kind() != reflect.Ptr || dv.IsNil() {
return nil, ErrInvalidEntityType
}
dv = dv.Elem()
mat, elemType = checkMultiArg(dv)
if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
return nil, ErrInvalidEntityType
}
}
var keys []*Key
for t := c.Run(ctx, q); ; {
k, e, err := t.next()
if err == Done {
break
}
if err != nil {
return keys, err
}
if !q.keysOnly {
ev := reflect.New(elemType)
if elemType.Kind() == reflect.Map {
// This is a special case. The zero values of a map type are
// not immediately useful; they have to be make'd.
//
// Funcs and channels are similar, in that a zero value is not useful,
// but even a freshly make'd channel isn't useful: there's no fixed
// channel buffer size that is always going to be large enough, and
// there's no goroutine to drain the other end. Theoretically, these
// types could be supported, for example by sniffing for a constructor
// method or requiring prior registration, but for now it's not a
// frequent enough concern to be worth it. Programmers can work around
// it by explicitly using Iterator.Next instead of the Query.GetAll
// convenience method.
x := reflect.MakeMap(elemType)
ev.Elem().Set(x)
}
if err = loadEntity(ev.Interface(), e); err != nil {
if _, ok := err.(*ErrFieldMismatch); ok {
// We continue loading entities even in the face of field mismatch errors.
// If we encounter any other error, that other error is returned. Otherwise,
// an ErrFieldMismatch is returned.
errFieldMismatch = err
} else {
return keys, err
}
}
if mat != multiArgTypeStructPtr {
ev = ev.Elem()
}
dv.Set(reflect.Append(dv, ev))
}
keys = append(keys, k)
}
return keys, errFieldMismatch
}
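// Illustrative sketch (not part of the upstream file): loading all matching
// entities into a slice; the Task type and client value are assumptions:
//
//	var tasks []Task
//	keys, err := client.GetAll(ctx, NewQuery("Task").Filter("Done =", false), &tasks)
//	if err != nil {
//		// handle err
//	}
//	// keys[i] is the key of the entity loaded into tasks[i].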
// Run runs the given query in the given context.
func (c *Client) Run(ctx context.Context, q *Query) *Iterator {
if q.err != nil {
return &Iterator{err: q.err}
}
t := &Iterator{
ctx: ctx,
client: c,
limit: q.limit,
offset: q.offset,
keysOnly: q.keysOnly,
pageCursor: q.start,
entityCursor: q.start,
req: &pb.RunQueryRequest{
ProjectId: c.dataset,
},
}
if ns := ctxNamespace(ctx); ns != "" {
t.req.PartitionId = &pb.PartitionId{
NamespaceId: ns,
}
}
if err := q.toProto(t.req); err != nil {
t.err = err
}
return t
}
// Iterator is the result of running a query.
type Iterator struct {
ctx context.Context
client *Client
err error
// results is the list of EntityResults still to be iterated over from the
// most recent API call. It will be nil if no requests have yet been issued.
results []*pb.EntityResult
// req is the request to send. It may be modified and used multiple times.
req *pb.RunQueryRequest
// limit is the limit on the number of results this iterator should return.
// The zero value is used to prevent further fetches from the server.
// A negative value means unlimited.
limit int32
// offset is the number of results that still need to be skipped.
offset int32
// keysOnly records whether the query was keys-only (skip entity loading).
keysOnly bool
// pageCursor is the compiled cursor for the next batch/page of result.
// TODO(djd): Can we delete this in favour of paging with the last
// entityCursor from each batch?
pageCursor []byte
// entityCursor is the compiled cursor of the next result.
entityCursor []byte
}
// Done is returned when a query iteration has completed.
var Done = errors.New("datastore: query has no more results")
// Next returns the key of the next result. When there are no more results,
// Done is returned as the error.
//
// If the query is not keys only and dst is non-nil, it also loads the entity
// stored for that key into the struct pointer or PropertyLoadSaver dst, with
// the same semantics and possible errors as for the Get function.
func (t *Iterator) Next(dst interface{}) (*Key, error) {
k, e, err := t.next()
if err != nil {
return nil, err
}
if dst != nil && !t.keysOnly {
err = loadEntity(dst, e)
}
return k, err
}
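// Illustrative sketch (not part of the upstream file): the usual iteration
// loop, terminating when Next returns Done; the Task type and client value
// are assumptions:
//
//	it := client.Run(ctx, NewQuery("Task"))
//	for {
//		var t Task
//		key, err := it.Next(&t)
//		if err == Done {
//			break
//		}
//		if err != nil {
//			// handle err
//		}
//		_ = key // use key and t
//	}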
func (t *Iterator) next() (*Key, *pb.Entity, error) {
// Fetch additional batches while there are no more results.
for t.err == nil && len(t.results) == 0 {
t.err = t.nextBatch()
}
if t.err != nil {
return nil, nil, t.err
}
// Extract the next result, update cursors, and parse the entity's key.
e := t.results[0]
t.results = t.results[1:]
t.entityCursor = e.Cursor
if len(t.results) == 0 {
t.entityCursor = t.pageCursor // At the end of the batch.
}
if e.Entity.Key == nil {
return nil, nil, errors.New("datastore: internal error: server did not return a key")
}
k, err := protoToKey(e.Entity.Key)
if err != nil || k.Incomplete() {
return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
}
return k, e.Entity, nil
}
// nextBatch makes a single call to the server for a batch of results.
func (t *Iterator) nextBatch() error {
if t.limit == 0 {
return Done // Short-circuits the zero-item response.
}
// Adjust the query with the latest start cursor, limit and offset.
q := t.req.GetQuery()
q.StartCursor = t.pageCursor
q.Offset = t.offset
if t.limit >= 0 {
q.Limit = &wrapperspb.Int32Value{t.limit}
} else {
q.Limit = nil
}
// Run the query.
resp, err := t.client.client.RunQuery(t.ctx, t.req)
if err != nil {
return err
}
// Adjust any offset from skipped results.
skip := resp.Batch.SkippedResults
if skip < 0 {
return errors.New("datastore: internal error: negative number of skipped_results")
}
t.offset -= skip
if t.offset < 0 {
return errors.New("datastore: internal error: query skipped too many results")
}
if t.offset > 0 && len(resp.Batch.EntityResults) > 0 {
return errors.New("datastore: internal error: query returned results before requested offset")
}
// Adjust the limit.
if t.limit >= 0 {
t.limit -= int32(len(resp.Batch.EntityResults))
if t.limit < 0 {
return errors.New("datastore: internal error: query returned more results than the limit")
}
}
// If there are no more results available, set limit to zero to prevent
// further fetches. Otherwise, check that there is a next page cursor available.
if resp.Batch.MoreResults != pb.QueryResultBatch_NOT_FINISHED {
t.limit = 0
} else if resp.Batch.EndCursor == nil {
return errors.New("datastore: internal error: server did not return a cursor")
}
// Update cursors.
// If any results were skipped, use the SkippedCursor as the next entity cursor.
if skip > 0 {
t.entityCursor = resp.Batch.SkippedCursor
} else {
t.entityCursor = q.StartCursor
}
t.pageCursor = resp.Batch.EndCursor
t.results = resp.Batch.EntityResults
return nil
}
// Cursor returns a cursor for the iterator's current location.
func (t *Iterator) Cursor() (Cursor, error) {
// If there is still an offset, we need to skip those results first.
for t.err == nil && t.offset > 0 {
t.err = t.nextBatch()
}
if t.err != nil && t.err != Done {
return Cursor{}, t.err
}
return Cursor{t.entityCursor}, nil
}
// Cursor is an iterator's position. It can be converted to and from an opaque
// string. A cursor can be used from different HTTP requests, but only with a
// query with the same kind, ancestor, filter and order constraints.
//
// The zero Cursor can be used to indicate that there is no start and/or end
// constraint for a query.
type Cursor struct {
cc []byte
}
// String returns a base-64 string representation of a cursor.
func (c Cursor) String() string {
if c.cc == nil {
return ""
}
return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=")
}
// DecodeCursor decodes a cursor from its base-64 string representation.
func DecodeCursor(s string) (Cursor, error) {
if s == "" {
return Cursor{}, nil
}
if n := len(s) % 4; n != 0 {
s += strings.Repeat("=", 4-n)
}
b, err := base64.URLEncoding.DecodeString(s)
if err != nil {
return Cursor{}, err
}
return Cursor{b}, nil
}
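// Illustrative sketch (not part of the upstream file): resuming a query from
// a cursor saved in an earlier request; cursorStr is an assumed input:
//
//	cursor, err := DecodeCursor(cursorStr)
//	if err != nil {
//		// handle err
//	}
//	it := client.Run(ctx, NewQuery("Task").Start(cursor))
//	// ... iterate with it.Next, then persist it.Cursor() for the next page.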

vendor/cloud.google.com/go/datastore/query_test.go generated vendored Normal file

@@ -0,0 +1,538 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"errors"
"fmt"
"reflect"
"sort"
"testing"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
)
var (
key1 = &pb.Key{
Path: []*pb.Key_PathElement{
{
Kind: "Gopher",
IdType: &pb.Key_PathElement_Id{6},
},
},
}
key2 = &pb.Key{
Path: []*pb.Key_PathElement{
{
Kind: "Gopher",
IdType: &pb.Key_PathElement_Id{6},
},
{
Kind: "Gopher",
IdType: &pb.Key_PathElement_Id{8},
},
},
}
)
type fakeClient struct {
pb.DatastoreClient
queryFn func(*pb.RunQueryRequest) (*pb.RunQueryResponse, error)
commitFn func(*pb.CommitRequest) (*pb.CommitResponse, error)
}
func (c *fakeClient) RunQuery(_ context.Context, req *pb.RunQueryRequest, _ ...grpc.CallOption) (*pb.RunQueryResponse, error) {
return c.queryFn(req)
}
func (c *fakeClient) Commit(_ context.Context, req *pb.CommitRequest, _ ...grpc.CallOption) (*pb.CommitResponse, error) {
return c.commitFn(req)
}
func fakeRunQuery(in *pb.RunQueryRequest) (*pb.RunQueryResponse, error) {
expectedIn := &pb.RunQueryRequest{
QueryType: &pb.RunQueryRequest_Query{&pb.Query{
Kind: []*pb.KindExpression{{Name: "Gopher"}},
}},
}
if !proto.Equal(in, expectedIn) {
return nil, fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn)
}
return &pb.RunQueryResponse{
Batch: &pb.QueryResultBatch{
MoreResults: pb.QueryResultBatch_NO_MORE_RESULTS,
EntityResultType: pb.EntityResult_FULL,
EntityResults: []*pb.EntityResult{
{
Entity: &pb.Entity{
Key: key1,
Properties: map[string]*pb.Value{
"Name": {ValueType: &pb.Value_StringValue{"George"}},
"Height": {ValueType: &pb.Value_IntegerValue{32}},
},
},
},
{
Entity: &pb.Entity{
Key: key2,
Properties: map[string]*pb.Value{
"Name": {ValueType: &pb.Value_StringValue{"Rufus"}},
// No height for Rufus.
},
},
},
},
},
}, nil
}
type StructThatImplementsPLS struct{}
func (StructThatImplementsPLS) Load(p []Property) error { return nil }
func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
var _ PropertyLoadSaver = StructThatImplementsPLS{}
type StructPtrThatImplementsPLS struct{}
func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil }
func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{}
type PropertyMap map[string]Property
func (m PropertyMap) Load(props []Property) error {
for _, p := range props {
m[p.Name] = p
}
return nil
}
func (m PropertyMap) Save() ([]Property, error) {
props := make([]Property, 0, len(m))
for _, p := range m {
props = append(props, p)
}
return props, nil
}
var _ PropertyLoadSaver = PropertyMap{}
type Gopher struct {
Name string
Height int
}
// typeOfEmptyInterface is the type of interface{}, but we can't use
// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an
// interface{}.
var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem()
func TestCheckMultiArg(t *testing.T) {
testCases := []struct {
v interface{}
mat multiArgType
elemType reflect.Type
}{
// Invalid cases.
{nil, multiArgTypeInvalid, nil},
{Gopher{}, multiArgTypeInvalid, nil},
{&Gopher{}, multiArgTypeInvalid, nil},
{PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case.
{PropertyMap{}, multiArgTypeInvalid, nil},
{[]*PropertyList(nil), multiArgTypeInvalid, nil},
{[]*PropertyMap(nil), multiArgTypeInvalid, nil},
{[]**Gopher(nil), multiArgTypeInvalid, nil},
{[]*interface{}(nil), multiArgTypeInvalid, nil},
// Valid cases.
{
[]PropertyList(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(PropertyList{}),
},
{
[]PropertyMap(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(PropertyMap{}),
},
{
[]StructThatImplementsPLS(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(StructThatImplementsPLS{}),
},
{
[]StructPtrThatImplementsPLS(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(StructPtrThatImplementsPLS{}),
},
{
[]Gopher(nil),
multiArgTypeStruct,
reflect.TypeOf(Gopher{}),
},
{
[]*Gopher(nil),
multiArgTypeStructPtr,
reflect.TypeOf(Gopher{}),
},
{
[]interface{}(nil),
multiArgTypeInterface,
typeOfEmptyInterface,
},
}
for _, tc := range testCases {
mat, elemType := checkMultiArg(reflect.ValueOf(tc.v))
if mat != tc.mat || elemType != tc.elemType {
t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v",
tc.v, mat, elemType, tc.mat, tc.elemType)
}
}
}
func TestSimpleQuery(t *testing.T) {
struct1 := Gopher{Name: "George", Height: 32}
struct2 := Gopher{Name: "Rufus"}
pList1 := PropertyList{
{
Name: "Height",
Value: int64(32),
},
{
Name: "Name",
Value: "George",
},
}
pList2 := PropertyList{
{
Name: "Name",
Value: "Rufus",
},
}
pMap1 := PropertyMap{
"Name": Property{
Name: "Name",
Value: "George",
},
"Height": Property{
Name: "Height",
Value: int64(32),
},
}
pMap2 := PropertyMap{
"Name": Property{
Name: "Name",
Value: "Rufus",
},
}
testCases := []struct {
dst interface{}
want interface{}
}{
// The destination must have type *[]P, *[]S or *[]*S, for some non-interface
// type P such that *P implements PropertyLoadSaver, or for some struct type S.
{new([]Gopher), &[]Gopher{struct1, struct2}},
{new([]*Gopher), &[]*Gopher{&struct1, &struct2}},
{new([]PropertyList), &[]PropertyList{pList1, pList2}},
{new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}},
// Any other destination type is invalid.
{0, nil},
{Gopher{}, nil},
{PropertyList{}, nil},
{PropertyMap{}, nil},
{[]int{}, nil},
{[]Gopher{}, nil},
{[]PropertyList{}, nil},
{new(int), nil},
{new(Gopher), nil},
{new(PropertyList), nil}, // This is a special case.
{new(PropertyMap), nil},
{new([]int), nil},
{new([]map[int]int), nil},
{new([]map[string]Property), nil},
{new([]map[string]interface{}), nil},
{new([]*int), nil},
{new([]*map[int]int), nil},
{new([]*map[string]Property), nil},
{new([]*map[string]interface{}), nil},
{new([]**Gopher), nil},
{new([]*PropertyList), nil},
{new([]*PropertyMap), nil},
}
for _, tc := range testCases {
nCall := 0
client := &Client{
client: &fakeClient{
queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) {
nCall++
return fakeRunQuery(req)
},
},
}
ctx := context.Background()
var (
expectedErr error
expectedNCall int
)
if tc.want == nil {
expectedErr = ErrInvalidEntityType
} else {
expectedNCall = 1
}
keys, err := client.GetAll(ctx, NewQuery("Gopher"), tc.dst)
if err != expectedErr {
t.Errorf("dst type %T: got error %v, want %v", tc.dst, err, expectedErr)
continue
}
if nCall != expectedNCall {
t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall)
continue
}
if err != nil {
continue
}
key1 := NewKey(ctx, "Gopher", "", 6, nil)
expectedKeys := []*Key{
key1,
NewKey(ctx, "Gopher", "", 8, key1),
}
if l1, l2 := len(keys), len(expectedKeys); l1 != l2 {
t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2)
continue
}
for i, key := range keys {
if !keysEqual(key, expectedKeys[i]) {
t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i])
continue
}
}
// Make sure we sort any PropertyList items (the order is not deterministic).
if pLists, ok := tc.dst.(*[]PropertyList); ok {
for _, p := range *pLists {
sort.Sort(byName(p))
}
}
if !reflect.DeepEqual(tc.dst, tc.want) {
t.Errorf("dst type %T: Entities\ngot %+v\nwant %+v", tc.dst, tc.dst, tc.want)
continue
}
}
}
// keysEqual is like (*Key).Equal, but ignores the App ID.
func keysEqual(a, b *Key) bool {
for a != nil && b != nil {
if a.Kind() != b.Kind() || a.Name() != b.Name() || a.ID() != b.ID() {
return false
}
a, b = a.Parent(), b.Parent()
}
return a == b
}
func TestQueriesAreImmutable(t *testing.T) {
// Test that deriving q2 from q1 does not modify q1.
q0 := NewQuery("foo")
q1 := NewQuery("foo")
q2 := q1.Offset(2)
if !reflect.DeepEqual(q0, q1) {
t.Errorf("q0 and q1 were not equal")
}
if reflect.DeepEqual(q1, q2) {
t.Errorf("q1 and q2 were equal")
}
// Test that deriving from q4 twice does not conflict, even though
// q4 has a long list of order clauses. This tests that the arrays
// backed by a query's slice of orders are not shared.
f := func() *Query {
q := NewQuery("bar")
// 47 is an ugly number that is unlikely to be near a re-allocation
// point in repeated append calls. For example, it's not near a power
// of 2 or a multiple of 10.
for i := 0; i < 47; i++ {
q = q.Order(fmt.Sprintf("x%d", i))
}
return q
}
q3 := f().Order("y")
q4 := f()
q5 := q4.Order("y")
q6 := q4.Order("z")
if !reflect.DeepEqual(q3, q5) {
t.Errorf("q3 and q5 were not equal")
}
if reflect.DeepEqual(q5, q6) {
t.Errorf("q5 and q6 were equal")
}
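// For illustration, the append-aliasing hazard the test above guards against;
// a minimal sketch, assuming only the standard library:
//
//	base := make([]int, 1, 4) // len 1, cap 4
//	a := append(base, 1)      // reuses base's backing array
//	b := append(base, 2)      // writes into the same slot
//	fmt.Println(a[1], b[1])   // prints "2 2": the second append clobbered a[1]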
}
func TestFilterParser(t *testing.T) {
testCases := []struct {
filterStr string
wantOK bool
wantFieldName string
wantOp operator
}{
// Supported ops.
{"x<", true, "x", lessThan},
{"x <", true, "x", lessThan},
{"x <", true, "x", lessThan},
{" x < ", true, "x", lessThan},
{"x <=", true, "x", lessEq},
{"x =", true, "x", equal},
{"x >=", true, "x", greaterEq},
{"x >", true, "x", greaterThan},
{"in >", true, "in", greaterThan},
{"in>", true, "in", greaterThan},
// Valid but (currently) unsupported ops.
{"x!=", false, "", 0},
{"x !=", false, "", 0},
{" x != ", false, "", 0},
{"x IN", false, "", 0},
{"x in", false, "", 0},
// Invalid ops.
{"x EQ", false, "", 0},
{"x lt", false, "", 0},
{"x <>", false, "", 0},
{"x >>", false, "", 0},
{"x ==", false, "", 0},
{"x =<", false, "", 0},
{"x =>", false, "", 0},
{"x !", false, "", 0},
{"x ", false, "", 0},
{"x", false, "", 0},
// Quoted and interesting field names.
{"x > y =", true, "x > y", equal},
{"` x ` =", true, " x ", equal},
{`" x " =`, true, " x ", equal},
{`" \"x " =`, true, ` "x `, equal},
{`" x =`, false, "", 0},
{`" x ="`, false, "", 0},
{"` x \" =", false, "", 0},
}
for _, tc := range testCases {
q := NewQuery("foo").Filter(tc.filterStr, 42)
if ok := q.err == nil; ok != tc.wantOK {
t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK)
continue
}
if !tc.wantOK {
continue
}
if len(q.filter) != 1 {
t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1)
continue
}
got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42}
if got != want {
t.Errorf("%q: got %v, want %v", tc.filterStr, got, want)
continue
}
}
}
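// For illustration, the filter grammar exercised above as a caller sees it;
// a minimal sketch in which the kind and field names are hypothetical:
//
//	q := NewQuery("Gopher").
//		Filter("Height >=", 10).
//		Filter("Name =", "George")
//	// A malformed or unsupported filter string ("Height !=", "Height EQ")
//	// stores an error on the derived query, which surfaces when it is run.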
func TestNamespaceQuery(t *testing.T) {
gotNamespace := make(chan string, 1)
ctx := context.Background()
client := &Client{
client: &fakeClient{
queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) {
if part := req.PartitionId; part != nil {
gotNamespace <- part.NamespaceId
} else {
gotNamespace <- ""
}
return nil, errors.New("not implemented")
},
},
}
var gs []Gopher
client.GetAll(ctx, NewQuery("gopher"), &gs)
if got, want := <-gotNamespace, ""; got != want {
t.Errorf("GetAll: got namespace %q, want %q", got, want)
}
client.Count(ctx, NewQuery("gopher"))
if got, want := <-gotNamespace, ""; got != want {
t.Errorf("Count: got namespace %q, want %q", got, want)
}
const ns = "not_default"
ctx = WithNamespace(ctx, ns)
client.GetAll(ctx, NewQuery("gopher"), &gs)
if got, want := <-gotNamespace, ns; got != want {
t.Errorf("GetAll: got namespace %q, want %q", got, want)
}
client.Count(ctx, NewQuery("gopher"))
if got, want := <-gotNamespace, ns; got != want {
t.Errorf("Count: got namespace %q, want %q", got, want)
}
}
func TestReadOptions(t *testing.T) {
tid := []byte{1}
for _, test := range []struct {
q *Query
want *pb.ReadOptions
}{
{
q: NewQuery(""),
want: nil,
},
{
q: NewQuery("").Transaction(nil),
want: nil,
},
{
q: NewQuery("").Transaction(&Transaction{id: tid}),
want: &pb.ReadOptions{&pb.ReadOptions_Transaction{tid}},
},
{
q: NewQuery("").EventualConsistency(),
want: &pb.ReadOptions{&pb.ReadOptions_ReadConsistency_{pb.ReadOptions_EVENTUAL}},
},
} {
req := &pb.RunQueryRequest{}
if err := test.q.toProto(req); err != nil {
t.Fatalf("%+v: got %v, want no error", test.q, err)
}
if got := req.ReadOptions; !proto.Equal(got, test.want) {
t.Errorf("%+v:\ngot %+v\nwant %+v", test.q, got, test.want)
}
}
// Test errors.
for _, q := range []*Query{
NewQuery("").Transaction(&Transaction{id: nil}),
NewQuery("").Transaction(&Transaction{id: tid}).EventualConsistency(),
} {
req := &pb.RunQueryRequest{}
if err := q.toProto(req); err == nil {
t.Errorf("%+v: got nil, wanted error", q)
}
}
}

251
vendor/cloud.google.com/go/datastore/save.go generated vendored Normal file

@@ -0,0 +1,251 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"errors"
"fmt"
"reflect"
"time"
timepb "github.com/golang/protobuf/ptypes/timestamp"
pb "google.golang.org/genproto/googleapis/datastore/v1"
llpb "google.golang.org/genproto/googleapis/type/latlng"
)
// saveEntity converts a PropertyLoadSaver or struct pointer into a pb.Entity.
func saveEntity(key *Key, src interface{}) (*pb.Entity, error) {
var err error
var props []Property
if e, ok := src.(PropertyLoadSaver); ok {
props, err = e.Save()
} else {
props, err = SaveStruct(src)
}
if err != nil {
return nil, err
}
return propertiesToProto(key, props)
}
// TODO(djd): Convert this and below to return ([]Property, error).
func saveStructProperty(props *[]Property, name string, noIndex bool, v reflect.Value) error {
p := Property{
Name: name,
NoIndex: noIndex,
}
switch x := v.Interface().(type) {
case *Key, time.Time, GeoPoint:
p.Value = x
default:
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p.Value = v.Int()
case reflect.Bool:
p.Value = v.Bool()
case reflect.String:
p.Value = v.String()
case reflect.Float32, reflect.Float64:
p.Value = v.Float()
case reflect.Slice:
if v.Type().Elem().Kind() == reflect.Uint8 {
p.Value = v.Bytes()
} else {
return saveSliceProperty(props, name, noIndex, v)
}
case reflect.Struct:
if !v.CanAddr() {
return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
}
sub, err := newStructPLS(v.Addr().Interface())
if err != nil {
return fmt.Errorf("datastore: unsupported struct field: %v", err)
}
return sub.save(props, name+".", noIndex)
}
}
if p.Value == nil {
return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
}
*props = append(*props, p)
return nil
}
func saveSliceProperty(props *[]Property, name string, noIndex bool, v reflect.Value) error {
// Easy case: if the slice is empty, we're done.
if v.Len() == 0 {
return nil
}
// Work out the properties generated by the first element in the slice. This will
// usually be a single property, but will be more if this is a slice of structs.
var headProps []Property
if err := saveStructProperty(&headProps, name, noIndex, v.Index(0)); err != nil {
return err
}
// Convert the first element's properties into slice properties, and
// keep track of the values in a map.
values := make(map[string][]interface{}, len(headProps))
for _, p := range headProps {
values[p.Name] = append(make([]interface{}, 0, v.Len()), p.Value)
}
// Collect the property values of the subsequent elements.
for i := 1; i < v.Len(); i++ {
elemProps := make([]Property, 0, len(headProps))
if err := saveStructProperty(&elemProps, name, noIndex, v.Index(i)); err != nil {
return err
}
for _, p := range elemProps {
v, ok := values[p.Name]
if !ok {
return fmt.Errorf("datastore: unexpected property %q in elem %d of slice", p.Name, i)
}
values[p.Name] = append(v, p.Value)
}
}
// Convert to the final properties.
for _, p := range headProps {
p.Value = values[p.Name]
*props = append(*props, p)
}
return nil
}
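// For illustration, the flattening performed above: the first element fixes
// the property names, and each name then collects one value per element.
// A minimal sketch in which Row and the field name are hypothetical:
//
//	type Row struct{ A, B int }
//	// Saving a field `Rows []Row` holding []Row{{1, 2}, {3, 4}} yields:
//	//   Property{Name: "Rows.A", Value: []interface{}{int64(1), int64(3)}}
//	//   Property{Name: "Rows.B", Value: []interface{}{int64(2), int64(4)}}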
func (s structPLS) Save() ([]Property, error) {
var props []Property
if err := s.save(&props, "", false); err != nil {
return nil, err
}
return props, nil
}
func (s structPLS) save(props *[]Property, prefix string, noIndex bool) error {
for name, f := range s.codec.fields {
name = prefix + name
v := s.v.FieldByIndex(f.path)
if !v.IsValid() || !v.CanSet() {
continue
}
noIndex1 := noIndex || f.noIndex
if err := saveStructProperty(props, name, noIndex1, v); err != nil {
return err
}
}
return nil
}
func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) {
e := &pb.Entity{
Key: keyToProto(key),
Properties: map[string]*pb.Value{},
}
indexedProps := 0
for _, p := range props {
val, err := interfaceToProto(p.Value, p.NoIndex)
if err != nil {
return nil, fmt.Errorf("datastore: %v for a Property with Name %q", err, p.Name)
}
if !p.NoIndex {
rVal := reflect.ValueOf(p.Value)
if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 {
indexedProps += rVal.Len()
} else {
indexedProps++
}
}
if indexedProps > maxIndexedProperties {
return nil, errors.New("datastore: too many indexed properties")
}
if _, ok := e.Properties[p.Name]; ok {
return nil, fmt.Errorf("datastore: duplicate Property with Name %q", p.Name)
}
e.Properties[p.Name] = val
}
return e, nil
}
func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) {
val := &pb.Value{ExcludeFromIndexes: noIndex}
switch v := iv.(type) {
case int:
val.ValueType = &pb.Value_IntegerValue{int64(v)}
case int32:
val.ValueType = &pb.Value_IntegerValue{int64(v)}
case int64:
val.ValueType = &pb.Value_IntegerValue{v}
case bool:
val.ValueType = &pb.Value_BooleanValue{v}
case string:
if len(v) > 1500 && !noIndex {
return nil, errors.New("string property too long to index")
}
val.ValueType = &pb.Value_StringValue{v}
case float32:
val.ValueType = &pb.Value_DoubleValue{float64(v)}
case float64:
val.ValueType = &pb.Value_DoubleValue{v}
case *Key:
if v == nil {
val.ValueType = &pb.Value_NullValue{}
} else {
val.ValueType = &pb.Value_KeyValue{keyToProto(v)}
}
case GeoPoint:
if !v.Valid() {
return nil, errors.New("invalid GeoPoint value")
}
val.ValueType = &pb.Value_GeoPointValue{&llpb.LatLng{
Latitude: v.Lat,
Longitude: v.Lng,
}}
case time.Time:
if v.Before(minTime) || v.After(maxTime) {
return nil, errors.New("time value out of range")
}
val.ValueType = &pb.Value_TimestampValue{&timepb.Timestamp{
Seconds: v.Unix(),
Nanos: int32(v.Nanosecond()),
}}
case []byte:
if len(v) > 1500 && !noIndex {
return nil, errors.New("[]byte property too long to index")
}
val.ValueType = &pb.Value_BlobValue{v}
case []interface{}:
arr := make([]*pb.Value, 0, len(v))
for i, v := range v {
elem, err := interfaceToProto(v, noIndex)
if err != nil {
return nil, fmt.Errorf("%v at index %d", err, i)
}
arr = append(arr, elem)
}
val.ValueType = &pb.Value_ArrayValue{&pb.ArrayValue{arr}}
// ArrayValues have ExcludeFromIndexes set on the individual items, rather
// than the top-level value.
val.ExcludeFromIndexes = false
default:
if iv != nil {
return nil, fmt.Errorf("invalid Value type %t", iv)
}
val.ValueType = &pb.Value_NullValue{}
}
// TODO(jbd): Support EntityValue.
return val, nil
}
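// For illustration, how the 1500-byte indexing limit above behaves; a minimal
// sketch (interfaceToProto is package-internal, so this only works from within
// the package):
//
//	big := strings.Repeat("x", 2000)
//	_, err := interfaceToProto(big, false) // "string property too long to index"
//	val, _ := interfaceToProto(big, true)  // allowed; excluded from indexes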

34
vendor/cloud.google.com/go/datastore/save_test.go generated vendored Normal file

@@ -0,0 +1,34 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"testing"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
func TestInterfaceToProtoNilKey(t *testing.T) {
var iv *Key
pv, err := interfaceToProto(iv, false)
if err != nil {
t.Fatalf("nil key: interfaceToProto: %v", err)
}
_, ok := pv.ValueType.(*pb.Value_NullValue)
if !ok {
t.Errorf("nil key: type:\ngot: %T\nwant: %T", pv.ValueType, &pb.Value_NullValue{})
}
}


@@ -0,0 +1,41 @@
indexes:
- kind: SQChild
ancestor: yes
properties:
- name: T
- name: I
- kind: SQChild
ancestor: yes
properties:
- name: T
- name: I
direction: desc
- kind: SQChild
ancestor: yes
properties:
- name: I
- name: T
- name: U
- kind: SQChild
ancestor: yes
properties:
- name: I
- name: T
- name: U
- kind: SQChild
ancestor: yes
properties:
- name: T
- name: J
- kind: SQChild
ancestor: yes
properties:
- name: T
- name: J
- name: U

36
vendor/cloud.google.com/go/datastore/time.go generated vendored Normal file

@@ -0,0 +1,36 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"math"
"time"
)
var (
minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
)
func toUnixMicro(t time.Time) int64 {
// We cannot use t.UnixNano() / 1e3 because we want to handle times more than
// 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
// be represented in the numerator of a single int64 divide.
return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
}
func fromUnixMicro(t int64) time.Time {
return time.Unix(t/1e6, (t%1e6)*1e3)
}
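// For illustration, the round trip implemented above; a minimal sketch:
//
//	t0 := time.Date(2009, 11, 10, 23, 0, 0, 123000, time.UTC) // 123µs past the second
//	t1 := fromUnixMicro(toUnixMicro(t0))
//	// t1.Equal(t0) holds because t0 is an integral number of microseconds;
//	// a value like time.Unix(0, 123) would lose its sub-microsecond part.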

75
vendor/cloud.google.com/go/datastore/time_test.go generated vendored Normal file

@@ -0,0 +1,75 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"testing"
"time"
)
func TestUnixMicro(t *testing.T) {
// Test that all these time.Time values survive a round trip to unix micros.
testCases := []time.Time{
{},
time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC),
time.Unix(-1e6, -1000),
time.Unix(-1e6, 0),
time.Unix(-1e6, +1000),
time.Unix(-60, -1000),
time.Unix(-60, 0),
time.Unix(-60, +1000),
time.Unix(-1, -1000),
time.Unix(-1, 0),
time.Unix(-1, +1000),
time.Unix(0, -3000),
time.Unix(0, -2000),
time.Unix(0, -1000),
time.Unix(0, 0),
time.Unix(0, +1000),
time.Unix(0, +2000),
time.Unix(+60, -1000),
time.Unix(+60, 0),
time.Unix(+60, +1000),
time.Unix(+1e6, -1000),
time.Unix(+1e6, 0),
time.Unix(+1e6, +1000),
time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC),
time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC),
time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC),
time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC),
}
for _, tc := range testCases {
got := fromUnixMicro(toUnixMicro(tc))
if !got.Equal(tc) {
t.Errorf("got %q, want %q", got, tc)
}
}
// Test that a time.Time that isn't an integral number of microseconds
// is not perfectly reconstructed after a round trip.
t0 := time.Unix(0, 123)
t1 := fromUnixMicro(toUnixMicro(t0))
if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 {
t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond())
}
}

308
vendor/cloud.google.com/go/datastore/transaction.go generated vendored Normal file

@@ -0,0 +1,308 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
// ErrConcurrentTransaction is returned when a transaction is rolled back due
// to a conflict with a concurrent transaction.
var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
var errExpiredTransaction = errors.New("datastore: transaction expired")
type transactionSettings struct {
attempts int
}
// newTransactionSettings creates a transactionSettings with a given TransactionOption slice.
// Unconfigured options will be set to default values.
func newTransactionSettings(opts []TransactionOption) *transactionSettings {
s := &transactionSettings{attempts: 3}
for _, o := range opts {
o.apply(s)
}
return s
}
// TransactionOption configures the way a transaction is executed.
type TransactionOption interface {
apply(*transactionSettings)
}
// MaxAttempts returns a TransactionOption that overrides the default number of attempts (3).
func MaxAttempts(attempts int) TransactionOption {
return maxAttempts(attempts)
}
type maxAttempts int
func (w maxAttempts) apply(s *transactionSettings) {
if w > 0 {
s.attempts = int(w)
}
}
// Transaction represents a set of datastore operations to be committed atomically.
//
// Operations are enqueued by calling the Put and Delete methods on Transaction
// (or their Multi-equivalents). These operations are only committed when the
// Commit method is invoked. To ensure consistency, reads must be performed by
// using Transaction's Get method or by using the Transaction method when
// building a query.
//
// A Transaction must be committed or rolled back exactly once.
type Transaction struct {
id []byte
client *Client
ctx context.Context
mutations []*pb.Mutation // The mutations to apply.
pending map[int]*PendingKey // Map from mutation index to incomplete keys pending transaction completion.
}
// NewTransaction starts a new transaction.
func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) {
for _, o := range opts {
if _, ok := o.(maxAttempts); ok {
return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option")
}
}
req := &pb.BeginTransactionRequest{
ProjectId: c.dataset,
}
resp, err := c.client.BeginTransaction(ctx, req)
if err != nil {
return nil, err
}
return &Transaction{
id: resp.Transaction,
ctx: ctx,
client: c,
mutations: nil,
pending: make(map[int]*PendingKey),
}, nil
}
// RunInTransaction runs f in a transaction. f is invoked with a Transaction
// that f should use for all the transaction's datastore operations.
//
// f must not call Commit or Rollback on the provided Transaction.
//
// If f returns nil, RunInTransaction commits the transaction,
// returning the Commit and a nil error if it succeeds. If the commit fails due
// to a conflicting transaction, RunInTransaction retries f with a new
// Transaction. It gives up and returns ErrConcurrentTransaction after three
// failed attempts (or as configured with MaxAttempts).
//
// If f returns non-nil, then the transaction will be rolled back and
// RunInTransaction will return the same error. The function f is not retried.
//
// Note that when f returns, the transaction is not committed. Calling code
// must not assume that any of f's changes have been committed until
// RunInTransaction returns nil.
//
// Since f may be called multiple times, f should usually be idempotent.
// Note that Transaction.Get is not idempotent when unmarshaling slice fields.
func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (*Commit, error) {
settings := newTransactionSettings(opts)
for n := 0; n < settings.attempts; n++ {
tx, err := c.NewTransaction(ctx)
if err != nil {
return nil, err
}
if err := f(tx); err != nil {
tx.Rollback()
return nil, err
}
if cmt, err := tx.Commit(); err != ErrConcurrentTransaction {
return cmt, err
}
}
return nil, ErrConcurrentTransaction
}
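// For illustration, the retry contract described above from the caller's
// side; a minimal sketch in which Counter, key and ctx are hypothetical:
//
//	_, err := client.RunInTransaction(ctx, func(tx *Transaction) error {
//		var c Counter
//		if err := tx.Get(key, &c); err != nil {
//			return err
//		}
//		c.Count++
//		_, err := tx.Put(key, &c)
//		return err
//	}, MaxAttempts(5))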
// Commit applies the enqueued operations atomically.
func (t *Transaction) Commit() (*Commit, error) {
if t.id == nil {
return nil, errExpiredTransaction
}
req := &pb.CommitRequest{
ProjectId: t.client.dataset,
TransactionSelector: &pb.CommitRequest_Transaction{t.id},
Mutations: t.mutations,
Mode: pb.CommitRequest_TRANSACTIONAL,
}
t.id = nil
resp, err := t.client.client.Commit(t.ctx, req)
if err != nil {
if grpc.Code(err) == codes.Aborted {
return nil, ErrConcurrentTransaction
}
return nil, err
}
// Copy any newly minted keys into the returned keys.
commit := &Commit{}
for i, p := range t.pending {
if i >= len(resp.MutationResults) || resp.MutationResults[i].Key == nil {
return nil, errors.New("datastore: internal error: server returned the wrong mutation results")
}
key, err := protoToKey(resp.MutationResults[i].Key)
if err != nil {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
p.key = key
p.commit = commit
}
return commit, nil
}
// Rollback abandons a pending transaction.
func (t *Transaction) Rollback() error {
if t.id == nil {
return errExpiredTransaction
}
id := t.id
t.id = nil
_, err := t.client.client.Rollback(t.ctx, &pb.RollbackRequest{
ProjectId: t.client.dataset,
Transaction: id,
})
return err
}
// Get is the transaction-specific version of the package function Get.
// All reads performed during the transaction will come from a single consistent
// snapshot. Furthermore, if the transaction is set to a serializable isolation
// level, another transaction cannot concurrently modify the data that is read
// or modified by this transaction.
func (t *Transaction) Get(key *Key, dst interface{}) error {
opts := &pb.ReadOptions{
ConsistencyType: &pb.ReadOptions_Transaction{t.id},
}
err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts)
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// GetMulti is a batch version of Get.
func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error {
if t.id == nil {
return errExpiredTransaction
}
opts := &pb.ReadOptions{
ConsistencyType: &pb.ReadOptions_Transaction{t.id},
}
return t.client.get(t.ctx, keys, dst, opts)
}
// Put is the transaction-specific version of the package function Put.
//
// Put returns a PendingKey which can be resolved into a Key using the
// return value from a successful Commit. If key is an incomplete key, the
// returned pending key will resolve to a unique key generated by the
// datastore.
func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) {
h, err := t.PutMulti([]*Key{key}, []interface{}{src})
if err != nil {
if me, ok := err.(MultiError); ok {
return nil, me[0]
}
return nil, err
}
return h[0], nil
}
// PutMulti is a batch version of Put. One PendingKey is returned for each
// element of src in the same order.
func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) {
if t.id == nil {
return nil, errExpiredTransaction
}
mutations, err := putMutations(keys, src)
if err != nil {
return nil, err
}
origin := len(t.mutations)
t.mutations = append(t.mutations, mutations...)
// Prepare the returned handles, pre-populating where possible.
ret := make([]*PendingKey, len(keys))
for i, key := range keys {
p := &PendingKey{}
if key.Incomplete() {
// This key will be in the final commit result.
t.pending[origin+i] = p
} else {
p.key = key
}
ret[i] = p
}
return ret, nil
}
// Delete is the transaction-specific version of the package function Delete.
// Delete enqueues the deletion of the entity for the given key, to be
// committed atomically upon calling Commit.
func (t *Transaction) Delete(key *Key) error {
err := t.DeleteMulti([]*Key{key})
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// DeleteMulti is a batch version of Delete.
func (t *Transaction) DeleteMulti(keys []*Key) error {
if t.id == nil {
return errExpiredTransaction
}
mutations, err := deleteMutations(keys)
if err != nil {
return err
}
t.mutations = append(t.mutations, mutations...)
return nil
}
// Commit represents the result of a committed transaction.
type Commit struct{}
// Key resolves a pending key handle into a final key.
func (c *Commit) Key(p *PendingKey) *Key {
if c != p.commit {
panic("PendingKey was not created by corresponding transaction")
}
return p.key
}
// PendingKey represents the key for a newly-inserted entity. It can be
// resolved into a Key by calling the Key method of Commit.
type PendingKey struct {
key *Key
commit *Commit
}
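// For illustration, resolving a PendingKey through the Commit returned by the
// transaction; a minimal sketch in which ctx, tx and ent are hypothetical:
//
//	pk, err := tx.Put(NewIncompleteKey(ctx, "Gopher", nil), &ent)
//	if err != nil {
//		return err
//	}
//	cmt, err := tx.Commit()
//	if err != nil {
//		return err
//	}
//	key := cmt.Key(pk) // the final, datastore-assigned key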

315
vendor/cloud.google.com/go/errors/errors.go generated vendored Normal file

@@ -0,0 +1,315 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package errors is a Google Stackdriver Error Reporting library.
//
// This package is still experimental and subject to change.
//
// See https://cloud.google.com/error-reporting/ for more information.
//
// To initialize a client, use the NewClient function. Generally you will want
// to do this on program initialization. The NewClient function takes as
// arguments a context, the project name, a service name, and a version string.
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty.
//
// import "cloud.google.com/go/errors"
// ...
// errorsClient, err = errors.NewClient(ctx, projectID, "myservice", "v1.0")
//
// The client can recover panics in your program and report them as errors.
// To use this functionality, defer its Catch method, as you would any other
// function for recovering panics.
//
// func foo(ctx context.Context, ...) {
// defer errorsClient.Catch(ctx)
// ...
// }
//
// Catch writes an error report containing the recovered value and a stack trace
// to the log named "errorreports" using a Stackdriver Logging client.
//
// There are various options you can add to the call to Catch that modify how
// panics are handled.
//
// WithMessage and WithMessagef add a custom message after the recovered value,
// using fmt.Sprint and fmt.Sprintf respectively.
//
// defer errorsClient.Catch(ctx, errors.WithMessagef("x=%d", x))
//
// WithRequest fills in various fields in the error report with information
// about an http.Request that's being handled.
//
// defer errorsClient.Catch(ctx, errors.WithRequest(httpReq))
//
// By default, after recovering a panic, Catch will panic again with the
// recovered value. You can turn off this behavior with the Repanic option.
//
// defer errorsClient.Catch(ctx, errors.Repanic(false))
//
// You can also change the default behavior for the client by changing the
// RepanicDefault field.
//
// errorsClient.RepanicDefault = false
//
// It is also possible to write an error report directly without recovering a
// panic, using Report or Reportf.
//
// if err != nil {
// errorsClient.Reportf(ctx, r, "unexpected error %v", err)
// }
//
// If you try to write an error report with a nil client, or if the logging
// client fails to write the report to the Stackdriver Logging server, the error
// report is logged using log.Println.
package errors // import "cloud.google.com/go/errors"
import (
"bytes"
"fmt"
"log"
"net/http"
"runtime"
"strings"
"time"
"cloud.google.com/go/logging"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
const (
userAgent = `gcloud-golang-errorreporting/20160701`
)
type Client struct {
loggingClient *logging.Client
projectID string
serviceContext map[string]string
// RepanicDefault determines whether Catch will re-panic after recovering a
// panic. This behavior can be overridden for an individual call to Catch using
// the Repanic option.
RepanicDefault bool
}
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, opts ...option.ClientOption) (*Client, error) {
l, err := logging.NewClient(ctx, projectID, "errorreports", opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
}
c := &Client{
loggingClient: l,
projectID: projectID,
RepanicDefault: true,
serviceContext: map[string]string{
"service": serviceName,
},
}
if serviceVersion != "" {
c.serviceContext["version"] = serviceVersion
}
return c, nil
}
// An Option is an optional argument to Catch.
type Option interface {
isOption()
}
// PanicFlag returns an Option that can inform Catch that a panic has occurred.
// If *p is true when Catch is called, an error report is made even if recover
// returns nil. This allows Catch to report an error for panic(nil).
// If p is nil, the option is ignored.
//
// Here is an example of how to use PanicFlag:
//
// func foo(ctx context.Context, ...) {
// hasPanicked := true
// defer errorsClient.Catch(ctx, errors.PanicFlag(&hasPanicked))
// ...
// ...
// // We have reached the end of the function, so we're not panicking.
// hasPanicked = false
// }
func PanicFlag(p *bool) Option { return panicFlag{p} }
type panicFlag struct {
*bool
}
func (h panicFlag) isOption() {}
// Repanic returns an Option that determines whether Catch will re-panic after
// it reports an error. This overrides the default in the client.
func Repanic(r bool) Option { return repanic(r) }
type repanic bool
func (r repanic) isOption() {}
// WithRequest returns an Option that informs Catch or Report of an http.Request
// that is being handled. Information from the Request is included in the error
// report, if one is made.
func WithRequest(r *http.Request) Option { return withRequest{r} }
type withRequest struct {
*http.Request
}
func (w withRequest) isOption() {}
// WithMessage returns an Option that sets a message to be included in the error
// report, if one is made. v is converted to a string with fmt.Sprint.
func WithMessage(v ...interface{}) Option { return message(v) }
type message []interface{}
func (m message) isOption() {}
// WithMessagef returns an Option that sets a message to be included in the error
// report, if one is made. format and v are converted to a string with fmt.Sprintf.
func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} }
type messagef struct {
format string
v []interface{}
}
func (m messagef) isOption() {}
// Catch tries to recover a panic; if it succeeds, it writes an error report.
// It should be called by deferring it, like any other function for recovering
// panics.
//
// Catch can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Catch(ctx context.Context, opt ...Option) {
panicked := false
for _, o := range opt {
switch o := o.(type) {
case panicFlag:
panicked = panicked || o.bool != nil && *o.bool
}
}
x := recover()
if x == nil && !panicked {
return
}
var (
r *http.Request
shouldRepanic = true
messages = []string{fmt.Sprint(x)}
)
if c != nil {
shouldRepanic = c.RepanicDefault
}
for _, o := range opt {
switch o := o.(type) {
case repanic:
shouldRepanic = bool(o)
case withRequest:
r = o.Request
case message:
messages = append(messages, fmt.Sprint(o...))
case messagef:
messages = append(messages, fmt.Sprintf(o.format, o.v...))
}
}
c.logInternal(ctx, r, true, strings.Join(messages, " "))
if shouldRepanic {
panic(x)
}
}
// Report writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Report can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprint(v...))
}
// Reportf writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Reportf can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprintf(format, v...))
}
func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) {
payload := map[string]interface{}{
"eventTime": time.Now().In(time.UTC).Format(time.RFC3339Nano),
}
// limit the stack trace to 16k.
var buf [16384]byte
stack := buf[0:runtime.Stack(buf[:], false)]
payload["message"] = msg + "\n" + chopStack(stack, isPanic)
if r != nil {
payload["context"] = map[string]interface{}{
"httpRequest": map[string]interface{}{
"method": r.Method,
"url": r.Host + r.RequestURI,
"userAgent": r.UserAgent(),
"referrer": r.Referer(),
"remoteIp": r.RemoteAddr,
},
}
}
if c == nil {
log.Println("Error report used nil client:", payload)
return
}
payload["serviceContext"] = c.serviceContext
e := logging.Entry{
Level: logging.Error,
Payload: payload,
}
err := c.loggingClient.LogSync(e)
if err != nil {
log.Println("Error writing error report:", err, "report:", payload)
}
}
// chopStack trims a stack trace so that the function which panics or calls
// Report is first.
func chopStack(s []byte, isPanic bool) string {
var f []byte
if isPanic {
f = []byte("panic(")
} else {
f = []byte("cloud.google.com/go/errors.(*Client).Report")
}
lfFirst := bytes.IndexByte(s, '\n')
if lfFirst == -1 {
return string(s)
}
stack := s[lfFirst:]
panicLine := bytes.Index(stack, f)
if panicLine == -1 {
return string(s)
}
stack = stack[panicLine+1:]
for i := 0; i < 2; i++ {
nextLine := bytes.IndexByte(stack, '\n')
if nextLine == -1 {
return string(s)
}
stack = stack[nextLine+1:]
}
return string(s[:lfFirst+1]) + string(stack)
}

218
vendor/cloud.google.com/go/errors/errors_test.go generated vendored Normal file

@@ -0,0 +1,218 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors_test
import (
"bytes"
"io/ioutil"
"log"
"net/http"
"strings"
"testing"
"cloud.google.com/go/errors"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
const testProjectID = "testproject"
type fakeRoundTripper struct {
req *http.Request
fail bool
body string
}
func newFakeRoundTripper() *fakeRoundTripper {
return &fakeRoundTripper{}
}
func (rt *fakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
rt.req = r
body, err := ioutil.ReadAll(r.Body)
if err != nil {
panic(err)
}
rt.body = string(body)
if rt.fail {
return &http.Response{
Status: "503 Service Unavailable",
StatusCode: 503,
Body: ioutil.NopCloser(strings.NewReader("{}")),
}, nil
}
return &http.Response{
Status: "200 OK",
StatusCode: 200,
Body: ioutil.NopCloser(strings.NewReader("{}")),
}, nil
}
func newTestClient(rt http.RoundTripper) *errors.Client {
t, err := errors.NewClient(context.Background(), testProjectID, "myservice", "v1.000", option.WithHTTPClient(&http.Client{Transport: rt}))
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}
var ctx context.Context
func init() {
ctx = context.Background()
}
func TestCatchNothing(t *testing.T) {
rt := &fakeRoundTripper{}
c := newTestClient(rt)
defer func() {
r := rt.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}
func commonChecks(t *testing.T, body, panickingFunction string) {
if !strings.Contains(body, "myservice") {
t.Errorf("error report didn't contain service name")
}
if !strings.Contains(body, "v1.000") {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}
func TestCatchPanic(t *testing.T) {
rt := &fakeRoundTripper{}
c := newTestClient(rt)
defer func() {
r := rt.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, rt.body, "errors_test.TestCatchPanic")
if !strings.Contains(rt.body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, errors.WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClient(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.Bytes()
if !strings.Contains(string(body), "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(string(body), "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(string(body), "errors_test.TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *errors.Client
defer c.Catch(ctx, errors.WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReports(t *testing.T) {
rt := &fakeRoundTripper{}
c := newTestClient(rt)
rt.fail = true
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.Bytes()
commonChecks(t, string(body), "errors_test.TestLogFailedReports")
if !strings.Contains(string(body), "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, errors.WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanic(t *testing.T) {
rt := &fakeRoundTripper{}
c := newTestClient(rt)
defer func() {
r := rt.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, rt.body, "errors_test.TestCatchNilPanic")
if !strings.Contains(rt.body, "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, errors.WithMessage("hello, error"), errors.PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanic(t *testing.T) {
rt := &fakeRoundTripper{}
c := newTestClient(rt)
defer func() {
r := rt.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, errors.WithMessage("hello, error"))
panic(nil)
}
func TestReport(t *testing.T) {
rt := &fakeRoundTripper{}
c := newTestClient(rt)
c.Report(ctx, nil, "hello, ", "error")
r := rt.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, rt.body, "errors_test.TestReport")
}
func TestReportf(t *testing.T) {
rt := &fakeRoundTripper{}
c := newTestClient(rt)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
r := rt.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, rt.body, "errors_test.TestReportf")
if !strings.Contains(rt.body, "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}

118
vendor/cloud.google.com/go/errors/stack_test.go generated vendored Normal file

@@ -0,0 +1,118 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import "testing"
func TestChopStack(t *testing.T) {
for _, test := range []struct {
name string
in []byte
expected string
isPanic bool
}{
{
name: "Catch",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errors.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b
cloud.google.com/go/errors.(*Client).Catch()
/gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed
panic()
/gopath/src/runtime/panic.go:458 +0x243
cloud.google.com/go/errors_test.TestCatchPanic()
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
cloud.google.com/go/errors_test.TestCatchPanic()
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "function not found",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errors.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b
cloud.google.com/go/errors.(*Client).Catch()
/gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed
cloud.google.com/go/errors_test.TestCatchPanic()
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errors.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b
cloud.google.com/go/errors.(*Client).Catch()
/gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed
cloud.google.com/go/errors_test.TestCatchPanic()
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "Report",
in: []byte(` goroutine 39 [running]:
runtime/debug.Stack()
/gopath/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errors.(*Client).logInternal()
/gopath/cloud.google.com/go/errors/errors.go:259 +0x18b
cloud.google.com/go/errors.(*Client).Report()
/gopath/cloud.google.com/go/errors/errors.go:248 +0x4ed
cloud.google.com/go/errors_test.TestReport()
/gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1
testing.tRunner()
/gopath/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`),
expected: ` goroutine 39 [running]:
cloud.google.com/go/errors_test.TestReport()
/gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1
testing.tRunner()
/gopath/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`,
isPanic: false,
},
} {
out := chopStack(test.in, test.isPanic)
if out != test.expected {
t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected)
}
}
}


@@ -0,0 +1,92 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// concat_table is an example client of the bigquery client library.
// It concatenates two BigQuery tables and writes the result to another table.
package main
import (
"flag"
"fmt"
"log"
"os"
"time"
"cloud.google.com/go/bigquery"
"golang.org/x/net/context"
)
var (
project = flag.String("project", "", "The ID of a Google Cloud Platform project")
dataset = flag.String("dataset", "", "The ID of a BigQuery dataset")
src1 = flag.String("src1", "", "The ID of the first BigQuery table to concatenate")
src2 = flag.String("src2", "", "The ID of the second BigQuery table to concatenate")
dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to")
pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status")
)
func main() {
flag.Parse()
flagsOk := true
for _, f := range []string{"project", "dataset", "src1", "src2", "dest"} {
if flag.Lookup(f).Value.String() == "" {
fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f)
flagsOk = false
}
}
if !flagsOk {
os.Exit(1)
}
if *src1 == *src2 || *src1 == *dest || *src2 == *dest {
log.Fatalf("Different values must be supplied for each of --src1, --src2 and --dest")
}
ctx := context.Background()
client, err := bigquery.NewClient(ctx, *project)
if err != nil {
log.Fatalf("Creating bigquery client: %v", err)
}
s1 := client.Dataset(*dataset).Table(*src1)
s2 := client.Dataset(*dataset).Table(*src2)
d := client.Dataset(*dataset).Table(*dest)
// Concatenate data.
job, err := client.Copy(ctx, d, bigquery.Tables{s1, s2}, bigquery.WriteTruncate)
if err != nil {
log.Fatalf("Concatenating: %v", err)
}
fmt.Printf("Job for concatenation operation: %+v\n", job)
fmt.Printf("Waiting for job to complete.\n")
for range time.Tick(*pollint) {
status, err := job.Status(ctx)
if err != nil {
fmt.Printf("Failure determining status: %v", err)
break
}
if !status.Done() {
continue
}
if err := status.Err(); err == nil {
fmt.Printf("Success\n")
} else {
fmt.Printf("Failure: %+v\n", err)
}
break
}
}


@@ -0,0 +1,95 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// load is an example client of the bigquery client library.
// It loads a file from Google Cloud Storage into a BigQuery table.
package main
import (
"flag"
"fmt"
"log"
"os"
"time"
"cloud.google.com/go/bigquery"
"golang.org/x/net/context"
)
var (
project = flag.String("project", "", "The ID of a Google Cloud Platform project")
dataset = flag.String("dataset", "", "The ID of a BigQuery dataset")
table = flag.String("table", "", "The ID of a BigQuery table to load data into")
bucket = flag.String("bucket", "", "The name of a Google Cloud Storage bucket to load data from")
object = flag.String("object", "", "The name of a Google Cloud Storage object to load data from. Must exist within the bucket specified by --bucket")
skiprows = flag.Int64("skiprows", 0, "The number of rows of the source data to skip when loading")
pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status")
)
func main() {
flag.Parse()
flagsOk := true
for _, f := range []string{"project", "dataset", "table", "bucket", "object"} {
if flag.Lookup(f).Value.String() == "" {
fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f)
flagsOk = false
}
}
if !flagsOk {
os.Exit(1)
}
ctx := context.Background()
client, err := bigquery.NewClient(ctx, *project)
if err != nil {
log.Fatalf("Creating bigquery client: %v", err)
}
table := client.Dataset(*dataset).Table(*table)
gcs := client.NewGCSReference(fmt.Sprintf("gs://%s/%s", *bucket, *object))
gcs.SkipLeadingRows = *skiprows
// Load data from Google Cloud Storage into a BigQuery table.
job, err := client.Copy(
ctx, table, gcs,
bigquery.MaxBadRecords(1),
bigquery.AllowQuotedNewlines(),
bigquery.WriteTruncate)
if err != nil {
log.Fatalf("Loading data: %v", err)
}
fmt.Printf("Job for data load operation: %+v\n", job)
fmt.Printf("Waiting for job to complete.\n")
for range time.Tick(*pollint) {
status, err := job.Status(ctx)
if err != nil {
fmt.Printf("Failure determining status: %v", err)
break
}
if !status.Done() {
continue
}
if err := status.Err(); err == nil {
fmt.Printf("Success\n")
} else {
fmt.Printf("Failure: %+v\n", err)
}
break
}
}


@@ -0,0 +1,100 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// query is an example client of the bigquery client library.
// It submits a query and writes the result to a table.
package main
import (
"flag"
"fmt"
"log"
"os"
"time"
"cloud.google.com/go/bigquery"
"golang.org/x/net/context"
)
var (
project = flag.String("project", "", "The ID of a Google Cloud Platform project")
dataset = flag.String("dataset", "", "The ID of a BigQuery dataset")
q = flag.String("q", "", "The query string")
dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to. If unset, an ephemeral table ID will be generated.")
pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status")
wait = flag.Bool("wait", false, "Whether to wait for the query job to complete.")
)
func main() {
flag.Parse()
flagsOk := true
for _, f := range []string{"project", "dataset", "q"} {
if flag.Lookup(f).Value.String() == "" {
fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f)
flagsOk = false
}
}
if !flagsOk {
os.Exit(1)
}
ctx := context.Background()
client, err := bigquery.NewClient(ctx, *project)
if err != nil {
log.Fatalf("Creating bigquery client: %v", err)
}
d := &bigquery.Table{}
if *dest != "" {
d = client.Dataset(*dataset).Table(*dest)
}
query := &bigquery.Query{
Q: *q,
DefaultProjectID: *project,
DefaultDatasetID: *dataset,
}
// Query data.
job, err := client.Copy(ctx, d, query, bigquery.WriteTruncate)
if err != nil {
log.Fatalf("Querying: %v", err)
}
fmt.Printf("Submitted query. Job ID: %s\n", job.ID())
if !*wait {
return
}
fmt.Printf("Waiting for job to complete.\n")
for range time.Tick(*pollint) {
status, err := job.Status(ctx)
if err != nil {
fmt.Printf("Failure determining status: %v", err)
break
}
if !status.Done() {
continue
}
if err := status.Err(); err == nil {
fmt.Printf("Success\n")
} else {
fmt.Printf("Failure: %+v\n", err)
}
break
}
}

Some files were not shown because too many files have changed in this diff.