mirror of https://github.com/kubernetes-sigs/descheduler.git

bump(*): kubernetes release-1.16.0 dependencies

Mike Dame
2019-10-12 11:11:43 -04:00
parent 5af668e89a
commit 1652ba7976
28121 changed files with 3491095 additions and 2280257 deletions


@@ -0,0 +1,8 @@
# BigQuery Benchmark
This directory contains benchmarks for the BigQuery client.
## Usage
`go run bench.go -- <your project id> queries.json`
The BigQuery service caches requests, so the benchmark should be run
at least twice, disregarding the first result.
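The queries file is simply a JSON array of SQL strings (bench.go unmarshals it into a []string). A minimal example, taken from the same shape as the queries.json added in this commit:

[
  "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000"
]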


@@ -0,0 +1,85 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//+build ignore

package main
import (
"context"
"encoding/json"
"flag"
"io/ioutil"
"log"
"time"
"cloud.google.com/go/bigquery"
"google.golang.org/api/iterator"
)
func main() {
flag.Parse()
ctx := context.Background()
c, err := bigquery.NewClient(ctx, flag.Arg(0))
if err != nil {
log.Fatal(err)
}
queriesJSON, err := ioutil.ReadFile(flag.Arg(1))
if err != nil {
log.Fatal(err)
}
var queries []string
if err := json.Unmarshal(queriesJSON, &queries); err != nil {
log.Fatal(err)
}
for _, q := range queries {
doQuery(ctx, c, q)
}
}
func doQuery(ctx context.Context, c *bigquery.Client, qt string) {
startTime := time.Now()
q := c.Query(qt)
it, err := q.Read(ctx)
if err != nil {
log.Fatal(err)
}
numRows, numCols := 0, 0
var firstByte time.Duration
for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
if numRows == 0 {
numCols = len(values)
firstByte = time.Since(startTime)
} else if numCols != len(values) {
log.Fatalf("got %d columns, want %d", len(values), numCols)
}
numRows++
}
log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec",
qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds())
}


@@ -0,0 +1,10 @@
[
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000",
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000",
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000",
"SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000",
"SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id",
"SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId",
"SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000",
"SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000"
]


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,52 +14,44 @@
package bigquery
// TODO(mcgreevy): support dry-run mode when creating jobs.
import (
"context"
"fmt"
"io"
"net/http"
"time"
"google.golang.org/api/option"
"google.golang.org/api/transport"
"golang.org/x/net/context"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go/v2"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
htransport "google.golang.org/api/transport/http"
)
const prodAddr = "https://www.googleapis.com/bigquery/v2/"
const (
prodAddr = "https://www.googleapis.com/bigquery/v2/"
// Scope is the Oauth2 scope for the service.
Scope = "https://www.googleapis.com/auth/bigquery"
userAgent = "gcloud-golang-bigquery/20160429"
)
// A Source is a source of data for the Copy function.
type Source interface {
implementsSource()
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
func setClientHeader(headers http.Header) {
headers.Set("x-goog-api-client", xGoogHeader)
}
// A Destination is a destination of data for the Copy function.
type Destination interface {
implementsDestination()
}
// An Option is an optional argument to Copy.
type Option interface {
implementsOption()
}
// A ReadSource is a source of data for the Read function.
type ReadSource interface {
implementsReadSource()
}
// A ReadOption is an optional argument to Read.
type ReadOption interface {
customizeRead(conf *pagingConf)
}
const Scope = "https://www.googleapis.com/auth/bigquery"
const userAgent = "gcloud-golang-bigquery/20160429"
// Client may be used to perform BigQuery operations.
type Client struct {
service service
// Location, if set, will be used as the default location for all subsequent
// dataset creation and job operations. A location specified directly in one of
// those operations will override this value.
Location string
projectID string
bqs *bq.Service
}
// NewClient constructs a new Client which can perform BigQuery operations.
@@ -71,105 +63,100 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
option.WithUserAgent(userAgent),
}
o = append(o, opts...)
httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
httpClient, endpoint, err := htransport.NewClient(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
return nil, fmt.Errorf("bigquery: dialing: %v", err)
}
s, err := newBigqueryService(httpClient, endpoint)
bqs, err := bq.New(httpClient)
if err != nil {
return nil, fmt.Errorf("constructing bigquery client: %v", err)
return nil, fmt.Errorf("bigquery: constructing client: %v", err)
}
bqs.BasePath = endpoint
c := &Client{
service: s,
projectID: projectID,
bqs: bqs,
}
return c, nil
}
// initJobProto creates and returns a bigquery Job proto.
// The proto is customized using any jobOptions in options.
// The list of Options is returned with the jobOptions removed.
func initJobProto(projectID string, options []Option) (*bq.Job, []Option) {
job := &bq.Job{}
// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
return nil
}
var other []Option
for _, opt := range options {
if o, ok := opt.(jobOption); ok {
o.customizeJob(job, projectID)
} else {
other = append(other, opt)
}
// Calls the Jobs.Insert RPC and returns a Job.
func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {
call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)
setClientHeader(call.Header())
if media != nil {
call.Media(media)
}
return job, other
}
// Copy starts a BigQuery operation to copy data from a Source to a Destination.
func (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error) {
switch dst := dst.(type) {
case *Table:
switch src := src.(type) {
case *GCSReference:
return c.load(ctx, dst, src, options)
case *Table:
return c.cp(ctx, dst, Tables{src}, options)
case Tables:
return c.cp(ctx, dst, src, options)
case *Query:
return c.query(ctx, dst, src, options)
}
case *GCSReference:
if src, ok := src.(*Table); ok {
return c.extract(ctx, dst, src, options)
}
var res *bq.Job
var err error
invoke := func() error {
res, err = call.Do()
return err
}
// A job with a client-generated ID can be retried; the presence of the
// ID makes the insert operation idempotent.
// We don't retry if there is media, because it is an io.Reader. We'd
// have to read the contents and keep it in memory, and that could be expensive.
// TODO(jba): Look into retrying if media != nil.
if job.JobReference != nil && media == nil {
err = runWithRetry(ctx, invoke)
} else {
err = invoke()
}
return nil, fmt.Errorf("no Copy operation matches dst/src pair: dst: %T ; src: %T", dst, src)
}
// Query creates a query with string q. You may optionally set
// DefaultProjectID and DefaultDatasetID on the returned query before using it.
func (c *Client) Query(q string) *Query {
return &Query{Q: q, client: c}
}
// Read submits a query for execution and returns the results via an Iterator.
//
// Read uses a temporary table to hold the results of the query job.
//
// For more control over how a query is performed, don't use this method but
// instead pass the Query as a Source to Client.Copy, and call Read on the
// resulting Job.
func (q *Query) Read(ctx context.Context, options ...ReadOption) (*Iterator, error) {
dest := &Table{}
job, err := q.client.Copy(ctx, dest, q, WriteTruncate)
if err != nil {
return nil, err
}
return job.Read(ctx, options...)
return bqToJob(res, c)
}
// executeQuery submits a query for execution and returns the results via an Iterator.
func (c *Client) executeQuery(ctx context.Context, q *Query, options ...ReadOption) (*Iterator, error) {
dest := &Table{}
job, err := c.Copy(ctx, dest, q, WriteTruncate)
if err != nil {
return nil, err
// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
if m == 0 {
return time.Time{}
}
return c.Read(ctx, job, options...)
return time.Unix(0, m*1e6)
}
// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
return c.DatasetInProject(c.projectID, id)
}
// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
return &Dataset{
projectID: projectID,
id: datasetID,
service: c.service,
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
backoff := gax.Backoff{
Initial: 1 * time.Second,
Max: 32 * time.Second,
Multiplier: 2,
}
return internal.Retry(ctx, backoff, func() (stop bool, err error) {
err = call()
if err == nil {
return true, nil
}
return !retryableError(err), err
})
}
// This is the correct definition of retryable according to the BigQuery team. It
// also considers 502 ("Bad Gateway") and 503 ("Service Unavailable") errors
// retryable; these are returned by systems between the client and the BigQuery
// service.
func retryableError(err error) bool {
e, ok := err.(*googleapi.Error)
if !ok {
return false
}
var reason string
if len(e.Errors) > 0 {
reason = e.Errors[0].Reason
}
return e.Code == http.StatusServiceUnavailable || e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
}
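For orientation, here is a minimal usage sketch of the rewritten client surface above (NewClient and Close). It is not part of the vendored code; "my-project" and "my_dataset" are placeholder IDs.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	ctx := context.Background()
	// NewClient dials the BigQuery API for the given project.
	client, err := bigquery.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatalf("bigquery.NewClient: %v", err)
	}
	// Close releases client resources; per the doc comment above it need not
	// be called at program exit, but deferring it is good practice elsewhere.
	defer client.Close()

	// Dataset returns a handle in the client's project.
	ds := client.Dataset("my_dataset")
	_ = ds
}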

26493 vendor/cloud.google.com/go/bigquery/bigquery.replay generated vendored Normal file

File diff suppressed because one or more lines are too long

107 vendor/cloud.google.com/go/bigquery/copy.go generated vendored Normal file

@@ -0,0 +1,107 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
bq "google.golang.org/api/bigquery/v2"
)
// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
// Srcs are the tables from which data will be copied.
Srcs []*Table
// Dst is the table into which the data will be copied.
Dst *Table
// CreateDisposition specifies the circumstances under which the destination table will be created.
// The default is CreateIfNeeded.
CreateDisposition TableCreateDisposition
// WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteEmpty.
WriteDisposition TableWriteDisposition
// The labels associated with this job.
Labels map[string]string
// Custom encryption configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfig *EncryptionConfig
}
func (c *CopyConfig) toBQ() *bq.JobConfiguration {
var ts []*bq.TableReference
for _, t := range c.Srcs {
ts = append(ts, t.toBQ())
}
return &bq.JobConfiguration{
Labels: c.Labels,
Copy: &bq.JobConfigurationTableCopy{
CreateDisposition: string(c.CreateDisposition),
WriteDisposition: string(c.WriteDisposition),
DestinationTable: c.Dst.toBQ(),
DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(),
SourceTables: ts,
},
}
}
func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
cc := &CopyConfig{
Labels: q.Labels,
CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition),
WriteDisposition: TableWriteDisposition(q.Copy.WriteDisposition),
Dst: bqToTable(q.Copy.DestinationTable, c),
DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration),
}
for _, t := range q.Copy.SourceTables {
cc.Srcs = append(cc.Srcs, bqToTable(t, c))
}
return cc
}
// A Copier copies data into a BigQuery table from one or more BigQuery tables.
type Copier struct {
JobIDConfig
CopyConfig
c *Client
}
// CopierFrom returns a Copier which can be used to copy data into a
// BigQuery table from one or more BigQuery tables.
// The returned Copier may optionally be further configured before its Run method is called.
func (t *Table) CopierFrom(srcs ...*Table) *Copier {
return &Copier{
c: t.c,
CopyConfig: CopyConfig{
Srcs: srcs,
Dst: t,
},
}
}
// Run initiates a copy job.
func (c *Copier) Run(ctx context.Context) (*Job, error) {
return c.c.insertJob(ctx, c.newJob(), nil)
}
func (c *Copier) newJob() *bq.Job {
return &bq.Job{
JobReference: c.JobIDConfig.createJobRef(c.c),
Configuration: c.CopyConfig.toBQ(),
}
}
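Outside the vendored sources, a minimal sketch of driving the new Copier API (CopierFrom followed by Run), assuming dst and src are existing *bigquery.Table handles such as client.Dataset("d").Table("t"); Job.Wait and JobStatus.Err come from the same package.

package example

import (
	"context"

	"cloud.google.com/go/bigquery"
)

// copyTable copies src into dst, overwriting any existing rows, and waits
// for the copy job to finish.
func copyTable(ctx context.Context, dst, src *bigquery.Table) error {
	copier := dst.CopierFrom(src)
	// CopyConfig is embedded in Copier, so its fields can be set directly
	// before Run is called.
	copier.WriteDisposition = bigquery.WriteTruncate
	job, err := copier.Run(ctx)
	if err != nil {
		return err
	}
	status, err := job.Wait(ctx)
	if err != nil {
		return err
	}
	return status.Err()
}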


@@ -1,47 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type copyOption interface {
customizeCopy(conf *bq.JobConfigurationTableCopy)
}
func (c *Client) cp(ctx context.Context, dst *Table, src Tables, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationTableCopy{}
dst.customizeCopyDst(payload)
src.customizeCopySrc(payload)
for _, opt := range options {
o, ok := opt.(copyOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeCopy(payload)
}
job.Configuration = &bq.JobConfiguration{
Copy: payload,
}
return c.service.insertJob(ctx, job, c.projectID)
}


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,15 +15,16 @@
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp/cmpopts"
bq "google.golang.org/api/bigquery/v2"
)
func defaultCopyJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Copy: &bq.JobConfigurationTableCopy{
DestinationTable: &bq.TableReference{
@@ -44,11 +45,14 @@ func defaultCopyJob() *bq.Job {
}
func TestCopy(t *testing.T) {
defer fixRandomID("RANDOM")()
testCases := []struct {
dst *Table
src Tables
options []Option
want *bq.Job
dst *Table
srcs []*Table
jobID string
location string
config CopyConfig
want *bq.Job
}{
{
dst: &Table{
@@ -56,7 +60,7 @@ func TestCopy(t *testing.T) {
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
src: Tables{
srcs: []*Table{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
@@ -71,34 +75,89 @@ func TestCopy(t *testing.T) {
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
src: Tables{
srcs: []*Table{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
TableID: "s-table-id",
},
},
options: []Option{CreateNever, WriteTruncate},
config: CopyConfig{
CreateDisposition: CreateNever,
WriteDisposition: WriteTruncate,
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
Labels: map[string]string{"a": "b"},
},
want: func() *bq.Job {
j := defaultCopyJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Copy.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
return j
}(),
},
{
dst: &Table{
ProjectID: "d-project-id",
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
srcs: []*Table{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
TableID: "s-table-id",
},
},
jobID: "job-id",
want: func() *bq.Job {
j := defaultCopyJob()
j.JobReference.JobId = "job-id"
return j
}(),
},
{
dst: &Table{
ProjectID: "d-project-id",
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
srcs: []*Table{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
TableID: "s-table-id",
},
},
location: "asia-northeast1",
want: func() *bq.Job {
j := defaultCopyJob()
j.JobReference.Location = "asia-northeast1"
return j
}(),
},
}
c := &Client{projectID: "client-project-id"}
for i, tc := range testCases {
tc.dst.c = c
copier := tc.dst.CopierFrom(tc.srcs...)
copier.JobID = tc.jobID
copier.Location = tc.location
tc.config.Srcs = tc.srcs
tc.config.Dst = tc.dst
copier.CopyConfig = tc.config
got := copier.newJob()
checkJob(t, i, got, tc.want)
for _, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
t.Errorf("err calling cp: %v", err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig,
cmpopts.IgnoreUnexported(Table{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
}
}
}


@@ -1,79 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"time"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type createTableRecorder struct {
conf *createTableConf
service
}
func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error {
rec.conf = conf
return nil
}
func TestCreateTableOptions(t *testing.T) {
s := &createTableRecorder{}
c := &Client{
projectID: "p",
service: s,
}
ds := c.Dataset("d")
table := ds.Table("t")
exp := time.Now()
q := "query"
if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q)); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want := createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
expiration: exp,
viewQuery: q,
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want = createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
expiration: exp,
// No need for an elaborate schema, that is tested in schema_test.go.
schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
}


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,42 +14,523 @@
package bigquery
import "golang.org/x/net/context"
import (
"context"
"errors"
"fmt"
"time"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
)
// Dataset is a reference to a BigQuery dataset.
type Dataset struct {
projectID string
id string
service service
ProjectID string
DatasetID string
c *Client
}
// ListTables returns a list of all the tables contained in the Dataset.
func (d *Dataset) ListTables(ctx context.Context) ([]*Table, error) {
var tables []*Table
// DatasetMetadata contains information about a BigQuery dataset.
type DatasetMetadata struct {
// These fields can be set when creating a dataset.
Name string // The user-friendly name for this dataset.
Description string // The user-friendly description of this dataset.
Location string // The geo location of the dataset.
DefaultTableExpiration time.Duration // The default expiration time for new tables.
Labels map[string]string // User-provided labels.
Access []*AccessEntry // Access permissions.
err := getPages("", func(pageToken string) (string, error) {
ts, tok, err := d.service.listTables(ctx, d.projectID, d.id, pageToken)
if err == nil {
tables = append(tables, ts...)
}
return tok, err
})
// These fields are read-only.
CreationTime time.Time
LastModifiedTime time.Time // When the dataset or any of its tables were modified.
FullID string // The full dataset ID in the form projectID:datasetID.
// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
// ensure that the metadata hasn't changed since it was read.
ETag string
}
// DatasetMetadataToUpdate is used when updating a dataset's metadata.
// Only non-nil fields will be updated.
type DatasetMetadataToUpdate struct {
Description optional.String // The user-friendly description of this table.
Name optional.String // The user-friendly name for this dataset.
// DefaultTableExpiration is the default expiration time for new tables.
// If set to time.Duration(0), new tables never expire.
DefaultTableExpiration optional.Duration
// The entire access list. It is not possible to replace individual entries.
Access []*AccessEntry
labelUpdater
}
// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
return c.DatasetInProject(c.projectID, id)
}
// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
return &Dataset{
ProjectID: projectID,
DatasetID: datasetID,
c: c,
}
}
// Create creates a dataset in the BigQuery service. An error will be returned if the
// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Create")
defer func() { trace.EndSpan(ctx, err) }()
ds, err := md.toBQ()
if err != nil {
return err
}
ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
// Use Client.Location as a default.
if ds.Location == "" {
ds.Location = d.c.Location
}
call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
setClientHeader(call.Header())
_, err = call.Do()
return err
}
func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
ds := &bq.Dataset{}
if dm == nil {
return ds, nil
}
ds.FriendlyName = dm.Name
ds.Description = dm.Description
ds.Location = dm.Location
ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
ds.Labels = dm.Labels
var err error
ds.Access, err = accessListToBQ(dm.Access)
if err != nil {
return nil, err
}
return tables, nil
if !dm.CreationTime.IsZero() {
return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
}
if !dm.LastModifiedTime.IsZero() {
return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
}
if dm.FullID != "" {
return nil, errors.New("bigquery: Dataset.FullID is not writable")
}
if dm.ETag != "" {
return nil, errors.New("bigquery: Dataset.ETag is not writable")
}
return ds, nil
}
// Create creates a dataset in the BigQuery service. An error will be returned
// if the dataset already exists.
func (d *Dataset) Create(ctx context.Context) error {
return d.service.insertDataset(ctx, d.id, d.projectID)
func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
var q []*bq.DatasetAccess
for _, e := range a {
a, err := e.toBQ()
if err != nil {
return nil, err
}
q = append(q, a)
}
return q, nil
}
// Delete deletes the dataset. Delete will fail if the dataset is not empty.
func (d *Dataset) Delete(ctx context.Context) (err error) {
return d.deleteInternal(ctx, false)
}
// DeleteWithContents deletes the dataset, as well as contained resources.
func (d *Dataset) DeleteWithContents(ctx context.Context) (err error) {
return d.deleteInternal(ctx, true)
}
func (d *Dataset) deleteInternal(ctx context.Context, deleteContents bool) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Delete")
defer func() { trace.EndSpan(ctx, err) }()
call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx).DeleteContents(deleteContents)
setClientHeader(call.Header())
return call.Do()
}
// Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (md *DatasetMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Metadata")
defer func() { trace.EndSpan(ctx, err) }()
call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
setClientHeader(call.Header())
var ds *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToDatasetMetadata(ds)
}
func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
dm := &DatasetMetadata{
CreationTime: unixMillisToTime(d.CreationTime),
LastModifiedTime: unixMillisToTime(d.LastModifiedTime),
DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
Description: d.Description,
Name: d.FriendlyName,
FullID: d.Id,
Location: d.Location,
Labels: d.Labels,
ETag: d.Etag,
}
for _, a := range d.Access {
e, err := bqToAccessEntry(a, nil)
if err != nil {
return nil, err
}
dm.Access = append(dm.Access, e)
}
return dm, nil
}
// Update modifies specific Dataset metadata fields.
// To perform a read-modify-write that protects against intervening reads,
// set the etag argument to the DatasetMetadata.ETag field from the read.
// Pass the empty string for etag for a "blind write" that will always succeed.
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (md *DatasetMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Update")
defer func() { trace.EndSpan(ctx, err) }()
ds, err := dm.toBQ()
if err != nil {
return nil, err
}
call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var ds2 *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds2, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToDatasetMetadata(ds2)
}
func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
ds := &bq.Dataset{}
forceSend := func(field string) {
ds.ForceSendFields = append(ds.ForceSendFields, field)
}
if dm.Description != nil {
ds.Description = optional.ToString(dm.Description)
forceSend("Description")
}
if dm.Name != nil {
ds.FriendlyName = optional.ToString(dm.Name)
forceSend("FriendlyName")
}
if dm.DefaultTableExpiration != nil {
dur := optional.ToDuration(dm.DefaultTableExpiration)
if dur == 0 {
// Send a null to delete the field.
ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
} else {
ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
}
}
if dm.Access != nil {
var err error
ds.Access, err = accessListToBQ(dm.Access)
if err != nil {
return nil, err
}
if len(ds.Access) == 0 {
ds.NullFields = append(ds.NullFields, "Access")
}
}
labels, forces, nulls := dm.update()
ds.Labels = labels
ds.ForceSendFields = append(ds.ForceSendFields, forces...)
ds.NullFields = append(ds.NullFields, nulls...)
return ds, nil
}
// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
func (d *Dataset) Table(tableID string) *Table {
return &Table{ProjectID: d.projectID, DatasetID: d.id, TableID: tableID, service: d.service}
return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c}
}
// Tables returns an iterator over the tables in the Dataset.
func (d *Dataset) Tables(ctx context.Context) *TableIterator {
it := &TableIterator{
ctx: ctx,
dataset: d,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.tables) },
func() interface{} { b := it.tables; it.tables = nil; return b })
return it
}
// A TableIterator is an iterator over Tables.
type TableIterator struct {
ctx context.Context
dataset *Dataset
tables []*Table
pageInfo *iterator.PageInfo
nextFunc func() error
}
// Next returns the next result. Its second return value is Done if there are
// no more results. Once Next returns Done, all subsequent calls will return
// Done.
func (it *TableIterator) Next() (*Table, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
t := it.tables[0]
it.tables = it.tables[1:]
return t, nil
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// for testing
var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
PageToken(pageToken).
Context(it.ctx)
setClientHeader(call.Header())
if pageSize > 0 {
call.MaxResults(int64(pageSize))
}
var res *bq.TableList
err := runWithRetry(it.ctx, func() (err error) {
res, err = call.Do()
return err
})
return res, err
}
func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
res, err := listTables(it, pageSize, pageToken)
if err != nil {
return "", err
}
for _, t := range res.Tables {
it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c))
}
return res.NextPageToken, nil
}
func bqToTable(tr *bq.TableReference, c *Client) *Table {
if tr == nil {
return nil
}
return &Table{
ProjectID: tr.ProjectId,
DatasetID: tr.DatasetId,
TableID: tr.TableId,
c: c,
}
}
// Datasets returns an iterator over the datasets in a project.
// The Client's project is used by default, but that can be
// changed by setting ProjectID on the returned iterator before calling Next.
func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
return c.DatasetsInProject(ctx, c.projectID)
}
// DatasetsInProject returns an iterator over the datasets in the provided project.
//
// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator.
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
it := &DatasetIterator{
ctx: ctx,
c: c,
ProjectID: projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}
// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
// ListHidden causes hidden datasets to be listed when set to true.
// Set before the first call to Next.
ListHidden bool
// Filter restricts the datasets returned by label. The filter syntax is described in
// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
// Set before the first call to Next.
Filter string
// The project ID of the listed datasets.
// Set before the first call to Next.
ProjectID string
ctx context.Context
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []*Dataset
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// Next returns the next Dataset. Its second return value is iterator.Done if
// there are no more results. Once Next returns Done, all subsequent calls will
// return Done.
func (it *DatasetIterator) Next() (*Dataset, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
item := it.items[0]
it.items = it.items[1:]
return item, nil
}
// for testing
var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
call := it.c.bqs.Datasets.List(it.ProjectID).
Context(it.ctx).
PageToken(pageToken).
All(it.ListHidden)
setClientHeader(call.Header())
if pageSize > 0 {
call.MaxResults(int64(pageSize))
}
if it.Filter != "" {
call.Filter(it.Filter)
}
var res *bq.DatasetList
err := runWithRetry(it.ctx, func() (err error) {
res, err = call.Do()
return err
})
return res, err
}
func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
res, err := listDatasets(it, pageSize, pageToken)
if err != nil {
return "", err
}
for _, d := range res.Datasets {
it.items = append(it.items, &Dataset{
ProjectID: d.DatasetReference.ProjectId,
DatasetID: d.DatasetReference.DatasetId,
c: it.c,
})
}
return res.NextPageToken, nil
}
// An AccessEntry describes the permissions that an entity has on a dataset.
type AccessEntry struct {
Role AccessRole // The role of the entity
EntityType EntityType // The type of entity
Entity string // The entity (individual or group) granted access
View *Table // The view granted access (EntityType must be ViewEntity)
}
// AccessRole is the level of access to grant to a dataset.
type AccessRole string
const (
// OwnerRole is the OWNER AccessRole.
OwnerRole AccessRole = "OWNER"
// ReaderRole is the READER AccessRole.
ReaderRole AccessRole = "READER"
// WriterRole is the WRITER AccessRole.
WriterRole AccessRole = "WRITER"
)
// EntityType is the type of entity in an AccessEntry.
type EntityType int
const (
// DomainEntity is a domain (e.g. "example.com").
DomainEntity EntityType = iota + 1
// GroupEmailEntity is an email address of a Google Group.
GroupEmailEntity
// UserEmailEntity is an email address of an individual user.
UserEmailEntity
// SpecialGroupEntity is a special group: one of projectOwners, projectReaders, projectWriters or
// allAuthenticatedUsers.
SpecialGroupEntity
// ViewEntity is a BigQuery view.
ViewEntity
)
func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) {
q := &bq.DatasetAccess{Role: string(e.Role)}
switch e.EntityType {
case DomainEntity:
q.Domain = e.Entity
case GroupEmailEntity:
q.GroupByEmail = e.Entity
case UserEmailEntity:
q.UserByEmail = e.Entity
case SpecialGroupEntity:
q.SpecialGroup = e.Entity
case ViewEntity:
q.View = e.View.toBQ()
default:
return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType)
}
return q, nil
}
func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) {
e := &AccessEntry{Role: AccessRole(q.Role)}
switch {
case q.Domain != "":
e.Entity = q.Domain
e.EntityType = DomainEntity
case q.GroupByEmail != "":
e.Entity = q.GroupByEmail
e.EntityType = GroupEmailEntity
case q.UserByEmail != "":
e.Entity = q.UserByEmail
e.EntityType = UserEmailEntity
case q.SpecialGroup != "":
e.Entity = q.SpecialGroup
e.EntityType = SpecialGroupEntity
case q.View != nil:
e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId)
e.EntityType = ViewEntity
default:
return nil, errors.New("bigquery: invalid access value")
}
return e, nil
}
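To show how the pieces above fit together, a minimal sketch (not part of the vendored code) exercising Create, Metadata, Update guarded by the ETag, and the Tables iterator; client is assumed to be a configured *bigquery.Client and "my_dataset" is a placeholder ID.

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/bigquery"
	"google.golang.org/api/iterator"
)

func exerciseDataset(ctx context.Context, client *bigquery.Client) error {
	ds := client.Dataset("my_dataset")
	if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "US"}); err != nil {
		return err
	}
	// Read-modify-write protected against intervening writes by the ETag,
	// as described in the Update doc comment above.
	md, err := ds.Metadata(ctx)
	if err != nil {
		return err
	}
	if _, err := ds.Update(ctx, bigquery.DatasetMetadataToUpdate{Description: "demo"}, md.ETag); err != nil {
		return err
	}
	// Iterate over the dataset's tables until iterator.Done.
	it := ds.Tables(ctx)
	for {
		t, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		fmt.Println(t.TableID)
	}
	return nil
}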


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,91 +15,312 @@
package bigquery
import (
"context"
"errors"
"reflect"
"strconv"
"testing"
"time"
"golang.org/x/net/context"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
itest "google.golang.org/api/iterator/testing"
)
// readServiceStub services read requests by returning data from an in-memory list of values.
type listTablesServiceStub struct {
type listTablesStub struct {
expectedProject, expectedDataset string
values [][]*Table // contains pages of tables.
pageTokens map[string]string // maps incoming page token to returned page token.
service
tables []*bq.TableListTables
}
func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
if projectID != s.expectedProject {
return nil, "", errors.New("wrong project id")
func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
if it.dataset.ProjectID != s.expectedProject {
return nil, errors.New("wrong project id")
}
if datasetID != s.expectedDataset {
return nil, "", errors.New("wrong dataset id")
if it.dataset.DatasetID != s.expectedDataset {
return nil, errors.New("wrong dataset id")
}
tables := s.values[0]
s.values = s.values[1:]
return tables, s.pageTokens[pageToken], nil
}
func TestListTables(t *testing.T) {
t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
testCases := []struct {
data [][]*Table
pageTokens map[string]string
want []*Table
}{
{
data: [][]*Table{{t1, t2}, {t3}},
pageTokens: map[string]string{"": "a", "a": ""},
want: []*Table{t1, t2, t3},
},
{
data: [][]*Table{{t1, t2}, {t3}},
pageTokens: map[string]string{"": ""}, // no more pages after first one.
want: []*Table{t1, t2},
},
const maxPageSize = 2
if pageSize <= 0 || pageSize > maxPageSize {
pageSize = maxPageSize
}
for _, tc := range testCases {
c := &Client{
service: &listTablesServiceStub{
expectedProject: "x",
expectedDataset: "y",
values: tc.data,
pageTokens: tc.pageTokens,
},
projectID: "x",
}
got, err := c.Dataset("y").ListTables(context.Background())
start := 0
if pageToken != "" {
var err error
start, err = strconv.Atoi(pageToken)
if err != nil {
t.Errorf("err calling ListTables: %v", err)
return nil, err
}
}
end := start + pageSize
if end > len(s.tables) {
end = len(s.tables)
}
nextPageToken := ""
if end < len(s.tables) {
nextPageToken = strconv.Itoa(end)
}
return &bq.TableList{
Tables: s.tables[start:end],
NextPageToken: nextPageToken,
}, nil
}
func TestTables(t *testing.T) {
c := &Client{projectID: "p1"}
inTables := []*bq.TableListTables{
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
}
outTables := []*Table{
{ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
{ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
{ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
}
lts := &listTablesStub{
expectedProject: "p1",
expectedDataset: "d1",
tables: inTables,
}
old := listTables
listTables = lts.listTables // cannot use t.Parallel with this test
defer func() { listTables = old }()
msg, ok := itest.TestIterator(outTables,
func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
if !ok {
t.Error(msg)
}
}
type listDatasetsStub struct {
expectedProject string
datasets []*bq.DatasetListDatasets
hidden map[*bq.DatasetListDatasets]bool
}
func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
const maxPageSize = 2
if pageSize <= 0 || pageSize > maxPageSize {
pageSize = maxPageSize
}
if it.Filter != "" {
return nil, errors.New("filter not supported")
}
if it.ProjectID != s.expectedProject {
return nil, errors.New("bad project ID")
}
start := 0
if pageToken != "" {
var err error
start, err = strconv.Atoi(pageToken)
if err != nil {
return nil, err
}
}
var (
i int
result []*bq.DatasetListDatasets
nextPageToken string
)
for i = start; len(result) < pageSize && i < len(s.datasets); i++ {
if s.hidden[s.datasets[i]] && !it.ListHidden {
continue
}
result = append(result, s.datasets[i])
}
if i < len(s.datasets) {
nextPageToken = strconv.Itoa(i)
}
return &bq.DatasetList{
Datasets: result,
NextPageToken: nextPageToken,
}, nil
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
func TestDatasets(t *testing.T) {
client := &Client{projectID: "p"}
inDatasets := []*bq.DatasetListDatasets{
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}},
}
outDatasets := []*Dataset{
{"p", "a", client},
{"p", "b", client},
{"p", "hidden", client},
{"p", "c", client},
}
lds := &listDatasetsStub{
expectedProject: "p",
datasets: inDatasets,
hidden: map[*bq.DatasetListDatasets]bool{inDatasets[2]: true},
}
old := listDatasets
listDatasets = lds.listDatasets // cannot use t.Parallel with this test
defer func() { listDatasets = old }()
msg, ok := itest.TestIterator(outDatasets,
func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it },
func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
if !ok {
t.Fatalf("ListHidden=true: %s", msg)
}
msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]},
func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it },
func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
if !ok {
t.Fatalf("ListHidden=false: %s", msg)
}
}
func TestDatasetToBQ(t *testing.T) {
for _, test := range []struct {
in *DatasetMetadata
want *bq.Dataset
}{
{nil, &bq.Dataset{}},
{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
{&DatasetMetadata{
Name: "name",
Description: "desc",
DefaultTableExpiration: time.Hour,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}},
}, &bq.Dataset{
FriendlyName: "name",
Description: "desc",
DefaultTableExpirationMs: 60 * 60 * 1000,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}},
}},
} {
got, err := test.in.toBQ()
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
}
}
// Check that non-writeable fields are unset.
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
for _, dm := range []*DatasetMetadata{
{CreationTime: aTime},
{LastModifiedTime: aTime},
{FullID: "x"},
{ETag: "e"},
} {
if _, err := dm.toBQ(); err == nil {
t.Errorf("%+v: got nil, want error", dm)
}
}
}
func TestListTablesError(t *testing.T) {
c := &Client{
service: &listTablesServiceStub{
expectedProject: "x",
expectedDataset: "y",
func TestBQToDatasetMetadata(t *testing.T) {
cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
cMillis := cTime.UnixNano() / 1e6
mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local)
mMillis := mTime.UnixNano() / 1e6
q := &bq.Dataset{
CreationTime: cMillis,
LastModifiedTime: mMillis,
FriendlyName: "name",
Description: "desc",
DefaultTableExpirationMs: 60 * 60 * 1000,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*bq.DatasetAccess{
{Role: "READER", UserByEmail: "joe@example.com"},
{Role: "WRITER", GroupByEmail: "users@example.com"},
},
projectID: "x",
Etag: "etag",
}
// Test that service read errors are propagated back to the caller.
// Passing "not y" as the dataset id will cause the service to return an error.
_, err := c.Dataset("not y").ListTables(context.Background())
if err == nil {
// Read should not return an error; only Err should.
t.Errorf("ListTables expected: non-nil err, got: nil")
want := &DatasetMetadata{
CreationTime: cTime,
LastModifiedTime: mTime,
Name: "name",
Description: "desc",
DefaultTableExpiration: time.Hour,
Location: "EU",
Labels: map[string]string{"x": "y"},
Access: []*AccessEntry{
{Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity},
{Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity},
},
ETag: "etag",
}
got, err := bqToDatasetMetadata(q)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("-got, +want:\n%s", diff)
}
}
func TestDatasetMetadataToUpdateToBQ(t *testing.T) {
dm := DatasetMetadataToUpdate{
Description: "desc",
Name: "name",
DefaultTableExpiration: time.Hour,
}
dm.SetLabel("label", "value")
dm.DeleteLabel("del")
got, err := dm.toBQ()
if err != nil {
t.Fatal(err)
}
want := &bq.Dataset{
Description: "desc",
FriendlyName: "name",
DefaultTableExpirationMs: 60 * 60 * 1000,
Labels: map[string]string{"label": "value"},
ForceSendFields: []string{"Description", "FriendlyName"},
NullFields: []string{"Labels.del"},
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("-got, +want:\n%s", diff)
}
}
func TestConvertAccessEntry(t *testing.T) {
c := &Client{projectID: "pid"}
for _, e := range []*AccessEntry{
{Role: ReaderRole, Entity: "e", EntityType: DomainEntity},
{Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity},
{Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity},
{Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity},
{Role: ReaderRole, EntityType: ViewEntity,
View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}},
} {
q, err := e.toBQ()
if err != nil {
t.Fatal(err)
}
got, err := bqToAccessEntry(q, c)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" {
t.Errorf("got=-, want=+:\n%s", diff)
}
}
e := &AccessEntry{Role: ReaderRole, Entity: "e"}
if _, err := e.toBQ(); err == nil {
t.Error("got nil, want error")
}
if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil {
t.Error("got nil, want error")
}
}


@@ -0,0 +1,67 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package datatransfer
import (
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)
import (
"context"
"fmt"
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
func TestDataTransferServiceSmoke(t *testing.T) {
if testing.Short() {
t.Skip("skipping smoke test in short mode")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
projectId := testutil.ProjID()
_ = projectId
c, err := NewClient(ctx, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}
var formattedParent string = fmt.Sprintf("projects/%s", projectId)
var request = &datatransferpb.ListDataSourcesRequest{
Parent: formattedParent,
}
iter := c.ListDataSources(ctx, request)
if _, err := iter.Next(); err != nil && err != iterator.Done {
t.Error(err)
}
}


@@ -0,0 +1,625 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package datatransfer
import (
"context"
"fmt"
"math"
"time"
"github.com/golang/protobuf/proto"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
GetDataSource []gax.CallOption
ListDataSources []gax.CallOption
CreateTransferConfig []gax.CallOption
UpdateTransferConfig []gax.CallOption
DeleteTransferConfig []gax.CallOption
GetTransferConfig []gax.CallOption
ListTransferConfigs []gax.CallOption
ScheduleTransferRuns []gax.CallOption
GetTransferRun []gax.CallOption
DeleteTransferRun []gax.CallOption
ListTransferRuns []gax.CallOption
ListTransferLogs []gax.CallOption
CheckValidCreds []gax.CallOption
}
func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
GetDataSource: retry[[2]string{"default", "idempotent"}],
ListDataSources: retry[[2]string{"default", "idempotent"}],
CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
GetTransferConfig: retry[[2]string{"default", "idempotent"}],
ListTransferConfigs: retry[[2]string{"default", "idempotent"}],
ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
GetTransferRun: retry[[2]string{"default", "idempotent"}],
DeleteTransferRun: retry[[2]string{"default", "idempotent"}],
ListTransferRuns: retry[[2]string{"default", "idempotent"}],
ListTransferLogs: retry[[2]string{"default", "idempotent"}],
CheckValidCreds: retry[[2]string{"default", "idempotent"}],
}
}
// Client is a client for interacting with BigQuery Data Transfer API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
client datatransferpb.DataTransferServiceClient
// The call options for this service.
CallOptions *CallOptions
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewClient creates a new data transfer service client.
//
// The Google BigQuery Data Transfer Service API enables BigQuery users to
// configure the transfer of their data from other Google Products into
// BigQuery. This service contains methods that are end user exposed. It backs
// up the frontend.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),
client: datatransferpb.NewDataTransferServiceClient(conn),
}
c.setGoogleClientInfo()
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// GetDataSource retrieves a supported data source and returns its settings,
// which can be used for UI rendering.
func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
var resp *datatransferpb.DataSource
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListDataSources lists supported data sources and returns their settings,
// which can be used for UI rendering.
func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
it := &DataSourceIterator{}
req = proto.Clone(req).(*datatransferpb.ListDataSourcesRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
var resp *datatransferpb.ListDataSourcesResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.DataSources, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}
// CreateTransferConfig creates a new data transfer configuration.
func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// UpdateTransferConfig updates a data transfer configuration.
// All fields must be set, even if they are not updated.
func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "transfer_config.name", req.GetTransferConfig().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DeleteTransferConfig deletes a data transfer configuration,
// including any associated transfer runs and logs.
func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// GetTransferConfig returns information about a data transfer config.
func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListTransferConfigs returns information about all data transfers in the project.
func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
it := &TransferConfigIterator{}
req = proto.Clone(req).(*datatransferpb.ListTransferConfigsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
var resp *datatransferpb.ListTransferConfigsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferConfigs, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}
// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time].
// For each date - or whatever granularity the data source supports - in the
// range, one transfer run is created.
// Note that runs are created per UTC time in the time range.
func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
var resp *datatransferpb.ScheduleTransferRunsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// GetTransferRun returns information about the particular transfer run.
func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
var resp *datatransferpb.TransferRun
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DeleteTransferRun deletes the specified transfer run.
func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// ListTransferRuns returns information about running and completed jobs.
func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
it := &TransferRunIterator{}
req = proto.Clone(req).(*datatransferpb.ListTransferRunsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
var resp *datatransferpb.ListTransferRunsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferRuns, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}
// ListTransferLogs returns user-facing log messages for the data transfer run.
func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
it := &TransferMessageIterator{}
req = proto.Clone(req).(*datatransferpb.ListTransferLogsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
var resp *datatransferpb.ListTransferLogsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferMessages, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
return it
}
// CheckValidCreds returns true if valid credentials exist for the given data source and
// requesting user.
// Some data sources don't support service accounts, so we need to talk to
// them on behalf of the end user. This API just checks whether we have an OAuth
// token for the particular user, which is a prerequisite before the user can
// create a transfer config.
func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
var resp *datatransferpb.CheckValidCredsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DataSourceIterator manages a stream of *datatransferpb.DataSource.
type DataSourceIterator struct {
items []*datatransferpb.DataSource
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
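//
// For example, given it := c.ListDataSources(ctx, req), results can be retrieved a
// page at a time with iterator.NewPager; the page size of 50 below is illustrative:
//
//  pager := iterator.NewPager(it, 50, "")
//  for {
//    var page []*datatransferpb.DataSource
//    nextToken, err := pager.NextPage(&page)
//    if err != nil {
//      // TODO: Handle error.
//    }
//    // TODO: Use page.
//    if nextToken == "" {
//      break
//    }
//  }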
func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
var item *datatransferpb.DataSource
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *DataSourceIterator) bufLen() int {
return len(it.items)
}
func (it *DataSourceIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
type TransferConfigIterator struct {
items []*datatransferpb.TransferConfig
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) {
var item *datatransferpb.TransferConfig
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *TransferConfigIterator) bufLen() int {
return len(it.items)
}
func (it *TransferConfigIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage.
type TransferMessageIterator struct {
items []*datatransferpb.TransferMessage
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) {
var item *datatransferpb.TransferMessage
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *TransferMessageIterator) bufLen() int {
return len(it.items)
}
func (it *TransferMessageIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
// TransferRunIterator manages a stream of *datatransferpb.TransferRun.
type TransferRunIterator struct {
items []*datatransferpb.TransferRun
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferRunIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) {
var item *datatransferpb.TransferRun
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *TransferRunIterator) bufLen() int {
return len(it.items)
}
func (it *TransferRunIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}

View File

@@ -0,0 +1,289 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package datatransfer_test
import (
"context"
datatransfer "cloud.google.com/go/bigquery/datatransfer/apiv1"
"google.golang.org/api/iterator"
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)
func ExampleNewClient() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExampleClient_GetDataSource() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.GetDataSourceRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetDataSource(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_ListDataSources() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ListDataSourcesRequest{
// TODO: Fill request struct fields.
}
it := c.ListDataSources(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleClient_CreateTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.CreateTransferConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CreateTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_UpdateTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.UpdateTransferConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.UpdateTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_DeleteTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.DeleteTransferConfigRequest{
// TODO: Fill request struct fields.
}
err = c.DeleteTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleClient_GetTransferConfig() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.GetTransferConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetTransferConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_ListTransferConfigs() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ListTransferConfigsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTransferConfigs(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleClient_ScheduleTransferRuns() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ScheduleTransferRunsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ScheduleTransferRuns(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_GetTransferRun() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.GetTransferRunRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetTransferRun(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_DeleteTransferRun() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.DeleteTransferRunRequest{
// TODO: Fill request struct fields.
}
err = c.DeleteTransferRun(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleClient_ListTransferRuns() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ListTransferRunsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTransferRuns(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleClient_ListTransferLogs() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ListTransferLogsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTransferLogs(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleClient_CheckValidCreds() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.CheckValidCredsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CheckValidCreds(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

View File

@@ -0,0 +1,101 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
// Package datatransfer is an auto-generated package for the
// BigQuery Data Transfer API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Transfers data from partner SaaS applications to Google BigQuery on a
// scheduled, managed basis.
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
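//
// For example, a minimal sketch of that pattern (the 30-second timeout is
// illustrative):
//
//  ctx := context.Background()
//  c, err := datatransfer.NewClient(ctx)
//  if err != nil {
//    // TODO: Handle error.
//  }
//  defer c.Close()
//  callCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
//  defer cancel()
//  // Pass callCtx to individual calls, e.g. c.GetDataSource(callCtx, req).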
//
// For information about setting deadlines, reusing contexts, and more
// please visit godoc.org/cloud.google.com/go.
package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1"
import (
"context"
"runtime"
"strings"
"unicode"
"google.golang.org/grpc/metadata"
)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
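//
// For example, the scopes can also be passed explicitly when constructing a
// client (a sketch; NewClient already applies them by default):
//
//  c, err := datatransfer.NewClient(ctx, option.WithScopes(DefaultAuthScopes()...))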
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, making it suitable for reporting in a header.
func versionGo() string {
const develPrefix = "devel +"
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
const versionClient = "20190404"

File diff suppressed because it is too large

View File

@@ -0,0 +1,135 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datatransfer
// ProjectPath returns the path for the project resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s", project)
// instead.
func ProjectPath(project string) string {
return "" +
"projects/" +
project +
""
}
// LocationPath returns the path for the location resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s", project, location)
// instead.
func LocationPath(project, location string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
""
}
// LocationDataSourcePath returns the path for the location data source resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", project, location, dataSource)
// instead.
func LocationDataSourcePath(project, location, dataSource string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
"/dataSources/" +
dataSource +
""
}
// LocationTransferConfigPath returns the path for the location transfer config resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", project, location, transferConfig)
// instead.
func LocationTransferConfigPath(project, location, transferConfig string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
"/transferConfigs/" +
transferConfig +
""
}
// LocationRunPath returns the path for the location run resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", project, location, transferConfig, run)
// instead.
func LocationRunPath(project, location, transferConfig, run string) string {
return "" +
"projects/" +
project +
"/locations/" +
location +
"/transferConfigs/" +
transferConfig +
"/runs/" +
run +
""
}
// DataSourcePath returns the path for the data source resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/dataSources/%s", project, dataSource)
// instead.
func DataSourcePath(project, dataSource string) string {
return "" +
"projects/" +
project +
"/dataSources/" +
dataSource +
""
}
// TransferConfigPath returns the path for the transfer config resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/transferConfigs/%s", project, transferConfig)
// instead.
func TransferConfigPath(project, transferConfig string) string {
return "" +
"projects/" +
project +
"/transferConfigs/" +
transferConfig +
""
}
// RunPath returns the path for the run resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", project, transferConfig, run)
// instead.
func RunPath(project, transferConfig, run string) string {
return "" +
"projects/" +
project +
"/transferConfigs/" +
transferConfig +
"/runs/" +
run +
""
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,7 +12,299 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package bigquery provides a client for the BigQuery service.
//
// Note: This package is a work-in-progress. Backwards-incompatible changes should be expected.
/*
Package bigquery provides a client for the BigQuery service.
Note: This package is in beta. Some backwards-incompatible changes may occur.
The following assumes a basic familiarity with BigQuery concepts.
See https://cloud.google.com/bigquery/docs.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
Creating a Client
To start working with this package, create a client:
ctx := context.Background()
client, err := bigquery.NewClient(ctx, projectID)
if err != nil {
// TODO: Handle error.
}
Querying
To query existing tables, create a Query and call its Read method:
q := client.Query(`
SELECT year, SUM(number) as num
FROM ` + "`bigquery-public-data.usa_names.usa_1910_2013`" + `
WHERE name = "William"
GROUP BY year
ORDER BY year
`)
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
Then iterate through the resulting rows. You can store a row using
anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value.
A slice is simplest:
for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(values)
}
You can also use a struct whose exported fields match the query:
type Count struct {
Year int
Num int
}
for {
var c Count
err := it.Next(&c)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(c)
}
You can also start the query running and get the results later.
Create the query as above, but call Run instead of Read. This returns a Job,
which represents an asynchronous operation.
job, err := q.Run(ctx)
if err != nil {
// TODO: Handle error.
}
Get the job's ID, a printable string. You can save this string to retrieve
the results at a later time, even in another process.
jobID := job.ID()
fmt.Printf("The job ID is %s\n", jobID)
To retrieve the job's results from the ID, first look up the Job:
job, err = client.JobFromID(ctx, jobID)
if err != nil {
// TODO: Handle error.
}
Use the Job.Read method to obtain an iterator, and loop over the rows.
Query.Read is just a convenience method that combines Query.Run and Job.Read.
it, err = job.Read(ctx)
if err != nil {
// TODO: Handle error.
}
// Proceed with iteration as above.
Datasets and Tables
You can refer to datasets in the client's project with the Dataset method, and
in other projects with the DatasetInProject method:
myDataset := client.Dataset("my_dataset")
yourDataset := client.DatasetInProject("your-project-id", "your_dataset")
These methods create references to datasets, not the datasets themselves. You can have
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
create a dataset from a reference:
if err := myDataset.Create(ctx, nil); err != nil {
// TODO: Handle error.
}
You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference
to an object in BigQuery that may or may not exist.
table := myDataset.Table("my_table")
You can create, delete and update the metadata of tables with methods on Table.
For instance, you could create a temporary table with:
err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{
ExpirationTime: time.Now().Add(1*time.Hour)})
if err != nil {
// TODO: Handle error.
}
We'll see how to create a table with a schema in the next section.
Schemas
There are two ways to construct schemas with this package.
You can build a schema by hand, like so:
schema1 := bigquery.Schema{
{Name: "Name", Required: true, Type: bigquery.StringFieldType},
{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
{Name: "Optional", Required: false, Type: bigquery.IntegerFieldType},
}
Or you can infer the schema from a struct:
type student struct {
Name string
Grades []int
Optional bigquery.NullInt64
}
schema2, err := bigquery.InferSchema(student{})
if err != nil {
// TODO: Handle error.
}
// schema1 and schema2 are identical.
Struct inference supports tags like those of the encoding/json package, so you can
change names, ignore fields, or mark a field as nullable (non-required). Fields
declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool,
NullTimestamp, NullDate, NullTime, NullDateTime, and NullGeography) are
automatically inferred as nullable, so the "nullable" tag is only needed for []byte,
*big.Rat and pointer-to-struct fields.
type student2 struct {
Name string `bigquery:"full_name"`
Grades []int
Secret string `bigquery:"-"`
Optional []byte `bigquery:",nullable"`
}
schema3, err := bigquery.InferSchema(student2{})
if err != nil {
// TODO: Handle error.
}
// schema3 has required fields "full_name" and "Grades", and a nullable BYTES field "Optional".
Having constructed a schema, you can create a table with it like so:
if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil {
// TODO: Handle error.
}
Copying
You can copy one or more tables to another table. Begin by constructing a Copier
describing the copy. Then set any desired copy options, and finally call Run to get a Job:
copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src"))
copier.WriteDisposition = bigquery.WriteTruncate
job, err = copier.Run(ctx)
if err != nil {
// TODO: Handle error.
}
You can chain the call to Run if you don't want to set options:
job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx)
if err != nil {
// TODO: Handle error.
}
You can wait for your job to complete:
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
Job.Wait polls with exponential backoff. You can also poll yourself, if you
wish:
for {
status, err := job.Status(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Done() {
if status.Err() != nil {
log.Fatalf("Job failed with error %v", status.Err())
}
break
}
time.Sleep(pollInterval)
}
Loading and Uploading
There are two ways to populate a table with this package: load the data from a Google Cloud Storage
object, or upload rows directly from your program.
For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure
it as well, and call its Run method.
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.AllowJaggedRows = true
loader := myDataset.Table("dest").LoaderFrom(gcsRef)
loader.CreateDisposition = bigquery.CreateNever
job, err = loader.Run(ctx)
// Poll the job for completion if desired, as above.
To upload, first define a type that implements the ValueSaver interface, which has a single method named Save.
Then create an Uploader, and call its Put method with a slice of values.
u := table.Uploader()
// Item implements the ValueSaver interface.
items := []*Item{
{Name: "n1", Size: 32.6, Count: 7},
{Name: "n2", Size: 4, Count: 2},
{Name: "n3", Size: 101.5, Count: 1},
}
if err := u.Put(ctx, items); err != nil {
// TODO: Handle error.
}
You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type
to specify the schema and insert ID by hand, or just supply the struct or struct pointer
directly and the schema will be inferred:
type Item2 struct {
Name string
Size float64
Count int
}
// Item2 does not implement the ValueSaver interface, so the schema is inferred from the struct type.
items2 := []*Item2{
{Name: "n1", Size: 32.6, Count: 7},
{Name: "n2", Size: 4, Count: 2},
{Name: "n3", Size: 101.5, Count: 1},
}
if err := u.Put(ctx, items2); err != nil {
// TODO: Handle error.
}
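To set the schema and insert IDs by hand instead, wrap each value in a StructSaver.
Here, schema is assumed to hold the table's schema:
savers := []*bigquery.StructSaver{
{Struct: Item2{Name: "n4", Size: 12.3, Count: 4}, Schema: schema, InsertID: "id-n4"},
{Struct: Item2{Name: "n5", Size: 7.1, Count: 2}, Schema: schema, InsertID: "id-n5"},
}
if err := u.Put(ctx, savers); err != nil {
// TODO: Handle error.
}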
Extracting
If you've been following so far, extracting data from a BigQuery table
into a Google Cloud Storage object will feel familiar. First create an
Extractor, then optionally configure it, and lastly call its Run method.
extractor := table.ExtractorTo(gcsRef)
extractor.DisableHeader = true
job, err = extractor.Run(ctx)
// Poll the job for completion if desired, as above.
Errors
Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:
if e, ok := err.(*googleapi.Error); ok {
if e.Code == 409 { ... }
}
*/
package bigquery // import "cloud.google.com/go/bigquery"

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@ import (
)
// An Error contains detailed information about a failed bigquery operation.
// Detailed description of possible Reasons can be found here: https://cloud.google.com/bigquery/troubleshooting-errors.
type Error struct {
// Mirrors bq.ErrorProto, but drops DebugInfo
Location, Message, Reason string
@@ -30,7 +31,7 @@ func (e Error) Error() string {
return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
}
func errorFromErrorProto(ep *bq.ErrorProto) *Error {
func bqToError(ep *bq.ErrorProto) *Error {
if ep == nil {
return nil
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,10 +16,10 @@ package bigquery
import (
"errors"
"reflect"
"strings"
"testing"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@@ -94,7 +94,7 @@ func TestErrorFromErrorProto(t *testing.T) {
want: &Error{Location: "L", Message: "M", Reason: "R"},
},
} {
if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) {
if got := bqToError(test.in); !testutil.Equal(got, test.want) {
t.Errorf("%v: got %v, want %v", test.in, got, test.want)
}
}

829
vendor/cloud.google.com/go/bigquery/examples_test.go generated vendored Normal file
View File

@@ -0,0 +1,829 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery_test
import (
"context"
"fmt"
"os"
"time"
"cloud.google.com/go/bigquery"
"google.golang.org/api/iterator"
)
func ExampleNewClient() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
_ = client // TODO: Use client.
}
func ExampleClient_Dataset() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
fmt.Println(ds)
}
func ExampleClient_DatasetInProject() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.DatasetInProject("their-project-id", "their-dataset")
fmt.Println(ds)
}
func ExampleClient_Datasets() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Datasets(ctx)
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleClient_DatasetsInProject() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.DatasetsInProject(ctx, "their-project-id")
_ = it // TODO: iterate using Next or iterator.Pager.
}
func getJobID() string { return "" }
func ExampleClient_JobFromID() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere.
job, err := client.JobFromID(ctx, jobID)
if err != nil {
// TODO: Handle error.
}
fmt.Println(job.LastStatus()) // Display the job's status.
}
func ExampleClient_Jobs() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Jobs(ctx)
it.State = bigquery.Running // list only running jobs.
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleNewGCSReference() {
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
fmt.Println(gcsRef)
}
func ExampleClient_Query() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
q.DefaultProjectID = "project-id"
// TODO: set other options on the Query.
// TODO: Call Query.Run or Query.Read.
}
func ExampleClient_Query_parameters() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select num from t1 where name = @user")
q.Parameters = []bigquery.QueryParameter{
{Name: "user", Value: "Elizabeth"},
}
// TODO: set other options on the Query.
// TODO: Call Query.Run or Query.Read.
}
// This example demonstrates how to run a query job on a table
// with a customer-managed encryption key. The same
// applies to load and copy jobs as well.
func ExampleClient_Query_encryptionKey() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
// TODO: Replace this key with a key you have created in Cloud KMS.
keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{KMSKeyName: keyName}
// TODO: set other options on the Query.
// TODO: Call Query.Run or Query.Read.
}
func ExampleQuery_Read() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleRowIterator_Next() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
for {
var row []bigquery.Value
err := it.Next(&row)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(row)
}
}
func ExampleRowIterator_Next_struct() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
type score struct {
Name string
Num int
}
q := client.Query("select name, num from t1")
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
for {
var s score
err := it.Next(&s)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(s)
}
}
func ExampleJob_Read() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
q := client.Query("select name, num from t1")
// Call Query.Run to get a Job, then call Read on the job.
// Note: Query.Read is a shorthand for this.
job, err := q.Run(ctx)
if err != nil {
// TODO: Handle error.
}
it, err := job.Read(ctx)
if err != nil {
// TODO: Handle error.
}
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleJob_Wait() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}
func ExampleJob_Config() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
if err != nil {
// TODO: Handle error.
}
jc, err := job.Config()
if err != nil {
// TODO: Handle error.
}
copyConfig := jc.(*bigquery.CopyConfig)
fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition)
}
func ExampleDataset_Create() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "EU"}); err != nil {
// TODO: Handle error.
}
}
func ExampleDataset_Delete() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
if err := client.Dataset("my_dataset").Delete(ctx); err != nil {
// TODO: Handle error.
}
}
func ExampleDataset_Metadata() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}
// This example illustrates how to perform a read-modify-write sequence on dataset
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleDataset_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
md, err := ds.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := ds.Update(ctx,
bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleDataset_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "")
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}
func ExampleDataset_Table() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
// Table creates a reference to the table. It does not create the actual
// table in BigQuery; to do so, use Table.Create.
t := client.Dataset("my_dataset").Table("my_table")
fmt.Println(t)
}
func ExampleDataset_Tables() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Dataset("my_dataset").Tables(ctx)
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleDatasetIterator_Next() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Datasets(ctx)
for {
ds, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(ds)
}
}
func ExampleInferSchema() {
type Item struct {
Name string
Size float64
Count int
}
schema, err := bigquery.InferSchema(Item{})
if err != nil {
fmt.Println(err)
// TODO: Handle error.
}
for _, fs := range schema {
fmt.Println(fs.Name, fs.Type)
}
// Output:
// Name STRING
// Size FLOAT
// Count INTEGER
}
func ExampleInferSchema_tags() {
type Item struct {
Name string
Size float64
Count int `bigquery:"number"`
Secret []byte `bigquery:"-"`
Optional bigquery.NullBool
OptBytes []byte `bigquery:",nullable"`
}
schema, err := bigquery.InferSchema(Item{})
if err != nil {
fmt.Println(err)
// TODO: Handle error.
}
for _, fs := range schema {
fmt.Println(fs.Name, fs.Type, fs.Required)
}
// Output:
// Name STRING true
// Size FLOAT true
// number INTEGER true
// Optional BOOLEAN false
// OptBytes BYTES false
}
func ExampleTable_Create() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
if err := t.Create(ctx, nil); err != nil {
// TODO: Handle error.
}
}
// Initialize a new table by passing TableMetadata to Table.Create.
func ExampleTable_Create_initialize() {
ctx := context.Background()
// Infer table schema from a Go type.
schema, err := bigquery.InferSchema(Item{})
if err != nil {
// TODO: Handle error.
}
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
if err := t.Create(ctx,
&bigquery.TableMetadata{
Name: "My New Table",
Schema: schema,
ExpirationTime: time.Now().Add(24 * time.Hour),
}); err != nil {
// TODO: Handle error.
}
}
// This example demonstrates how to create a table with
// a customer-managed encryption key.
func ExampleTable_Create_encryptionKey() {
ctx := context.Background()
// Infer table schema from a Go type.
schema, err := bigquery.InferSchema(Item{})
if err != nil {
// TODO: Handle error.
}
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
// TODO: Replace this key with a key you have created in Cloud KMS.
keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
if err := t.Create(ctx,
&bigquery.TableMetadata{
Name: "My New Table",
Schema: schema,
EncryptionConfig: &bigquery.EncryptionConfig{KMSKeyName: keyName},
}); err != nil {
// TODO: Handle error.
}
}
func ExampleTable_Delete() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil {
// TODO: Handle error.
}
}
func ExampleTable_Metadata() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}
func ExampleTable_Inserter() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
_ = ins // TODO: Use ins.
}
func ExampleTable_Inserter_options() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
ins.SkipInvalidRows = true
ins.IgnoreUnknownValues = true
_ = ins // TODO: Use ins.
}
func ExampleTable_CopierFrom() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2"))
c.WriteDisposition = bigquery.WriteTruncate
// TODO: set other options on the Copier.
job, err := c.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}
func ExampleTable_ExtractorTo() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.FieldDelimiter = ":"
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
extractor := ds.Table("my_table").ExtractorTo(gcsRef)
extractor.DisableHeader = true
// TODO: set other options on the Extractor.
job, err := extractor.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}
func ExampleTable_LoaderFrom() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.AllowJaggedRows = true
gcsRef.MaxBadRecords = 5
gcsRef.Schema = schema
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(gcsRef)
loader.CreateDisposition = bigquery.CreateNever
// TODO: set other options on the Loader.
job, err := loader.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}
func ExampleTable_LoaderFrom_reader() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
f, err := os.Open("data.csv")
if err != nil {
// TODO: Handle error.
}
rs := bigquery.NewReaderSource(f)
rs.AllowJaggedRows = true
rs.MaxBadRecords = 5
rs.Schema = schema
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(rs)
loader.CreateDisposition = bigquery.CreateNever
// TODO: set other options on the Loader.
job, err := loader.Run(ctx)
if err != nil {
// TODO: Handle error.
}
status, err := job.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
if status.Err() != nil {
// TODO: Handle error.
}
}
func ExampleTable_Read() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Dataset("my_dataset").Table("my_table").Read(ctx)
_ = it // TODO: iterate using Next or iterator.Pager.
}
// This example illustrates how to perform a read-modify-write sequence on table
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleTable_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("my_table")
md, err := t.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := t.Update(ctx,
bigquery.TableMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleTable_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("my_table")
tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
Description: "my favorite table",
}, "")
if err != nil {
// TODO: Handle error.
}
fmt.Println(tm)
}
func ExampleTableIterator_Next() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Dataset("my_dataset").Tables(ctx)
for {
t, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(t)
}
}
type Item struct {
Name string
Size float64
Count int
}
// Save implements the ValueSaver interface.
func (i *Item) Save() (map[string]bigquery.Value, string, error) {
return map[string]bigquery.Value{
"Name": i.Name,
"Size": i.Size,
"Count": i.Count,
}, "", nil
}
func ExampleInserter_Put() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
// Item implements the ValueSaver interface.
items := []*Item{
{Name: "n1", Size: 32.6, Count: 7},
{Name: "n2", Size: 4, Count: 2},
{Name: "n3", Size: 101.5, Count: 1},
}
if err := ins.Put(ctx, items); err != nil {
// TODO: Handle error.
}
}
var schema bigquery.Schema
func ExampleInserter_Put_structSaver() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
type score struct {
Name string
Num int
}
// Assume schema holds the table's schema.
savers := []*bigquery.StructSaver{
{Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"},
{Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
{Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
}
if err := ins.Put(ctx, savers); err != nil {
// TODO: Handle error.
}
}
func ExampleInserter_Put_struct() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
type score struct {
Name string
Num int
}
scores := []score{
{Name: "n1", Num: 12},
{Name: "n2", Num: 31},
{Name: "n3", Num: 7},
}
// Schema is inferred from the score type.
if err := ins.Put(ctx, scores); err != nil {
// TODO: Handle error.
}
}
func ExampleInserter_Put_valuesSaver() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ins := client.Dataset("my_dataset").Table("my_table").Inserter()
var vss []*bigquery.ValuesSaver
for i, name := range []string{"n1", "n2", "n3"} {
// Assume schema holds the table's schema.
vss = append(vss, &bigquery.ValuesSaver{
Schema: schema,
InsertID: name,
Row: []bigquery.Value{name, int64(i)},
})
}
if err := ins.Put(ctx, vss); err != nil {
// TODO: Handle error.
}
}

vendor/cloud.google.com/go/bigquery/external.go generated vendored Normal file

@@ -0,0 +1,400 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"encoding/base64"
"unicode/utf8"
bq "google.golang.org/api/bigquery/v2"
)
// DataFormat describes the format of BigQuery table data.
type DataFormat string
// Constants describing the format of BigQuery table data.
const (
CSV DataFormat = "CSV"
Avro DataFormat = "AVRO"
JSON DataFormat = "NEWLINE_DELIMITED_JSON"
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
GoogleSheets DataFormat = "GOOGLE_SHEETS"
Bigtable DataFormat = "BIGTABLE"
Parquet DataFormat = "PARQUET"
ORC DataFormat = "ORC"
)
// ExternalData is a table which is stored outside of BigQuery. It is implemented by
// *ExternalDataConfig.
// GCSReference also implements it, for backwards compatibility.
type ExternalData interface {
toBQ() bq.ExternalDataConfiguration
}
// ExternalDataConfig describes data external to BigQuery that can be used
// in queries and to create external tables.
type ExternalDataConfig struct {
// The format of the data. Required.
SourceFormat DataFormat
// The fully-qualified URIs that point to your
// data in Google Cloud. Required.
//
// For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
// and it must come after the 'bucket' name. Size limits related to load jobs
// apply to external data sources.
//
// For Google Cloud Bigtable URIs, exactly one URI can be specified and it has to be
// a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
//
// For Google Cloud Datastore backups, exactly one URI can be specified. Also,
// the '*' wildcard character is not allowed.
SourceURIs []string
// The schema of the data. Required for CSV and JSON; disallowed for the
// other formats.
Schema Schema
// Try to detect schema and format options automatically.
// Any option specified explicitly will be honored.
AutoDetect bool
// The compression type of the data.
Compression Compression
// IgnoreUnknownValues causes values not matching the schema to be
// tolerated. Unknown values are ignored. For CSV this ignores extra values
// at the end of a line. For JSON this ignores named values that do not
// match any column name. If this field is not set, records containing
// unknown values are treated as bad records. The MaxBadRecords field can
// be used to customize how bad records are handled.
IgnoreUnknownValues bool
// MaxBadRecords is the maximum number of bad records that will be ignored
// when reading data.
MaxBadRecords int64
// Additional options for CSV, GoogleSheets and Bigtable formats.
Options ExternalDataConfigOptions
}
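// The following is an illustrative sketch, not part of the vendored file: one way an
// ExternalDataConfig might be used to query a CSV file in GCS through a temporary
// external table, written in the style of the examples above. The project, bucket,
// table name and schema are placeholders, and the Query.TableDefinitions field is
// assumed to be available as in current versions of this package.
func exampleExternalDataConfigQuery() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	edc := &bigquery.ExternalDataConfig{
		SourceFormat: bigquery.CSV,
		SourceURIs:   []string{"gs://my-bucket/data-*.csv"},
		Schema: bigquery.Schema{
			{Name: "name", Type: bigquery.StringFieldType},
			{Name: "num", Type: bigquery.IntegerFieldType},
		},
		Options: &bigquery.CSVOptions{SkipLeadingRows: 1},
	}
	q := client.Query("SELECT name, num FROM my_temp_table")
	// Map the table name used in the query to the external data source.
	q.TableDefinitions = map[string]bigquery.ExternalData{"my_temp_table": edc}
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // TODO: iterate using Next or iterator.Pager.
}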
func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
q := bq.ExternalDataConfiguration{
SourceFormat: string(e.SourceFormat),
SourceUris: e.SourceURIs,
Autodetect: e.AutoDetect,
Compression: string(e.Compression),
IgnoreUnknownValues: e.IgnoreUnknownValues,
MaxBadRecords: e.MaxBadRecords,
}
if e.Schema != nil {
q.Schema = e.Schema.toBQ()
}
if e.Options != nil {
e.Options.populateExternalDataConfig(&q)
}
return q
}
func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
e := &ExternalDataConfig{
SourceFormat: DataFormat(q.SourceFormat),
SourceURIs: q.SourceUris,
AutoDetect: q.Autodetect,
Compression: Compression(q.Compression),
IgnoreUnknownValues: q.IgnoreUnknownValues,
MaxBadRecords: q.MaxBadRecords,
Schema: bqToSchema(q.Schema),
}
switch {
case q.CsvOptions != nil:
e.Options = bqToCSVOptions(q.CsvOptions)
case q.GoogleSheetsOptions != nil:
e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
case q.BigtableOptions != nil:
var err error
e.Options, err = bqToBigtableOptions(q.BigtableOptions)
if err != nil {
return nil, err
}
}
return e, nil
}
// ExternalDataConfigOptions are additional options for external data configurations.
// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
type ExternalDataConfigOptions interface {
populateExternalDataConfig(*bq.ExternalDataConfiguration)
}
// CSVOptions are additional options for CSV external data sources.
type CSVOptions struct {
// AllowJaggedRows causes missing trailing optional columns to be tolerated
// when reading CSV data. Missing values are treated as nulls.
AllowJaggedRows bool
// AllowQuotedNewlines sets whether quoted data sections containing
// newlines are allowed when reading CSV data.
AllowQuotedNewlines bool
// Encoding is the character encoding of data to be read.
Encoding Encoding
// FieldDelimiter is the separator for fields in a CSV file, used when
// reading or exporting data. The default is ",".
FieldDelimiter string
// Quote is the value used to quote data sections in a CSV file. The
// default quotation character is the double quote ("), which is used if
// both Quote and ForceZeroQuote are unset.
// To specify that no character should be interpreted as a quotation
// character, set ForceZeroQuote to true.
// Only used when reading data.
Quote string
ForceZeroQuote bool
// The number of rows at the top of a CSV file that BigQuery will skip when
// reading data.
SkipLeadingRows int64
}
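// Illustrative sketch (not part of the vendored file): expressing "no quote character"
// with ForceZeroQuote rather than an empty Quote, as the field comments above describe.
// The delimiter and leading-row count are placeholders.
func exampleCSVOptionsNoQuote() *bigquery.CSVOptions {
	return &bigquery.CSVOptions{
		FieldDelimiter:  "|",
		SkipLeadingRows: 1,
		// An empty Quote alone would fall back to the default double quote;
		// ForceZeroQuote requests that no character be treated as a quote.
		ForceZeroQuote: true,
	}
}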
func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
c.CsvOptions = &bq.CsvOptions{
AllowJaggedRows: o.AllowJaggedRows,
AllowQuotedNewlines: o.AllowQuotedNewlines,
Encoding: string(o.Encoding),
FieldDelimiter: o.FieldDelimiter,
Quote: o.quote(),
SkipLeadingRows: o.SkipLeadingRows,
}
}
// quote returns the CSV quote character, or nil if unset.
func (o *CSVOptions) quote() *string {
if o.ForceZeroQuote {
quote := ""
return &quote
}
if o.Quote == "" {
return nil
}
return &o.Quote
}
func (o *CSVOptions) setQuote(ps *string) {
if ps != nil {
o.Quote = *ps
if o.Quote == "" {
o.ForceZeroQuote = true
}
}
}
func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
o := &CSVOptions{
AllowJaggedRows: q.AllowJaggedRows,
AllowQuotedNewlines: q.AllowQuotedNewlines,
Encoding: Encoding(q.Encoding),
FieldDelimiter: q.FieldDelimiter,
SkipLeadingRows: q.SkipLeadingRows,
}
o.setQuote(q.Quote)
return o
}
// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
type GoogleSheetsOptions struct {
// The number of rows at the top of a sheet that BigQuery will skip when
// reading data.
SkipLeadingRows int64
}
func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
SkipLeadingRows: o.SkipLeadingRows,
}
}
func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
return &GoogleSheetsOptions{
SkipLeadingRows: q.SkipLeadingRows,
}
}
// BigtableOptions are additional options for Bigtable external data sources.
type BigtableOptions struct {
// A list of column families to expose in the table schema along with their
// types. If omitted, all column families are present in the table schema and
// their values are read as BYTES.
ColumnFamilies []*BigtableColumnFamily
// If true, then the column families that are not specified in columnFamilies
// list are not exposed in the table schema. Otherwise, they are read with BYTES
// type values. The default is false.
IgnoreUnspecifiedColumnFamilies bool
// If true, then the rowkey column families will be read and converted to string.
// Otherwise they are read with BYTES type values and users need to manually cast
// them with CAST if necessary. The default is false.
ReadRowkeyAsString bool
}
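// Illustrative sketch (not part of the vendored file): a BigtableOptions value that
// exposes one column family with a single typed column, as described above. The
// family, column and type names are placeholders.
func exampleBigtableOptions() *bigquery.BigtableOptions {
	return &bigquery.BigtableOptions{
		ReadRowkeyAsString:              true,
		IgnoreUnspecifiedColumnFamilies: true,
		ColumnFamilies: []*bigquery.BigtableColumnFamily{{
			FamilyID: "stats",
			Type:     "INTEGER",
			Columns: []*bigquery.BigtableColumn{{
				Qualifier: "views",
				FieldName: "views",
			}},
		}},
	}
}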
func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
q := &bq.BigtableOptions{
IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
ReadRowkeyAsString: o.ReadRowkeyAsString,
}
for _, f := range o.ColumnFamilies {
q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
}
c.BigtableOptions = q
}
func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
b := &BigtableOptions{
IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
ReadRowkeyAsString: q.ReadRowkeyAsString,
}
for _, f := range q.ColumnFamilies {
f2, err := bqToBigtableColumnFamily(f)
if err != nil {
return nil, err
}
b.ColumnFamilies = append(b.ColumnFamilies, f2)
}
return b, nil
}
// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
type BigtableColumnFamily struct {
// Identifier of the column family.
FamilyID string
// Lists of columns that should be exposed as individual fields as opposed to a
// list of (column name, value) pairs. All columns whose qualifier matches a
// qualifier in this list can be accessed as <family field name>.<column field name>.
// Other columns can be accessed as a list through the <family field name>.Column field.
Columns []*BigtableColumn
// The encoding of the values when the type is not STRING. Acceptable encoding values are:
// - TEXT - indicates values are alphanumeric text strings.
// - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
// This can be overridden for a specific column by listing that column in 'columns' and
// specifying an encoding for it.
Encoding string
// If true, only the latest version of values are exposed for all columns in this
// column family. This can be overridden for a specific column by listing that
// column in 'columns' and specifying a different setting for that column.
OnlyReadLatest bool
// The type to convert the value in cells of this
// column family. The values are expected to be encoded using HBase
// Bytes.toBytes function when using the BINARY encoding value.
// Following BigQuery types are allowed (case-sensitive):
// BYTES STRING INTEGER FLOAT BOOLEAN.
// The default type is BYTES. This can be overridden for a specific column by
// listing that column in 'columns' and specifying a type for it.
Type string
}
func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
q := &bq.BigtableColumnFamily{
FamilyId: b.FamilyID,
Encoding: b.Encoding,
OnlyReadLatest: b.OnlyReadLatest,
Type: b.Type,
}
for _, col := range b.Columns {
q.Columns = append(q.Columns, col.toBQ())
}
return q
}
func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
b := &BigtableColumnFamily{
FamilyID: q.FamilyId,
Encoding: q.Encoding,
OnlyReadLatest: q.OnlyReadLatest,
Type: q.Type,
}
for _, col := range q.Columns {
c, err := bqToBigtableColumn(col)
if err != nil {
return nil, err
}
b.Columns = append(b.Columns, c)
}
return b, nil
}
// BigtableColumn describes how BigQuery should access a Bigtable column.
type BigtableColumn struct {
// Qualifier of the column. Columns in the parent column family that have this
// exact qualifier are exposed as the <field name> field. The column field name
// is the same as the column qualifier.
Qualifier string
// If the qualifier is not a valid BigQuery field identifier i.e. does not match
// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
// name and is used as field name in queries.
FieldName string
// If true, only the latest version of values are exposed for this column.
// See BigtableColumnFamily.OnlyReadLatest.
OnlyReadLatest bool
// The encoding of the values when the type is not STRING.
// See BigtableColumnFamily.Encoding
Encoding string
// The type to convert the value in cells of this column.
// See BigtableColumnFamily.Type
Type string
}
func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
q := &bq.BigtableColumn{
FieldName: b.FieldName,
OnlyReadLatest: b.OnlyReadLatest,
Encoding: b.Encoding,
Type: b.Type,
}
if utf8.ValidString(b.Qualifier) {
q.QualifierString = b.Qualifier
} else {
q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
}
return q
}
func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
b := &BigtableColumn{
FieldName: q.FieldName,
OnlyReadLatest: q.OnlyReadLatest,
Encoding: q.Encoding,
Type: q.Type,
}
if q.QualifierString != "" {
b.Qualifier = q.QualifierString
} else {
bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
if err != nil {
return nil, err
}
b.Qualifier = string(bytes)
}
return b, nil
}

vendor/cloud.google.com/go/bigquery/external_test.go generated vendored Normal file

@@ -0,0 +1,143 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
)
func TestExternalDataConfig(t *testing.T) {
// Round-trip of ExternalDataConfig to underlying representation.
for i, want := range []*ExternalDataConfig{
{
SourceFormat: CSV,
SourceURIs: []string{"uri"},
Schema: Schema{{Name: "n", Type: IntegerFieldType}},
AutoDetect: true,
Compression: Gzip,
IgnoreUnknownValues: true,
MaxBadRecords: 17,
Options: &CSVOptions{
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: UTF_8,
FieldDelimiter: "f",
Quote: "q",
SkipLeadingRows: 3,
},
},
{
SourceFormat: GoogleSheets,
Options: &GoogleSheetsOptions{SkipLeadingRows: 4},
},
{
SourceFormat: Bigtable,
Options: &BigtableOptions{
IgnoreUnspecifiedColumnFamilies: true,
ReadRowkeyAsString: true,
ColumnFamilies: []*BigtableColumnFamily{
{
FamilyID: "f1",
Encoding: "TEXT",
OnlyReadLatest: true,
Type: "FLOAT",
Columns: []*BigtableColumn{
{
Qualifier: "valid-utf-8",
FieldName: "fn",
OnlyReadLatest: true,
Encoding: "BINARY",
Type: "STRING",
},
},
},
},
},
},
} {
q := want.toBQ()
got, err := bqToExternalDataConfig(&q)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
}
}
}
func TestQuote(t *testing.T) {
ptr := func(s string) *string { return &s }
for _, test := range []struct {
quote string
force bool
want *string
}{
{"", false, nil},
{"", true, ptr("")},
{"-", false, ptr("-")},
{"-", true, ptr("")},
} {
o := CSVOptions{
Quote: test.quote,
ForceZeroQuote: test.force,
}
got := o.quote()
if (got == nil) != (test.want == nil) {
t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
}
if got != nil && test.want != nil && *got != *test.want {
t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
}
}
}
func TestQualifier(t *testing.T) {
b := BigtableColumn{Qualifier: "a"}
q := b.toBQ()
if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" {
t.Errorf("got (%q, %q), want (%q, %q)",
q.QualifierString, q.QualifierEncoded, b.Qualifier, "")
}
b2, err := bqToBigtableColumn(q)
if err != nil {
t.Fatal(err)
}
if got, want := b2.Qualifier, b.Qualifier; got != want {
t.Errorf("got %q, want %q", got, want)
}
const (
invalidUTF8 = "\xDF\xFF"
invalidEncoded = "3/8"
)
b = BigtableColumn{Qualifier: invalidUTF8}
q = b.toBQ()
if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded {
t.Errorf("got (%q, %q), want (%q, %q)",
q.QualifierString, "", b.Qualifier, invalidEncoded)
}
b2, err = bqToBigtableColumn(q)
if err != nil {
t.Fatal(err)
}
if got, want := b2.Qualifier, b.Qualifier; got != want {
t.Errorf("got %q, want %q", got, want)
}
}

vendor/cloud.google.com/go/bigquery/extract.go generated vendored Normal file

@@ -0,0 +1,110 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)
// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
// Src is the table from which data will be extracted.
Src *Table
// Dst is the destination into which the data will be extracted.
Dst *GCSReference
// DisableHeader disables the printing of a header row in exported data.
DisableHeader bool
// The labels associated with this job.
Labels map[string]string
}
func (e *ExtractConfig) toBQ() *bq.JobConfiguration {
var printHeader *bool
if e.DisableHeader {
f := false
printHeader = &f
}
return &bq.JobConfiguration{
Labels: e.Labels,
Extract: &bq.JobConfigurationExtract{
DestinationUris: append([]string{}, e.Dst.URIs...),
Compression: string(e.Dst.Compression),
DestinationFormat: string(e.Dst.DestinationFormat),
FieldDelimiter: e.Dst.FieldDelimiter,
SourceTable: e.Src.toBQ(),
PrintHeader: printHeader,
},
}
}
func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig {
qe := q.Extract
return &ExtractConfig{
Labels: q.Labels,
Dst: &GCSReference{
URIs: qe.DestinationUris,
Compression: Compression(qe.Compression),
DestinationFormat: DataFormat(qe.DestinationFormat),
FileConfig: FileConfig{
CSVOptions: CSVOptions{
FieldDelimiter: qe.FieldDelimiter,
},
},
},
DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader,
Src: bqToTable(qe.SourceTable, c),
}
}
// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct {
JobIDConfig
ExtractConfig
c *Client
}
// ExtractorTo returns an Extractor which can be used to extract data from a
// BigQuery table into Google Cloud Storage.
// The returned Extractor may optionally be further configured before its Run method is called.
func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
return &Extractor{
c: t.c,
ExtractConfig: ExtractConfig{
Src: t,
Dst: dst,
},
}
}
// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Extractor.Run")
defer func() { trace.EndSpan(ctx, err) }()
return e.c.insertJob(ctx, e.newJob(), nil)
}
func (e *Extractor) newJob() *bq.Job {
return &bq.Job{
JobReference: e.JobIDConfig.createJobRef(e.c),
Configuration: e.ExtractConfig.toBQ(),
}
}
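// Illustrative sketch (not part of the vendored file), in the style of the examples
// earlier in this commit: extracting a table to gzipped CSV in GCS via ExtractorTo.
// The bucket, dataset and table names are placeholders.
func exampleTableExtractorTo() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/extract-*.csv.gz")
	gcsRef.Compression = bigquery.Gzip
	extractor := client.Dataset("my_dataset").Table("my_table").ExtractorTo(gcsRef)
	extractor.DisableHeader = true
	job, err := extractor.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}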


@@ -1,59 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type extractOption interface {
customizeExtract(conf *bq.JobConfigurationExtract)
}
// DisableHeader returns an Option that disables the printing of a header row in exported data.
func DisableHeader() Option { return disableHeader{} }
type disableHeader struct{}
func (opt disableHeader) implementsOption() {}
func (opt disableHeader) customizeExtract(conf *bq.JobConfigurationExtract) {
f := false
conf.PrintHeader = &f
}
func (c *Client) extract(ctx context.Context, dst *GCSReference, src *Table, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationExtract{}
dst.customizeExtractDst(payload)
src.customizeExtractSrc(payload)
for _, opt := range options {
o, ok := opt.(extractOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeExtract(payload)
}
job.Configuration = &bq.JobConfiguration{
Extract: payload,
}
return c.service.insertJob(ctx, job, c.projectID)
}


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,20 +15,20 @@
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)
func defaultExtractJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Extract: &bq.JobConfigurationExtract{
SourceTable: &bq.TableReference{
ProjectId: "project-id",
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@@ -38,39 +38,53 @@ func defaultExtractJob() *bq.Job {
}
}
func defaultGCS() *GCSReference {
return &GCSReference{
URIs: []string{"uri"},
}
}
func TestExtract(t *testing.T) {
defer fixRandomID("RANDOM")()
c := &Client{
projectID: "client-project-id",
}
testCases := []struct {
dst *GCSReference
src *Table
options []Option
want *bq.Job
dst *GCSReference
src *Table
config ExtractConfig
want *bq.Job
}{
{
dst: defaultGCS,
src: defaultTable(nil),
dst: defaultGCS(),
src: c.Dataset("dataset-id").Table("table-id"),
want: defaultExtractJob(),
},
{
dst: defaultGCS,
src: defaultTable(nil),
options: []Option{
DisableHeader(),
dst: defaultGCS(),
src: c.Dataset("dataset-id").Table("table-id"),
config: ExtractConfig{
DisableHeader: true,
Labels: map[string]string{"a": "b"},
},
want: func() *bq.Job {
j := defaultExtractJob()
j.Configuration.Labels = map[string]string{"a": "b"}
f := false
j.Configuration.Extract.PrintHeader = &f
return j
}(),
},
{
dst: &GCSReference{
uris: []string{"uri"},
Compression: Gzip,
DestinationFormat: JSON,
FieldDelimiter: "\t",
},
src: defaultTable(nil),
dst: func() *GCSReference {
g := NewGCSReference("uri")
g.Compression = Gzip
g.DestinationFormat = JSON
g.FieldDelimiter = "\t"
return g
}(),
src: c.Dataset("dataset-id").Table("table-id"),
want: func() *bq.Job {
j := defaultExtractJob()
j.Configuration.Extract.Compression = "GZIP"
@@ -81,17 +95,22 @@ func TestExtract(t *testing.T) {
},
}
for _, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
for i, tc := range testCases {
ext := tc.src.ExtractorTo(tc.dst)
tc.config.Src = ext.Src
tc.config.Dst = ext.Dst
ext.ExtractConfig = tc.config
got := ext.newJob()
checkJob(t, i, got, tc.want)
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
t.Errorf("err calling extract: %v", err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want)
diff := testutil.Diff(jc, &ext.ExtractConfig,
cmp.AllowUnexported(Table{}, Client{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
}
}
}

vendor/cloud.google.com/go/bigquery/file.go generated vendored Normal file

@@ -0,0 +1,137 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"io"
bq "google.golang.org/api/bigquery/v2"
)
// A ReaderSource is a source for a load operation that gets
// data from an io.Reader.
//
// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
// its internal io.Reader will be nil, so it cannot be used for a
// subsequent load operation.
type ReaderSource struct {
r io.Reader
FileConfig
}
// NewReaderSource creates a ReaderSource from an io.Reader. You may
// optionally configure properties on the ReaderSource that describe the
// data being read, before passing it to Table.LoaderFrom.
func NewReaderSource(r io.Reader) *ReaderSource {
return &ReaderSource{r: r}
}
func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
r.FileConfig.populateLoadConfig(lc)
return r.r
}
// FileConfig contains configuration options that pertain to files, typically
// text files that require interpretation to be used as a BigQuery table. A
// file may live in Google Cloud Storage (see GCSReference), or it may be
// loaded into a table via Table.LoaderFrom with a ReaderSource.
type FileConfig struct {
// SourceFormat is the format of the data to be read.
// Allowed values are: Avro, CSV, DatastoreBackup, JSON, ORC, and Parquet. The default is CSV.
SourceFormat DataFormat
// Indicates if we should automatically infer the options and
// schema for CSV and JSON sources.
AutoDetect bool
// MaxBadRecords is the maximum number of bad records that will be ignored
// when reading data.
MaxBadRecords int64
// IgnoreUnknownValues causes values not matching the schema to be
// tolerated. Unknown values are ignored. For CSV this ignores extra values
// at the end of a line. For JSON this ignores named values that do not
// match any column name. If this field is not set, records containing
// unknown values are treated as bad records. The MaxBadRecords field can
// be used to customize how bad records are handled.
IgnoreUnknownValues bool
// Schema describes the data. It is required when reading CSV or JSON data,
// unless the data is being loaded into a table that already exists.
Schema Schema
// Additional options for CSV files.
CSVOptions
}
func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
conf.SkipLeadingRows = fc.SkipLeadingRows
conf.SourceFormat = string(fc.SourceFormat)
conf.Autodetect = fc.AutoDetect
conf.AllowJaggedRows = fc.AllowJaggedRows
conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
conf.Encoding = string(fc.Encoding)
conf.FieldDelimiter = fc.FieldDelimiter
conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
conf.MaxBadRecords = fc.MaxBadRecords
if fc.Schema != nil {
conf.Schema = fc.Schema.toBQ()
}
conf.Quote = fc.quote()
}
func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
fc.SourceFormat = DataFormat(conf.SourceFormat)
fc.AutoDetect = conf.Autodetect
fc.MaxBadRecords = conf.MaxBadRecords
fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
fc.Schema = bqToSchema(conf.Schema)
fc.SkipLeadingRows = conf.SkipLeadingRows
fc.AllowJaggedRows = conf.AllowJaggedRows
fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
fc.Encoding = Encoding(conf.Encoding)
fc.FieldDelimiter = conf.FieldDelimiter
fc.CSVOptions.setQuote(conf.Quote)
}
func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
format := fc.SourceFormat
if format == "" {
// Format must be explicitly set for external data sources.
format = CSV
}
conf.Autodetect = fc.AutoDetect
conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
conf.MaxBadRecords = fc.MaxBadRecords
conf.SourceFormat = string(format)
if fc.Schema != nil {
conf.Schema = fc.Schema.toBQ()
}
if format == CSV {
fc.CSVOptions.populateExternalDataConfig(conf)
}
}
// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string
const (
// UTF_8 specifies the UTF-8 encoding type.
UTF_8 Encoding = "UTF-8"
// ISO_8859_1 specifies the ISO-8859-1 encoding type.
ISO_8859_1 Encoding = "ISO-8859-1"
)

vendor/cloud.google.com/go/bigquery/file_test.go generated vendored Normal file

@@ -0,0 +1,98 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
var (
hyphen = "-"
fc = FileConfig{
SourceFormat: CSV,
AutoDetect: true,
MaxBadRecords: 7,
IgnoreUnknownValues: true,
Schema: Schema{
stringFieldSchema(),
nestedFieldSchema(),
},
CSVOptions: CSVOptions{
Quote: hyphen,
FieldDelimiter: "\t",
SkipLeadingRows: 8,
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: UTF_8,
},
}
)
func TestFileConfigPopulateLoadConfig(t *testing.T) {
want := &bq.JobConfigurationLoad{
SourceFormat: "CSV",
FieldDelimiter: "\t",
SkipLeadingRows: 8,
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Autodetect: true,
Encoding: "UTF-8",
MaxBadRecords: 7,
IgnoreUnknownValues: true,
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqStringFieldSchema(),
bqNestedFieldSchema(),
}},
Quote: &hyphen,
}
got := &bq.JobConfigurationLoad{}
fc.populateLoadConfig(got)
if !testutil.Equal(got, want) {
t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
}
}
func TestFileConfigPopulateExternalDataConfig(t *testing.T) {
got := &bq.ExternalDataConfiguration{}
fc.populateExternalDataConfig(got)
want := &bq.ExternalDataConfiguration{
SourceFormat: "CSV",
Autodetect: true,
MaxBadRecords: 7,
IgnoreUnknownValues: true,
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqStringFieldSchema(),
bqNestedFieldSchema(),
}},
CsvOptions: &bq.CsvOptions{
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: "UTF-8",
FieldDelimiter: "\t",
Quote: &hyphen,
SkipLeadingRows: 8,
},
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("got=-, want=+:\n%s", diff)
}
}


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,99 +14,62 @@
package bigquery
import bq "google.golang.org/api/bigquery/v2"
import (
"io"
bq "google.golang.org/api/bigquery/v2"
)
// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
uris []string
// URIs refer to Google Cloud Storage objects.
URIs []string
// FieldDelimiter is the separator for fields in a CSV file, used when loading or exporting data.
// The default is ",".
FieldDelimiter string
// The number of rows at the top of a CSV file that BigQuery will skip when loading the data.
SkipLeadingRows int64
// SourceFormat is the format of the GCS data to be loaded into BigQuery.
// Allowed values are: CSV, JSON, DatastoreBackup. The default is CSV.
SourceFormat DataFormat
// Only used when loading data.
Encoding Encoding
// Quote is the value used to quote data sections in a CSV file.
// The default quotation character is the double quote ("), which is used if both Quote and ForceZeroQuote are unset.
// To specify that no character should be interpreted as a quotation character, set ForceZeroQuote to true.
// Only used when loading data.
Quote string
ForceZeroQuote bool
FileConfig
// DestinationFormat is the format to use when writing exported files.
// Allowed values are: CSV, Avro, JSON. The default is CSV.
// CSV is not supported for tables with nested or repeated fields.
DestinationFormat DataFormat
// Only used when writing data. Default is None.
// Compression specifies the type of compression to apply when writing data
// to Google Cloud Storage, or using this GCSReference as an ExternalData
// source with CSV or JSON SourceFormat. Default is None.
Compression Compression
}
func (gcs *GCSReference) implementsSource() {}
func (gcs *GCSReference) implementsDestination() {}
// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
// For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func (c *Client) NewGCSReference(uri ...string) *GCSReference {
return &GCSReference{uris: uri}
func NewGCSReference(uri ...string) *GCSReference {
return &GCSReference{URIs: uri}
}
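// Illustrative sketch (not part of the vendored file): one GCSReference covering
// several objects, one named via a '*' wildcard and one explicitly, as the comment
// above allows. The bucket and object names are placeholders.
func exampleGCSReferenceMultipleURIs() *bigquery.GCSReference {
	gcsRef := bigquery.NewGCSReference(
		"gs://my-bucket/shard-*.csv",
		"gs://my-other-bucket/trailer.csv",
	)
	gcsRef.SkipLeadingRows = 1 // promoted from the embedded FileConfig/CSVOptions
	return gcsRef
}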
type DataFormat string
const (
CSV DataFormat = "CSV"
Avro DataFormat = "AVRO"
JSON DataFormat = "NEWLINE_DELIMITED_JSON"
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
)
// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string
const (
UTF_8 Encoding = "UTF-8"
ISO_8859_1 Encoding = "ISO-8859-1"
)
// Compression is the type of compression to apply when writing data to Google Cloud Storage.
type Compression string
const (
// None specifies no compression.
None Compression = "NONE"
// Gzip specifies gzip compression.
Gzip Compression = "GZIP"
)
func (gcs *GCSReference) customizeLoadSrc(conf *bq.JobConfigurationLoad) {
conf.SourceUris = gcs.uris
conf.SkipLeadingRows = gcs.SkipLeadingRows
conf.SourceFormat = string(gcs.SourceFormat)
conf.Encoding = string(gcs.Encoding)
conf.FieldDelimiter = gcs.FieldDelimiter
func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
lc.SourceUris = gcs.URIs
gcs.FileConfig.populateLoadConfig(lc)
return nil
}
if gcs.ForceZeroQuote {
quote := ""
conf.Quote = &quote
} else if gcs.Quote != "" {
conf.Quote = &gcs.Quote
func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
conf := bq.ExternalDataConfiguration{
Compression: string(gcs.Compression),
SourceUris: append([]string{}, gcs.URIs...),
}
}
func (gcs *GCSReference) customizeExtractDst(conf *bq.JobConfigurationExtract) {
conf.DestinationUris = gcs.uris
conf.Compression = string(gcs.Compression)
conf.DestinationFormat = string(gcs.DestinationFormat)
conf.FieldDelimiter = gcs.FieldDelimiter
gcs.FileConfig.populateExternalDataConfig(&conf)
return conf
}

vendor/cloud.google.com/go/bigquery/inserter.go generated vendored Normal file

@@ -0,0 +1,238 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
"errors"
"fmt"
"reflect"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)
// An Inserter does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Inserter struct {
t *Table
// SkipInvalidRows causes rows containing invalid data to be silently
// ignored. The default value is false, which causes the entire request to
// fail if there is an attempt to insert an invalid row.
SkipInvalidRows bool
// IgnoreUnknownValues causes values not matching the schema to be ignored.
// The default value is false, which causes records containing such values
// to be treated as invalid records.
IgnoreUnknownValues bool
// A TableTemplateSuffix allows Inserters to create tables automatically.
//
// Experimental: this option is experimental and may be modified or removed in future versions,
// regardless of any other documented package stability guarantees.
//
// When you specify a suffix, the table you upload data to
// will be used as a template for creating a new table, with the same schema,
// called <table> + <suffix>.
//
// More information is available at
// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
TableTemplateSuffix string
}
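// Illustrative sketch (not part of the vendored file): an Inserter configured with the
// experimental TableTemplateSuffix option described above, so rows stream into an
// automatically created table named my_table_20191012. Names are placeholders.
func exampleInserterTemplateSuffix(client *bigquery.Client) *bigquery.Inserter {
	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
	ins.TableTemplateSuffix = "_20191012"
	ins.SkipInvalidRows = true
	return ins
}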
// Inserter returns an Inserter that can be used to append rows to t.
// The returned Inserter may optionally be further configured before its Put method is called.
//
// To stream rows into a date-partitioned table at a particular date, add the
// $yyyymmdd suffix to the table name when constructing the Table.
func (t *Table) Inserter() *Inserter {
return &Inserter{t: t}
}
// Uploader calls Inserter.
// Deprecated: use Table.Inserter instead.
func (t *Table) Uploader() *Inserter { return t.Inserter() }
// Put uploads one or more rows to the BigQuery service.
//
// If src is ValueSaver, then its Save method is called to produce a row for uploading.
//
// If src is a struct or pointer to a struct, then a schema is inferred from it
// and used to create a StructSaver. The InsertID of the StructSaver will be
// empty.
//
// If src is a slice of ValueSavers, structs, or struct pointers, then each
// element of the slice is treated as above, and multiple rows are uploaded.
//
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
//
// Put will retry on temporary errors (see
// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
// in duplicate rows if you do not use insert IDs. Also, if the error persists,
// the call will run indefinitely. Pass a context with a timeout to prevent
// hanging calls.
func (u *Inserter) Put(ctx context.Context, src interface{}) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Inserter.Put")
defer func() { trace.EndSpan(ctx, err) }()
savers, err := valueSavers(src)
if err != nil {
return err
}
return u.putMulti(ctx, savers)
}
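// Illustrative sketch (not part of the vendored file): because Put retries temporary
// errors and can otherwise run indefinitely, a caller might bound it with a context
// timeout as the comment above suggests. Assumes the standard time package is
// imported; the 30-second limit is a placeholder.
func examplePutWithTimeout(ins *bigquery.Inserter, rows interface{}) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return ins.Put(ctx, rows)
}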
func valueSavers(src interface{}) ([]ValueSaver, error) {
saver, ok, err := toValueSaver(src)
if err != nil {
return nil, err
}
if ok {
return []ValueSaver{saver}, nil
}
srcVal := reflect.ValueOf(src)
if srcVal.Kind() != reflect.Slice {
return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
}
var savers []ValueSaver
for i := 0; i < srcVal.Len(); i++ {
s := srcVal.Index(i).Interface()
saver, ok, err := toValueSaver(s)
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
}
savers = append(savers, saver)
}
return savers, nil
}
// Make a ValueSaver from x, which must implement ValueSaver already
// or be a struct or pointer to struct.
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
if _, ok := x.(StructSaver); ok {
return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
}
var insertID string
// Handle StructSavers specially so we can infer the schema if necessary.
if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
x = ss.Struct
insertID = ss.InsertID
// Fall through so we can infer the schema.
}
if saver, ok := x.(ValueSaver); ok {
return saver, ok, nil
}
v := reflect.ValueOf(x)
// Support Put with []interface{}
if v.Kind() == reflect.Interface {
v = v.Elem()
}
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
return nil, false, nil
}
schema, err := inferSchemaReflectCached(v.Type())
if err != nil {
return nil, false, err
}
return &StructSaver{
Struct: x,
InsertID: insertID,
Schema: schema,
}, true, nil
}
func (u *Inserter) putMulti(ctx context.Context, src []ValueSaver) error {
req, err := u.newInsertRequest(src)
if err != nil {
return err
}
if req == nil {
return nil
}
call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
call = call.Context(ctx)
setClientHeader(call.Header())
var res *bq.TableDataInsertAllResponse
err = runWithRetry(ctx, func() (err error) {
res, err = call.Do()
return err
})
if err != nil {
return err
}
return handleInsertErrors(res.InsertErrors, req.Rows)
}
func (u *Inserter) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
if savers == nil { // If there are no rows, do nothing.
return nil, nil
}
req := &bq.TableDataInsertAllRequest{
TemplateSuffix: u.TableTemplateSuffix,
IgnoreUnknownValues: u.IgnoreUnknownValues,
SkipInvalidRows: u.SkipInvalidRows,
}
for _, saver := range savers {
row, insertID, err := saver.Save()
if err != nil {
return nil, err
}
if insertID == "" {
insertID = randomIDFn()
}
m := make(map[string]bq.JsonValue)
for k, v := range row {
m[k] = bq.JsonValue(v)
}
req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
InsertId: insertID,
Json: m,
})
}
return req, nil
}
func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
if len(ierrs) == 0 {
return nil
}
var errs PutMultiError
for _, e := range ierrs {
if int(e.Index) > len(rows) {
return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
}
rie := RowInsertionError{
InsertID: rows[e.Index].InsertId,
RowIndex: int(e.Index),
}
for _, errp := range e.Errors {
rie.Errors = append(rie.Errors, bqToError(errp))
}
errs = append(errs, rie)
}
return errs
}
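// Illustrative sketch (not part of the vendored file): unpacking the PutMultiError
// built by handleInsertErrors above when only some rows fail. Assumes the standard
// log package is imported; written from the point of view of client code.
func examplePutMultiError(err error) {
	if multi, ok := err.(bigquery.PutMultiError); ok {
		for _, rowErr := range multi {
			log.Printf("row %d (insert ID %q) failed: %v",
				rowErr.RowIndex, rowErr.InsertID, rowErr.Errors)
		}
	}
}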
// Uploader is an obsolete name for Inserter.
type Uploader = Inserter

vendor/cloud.google.com/go/bigquery/inserter_test.go generated vendored Normal file

@@ -0,0 +1,210 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"strconv"
"testing"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)
type testSaver struct {
row map[string]Value
insertID string
err error
}
func (ts testSaver) Save() (map[string]Value, string, error) {
return ts.row, ts.insertID, ts.err
}
func TestNewInsertRequest(t *testing.T) {
prev := randomIDFn
n := 0
randomIDFn = func() string { n++; return strconv.Itoa(n) }
defer func() { randomIDFn = prev }()
tests := []struct {
ul *Uploader
savers []ValueSaver
req *bq.TableDataInsertAllRequest
}{
{
ul: &Uploader{},
savers: nil,
req: nil,
},
{
ul: &Uploader{},
savers: []ValueSaver{
testSaver{row: map[string]Value{"one": 1}},
testSaver{row: map[string]Value{"two": 2}},
},
req: &bq.TableDataInsertAllRequest{
Rows: []*bq.TableDataInsertAllRequestRows{
{InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}},
{InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}},
},
},
},
{
ul: &Uploader{
TableTemplateSuffix: "suffix",
IgnoreUnknownValues: true,
SkipInvalidRows: true,
},
savers: []ValueSaver{
testSaver{insertID: "a", row: map[string]Value{"one": 1}},
testSaver{insertID: "", row: map[string]Value{"two": 2}},
},
req: &bq.TableDataInsertAllRequest{
Rows: []*bq.TableDataInsertAllRequestRows{
{InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}},
{InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}},
},
TemplateSuffix: "suffix",
SkipInvalidRows: true,
IgnoreUnknownValues: true,
},
},
}
for i, tc := range tests {
got, err := tc.ul.newInsertRequest(tc.savers)
if err != nil {
t.Fatal(err)
}
want := tc.req
if !testutil.Equal(got, want) {
t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want)
}
}
}
func TestNewInsertRequestErrors(t *testing.T) {
var u Uploader
_, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("bang")}})
if err == nil {
t.Error("got nil, want error")
}
}
func TestHandleInsertErrors(t *testing.T) {
rows := []*bq.TableDataInsertAllRequestRows{
{InsertId: "a"},
{InsertId: "b"},
}
for _, test := range []struct {
in []*bq.TableDataInsertAllResponseInsertErrors
want error
}{
{
in: nil,
want: nil,
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{
{Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0},
{Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1},
},
want: PutMultiError{
RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}},
RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}},
},
},
} {
got := handleInsertErrors(test.in, rows)
if !testutil.Equal(got, test.want) {
t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want)
}
}
}
func TestValueSavers(t *testing.T) {
ts := &testSaver{}
type T struct{ I int }
schema, err := InferSchema(T{})
if err != nil {
t.Fatal(err)
}
for _, test := range []struct {
in interface{}
want []ValueSaver
}{
{[]interface{}(nil), nil},
{[]interface{}{}, nil},
{ts, []ValueSaver{ts}},
{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
{[]T{{I: 1}, {I: 2}}, []ValueSaver{
&StructSaver{Schema: schema, Struct: T{I: 1}},
&StructSaver{Schema: schema, Struct: T{I: 2}},
}},
{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
&StructSaver{Schema: schema, Struct: T{I: 1}},
&StructSaver{Schema: schema, Struct: &T{I: 2}},
}},
{&StructSaver{Struct: T{I: 3}, InsertID: "foo"},
[]ValueSaver{
&StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"},
}},
} {
got, err := valueSavers(test.in)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) {
t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
}
// Make sure Save is successful.
for i, vs := range got {
_, _, err := vs.Save()
if err != nil {
t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
}
}
}
}
func TestValueSaversErrors(t *testing.T) {
inputs := []interface{}{
nil,
1,
[]int{1, 2},
[]interface{}{
testSaver{row: map[string]Value{"one": 1}, insertID: "a"},
1,
},
StructSaver{},
}
for _, in := range inputs {
if _, err := valueSavers(in); err == nil {
t.Errorf("%#v: got nil, want error", in)
}
}
}

File diff suppressed because it is too large


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,172 +15,208 @@
package bigquery
import (
"errors"
"context"
"fmt"
"reflect"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
)
// A pageFetcher returns a page of rows, starting from the row specified by token.
type pageFetcher interface {
fetch(ctx context.Context, s service, token string) (*readDataResult, error)
}
// Iterator provides access to the result of a BigQuery lookup.
// Next must be called before the first call to Get.
type Iterator struct {
service service
err error // contains any error encountered during calls to Next.
// Once Next has been called at least once, schema has the result schema, rs contains the current
// page of data, and nextToken contains the token for fetching the next
// page (empty if there is no more data to be fetched).
schema Schema
rs [][]Value
nextToken string
// The remaining fields contain enough information to fetch the current
// page of data, and determine which row of data from this page is the
// current row.
pf pageFetcher
pageToken string
// The offset from the start of the current page to the current row.
// For a new iterator, this is -1.
offset int64
}
func newIterator(s service, pf pageFetcher) *Iterator {
return &Iterator{
service: s,
pf: pf,
offset: -1,
// Construct a RowIterator.
// If pf is nil, there are no rows in the result set.
func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
it := &RowIterator{
ctx: ctx,
table: t,
pf: pf,
}
if pf != nil {
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.rows) },
func() interface{} { r := it.rows; it.rows = nil; return r })
}
return it
}
// fetchPage loads the current page of data from the server.
// The contents of rs and nextToken are replaced with the loaded data.
// If there is an error while fetching, the error is stored in it.err and false is returned.
func (it *Iterator) fetchPage(ctx context.Context) bool {
var res *readDataResult
var err error
for {
res, err = it.pf.fetch(ctx, it.service, it.pageToken)
if err != errIncompleteJob {
break
// A RowIterator provides access to the result of a BigQuery lookup.
type RowIterator struct {
ctx context.Context
table *Table
pf pageFetcher
pageInfo *iterator.PageInfo
nextFunc func() error
// StartIndex can be set before the first call to Next. If PageInfo().Token
// is also set, StartIndex is ignored.
StartIndex uint64
// The schema of the table. Available after the first call to Next.
Schema Schema
// The total number of rows in the result. Available after the first call to Next.
// May be zero just after rows were inserted.
TotalRows uint64
rows [][]Value
structLoader structLoader // used to populate a pointer to a struct
}
// Next loads the next row into dst. Its return value is iterator.Done if there
// are no more results. Once Next returns iterator.Done, all subsequent calls
// will return iterator.Done.
//
// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
//
// If dst is a *[]Value, it will be set to new []Value whose i'th element
// will be populated with the i'th column of the row.
//
// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
// for each schema column name, the map key of that name will be set to the column's
// value. STRUCT types (RECORD types or nested schemas) become nested maps.
//
// If dst is pointer to a struct, each column in the schema will be matched
// with an exported field of the struct that has the same name, ignoring case.
// Unmatched schema columns and struct fields will be ignored.
//
// Each BigQuery column type corresponds to one or more Go types; a matching struct
// field must be of the correct type. The correspondences are:
//
// STRING string
// BOOL bool
// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32
// FLOAT float32, float64
// BYTES []byte
// TIMESTAMP time.Time
// DATE civil.Date
// TIME civil.Time
// DATETIME civil.DateTime
//
// A repeated field corresponds to a slice or array of the element type. A STRUCT
// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
//
// It is an error to attempt to read a BigQuery NULL value into a struct field,
// unless the field is of type []byte or is one of the special Null types: NullInt64,
// NullFloat64, NullBool, NullString, NullTimestamp, NullDate, NullTime or
// NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
// table with NULLs.
func (it *RowIterator) Next(dst interface{}) error {
if it.pf == nil { // There are no rows in the result set.
return iterator.Done
}
var vl ValueLoader
switch dst := dst.(type) {
case ValueLoader:
vl = dst
case *[]Value:
vl = (*valueList)(dst)
case *map[string]Value:
vl = (*valueMap)(dst)
default:
if !isStructPtr(dst) {
return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
}
}
if err := it.nextFunc(); err != nil {
return err
}
row := it.rows[0]
it.rows = it.rows[1:]
if vl == nil {
// This can only happen if dst is a pointer to a struct. We couldn't
// set vl above because we need the schema.
if err := it.structLoader.set(dst, it.Schema); err != nil {
return err
}
vl = &it.structLoader
}
return vl.Load(row, it.Schema)
}
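// Illustrative sketch (not part of the vendored file): reading table rows into a
// struct with Next, using the type correspondences listed above; nullable columns use
// the Null* types. Assumes the cloud.google.com/go/civil and
// google.golang.org/api/iterator packages are imported; dataset, table and column
// names are placeholders.
func exampleRowIteratorNextStruct(ctx context.Context, client *bigquery.Client) {
	type visit struct {
		Name  string
		Day   civil.Date
		Count bigquery.NullInt64
	}
	it := client.Dataset("my_dataset").Table("my_table").Read(ctx)
	for {
		var v visit
		err := it.Next(&v)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(v.Name, v.Day, v.Count)
	}
}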
func isStructPtr(x interface{}) bool {
t := reflect.TypeOf(x)
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
res, err := it.pf(it.ctx, it.table, it.Schema, it.StartIndex, int64(pageSize), pageToken)
if err != nil {
it.err = err
return false
return "", err
}
it.schema = res.schema
it.rs = res.rows
it.nextToken = res.pageToken
return true
it.rows = append(it.rows, res.rows...)
it.Schema = res.schema
it.TotalRows = res.totalRows
return res.pageToken, nil
}
// getEnoughData loads new data into rs until offset no longer points beyond the end of rs.
func (it *Iterator) getEnoughData(ctx context.Context) bool {
if len(it.rs) == 0 {
// Either we have not yet fetched any pages, or we are iterating over an empty dataset.
// In the former case, we should fetch a page of data, so that we can depend on the resultant nextToken.
// In the latter case, it is harmless to fetch a page of data.
if !it.fetchPage(ctx) {
return false
}
}
// A pageFetcher returns a page of rows from a destination table.
type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)
for it.offset >= int64(len(it.rs)) {
// If offset is still outside the bounds of the loaded data,
// but there are no more pages of data to fetch, then we have
// failed to satisfy the offset.
if it.nextToken == "" {
return false
}
// offset cannot be satisfied with the currently loaded data,
// so we fetch the next page. We no longer need the existing
// cached rows, so we remove them and update the offset to be
// relative to the new page that we're about to fetch.
// NOTE: we can't just set offset to 0, because after
// marshalling/unmarshalling, it's possible for the offset to
// point arbitrarily far beyond the end of rs.
// This can happen if the server returns a different-sized
// results page before and after marshalling.
it.offset -= int64(len(it.rs))
it.pageToken = it.nextToken
if !it.fetchPage(ctx) {
return false
}
}
return true
type fetchPageResult struct {
pageToken string
rows [][]Value
totalRows uint64
schema Schema
}
// Next advances the Iterator to the next row, making that row available
// via the Get method.
// Next must be called before the first call to Get or Schema, and blocks until data is available.
// Next returns false when there are no more rows available, either because
// the end of the output was reached, or because there was an error (consult
// the Err method to determine which).
func (it *Iterator) Next(ctx context.Context) bool {
if it.err != nil {
return false
// fetchPage gets a page of rows from t.
func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
// Fetch the table schema in the background, if necessary.
errc := make(chan error, 1)
if schema != nil {
errc <- nil
} else {
go func() {
var bqt *bq.Table
err := runWithRetry(ctx, func() (err error) {
bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
Fields("schema").
Context(ctx).
Do()
return err
})
if err == nil && bqt.Schema != nil {
schema = bqToSchema(bqt.Schema)
}
errc <- err
}()
}
// Advance offset to where we want it to be for the next call to Get.
it.offset++
// offset may now point beyond the end of rs, so we fetch data
// until offset is within its bounds again. If there are no more
// results available, offset will be left pointing beyond the bounds
// of rs.
// At the end of this method, rs will contain at least one element
// unless the dataset we are iterating over is empty.
return it.getEnoughData(ctx)
}
// Err returns the last error encountered by Next, or nil for no error.
func (it *Iterator) Err() error {
return it.err
}
// verifyState checks that the iterator is pointing to a valid row.
func (it *Iterator) verifyState() error {
if it.err != nil {
return fmt.Errorf("called on iterator in error state: %v", it.err)
}
// If Next has been called, then offset should always index into a
// valid row in rs, as long as there is still data available.
if it.offset >= int64(len(it.rs)) || it.offset < 0 {
return errors.New("called without preceding successful call to Next")
}
return nil
}
// Get loads the current row into dst, which must implement ValueLoader.
func (it *Iterator) Get(dst interface{}) error {
if err := it.verifyState(); err != nil {
return fmt.Errorf("Get %v", err)
}
if dst, ok := dst.(ValueLoader); ok {
return dst.Load(it.rs[it.offset])
}
return errors.New("Get called with unsupported argument type")
}
// Schema returns the schema of the result rows.
func (it *Iterator) Schema() (Schema, error) {
if err := it.verifyState(); err != nil {
return nil, fmt.Errorf("Schema %v", err)
}
return it.schema, nil
call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
setClientHeader(call.Header())
if pageToken != "" {
call.PageToken(pageToken)
} else {
call.StartIndex(startIndex)
}
if pageSize > 0 {
call.MaxResults(pageSize)
}
var res *bq.TableDataList
err := runWithRetry(ctx, func() (err error) {
res, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
err = <-errc
if err != nil {
return nil, err
}
rows, err := convertRows(res.Rows, schema)
if err != nil {
return nil, err
}
return &fetchPageResult{
pageToken: res.PageToken,
rows: rows,
totalRows: uint64(res.TotalRows),
schema: schema,
}, nil
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,324 +15,226 @@
package bigquery
import (
"context"
"errors"
"fmt"
"reflect"
"testing"
"golang.org/x/net/context"
"cloud.google.com/go/internal/testutil"
"google.golang.org/api/iterator"
)
type fetchResponse struct {
result *readDataResult // The result to return.
err error // The error to return.
result *fetchPageResult // The result to return.
err error // The error to return.
}
// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
fetchResponses map[string]fetchResponse
err error
err error
}
func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
call, ok := pf.fetchResponses[token]
func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) {
call, ok := pf.fetchResponses[pageToken]
if !ok {
pf.err = fmt.Errorf("Unexpected page token: %q", token)
pf.err = fmt.Errorf("Unexpected page token: %q", pageToken)
}
return call.result, call.err
}
func TestIterator(t *testing.T) {
var (
iiSchema = Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
}
siSchema = Schema{
{Type: StringFieldType},
{Type: IntegerFieldType},
}
)
fetchFailure := errors.New("fetch failure")
testCases := []struct {
desc string
alreadyConsumed int64 // amount to advance offset before commencing reading.
fetchResponses map[string]fetchResponse
want []ValueList
wantErr error
wantSchema Schema
desc string
pageToken string
fetchResponses map[string]fetchResponse
want [][]Value
wantErr error
wantSchema Schema
wantTotalRows uint64
}{
{
desc: "Iteration over single empty page",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{},
schema: Schema{},
},
},
},
want: []ValueList{},
want: [][]Value{},
wantSchema: Schema{},
},
{
desc: "Iteration over single page",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
totalRows: 4,
},
},
},
want: []ValueList{{1, 2}, {11, 12}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
want: [][]Value{{1, 2}, {11, 12}},
wantSchema: iiSchema,
wantTotalRows: 4,
},
{
desc: "Iteration over single page with different schema",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{"1", 2}, {"11", 12}},
schema: Schema{
{Type: StringFieldType},
{Type: IntegerFieldType},
},
schema: siSchema,
},
},
},
want: []ValueList{{"1", 2}, {"11", 12}},
wantSchema: Schema{
{Type: StringFieldType},
{Type: IntegerFieldType},
},
want: [][]Value{{"1", 2}, {"11", 12}},
wantSchema: siSchema,
},
{
desc: "Iteration over two pages",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
totalRows: 4,
},
},
"a": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
totalRows: 4,
},
},
},
want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
wantSchema: iiSchema,
wantTotalRows: 4,
},
{
desc: "Server response includes empty page",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
},
},
"a": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "b",
rows: [][]Value{},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
},
},
"b": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
},
},
},
want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
wantSchema: iiSchema,
},
{
desc: "Fetch error",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
},
},
"a": {
// We return some data from this fetch, but also an error.
// So the end result should include only data from the previous fetch.
err: fetchFailure,
result: &readDataResult{
result: &fetchPageResult{
pageToken: "b",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
},
},
},
want: []ValueList{{1, 2}, {11, 12}},
wantErr: fetchFailure,
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
want: [][]Value{{1, 2}, {11, 12}},
wantErr: fetchFailure,
wantSchema: iiSchema,
},
{
desc: "Skip over a single element",
alreadyConsumed: 1,
desc: "Skip over an entire page",
pageToken: "a",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
},
},
"a": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
},
},
},
want: []ValueList{{11, 12}, {101, 102}, {111, 112}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
want: [][]Value{{101, 102}, {111, 112}},
wantSchema: iiSchema,
},
{
desc: "Skip over an entire page",
alreadyConsumed: 2,
desc: "Skip beyond all data",
pageToken: "b",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
},
},
"a": {
result: &readDataResult{
pageToken: "",
result: &fetchPageResult{
pageToken: "b",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
schema: iiSchema,
},
},
},
want: []ValueList{{101, 102}, {111, 112}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
{
desc: "Skip beyond start of second page",
alreadyConsumed: 3,
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
"a": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
},
want: []ValueList{{111, 112}},
wantSchema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
{
desc: "Skip beyond all data",
alreadyConsumed: 4,
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
},
"a": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: Schema{
{Type: IntegerFieldType},
{Type: IntegerFieldType},
},
},
"b": {
result: &fetchPageResult{},
},
},
// In this test case, Next will return false on its first call,
// so we won't even attempt to call Get.
want: []ValueList{},
want: [][]Value{},
wantSchema: Schema{},
},
}
@@ -341,173 +243,88 @@ func TestIterator(t *testing.T) {
pf := &pageFetcherStub{
fetchResponses: tc.fetchResponses,
}
it := newIterator(nil, pf)
it.offset += tc.alreadyConsumed
values, schema, err := consumeIterator(it)
if err != nil {
t.Fatalf("%s: %v", tc.desc, err)
it := newRowIterator(context.Background(), nil, pf.fetchPage)
it.PageInfo().Token = tc.pageToken
values, schema, totalRows, err := consumeRowIterator(it)
if err != tc.wantErr {
t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
}
if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
}
if it.Err() != tc.wantErr {
t.Errorf("%s: iterator.Err:\ngot: %v\nwant: %v", tc.desc, it.Err(), tc.wantErr)
}
if (len(schema) != 0 || len(tc.wantSchema) != 0) && !reflect.DeepEqual(schema, tc.wantSchema) {
if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
}
}
}
// consumeIterator reads the schema and all values from an iterator and returns them.
func consumeIterator(it *Iterator) ([]ValueList, Schema, error) {
var got []ValueList
var schema Schema
for it.Next(context.Background()) {
var vals ValueList
var err error
if err = it.Get(&vals); err != nil {
return nil, Schema{}, fmt.Errorf("err calling Get: %v", err)
}
got = append(got, vals)
if schema, err = it.Schema(); err != nil {
return nil, Schema{}, fmt.Errorf("err calling Schema: %v", err)
if totalRows != tc.wantTotalRows {
t.Errorf("%s: totalRows: got %d, want %d", tc.desc, totalRows, tc.wantTotalRows)
}
}
return got, schema, nil
}
func TestGetBeforeNext(t *testing.T) {
// TODO: once marshalling/unmarshalling of iterators is implemented, do a similar test for unmarshalled iterators.
pf := &pageFetcherStub{
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
},
},
},
}
it := newIterator(nil, pf)
var vals ValueList
if err := it.Get(&vals); err == nil {
t.Errorf("Expected error calling Get before Next")
// consumeRowIterator reads the schema and all values from a RowIterator and returns them.
func consumeRowIterator(it *RowIterator) ([][]Value, Schema, uint64, error) {
var (
got [][]Value
schema Schema
totalRows uint64
)
for {
var vls []Value
err := it.Next(&vls)
if err == iterator.Done {
return got, schema, totalRows, nil
}
if err != nil {
return got, schema, totalRows, err
}
got = append(got, vls)
schema = it.Schema
totalRows = it.TotalRows
}
}
type delayedPageFetcher struct {
pageFetcherStub
delayCount int
}
func (pf *delayedPageFetcher) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
if pf.delayCount > 0 {
pf.delayCount--
return nil, errIncompleteJob
}
return pf.pageFetcherStub.fetch(ctx, s, token)
}
func TestIterateIncompleteJob(t *testing.T) {
want := []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}}
pf := pageFetcherStub{
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
},
},
"a": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
},
},
},
}
dpf := &delayedPageFetcher{
pageFetcherStub: pf,
delayCount: 1,
}
it := newIterator(nil, dpf)
values, _, err := consumeIterator(it)
if err != nil {
t.Fatal(err)
}
if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) {
t.Errorf("values: got:\n%v\nwant:\n%v", values, want)
}
if it.Err() != nil {
t.Fatalf("iterator.Err: got:\n%v", it.Err())
}
if dpf.delayCount != 0 {
t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount)
}
}
func TestGetDuringErrorState(t *testing.T) {
func TestNextDuringErrorState(t *testing.T) {
pf := &pageFetcherStub{
fetchResponses: map[string]fetchResponse{
"": {err: errors.New("bang")},
},
}
it := newIterator(nil, pf)
var vals ValueList
it.Next(context.Background())
if it.Err() == nil {
it := newRowIterator(context.Background(), nil, pf.fetchPage)
var vals []Value
if err := it.Next(&vals); err == nil {
t.Errorf("Expected error after calling Next")
}
if err := it.Get(&vals); err == nil {
t.Errorf("Expected error calling Get when iterator has a non-nil error.")
if err := it.Next(&vals); err == nil {
t.Errorf("Expected error calling Next again when iterator has a non-nil error.")
}
}
func TestGetAfterFinished(t *testing.T) {
func TestNextAfterFinished(t *testing.T) {
testCases := []struct {
alreadyConsumed int64 // amount to advance offset before commencing reading.
fetchResponses map[string]fetchResponse
want []ValueList
fetchResponses map[string]fetchResponse
want [][]Value
}{
{
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
},
},
},
want: []ValueList{{1, 2}, {11, 12}},
want: [][]Value{{1, 2}, {11, 12}},
},
{
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{},
},
},
},
want: []ValueList{},
},
{
alreadyConsumed: 100,
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
},
},
},
want: []ValueList{},
want: [][]Value{},
},
}
@@ -515,24 +332,31 @@ func TestGetAfterFinished(t *testing.T) {
pf := &pageFetcherStub{
fetchResponses: tc.fetchResponses,
}
it := newIterator(nil, pf)
it.offset += tc.alreadyConsumed
it := newRowIterator(context.Background(), nil, pf.fetchPage)
values, _, err := consumeIterator(it)
values, _, _, err := consumeRowIterator(it)
if err != nil {
t.Fatal(err)
}
if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
}
if it.Err() != nil {
t.Fatalf("iterator.Err: got:\n%v\nwant:\n:nil", it.Err())
}
// Try calling Get again.
var vals ValueList
if err := it.Get(&vals); err == nil {
t.Errorf("Expected error calling Get when there are no more values")
var vals []Value
if err := it.Next(&vals); err != iterator.Done {
t.Errorf("Expected Done calling Next when there are no more values")
}
}
}
func TestIteratorNextTypes(t *testing.T) {
it := newRowIterator(context.Background(), nil, nil)
for _, v := range []interface{}{3, "s", []int{}, &[]int{},
map[string]Value{}, &map[string]interface{}{},
struct{}{},
} {
if err := it.Next(v); err == nil {
t.Errorf("%v: want error, got nil", v)
}
}
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,48 +15,80 @@
package bigquery
import (
"context"
"errors"
"fmt"
"time"
"golang.org/x/net/context"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/trace"
gax "github.com/googleapis/gax-go/v2"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
)
// A Job represents an operation which has been submitted to BigQuery for processing.
type Job struct {
service service
projectID string
jobID string
isQuery bool
c *Client
projectID string
jobID string
location string
email string
config *bq.JobConfiguration
lastStatus *JobStatus
}
// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
//
// For jobs whose location is other than "US" or "EU", set Client.Location or use
// JobFromIDLocation.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
jobType, err := c.service.getJobType(ctx, c.projectID, id)
return c.JobFromIDLocation(ctx, id, c.Location)
}
// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package (for example, it may have
// been created in the BigQuery console), but it must exist in the specified location.
func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.JobFromIDLocation")
defer func() { trace.EndSpan(ctx, err) }()
bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics")
if err != nil {
return nil, err
}
return &Job{
service: c.service,
projectID: c.projectID,
jobID: id,
isQuery: jobType == queryJobType,
}, nil
return bqToJob(bqjob, c)
}
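// A brief sketch of re-attaching to an existing job (the job ID and location
// are placeholders; client and ctx are assumed to exist):
//
//	job, err := client.JobFromIDLocation(ctx, "bquxjob_123abc", "EU")
//	if err != nil {
//		return err
//	}
//	fmt.Println(job.ID(), job.Location(), job.Email())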
// ID returns the job's ID.
func (j *Job) ID() string {
return j.jobID
}
// Location returns the job's location.
func (j *Job) Location() string {
return j.location
}
// Email returns the email of the job's creator.
func (j *Job) Email() string {
return j.email
}
// State is one of a sequence of states that a Job progresses through as it is processed.
type State int
const (
Pending State = iota
// StateUnspecified is the default JobIterator state.
StateUnspecified State = iota
// Pending is a state that describes that the job is pending.
Pending
// Running is a state that describes that the job is running.
Running
// Done is a state that describes that the job is done.
Done
)
@@ -69,63 +101,730 @@ type JobStatus struct {
// All errors encountered during the running of the job.
// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
Errors []*Error
// Statistics about the job.
Statistics *JobStatistics
}
// jobOption is an Option which modifies a bq.Job proto.
// This is used for configuring values that apply to all operations, such as setting a jobReference.
type jobOption interface {
customizeJob(job *bq.Job, projectID string)
// JobConfig contains configuration information for a job. It is implemented by
// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
type JobConfig interface {
isJobConfig()
}
type jobID string
func (*CopyConfig) isJobConfig() {}
func (*ExtractConfig) isJobConfig() {}
func (*LoadConfig) isJobConfig() {}
func (*QueryConfig) isJobConfig() {}
// JobID returns an Option that sets the job ID of a BigQuery job.
// If this Option is not used, a job ID is generated automatically.
func JobID(ID string) Option {
return jobID(ID)
// Config returns the configuration information for j.
func (j *Job) Config() (JobConfig, error) {
return bqToJobConfig(j.config, j.c)
}
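// A small sketch of inspecting a job's configuration with a type switch on
// JobConfig (job is assumed to be a *Job obtained elsewhere; QueryConfig.Q and
// LoadConfig.Dst hold the query text and destination table respectively):
//
//	cfg, err := job.Config()
//	if err != nil {
//		return err
//	}
//	switch c := cfg.(type) {
//	case *QueryConfig:
//		fmt.Println("query:", c.Q)
//	case *LoadConfig:
//		fmt.Println("loading into:", c.Dst.TableID)
//	}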
func (opt jobID) implementsOption() {}
func (opt jobID) customizeJob(job *bq.Job, projectID string) {
job.JobReference = &bq.JobReference{
JobId: string(opt),
ProjectId: projectID,
func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) {
switch {
case q == nil:
return nil, nil
case q.Copy != nil:
return bqToCopyConfig(q, c), nil
case q.Extract != nil:
return bqToExtractConfig(q, c), nil
case q.Load != nil:
return bqToLoadConfig(q, c), nil
case q.Query != nil:
return bqToQueryConfig(q, c)
default:
return nil, nil
}
}
// JobIDConfig describes how to create an ID for a job.
type JobIDConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Location is the location for the job.
Location string
}
// createJobRef creates a JobReference.
func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference {
// We don't check whether projectID is empty; the server will return an
// error when it encounters the resulting JobReference.
loc := j.Location
if loc == "" { // Use Client.Location as a default.
loc = c.Location
}
jr := &bq.JobReference{ProjectId: c.projectID, Location: loc}
if j.JobID == "" {
jr.JobId = randomIDFn()
} else if j.AddJobIDSuffix {
jr.JobId = j.JobID + "-" + randomIDFn()
} else {
jr.JobId = j.JobID
}
return jr
}
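// A small sketch of how these ID fields are typically set on a job-creating
// value such as a Query before calling Run (the query text and job ID are
// placeholders; client and ctx are assumed to exist):
//
//	q := client.Query("SELECT 17")
//	q.JobID = "my-job"
//	q.AddJobIDSuffix = true // the final ID becomes "my-job-<random suffix>"
//	q.Location = "US"
//	job, err := q.Run(ctx)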
// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccesfully.
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool {
return s.State == Done
}
// Err returns the error that caused the job to complete unsuccesfully (if any).
// Err returns the error that caused the job to complete unsuccessfully (if any).
func (s *JobStatus) Err() error {
return s.err
}
// Status returns the current status of the job. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
return j.service.jobStatus(ctx, j.projectID, j.jobID)
// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (js *JobStatus, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Status")
defer func() { trace.EndSpan(ctx, err) }()
bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics")
if err != nil {
return nil, err
}
if err := j.setStatus(bqjob.Status); err != nil {
return nil, err
}
j.setStatistics(bqjob.Statistics, j.c)
return j.lastStatus, nil
}
// LastStatus returns the most recently retrieved status of the job. The status is
// retrieved when a new job is created, or when JobFromID or Job.Status is called.
// Call Job.Status to get the most up-to-date information about a job.
func (j *Job) LastStatus() *JobStatus {
return j.lastStatus
}
// Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error {
return j.service.jobCancel(ctx, j.projectID, j.jobID)
// Jobs.Cancel returns a job entity, but the only relevant piece of
// data it may contain (the status of the job) is unreliable. From the
// docs: "This call will return immediately, and the client will need
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
Location(j.location).
Fields(). // We don't need any of the response data.
Context(ctx)
setClientHeader(call.Header())
return runWithRetry(ctx, func() error {
_, err := call.Do()
return err
})
}
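// A short sketch of the cancel-then-poll pattern described above (job and ctx
// are assumed to exist):
//
//	if err := job.Cancel(ctx); err != nil {
//		return err
//	}
//	// Cancellation is asynchronous; use Status or Wait to observe the final state.
//	status, err := job.Wait(ctx)
//	if err != nil {
//		return err
//	}
//	fmt.Println("done:", status.Done())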
func (j *Job) implementsReadSource() {}
// Wait blocks until the job or the context is done. It returns the final status
// of the job.
// If an error occurs while retrieving the status, Wait returns that error. But
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (js *JobStatus, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Wait")
defer func() { trace.EndSpan(ctx, err) }()
func (j *Job) customizeReadQuery(cursor *readQueryConf) error {
// There are multiple kinds of jobs, but only a query job is suitable for reading.
if !j.isQuery {
return errors.New("Cannot read from a non-query job")
if j.isQuery() {
// We can avoid polling for query jobs.
if _, _, err := j.waitForQuery(ctx, j.projectID); err != nil {
return nil, err
}
// Note: extra RPC even if you just want to wait for the query to finish.
js, err := j.Status(ctx)
if err != nil {
return nil, err
}
return js, nil
}
// Non-query jobs must poll.
err = internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
js, err = j.Status(ctx)
if err != nil {
return true, err
}
if js.Done() {
return true, nil
}
return false, nil
})
if err != nil {
return nil, err
}
return js, nil
}
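// A minimal sketch of the two-error check described above (job and ctx are
// assumed to exist):
//
//	status, err := job.Wait(ctx)
//	if err != nil {
//		return err // the status could not be retrieved
//	}
//	if err := status.Err(); err != nil {
//		return err // the job completed, but unsuccessfully
//	}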
// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (ri *RowIterator, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Read")
defer func() { trace.EndSpan(ctx, err) }()
return j.read(ctx, j.waitForQuery, fetchPage)
}
func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, uint64, error), pf pageFetcher) (*RowIterator, error) {
if !j.isQuery() {
return nil, errors.New("bigquery: cannot read from a non-query job")
}
destTable := j.config.Query.DestinationTable
// The destination table should only be nil if there was a query error.
projectID := j.projectID
if destTable != nil && projectID != destTable.ProjectId {
return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId)
}
schema, totalRows, err := waitForQuery(ctx, projectID)
if err != nil {
return nil, err
}
if destTable == nil {
return nil, errors.New("bigquery: query job missing destination table")
}
dt := bqToTable(destTable, j.c)
if totalRows == 0 {
pf = nil
}
it := newRowIterator(ctx, dt, pf)
it.Schema = schema
it.TotalRows = totalRows
return it, nil
}
// waitForQuery waits for the query job to complete and returns its schema. It also
// returns the total number of rows in the result set.
func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, uint64, error) {
// Use GetQueryResults only to wait for completion, not to read results.
call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0)
setClientHeader(call.Header())
backoff := gax.Backoff{
Initial: 1 * time.Second,
Multiplier: 2,
Max: 60 * time.Second,
}
var res *bq.GetQueryResultsResponse
err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
res, err = call.Do()
if err != nil {
return !retryableError(err), err
}
if !res.JobComplete { // GetQueryResults may return early without error; retry.
return false, nil
}
return true, nil
})
if err != nil {
return nil, 0, err
}
return bqToSchema(res.Schema), res.TotalRows, nil
}
// JobStatistics contains statistics about a job.
type JobStatistics struct {
CreationTime time.Time
StartTime time.Time
EndTime time.Time
TotalBytesProcessed int64
Details Statistics
}
// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics.
type Statistics interface {
implementsStatistics()
}
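// A small sketch of reading per-job statistics (job and ctx are assumed to
// exist; the type assertion succeeds only for query jobs):
//
//	status, err := job.Status(ctx)
//	if err != nil {
//		return err
//	}
//	if qs, ok := status.Statistics.Details.(*QueryStatistics); ok {
//		fmt.Println("bytes processed:", qs.TotalBytesProcessed)
//	}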
// ExtractStatistics contains statistics about an extract job.
type ExtractStatistics struct {
// The number of files per destination URI or URI pattern specified in the
// extract configuration. These values will be in the same order as the
// URIs specified in the 'destinationUris' field.
DestinationURIFileCounts []int64
}
// LoadStatistics contains statistics about a load job.
type LoadStatistics struct {
// The number of bytes of source data in a load job.
InputFileBytes int64
// The number of source files in a load job.
InputFiles int64
// Size of the loaded data in bytes. Note that while a load job is in the
// running state, this value may change.
OutputBytes int64
// The number of rows imported in a load job. Note that while an import job is
// in the running state, this value may change.
OutputRows int64
}
// QueryStatistics contains statistics about a query job.
type QueryStatistics struct {
// Billing tier for the job.
BillingTier int64
// Whether the query result was fetched from the query cache.
CacheHit bool
// The type of query statement, if valid.
StatementType string
// Total bytes billed for the job.
TotalBytesBilled int64
// Total bytes processed for the job.
TotalBytesProcessed int64
// For dry run queries, indicates how accurate the TotalBytesProcessed value is.
// When indicated, values include:
// UNKNOWN: accuracy of the estimate is unknown.
// PRECISE: estimate is precise.
// LOWER_BOUND: estimate is lower bound of what the query would cost.
// UPPER_BOUND: estimate is upper bound of what the query would cost.
TotalBytesProcessedAccuracy string
// Describes execution plan for the query.
QueryPlan []*ExplainQueryStage
// The number of rows affected by a DML statement. Present only for DML
// statements INSERT, UPDATE or DELETE.
NumDMLAffectedRows int64
// Describes a timeline of job execution.
Timeline []*QueryTimelineSample
// ReferencedTables: [Output-only, Experimental] Referenced tables for
// the job. Queries that reference more than 50 tables will not have a
// complete list.
ReferencedTables []*Table
// The schema of the results. Present only for successful dry run of
// non-legacy SQL queries.
Schema Schema
// Slot-milliseconds consumed by this query job.
SlotMillis int64
// Standard SQL: list of undeclared query parameter names detected during a
// dry run validation.
UndeclaredQueryParameterNames []string
// DDL target table.
DDLTargetTable *Table
// DDL Operation performed on the target table. Used to report how the
// query impacted the DDL target table.
DDLOperationPerformed string
}
// ExplainQueryStage describes one stage of a query.
type ExplainQueryStage struct {
// CompletedParallelInputs: Number of parallel input segments completed.
CompletedParallelInputs int64
// ComputeAvg: Duration the average shard spent on CPU-bound tasks.
ComputeAvg time.Duration
// ComputeMax: Duration the slowest shard spent on CPU-bound tasks.
ComputeMax time.Duration
// Relative amount of the total time the average shard spent on CPU-bound tasks.
ComputeRatioAvg float64
// Relative amount of the total time the slowest shard spent on CPU-bound tasks.
ComputeRatioMax float64
// EndTime: Stage end time.
EndTime time.Time
// Unique ID for stage within plan.
ID int64
// InputStages: IDs for stages that are inputs to this stage.
InputStages []int64
// Human-readable name for stage.
Name string
// ParallelInputs: Number of parallel input segments to be processed.
ParallelInputs int64
// ReadAvg: Duration the average shard spent reading input.
ReadAvg time.Duration
// ReadMax: Duration the slowest shard spent reading input.
ReadMax time.Duration
// Relative amount of the total time the average shard spent reading input.
ReadRatioAvg float64
// Relative amount of the total time the slowest shard spent reading input.
ReadRatioMax float64
// Number of records read into the stage.
RecordsRead int64
// Number of records written by the stage.
RecordsWritten int64
// ShuffleOutputBytes: Total number of bytes written to shuffle.
ShuffleOutputBytes int64
// ShuffleOutputBytesSpilled: Total number of bytes written to shuffle
// and spilled to disk.
ShuffleOutputBytesSpilled int64
// StartTime: Stage start time.
StartTime time.Time
// Current status for the stage.
Status string
// List of operations within the stage in dependency order (approximately
// chronological).
Steps []*ExplainQueryStep
// WaitAvg: Duration the average shard spent waiting to be scheduled.
WaitAvg time.Duration
// WaitMax: Duration the slowest shard spent waiting to be scheduled.
WaitMax time.Duration
// Relative amount of the total time the average shard spent waiting to be scheduled.
WaitRatioAvg float64
// Relative amount of the total time the slowest shard spent waiting to be scheduled.
WaitRatioMax float64
// WriteAvg: Duration the average shard spent on writing output.
WriteAvg time.Duration
// WriteMax: Duration the slowest shard spent on writing output.
WriteMax time.Duration
// Relative amount of the total time the average shard spent on writing output.
WriteRatioAvg float64
// Relative amount of the total time the slowest shard spent on writing output.
WriteRatioMax float64
}
// ExplainQueryStep describes one step of a query stage.
type ExplainQueryStep struct {
// Machine-readable operation type.
Kind string
// Human-readable stage descriptions.
Substeps []string
}
// QueryTimelineSample represents a sample of execution statistics at a point in time.
type QueryTimelineSample struct {
// Total number of units currently being processed by workers, represented as largest value since last sample.
ActiveUnits int64
// Total parallel units of work completed by this query.
CompletedUnits int64
// Time elapsed since start of query execution.
Elapsed time.Duration
// Total parallel units of work remaining for the active stages.
PendingUnits int64
// Cumulative slot-milliseconds consumed by the query.
SlotMillis int64
}
func (*ExtractStatistics) implementsStatistics() {}
func (*LoadStatistics) implementsStatistics() {}
func (*QueryStatistics) implementsStatistics() {}
// Jobs lists jobs within a project.
func (c *Client) Jobs(ctx context.Context) *JobIterator {
it := &JobIterator{
ctx: ctx,
c: c,
ProjectID: c.projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}
// JobIterator iterates over jobs in a project.
type JobIterator struct {
ProjectID string // Project ID of the jobs to list. Default is the client's project.
AllUsers bool // Whether to list jobs owned by all users in the project, or just the current caller.
State State // List only jobs in the given state. Defaults to all states.
MinCreationTime time.Time // List only jobs created after this time.
MaxCreationTime time.Time // List only jobs created before this time.
ctx context.Context
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []*Job
}
// PageInfo is a getter for the JobIterator's PageInfo.
func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// Next returns the next Job. Its second return value is iterator.Done if
// there are no more results. Once Next returns Done, all subsequent calls will
// return Done.
func (it *JobIterator) Next() (*Job, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
item := it.items[0]
it.items = it.items[1:]
return item, nil
}
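// A brief sketch of listing a project's jobs with this iterator (client and
// ctx are assumed to exist; filtering on Done is only an example):
//
//	it := client.Jobs(ctx)
//	it.State = Done
//	for {
//		job, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		fmt.Println(job.ID())
//	}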
func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
var st string
switch it.State {
case StateUnspecified:
st = ""
case Pending:
st = "pending"
case Running:
st = "running"
case Done:
st = "done"
default:
return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
}
cursor.projectID = j.projectID
cursor.jobID = j.jobID
req := it.c.bqs.Jobs.List(it.ProjectID).
Context(it.ctx).
PageToken(pageToken).
Projection("full").
AllUsers(it.AllUsers)
if st != "" {
req.StateFilter(st)
}
if !it.MinCreationTime.IsZero() {
req.MinCreationTime(uint64(it.MinCreationTime.UnixNano() / 1e6))
}
if !it.MaxCreationTime.IsZero() {
req.MaxCreationTime(uint64(it.MaxCreationTime.UnixNano() / 1e6))
}
setClientHeader(req.Header())
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
res, err := req.Do()
if err != nil {
return "", err
}
for _, j := range res.Jobs {
job, err := convertListedJob(j, it.c)
if err != nil {
return "", err
}
it.items = append(it.items, job)
}
return res.NextPageToken, nil
}
func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, j.UserEmail, c)
}
func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) {
var job *bq.Job
call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
if location != "" {
call = call.Location(location)
}
if len(fields) > 0 {
call = call.Fields(fields...)
}
setClientHeader(call.Header())
err := runWithRetry(ctx, func() (err error) {
job, err = call.Do()
return err
})
if err != nil {
return nil, err
}
return job, nil
}
func bqToJob(q *bq.Job, c *Client) (*Job, error) {
return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, q.UserEmail, c)
}
func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, email string, c *Client) (*Job, error) {
j := &Job{
projectID: qr.ProjectId,
jobID: qr.JobId,
location: qr.Location,
c: c,
email: email,
}
j.setConfig(qc)
if err := j.setStatus(qs); err != nil {
return nil, err
}
j.setStatistics(qt, c)
return j, nil
}
func (j *Job) setConfig(config *bq.JobConfiguration) {
if config == nil {
return
}
j.config = config
}
func (j *Job) isQuery() bool {
return j.config != nil && j.config.Query != nil
}
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
func (j *Job) setStatus(qs *bq.JobStatus) error {
if qs == nil {
return nil
}
state, ok := stateMap[qs.State]
if !ok {
return fmt.Errorf("unexpected job state: %v", qs.State)
}
j.lastStatus = &JobStatus{
State: state,
err: nil,
}
if err := bqToError(qs.ErrorResult); state == Done && err != nil {
j.lastStatus.err = err
}
for _, ep := range qs.Errors {
j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep))
}
return nil
}
func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
if s == nil || j.lastStatus == nil {
return
}
js := &JobStatistics{
CreationTime: unixMillisToTime(s.CreationTime),
StartTime: unixMillisToTime(s.StartTime),
EndTime: unixMillisToTime(s.EndTime),
TotalBytesProcessed: s.TotalBytesProcessed,
}
switch {
case s.Extract != nil:
js.Details = &ExtractStatistics{
DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
}
case s.Load != nil:
js.Details = &LoadStatistics{
InputFileBytes: s.Load.InputFileBytes,
InputFiles: s.Load.InputFiles,
OutputBytes: s.Load.OutputBytes,
OutputRows: s.Load.OutputRows,
}
case s.Query != nil:
var names []string
for _, qp := range s.Query.UndeclaredQueryParameters {
names = append(names, qp.Name)
}
var tables []*Table
for _, tr := range s.Query.ReferencedTables {
tables = append(tables, bqToTable(tr, c))
}
js.Details = &QueryStatistics{
BillingTier: s.Query.BillingTier,
CacheHit: s.Query.CacheHit,
DDLTargetTable: bqToTable(s.Query.DdlTargetTable, c),
DDLOperationPerformed: s.Query.DdlOperationPerformed,
StatementType: s.Query.StatementType,
TotalBytesBilled: s.Query.TotalBytesBilled,
TotalBytesProcessed: s.Query.TotalBytesProcessed,
TotalBytesProcessedAccuracy: s.Query.TotalBytesProcessedAccuracy,
NumDMLAffectedRows: s.Query.NumDmlAffectedRows,
QueryPlan: queryPlanFromProto(s.Query.QueryPlan),
Schema: bqToSchema(s.Query.Schema),
SlotMillis: s.Query.TotalSlotMs,
Timeline: timelineFromProto(s.Query.Timeline),
ReferencedTables: tables,
UndeclaredQueryParameterNames: names,
}
}
j.lastStatus.Statistics = js
}
func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
var res []*ExplainQueryStage
for _, s := range stages {
var steps []*ExplainQueryStep
for _, p := range s.Steps {
steps = append(steps, &ExplainQueryStep{
Kind: p.Kind,
Substeps: p.Substeps,
})
}
res = append(res, &ExplainQueryStage{
CompletedParallelInputs: s.CompletedParallelInputs,
ComputeAvg: time.Duration(s.ComputeMsAvg) * time.Millisecond,
ComputeMax: time.Duration(s.ComputeMsMax) * time.Millisecond,
ComputeRatioAvg: s.ComputeRatioAvg,
ComputeRatioMax: s.ComputeRatioMax,
EndTime: time.Unix(0, s.EndMs*1e6),
ID: s.Id,
InputStages: s.InputStages,
Name: s.Name,
ParallelInputs: s.ParallelInputs,
ReadAvg: time.Duration(s.ReadMsAvg) * time.Millisecond,
ReadMax: time.Duration(s.ReadMsMax) * time.Millisecond,
ReadRatioAvg: s.ReadRatioAvg,
ReadRatioMax: s.ReadRatioMax,
RecordsRead: s.RecordsRead,
RecordsWritten: s.RecordsWritten,
ShuffleOutputBytes: s.ShuffleOutputBytes,
ShuffleOutputBytesSpilled: s.ShuffleOutputBytesSpilled,
StartTime: time.Unix(0, s.StartMs*1e6),
Status: s.Status,
Steps: steps,
WaitAvg: time.Duration(s.WaitMsAvg) * time.Millisecond,
WaitMax: time.Duration(s.WaitMsMax) * time.Millisecond,
WaitRatioAvg: s.WaitRatioAvg,
WaitRatioMax: s.WaitRatioMax,
WriteAvg: time.Duration(s.WriteMsAvg) * time.Millisecond,
WriteMax: time.Duration(s.WriteMsMax) * time.Millisecond,
WriteRatioAvg: s.WriteRatioAvg,
WriteRatioMax: s.WriteRatioMax,
})
}
return res
}
func timelineFromProto(timeline []*bq.QueryTimelineSample) []*QueryTimelineSample {
var res []*QueryTimelineSample
for _, s := range timeline {
res = append(res, &QueryTimelineSample{
ActiveUnits: s.ActiveUnits,
CompletedUnits: s.CompletedUnits,
Elapsed: time.Duration(s.ElapsedMs) * time.Millisecond,
PendingUnits: s.PendingUnits,
SlotMillis: s.TotalSlotMs,
})
}
return res
}

vendor/cloud.google.com/go/bigquery/job_test.go generated vendored Normal file
View File

@@ -0,0 +1,95 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
func TestCreateJobRef(t *testing.T) {
defer fixRandomID("RANDOM")()
cNoLoc := &Client{projectID: "projectID"}
cLoc := &Client{projectID: "projectID", Location: "defaultLoc"}
for _, test := range []struct {
in JobIDConfig
client *Client
want *bq.JobReference
}{
{
in: JobIDConfig{JobID: "foo"},
want: &bq.JobReference{JobId: "foo"},
},
{
in: JobIDConfig{},
want: &bq.JobReference{JobId: "RANDOM"},
},
{
in: JobIDConfig{AddJobIDSuffix: true},
want: &bq.JobReference{JobId: "RANDOM"},
},
{
in: JobIDConfig{JobID: "foo", AddJobIDSuffix: true},
want: &bq.JobReference{JobId: "foo-RANDOM"},
},
{
in: JobIDConfig{JobID: "foo", Location: "loc"},
want: &bq.JobReference{JobId: "foo", Location: "loc"},
},
{
in: JobIDConfig{JobID: "foo"},
client: cLoc,
want: &bq.JobReference{JobId: "foo", Location: "defaultLoc"},
},
{
in: JobIDConfig{JobID: "foo", Location: "loc"},
client: cLoc,
want: &bq.JobReference{JobId: "foo", Location: "loc"},
},
} {
client := test.client
if client == nil {
client = cNoLoc
}
got := test.in.createJobRef(client)
test.want.ProjectId = "projectID"
if !testutil.Equal(got, test.want) {
t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want)
}
}
}
func fixRandomID(s string) func() {
prev := randomIDFn
randomIDFn = func() string { return s }
return func() { randomIDFn = prev }
}
func checkJob(t *testing.T, i int, got, want *bq.Job) {
if got.JobReference == nil {
t.Errorf("#%d: empty job reference", i)
return
}
if got.JobReference.JobId == "" {
t.Errorf("#%d: empty job ID", i)
return
}
d := testutil.Diff(got, want)
if d != "" {
t.Errorf("#%d: (got=-, want=+) %s", i, d)
}
}

View File

@@ -1,70 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
)
// OpenTable creates a handle to an existing BigQuery table. If the table does
// not already exist, subsequent uses of the *Table will fail.
//
// Deprecated: use Client.DatasetInProject.Table instead.
func (c *Client) OpenTable(projectID, datasetID, tableID string) *Table {
return c.Table(projectID, datasetID, tableID)
}
// Table creates a handle to a BigQuery table.
//
// Use this method to reference a table in a project other than that of the
// Client.
//
// Deprecated: use Client.DatasetInProject.Table instead.
func (c *Client) Table(projectID, datasetID, tableID string) *Table {
return &Table{ProjectID: projectID, DatasetID: datasetID, TableID: tableID, service: c.service}
}
// CreateTable creates a table in the BigQuery service and returns a handle to it.
//
// Deprecated: use Table.Create instead.
func (c *Client) CreateTable(ctx context.Context, projectID, datasetID, tableID string, options ...CreateTableOption) (*Table, error) {
t := c.Table(projectID, datasetID, tableID)
if err := t.Create(ctx, options...); err != nil {
return nil, err
}
return t, nil
}
// Read fetches data from a ReadSource and returns the data via an Iterator.
//
// Deprecated: use Query.Read, Job.Read or Table.Read instead.
func (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error) {
switch src := src.(type) {
case *Job:
return src.Read(ctx, options...)
case *Query:
// For compatibility, support Query values created by literal, rather
// than Client.Query.
if src.client == nil {
src.client = c
}
return src.Read(ctx, options...)
case *Table:
return src.Read(ctx, options...)
}
return nil, fmt.Errorf("src (%T) does not support the Read operation", src)
}

vendor/cloud.google.com/go/bigquery/load.go generated vendored Normal file
View File

@@ -0,0 +1,153 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
"io"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)
// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
// Src is the source from which data will be loaded.
Src LoadSource
// Dst is the table into which the data will be loaded.
Dst *Table
// CreateDisposition specifies the circumstances under which the destination table will be created.
// The default is CreateIfNeeded.
CreateDisposition TableCreateDisposition
// WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteAppend.
WriteDisposition TableWriteDisposition
// The labels associated with this job.
Labels map[string]string
// If non-nil, the destination table is partitioned by time.
TimePartitioning *TimePartitioning
// Clustering specifies the data clustering configuration for the destination table.
Clustering *Clustering
// Custom encryption configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfig *EncryptionConfig
// Allows the schema of the destination table to be updated as a side effect of
// the load job.
SchemaUpdateOptions []string
// For Avro-based loads, controls whether logical type annotations are used.
// See https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro#logical_types
// for additional information.
UseAvroLogicalTypes bool
}
func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
config := &bq.JobConfiguration{
Labels: l.Labels,
Load: &bq.JobConfigurationLoad{
CreateDisposition: string(l.CreateDisposition),
WriteDisposition: string(l.WriteDisposition),
DestinationTable: l.Dst.toBQ(),
TimePartitioning: l.TimePartitioning.toBQ(),
Clustering: l.Clustering.toBQ(),
DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(),
SchemaUpdateOptions: l.SchemaUpdateOptions,
UseAvroLogicalTypes: l.UseAvroLogicalTypes,
},
}
media := l.Src.populateLoadConfig(config.Load)
return config, media
}
func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
lc := &LoadConfig{
Labels: q.Labels,
CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition),
WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition),
Dst: bqToTable(q.Load.DestinationTable, c),
TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning),
Clustering: bqToClustering(q.Load.Clustering),
DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration),
SchemaUpdateOptions: q.Load.SchemaUpdateOptions,
UseAvroLogicalTypes: q.Load.UseAvroLogicalTypes,
}
var fc *FileConfig
if len(q.Load.SourceUris) == 0 {
s := NewReaderSource(nil)
fc = &s.FileConfig
lc.Src = s
} else {
s := NewGCSReference(q.Load.SourceUris...)
fc = &s.FileConfig
lc.Src = s
}
bqPopulateFileConfig(q.Load, fc)
return lc
}
// A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct {
JobIDConfig
LoadConfig
c *Client
}
// A LoadSource represents a source of data that can be loaded into
// a BigQuery table.
//
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface {
// populates config, returns media
populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
}
// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
// The returned Loader may optionally be further configured before its Run method is called.
// See GCSReference and ReaderSource for additional configuration options that
// affect loading.
func (t *Table) LoaderFrom(src LoadSource) *Loader {
return &Loader{
c: t.c,
LoadConfig: LoadConfig{
Src: src,
Dst: t,
},
}
}
// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Load.Run")
defer func() { trace.EndSpan(ctx, err) }()
job, media := l.newJob()
return l.c.insertJob(ctx, job, media)
}
func (l *Loader) newJob() (*bq.Job, io.Reader) {
config, media := l.LoadConfig.toBQ()
return &bq.Job{
JobReference: l.JobIDConfig.createJobRef(l.c),
Configuration: config,
}, media
}
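// A minimal sketch of loading a CSV object from Cloud Storage with a Loader
// (bucket, object, dataset and table names are placeholders; client and ctx
// are assumed to exist; SourceFormat is set here only for illustration):
//
//	gcsRef := NewGCSReference("gs://my-bucket/data.csv")
//	gcsRef.SourceFormat = CSV
//	loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(gcsRef)
//	loader.WriteDisposition = WriteTruncate
//	job, err := loader.Run(ctx)
//	if err != nil {
//		return err
//	}
//	status, err := job.Wait(ctx)
//	if err != nil {
//		return err
//	}
//	if status.Err() != nil {
//		return status.Err()
//	}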

View File

@@ -1,112 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type loadOption interface {
customizeLoad(conf *bq.JobConfigurationLoad)
}
// DestinationSchema returns an Option that specifies the schema to use when loading data into a new table.
// A DestinationSchema Option must be supplied when loading data from Google Cloud Storage into a non-existent table.
// Caveat: DestinationSchema is not required if the data being loaded is a datastore backup.
// schema must not be nil.
func DestinationSchema(schema Schema) Option { return destSchema{Schema: schema} }
type destSchema struct {
Schema
}
func (opt destSchema) implementsOption() {}
func (opt destSchema) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.Schema = opt.asTableSchema()
}
// MaxBadRecords returns an Option that sets the maximum number of bad records that will be ignored.
// If this maximum is exceeded, the operation will be unsuccessful.
func MaxBadRecords(n int64) Option { return maxBadRecords(n) }
type maxBadRecords int64
func (opt maxBadRecords) implementsOption() {}
func (opt maxBadRecords) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.MaxBadRecords = int64(opt)
}
// AllowJaggedRows returns an Option that causes missing trailing optional columns to be tolerated in CSV data. Missing values are treated as nulls.
func AllowJaggedRows() Option { return allowJaggedRows{} }
type allowJaggedRows struct{}
func (opt allowJaggedRows) implementsOption() {}
func (opt allowJaggedRows) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.AllowJaggedRows = true
}
// AllowQuotedNewlines returns an Option that allows quoted data sections containing newlines in CSV data.
func AllowQuotedNewlines() Option { return allowQuotedNewlines{} }
type allowQuotedNewlines struct{}
func (opt allowQuotedNewlines) implementsOption() {}
func (opt allowQuotedNewlines) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.AllowQuotedNewlines = true
}
// IgnoreUnknownValues returns an Option that causes values not matching the schema to be tolerated.
// Unknown values are ignored. For CSV this ignores extra values at the end of a line.
// For JSON this ignores named values that do not match any column name.
// If this Option is not used, records containing unknown values are treated as bad records.
// The MaxBadRecords Option can be used to customize how bad records are handled.
func IgnoreUnknownValues() Option { return ignoreUnknownValues{} }
type ignoreUnknownValues struct{}
func (opt ignoreUnknownValues) implementsOption() {}
func (opt ignoreUnknownValues) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.IgnoreUnknownValues = true
}
func (c *Client) load(ctx context.Context, dst *Table, src *GCSReference, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationLoad{}
dst.customizeLoadDst(payload)
src.customizeLoadSrc(payload)
for _, opt := range options {
o, ok := opt.(loadOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeLoad(payload)
}
job.Configuration = &bq.JobConfiguration{
Load: payload,
}
return c.service.insertJob(ctx, job, c.projectID)
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,20 +15,23 @@
package bigquery
import (
"reflect"
"strings"
"testing"
"time"
"golang.org/x/net/context"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
bq "google.golang.org/api/bigquery/v2"
)
func defaultLoadJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Load: &bq.JobConfigurationLoad{
DestinationTable: &bq.TableReference{
ProjectId: "project-id",
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@@ -66,26 +69,76 @@ func bqNestedFieldSchema() *bq.TableFieldSchema {
}
func TestLoad(t *testing.T) {
defer fixRandomID("RANDOM")()
c := &Client{projectID: "client-project-id"}
testCases := []struct {
dst *Table
src *GCSReference
options []Option
want *bq.Job
dst *Table
src LoadSource
jobID string
location string
config LoadConfig
want *bq.Job
}{
{
dst: defaultTable(nil),
src: defaultGCS,
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
want: defaultLoadJob(),
},
{
dst: defaultTable(nil),
src: defaultGCS,
options: []Option{
MaxBadRecords(1),
AllowJaggedRows(),
AllowQuotedNewlines(),
IgnoreUnknownValues(),
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
location: "loc",
want: func() *bq.Job {
j := defaultLoadJob()
j.JobReference.Location = "loc"
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
jobID: "ajob",
config: LoadConfig{
CreateDisposition: CreateNever,
WriteDisposition: WriteTruncate,
Labels: map[string]string{"a": "b"},
TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond},
Clustering: &Clustering{Fields: []string{"cfield1"}},
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"},
},
src: NewGCSReference("uri"),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 1234,
}
j.Configuration.Load.Clustering = &bq.Clustering{
Fields: []string{"cfield1"},
}
j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
j.JobReference = &bq.JobReference{
JobId: "ajob",
ProjectId: "client-project-id",
}
j.Configuration.Load.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.MaxBadRecords = 1
g.AllowJaggedRows = true
g.AllowQuotedNewlines = true
g.IgnoreUnknownValues = true
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.MaxBadRecords = 1
@@ -96,33 +149,15 @@ func TestLoad(t *testing.T) {
}(),
},
{
dst: &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
},
options: []Option{CreateNever, WriteTruncate},
src: defaultGCS,
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
return j
}(),
},
{
dst: &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
},
src: defaultGCS,
options: []Option{
DestinationSchema(Schema{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.Schema = Schema{
stringFieldSchema(),
nestedFieldSchema(),
}),
},
}
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.Schema = &bq.TableSchema{
@@ -134,15 +169,16 @@ func TestLoad(t *testing.T) {
}(),
},
{
dst: defaultTable(nil),
src: &GCSReference{
uris: []string{"uri"},
SkipLeadingRows: 1,
SourceFormat: JSON,
Encoding: UTF_8,
FieldDelimiter: "\t",
Quote: "-",
},
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.SkipLeadingRows = 1
g.SourceFormat = JSON
g.Encoding = UTF_8
g.FieldDelimiter = "\t"
g.Quote = "-"
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SkipLeadingRows = 1
@@ -155,24 +191,22 @@ func TestLoad(t *testing.T) {
}(),
},
{
dst: defaultTable(nil),
src: &GCSReference{
uris: []string{"uri"},
Quote: "",
},
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
want: func() *bq.Job {
j := defaultLoadJob()
// Quote is left unset in GCSReference, so should be nil here.
j.Configuration.Load.Quote = nil
return j
}(),
},
{
dst: defaultTable(nil),
src: &GCSReference{
uris: []string{"uri"},
Quote: "",
ForceZeroQuote: true,
},
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.ForceZeroQuote = true
return g
}(),
want: func() *bq.Job {
j := defaultLoadJob()
empty := ""
@@ -180,19 +214,85 @@ func TestLoad(t *testing.T) {
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *ReaderSource {
r := NewReaderSource(strings.NewReader("foo"))
r.SkipLeadingRows = 1
r.SourceFormat = JSON
r.Encoding = UTF_8
r.FieldDelimiter = "\t"
r.Quote = "-"
return r
}(),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SourceUris = nil
j.Configuration.Load.SkipLeadingRows = 1
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
j.Configuration.Load.Encoding = "UTF-8"
j.Configuration.Load.FieldDelimiter = "\t"
hyphen := "-"
j.Configuration.Load.Quote = &hyphen
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *GCSReference {
g := NewGCSReference("uri")
g.SourceFormat = Avro
return g
}(),
config: LoadConfig{
UseAvroLogicalTypes: true,
},
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SourceFormat = "AVRO"
j.Configuration.Load.UseAvroLogicalTypes = true
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: func() *ReaderSource {
r := NewReaderSource(strings.NewReader("foo"))
r.SourceFormat = Avro
return r
}(),
config: LoadConfig{
UseAvroLogicalTypes: true,
},
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Load.SourceUris = nil
j.Configuration.Load.SourceFormat = "AVRO"
j.Configuration.Load.UseAvroLogicalTypes = true
return j
}(),
},
}
for _, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
for i, tc := range testCases {
loader := tc.dst.LoaderFrom(tc.src)
loader.JobID = tc.jobID
loader.Location = tc.location
tc.config.Src = tc.src
tc.config.Dst = tc.dst
loader.LoadConfig = tc.config
got, _ := loader.newJob()
checkJob(t, i, got, tc.want)
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
t.Errorf("err calling load: %v", err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("loading: got:\n%v\nwant:\n%v", s.Job, tc.want)
diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig,
cmp.AllowUnexported(Table{}, Client{}),
cmpopts.IgnoreUnexported(ReaderSource{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
}
}
}
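The cases above also exercise LoadConfig fields such as TimePartitioning, Clustering, and SchemaUpdateOptions; a hedged sketch of setting them on a Loader, with purely illustrative field values and names:
func loadPartitionedEvents(ctx context.Context, client *bigquery.Client) (*bigquery.Job, error) {
    gcsRef := bigquery.NewGCSReference("gs://my-bucket/events.csv") // hypothetical URI
    gcsRef.SkipLeadingRows = 1
    loader := client.Dataset("my_dataset").Table("events").LoaderFrom(gcsRef)
    // Day-partition the destination and expire partitions after 90 days.
    loader.TimePartitioning = &bigquery.TimePartitioning{Expiration: 90 * 24 * time.Hour}
    loader.Clustering = &bigquery.Clustering{Fields: []string{"customer_id"}}
    loader.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
    return loader.Run(ctx)
}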

348
vendor/cloud.google.com/go/bigquery/nulls.go generated vendored Normal file
View File

@@ -0,0 +1,348 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strconv"
"time"
"cloud.google.com/go/civil"
)
// NullInt64 represents a BigQuery INT64 that may be NULL.
type NullInt64 struct {
Int64 int64
Valid bool // Valid is true if Int64 is not NULL.
}
func (n NullInt64) String() string { return nullstr(n.Valid, n.Int64) }
// NullString represents a BigQuery STRING that may be NULL.
type NullString struct {
StringVal string
Valid bool // Valid is true if StringVal is not NULL.
}
func (n NullString) String() string { return nullstr(n.Valid, n.StringVal) }
// NullGeography represents a BigQuery GEOGRAPHY string that may be NULL.
type NullGeography struct {
GeographyVal string
Valid bool // Valid is true if GeographyVal is not NULL.
}
func (n NullGeography) String() string { return nullstr(n.Valid, n.GeographyVal) }
// NullFloat64 represents a BigQuery FLOAT64 that may be NULL.
type NullFloat64 struct {
Float64 float64
Valid bool // Valid is true if Float64 is not NULL.
}
func (n NullFloat64) String() string { return nullstr(n.Valid, n.Float64) }
// NullBool represents a BigQuery BOOL that may be NULL.
type NullBool struct {
Bool bool
Valid bool // Valid is true if Bool is not NULL.
}
func (n NullBool) String() string { return nullstr(n.Valid, n.Bool) }
// NullTimestamp represents a BigQuery TIMESTAMP that may be null.
type NullTimestamp struct {
Timestamp time.Time
Valid bool // Valid is true if Timestamp is not NULL.
}
func (n NullTimestamp) String() string { return nullstr(n.Valid, n.Timestamp) }
// NullDate represents a BigQuery DATE that may be null.
type NullDate struct {
Date civil.Date
Valid bool // Valid is true if Date is not NULL.
}
func (n NullDate) String() string { return nullstr(n.Valid, n.Date) }
// NullTime represents a BigQuery TIME that may be null.
type NullTime struct {
Time civil.Time
Valid bool // Valid is true if Time is not NULL.
}
func (n NullTime) String() string {
if !n.Valid {
return "<null>"
}
return CivilTimeString(n.Time)
}
// NullDateTime represents a BigQuery DATETIME that may be null.
type NullDateTime struct {
DateTime civil.DateTime
Valid bool // Valid is true if DateTime is not NULL.
}
func (n NullDateTime) String() string {
if !n.Valid {
return "<null>"
}
return CivilDateTimeString(n.DateTime)
}
// MarshalJSON converts the NullInt64 to JSON.
func (n NullInt64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Int64) }
// MarshalJSON converts the NullFloat64 to JSON.
func (n NullFloat64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Float64) }
// MarshalJSON converts the NullBool to JSON.
func (n NullBool) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Bool) }
// MarshalJSON converts the NullString to JSON.
func (n NullString) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.StringVal) }
// MarshalJSON converts the NullGeography to JSON.
func (n NullGeography) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.GeographyVal) }
// MarshalJSON converts the NullTimestamp to JSON.
func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) }
// MarshalJSON converts the NullDate to JSON.
func (n NullDate) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Date) }
// MarshalJSON converts the NullTime to JSON.
func (n NullTime) MarshalJSON() ([]byte, error) {
if !n.Valid {
return jsonNull, nil
}
return []byte(`"` + CivilTimeString(n.Time) + `"`), nil
}
// MarshalJSON converts the NullDateTime to JSON.
func (n NullDateTime) MarshalJSON() ([]byte, error) {
if !n.Valid {
return jsonNull, nil
}
return []byte(`"` + CivilDateTimeString(n.DateTime) + `"`), nil
}
func nullstr(valid bool, v interface{}) string {
if !valid {
return "NULL"
}
return fmt.Sprint(v)
}
var jsonNull = []byte("null")
func nulljson(valid bool, v interface{}) ([]byte, error) {
if !valid {
return jsonNull, nil
}
return json.Marshal(v)
}
// UnmarshalJSON converts JSON into a NullInt64.
func (n *NullInt64) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Int64 = 0
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.Int64); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullFloat64.
func (n *NullFloat64) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Float64 = 0
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.Float64); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullBool.
func (n *NullBool) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Bool = false
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.Bool); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullString.
func (n *NullString) UnmarshalJSON(b []byte) error {
n.Valid = false
n.StringVal = ""
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.StringVal); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullGeography.
func (n *NullGeography) UnmarshalJSON(b []byte) error {
n.Valid = false
n.GeographyVal = ""
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.GeographyVal); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullTimestamp.
func (n *NullTimestamp) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Timestamp = time.Time{}
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.Timestamp); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullDate.
func (n *NullDate) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Date = civil.Date{}
if bytes.Equal(b, jsonNull) {
return nil
}
if err := json.Unmarshal(b, &n.Date); err != nil {
return err
}
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullTime.
func (n *NullTime) UnmarshalJSON(b []byte) error {
n.Valid = false
n.Time = civil.Time{}
if bytes.Equal(b, jsonNull) {
return nil
}
s, err := strconv.Unquote(string(b))
if err != nil {
return err
}
t, err := civil.ParseTime(s)
if err != nil {
return err
}
n.Time = t
n.Valid = true
return nil
}
// UnmarshalJSON converts JSON into a NullDateTime.
func (n *NullDateTime) UnmarshalJSON(b []byte) error {
n.Valid = false
n.DateTime = civil.DateTime{}
if bytes.Equal(b, jsonNull) {
return nil
}
s, err := strconv.Unquote(string(b))
if err != nil {
return err
}
dt, err := parseCivilDateTime(s)
if err != nil {
return err
}
n.DateTime = dt
n.Valid = true
return nil
}
var (
typeOfNullInt64 = reflect.TypeOf(NullInt64{})
typeOfNullFloat64 = reflect.TypeOf(NullFloat64{})
typeOfNullBool = reflect.TypeOf(NullBool{})
typeOfNullString = reflect.TypeOf(NullString{})
typeOfNullGeography = reflect.TypeOf(NullGeography{})
typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{})
typeOfNullDate = reflect.TypeOf(NullDate{})
typeOfNullTime = reflect.TypeOf(NullTime{})
typeOfNullDateTime = reflect.TypeOf(NullDateTime{})
)
func nullableFieldType(t reflect.Type) FieldType {
switch t {
case typeOfNullInt64:
return IntegerFieldType
case typeOfNullFloat64:
return FloatFieldType
case typeOfNullBool:
return BooleanFieldType
case typeOfNullString:
return StringFieldType
case typeOfNullGeography:
return GeographyFieldType
case typeOfNullTimestamp:
return TimestampFieldType
case typeOfNullDate:
return DateFieldType
case typeOfNullTime:
return TimeFieldType
case typeOfNullDateTime:
return DateTimeFieldType
default:
return ""
}
}
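As a hedged illustration of how these wrappers are typically consumed, a struct with nullable fields can be passed to RowIterator.Next; a NULL column leaves Valid set to false. Column and type names here are hypothetical, and iterator.Done comes from google.golang.org/api/iterator:
type personRow struct {
    Name bigquery.NullString
    Age  bigquery.NullInt64
}

func printPeople(it *bigquery.RowIterator) error {
    for {
        var row personRow
        err := it.Next(&row)
        if err == iterator.Done {
            return nil
        }
        if err != nil {
            return err
        }
        if row.Age.Valid {
            fmt.Printf("%s is %d\n", row.Name.StringVal, row.Age.Int64)
        } else {
            fmt.Printf("%s has a NULL age\n", row.Name.StringVal)
        }
    }
}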

75
vendor/cloud.google.com/go/bigquery/nulls_test.go generated vendored Normal file
View File

@@ -0,0 +1,75 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"encoding/json"
"reflect"
"testing"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/testutil"
)
var (
nullsTestTime = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 1000}
nullsTestDateTime = civil.DateTime{Date: civil.Date{Year: 2016, Month: 11, Day: 5}, Time: nullsTestTime}
)
func TestNullsJSON(t *testing.T) {
for _, test := range []struct {
in interface{}
want string
}{
{&NullInt64{Valid: true, Int64: 3}, `3`},
{&NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
{&NullBool{Valid: true, Bool: true}, `true`},
{&NullString{Valid: true, StringVal: "foo"}, `"foo"`},
{&NullGeography{Valid: true, GeographyVal: "ST_GEOPOINT(47.649154, -122.350220)"}, `"ST_GEOPOINT(47.649154, -122.350220)"`},
{&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
{&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
{&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`},
{&NullDateTime{Valid: true, DateTime: nullsTestDateTime}, `"2016-11-05 07:50:22.000001"`},
{&NullInt64{}, `null`},
{&NullFloat64{}, `null`},
{&NullBool{}, `null`},
{&NullString{}, `null`},
{&NullGeography{}, `null`},
{&NullTimestamp{}, `null`},
{&NullDate{}, `null`},
{&NullTime{}, `null`},
{&NullDateTime{}, `null`},
} {
bytes, err := json.Marshal(test.in)
if err != nil {
t.Fatal(err)
}
if got, want := string(bytes), test.want; got != want {
t.Errorf("%#v: got %s, want %s", test.in, got, want)
}
typ := reflect.Indirect(reflect.ValueOf(test.in)).Type()
value := reflect.New(typ).Interface()
err = json.Unmarshal(bytes, value)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(value, test.in) {
t.Errorf("%#v: got %#v, want %#v", test.in, value, test.in)
}
}
}

38
vendor/cloud.google.com/go/bigquery/oc_test.go generated vendored Normal file
View File

@@ -0,0 +1,38 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
"testing"
"cloud.google.com/go/internal/testutil"
)
func TestOCTracing(t *testing.T) {
ctx := context.Background()
client := getClient(t)
defer client.Close()
te := testutil.NewTestExporter()
defer te.Unregister()
q := client.Query("select *")
q.Run(ctx) // Doesn't matter if we get an error; span should be created either way
if len(te.Spans) == 0 {
t.Fatalf("Expected some spans to be created, but got %d", 0)
}
}

370
vendor/cloud.google.com/go/bigquery/params.go generated vendored Normal file
View File

@@ -0,0 +1,370 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"encoding/base64"
"errors"
"fmt"
"math/big"
"reflect"
"regexp"
"time"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/fields"
bq "google.golang.org/api/bigquery/v2"
)
var (
// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
timestampFormat = "2006-01-02 15:04:05.999999-07:00"
// See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)
const nullableTagOption = "nullable"
func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
name, keep, opts, err := fields.ParseStandardTag("bigquery", t)
if err != nil {
return "", false, nil, err
}
if name != "" && !validFieldName.MatchString(name) {
return "", false, nil, invalidFieldNameError(name)
}
for _, opt := range opts {
if opt != nullableTagOption {
return "", false, nil, fmt.Errorf(
"bigquery: invalid tag option %q. The only valid option is %q",
opt, nullableTagOption)
}
}
return name, keep, opts, nil
}
type invalidFieldNameError string
func (e invalidFieldNameError) Error() string {
return fmt.Sprintf("bigquery: invalid name %q of field in struct", string(e))
}
var fieldCache = fields.NewCache(bqTagParser, nil, nil)
var (
int64ParamType = &bq.QueryParameterType{Type: "INT64"}
float64ParamType = &bq.QueryParameterType{Type: "FLOAT64"}
boolParamType = &bq.QueryParameterType{Type: "BOOL"}
stringParamType = &bq.QueryParameterType{Type: "STRING"}
bytesParamType = &bq.QueryParameterType{Type: "BYTES"}
dateParamType = &bq.QueryParameterType{Type: "DATE"}
timeParamType = &bq.QueryParameterType{Type: "TIME"}
dateTimeParamType = &bq.QueryParameterType{Type: "DATETIME"}
timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
numericParamType = &bq.QueryParameterType{Type: "NUMERIC"}
)
var (
typeOfDate = reflect.TypeOf(civil.Date{})
typeOfTime = reflect.TypeOf(civil.Time{})
typeOfDateTime = reflect.TypeOf(civil.DateTime{})
typeOfGoTime = reflect.TypeOf(time.Time{})
typeOfRat = reflect.TypeOf(&big.Rat{})
)
// A QueryParameter is a parameter to a query.
type QueryParameter struct {
// Name is used for named parameter mode.
// It must match the name in the query case-insensitively.
Name string
// Value is the value of the parameter.
//
// When you create a QueryParameter to send to BigQuery, the following Go types
// are supported, with their corresponding Bigquery types:
// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
// Note that uint, uint64 and uintptr are not supported, because
// they may contain values that cannot fit into a 64-bit signed integer.
// float32, float64: FLOAT64
// bool: BOOL
// string: STRING
// []byte: BYTES
// time.Time: TIMESTAMP
// *big.Rat: NUMERIC
// Arrays and slices of the above.
// Structs of the above. Only the exported fields are used.
//
// BigQuery does not support params of type GEOGRAPHY. For users wishing
// to parameterize Geography values, use string parameters and cast in the
// SQL query, e.g. `SELECT ST_GeogFromText(@string_param) as geo`
//
// When a QueryParameter is returned inside a QueryConfig from a call to
// Job.Config:
// Integers are of type int64.
// Floating-point values are of type float64.
// Arrays are of type []interface{}, regardless of the array element type.
// Structs are of type map[string]interface{}.
Value interface{}
}
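A minimal sketch of supplying parameters with named syntax, following the type mapping described above; the query text, project, and column names are hypothetical, and the standard time package is assumed:
func adultsSince(ctx context.Context, client *bigquery.Client) (*bigquery.RowIterator, error) {
    q := client.Query("SELECT name FROM `my-project.my_dataset.people` " +
        "WHERE age >= @min_age AND joined > @since")
    q.Parameters = []bigquery.QueryParameter{
        {Name: "min_age", Value: 21},                                        // int -> INT64
        {Name: "since", Value: time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC)}, // time.Time -> TIMESTAMP
    }
    return q.Read(ctx)
}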
func (p QueryParameter) toBQ() (*bq.QueryParameter, error) {
pv, err := paramValue(reflect.ValueOf(p.Value))
if err != nil {
return nil, err
}
pt, err := paramType(reflect.TypeOf(p.Value))
if err != nil {
return nil, err
}
return &bq.QueryParameter{
Name: p.Name,
ParameterValue: &pv,
ParameterType: pt,
}, nil
}
func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
if t == nil {
return nil, errors.New("bigquery: nil parameter")
}
switch t {
case typeOfDate:
return dateParamType, nil
case typeOfTime:
return timeParamType, nil
case typeOfDateTime:
return dateTimeParamType, nil
case typeOfGoTime:
return timestampParamType, nil
case typeOfRat:
return numericParamType, nil
}
switch t.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
return int64ParamType, nil
case reflect.Float32, reflect.Float64:
return float64ParamType, nil
case reflect.Bool:
return boolParamType, nil
case reflect.String:
return stringParamType, nil
case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 {
return bytesParamType, nil
}
fallthrough
case reflect.Array:
et, err := paramType(t.Elem())
if err != nil {
return nil, err
}
return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil
case reflect.Ptr:
if t.Elem().Kind() != reflect.Struct {
break
}
t = t.Elem()
fallthrough
case reflect.Struct:
var fts []*bq.QueryParameterTypeStructTypes
fields, err := fieldCache.Fields(t)
if err != nil {
return nil, err
}
for _, f := range fields {
pt, err := paramType(f.Type)
if err != nil {
return nil, err
}
fts = append(fts, &bq.QueryParameterTypeStructTypes{
Name: f.Name,
Type: pt,
})
}
return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
}
return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
}
func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
var res bq.QueryParameterValue
if !v.IsValid() {
return res, errors.New("bigquery: nil parameter")
}
t := v.Type()
switch t {
case typeOfDate:
res.Value = v.Interface().(civil.Date).String()
return res, nil
case typeOfTime:
// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
// (If we send nanoseconds, then when we try to read the result we get "query job
// missing destination table").
res.Value = CivilTimeString(v.Interface().(civil.Time))
return res, nil
case typeOfDateTime:
res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
return res, nil
case typeOfGoTime:
res.Value = v.Interface().(time.Time).Format(timestampFormat)
return res, nil
case typeOfRat:
res.Value = NumericString(v.Interface().(*big.Rat))
return res, nil
}
switch t.Kind() {
case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 {
res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
return res, nil
}
fallthrough
case reflect.Array:
var vals []*bq.QueryParameterValue
for i := 0; i < v.Len(); i++ {
val, err := paramValue(v.Index(i))
if err != nil {
return bq.QueryParameterValue{}, err
}
vals = append(vals, &val)
}
return bq.QueryParameterValue{ArrayValues: vals}, nil
case reflect.Ptr:
if t.Elem().Kind() != reflect.Struct {
return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
}
t = t.Elem()
v = v.Elem()
if !v.IsValid() {
// nil pointer becomes empty value
return res, nil
}
fallthrough
case reflect.Struct:
fields, err := fieldCache.Fields(t)
if err != nil {
return bq.QueryParameterValue{}, err
}
res.StructValues = map[string]bq.QueryParameterValue{}
for _, f := range fields {
fv := v.FieldByIndex(f.Index)
fp, err := paramValue(fv)
if err != nil {
return bq.QueryParameterValue{}, err
}
res.StructValues[f.Name] = fp
}
return res, nil
}
// None of the above: assume a scalar type. (If it's not a valid type,
// paramType will catch the error.)
res.Value = fmt.Sprint(v.Interface())
// Ensure empty string values are sent.
if res.Value == "" {
res.ForceSendFields = append(res.ForceSendFields, "Value")
}
return res, nil
}
func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) {
p := QueryParameter{Name: q.Name}
val, err := convertParamValue(q.ParameterValue, q.ParameterType)
if err != nil {
return QueryParameter{}, err
}
p.Value = val
return p, nil
}
var paramTypeToFieldType = map[string]FieldType{
int64ParamType.Type: IntegerFieldType,
float64ParamType.Type: FloatFieldType,
boolParamType.Type: BooleanFieldType,
stringParamType.Type: StringFieldType,
bytesParamType.Type: BytesFieldType,
dateParamType.Type: DateFieldType,
timeParamType.Type: TimeFieldType,
numericParamType.Type: NumericFieldType,
}
// Convert a parameter value from the service to a Go value. This is similar to, but
// not quite the same as, converting data values.
func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
switch qtype.Type {
case "ARRAY":
if qval == nil {
return []interface{}(nil), nil
}
return convertParamArray(qval.ArrayValues, qtype.ArrayType)
case "STRUCT":
if qval == nil {
return map[string]interface{}(nil), nil
}
return convertParamStruct(qval.StructValues, qtype.StructTypes)
case "TIMESTAMP":
return time.Parse(timestampFormat, qval.Value)
case "DATETIME":
return parseCivilDateTime(qval.Value)
default:
return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
}
}
// convertParamArray converts a query parameter array value to a Go value. It
// always returns a []interface{}.
func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
var vals []interface{}
for _, el := range elVals {
val, err := convertParamValue(el, elType)
if err != nil {
return nil, err
}
vals = append(vals, val)
}
return vals, nil
}
// convertParamStruct converts a query parameter struct value into a Go value. It
// always returns a map[string]interface{}.
func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) {
vals := map[string]interface{}{}
for _, st := range sTypes {
if sv, ok := sVals[st.Name]; ok {
val, err := convertParamValue(&sv, st.Type)
if err != nil {
return nil, err
}
vals[st.Name] = val
} else {
vals[st.Name] = nil
}
}
return vals, nil
}

385
vendor/cloud.google.com/go/bigquery/params_test.go generated vendored Normal file
View File

@@ -0,0 +1,385 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"context"
"errors"
"math"
"math/big"
"reflect"
"testing"
"time"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)
var scalarTests = []struct {
val interface{} // The Go value
wantVal string // paramValue's desired output
wantType *bq.QueryParameterType // paramType's desired output
}{
{int64(0), "0", int64ParamType},
{3.14, "3.14", float64ParamType},
{3.14159e-87, "3.14159e-87", float64ParamType},
{true, "true", boolParamType},
{"string", "string", stringParamType},
{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType},
{math.NaN(), "NaN", float64ParamType},
{[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo"
{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
"2016-03-20 04:22:09.000005-01:02",
timestampParamType},
{civil.Date{Year: 2016, Month: 3, Day: 20}, "2016-03-20", dateParamType},
{civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}, "04:05:06.789000", timeParamType},
{civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}},
"2016-03-20 04:05:06.789000",
dateTimeParamType},
{big.NewRat(12345, 1000), "12.345000000", numericParamType},
}
type (
S1 struct {
A int
B *S2
C bool
}
S2 struct {
D string
}
)
var (
s1 = S1{
A: 1,
B: &S2{D: "s"},
C: true,
}
s1ParamType = &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "A", Type: int64ParamType},
{Name: "B", Type: &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "D", Type: stringParamType},
},
}},
{Name: "C", Type: boolParamType},
},
}
s1ParamValue = bq.QueryParameterValue{
StructValues: map[string]bq.QueryParameterValue{
"A": sval("1"),
"B": {
StructValues: map[string]bq.QueryParameterValue{
"D": sval("s"),
},
},
"C": sval("true"),
},
}
s1ParamReturnValue = map[string]interface{}{
"A": int64(1),
"B": map[string]interface{}{"D": "s"},
"C": true,
}
)
func sval(s string) bq.QueryParameterValue {
return bq.QueryParameterValue{Value: s}
}
func TestParamValueScalar(t *testing.T) {
for _, test := range scalarTests {
got, err := paramValue(reflect.ValueOf(test.val))
if err != nil {
t.Errorf("%v: got %v, want nil", test.val, err)
continue
}
want := sval(test.wantVal)
if !testutil.Equal(got, want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
}
}
}
func TestParamValueArray(t *testing.T) {
qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{
{Value: "1"},
{Value: "2"},
},
}
for _, test := range []struct {
val interface{}
want bq.QueryParameterValue
}{
{[]int(nil), bq.QueryParameterValue{}},
{[]int{}, bq.QueryParameterValue{}},
{[]int{1, 2}, qpv},
{[2]int{1, 2}, qpv},
} {
got, err := paramValue(reflect.ValueOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want)
}
}
}
func TestParamValueStruct(t *testing.T) {
got, err := paramValue(reflect.ValueOf(s1))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, s1ParamValue) {
t.Errorf("got %+v\nwant %+v", got, s1ParamValue)
}
}
func TestParamValueErrors(t *testing.T) {
// paramValue lets a few invalid types through, but paramType catches them.
// Since we never call one without the other, that's fine.
for _, val := range []interface{}{nil, new([]int)} {
_, err := paramValue(reflect.ValueOf(val))
if err == nil {
t.Errorf("%v (%T): got nil, want error", val, val)
}
}
}
func TestParamType(t *testing.T) {
for _, test := range scalarTests {
got, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.wantType) {
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType)
}
}
for _, test := range []struct {
val interface{}
want *bq.QueryParameterType
}{
{uint32(32767), int64ParamType},
{[]byte("foo"), bytesParamType},
{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
{S1{}, s1ParamType},
} {
got, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
}
}
}
func TestParamTypeErrors(t *testing.T) {
for _, val := range []interface{}{
nil, uint(0), new([]int), make(chan int),
} {
_, err := paramType(reflect.TypeOf(val))
if err == nil {
t.Errorf("%v (%T): got nil, want error", val, val)
}
}
}
func TestConvertParamValue(t *testing.T) {
// Scalars.
for _, test := range scalarTests {
pval, err := paramValue(reflect.ValueOf(test.val))
if err != nil {
t.Fatal(err)
}
ptype, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
got, err := convertParamValue(&pval, ptype)
if err != nil {
t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err)
}
if !testutil.Equal(got, test.val) {
t.Errorf("%#v: got %#v", test.val, got)
}
}
// Arrays.
for _, test := range []struct {
pval *bq.QueryParameterValue
want []interface{}
}{
{
&bq.QueryParameterValue{},
nil,
},
{
&bq.QueryParameterValue{
ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}},
},
[]interface{}{int64(1), int64(2)},
},
} {
ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}
got, err := convertParamValue(test.pval, ptype)
if err != nil {
t.Fatalf("%+v: %v", test.pval, err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want)
}
}
// Structs.
got, err := convertParamValue(&s1ParamValue, s1ParamType)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, s1ParamReturnValue) {
t.Errorf("got %+v, want %+v", got, s1ParamReturnValue)
}
}
func TestIntegration_ScalarParam(t *testing.T) {
roundToMicros := cmp.Transformer("RoundToMicros",
func(t time.Time) time.Time { return t.Round(time.Microsecond) })
c := getClient(t)
for _, test := range scalarTests {
gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(gotData, test.val, roundToMicros) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val)
}
if !testutil.Equal(gotParam, test.val, roundToMicros) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val)
}
}
}
func TestIntegration_OtherParam(t *testing.T) {
c := getClient(t)
for _, test := range []struct {
val interface{}
wantData interface{}
wantParam interface{}
}{
{[]int(nil), []Value(nil), []interface{}(nil)},
{[]int{}, []Value(nil), []interface{}(nil)},
{
[]int{1, 2},
[]Value{int64(1), int64(2)},
[]interface{}{int64(1), int64(2)},
},
{
[3]int{1, 2, 3},
[]Value{int64(1), int64(2), int64(3)},
[]interface{}{int64(1), int64(2), int64(3)},
},
{
S1{},
[]Value{int64(0), nil, false},
map[string]interface{}{
"A": int64(0),
"B": nil,
"C": false,
},
},
{
s1,
[]Value{int64(1), []Value{"s"}, true},
s1ParamReturnValue,
},
} {
gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(gotData, test.wantData) {
t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
test.val, gotData, gotData, test.wantData, test.wantData)
}
if !testutil.Equal(gotParam, test.wantParam) {
t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
test.val, gotParam, gotParam, test.wantParam, test.wantParam)
}
}
}
// paramRoundTrip passes x as a query parameter to BigQuery. It returns
// the resulting data value from running the query and the parameter value from
// the returned job configuration.
func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) {
ctx := context.Background()
q := c.Query("select ?")
q.Parameters = []QueryParameter{{Value: x}}
job, err := q.Run(ctx)
if err != nil {
return nil, nil, err
}
it, err := job.Read(ctx)
if err != nil {
return nil, nil, err
}
var val []Value
err = it.Next(&val)
if err != nil {
return nil, nil, err
}
if len(val) != 1 {
return nil, nil, errors.New("wrong number of values")
}
conf, err := job.Config()
if err != nil {
return nil, nil, err
}
return val[0], conf.(*QueryConfig).Parameters[0].Value, nil
}
func TestQueryParameter_toBQ(t *testing.T) {
tests := []struct {
in QueryParameter
want []string
}{
{
in: QueryParameter{Name: "name", Value: ""},
want: []string{"Value"},
},
}
for _, test := range tests {
q, err := test.in.toBQ()
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
got := q.ParameterValue.ForceSendFields
if !cmp.Equal(test.want, got) {
t.Fatalf("want %v, got %v", test.want, got)
}
}
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,10 +14,20 @@
package bigquery
import bq "google.golang.org/api/bigquery/v2"
import (
"context"
"errors"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)
// QueryConfig holds the configuration for a query job.
type QueryConfig struct {
// Dst is the table into which the results of the query will be written.
// If this field is nil, a temporary table will be created.
Dst *Table
// Query represents a query to be executed. Use Client.Query to create a query.
type Query struct {
// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
Q string
@@ -26,19 +36,293 @@ type Query struct {
DefaultProjectID string
DefaultDatasetID string
// TableDefinitions describes data sources outside of BigQuery.
// The map keys may be used as table names in the query string.
//
// When a QueryConfig is returned from Job.Config, the map values
// are always of type *ExternalDataConfig.
TableDefinitions map[string]ExternalData
// CreateDisposition specifies the circumstances under which the destination table will be created.
// The default is CreateIfNeeded.
CreateDisposition TableCreateDisposition
// WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteEmpty.
WriteDisposition TableWriteDisposition
// DisableQueryCache prevents results being fetched from the query cache.
// If this field is false, results are fetched from the cache if they are available.
// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
// Cached results are only available when TableID is unspecified in the query's destination Table.
// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
DisableQueryCache bool
// DisableFlattenedResults prevents results being flattened.
// If this field is false, results from nested and repeated fields are flattened.
// DisableFlattenedResults implies AllowLargeResults
// For more information, see https://cloud.google.com/bigquery/docs/data#nested
DisableFlattenedResults bool
// AllowLargeResults allows the query to produce arbitrarily large result tables.
// The destination must be a table.
// When using this option, queries will take longer to execute, even if the result set is small.
// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
AllowLargeResults bool
// Priority specifies the priority with which to schedule the query.
// The default priority is InteractivePriority.
// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
Priority QueryPriority
// MaxBillingTier sets the maximum billing tier for a Query.
// Queries that have resource usage beyond this tier will fail (without
// incurring a charge). If this field is zero, the project default will be used.
MaxBillingTier int
// MaxBytesBilled limits the number of bytes billed for
// this job. Queries that would exceed this limit will fail (without incurring
// a charge).
// If this field is less than 1, the project default will be
// used.
MaxBytesBilled int64
// UseStandardSQL causes the query to use standard SQL. The default.
// Deprecated: use UseLegacySQL.
UseStandardSQL bool
// UseLegacySQL causes the query to use legacy SQL.
UseLegacySQL bool
// Parameters is a list of query parameters. The presence of parameters
// implies the use of standard SQL.
// If the query uses positional syntax ("?"), then no parameter may have a name.
// If the query uses named syntax ("@p"), then all parameters must have names.
// It is illegal to mix positional and named syntax.
Parameters []QueryParameter
// TimePartitioning specifies time-based partitioning
// for the destination table.
TimePartitioning *TimePartitioning
// Clustering specifies the data clustering configuration for the destination table.
Clustering *Clustering
// The labels associated with this job.
Labels map[string]string
// If true, don't actually run this job. A valid query will return a mostly
// empty response with some processing statistics, while an invalid query will
// return the same error it would if it wasn't a dry run.
//
// Query.Read will fail with dry-run queries. Call Query.Run instead, and then
// call LastStatus on the returned job to get statistics. Calling Status on a
// dry-run job will fail.
DryRun bool
// Custom encryption configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfig *EncryptionConfig
// Allows the schema of the destination table to be updated as a side effect of
// the query job.
SchemaUpdateOptions []string
}
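A hedged sketch of filling in a few of these fields before running a query; the destination dataset and table are illustrative, and the public shakespeare sample is used only as an example source:
func wordCountsToTable(ctx context.Context, client *bigquery.Client) (*bigquery.Job, error) {
    q := client.Query("SELECT word, word_count FROM `bigquery-public-data.samples.shakespeare`")
    q.Dst = client.Dataset("my_dataset").Table("word_counts")
    q.WriteDisposition = bigquery.WriteTruncate
    q.Priority = bigquery.BatchPriority
    q.Labels = map[string]string{"team": "analytics"}
    return q.Run(ctx)
}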
func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
qconf := &bq.JobConfigurationQuery{
Query: qc.Q,
CreateDisposition: string(qc.CreateDisposition),
WriteDisposition: string(qc.WriteDisposition),
AllowLargeResults: qc.AllowLargeResults,
Priority: string(qc.Priority),
MaximumBytesBilled: qc.MaxBytesBilled,
TimePartitioning: qc.TimePartitioning.toBQ(),
Clustering: qc.Clustering.toBQ(),
DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(),
SchemaUpdateOptions: qc.SchemaUpdateOptions,
}
if len(qc.TableDefinitions) > 0 {
qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
}
for name, data := range qc.TableDefinitions {
qconf.TableDefinitions[name] = data.toBQ()
}
if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" {
qconf.DefaultDataset = &bq.DatasetReference{
DatasetId: qc.DefaultDatasetID,
ProjectId: qc.DefaultProjectID,
}
}
if tier := int64(qc.MaxBillingTier); tier > 0 {
qconf.MaximumBillingTier = &tier
}
f := false
if qc.DisableQueryCache {
qconf.UseQueryCache = &f
}
if qc.DisableFlattenedResults {
qconf.FlattenResults = &f
// DisableFlattenResults implies AllowLargeResults.
qconf.AllowLargeResults = true
}
if qc.UseStandardSQL && qc.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
if len(qc.Parameters) > 0 && qc.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
}
ptrue := true
pfalse := false
if qc.UseLegacySQL {
qconf.UseLegacySql = &ptrue
} else {
qconf.UseLegacySql = &pfalse
}
if qc.Dst != nil && !qc.Dst.implicitTable() {
qconf.DestinationTable = qc.Dst.toBQ()
}
for _, p := range qc.Parameters {
qp, err := p.toBQ()
if err != nil {
return nil, err
}
qconf.QueryParameters = append(qconf.QueryParameters, qp)
}
return &bq.JobConfiguration{
Labels: qc.Labels,
DryRun: qc.DryRun,
Query: qconf,
}, nil
}
func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
qq := q.Query
qc := &QueryConfig{
Labels: q.Labels,
DryRun: q.DryRun,
Q: qq.Query,
CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
WriteDisposition: TableWriteDisposition(qq.WriteDisposition),
AllowLargeResults: qq.AllowLargeResults,
Priority: QueryPriority(qq.Priority),
MaxBytesBilled: qq.MaximumBytesBilled,
UseLegacySQL: qq.UseLegacySql == nil || *qq.UseLegacySql,
TimePartitioning: bqToTimePartitioning(qq.TimePartitioning),
Clustering: bqToClustering(qq.Clustering),
DestinationEncryptionConfig: bqToEncryptionConfig(qq.DestinationEncryptionConfiguration),
SchemaUpdateOptions: qq.SchemaUpdateOptions,
}
qc.UseStandardSQL = !qc.UseLegacySQL
if len(qq.TableDefinitions) > 0 {
qc.TableDefinitions = make(map[string]ExternalData)
}
for name, qedc := range qq.TableDefinitions {
edc, err := bqToExternalDataConfig(&qedc)
if err != nil {
return nil, err
}
qc.TableDefinitions[name] = edc
}
if qq.DefaultDataset != nil {
qc.DefaultProjectID = qq.DefaultDataset.ProjectId
qc.DefaultDatasetID = qq.DefaultDataset.DatasetId
}
if qq.MaximumBillingTier != nil {
qc.MaxBillingTier = int(*qq.MaximumBillingTier)
}
if qq.UseQueryCache != nil && !*qq.UseQueryCache {
qc.DisableQueryCache = true
}
if qq.FlattenResults != nil && !*qq.FlattenResults {
qc.DisableFlattenedResults = true
}
if qq.DestinationTable != nil {
qc.Dst = bqToTable(qq.DestinationTable, c)
}
for _, qp := range qq.QueryParameters {
p, err := bqToQueryParameter(qp)
if err != nil {
return nil, err
}
qc.Parameters = append(qc.Parameters, p)
}
return qc, nil
}
// QueryPriority specifies a priority with which a query is to be executed.
type QueryPriority string
const (
// BatchPriority specifies that the query should be scheduled with the
// batch priority. BigQuery queues each batch query on your behalf, and
// starts the query as soon as idle resources are available, usually within
// a few minutes. If BigQuery hasn't started the query within 24 hours,
// BigQuery changes the job priority to interactive. Batch queries don't
// count towards your concurrent rate limit, which can make it easier to
// start many queries at once.
//
// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#batchqueries.
BatchPriority QueryPriority = "BATCH"
// InteractivePriority specifies that the query should be scheduled with
// interactive priority, which means that the query is executed as soon as
// possible. Interactive queries count towards your concurrent rate limit
// and your daily limit. It is the default priority with which queries get
// executed.
//
// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#queries.
InteractivePriority QueryPriority = "INTERACTIVE"
)
// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
JobIDConfig
QueryConfig
client *Client
}
func (q *Query) implementsSource() {}
func (q *Query) implementsReadSource() {}
func (q *Query) customizeQuerySrc(conf *bq.JobConfigurationQuery) {
conf.Query = q.Q
if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
conf.DefaultDataset = &bq.DatasetReference{
DatasetId: q.DefaultDatasetID,
ProjectId: q.DefaultProjectID,
}
// Query creates a query with string q.
// The returned Query may optionally be further configured before its Run method is called.
func (c *Client) Query(q string) *Query {
return &Query{
client: c,
QueryConfig: QueryConfig{Q: q},
}
}
// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Query.Run")
defer func() { trace.EndSpan(ctx, err) }()
job, err := q.newJob()
if err != nil {
return nil, err
}
j, err = q.client.insertJob(ctx, job, nil)
if err != nil {
return nil, err
}
return j, nil
}
func (q *Query) newJob() (*bq.Job, error) {
config, err := q.QueryConfig.toBQ()
if err != nil {
return nil, err
}
return &bq.Job{
JobReference: q.JobIDConfig.createJobRef(q.client),
Configuration: config,
}, nil
}
// Read submits a query for execution and returns the results via a RowIterator.
// It is a shorthand for Query.Run followed by Job.Read.
func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
job, err := q.Run(ctx)
if err != nil {
return nil, err
}
return job.Read(ctx)
}
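Because Read fails for dry-run queries, a dry run goes through Run and LastStatus as the QueryConfig comment notes; a minimal sketch, with an illustrative query:
func estimateBytes(ctx context.Context, client *bigquery.Client) (int64, error) {
    q := client.Query("SELECT * FROM `bigquery-public-data.samples.shakespeare`")
    q.DryRun = true
    job, err := q.Run(ctx) // Read would fail here; Run returns without executing the query.
    if err != nil {
        return 0, err
    }
    status := job.LastStatus() // Status would call the service, which a dry-run job cannot do.
    if err := status.Err(); err != nil {
        return 0, err
    }
    return status.Statistics.TotalBytesProcessed, nil
}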

View File

@@ -1,148 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type queryOption interface {
customizeQuery(conf *bq.JobConfigurationQuery)
}
// DisableQueryCache returns an Option that prevents results being fetched from the query cache.
// If this Option is not used, results are fetched from the cache if they are available.
// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
// Cached results are only available when TableID is unspecified in the query's destination Table.
// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
func DisableQueryCache() Option { return disableQueryCache{} }
type disableQueryCache struct{}
func (opt disableQueryCache) implementsOption() {}
func (opt disableQueryCache) customizeQuery(conf *bq.JobConfigurationQuery) {
f := false
conf.UseQueryCache = &f
}
// DisableFlattenedResults returns an Option that prevents results being flattened.
// If this Option is not used, results from nested and repeated fields are flattened.
// DisableFlattenedResults implies AllowLargeResults
// For more information, see https://cloud.google.com/bigquery/docs/data#nested
func DisableFlattenedResults() Option { return disableFlattenedResults{} }
type disableFlattenedResults struct{}
func (opt disableFlattenedResults) implementsOption() {}
func (opt disableFlattenedResults) customizeQuery(conf *bq.JobConfigurationQuery) {
f := false
conf.FlattenResults = &f
// DisableFlattenedResults implies AllowLargeResults
allowLargeResults{}.customizeQuery(conf)
}
// AllowLargeResults returns an Option that allows the query to produce arbitrarily large result tables.
// The destination must be a table.
// When using this option, queries will take longer to execute, even if the result set is small.
// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
func AllowLargeResults() Option { return allowLargeResults{} }
type allowLargeResults struct{}
func (opt allowLargeResults) implementsOption() {}
func (opt allowLargeResults) customizeQuery(conf *bq.JobConfigurationQuery) {
conf.AllowLargeResults = true
}
// JobPriority returns an Option that causes a query to be scheduled with the specified priority.
// The default priority is InteractivePriority.
// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
func JobPriority(priority string) Option { return jobPriority(priority) }
type jobPriority string
func (opt jobPriority) implementsOption() {}
func (opt jobPriority) customizeQuery(conf *bq.JobConfigurationQuery) {
conf.Priority = string(opt)
}
const (
BatchPriority = "BATCH"
InteractivePriority = "INTERACTIVE"
)
// MaxBillingTier returns an Option that sets the maximum billing tier for a Query.
// Queries that have resource usage beyond this tier will fail (without
// incurring a charge). If this Option is not used, the project default will be used.
func MaxBillingTier(tier int) Option { return maxBillingTier(tier) }
type maxBillingTier int
func (opt maxBillingTier) implementsOption() {}
func (opt maxBillingTier) customizeQuery(conf *bq.JobConfigurationQuery) {
tier := int64(opt)
conf.MaximumBillingTier = &tier
}
// MaxBytesBilled returns an Option that limits the number of bytes billed for
// this job. Queries that would exceed this limit will fail (without incurring
// a charge).
// If this Option is not used, or bytes is < 1, the project default will be
// used.
func MaxBytesBilled(bytes int64) Option { return maxBytesBilled(bytes) }
type maxBytesBilled int64
func (opt maxBytesBilled) implementsOption() {}
func (opt maxBytesBilled) customizeQuery(conf *bq.JobConfigurationQuery) {
if opt >= 1 {
conf.MaximumBytesBilled = int64(opt)
}
}
func (c *Client) query(ctx context.Context, dst *Table, src *Query, options []Option) (*Job, error) {
job, options := initJobProto(c.projectID, options)
payload := &bq.JobConfigurationQuery{}
dst.customizeQueryDst(payload)
src.customizeQuerySrc(payload)
for _, opt := range options {
o, ok := opt.(queryOption)
if !ok {
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
}
o.customizeQuery(payload)
}
job.Configuration = &bq.JobConfiguration{
Query: payload,
}
j, err := c.service.insertJob(ctx, job, c.projectID)
if err != nil {
return nil, err
}
j.isQuery = true
return j, nil
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,20 +15,22 @@
package bigquery
import (
"reflect"
"testing"
"time"
"golang.org/x/net/context"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
)
func defaultQueryJob() *bq.Job {
pfalse := false
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
DestinationTable: &bq.TableReference{
ProjectId: "project-id",
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@@ -37,34 +39,60 @@ func defaultQueryJob() *bq.Job {
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
UseLegacySql: &pfalse,
},
},
}
}
var defaultQuery = &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
}
func TestQuery(t *testing.T) {
defer fixRandomID("RANDOM")()
c := &Client{
projectID: "client-project-id",
}
testCases := []struct {
dst *Table
src *Query
options []Option
want *bq.Job
dst *Table
src *QueryConfig
jobIDConfig JobIDConfig
want *bq.Job
}{
{
dst: defaultTable(nil),
dst: c.Dataset("dataset-id").Table("table-id"),
src: defaultQuery,
want: defaultQueryJob(),
},
{
dst: defaultTable(nil),
src: &Query{
Q: "query string",
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
Labels: map[string]string{"a": "b"},
DryRun: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.DryRun = true
j.Configuration.Query.DefaultDataset = nil
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true},
src: &QueryConfig{Q: "query string"},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DefaultDataset = nil
j.JobReference.JobId = "jobID-RANDOM"
return j
}(),
},
{
dst: &Table{},
src: defaultQuery,
@@ -74,25 +102,85 @@ func TestQuery(t *testing.T) {
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
TableDefinitions: map[string]ExternalData{
"atable": func() *GCSReference {
g := NewGCSReference("uri")
g.AllowJaggedRows = true
g.AllowQuotedNewlines = true
g.Compression = Gzip
g.Encoding = UTF_8
g.FieldDelimiter = ";"
g.IgnoreUnknownValues = true
g.MaxBadRecords = 1
g.Quote = "'"
g.SkipLeadingRows = 2
g.Schema = Schema{{Name: "name", Type: StringFieldType}}
return g
}(),
},
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DefaultDataset = nil
td := make(map[string]bq.ExternalDataConfiguration)
quote := "'"
td["atable"] = bq.ExternalDataConfiguration{
Compression: "GZIP",
IgnoreUnknownValues: true,
MaxBadRecords: 1,
SourceFormat: "CSV", // must be explicitly set.
SourceUris: []string{"uri"},
CsvOptions: &bq.CsvOptions{
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: "UTF-8",
FieldDelimiter: ";",
SkipLeadingRows: 2,
Quote: &quote,
},
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
{Name: "name", Type: "STRING"},
},
},
}
j.Configuration.Query.TableDefinitions = td
return j
}(),
},
{
dst: &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
},
src: defaultQuery,
options: []Option{CreateNever, WriteTruncate},
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
CreateDisposition: CreateNever,
WriteDisposition: WriteTruncate,
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DestinationTable.ProjectId = "project-id"
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
return j
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{DisableQueryCache()},
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
DisableQueryCache: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
f := false
@@ -101,9 +189,13 @@ func TestQuery(t *testing.T) {
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{AllowLargeResults()},
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
AllowLargeResults: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.AllowLargeResults = true
@@ -111,9 +203,13 @@ func TestQuery(t *testing.T) {
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{DisableFlattenedResults()},
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
DisableFlattenedResults: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
f := false
@@ -123,9 +219,13 @@ func TestQuery(t *testing.T) {
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{JobPriority("low")},
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
Priority: QueryPriority("low"),
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.Priority = "low"
@@ -133,9 +233,14 @@ func TestQuery(t *testing.T) {
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{MaxBillingTier(3), MaxBytesBilled(5)},
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
MaxBillingTier: 3,
MaxBytesBilled: 5,
},
want: func() *bq.Job {
j := defaultQueryJob()
tier := int64(3)
@@ -145,24 +250,159 @@ func TestQuery(t *testing.T) {
}(),
},
{
dst: defaultTable(nil),
src: defaultQuery,
options: []Option{MaxBytesBilled(-1)},
want: defaultQueryJob(),
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
UseStandardSQL: true,
},
want: defaultQueryJob(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
UseLegacySQL: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
ptrue := true
j.Configuration.Query.UseLegacySql = &ptrue
j.Configuration.Query.ForceSendFields = nil
return j
}(),
},
}
for _, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
}
if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
t.Errorf("err calling query: %v", err)
for i, tc := range testCases {
query := c.Query("")
query.JobIDConfig = tc.jobIDConfig
query.QueryConfig = *tc.src
query.Dst = tc.dst
got, err := query.newJob()
if err != nil {
t.Errorf("#%d: err calling query: %v", i, err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want)
checkJob(t, i, got, tc.want)
// Round-trip.
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
wantConfig := query.QueryConfig
// We set AllowLargeResults to true when DisableFlattenedResults is true.
if wantConfig.DisableFlattenedResults {
wantConfig.AllowLargeResults = true
}
// A QueryConfig with neither UseXXXSQL field set is equivalent
// to one where UseStandardSQL = true.
if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL {
wantConfig.UseStandardSQL = true
}
// Treat nil and empty tables the same, and ignore the client.
tableEqual := func(t1, t2 *Table) bool {
if t1 == nil {
t1 = &Table{}
}
if t2 == nil {
t2 = &Table{}
}
return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID
}
// A table definition that is a GCSReference round-trips as an ExternalDataConfig.
// TODO(jba): see if there is a way to express this with a transformer.
gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig {
q := g.toBQ()
e, _ := bqToExternalDataConfig(&q)
return e
}
externalDataEqual := func(e1, e2 ExternalData) bool {
if r, ok := e1.(*GCSReference); ok {
e1 = gcsRefToEDC(r)
}
if r, ok := e2.(*GCSReference); ok {
e2 = gcsRefToEDC(r)
}
return cmp.Equal(e1, e2)
}
diff := testutil.Diff(jc.(*QueryConfig), &wantConfig,
cmp.Comparer(tableEqual),
cmp.Comparer(externalDataEqual),
)
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
}
}
}
func TestConfiguringQuery(t *testing.T) {
c := &Client{
projectID: "project-id",
}
query := c.Query("q")
query.JobID = "ajob"
query.DefaultProjectID = "def-project-id"
query.DefaultDatasetID = "def-dataset-id"
query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
query.Clustering = &Clustering{
Fields: []string{"cfield1"},
}
query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"}
query.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
// Note: Other configuration fields are tested in other tests above.
// A lot of that can be consolidated once Client.Copy is gone.
pfalse := false
want := &bq.Job{
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
Query: "q",
DefaultDataset: &bq.DatasetReference{
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
UseLegacySql: &pfalse,
TimePartitioning: &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
Clustering: &bq.Clustering{Fields: []string{"cfield1"}},
DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"},
},
},
JobReference: &bq.JobReference{
JobId: "ajob",
ProjectId: "project-id",
},
}
got, err := query.newJob()
if err != nil {
t.Fatalf("err calling Query.newJob: %v", err)
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("querying: -got +want:\n%s", diff)
}
}
func TestQueryLegacySQL(t *testing.T) {
c := &Client{projectID: "project-id"}
q := c.Query("q")
q.UseStandardSQL = true
q.UseLegacySQL = true
_, err := q.newJob()
if err == nil {
t.Error("UseStandardSQL and UseLegacySQL: got nil, want error")
}
q = c.Query("q")
q.Parameters = []QueryParameter{{Name: "p", Value: 3}}
q.UseLegacySQL = true
_, err = q.newJob()
if err == nil {
t.Error("Parameters and UseLegacySQL: got nil, want error")
}
}

vendor/cloud.google.com/go/bigquery/random.go generated vendored Normal file

@@ -0,0 +1,56 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"math/rand"
"os"
"sync"
"time"
)
// Support for random values (typically job IDs and insert IDs).
const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
var (
rngMu sync.Mutex
rng = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
)
// For testing.
var randomIDFn = randomID
// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
// suffixes.
const randomIDLen = 27
func randomID() string {
// This is used for both job IDs and insert IDs.
var b [randomIDLen]byte
rngMu.Lock()
for i := 0; i < len(b); i++ {
b[i] = alphanum[rng.Intn(len(alphanum))]
}
rngMu.Unlock()
return string(b[:])
}
// Seed seeds this package's random number generator, used for generating job and
// insert IDs. Use Seed to obtain repeatable, deterministic behavior from bigquery
// clients. Seed should be called before any clients are created.
func Seed(s int64) {
rng = rand.New(rand.NewSource(s))
}
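// Illustrative sketch, not part of the vendored source: seeding before any
// client is created makes generated job and insert IDs deterministic, which
// helps tests compare request payloads byte-for-byte.
func exampleDeterministicIDs() bool {
	Seed(42)
	first := randomID() // 27 alphanumeric characters, per randomIDLen above
	Seed(42)
	second := randomID()
	return first == second // true: the same seed yields the same ID sequence
}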


@@ -1,70 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import "golang.org/x/net/context"
// RecordsPerRequest returns a ReadOption that sets the number of records to fetch per request when streaming data from BigQuery.
func RecordsPerRequest(n int64) ReadOption { return recordsPerRequest(n) }
type recordsPerRequest int64
func (opt recordsPerRequest) customizeRead(conf *pagingConf) {
conf.recordsPerRequest = int64(opt)
conf.setRecordsPerRequest = true
}
// StartIndex returns a ReadOption that sets the zero-based index of the row to start reading from.
func StartIndex(i uint64) ReadOption { return startIndex(i) }
type startIndex uint64
func (opt startIndex) customizeRead(conf *pagingConf) {
conf.startIndex = uint64(opt)
}
func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
return s.readTabledata(ctx, conf, token)
}
// Read fetches the contents of the table.
func (t *Table) Read(_ context.Context, options ...ReadOption) (*Iterator, error) {
conf := &readTableConf{}
t.customizeReadSrc(conf)
for _, o := range options {
o.customizeRead(&conf.paging)
}
return newIterator(t.service, conf), nil
}
func (conf *readQueryConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
return s.readQuery(ctx, conf, token)
}
// Read fetches the results of a query job.
func (j *Job) Read(_ context.Context, options ...ReadOption) (*Iterator, error) {
conf := &readQueryConf{}
if err := j.customizeReadQuery(conf); err != nil {
return nil, err
}
for _, o := range options {
o.customizeRead(&conf.paging)
}
return newIterator(j.service, conf), nil
}
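// Illustrative sketch, not part of the vendored source: how the Option-based
// read API shown above (removed by this change) was driven from caller code.
// The ctx and table values are assumed to exist.
func exampleTableRead(ctx context.Context, t *Table) error {
	it, err := t.Read(ctx, RecordsPerRequest(500), StartIndex(0))
	if err != nil {
		return err
	}
	for it.Next(ctx) {
		var row ValueList
		if err := it.Get(&row); err != nil {
			return err
		}
		_ = row // process the row here
	}
	return it.Err()
}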


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,92 +15,102 @@
package bigquery
import (
"context"
"errors"
"reflect"
"testing"
"golang.org/x/net/context"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
)
type readTabledataArgs struct {
conf *readTableConf
tok string
type pageFetcherArgs struct {
table *Table
schema Schema
startIndex uint64
pageSize int64
pageToken string
}
type readQueryArgs struct {
conf *readQueryConf
tok string
}
// readServiceStub services read requests by returning data from an in-memory list of values.
type readServiceStub struct {
// pageFetcherReadStub services read requests by returning data from an in-memory list of values.
type pageFetcherReadStub struct {
// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
values [][][]Value // contains pages / rows / columns.
pageTokens map[string]string // maps incoming page token to returned page token.
// arguments are recorded for later inspection.
readTabledataCalls []readTabledataArgs
readQueryCalls []readQueryArgs
service
calls []pageFetcherArgs
}
func (s *readServiceStub) readValues(tok string) *readDataResult {
result := &readDataResult{
pageToken: s.pageTokens[tok],
func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
s.calls = append(s.calls,
pageFetcherArgs{t, schema, startIndex, pageSize, pageToken})
result := &fetchPageResult{
pageToken: s.pageTokens[pageToken],
rows: s.values[0],
}
s.values = s.values[1:]
return result
}
func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
return s.readValues(token), nil
return result, nil
}
func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token})
return s.readValues(token), nil
func waitForQueryStub(context.Context, string) (Schema, uint64, error) {
return nil, 1, nil
}
func TestRead(t *testing.T) {
// The data for the service stub to return is populated for each test case in the testCases for loop.
service := &readServiceStub{}
c := &Client{
service: service,
}
ctx := context.Background()
c := &Client{projectID: "project-id"}
pf := &pageFetcherReadStub{}
queryJob := &Job{
projectID: "project-id",
jobID: "job-id",
service: service,
isQuery: true,
c: c,
config: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
DestinationTable: &bq.TableReference{
ProjectId: "project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
},
},
}
for _, src := range []ReadSource{defaultTable(service), queryJob} {
for _, readFunc := range []func() *RowIterator{
func() *RowIterator {
return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage)
},
func() *RowIterator {
it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage)
if err != nil {
t.Fatal(err)
}
return it
},
} {
testCases := []struct {
data [][][]Value
pageTokens map[string]string
want []ValueList
want [][]Value
}{
{
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
pageTokens: map[string]string{"": "a", "a": ""},
want: []ValueList{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
want: [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
},
{
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
pageTokens: map[string]string{"": ""}, // no more pages after first one.
want: []ValueList{{1, 2}, {11, 12}},
want: [][]Value{{1, 2}, {11, 12}},
},
}
for _, tc := range testCases {
service.values = tc.data
service.pageTokens = tc.pageTokens
if got, ok := doRead(t, c, src); ok {
if !reflect.DeepEqual(got, tc.want) {
pf.values = tc.data
pf.pageTokens = tc.pageTokens
if got, ok := collectValues(t, readFunc()); ok {
if !testutil.Equal(got, tc.want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
}
}
@@ -108,201 +118,116 @@ func TestRead(t *testing.T) {
}
}
// doRead calls Read with a ReadSource. Get is repeatedly called on the Iterator returned by Read and the results are returned.
func doRead(t *testing.T, c *Client, src ReadSource) ([]ValueList, bool) {
it, err := c.Read(context.Background(), src)
if err != nil {
t.Errorf("err calling Read: %v", err)
return nil, false
}
var got []ValueList
for it.Next(context.Background()) {
var vals ValueList
if err := it.Get(&vals); err != nil {
t.Errorf("err calling Get: %v", err)
return nil, false
} else {
got = append(got, vals)
func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) {
var got [][]Value
for {
var vals []Value
err := it.Next(&vals)
if err == iterator.Done {
break
}
if err != nil {
t.Errorf("err calling Next: %v", err)
return nil, false
}
got = append(got, vals)
}
return got, true
}
func TestNoMoreValues(t *testing.T) {
c := &Client{
service: &readServiceStub{
values: [][][]Value{{{1, 2}, {11, 12}}},
},
c := &Client{projectID: "project-id"}
pf := &pageFetcherReadStub{
values: [][][]Value{{{1, 2}, {11, 12}}},
}
it, err := c.Read(context.Background(), defaultTable(c.service))
if err != nil {
t.Fatalf("err calling Read: %v", err)
}
var vals ValueList
it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage)
var vals []Value
// We expect to retrieve two values and then fail on the next attempt.
if !it.Next(context.Background()) {
t.Fatalf("Next: got: false: want: true")
if err := it.Next(&vals); err != nil {
t.Fatalf("Next: got: %v: want: nil", err)
}
if !it.Next(context.Background()) {
t.Fatalf("Next: got: false: want: true")
if err := it.Next(&vals); err != nil {
t.Fatalf("Next: got: %v: want: nil", err)
}
if err := it.Get(&vals); err != nil {
t.Fatalf("Get: got: %v: want: nil", err)
}
if it.Next(context.Background()) {
t.Fatalf("Next: got: true: want: false")
}
if err := it.Get(&vals); err == nil {
t.Fatalf("Get: got: %v: want: non-nil", err)
if err := it.Next(&vals); err != iterator.Done {
t.Fatalf("Next: got: %v: want: iterator.Done", err)
}
}
// delayedReadStub simulates reading results from a query that has not yet
// completed. Its readQuery method initially reports that the query job is not
// yet complete. Subsequently, it proxies the request through to another
// service stub.
type delayedReadStub struct {
numDelays int
var errBang = errors.New("bang")
readServiceStub
}
func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
if s.numDelays > 0 {
s.numDelays--
return nil, errIncompleteJob
}
return s.readServiceStub.readQuery(ctx, conf, token)
}
// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete.
func TestIncompleteJob(t *testing.T) {
service := &delayedReadStub{
numDelays: 2,
readServiceStub: readServiceStub{
values: [][][]Value{{{1, 2}}},
},
}
c := &Client{service: service}
queryJob := &Job{
projectID: "project-id",
jobID: "job-id",
service: service,
isQuery: true,
}
it, err := c.Read(context.Background(), queryJob)
if err != nil {
t.Fatalf("err calling Read: %v", err)
}
var got ValueList
want := ValueList{1, 2}
if !it.Next(context.Background()) {
t.Fatalf("Next: got: false: want: true")
}
if err := it.Get(&got); err != nil {
t.Fatalf("Error calling Get: %v", err)
}
if service.numDelays != 0 {
t.Errorf("remaining numDelays : got: %v want:0", service.numDelays)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, want)
}
}
type errorReadService struct {
service
}
func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
return nil, errors.New("bang!")
func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) {
return nil, errBang
}
func TestReadError(t *testing.T) {
// test that service read errors are propagated back to the caller.
c := &Client{service: &errorReadService{}}
it, err := c.Read(context.Background(), defaultTable(c.service))
if err != nil {
// Read should not return an error; only Err should.
t.Fatalf("err calling Read: %v", err)
}
if it.Next(context.Background()) {
t.Fatalf("Next: got: true: want: false")
}
if err := it.Err(); err.Error() != "bang!" {
t.Fatalf("Get: got: %v: want: bang!", err)
c := &Client{projectID: "project-id"}
it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage)
var vals []Value
if err := it.Next(&vals); err != errBang {
t.Fatalf("Get: got: %v: want: %v", err, errBang)
}
}
func TestReadTabledataOptions(t *testing.T) {
// test that read options are propagated.
s := &readServiceStub{
s := &pageFetcherReadStub{
values: [][][]Value{{{1, 2}}},
}
c := &Client{service: s}
it, err := c.Read(context.Background(), defaultTable(s), RecordsPerRequest(5))
if err != nil {
t.Fatalf("err calling Read: %v", err)
c := &Client{projectID: "project-id"}
tr := c.Dataset("dataset-id").Table("table-id")
it := tr.read(context.Background(), s.fetchPage)
it.PageInfo().MaxSize = 5
var vals []Value
if err := it.Next(&vals); err != nil {
t.Fatal(err)
}
if !it.Next(context.Background()) {
t.Fatalf("Next: got: false: want: true")
}
want := []readTabledataArgs{{
conf: &readTableConf{
projectID: "project-id",
datasetID: "dataset-id",
tableID: "table-id",
paging: pagingConf{
recordsPerRequest: 5,
setRecordsPerRequest: true,
},
},
tok: "",
want := []pageFetcherArgs{{
table: tr,
pageSize: 5,
pageToken: "",
}}
if !reflect.DeepEqual(s.readTabledataCalls, want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" {
t.Errorf("reading (got=-, want=+):\n%s", diff)
}
}
func TestReadQueryOptions(t *testing.T) {
// test that read options are propagated.
s := &readServiceStub{
c := &Client{projectID: "project-id"}
pf := &pageFetcherReadStub{
values: [][][]Value{{{1, 2}}},
}
c := &Client{service: s}
tr := &bq.TableReference{
ProjectId: "project-id",
DatasetId: "dataset-id",
TableId: "table-id",
}
queryJob := &Job{
projectID: "project-id",
jobID: "job-id",
service: s,
isQuery: true,
c: c,
config: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{DestinationTable: tr},
},
}
it, err := c.Read(context.Background(), queryJob, RecordsPerRequest(5))
it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage)
if err != nil {
t.Fatalf("err calling Read: %v", err)
}
if !it.Next(context.Background()) {
t.Fatalf("Next: got: false: want: true")
it.PageInfo().MaxSize = 5
var vals []Value
if err := it.Next(&vals); err != nil {
t.Fatalf("Next: got: %v: want: nil", err)
}
want := []readQueryArgs{{
conf: &readQueryConf{
projectID: "project-id",
jobID: "job-id",
paging: pagingConf{
recordsPerRequest: 5,
setRecordsPerRequest: true,
},
},
tok: "",
want := []pageFetcherArgs{{
table: bqToTable(tr, c),
pageSize: 5,
pageToken: "",
}}
if !reflect.DeepEqual(s.readQueryCalls, want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want)
if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) {
t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want)
}
}


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,8 +15,11 @@
package bigquery
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"sync"
bq "google.golang.org/api/bigquery/v2"
)
@@ -24,6 +27,7 @@ import (
// Schema describes the fields in a table or query result.
type Schema []*FieldSchema
// FieldSchema describes a single field.
type FieldSchema struct {
// The field name.
// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
@@ -46,7 +50,7 @@ type FieldSchema struct {
Schema Schema
}
func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
tfs := &bq.TableFieldSchema{
Description: fs.Description,
Name: fs.Name,
@@ -60,26 +64,21 @@ func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
} // else leave as default, which is interpreted as NULLABLE.
for _, f := range fs.Schema {
tfs.Fields = append(tfs.Fields, f.asTableFieldSchema())
tfs.Fields = append(tfs.Fields, f.toBQ())
}
return tfs
}
func (s Schema) asTableSchema() *bq.TableSchema {
func (s Schema) toBQ() *bq.TableSchema {
var fields []*bq.TableFieldSchema
for _, f := range s {
fields = append(fields, f.asTableFieldSchema())
fields = append(fields, f.toBQ())
}
return &bq.TableSchema{Fields: fields}
}
// customizeCreateTable allows a Schema to be used directly as an option to CreateTable.
func (s Schema) customizeCreateTable(conf *createTableConf) {
conf.schema = s.asTableSchema()
}
func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
fs := &FieldSchema{
Description: tfs.Description,
Name: tfs.Name,
@@ -89,145 +88,431 @@ func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
}
for _, f := range tfs.Fields {
fs.Schema = append(fs.Schema, convertTableFieldSchema(f))
fs.Schema = append(fs.Schema, bqToFieldSchema(f))
}
return fs
}
func convertTableSchema(ts *bq.TableSchema) Schema {
func bqToSchema(ts *bq.TableSchema) Schema {
if ts == nil {
return nil
}
var s Schema
for _, f := range ts.Fields {
s = append(s, convertTableFieldSchema(f))
s = append(s, bqToFieldSchema(f))
}
return s
}
// FieldType is the type of field.
type FieldType string
const (
StringFieldType FieldType = "STRING"
IntegerFieldType FieldType = "INTEGER"
FloatFieldType FieldType = "FLOAT"
BooleanFieldType FieldType = "BOOLEAN"
// StringFieldType is a string field type.
StringFieldType FieldType = "STRING"
// BytesFieldType is a bytes field type.
BytesFieldType FieldType = "BYTES"
// IntegerFieldType is an integer field type.
IntegerFieldType FieldType = "INTEGER"
// FloatFieldType is a float field type.
FloatFieldType FieldType = "FLOAT"
// BooleanFieldType is a boolean field type.
BooleanFieldType FieldType = "BOOLEAN"
// TimestampFieldType is a timestamp field type.
TimestampFieldType FieldType = "TIMESTAMP"
RecordFieldType FieldType = "RECORD"
// RecordFieldType is a record field type. It is typically used to create columns with repeated or nested data.
RecordFieldType FieldType = "RECORD"
// DateFieldType is a date field type.
DateFieldType FieldType = "DATE"
// TimeFieldType is a time field type.
TimeFieldType FieldType = "TIME"
// DateTimeFieldType is a datetime field type.
DateTimeFieldType FieldType = "DATETIME"
// NumericFieldType is a numeric field type. Numeric types include integer types, floating point types and the
// NUMERIC data type.
NumericFieldType FieldType = "NUMERIC"
// GeographyFieldType is a string field type. Geography types represent a set of points
// on the Earth's surface, represented in Well Known Text (WKT) format.
GeographyFieldType FieldType = "GEOGRAPHY"
)
var errNoStruct = errors.New("bigquery: can only infer schema from struct or pointer to struct")
var errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
var (
errEmptyJSONSchema = errors.New("bigquery: empty JSON schema")
fieldTypes = map[FieldType]bool{
StringFieldType: true,
BytesFieldType: true,
IntegerFieldType: true,
FloatFieldType: true,
BooleanFieldType: true,
TimestampFieldType: true,
RecordFieldType: true,
DateFieldType: true,
TimeFieldType: true,
DateTimeFieldType: true,
NumericFieldType: true,
GeographyFieldType: true,
}
)
var typeOfByteSlice = reflect.TypeOf([]byte{})
// InferSchema tries to derive a BigQuery schema from the supplied struct value.
// NOTE: All fields in the returned Schema are configured to be required,
// unless the corresponding field in the supplied struct is a slice or array.
// It is considered an error if the struct (including nested structs) contains
// any exported fields that are pointers or one of the following types:
// map, interface, complex64, complex128, func, chan.
// In these cases, an error will be returned.
// Future versions may handle these cases without error.
// Each exported struct field is mapped to a field in the schema.
//
// The following BigQuery types are inferred from the corresponding Go types.
// (This is the same mapping as that used for RowIterator.Next.) Fields inferred
// from these types are marked required (non-nullable).
//
// STRING string
// BOOL bool
// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32
// FLOAT float32, float64
// BYTES []byte
// TIMESTAMP time.Time
// DATE civil.Date
// TIME civil.Time
// DATETIME civil.DateTime
// NUMERIC *big.Rat
//
// The big.Rat type supports numbers of arbitrary size and precision. Values
// will be rounded to 9 digits after the decimal point before being transmitted
// to BigQuery. See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
// for more on NUMERIC.
//
// A Go slice or array type is inferred to be a BigQuery repeated field of the
// element type. The element type must be one of the above listed types.
//
// Because Go has no unique native type for GEOGRAPHY, there is no schema
// inference to GEOGRAPHY at this time.
//
// Nullable fields are inferred from the NullXXX types, declared in this package:
//
// STRING NullString
// BOOL NullBool
// INTEGER NullInt64
// FLOAT NullFloat64
// TIMESTAMP NullTimestamp
// DATE NullDate
// TIME NullTime
// DATETIME NullDateTime
// GEOGRAPHY NullGeography
//
// For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below).
// For a nullable NUMERIC field, use the type *big.Rat and tag the field "nullable".
//
// A struct field that is of struct type is inferred to be a required field of type
// RECORD with a schema inferred recursively. For backwards compatibility, a field of
// type pointer to struct is also inferred to be required. To get a nullable RECORD
// field, use the "nullable" tag (see below).
//
// InferSchema returns an error if any of the examined fields is of type uint,
// uint64, uintptr, map, interface, complex64, complex128, func, or chan. Future
// versions may handle these cases without error.
//
// Recursively defined structs are also disallowed.
//
// Struct fields may be tagged in a way similar to the encoding/json package.
// A tag of the form
// bigquery:"name"
// uses "name" instead of the struct field name as the BigQuery field name.
// A tag of the form
// bigquery:"-"
// omits the field from the inferred schema.
// The "nullable" option marks the field as nullable (not required). It is only
// needed for []byte, *big.Rat and pointer-to-struct fields, and cannot appear on other
// fields. In this example, the Go name of the field is retained:
// bigquery:",nullable"
func InferSchema(st interface{}) (Schema, error) {
return inferStruct(reflect.TypeOf(st))
return inferSchemaReflectCached(reflect.TypeOf(st))
}
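// Illustrative sketch, not part of the vendored source: InferSchema applied to
// a small hypothetical struct. The resulting field types follow the mapping
// documented above.
type exampleRow struct {
	Name  string   `bigquery:"name"`      // required STRING, renamed to "name"
	Score float64                         // required FLOAT
	Tags  []string                        // repeated STRING
	Note  []byte   `bigquery:",nullable"` // nullable BYTES
}
// InferSchema(exampleRow{}) would return a Schema with four fields:
// name (STRING, required), Score (FLOAT, required), Tags (STRING, repeated)
// and Note (BYTES, nullable).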
func inferStruct(rt reflect.Type) (Schema, error) {
switch rt.Kind() {
case reflect.Struct:
return inferFields(rt)
default:
return nil, errNoStruct
}
var schemaCache sync.Map
type cacheVal struct {
schema Schema
err error
}
func inferSchemaReflectCached(t reflect.Type) (Schema, error) {
var cv cacheVal
v, ok := schemaCache.Load(t)
if ok {
cv = v.(cacheVal)
} else {
s, err := inferSchemaReflect(t)
cv = cacheVal{s, err}
schemaCache.Store(t, cv)
}
return cv.schema, cv.err
}
func inferSchemaReflect(t reflect.Type) (Schema, error) {
rec, err := hasRecursiveType(t, nil)
if err != nil {
return nil, err
}
if rec {
return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t)
}
return inferStruct(t)
}
func inferStruct(t reflect.Type) (Schema, error) {
switch t.Kind() {
case reflect.Ptr:
if t.Elem().Kind() != reflect.Struct {
return nil, noStructError{t}
}
t = t.Elem()
fallthrough
case reflect.Struct:
return inferFields(t)
default:
return nil, noStructError{t}
}
}
// inferFieldSchema infers the FieldSchema for a Go type
func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
switch {
case isByteSlice(rt):
return &FieldSchema{Required: true, Type: StringFieldType}, nil
case isTimeTime(rt):
func inferFieldSchema(fieldName string, rt reflect.Type, nullable bool) (*FieldSchema, error) {
// Only []byte and struct pointers can be tagged nullable.
if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
return nil, badNullableError{fieldName, rt}
}
switch rt {
case typeOfByteSlice:
return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
case typeOfGoTime:
return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
case isRepeated(rt):
case typeOfDate:
return &FieldSchema{Required: true, Type: DateFieldType}, nil
case typeOfTime:
return &FieldSchema{Required: true, Type: TimeFieldType}, nil
case typeOfDateTime:
return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
case typeOfRat:
return &FieldSchema{Required: !nullable, Type: NumericFieldType}, nil
}
if ft := nullableFieldType(rt); ft != "" {
return &FieldSchema{Required: false, Type: ft}, nil
}
if isSupportedIntType(rt) || isSupportedUintType(rt) {
return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
}
switch rt.Kind() {
case reflect.Slice, reflect.Array:
et := rt.Elem()
if isRepeated(et) && !isByteSlice(et) {
if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
// Multi dimensional slices/arrays are not supported by BigQuery
return nil, errUnsupportedFieldType
return nil, unsupportedFieldTypeError{fieldName, rt}
}
f, err := inferFieldSchema(et)
if nullableFieldType(et) != "" {
// Repeated nullable types are not supported by BigQuery.
return nil, unsupportedFieldTypeError{fieldName, rt}
}
f, err := inferFieldSchema(fieldName, et, false)
if err != nil {
return nil, err
}
f.Repeated = true
f.Required = false
return f, nil
case isStruct(rt):
nested, err := inferFields(rt)
case reflect.Ptr:
if rt.Elem().Kind() != reflect.Struct {
return nil, unsupportedFieldTypeError{fieldName, rt}
}
fallthrough
case reflect.Struct:
nested, err := inferStruct(rt)
if err != nil {
return nil, err
}
return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil
}
switch rt.Kind() {
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int,
reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
case reflect.String:
return &FieldSchema{Required: true, Type: StringFieldType}, nil
return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
case reflect.Bool:
return &FieldSchema{Required: true, Type: BooleanFieldType}, nil
return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
case reflect.Float32, reflect.Float64:
return &FieldSchema{Required: true, Type: FloatFieldType}, nil
return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
default:
return nil, errUnsupportedFieldType
return nil, unsupportedFieldTypeError{fieldName, rt}
}
}
// inferFields extracts all exported field types from struct type.
func inferFields(rt reflect.Type) (Schema, error) {
var s Schema
for i := 0; i < rt.NumField(); i++ {
field := rt.Field(i)
if field.PkgPath != "" {
// field is unexported.
continue
fields, err := fieldCache.Fields(rt)
if err != nil {
return nil, err
}
for _, field := range fields {
var nullable bool
for _, opt := range field.ParsedTag.([]string) {
if opt == nullableTagOption {
nullable = true
break
}
}
if field.Anonymous {
// TODO(nightlyone) support embedded (see https://github.com/GoogleCloudPlatform/google-cloud-go/issues/238)
return nil, errUnsupportedFieldType
}
f, err := inferFieldSchema(field.Type)
f, err := inferFieldSchema(field.Name, field.Type, nullable)
if err != nil {
return nil, err
}
f.Name = field.Name
s = append(s, f)
}
return s, nil
}
func isByteSlice(rt reflect.Type) bool {
return rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8
}
func isTimeTime(rt reflect.Type) bool {
return rt.PkgPath() == "time" && rt.Name() == "Time"
}
func isStruct(rt reflect.Type) bool {
return rt.Kind() == reflect.Struct
}
func isRepeated(rt reflect.Type) bool {
switch rt.Kind() {
case reflect.Slice, reflect.Array:
// isSupportedIntType reports whether t is an int type that can be properly
// represented by the BigQuery INTEGER/INT64 type.
func isSupportedIntType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
default:
return false
}
}
// isSupportedUintType reports whether t is a uint type that can be properly
// represented by the BigQuery INTEGER/INT64 type.
func isSupportedUintType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Uint8, reflect.Uint16, reflect.Uint32:
return true
default:
return false
}
}
// typeList is a linked list of reflect.Types.
type typeList struct {
t reflect.Type
next *typeList
}
func (l *typeList) has(t reflect.Type) bool {
for l != nil {
if l.t == t {
return true
}
l = l.next
}
return false
}
// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
// via exported fields. (Schema inference ignores unexported fields.)
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return false, nil
}
if seen.has(t) {
return true, nil
}
fields, err := fieldCache.Fields(t)
if err != nil {
return false, err
}
seen = &typeList{t, seen}
// Because seen is a linked list, additions to it from one field's
// recursive call will not affect the value for subsequent fields' calls.
for _, field := range fields {
ok, err := hasRecursiveType(field.Type, seen)
if err != nil {
return false, err
}
if ok {
return true, nil
}
}
return false, nil
}
// bigQuerySchemaJSONField is an individual field in a JSON BigQuery table schema definition
// (as generated by https://github.com/GoogleCloudPlatform/protoc-gen-bq-schema).
type bigQueryJSONField struct {
Description string `json:"description"`
Fields []bigQueryJSONField `json:"fields"`
Mode string `json:"mode"`
Name string `json:"name"`
Type string `json:"type"`
}
// convertSchemaFromJSON converts a slice of bigQueryJSONField values into a Schema.
func convertSchemaFromJSON(fs []bigQueryJSONField) (Schema, error) {
convertedSchema := Schema{}
for _, f := range fs {
convertedFieldSchema := &FieldSchema{
Description: f.Description,
Name: f.Name,
Required: f.Mode == "REQUIRED",
Repeated: f.Mode == "REPEATED",
}
if len(f.Fields) > 0 {
convertedNestedFieldSchema, err := convertSchemaFromJSON(f.Fields)
if err != nil {
return nil, err
}
convertedFieldSchema.Schema = convertedNestedFieldSchema
}
// Check that the field-type (string) maps to a known FieldType:
if _, ok := fieldTypes[FieldType(f.Type)]; !ok {
return nil, fmt.Errorf("unknown field type (%v)", f.Type)
}
convertedFieldSchema.Type = FieldType(f.Type)
convertedSchema = append(convertedSchema, convertedFieldSchema)
}
return convertedSchema, nil
}
// SchemaFromJSON takes a JSON BigQuery table schema definition
// (as generated by https://github.com/GoogleCloudPlatform/protoc-gen-bq-schema)
// and returns a fully-populated Schema.
func SchemaFromJSON(schemaJSON []byte) (Schema, error) {
var bigQuerySchema []bigQueryJSONField
// Make sure we actually have some content:
if len(schemaJSON) == 0 {
return nil, errEmptyJSONSchema
}
if err := json.Unmarshal(schemaJSON, &bigQuerySchema); err != nil {
return nil, err
}
return convertSchemaFromJSON(bigQuerySchema)
}
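// Illustrative sketch, not part of the vendored source: SchemaFromJSON with a
// minimal two-field definition. The field names here are hypothetical.
func exampleSchemaFromJSON() (Schema, error) {
	def := []byte(`[
		{"name": "id",   "type": "INTEGER", "mode": "REQUIRED"},
		{"name": "name", "type": "STRING",  "mode": "NULLABLE"}
	]`)
	return SchemaFromJSON(def) // id: required INTEGER; name: nullable STRING
}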
type noStructError struct {
typ reflect.Type
}
func (e noStructError) Error() string {
return fmt.Sprintf("bigquery: can only infer schema from struct or pointer to struct, not %s", e.typ)
}
type badNullableError struct {
name string
typ reflect.Type
}
func (e badNullableError) Error() string {
return fmt.Sprintf(`bigquery: field %q of type %s: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`, e.name, e.typ)
}
type unsupportedFieldTypeError struct {
name string
typ reflect.Type
}
func (e unsupportedFieldTypeError) Error() string {
return fmt.Sprintf("bigquery: field %q: type %s is not supported", e.name, e.typ)
}


@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,10 +16,14 @@ package bigquery
import (
"fmt"
"math/big"
"reflect"
"testing"
"time"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@@ -139,6 +143,42 @@ func TestSchemaConversion(t *testing.T) {
fieldSchema("desc", "name", "TIMESTAMP", false, false),
},
},
{
// civil times
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "f1", "TIME", ""),
bqTableFieldSchema("desc", "f2", "DATE", ""),
bqTableFieldSchema("desc", "f3", "DATETIME", ""),
},
},
schema: Schema{
fieldSchema("desc", "f1", "TIME", false, false),
fieldSchema("desc", "f2", "DATE", false, false),
fieldSchema("desc", "f3", "DATETIME", false, false),
},
},
{
// numeric
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "n", "NUMERIC", ""),
},
},
schema: Schema{
fieldSchema("desc", "n", "NUMERIC", false, false),
},
},
{
bqSchema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("geo", "g", "GEOGRAPHY", ""),
},
},
schema: Schema{
fieldSchema("geo", "g", "GEOGRAPHY", false, false),
},
},
{
// nested
bqSchema: &bq.TableSchema{
@@ -160,7 +200,7 @@ func TestSchemaConversion(t *testing.T) {
Name: "outer",
Required: true,
Type: "RECORD",
Schema: []*FieldSchema{
Schema: Schema{
{
Description: "inner field",
Name: "inner",
@@ -171,14 +211,14 @@ func TestSchemaConversion(t *testing.T) {
},
},
}
for _, tc := range testCases {
bqSchema := tc.schema.asTableSchema()
if !reflect.DeepEqual(bqSchema, tc.bqSchema) {
t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", bqSchema, tc.bqSchema)
bqSchema := tc.schema.toBQ()
if !testutil.Equal(bqSchema, tc.bqSchema) {
t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
}
schema := convertTableSchema(tc.bqSchema)
if !reflect.DeepEqual(schema, tc.schema) {
schema := bqToSchema(tc.bqSchema)
if !testutil.Equal(schema, tc.schema) {
t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
}
}
@@ -198,12 +238,9 @@ type allSignedIntegers struct {
}
type allUnsignedIntegers struct {
Uint64 uint64
Uint32 uint32
Uint16 uint16
Uint8 uint8
Uintptr uintptr
Uint uint
Uint32 uint32
Uint16 uint16
Uint8 uint8
}
type allFloat struct {
@@ -217,7 +254,30 @@ type allBoolean struct {
}
type allTime struct {
Time time.Time
Timestamp time.Time
Time civil.Time
Date civil.Date
DateTime civil.DateTime
}
type allNumeric struct {
Numeric *big.Rat
}
func reqField(name, typ string) *FieldSchema {
return &FieldSchema{
Name: name,
Type: FieldType(typ),
Required: true,
}
}
func optField(name, typ string) *FieldSchema {
return &FieldSchema{
Name: name,
Type: FieldType(typ),
Required: false,
}
}
func TestSimpleInference(t *testing.T) {
@@ -228,64 +288,76 @@ func TestSimpleInference(t *testing.T) {
{
in: allSignedIntegers{},
want: Schema{
fieldSchema("", "Int64", "INTEGER", false, true),
fieldSchema("", "Int32", "INTEGER", false, true),
fieldSchema("", "Int16", "INTEGER", false, true),
fieldSchema("", "Int8", "INTEGER", false, true),
fieldSchema("", "Int", "INTEGER", false, true),
reqField("Int64", "INTEGER"),
reqField("Int32", "INTEGER"),
reqField("Int16", "INTEGER"),
reqField("Int8", "INTEGER"),
reqField("Int", "INTEGER"),
},
},
{
in: allUnsignedIntegers{},
want: Schema{
fieldSchema("", "Uint64", "INTEGER", false, true),
fieldSchema("", "Uint32", "INTEGER", false, true),
fieldSchema("", "Uint16", "INTEGER", false, true),
fieldSchema("", "Uint8", "INTEGER", false, true),
fieldSchema("", "Uintptr", "INTEGER", false, true),
fieldSchema("", "Uint", "INTEGER", false, true),
reqField("Uint32", "INTEGER"),
reqField("Uint16", "INTEGER"),
reqField("Uint8", "INTEGER"),
},
},
{
in: allFloat{},
want: Schema{
fieldSchema("", "Float64", "FLOAT", false, true),
fieldSchema("", "Float32", "FLOAT", false, true),
reqField("Float64", "FLOAT"),
reqField("Float32", "FLOAT"),
},
},
{
in: allBoolean{},
want: Schema{
fieldSchema("", "Bool", "BOOLEAN", false, true),
reqField("Bool", "BOOLEAN"),
},
},
{
in: &allBoolean{},
want: Schema{
reqField("Bool", "BOOLEAN"),
},
},
{
in: allTime{},
want: Schema{
fieldSchema("", "Time", "TIMESTAMP", false, true),
reqField("Timestamp", "TIMESTAMP"),
reqField("Time", "TIME"),
reqField("Date", "DATE"),
reqField("DateTime", "DATETIME"),
},
},
{
in: &allNumeric{},
want: Schema{
reqField("Numeric", "NUMERIC"),
},
},
{
in: allStrings{},
want: Schema{
fieldSchema("", "String", "STRING", false, true),
fieldSchema("", "ByteSlice", "STRING", false, true),
reqField("String", "STRING"),
reqField("ByteSlice", "BYTES"),
},
},
}
for i, tc := range testCases {
for _, tc := range testCases {
got, err := InferSchema(tc.in)
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, tc.want)
if !testutil.Equal(got, tc.want) {
t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
pretty.Value(got), pretty.Value(tc.want))
}
}
}
type containsNested struct {
hidden string
NotNested int
Nested struct {
Inside int
@@ -301,6 +373,14 @@ type containsDoubleNested struct {
}
}
type ptrNested struct {
Ptr *struct{ Inside int }
}
type dup struct { // more than one field of the same struct type
A, B allBoolean
}
func TestNestedInference(t *testing.T) {
testCases := []struct {
in interface{}
@@ -309,70 +389,97 @@ func TestNestedInference(t *testing.T) {
{
in: containsNested{},
want: Schema{
fieldSchema("", "NotNested", "INTEGER", false, true),
reqField("NotNested", "INTEGER"),
&FieldSchema{
Name: "Nested",
Required: true,
Type: "RECORD",
Schema: []*FieldSchema{
{
Name: "Inside",
Type: "INTEGER",
Required: true,
},
},
Schema: Schema{reqField("Inside", "INTEGER")},
},
},
},
{
in: containsDoubleNested{},
want: Schema{
fieldSchema("", "NotNested", "INTEGER", false, true),
reqField("NotNested", "INTEGER"),
&FieldSchema{
Name: "Nested",
Required: true,
Type: "RECORD",
Schema: []*FieldSchema{
Schema: Schema{
{
Name: "InsideNested",
Required: true,
Type: "RECORD",
Schema: []*FieldSchema{
{
Name: "Inside",
Type: "INTEGER",
Required: true,
},
},
Schema: Schema{reqField("Inside", "INTEGER")},
},
},
},
},
},
{
in: ptrNested{},
want: Schema{
&FieldSchema{
Name: "Ptr",
Required: true,
Type: "RECORD",
Schema: Schema{reqField("Inside", "INTEGER")},
},
},
},
{
in: dup{},
want: Schema{
&FieldSchema{
Name: "A",
Required: true,
Type: "RECORD",
Schema: Schema{reqField("Bool", "BOOLEAN")},
},
&FieldSchema{
Name: "B",
Required: true,
Type: "RECORD",
Schema: Schema{reqField("Bool", "BOOLEAN")},
},
},
},
}
for i, tc := range testCases {
for _, tc := range testCases {
got, err := InferSchema(tc.in)
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, tc.want)
if !testutil.Equal(got, tc.want) {
t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
pretty.Value(got), pretty.Value(tc.want))
}
}
}
type simpleRepeated struct {
type repeated struct {
NotRepeated []byte
RepeatedByteSlice [][]byte
Repeated []int
Slice []int
Array [5]bool
}
type simpleNestedRepeated struct {
type nestedRepeated struct {
NotRepeated int
Repeated []struct {
Inside int
}
RepeatedPtr []*struct{ Inside int }
}
func repField(name, typ string) *FieldSchema {
return &FieldSchema{
Name: name,
Type: FieldType(typ),
Repeated: true,
}
}
func TestRepeatedInference(t *testing.T) {
@@ -381,28 +488,29 @@ func TestRepeatedInference(t *testing.T) {
want Schema
}{
{
in: simpleRepeated{},
in: repeated{},
want: Schema{
fieldSchema("", "NotRepeated", "STRING", false, true),
fieldSchema("", "RepeatedByteSlice", "STRING", true, false),
fieldSchema("", "Repeated", "INTEGER", true, false),
reqField("NotRepeated", "BYTES"),
repField("RepeatedByteSlice", "BYTES"),
repField("Slice", "INTEGER"),
repField("Array", "BOOLEAN"),
},
},
{
in: simpleNestedRepeated{},
in: nestedRepeated{},
want: Schema{
fieldSchema("", "NotRepeated", "INTEGER", false, true),
&FieldSchema{
reqField("NotRepeated", "INTEGER"),
{
Name: "Repeated",
Repeated: true,
Type: "RECORD",
Schema: []*FieldSchema{
{
Name: "Inside",
Type: "INTEGER",
Required: true,
},
},
Schema: Schema{reqField("Inside", "INTEGER")},
},
{
Name: "RepeatedPtr",
Repeated: true,
Type: "RECORD",
Schema: Schema{reqField("Inside", "INTEGER")},
},
},
},
@@ -413,83 +521,524 @@ func TestRepeatedInference(t *testing.T) {
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, tc.want)
if !testutil.Equal(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
pretty.Value(got), pretty.Value(tc.want))
}
}
}
type allNulls struct {
A NullInt64
B NullFloat64
C NullBool
D NullString
E NullTimestamp
F NullTime
G NullDate
H NullDateTime
I NullGeography
}
func TestNullInference(t *testing.T) {
got, err := InferSchema(allNulls{})
if err != nil {
t.Fatal(err)
}
want := Schema{
optField("A", "INTEGER"),
optField("B", "FLOAT"),
optField("C", "BOOLEAN"),
optField("D", "STRING"),
optField("E", "TIMESTAMP"),
optField("F", "TIME"),
optField("G", "DATE"),
optField("H", "DATETIME"),
optField("I", "GEOGRAPHY"),
}
if diff := testutil.Diff(got, want); diff != "" {
t.Error(diff)
}
}
type Embedded struct {
Embedded int
}
type embedded struct {
Embedded2 int
}
type nestedEmbedded struct {
Embedded
embedded
}
func TestEmbeddedInference(t *testing.T) {
got, err := InferSchema(nestedEmbedded{})
if err != nil {
t.Fatal(err)
}
want := Schema{
reqField("Embedded", "INTEGER"),
reqField("Embedded2", "INTEGER"),
}
if !testutil.Equal(got, want) {
t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want))
}
}
func TestRecursiveInference(t *testing.T) {
type List struct {
Val int
Next *List
}
_, err := InferSchema(List{})
if err == nil {
t.Fatal("got nil, want error")
}
}
type withTags struct {
NoTag int
ExcludeTag int `bigquery:"-"`
SimpleTag int `bigquery:"simple_tag"`
UnderscoreTag int `bigquery:"_id"`
MixedCase int `bigquery:"MIXEDcase"`
Nullable []byte `bigquery:",nullable"`
NullNumeric *big.Rat `bigquery:",nullable"`
}
type withTagsNested struct {
Nested withTags `bigquery:"nested"`
NestedAnonymous struct {
ExcludeTag int `bigquery:"-"`
Inside int `bigquery:"inside"`
} `bigquery:"anon"`
PNested *struct{ X int } // not nullable, for backwards compatibility
PNestedNullable *struct{ X int } `bigquery:",nullable"`
}
type withTagsRepeated struct {
Repeated []withTags `bigquery:"repeated"`
RepeatedAnonymous []struct {
ExcludeTag int `bigquery:"-"`
Inside int `bigquery:"inside"`
} `bigquery:"anon"`
}
type withTagsEmbedded struct {
withTags
}
var withTagsSchema = Schema{
reqField("NoTag", "INTEGER"),
reqField("simple_tag", "INTEGER"),
reqField("_id", "INTEGER"),
reqField("MIXEDcase", "INTEGER"),
optField("Nullable", "BYTES"),
optField("NullNumeric", "NUMERIC"),
}
func TestTagInference(t *testing.T) {
testCases := []struct {
in interface{}
want Schema
}{
{
in: withTags{},
want: withTagsSchema,
},
{
in: withTagsNested{},
want: Schema{
&FieldSchema{
Name: "nested",
Required: true,
Type: "RECORD",
Schema: withTagsSchema,
},
&FieldSchema{
Name: "anon",
Required: true,
Type: "RECORD",
Schema: Schema{reqField("inside", "INTEGER")},
},
&FieldSchema{
Name: "PNested",
Required: true,
Type: "RECORD",
Schema: Schema{reqField("X", "INTEGER")},
},
&FieldSchema{
Name: "PNestedNullable",
Required: false,
Type: "RECORD",
Schema: Schema{reqField("X", "INTEGER")},
},
},
},
{
in: withTagsRepeated{},
want: Schema{
&FieldSchema{
Name: "repeated",
Repeated: true,
Type: "RECORD",
Schema: withTagsSchema,
},
&FieldSchema{
Name: "anon",
Repeated: true,
Type: "RECORD",
Schema: Schema{reqField("inside", "INTEGER")},
},
},
},
{
in: withTagsEmbedded{},
want: withTagsSchema,
},
}
for i, tc := range testCases {
got, err := InferSchema(tc.in)
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
}
if !testutil.Equal(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
pretty.Value(got), pretty.Value(tc.want))
}
}
}
func TestTagInferenceErrors(t *testing.T) {
testCases := []interface{}{
struct {
LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"`
}{},
struct {
UnsupportedStartChar int `bigquery:"øab"`
}{},
struct {
UnsupportedEndChar int `bigquery:"abø"`
}{},
struct {
UnsupportedMiddleChar int `bigquery:"aøb"`
}{},
struct {
StartInt int `bigquery:"1abc"`
}{},
struct {
Hyphens int `bigquery:"a-b"`
}{},
}
for i, tc := range testCases {
_, got := InferSchema(tc)
if _, ok := got.(invalidFieldNameError); !ok {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant invalidFieldNameError", i, got)
}
}
_, err := InferSchema(struct {
X int `bigquery:",optional"`
}{})
if err == nil {
t.Error("got nil, want error")
}
}
func TestSchemaErrors(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want interface{}
	}{
		{
			in:   []byte{},
			want: noStructError{},
		},
		{
			in:   new(int),
			want: noStructError{},
		},
		{
			in:   struct{ Uint uint }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Uint64 uint64 }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Uintptr uintptr }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Complex complex64 }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Map map[string]int }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Chan chan bool }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Ptr *int }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ Interface interface{} }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ MultiDimensional [][]int }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ MultiDimensional [][][]byte }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ SliceOfPointer []*int }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ SliceOfNull []NullInt64 }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ ChanSlice []chan bool }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ NestedChan struct{ Chan []chan bool } }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in: struct {
				X int `bigquery:",nullable"`
			}{},
			want: badNullableError{},
		},
		{
			in: struct {
				X bool `bigquery:",nullable"`
			}{},
			want: badNullableError{},
		},
		{
			in: struct {
				X struct{ N int } `bigquery:",nullable"`
			}{},
			want: badNullableError{},
		},
		{
			in: struct {
				X []int `bigquery:",nullable"`
			}{},
			want: badNullableError{},
		},
		{
			in:   struct{ X *[]byte }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ X *[]int }{},
			want: unsupportedFieldTypeError{},
		},
		{
			in:   struct{ X *int }{},
			want: unsupportedFieldTypeError{},
		},
	}
	for _, tc := range testCases {
		_, got := InferSchema(tc.in)
		if reflect.TypeOf(got) != reflect.TypeOf(tc.want) {
			t.Errorf("%#v: got:\n%#v\nwant type %T", tc.in, got, tc.want)
		}
	}
}
func TestHasRecursiveType(t *testing.T) {
type (
nonStruct int
nonRec struct{ A string }
dup struct{ A, B nonRec }
rec struct {
A int
B *rec
}
recUnexported struct {
A int
}
hasRec struct {
A int
R *rec
}
recSlicePointer struct {
A []*recSlicePointer
}
)
for _, test := range []struct {
in interface{}
want bool
}{
{nonStruct(0), false},
{nonRec{}, false},
{dup{}, false},
{rec{}, true},
{recUnexported{}, false},
{hasRec{}, true},
{&recSlicePointer{}, true},
} {
got, err := hasRecursiveType(reflect.TypeOf(test.in), nil)
if err != nil {
t.Fatal(err)
}
if got != test.want {
t.Errorf("%T: got %t, want %t", test.in, got, test.want)
}
}
}
func TestSchemaFromJSON(t *testing.T) {
testCasesExpectingSuccess := []struct {
bqSchemaJSON []byte
description string
expectedSchema Schema
}{
{
description: "Flat table with a mixture of NULLABLE and REQUIRED fields",
bqSchemaJSON: []byte(`
[
{"name":"flat_string","type":"STRING","mode":"NULLABLE","description":"Flat nullable string"},
{"name":"flat_bytes","type":"BYTES","mode":"REQUIRED","description":"Flat required BYTES"},
{"name":"flat_integer","type":"INTEGER","mode":"NULLABLE","description":"Flat nullable INTEGER"},
{"name":"flat_float","type":"FLOAT","mode":"REQUIRED","description":"Flat required FLOAT"},
{"name":"flat_boolean","type":"BOOLEAN","mode":"NULLABLE","description":"Flat nullable BOOLEAN"},
{"name":"flat_timestamp","type":"TIMESTAMP","mode":"REQUIRED","description":"Flat required TIMESTAMP"},
{"name":"flat_date","type":"DATE","mode":"NULLABLE","description":"Flat required DATE"},
{"name":"flat_time","type":"TIME","mode":"REQUIRED","description":"Flat nullable TIME"},
{"name":"flat_datetime","type":"DATETIME","mode":"NULLABLE","description":"Flat required DATETIME"},
{"name":"flat_numeric","type":"NUMERIC","mode":"REQUIRED","description":"Flat nullable NUMERIC"},
{"name":"flat_geography","type":"GEOGRAPHY","mode":"REQUIRED","description":"Flat required GEOGRAPHY"}
]`),
expectedSchema: Schema{
fieldSchema("Flat nullable string", "flat_string", "STRING", false, false),
fieldSchema("Flat required BYTES", "flat_bytes", "BYTES", false, true),
fieldSchema("Flat nullable INTEGER", "flat_integer", "INTEGER", false, false),
fieldSchema("Flat required FLOAT", "flat_float", "FLOAT", false, true),
fieldSchema("Flat nullable BOOLEAN", "flat_boolean", "BOOLEAN", false, false),
fieldSchema("Flat required TIMESTAMP", "flat_timestamp", "TIMESTAMP", false, true),
fieldSchema("Flat required DATE", "flat_date", "DATE", false, false),
fieldSchema("Flat nullable TIME", "flat_time", "TIME", false, true),
fieldSchema("Flat required DATETIME", "flat_datetime", "DATETIME", false, false),
fieldSchema("Flat nullable NUMERIC", "flat_numeric", "NUMERIC", false, true),
fieldSchema("Flat required GEOGRAPHY", "flat_geography", "GEOGRAPHY", false, true),
},
},
{
description: "Table with a nested RECORD",
bqSchemaJSON: []byte(`
[
{"name":"flat_string","type":"STRING","mode":"NULLABLE","description":"Flat nullable string"},
{"name":"nested_record","type":"RECORD","mode":"NULLABLE","description":"Nested nullable RECORD","fields":[{"name":"record_field_1","type":"STRING","mode":"NULLABLE","description":"First nested record field"},{"name":"record_field_2","type":"INTEGER","mode":"REQUIRED","description":"Second nested record field"}]}
]`),
expectedSchema: Schema{
fieldSchema("Flat nullable string", "flat_string", "STRING", false, false),
&FieldSchema{
Description: "Nested nullable RECORD",
Name: "nested_record",
Required: false,
Type: "RECORD",
Schema: Schema{
{
Description: "First nested record field",
Name: "record_field_1",
Required: false,
Type: "STRING",
},
{
Description: "Second nested record field",
Name: "record_field_2",
Required: true,
Type: "INTEGER",
},
},
},
},
},
{
description: "Table with a repeated RECORD",
bqSchemaJSON: []byte(`
[
{"name":"flat_string","type":"STRING","mode":"NULLABLE","description":"Flat nullable string"},
{"name":"nested_record","type":"RECORD","mode":"REPEATED","description":"Nested nullable RECORD","fields":[{"name":"record_field_1","type":"STRING","mode":"NULLABLE","description":"First nested record field"},{"name":"record_field_2","type":"INTEGER","mode":"REQUIRED","description":"Second nested record field"}]}
]`),
expectedSchema: Schema{
fieldSchema("Flat nullable string", "flat_string", "STRING", false, false),
&FieldSchema{
Description: "Nested nullable RECORD",
Name: "nested_record",
Repeated: true,
Required: false,
Type: "RECORD",
Schema: Schema{
{
Description: "First nested record field",
Name: "record_field_1",
Required: false,
Type: "STRING",
},
{
Description: "Second nested record field",
Name: "record_field_2",
Required: true,
Type: "INTEGER",
},
},
},
},
},
}
for _, tc := range testCasesExpectingSuccess {
convertedSchema, err := SchemaFromJSON(tc.bqSchemaJSON)
if err != nil {
t.Errorf("encountered an error when converting JSON table schema (%s): %v", tc.description, err)
continue
}
if !testutil.Equal(convertedSchema, tc.expectedSchema) {
t.Errorf("generated JSON table schema (%s) differs from the expected schema", tc.description)
}
}
testCasesExpectingFailure := []struct {
bqSchemaJSON []byte
description string
}{
{
description: "Schema with invalid JSON",
bqSchemaJSON: []byte(`This is not JSON`),
},
{
description: "Schema with unknown field type",
bqSchemaJSON: []byte(`[{"name":"strange_type","type":"STRANGE","description":"This type should not exist"}]`),
},
{
description: "Schema with zero length",
bqSchemaJSON: []byte(``),
},
}
for _, tc := range testCasesExpectingFailure {
_, err := SchemaFromJSON(tc.bqSchemaJSON)
if err == nil {
t.Errorf("converting this schema should have returned an error (%s): %v", tc.description, err)
continue
}
}
}
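// Illustrative sketch (not part of the upstream test file): SchemaFromJSON is
// handy when a schema is maintained as a JSON file, for example one exported
// from an existing table. The file name below is hypothetical.
//
//	data, err := ioutil.ReadFile("schema.json") // requires "io/ioutil"
//	if err != nil {
//		// handle error
//	}
//	schema, err := SchemaFromJSON(data)
//	if err != nil {
//		// handle error
//	}
//	_ = schema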

View File

@@ -1,484 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"fmt"
"net/http"
"sync"
"time"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
// service provides an internal abstraction to isolate the generated
// BigQuery API; most of this package uses this interface instead.
// The single implementation, *bigqueryService, contains all the knowledge
// of the generated BigQuery API.
type service interface {
// Jobs
insertJob(ctx context.Context, job *bq.Job, projectId string) (*Job, error)
getJobType(ctx context.Context, projectId, jobID string) (jobType, error)
jobCancel(ctx context.Context, projectId, jobID string) error
jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)
// Tables
createTable(ctx context.Context, conf *createTableConf) error
getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
deleteTable(ctx context.Context, projectID, datasetID, tableID string) error
listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error)
patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error)
// Table data
readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error
// Datasets
insertDataset(ctx context.Context, datasetID, projectID string) error
// Misc
// readQuery reads data resulting from a query job. If the job is
// incomplete, an errIncompleteJob is returned. readQuery may be called
// repeatedly to poll for job completion.
readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error)
}
type bigqueryService struct {
s *bq.Service
}
func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) {
s, err := bq.New(client)
if err != nil {
return nil, fmt.Errorf("constructing bigquery client: %v", err)
}
s.BasePath = endpoint
return &bigqueryService{s: s}, nil
}
// getPages calls the supplied getPage function repeatedly until there are no pages left to get.
// token is the token of the initial page to start from. Use an empty string to start from the beginning.
func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
for {
var err error
token, err = getPage(token)
if err != nil {
return err
}
if token == "" {
return nil
}
}
}
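// Illustrative sketch of how getPages is driven (hypothetical caller; s, ctx,
// projectID and datasetID are assumed to be in scope): the closure fetches one
// page, records its results, and returns the next token; "" ends the loop.
//
//	var all []*Table
//	err := getPages("", func(tok string) (string, error) {
//		tables, next, err := s.listTables(ctx, projectID, datasetID, tok)
//		if err != nil {
//			return "", err
//		}
//		all = append(all, tables...)
//		return next, nil
//	})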
func (s *bigqueryService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {
res, err := s.s.Jobs.Insert(projectID, job).Context(ctx).Do()
if err != nil {
return nil, err
}
return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil
}
type pagingConf struct {
recordsPerRequest int64
setRecordsPerRequest bool
startIndex uint64
}
type readTableConf struct {
projectID, datasetID, tableID string
paging pagingConf
schema Schema // lazily initialized when the first page of data is fetched.
}
type readDataResult struct {
pageToken string
rows [][]Value
totalRows uint64
schema Schema
}
type readQueryConf struct {
projectID, jobID string
paging pagingConf
}
func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
// Prepare request to fetch one page of table data.
req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
if pageToken != "" {
req.PageToken(pageToken)
} else {
req.StartIndex(conf.paging.startIndex)
}
if conf.paging.setRecordsPerRequest {
req.MaxResults(conf.paging.recordsPerRequest)
}
// Fetch the table schema in the background, if necessary.
var schemaErr error
var schemaFetch sync.WaitGroup
if conf.schema == nil {
schemaFetch.Add(1)
go func() {
defer schemaFetch.Done()
var t *bq.Table
t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
Fields("schema").
Context(ctx).
Do()
if schemaErr == nil && t.Schema != nil {
conf.schema = convertTableSchema(t.Schema)
}
}()
}
res, err := req.Context(ctx).Do()
if err != nil {
return nil, err
}
schemaFetch.Wait()
if schemaErr != nil {
return nil, schemaErr
}
result := &readDataResult{
pageToken: res.PageToken,
totalRows: uint64(res.TotalRows),
schema: conf.schema,
}
result.rows, err = convertRows(res.Rows, conf.schema)
if err != nil {
return nil, err
}
return result, nil
}
var errIncompleteJob = errors.New("internal error: query results not available because job is not complete")
// getQueryResultsTimeout controls the maximum duration of a request to the
// BigQuery GetQueryResults endpoint. Setting a long timeout here does not
// cause increased overall latency, as results are returned as soon as they are
// available.
const getQueryResultsTimeout = time.Minute
func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) {
req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID).
TimeoutMs(getQueryResultsTimeout.Nanoseconds() / 1e6)
if pageToken != "" {
req.PageToken(pageToken)
} else {
req.StartIndex(conf.paging.startIndex)
}
if conf.paging.setRecordsPerRequest {
req.MaxResults(conf.paging.recordsPerRequest)
}
res, err := req.Context(ctx).Do()
if err != nil {
return nil, err
}
if !res.JobComplete {
return nil, errIncompleteJob
}
schema := convertTableSchema(res.Schema)
result := &readDataResult{
pageToken: res.PageToken,
totalRows: res.TotalRows,
schema: schema,
}
result.rows, err = convertRows(res.Rows, schema)
if err != nil {
return nil, err
}
return result, nil
}
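// Illustrative sketch (hypothetical caller; conf is assumed to be in scope):
// because readQuery returns errIncompleteJob while the job is still running,
// callers poll until the first page is available. Backoff details are elided.
//
//	for {
//		res, err := s.readQuery(ctx, conf, "")
//		if err == errIncompleteJob {
//			time.Sleep(time.Second) // simple fixed delay for the sketch
//			continue
//		}
//		if err != nil {
//			return err
//		}
//		process(res.rows) // process is a placeholder
//		break
//	}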
type insertRowsConf struct {
templateSuffix string
ignoreUnknownValues bool
skipInvalidRows bool
}
func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
req := &bq.TableDataInsertAllRequest{
TemplateSuffix: conf.templateSuffix,
IgnoreUnknownValues: conf.ignoreUnknownValues,
SkipInvalidRows: conf.skipInvalidRows,
}
for _, row := range rows {
m := make(map[string]bq.JsonValue)
for k, v := range row.Row {
m[k] = bq.JsonValue(v)
}
req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
InsertId: row.InsertID,
Json: m,
})
}
res, err := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx).Do()
if err != nil {
return err
}
if len(res.InsertErrors) == 0 {
return nil
}
var errs PutMultiError
for _, e := range res.InsertErrors {
if int(e.Index) > len(rows) {
return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
}
rie := RowInsertionError{
InsertID: rows[e.Index].InsertID,
RowIndex: int(e.Index),
}
for _, errp := range e.Errors {
rie.Errors = append(rie.Errors, errorFromErrorProto(errp))
}
errs = append(errs, rie)
}
return errs
}
type jobType int
const (
copyJobType jobType = iota
extractJobType
loadJobType
queryJobType
)
func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) {
res, err := s.s.Jobs.Get(projectID, jobID).
Fields("configuration").
Context(ctx).
Do()
if err != nil {
return 0, err
}
switch {
case res.Configuration.Copy != nil:
return copyJobType, nil
case res.Configuration.Extract != nil:
return extractJobType, nil
case res.Configuration.Load != nil:
return loadJobType, nil
case res.Configuration.Query != nil:
return queryJobType, nil
default:
return 0, errors.New("unknown job type")
}
}
func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
// Jobs.Cancel returns a job entity, but the only relevant piece of
// data it may contain (the status of the job) is unreliable. From the
// docs: "This call will return immediately, and the client will need
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
_, err := s.s.Jobs.Cancel(projectID, jobID).
Fields(). // We don't need any of the response data.
Context(ctx).
Do()
return err
}
func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
res, err := s.s.Jobs.Get(projectID, jobID).
Fields("status"). // Only fetch what we need.
Context(ctx).
Do()
if err != nil {
return nil, err
}
return jobStatusFromProto(res.Status)
}
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
state, ok := stateMap[status.State]
if !ok {
return nil, fmt.Errorf("unexpected job state: %v", status.State)
}
newStatus := &JobStatus{
State: state,
err: nil,
}
if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
newStatus.err = err
}
for _, ep := range status.Errors {
newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))
}
return newStatus, nil
}
// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
var tables []*Table
res, err := s.s.Tables.List(projectID, datasetID).
PageToken(pageToken).
Context(ctx).
Do()
if err != nil {
return nil, "", err
}
for _, t := range res.Tables {
tables = append(tables, s.convertListedTable(t))
}
return tables, res.NextPageToken, nil
}
type createTableConf struct {
projectID, datasetID, tableID string
expiration time.Time
viewQuery string
schema *bq.TableSchema
}
// createTable creates a table in the BigQuery service.
// expiration is an optional time after which the table will be deleted and its storage reclaimed.
// If viewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view.
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
table := &bq.Table{
TableReference: &bq.TableReference{
ProjectId: conf.projectID,
DatasetId: conf.datasetID,
TableId: conf.tableID,
},
}
if !conf.expiration.IsZero() {
table.ExpirationTime = conf.expiration.UnixNano() / 1e6 // milliseconds since epoch, as expected by the API
}
// TODO(jba): make it impossible to provide both a view query and a schema.
if conf.viewQuery != "" {
table.View = &bq.ViewDefinition{
Query: conf.viewQuery,
}
}
if conf.schema != nil {
table.Schema = conf.schema
}
_, err := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx).Do()
return err
}
func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
table, err := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do()
if err != nil {
return nil, err
}
return bqTableToMetadata(table), nil
}
func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do()
}
func bqTableToMetadata(t *bq.Table) *TableMetadata {
md := &TableMetadata{
Description: t.Description,
Name: t.FriendlyName,
Type: TableType(t.Type),
ID: t.Id,
NumBytes: t.NumBytes,
NumRows: t.NumRows,
}
if t.ExpirationTime != 0 {
md.ExpirationTime = time.Unix(0, t.ExpirationTime*1e6)
}
if t.CreationTime != 0 {
md.CreationTime = time.Unix(0, t.CreationTime*1e6)
}
if t.LastModifiedTime != 0 {
md.LastModifiedTime = time.Unix(0, int64(t.LastModifiedTime*1e6))
}
if t.Schema != nil {
md.Schema = convertTableSchema(t.Schema)
}
if t.View != nil {
md.View = t.View.Query
}
return md
}
func (s *bigqueryService) convertListedTable(t *bq.TableListTables) *Table {
return &Table{
ProjectID: t.TableReference.ProjectId,
DatasetID: t.TableReference.DatasetId,
TableID: t.TableReference.TableId,
service: s,
}
}
// patchTableConf contains fields to be patched.
type patchTableConf struct {
// These fields are omitted from the patch operation if nil.
Description *string
Name *string
}
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
t := &bq.Table{}
forceSend := func(field string) {
t.ForceSendFields = append(t.ForceSendFields, field)
}
if conf.Description != nil {
t.Description = *conf.Description
forceSend("Description")
}
if conf.Name != nil {
t.FriendlyName = *conf.Name
forceSend("FriendlyName")
}
table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
Context(ctx).
Do()
if err != nil {
return nil, err
}
return bqTableToMetadata(table), nil
}
func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error {
ds := &bq.Dataset{
DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
}
_, err := s.s.Datasets.Insert(projectID, ds).Context(ctx).Do()
return err
}

View File

@@ -0,0 +1,267 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package storage
import (
"context"
"fmt"
"time"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/option"
"google.golang.org/api/transport"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// BigQueryStorageCallOptions contains the retry settings for each method of BigQueryStorageClient.
type BigQueryStorageCallOptions struct {
CreateReadSession []gax.CallOption
ReadRows []gax.CallOption
BatchCreateReadSessionStreams []gax.CallOption
FinalizeStream []gax.CallOption
SplitReadStream []gax.CallOption
}
func defaultBigQueryStorageClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("bigquerystorage.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultBigQueryStorageCallOptions() *BigQueryStorageCallOptions {
retry := map[[2]string][]gax.CallOption{
{"create_read_session", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &BigQueryStorageCallOptions{
CreateReadSession: retry[[2]string{"create_read_session", "idempotent"}],
ReadRows: retry[[2]string{"default", "idempotent"}],
BatchCreateReadSessionStreams: retry[[2]string{"default", "idempotent"}],
FinalizeStream: retry[[2]string{"default", "idempotent"}],
SplitReadStream: retry[[2]string{"default", "idempotent"}],
}
}
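// Illustrative sketch (not generated): retry behavior can be adjusted after
// construction by replacing entries in CallOptions, or per call by passing
// gax.CallOption values to a method. For example, to retry ReadRows only on
// Unavailable with a lower backoff ceiling:
//
//	c.CallOptions.ReadRows = []gax.CallOption{
//		gax.WithRetry(func() gax.Retryer {
//			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
//				Initial:    100 * time.Millisecond,
//				Max:        10 * time.Second,
//				Multiplier: 1.3,
//			})
//		}),
//	}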
// BigQueryStorageClient is a client for interacting with BigQuery Storage API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type BigQueryStorageClient struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
bigQueryStorageClient storagepb.BigQueryStorageClient
// The call options for this service.
CallOptions *BigQueryStorageCallOptions
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewBigQueryStorageClient creates a new big query storage client.
//
// BigQuery storage API.
//
// The BigQuery storage API can be used to read data stored in BigQuery.
func NewBigQueryStorageClient(ctx context.Context, opts ...option.ClientOption) (*BigQueryStorageClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultBigQueryStorageClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &BigQueryStorageClient{
conn: conn,
CallOptions: defaultBigQueryStorageCallOptions(),
bigQueryStorageClient: storagepb.NewBigQueryStorageClient(conn),
}
c.setGoogleClientInfo()
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *BigQueryStorageClient) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *BigQueryStorageClient) Close() error {
return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *BigQueryStorageClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// CreateReadSession creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Read sessions automatically expire 24 hours after they are created and do
// not require manual clean-up by the caller.
func (c *BigQueryStorageClient) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest, opts ...gax.CallOption) (*storagepb.ReadSession, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v", "table_reference.project_id", req.GetTableReference().GetProjectId(), "table_reference.dataset_id", req.GetTableReference().GetDatasetId()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CreateReadSession[0:len(c.CallOptions.CreateReadSession):len(c.CallOptions.CreateReadSession)], opts...)
var resp *storagepb.ReadSession
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.CreateReadSession(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ReadRows reads rows from the table in the format prescribed by the read session.
// Each response contains one or more table rows, up to a maximum of 10 MiB
// per response; read requests which attempt to read individual rows larger
// than this will fail.
//
// Each request also returns a set of stream statistics reflecting the
// estimated total number of rows in the read stream. This number is computed
// based on the total table size and the number of active streams in the read
// session, and may change as other streams continue to read data.
func (c *BigQueryStorageClient) ReadRows(ctx context.Context, req *storagepb.ReadRowsRequest, opts ...gax.CallOption) (storagepb.BigQueryStorage_ReadRowsClient, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "read_position.stream.name", req.GetReadPosition().GetStream().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ReadRows[0:len(c.CallOptions.ReadRows):len(c.CallOptions.ReadRows)], opts...)
var resp storagepb.BigQueryStorage_ReadRowsClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.ReadRows(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// BatchCreateReadSessionStreams creates additional streams for a ReadSession. This API can be used to
// dynamically adjust the parallelism of a batch processing task upwards by
// adding additional workers.
func (c *BigQueryStorageClient) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest, opts ...gax.CallOption) (*storagepb.BatchCreateReadSessionStreamsResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "session.name", req.GetSession().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.BatchCreateReadSessionStreams[0:len(c.CallOptions.BatchCreateReadSessionStreams):len(c.CallOptions.BatchCreateReadSessionStreams)], opts...)
var resp *storagepb.BatchCreateReadSessionStreamsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.BatchCreateReadSessionStreams(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// FinalizeStream triggers the graceful termination of a single stream in a ReadSession. This
// API can be used to dynamically adjust the parallelism of a batch processing
// task downwards without losing data.
//
// This API does not delete the stream -- it remains visible in the
// ReadSession, and any data processed by the stream is not released to other
// streams. However, no additional data will be assigned to the stream once
// this call completes. Callers must continue reading data on the stream until
// the end of the stream is reached so that data which has already been
// assigned to the stream will be processed.
//
// This method will return an error if there are no other live streams
// in the Session, or if SplitReadStream() has been called on the given
// Stream.
func (c *BigQueryStorageClient) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "stream.name", req.GetStream().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.FinalizeStream[0:len(c.CallOptions.FinalizeStream):len(c.CallOptions.FinalizeStream)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.bigQueryStorageClient.FinalizeStream(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// SplitReadStream splits a given read stream into two Streams. These streams are referred to
// as the primary and the residual of the split. The original stream can still
// be read from in the same manner as before. Both of the returned streams can
// also be read from, and the total rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back to back in the
// original Stream. Concretely, it is guaranteed that for streams Original,
// Primary, and Residual, that Original[0-j] = Primary[0-j] and
// Original[j-n] = Residual[0-m] once the streams have been read to
// completion.
//
// This method is guaranteed to be idempotent.
func (c *BigQueryStorageClient) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest, opts ...gax.CallOption) (*storagepb.SplitReadStreamResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "original_stream.name", req.GetOriginalStream().GetName()))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.SplitReadStream[0:len(c.CallOptions.SplitReadStream):len(c.CallOptions.SplitReadStream)], opts...)
var resp *storagepb.SplitReadStreamResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.bigQueryStorageClient.SplitReadStream(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

View File

@@ -0,0 +1,132 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package storage_test
import (
"context"
"io"
storage "cloud.google.com/go/bigquery/storage/apiv1beta1"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)
func ExampleNewBigQueryStorageClient() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExampleBigQueryStorageClient_CreateReadSession() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &storagepb.CreateReadSessionRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CreateReadSession(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleBigQueryStorageClient_ReadRows() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &storagepb.ReadRowsRequest{
// TODO: Fill request struct fields.
}
stream, err := c.ReadRows(ctx, req)
if err != nil {
// TODO: Handle error.
}
for {
resp, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
// TODO: handle error.
}
// TODO: Use resp.
_ = resp
}
}
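// ExampleBigQueryStorageClient_CreateReadSession_readAllStreams is an
// illustrative sketch (not generated): it combines CreateReadSession and
// ReadRows to drain every stream in a session sequentially. Field names follow
// the v1beta1 protos; fill the request struct as appropriate for your table.
func ExampleBigQueryStorageClient_CreateReadSession_readAllStreams() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &storagepb.CreateReadSessionRequest{
		// TODO: Fill request struct fields (table reference, parent, etc.).
	}
	session, err := c.CreateReadSession(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	for _, s := range session.GetStreams() {
		stream, err := c.ReadRows(ctx, &storagepb.ReadRowsRequest{
			ReadPosition: &storagepb.StreamPosition{Stream: s},
		})
		if err != nil {
			// TODO: Handle error.
		}
		for {
			resp, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				// TODO: Handle error.
			}
			// TODO: Use resp.
			_ = resp
		}
	}
}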
func ExampleBigQueryStorageClient_BatchCreateReadSessionStreams() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &storagepb.BatchCreateReadSessionStreamsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.BatchCreateReadSessionStreams(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleBigQueryStorageClient_FinalizeStream() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &storagepb.FinalizeStreamRequest{
// TODO: Fill request struct fields.
}
err = c.FinalizeStream(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleBigQueryStorageClient_SplitReadStream() {
ctx := context.Background()
c, err := storage.NewBigQueryStorageClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &storagepb.SplitReadStreamRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SplitReadStream(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

View File

@@ -0,0 +1,100 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
// Package storage is an auto-generated package for the
// BigQuery Storage API.
//
// NOTE: This package is in beta. It is not stable, and may be subject to changes.
//
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
//
// For information about setting deadlines, reusing contexts, and more
// please visit godoc.org/cloud.google.com/go.
package storage // import "cloud.google.com/go/bigquery/storage/apiv1beta1"
import (
"context"
"runtime"
"strings"
"unicode"
"google.golang.org/grpc/metadata"
)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
const develPrefix = "devel +"
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
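// For reference, versionGo normalizes runtime.Version() strings such as:
//
//	"go1.12"                 -> "1.12.0"
//	"go1.12.5"               -> "1.12.5"
//	"go1.13beta1"            -> "1.13.0-beta1"
//	"devel +a1b2c3d Mon ..." -> "a1b2c3d"
//
// Anything unrecognized is reported as "UNKNOWN".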
const versionClient = "20190404"

View File

@@ -0,0 +1,452 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapic-generator. DO NOT EDIT.
package storage
import (
emptypb "github.com/golang/protobuf/ptypes/empty"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)
import (
"context"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status
type mockBigQueryStorageServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
// in the future.
storagepb.BigQueryStorageServer
reqs []proto.Message
// If set, all calls return this error.
err error
// responses to return if err == nil
resps []proto.Message
}
func (s *mockBigQueryStorageServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*storagepb.ReadSession), nil
}
func (s *mockBigQueryStorageServer) ReadRows(req *storagepb.ReadRowsRequest, stream storagepb.BigQueryStorage_ReadRowsServer) error {
md, _ := metadata.FromIncomingContext(stream.Context())
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return s.err
}
for _, v := range s.resps {
if err := stream.Send(v.(*storagepb.ReadRowsResponse)); err != nil {
return err
}
}
return nil
}
func (s *mockBigQueryStorageServer) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest) (*storagepb.BatchCreateReadSessionStreamsResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*storagepb.BatchCreateReadSessionStreamsResponse), nil
}
func (s *mockBigQueryStorageServer) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest) (*emptypb.Empty, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*emptypb.Empty), nil
}
func (s *mockBigQueryStorageServer) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest) (*storagepb.SplitReadStreamResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*storagepb.SplitReadStreamResponse), nil
}
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption
var (
mockBigQueryStorage mockBigQueryStorageServer
)
func TestMain(m *testing.M) {
flag.Parse()
serv := grpc.NewServer()
storagepb.RegisterBigQueryStorageServer(serv, &mockBigQueryStorage)
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
log.Fatal(err)
}
go serv.Serve(lis)
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
clientOpt = option.WithGRPCConn(conn)
os.Exit(m.Run())
}
func TestBigQueryStorageCreateReadSession(t *testing.T) {
var name string = "name3373707"
var expectedResponse = &storagepb.ReadSession{
Name: name,
}
mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil
mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
var tableReference *storagepb.TableReference = &storagepb.TableReference{}
var parent string = "parent-995424086"
var request = &storagepb.CreateReadSessionRequest{
TableReference: tableReference,
Parent: parent,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.CreateReadSession(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestBigQueryStorageCreateReadSessionError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
var tableReference *storagepb.TableReference = &storagepb.TableReference{}
var parent string = "parent-995424086"
var request = &storagepb.CreateReadSessionRequest{
TableReference: tableReference,
Parent: parent,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.CreateReadSession(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestBigQueryStorageReadRows(t *testing.T) {
var expectedResponse *storagepb.ReadRowsResponse = &storagepb.ReadRowsResponse{}
mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil
mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
var readPosition *storagepb.StreamPosition = &storagepb.StreamPosition{}
var request = &storagepb.ReadRowsRequest{
ReadPosition: readPosition,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
stream, err := c.ReadRows(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := stream.Recv()
if err != nil {
t.Fatal(err)
}
if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestBigQueryStorageReadRowsError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
var readPosition *storagepb.StreamPosition = &storagepb.StreamPosition{}
var request = &storagepb.ReadRowsRequest{
ReadPosition: readPosition,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
stream, err := c.ReadRows(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := stream.Recv()
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestBigQueryStorageBatchCreateReadSessionStreams(t *testing.T) {
var expectedResponse *storagepb.BatchCreateReadSessionStreamsResponse = &storagepb.BatchCreateReadSessionStreamsResponse{}
mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil
mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
var session *storagepb.ReadSession = &storagepb.ReadSession{}
var requestedStreams int32 = 1017221410
var request = &storagepb.BatchCreateReadSessionStreamsRequest{
Session: session,
RequestedStreams: requestedStreams,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BatchCreateReadSessionStreams(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestBigQueryStorageBatchCreateReadSessionStreamsError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
var session *storagepb.ReadSession = &storagepb.ReadSession{}
var requestedStreams int32 = 1017221410
var request = &storagepb.BatchCreateReadSessionStreamsRequest{
Session: session,
RequestedStreams: requestedStreams,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BatchCreateReadSessionStreams(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestBigQueryStorageFinalizeStream(t *testing.T) {
var expectedResponse *emptypb.Empty = &emptypb.Empty{}
mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil
mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
var stream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.FinalizeStreamRequest{
Stream: stream,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
err = c.FinalizeStream(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
}
func TestBigQueryStorageFinalizeStreamError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
var stream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.FinalizeStreamRequest{
Stream: stream,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
err = c.FinalizeStream(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
}
func TestBigQueryStorageSplitReadStream(t *testing.T) {
var expectedResponse *storagepb.SplitReadStreamResponse = &storagepb.SplitReadStreamResponse{}
mockBigQueryStorage.err = nil
mockBigQueryStorage.reqs = nil
mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)
var originalStream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.SplitReadStreamRequest{
OriginalStream: originalStream,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.SplitReadStream(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestBigQueryStorageSplitReadStreamError(t *testing.T) {
errCode := codes.PermissionDenied
mockBigQueryStorage.err = gstatus.Error(errCode, "test error")
var originalStream *storagepb.Stream = &storagepb.Stream{}
var request = &storagepb.SplitReadStreamRequest{
OriginalStream: originalStream,
}
c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.SplitReadStream(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,11 +15,13 @@
package bigquery
import (
"context"
"errors"
"fmt"
"time"
"golang.org/x/net/context"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
bq "google.golang.org/api/bigquery/v2"
)
@@ -33,23 +35,58 @@ type Table struct {
// The maximum length is 1,024 characters.
TableID string
service service
c *Client
}
// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
Description string // The user-friendly description of this table.
Name string // The user-friendly name for this table.
Schema Schema
View string
// The following fields can be set when creating a table.
ID string // An opaque ID uniquely identifying the table.
Type TableType
// The user-friendly name for the table.
Name string
// The time when this table expires. If not set, the table will persist
// indefinitely. Expired tables will be deleted and their storage reclaimed.
// The user-friendly description of the table.
Description string
// The table schema. If provided on create, ViewQuery must be empty.
Schema Schema
// The query to use for a view. If provided on create, Schema must be nil.
ViewQuery string
// Use Legacy SQL for the view query.
// At most one of UseLegacySQL and UseStandardSQL can be true.
UseLegacySQL bool
// Use Standard SQL for the view query. The default.
// At most one of UseLegacySQL and UseStandardSQL can be true.
// Deprecated: use UseLegacySQL.
UseStandardSQL bool
// If non-nil, the table is partitioned by time.
TimePartitioning *TimePartitioning
// Clustering specifies the data clustering configuration for the table.
Clustering *Clustering
// The time when this table expires. If set, this table will expire at the
// specified time. Expired tables will be deleted and their storage
// reclaimed. The zero value is ignored.
ExpirationTime time.Time
// User-provided labels.
Labels map[string]string
// Information about a table stored outside of BigQuery.
ExternalDataConfig *ExternalDataConfig
// Custom encryption configuration (e.g., Cloud KMS keys).
EncryptionConfig *EncryptionConfig
// All the fields below are read-only.
FullID string // An opaque ID uniquely identifying the table.
Type TableType
CreationTime time.Time
LastModifiedTime time.Time
@@ -57,89 +94,176 @@ type TableMetadata struct {
// This does not include data that is being buffered during a streaming insert.
NumBytes int64
// The number of bytes in the table considered "long-term storage" for reduced
// billing purposes. See https://cloud.google.com/bigquery/pricing#long-term-storage
// for more information.
NumLongTermBytes int64
// The number of rows of data in this table.
// This does not include data that is being buffered during a streaming insert.
NumRows uint64
// Contains information regarding this table's streaming buffer, if one is
// present. This field will be nil if the table is not being streamed to or if
// there is no data in the streaming buffer.
StreamingBuffer *StreamingBuffer
// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
// ensure that the metadata hasn't changed since it was read.
ETag string
}
// Tables is a group of tables. The tables may belong to differing projects or datasets.
type Tables []*Table
// CreateDisposition specifies the circumstances under which destination table will be created.
// TableCreateDisposition specifies the circumstances under which destination table will be created.
// Default is CreateIfNeeded.
type TableCreateDisposition string
const (
// The table will be created if it does not already exist. Tables are created atomically on successful completion of a job.
// CreateIfNeeded will create the table if it does not already exist.
// Tables are created atomically on successful completion of a job.
CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"
// The table must already exist and will not be automatically created.
// CreateNever ensures the table must already exist and will not be
// automatically created.
CreateNever TableCreateDisposition = "CREATE_NEVER"
)
func CreateDisposition(disp TableCreateDisposition) Option { return disp }
func (opt TableCreateDisposition) implementsOption() {}
func (opt TableCreateDisposition) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.CreateDisposition = string(opt)
}
func (opt TableCreateDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy) {
conf.CreateDisposition = string(opt)
}
func (opt TableCreateDisposition) customizeQuery(conf *bq.JobConfigurationQuery) {
conf.CreateDisposition = string(opt)
}
// TableWriteDisposition specifies how existing data in a destination table is treated.
// Default is WriteAppend.
type TableWriteDisposition string
const (
// Data will be appended to any existing data in the destination table.
// WriteAppend will append to any existing data in the destination table.
// Data is appended atomically on successful completion of a job.
WriteAppend TableWriteDisposition = "WRITE_APPEND"
// Existing data in the destination table will be overwritten.
// WriteTruncate overrides the existing data in the destination table.
// Data is overwritten atomically on successful completion of a job.
WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"
// Writes will fail if the destination table already contains data.
// WriteEmpty fails writes if the destination table already contains data.
WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
)
func WriteDisposition(disp TableWriteDisposition) Option { return disp }
func (opt TableWriteDisposition) implementsOption() {}
func (opt TableWriteDisposition) customizeLoad(conf *bq.JobConfigurationLoad) {
conf.WriteDisposition = string(opt)
}
func (opt TableWriteDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy) {
conf.WriteDisposition = string(opt)
}
func (opt TableWriteDisposition) customizeQuery(conf *bq.JobConfigurationQuery) {
conf.WriteDisposition = string(opt)
}
// TableType is the type of table.
type TableType string
const (
// RegularTable is a regular table.
RegularTable TableType = "TABLE"
// ViewTable is a table type describing that the table is a view. See more
// information at https://cloud.google.com/bigquery/docs/views.
ViewTable TableType = "VIEW"
// ExternalTable is a table type describing that the table is an external
// table (also known as a federated data source). See more information at
// https://cloud.google.com/bigquery/external-data-sources.
ExternalTable TableType = "EXTERNAL"
)
func (t *Table) implementsSource() {}
func (t *Table) implementsReadSource() {}
func (t *Table) implementsDestination() {}
func (ts Tables) implementsSource() {}
// TimePartitioning describes the time-based date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables.
type TimePartitioning struct {
// The amount of time to keep the storage for a partition.
// If the duration is empty (0), the data in the partitions does not expire.
Expiration time.Duration
// If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
// table is partitioned by this field. The field must be a top-level TIMESTAMP or
// DATE field. Its mode must be NULLABLE or REQUIRED.
Field string
// If true, queries that reference this table must include a filter (e.g. a WHERE predicate)
// that can be used for partition elimination.
RequirePartitionFilter bool
}
func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
if p == nil {
return nil
}
return &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: int64(p.Expiration / time.Millisecond),
Field: p.Field,
RequirePartitionFilter: p.RequirePartitionFilter,
}
}
func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
if q == nil {
return nil
}
return &TimePartitioning{
Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
Field: q.Field,
RequirePartitionFilter: q.RequirePartitionFilter,
}
}
// Clustering governs the organization of data within a partitioned table.
// For more information, see https://cloud.google.com/bigquery/docs/clustered-tables
type Clustering struct {
Fields []string
}
func (c *Clustering) toBQ() *bq.Clustering {
if c == nil {
return nil
}
return &bq.Clustering{
Fields: c.Fields,
}
}
func bqToClustering(q *bq.Clustering) *Clustering {
if q == nil {
return nil
}
return &Clustering{
Fields: q.Fields,
}
}
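// Editor's note: the sketch below is not part of the vendored source. It is a
// minimal illustration of how TimePartitioning and Clustering combine in the
// TableMetadata passed to Table.Create; the table handle, field names and the
// 90-day expiration are hypothetical.
func examplePartitionedClusteredCreate(ctx context.Context, t *Table) error {
	md := &TableMetadata{
		Schema: Schema{
			{Name: "event_time", Type: TimestampFieldType, Required: true},
			{Name: "customer_id", Type: StringFieldType},
			{Name: "amount", Type: FloatFieldType},
		},
		// Partition by the event_time column; partitions older than 90 days expire.
		TimePartitioning: &TimePartitioning{Field: "event_time", Expiration: 90 * 24 * time.Hour},
		// Cluster rows within each partition by customer_id.
		Clustering: &Clustering{Fields: []string{"customer_id"}},
	}
	return t.Create(ctx, md)
}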
// EncryptionConfig configures customer-managed encryption on tables.
type EncryptionConfig struct {
// Describes the Cloud KMS encryption key that will be used to protect
// destination BigQuery table. The BigQuery Service Account associated with your
// project requires access to this encryption key.
KMSKeyName string
}
func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration {
if e == nil {
return nil
}
return &bq.EncryptionConfiguration{
KmsKeyName: e.KMSKeyName,
}
}
func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig {
if q == nil {
return nil
}
return &EncryptionConfig{
KMSKeyName: q.KmsKeyName,
}
}
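// Editor's note: a small usage sketch, not part of the original file. The KMS
// key name below is a made-up placeholder; the BigQuery service account must
// be granted access to whatever key is actually used.
func exampleCreateWithCMEK(ctx context.Context, t *Table) error {
	return t.Create(ctx, &TableMetadata{
		Schema:           Schema{{Name: "payload", Type: StringFieldType}},
		EncryptionConfig: &EncryptionConfig{KMSKeyName: "projects/my-proj/locations/us/keyRings/ring/cryptoKeys/key"},
	})
}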
// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
// A lower-bound estimate of the number of bytes currently in the streaming
// buffer.
EstimatedBytes uint64
// A lower-bound estimate of the number of rows currently in the streaming
// buffer.
EstimatedRows uint64
// The time of the oldest entry in the streaming buffer.
OldestEntryTime time.Time
}
func (t *Table) toBQ() *bq.TableReference {
return &bq.TableReference{
ProjectId: t.ProjectID,
DatasetId: t.DatasetID,
@@ -157,125 +281,349 @@ func (t *Table) implicitTable() bool {
return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
}
func (t *Table) customizeLoadDst(conf *bq.JobConfigurationLoad) {
conf.DestinationTable = t.tableRefProto()
}
func (t *Table) customizeExtractSrc(conf *bq.JobConfigurationExtract) {
conf.SourceTable = t.tableRefProto()
}
func (t *Table) customizeCopyDst(conf *bq.JobConfigurationTableCopy) {
conf.DestinationTable = t.tableRefProto()
}
func (ts Tables) customizeCopySrc(conf *bq.JobConfigurationTableCopy) {
for _, t := range ts {
conf.SourceTables = append(conf.SourceTables, t.tableRefProto())
}
}
func (t *Table) customizeQueryDst(conf *bq.JobConfigurationQuery) {
if !t.implicitTable() {
conf.DestinationTable = t.tableRefProto()
}
}
func (t *Table) customizeReadSrc(cursor *readTableConf) {
cursor.projectID = t.ProjectID
cursor.datasetID = t.DatasetID
cursor.tableID = t.TableID
}
// Create creates a table in the BigQuery service.
// Pass in a TableMetadata value to configure the table.
// If tm.ViewQuery is non-empty, the created table will be of type VIEW.
// If no ExpirationTime is specified, the table will never expire.
// After table creation, a view can be modified only if its table was initially created
// with a view.
func (t *Table) Create(ctx context.Context, tm *TableMetadata) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Create")
defer func() { trace.EndSpan(ctx, err) }()
table, err := tm.toBQ()
if err != nil {
return err
}
table.TableReference = &bq.TableReference{
ProjectId: t.ProjectID,
DatasetId: t.DatasetID,
TableId: t.TableID,
}
req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx)
setClientHeader(req.Header())
_, err = req.Do()
return err
}
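// Editor's note: illustrative only, not part of the vendored source. It shows
// the two documented modes of Create: a regular table built from a Schema, and
// a view built from ViewQuery (the dataset, table names and query are made up).
func exampleCreateTableAndView(ctx context.Context, c *Client) error {
	ds := c.Dataset("analytics")
	if err := ds.Table("events").Create(ctx, &TableMetadata{
		Schema: Schema{{Name: "name", Type: StringFieldType}, {Name: "count", Type: IntegerFieldType}},
	}); err != nil {
		return err
	}
	// A view supplies ViewQuery instead of a Schema; providing both is an error.
	return ds.Table("events_view").Create(ctx, &TableMetadata{
		ViewQuery: "SELECT name, count FROM `analytics.events` WHERE count > 0",
	})
}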
func (tm *TableMetadata) toBQ() (*bq.Table, error) {
t := &bq.Table{}
if tm == nil {
return t, nil
}
if tm.Schema != nil && tm.ViewQuery != "" {
return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
}
t.FriendlyName = tm.Name
t.Description = tm.Description
t.Labels = tm.Labels
if tm.Schema != nil {
t.Schema = tm.Schema.toBQ()
}
if tm.ViewQuery != "" {
if tm.UseStandardSQL && tm.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
if tm.UseLegacySQL {
t.View.UseLegacySql = true
} else {
t.View.UseLegacySql = false
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
}
} else if tm.UseLegacySQL || tm.UseStandardSQL {
return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
}
t.TimePartitioning = tm.TimePartitioning.toBQ()
t.Clustering = tm.Clustering.toBQ()
if !validExpiration(tm.ExpirationTime) {
return nil, fmt.Errorf("invalid expiration time: %v.\n"+
"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
}
if !tm.ExpirationTime.IsZero() && tm.ExpirationTime != NeverExpire {
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
}
if tm.ExternalDataConfig != nil {
edc := tm.ExternalDataConfig.toBQ()
t.ExternalDataConfiguration = &edc
}
t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
if tm.FullID != "" {
return nil, errors.New("cannot set FullID on create")
}
if tm.Type != "" {
return nil, errors.New("cannot set Type on create")
}
if !tm.CreationTime.IsZero() {
return nil, errors.New("cannot set CreationTime on create")
}
if !tm.LastModifiedTime.IsZero() {
return nil, errors.New("cannot set LastModifiedTime on create")
}
if tm.NumBytes != 0 {
return nil, errors.New("cannot set NumBytes on create")
}
if tm.NumLongTermBytes != 0 {
return nil, errors.New("cannot set NumLongTermBytes on create")
}
if tm.NumRows != 0 {
return nil, errors.New("cannot set NumRows on create")
}
if tm.StreamingBuffer != nil {
return nil, errors.New("cannot set StreamingBuffer on create")
}
if tm.ETag != "" {
return nil, errors.New("cannot set ETag on create")
}
return t, nil
}
// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (md *TableMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Metadata")
defer func() { trace.EndSpan(ctx, err) }()
req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
setClientHeader(req.Header())
var table *bq.Table
err = runWithRetry(ctx, func() (err error) {
table, err = req.Do()
return err
})
if err != nil {
return nil, err
}
return bqToTableMetadata(table)
}
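// Editor's note: a brief sketch (not part of the original source) of reading
// table metadata; the table handle is assumed to be valid.
func exampleInspectMetadata(ctx context.Context, t *Table) error {
	md, err := t.Metadata(ctx)
	if err != nil {
		return err
	}
	// md.ETag can be passed to Update for optimistic concurrency control.
	fmt.Printf("%s: %d rows, %d bytes, type %s\n", md.FullID, md.NumRows, md.NumBytes, md.Type)
	return nil
}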
func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
md := &TableMetadata{
Description: t.Description,
Name: t.FriendlyName,
Type: TableType(t.Type),
FullID: t.Id,
Labels: t.Labels,
NumBytes: t.NumBytes,
NumLongTermBytes: t.NumLongTermBytes,
NumRows: t.NumRows,
ExpirationTime: unixMillisToTime(t.ExpirationTime),
CreationTime: unixMillisToTime(t.CreationTime),
LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
ETag: t.Etag,
EncryptionConfig: bqToEncryptionConfig(t.EncryptionConfiguration),
}
if t.Schema != nil {
md.Schema = bqToSchema(t.Schema)
}
if t.View != nil {
md.ViewQuery = t.View.Query
md.UseLegacySQL = t.View.UseLegacySql
}
md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
md.Clustering = bqToClustering(t.Clustering)
if t.StreamingBuffer != nil {
md.StreamingBuffer = &StreamingBuffer{
EstimatedBytes: t.StreamingBuffer.EstimatedBytes,
EstimatedRows: t.StreamingBuffer.EstimatedRows,
OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
}
}
if t.ExternalDataConfiguration != nil {
edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration)
if err != nil {
return nil, err
}
md.ExternalDataConfig = edc
}
return md, nil
}
// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Delete")
defer func() { trace.EndSpan(ctx, err) }()
req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
setClientHeader(req.Header())
return req.Do()
}
// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
return t.read(ctx, fetchPage)
}
func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
return newRowIterator(ctx, t, pf)
}
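// Editor's note: illustrative sketch, not part of the vendored file. It assumes
// "google.golang.org/api/iterator" is imported as iterator, which table.go does
// not do; the loop shows the usual RowIterator pattern for Table.Read.
func exampleReadAllRows(ctx context.Context, t *Table) error {
	it := t.Read(ctx)
	for {
		var row []Value
		err := it.Next(&row)
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(row)
	}
}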
// NeverExpire is a sentinel value used to remove a table's expiration time.
var NeverExpire = time.Time{}.Add(-1)
// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (md *TableMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Update")
defer func() { trace.EndSpan(ctx, err) }()
bqt, err := tm.toBQ()
if err != nil {
return nil, err
}
call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var res *bq.Table
if err := runWithRetry(ctx, func() (err error) {
res, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToTableMetadata(res)
}
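// Editor's note: not part of the original source; a sketch of the
// read-modify-write pattern Update supports via the etag argument.
func exampleGuardedUpdate(ctx context.Context, t *Table) error {
	md, err := t.Metadata(ctx)
	if err != nil {
		return err
	}
	update := TableMetadataToUpdate{Description: "refreshed nightly"}
	// Passing md.ETag makes the patch fail if the metadata changed since it was read.
	_, err = t.Update(ctx, update, md.ETag)
	return err
}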
func (tm *TableMetadataToUpdate) toBQ() (*bq.Table, error) {
t := &bq.Table{}
forceSend := func(field string) {
t.ForceSendFields = append(t.ForceSendFields, field)
}
if tm.Description != nil {
t.Description = optional.ToString(tm.Description)
forceSend("Description")
}
if tm.Name != nil {
t.FriendlyName = optional.ToString(tm.Name)
forceSend("FriendlyName")
}
if tm.Schema != nil {
t.Schema = tm.Schema.toBQ()
forceSend("Schema")
}
if tm.EncryptionConfig != nil {
t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
}
if !validExpiration(tm.ExpirationTime) {
return nil, fmt.Errorf("invalid expiration time: %v.\n"+
"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
}
if tm.ExpirationTime == NeverExpire {
t.NullFields = append(t.NullFields, "ExpirationTime")
} else if !tm.ExpirationTime.IsZero() {
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
forceSend("ExpirationTime")
}
if tm.TimePartitioning != nil {
t.TimePartitioning = tm.TimePartitioning.toBQ()
t.TimePartitioning.ForceSendFields = []string{"RequirePartitionFilter"}
if tm.TimePartitioning.Expiration == 0 {
t.TimePartitioning.NullFields = []string{"ExpirationMs"}
}
}
if tm.ViewQuery != nil {
t.View = &bq.ViewDefinition{
Query: optional.ToString(tm.ViewQuery),
ForceSendFields: []string{"Query"},
}
}
if tm.UseLegacySQL != nil {
if t.View == nil {
t.View = &bq.ViewDefinition{}
}
t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL)
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
}
labels, forces, nulls := tm.update()
t.Labels = labels
t.ForceSendFields = append(t.ForceSendFields, forces...)
t.NullFields = append(t.NullFields, nulls...)
return t, nil
}
// validExpiration ensures a specified time is either the sentinel NeverExpire,
// the zero value, or within the defined range of UnixNano. Internal
// representations of expiration times are based upon Time.UnixNano. Any time
// before 1678 or after 2262 cannot be represented by an int64 and is therefore
// undefined and invalid. See https://godoc.org/time#Time.UnixNano.
func validExpiration(t time.Time) bool {
return t == NeverExpire || t.IsZero() || time.Unix(0, t.UnixNano()).Equal(t)
}
// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
// The user-friendly description of this table.
Description optional.String
// The user-friendly name for this table.
Name optional.String
// The table's schema.
// When updating a schema, you can add columns but not remove them.
Schema Schema
// The table's encryption configuration. When calling Update, ensure that
// all mutable fields of EncryptionConfig are populated.
EncryptionConfig *EncryptionConfig
// The time when this table expires. To remove a table's expiration,
// set ExpirationTime to NeverExpire. The zero value is ignored.
ExpirationTime time.Time
// The query to use for a view.
ViewQuery optional.String
// Use Legacy SQL for the view query.
UseLegacySQL optional.Bool
// TimePartitioning allows modification of certain aspects of partition
// configuration such as partition expiration and whether partition
// filtration is required at query time. When calling Update, ensure
// that all mutable fields of TimePartitioning are populated.
TimePartitioning *TimePartitioning
labelUpdater
}
// labelUpdater contains common code for updating labels.
type labelUpdater struct {
setLabels map[string]string
deleteLabels map[string]bool
}
// SetLabel causes a label to be added or modified on a call to Update.
func (u *labelUpdater) SetLabel(name, value string) {
if u.setLabels == nil {
u.setLabels = map[string]string{}
}
u.setLabels[name] = value
}
// DeleteLabel causes a label to be deleted on a call to Update.
func (u *labelUpdater) DeleteLabel(name string) {
if u.deleteLabels == nil {
u.deleteLabels = map[string]bool{}
}
u.deleteLabels[name] = true
}
func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) {
if u.setLabels == nil && u.deleteLabels == nil {
return nil, nil, nil
}
labels = map[string]string{}
for k, v := range u.setLabels {
labels[k] = v
}
if len(labels) == 0 && len(u.deleteLabels) > 0 {
forces = []string{"Labels"}
}
for l := range u.deleteLabels {
nulls = append(nulls, "Labels."+l)
}
return labels, forces, nulls
}
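// Editor's note: a usage sketch, not part of the vendored code. Label names
// and values are arbitrary examples; SetLabel and DeleteLabel are recorded on
// the TableMetadataToUpdate and applied by a subsequent Table.Update call.
func exampleUpdateLabels(ctx context.Context, t *Table) error {
	var tm TableMetadataToUpdate
	tm.SetLabel("cost-center", "research") // add or overwrite
	tm.DeleteLabel("deprecated")           // remove entirely
	_, err := t.Update(ctx, tm, "")
	return err
}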

372
vendor/cloud.google.com/go/bigquery/table_test.go generated vendored Normal file
View File

@@ -0,0 +1,372 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"time"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
func TestBQToTableMetadata(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
aTimeMillis := aTime.UnixNano() / 1e6
for _, test := range []struct {
in *bq.Table
want *TableMetadata
}{
{&bq.Table{}, &TableMetadata{}}, // test minimal case
{
&bq.Table{
CreationTime: aTimeMillis,
Description: "desc",
Etag: "etag",
ExpirationTime: aTimeMillis,
FriendlyName: "fname",
Id: "id",
LastModifiedTime: uint64(aTimeMillis),
Location: "loc",
NumBytes: 123,
NumLongTermBytes: 23,
NumRows: 7,
StreamingBuffer: &bq.Streamingbuffer{
EstimatedBytes: 11,
EstimatedRows: 3,
OldestEntryTime: uint64(aTimeMillis),
},
TimePartitioning: &bq.TimePartitioning{
ExpirationMs: 7890,
Type: "DAY",
Field: "pfield",
},
Clustering: &bq.Clustering{
Fields: []string{"cfield1", "cfield2"},
},
EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
Type: "EXTERNAL",
View: &bq.ViewDefinition{Query: "view-query"},
Labels: map[string]string{"a": "b"},
ExternalDataConfiguration: &bq.ExternalDataConfiguration{
SourceFormat: "GOOGLE_SHEETS",
},
},
&TableMetadata{
Description: "desc",
Name: "fname",
ViewQuery: "view-query",
FullID: "id",
Type: ExternalTable,
Labels: map[string]string{"a": "b"},
ExternalDataConfig: &ExternalDataConfig{SourceFormat: GoogleSheets},
ExpirationTime: aTime.Truncate(time.Millisecond),
CreationTime: aTime.Truncate(time.Millisecond),
LastModifiedTime: aTime.Truncate(time.Millisecond),
NumBytes: 123,
NumLongTermBytes: 23,
NumRows: 7,
TimePartitioning: &TimePartitioning{
Expiration: 7890 * time.Millisecond,
Field: "pfield",
},
Clustering: &Clustering{
Fields: []string{"cfield1", "cfield2"},
},
StreamingBuffer: &StreamingBuffer{
EstimatedBytes: 11,
EstimatedRows: 3,
OldestEntryTime: aTime,
},
EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
ETag: "etag",
},
},
} {
got, err := bqToTableMetadata(test.in)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, test.want); diff != "" {
t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff)
}
}
}
func TestTableMetadataToBQ(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
aTimeMillis := aTime.UnixNano() / 1e6
sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
for _, test := range []struct {
in *TableMetadata
want *bq.Table
}{
{nil, &bq.Table{}},
{&TableMetadata{}, &bq.Table{}},
{
&TableMetadata{
Name: "n",
Description: "d",
Schema: sc,
ExpirationTime: aTime,
Labels: map[string]string{"a": "b"},
ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable},
EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
},
&bq.Table{
FriendlyName: "n",
Description: "d",
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
ExpirationTime: aTimeMillis,
Labels: map[string]string{"a": "b"},
ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"},
EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
},
},
{
&TableMetadata{ViewQuery: "q"},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
},
{
&TableMetadata{
ViewQuery: "q",
UseLegacySQL: true,
TimePartitioning: &TimePartitioning{},
},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: true,
},
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 0,
},
},
},
{
&TableMetadata{
ViewQuery: "q",
UseStandardSQL: true,
TimePartitioning: &TimePartitioning{
Expiration: time.Second,
Field: "ofDreams",
},
Clustering: &Clustering{
Fields: []string{"cfield1"},
},
},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 1000,
Field: "ofDreams",
},
Clustering: &bq.Clustering{
Fields: []string{"cfield1"},
},
},
},
{
&TableMetadata{ExpirationTime: NeverExpire},
&bq.Table{ExpirationTime: 0},
},
} {
got, err := test.in.toBQ()
if err != nil {
t.Fatalf("%+v: %v", test.in, err)
}
if diff := testutil.Diff(got, test.want); diff != "" {
t.Errorf("%+v:\n-got, +want:\n%s", test.in, diff)
}
}
// Errors
for _, in := range []*TableMetadata{
{Schema: sc, ViewQuery: "q"}, // can't have both schema and query
{UseLegacySQL: true}, // UseLegacySQL without query
{UseStandardSQL: true}, // UseStandardSQL without query
// read-only fields
{FullID: "x"},
{Type: "x"},
{CreationTime: aTime},
{LastModifiedTime: aTime},
{NumBytes: 1},
{NumLongTermBytes: 1},
{NumRows: 1},
{StreamingBuffer: &StreamingBuffer{}},
{ETag: "x"},
// expiration time outside allowable range is invalid
// See https://godoc.org/time#Time.UnixNano
{ExpirationTime: time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC).Add(-1)},
{ExpirationTime: time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC).Add(1)},
} {
_, err := in.toBQ()
if err == nil {
t.Errorf("%+v: got nil, want error", in)
}
}
}
func TestTableMetadataToUpdateToBQ(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
for _, test := range []struct {
tm TableMetadataToUpdate
want *bq.Table
}{
{
tm: TableMetadataToUpdate{},
want: &bq.Table{},
},
{
tm: TableMetadataToUpdate{
Description: "d",
Name: "n",
},
want: &bq.Table{
Description: "d",
FriendlyName: "n",
ForceSendFields: []string{"Description", "FriendlyName"},
},
},
{
tm: TableMetadataToUpdate{
Schema: Schema{fieldSchema("desc", "name", "STRING", false, true)},
ExpirationTime: aTime,
},
want: &bq.Table{
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
ExpirationTime: aTime.UnixNano() / 1e6,
ForceSendFields: []string{"Schema", "ExpirationTime"},
},
},
{
tm: TableMetadataToUpdate{ViewQuery: "q"},
want: &bq.Table{
View: &bq.ViewDefinition{Query: "q", ForceSendFields: []string{"Query"}},
},
},
{
tm: TableMetadataToUpdate{UseLegacySQL: false},
want: &bq.Table{
View: &bq.ViewDefinition{
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
},
{
tm: TableMetadataToUpdate{ViewQuery: "q", UseLegacySQL: true},
want: &bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: true,
ForceSendFields: []string{"Query", "UseLegacySql"},
},
},
},
{
tm: func() (tm TableMetadataToUpdate) {
tm.SetLabel("L", "V")
tm.DeleteLabel("D")
return tm
}(),
want: &bq.Table{
Labels: map[string]string{"L": "V"},
NullFields: []string{"Labels.D"},
},
},
{
tm: TableMetadataToUpdate{ExpirationTime: NeverExpire},
want: &bq.Table{
NullFields: []string{"ExpirationTime"},
},
},
{
tm: TableMetadataToUpdate{TimePartitioning: &TimePartitioning{Expiration: 0}},
want: &bq.Table{
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ForceSendFields: []string{"RequirePartitionFilter"},
NullFields: []string{"ExpirationMs"},
},
},
},
{
tm: TableMetadataToUpdate{TimePartitioning: &TimePartitioning{Expiration: time.Duration(time.Hour)}},
want: &bq.Table{
TimePartitioning: &bq.TimePartitioning{
ExpirationMs: 3600000,
Type: "DAY",
ForceSendFields: []string{"RequirePartitionFilter"},
},
},
},
} {
got, _ := test.tm.toBQ()
if !testutil.Equal(got, test.want) {
t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want)
}
}
}
func TestTableMetadataToUpdateToBQErrors(t *testing.T) {
// See https://godoc.org/time#Time.UnixNano
start := time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC)
end := time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC)
for _, test := range []struct {
desc string
aTime time.Time
wantErr bool
}{
{desc: "ignored zero value", aTime: time.Time{}, wantErr: false},
{desc: "earliest valid time", aTime: start, wantErr: false},
{desc: "latested valid time", aTime: end, wantErr: false},
{desc: "invalid times before 1678", aTime: start.Add(-1), wantErr: true},
{desc: "invalid times after 2262", aTime: end.Add(1), wantErr: true},
{desc: "valid times after 1678", aTime: start.Add(1), wantErr: false},
{desc: "valid times before 2262", aTime: end.Add(-1), wantErr: false},
} {
tm := &TableMetadataToUpdate{ExpirationTime: test.aTime}
_, err := tm.toBQ()
if test.wantErr && err == nil {
t.Errorf("[%s] got no error, want error", test.desc)
}
if !test.wantErr && err != nil {
t.Errorf("[%s] got error, want no error", test.desc)
}
}
}

View File

@@ -1,121 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"fmt"
"reflect"
"golang.org/x/net/context"
)
// An UploadOption is an optional argument to NewUploader.
type UploadOption interface {
customizeInsertRows(conf *insertRowsConf)
}
// An Uploader does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Uploader struct {
conf insertRowsConf
t *Table
}
// SkipInvalidRows returns an UploadOption that causes rows containing invalid data to be silently ignored.
// The default value is false, which causes the entire request to fail, if there is an attempt to insert an invalid row.
func SkipInvalidRows() UploadOption { return skipInvalidRows{} }
type skipInvalidRows struct{}
func (opt skipInvalidRows) customizeInsertRows(conf *insertRowsConf) {
conf.skipInvalidRows = true
}
// UploadIgnoreUnknownValues returns an UploadOption that causes values not matching the schema to be ignored.
// If this option is not used, records containing such values are treated as invalid records.
func UploadIgnoreUnknownValues() UploadOption { return uploadIgnoreUnknownValues{} }
type uploadIgnoreUnknownValues struct{}
func (opt uploadIgnoreUnknownValues) customizeInsertRows(conf *insertRowsConf) {
conf.ignoreUnknownValues = true
}
// A TableTemplateSuffix allows Uploaders to create tables automatically.
//
// Experimental: this option is experimental and may be modified or removed in future versions,
// regardless of any other documented package stability guarantees.
//
// When you specify a suffix, the table you upload data to
// will be used as a template for creating a new table, with the same schema,
// called <table> + <suffix>.
//
// More information is available at
// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
func TableTemplateSuffix(suffix string) UploadOption { return tableTemplateSuffix(suffix) }
type tableTemplateSuffix string
func (opt tableTemplateSuffix) customizeInsertRows(conf *insertRowsConf) {
conf.templateSuffix = string(opt)
}
// Put uploads one or more rows to the BigQuery service. src must implement ValueSaver or be a slice of ValueSavers.
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
func (u *Uploader) Put(ctx context.Context, src interface{}) error {
// TODO(mcgreevy): Support structs which do not implement ValueSaver as src, a la Datastore.
if saver, ok := src.(ValueSaver); ok {
return u.putMulti(ctx, []ValueSaver{saver})
}
srcVal := reflect.ValueOf(src)
if srcVal.Kind() != reflect.Slice {
return fmt.Errorf("%T is not a ValueSaver or slice of ValueSavers", src)
}
var savers []ValueSaver
for i := 0; i < srcVal.Len(); i++ {
s := srcVal.Index(i).Interface()
saver, ok := s.(ValueSaver)
if !ok {
return fmt.Errorf("element %d of src is of type %T, which is not a ValueSaver", i, s)
}
savers = append(savers, saver)
}
return u.putMulti(ctx, savers)
}
func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
var rows []*insertionRow
for _, saver := range src {
row, insertID, err := saver.Save()
if err != nil {
return err
}
rows = append(rows, &insertionRow{InsertID: insertID, Row: row})
}
return u.t.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &u.conf)
}
// An insertionRow represents a row of data to be inserted into a table.
type insertionRow struct {
// If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of
// this row on a best-effort basis.
InsertID string
// The data to be inserted, represented as a map from field name to Value.
Row map[string]Value
}

View File

@@ -1,234 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
)
type testSaver struct {
ir *insertionRow
err error
}
func (ts testSaver) Save() (map[string]Value, string, error) {
return ts.ir.Row, ts.ir.InsertID, ts.err
}
func TestRejectsNonValueSavers(t *testing.T) {
u := Uploader{t: defaultTable(nil)}
testCases := []struct {
src interface{}
}{
{
src: 1,
},
{
src: []int{1, 2},
},
{
src: []interface{}{
testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
1,
},
},
}
for _, tc := range testCases {
if err := u.Put(context.Background(), tc.src); err == nil {
t.Errorf("put value: %v; got err: %v; want nil", tc.src, err)
}
}
}
type insertRowsRecorder struct {
rowBatches [][]*insertionRow
service
}
func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
irr.rowBatches = append(irr.rowBatches, rows)
return nil
}
func TestInsertsData(t *testing.T) {
table := &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
}
testCases := []struct {
data [][]*insertionRow
}{
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
},
},
},
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
&insertionRow{"b", map[string]Value{"two": 2}},
},
},
},
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
},
{
&insertionRow{"b", map[string]Value{"two": 2}},
},
},
},
{
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
&insertionRow{"b", map[string]Value{"two": 2}},
},
{
&insertionRow{"c", map[string]Value{"three": 3}},
&insertionRow{"d", map[string]Value{"four": 4}},
},
},
},
}
for _, tc := range testCases {
irr := &insertRowsRecorder{}
table.service = irr
u := Uploader{t: table}
for _, batch := range tc.data {
if len(batch) == 0 {
continue
}
var toUpload interface{}
if len(batch) == 1 {
toUpload = testSaver{ir: batch[0]}
} else {
savers := []testSaver{}
for _, row := range batch {
savers = append(savers, testSaver{ir: row})
}
toUpload = savers
}
err := u.Put(context.Background(), toUpload)
if err != nil {
t.Errorf("expected successful Put of ValueSaver; got: %v", err)
}
}
if got, want := irr.rowBatches, tc.data; !reflect.DeepEqual(got, want) {
t.Errorf("got: %v, want: %v", got, want)
}
}
}
type uploadOptionRecorder struct {
received *insertRowsConf
service
}
func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
u.received = conf
return nil
}
func TestUploadOptionsPropagate(t *testing.T) {
// we don't care for the data in this testcase.
dummyData := testSaver{ir: &insertionRow{}}
tests := [...]struct {
opts []UploadOption
conf insertRowsConf
}{
{ // test zero options lead to zero value for insertRowsConf
},
{
opts: []UploadOption{
TableTemplateSuffix("suffix"),
},
conf: insertRowsConf{
templateSuffix: "suffix",
},
},
{
opts: []UploadOption{
UploadIgnoreUnknownValues(),
},
conf: insertRowsConf{
ignoreUnknownValues: true,
},
},
{
opts: []UploadOption{
SkipInvalidRows(),
},
conf: insertRowsConf{
skipInvalidRows: true,
},
},
{ // multiple upload options combine
opts: []UploadOption{
TableTemplateSuffix("suffix"),
SkipInvalidRows(),
UploadIgnoreUnknownValues(),
},
conf: insertRowsConf{
templateSuffix: "suffix",
skipInvalidRows: true,
ignoreUnknownValues: true,
},
},
}
for i, tc := range tests {
recorder := new(uploadOptionRecorder)
table := &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
service: recorder,
}
u := table.NewUploader(tc.opts...)
err := u.Put(context.Background(), dummyData)
if err != nil {
t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err)
}
if recorder.received == nil {
t.Fatalf("%d: received no options at all!", i)
}
want := tc.conf
got := *recorder.received
if got != want {
t.Errorf("%d: got %#v, want %#v, opts=%#v", i, got, want, tc.opts)
}
}
}

View File

@@ -1,54 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
var defaultGCS = &GCSReference{
uris: []string{"uri"},
}
var defaultQuery = &Query{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
}
func defaultTable(s service) *Table {
return &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
service: s,
}
}
type testService struct {
*bq.Job
service
}
func (s *testService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {
s.Job = job
return &Job{}, nil
}
func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
return &JobStatus{State: Done}, nil
}

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,11 +15,17 @@
package bigquery
import (
"encoding/base64"
"errors"
"fmt"
"math"
"math/big"
"reflect"
"strconv"
"strings"
"time"
"cloud.google.com/go/civil"
bq "google.golang.org/api/bigquery/v2"
)
@@ -27,17 +33,459 @@ import (
type Value interface{}
// ValueLoader stores a slice of Values representing a result row from a Read operation.
// See RowIterator.Next for more information.
type ValueLoader interface {
Load(v []Value, s Schema) error
}
// valueList converts a []Value to implement ValueLoader.
type valueList []Value
// Load stores a sequence of values in a valueList.
// It resets the slice length to zero, then appends each value to it.
func (vs *valueList) Load(v []Value, _ Schema) error {
*vs = append((*vs)[:0], v...)
return nil
}
// valueMap converts a map[string]Value to implement ValueLoader.
type valueMap map[string]Value
// Load stores a sequence of values in a valueMap.
func (vm *valueMap) Load(v []Value, s Schema) error {
if *vm == nil {
*vm = map[string]Value{}
}
loadMap(*vm, v, s)
return nil
}
func loadMap(m map[string]Value, vals []Value, s Schema) {
for i, f := range s {
val := vals[i]
var v interface{}
switch {
case val == nil:
v = val
case f.Schema == nil:
v = val
case !f.Repeated:
m2 := map[string]Value{}
loadMap(m2, val.([]Value), f.Schema)
v = m2
default: // repeated and nested
sval := val.([]Value)
vs := make([]Value, len(sval))
for j, e := range sval {
m2 := map[string]Value{}
loadMap(m2, e.([]Value), f.Schema)
vs[j] = m2
}
v = vs
}
m[f.Name] = v
}
}
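// Editor's note: not part of the original source. A tiny sketch of how a row
// with a nested record is flattened into nested maps by loadMap; the schema
// and values are made up.
func exampleLoadMapNested() map[string]Value {
	schema := Schema{
		{Name: "name", Type: StringFieldType},
		{Name: "address", Type: RecordFieldType, Schema: Schema{
			{Name: "city", Type: StringFieldType},
			{Name: "zip", Type: StringFieldType},
		}},
	}
	row := []Value{"Ada", []Value{"London", "NW1"}}
	m := map[string]Value{}
	loadMap(m, row, schema)
	// m is now {"name": "Ada", "address": {"city": "London", "zip": "NW1"}}.
	return m
}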
type structLoader struct {
typ reflect.Type // type of struct
err error
ops []structLoaderOp
vstructp reflect.Value // pointer to current struct value; changed by set
}
// A setFunc is a function that sets a struct field or slice/array
// element to a value.
type setFunc func(v reflect.Value, val interface{}) error
// A structLoaderOp instructs the loader to set a struct field to a row value.
type structLoaderOp struct {
fieldIndex []int
valueIndex int
setFunc setFunc
repeated bool
}
var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs")
func setAny(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.Set(reflect.ValueOf(x))
return nil
}
func setInt(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
xx := x.(int64)
if v.OverflowInt(xx) {
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
}
v.SetInt(xx)
return nil
}
func setUint(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
xx := x.(int64)
if xx < 0 || v.OverflowUint(uint64(xx)) {
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
}
v.SetUint(uint64(xx))
return nil
}
func setFloat(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
xx := x.(float64)
if v.OverflowFloat(xx) {
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
}
v.SetFloat(xx)
return nil
}
func setBool(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.SetBool(x.(bool))
return nil
}
func setString(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.SetString(x.(string))
return nil
}
func setGeography(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
v.SetString(x.(string))
return nil
}
func setBytes(v reflect.Value, x interface{}) error {
if x == nil {
v.SetBytes(nil)
} else {
v.SetBytes(x.([]byte))
}
return nil
}
func setNull(v reflect.Value, x interface{}, build func() interface{}) error {
if x == nil {
v.Set(reflect.Zero(v.Type()))
} else {
n := build()
v.Set(reflect.ValueOf(n))
}
return nil
}
// set remembers a value for the next call to Load. The value must be
// a pointer to a struct. (This is checked in RowIterator.Next.)
func (sl *structLoader) set(structp interface{}, schema Schema) error {
if sl.err != nil {
return sl.err
}
sl.vstructp = reflect.ValueOf(structp)
typ := sl.vstructp.Type().Elem()
if sl.typ == nil {
// First call: remember the type and compile the schema.
sl.typ = typ
ops, err := compileToOps(typ, schema)
if err != nil {
sl.err = err
return err
}
sl.ops = ops
} else if sl.typ != typ {
return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ)
}
return nil
}
// compileToOps produces a sequence of operations that will set the fields of a
// value of structType to the contents of a row with schema.
func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
var ops []structLoaderOp
fields, err := fieldCache.Fields(structType)
if err != nil {
return nil, err
}
for i, schemaField := range schema {
// Look for an exported struct field with the same name as the schema
// field, ignoring case (BigQuery column names are case-insensitive,
// and we want to act like encoding/json anyway).
structField := fields.Match(schemaField.Name)
if structField == nil {
// Ignore schema fields with no corresponding struct field.
continue
}
op := structLoaderOp{
fieldIndex: structField.Index,
valueIndex: i,
}
t := structField.Type
if schemaField.Repeated {
if t.Kind() != reflect.Slice && t.Kind() != reflect.Array {
return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s",
schemaField.Name, structField.Name, t)
}
t = t.Elem()
op.repeated = true
}
if schemaField.Type == RecordFieldType {
// Field can be a struct or a pointer to a struct.
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct",
structField.Name, structField.Type)
}
nested, err := compileToOps(t, schemaField.Schema)
if err != nil {
return nil, err
}
op.setFunc = func(v reflect.Value, val interface{}) error {
return setNested(nested, v, val)
}
} else {
op.setFunc = determineSetFunc(t, schemaField.Type)
if op.setFunc == nil {
return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s",
schemaField.Name, schemaField.Type, structField.Name, t)
}
}
ops = append(ops, op)
}
return ops, nil
}
// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.
func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
switch stype {
case StringFieldType:
if ftype.Kind() == reflect.String {
return setString
}
if ftype == typeOfNullString {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullString{StringVal: x.(string), Valid: true}
})
}
}
case GeographyFieldType:
if ftype.Kind() == reflect.String {
return setGeography
}
if ftype == typeOfNullGeography {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullGeography{GeographyVal: x.(string), Valid: true}
})
}
}
case BytesFieldType:
if ftype == typeOfByteSlice {
return setBytes
}
case IntegerFieldType:
if isSupportedUintType(ftype) {
return setUint
} else if isSupportedIntType(ftype) {
return setInt
}
if ftype == typeOfNullInt64 {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullInt64{Int64: x.(int64), Valid: true}
})
}
}
case FloatFieldType:
switch ftype.Kind() {
case reflect.Float32, reflect.Float64:
return setFloat
}
if ftype == typeOfNullFloat64 {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullFloat64{Float64: x.(float64), Valid: true}
})
}
}
case BooleanFieldType:
if ftype.Kind() == reflect.Bool {
return setBool
}
if ftype == typeOfNullBool {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullBool{Bool: x.(bool), Valid: true}
})
}
}
case TimestampFieldType:
if ftype == typeOfGoTime {
return setAny
}
if ftype == typeOfNullTimestamp {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullTimestamp{Timestamp: x.(time.Time), Valid: true}
})
}
}
case DateFieldType:
if ftype == typeOfDate {
return setAny
}
if ftype == typeOfNullDate {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullDate{Date: x.(civil.Date), Valid: true}
})
}
}
case TimeFieldType:
if ftype == typeOfTime {
return setAny
}
if ftype == typeOfNullTime {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullTime{Time: x.(civil.Time), Valid: true}
})
}
}
case DateTimeFieldType:
if ftype == typeOfDateTime {
return setAny
}
if ftype == typeOfNullDateTime {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullDateTime{DateTime: x.(civil.DateTime), Valid: true}
})
}
}
case NumericFieldType:
if ftype == typeOfRat {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} { return x.(*big.Rat) })
}
}
}
return nil
}
func (sl *structLoader) Load(values []Value, _ Schema) error {
if sl.err != nil {
return sl.err
}
return runOps(sl.ops, sl.vstructp.Elem(), values)
}
// runOps executes a sequence of ops, setting the fields of vstruct to the
// supplied values.
func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
for _, op := range ops {
field := vstruct.FieldByIndex(op.fieldIndex)
var err error
if op.repeated {
err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc)
} else {
err = op.setFunc(field, values[op.valueIndex])
}
if err != nil {
return err
}
}
return nil
}
func setNested(ops []structLoaderOp, v reflect.Value, val interface{}) error {
// v is either a struct or a pointer to a struct.
if v.Kind() == reflect.Ptr {
// If the value is nil, set the pointer to nil.
if val == nil {
v.Set(reflect.Zero(v.Type()))
return nil
}
// If the pointer is nil, set it to a zero struct value.
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
return runOps(ops, v, val.([]Value))
}
func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
vlen := len(vslice)
var flen int
switch field.Type().Kind() {
case reflect.Slice:
// Make a slice of the right size, avoiding allocation if possible.
switch {
case field.Len() < vlen:
field.Set(reflect.MakeSlice(field.Type(), vlen, vlen))
case field.Len() > vlen:
field.SetLen(vlen)
}
flen = vlen
case reflect.Array:
flen = field.Len()
if flen > vlen {
// Set extra elements to their zero value.
z := reflect.Zero(field.Type().Elem())
for i := vlen; i < flen; i++ {
field.Index(i).Set(z)
}
}
default:
return fmt.Errorf("bigquery: impossible field type %s", field.Type())
}
for i, val := range vslice {
if i < flen { // avoid writing past the end of a short array
if err := setElem(field.Index(i), val); err != nil {
return err
}
}
}
return nil
}
@@ -61,7 +509,7 @@ type ValuesSaver struct {
Row []Value
}
// Save implements ValueSaver.
func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
m, err := valuesToMap(vls.Row, vls.Schema)
return m, vls.InsertID, err
@@ -69,28 +517,257 @@ func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
if len(vs) != len(schema) {
return nil, errors.New("Schema does not match length of row to be inserted")
return nil, errors.New("schema does not match length of row to be inserted")
}
m := make(map[string]Value)
for i, fieldSchema := range schema {
if vs[i] == nil {
m[fieldSchema.Name] = nil
continue
}
if fieldSchema.Type != RecordFieldType {
m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema)
continue
}
// Nested record, possibly repeated.
vals, ok := vs[i].([]Value)
if !ok {
return nil, errors.New("nested record is not a []Value")
}
if !fieldSchema.Repeated {
value, err := valuesToMap(vals, fieldSchema.Schema)
if err != nil {
return nil, err
}
m[fieldSchema.Name] = value
continue
}
// A repeated nested field is converted into a slice of maps.
var maps []Value
for _, v := range vals {
sv, ok := v.([]Value)
if !ok {
return nil, errors.New("nested record in slice is not a []Value")
}
value, err := valuesToMap(sv, fieldSchema.Schema)
if err != nil {
return nil, err
}
maps = append(maps, value)
}
m[fieldSchema.Name] = maps
}
return m, nil
}
// StructSaver implements ValueSaver for a struct.
// The struct is converted to a map of values by using the values of struct
// fields corresponding to schema fields. Additional and missing
// fields are ignored, as are nested struct pointers that are nil.
type StructSaver struct {
// Schema determines what fields of the struct are uploaded. It should
// match the table's schema.
// Schema is optional for StructSavers that are passed to Uploader.Put.
Schema Schema
// If non-empty, BigQuery will use InsertID to de-duplicate insertions
// of this row on a best-effort basis.
InsertID string
// Struct should be a struct or a pointer to a struct.
Struct interface{}
}
// Save implements ValueSaver.
func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) {
vstruct := reflect.ValueOf(ss.Struct)
row, err = structToMap(vstruct, ss.Schema)
if err != nil {
return nil, "", err
}
return row, ss.InsertID, nil
}
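// Editor's note: a sketch that is not part of the vendored file. It shows how a
// StructSaver turns a Go struct into the row map that the streaming-insert path
// uploads; the struct type and schema are invented, and in real use the saver
// would be passed to an Uploader's Put method rather than saved by hand.
func exampleStructSaverRow() (map[string]Value, string, error) {
	type purchase struct {
		Item  string
		Count int64
	}
	schema := Schema{
		{Name: "Item", Type: StringFieldType},
		{Name: "Count", Type: IntegerFieldType},
	}
	ss := &StructSaver{
		Schema:   schema,
		InsertID: "purchase-0001", // best-effort de-duplication key
		Struct:   purchase{Item: "notebook", Count: 3},
	}
	return ss.Save() // row map keyed by schema field names, plus the insert ID
}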
func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) {
if vstruct.Kind() == reflect.Ptr {
vstruct = vstruct.Elem()
}
if !vstruct.IsValid() {
return nil, nil
}
m := map[string]Value{}
if vstruct.Kind() != reflect.Struct {
return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type())
}
fields, err := fieldCache.Fields(vstruct.Type())
if err != nil {
return nil, err
}
for _, schemaField := range schema {
// Look for an exported struct field with the same name as the schema
// field, ignoring case.
structField := fields.Match(schemaField.Name)
if structField == nil {
continue
}
val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField)
if err != nil {
return nil, err
}
// Add the value to the map, unless it is nil.
if val != nil {
m[schemaField.Name] = val
}
}
return m, nil
}
// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using
// the schemaField as a guide.
// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its
// caller can easily identify a nil value.
func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) {
if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) {
return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s",
schemaField.Name, vfield.Type())
}
// A non-nested field can be represented by its Go value, except for some types.
if schemaField.Type != RecordFieldType {
return toUploadValueReflect(vfield, schemaField), nil
}
// A non-repeated nested field is converted into a map[string]Value.
if !schemaField.Repeated {
m, err := structToMap(vfield, schemaField.Schema)
if err != nil {
return nil, err
}
if m == nil {
return nil, nil
}
return m, nil
}
// A repeated nested field is converted into a slice of maps.
// If the field is zero-length (but not nil), we return a zero-length []Value.
if vfield.IsNil() {
return nil, nil
}
vals := []Value{}
for i := 0; i < vfield.Len(); i++ {
m, err := structToMap(vfield.Index(i), schemaField.Schema)
if err != nil {
return nil, err
}
vals = append(vals, m)
}
return vals, nil
}
func toUploadValue(val interface{}, fs *FieldSchema) interface{} {
if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType || fs.Type == NumericFieldType {
return toUploadValueReflect(reflect.ValueOf(val), fs)
}
return val
}
func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
switch fs.Type {
case TimeFieldType:
if v.Type() == typeOfNullTime {
return v.Interface()
}
return formatUploadValue(v, fs, func(v reflect.Value) string {
return CivilTimeString(v.Interface().(civil.Time))
})
case DateTimeFieldType:
if v.Type() == typeOfNullDateTime {
return v.Interface()
}
return formatUploadValue(v, fs, func(v reflect.Value) string {
return CivilDateTimeString(v.Interface().(civil.DateTime))
})
case NumericFieldType:
if r, ok := v.Interface().(*big.Rat); ok && r == nil {
return nil
}
return formatUploadValue(v, fs, func(v reflect.Value) string {
return NumericString(v.Interface().(*big.Rat))
})
default:
if !fs.Repeated || v.Len() > 0 {
return v.Interface()
}
// The service treats a null repeated field as an error. Return
// nil to omit the field entirely.
return nil
}
}
func formatUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} {
if !fs.Repeated {
return cvt(v)
}
if v.Len() == 0 {
return nil
}
s := make([]string, v.Len())
for i := 0; i < v.Len(); i++ {
s[i] = cvt(v.Index(i))
}
return s
}
// CivilTimeString returns a string representing a civil.Time in a format compatible
// with BigQuery SQL. It rounds the time to the nearest microsecond and returns a
// string with six digits of sub-second precision.
//
// Use CivilTimeString when using civil.Time in DML, for example in INSERT
// statements.
func CivilTimeString(t civil.Time) string {
if t.Nanosecond == 0 {
return t.String()
}
micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
t.Nanosecond = 0
return t.String() + fmt.Sprintf(".%06d", micro)
}
// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible
// with BigQuery SQL. It separates the date and time with a space, and formats the time
// with CivilTimeString.
//
// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT
// statements.
func CivilDateTimeString(dt civil.DateTime) string {
return dt.Date.String() + " " + CivilTimeString(dt.Time)
}
// parseCivilDateTime parses a date-time represented in a BigQuery SQL
// compatible format and returns a civil.DateTime.
func parseCivilDateTime(s string) (civil.DateTime, error) {
parts := strings.Fields(s)
if len(parts) != 2 {
return civil.DateTime{}, fmt.Errorf("bigquery: bad DATETIME value %q", s)
}
return civil.ParseDateTime(parts[0] + "T" + parts[1])
}
const (
// NumericPrecisionDigits is the maximum number of digits in a NUMERIC value.
NumericPrecisionDigits = 38
// NumericScaleDigits is the maximum number of digits after the decimal point in a NUMERIC value.
NumericScaleDigits = 9
)
// NumericString returns a string representing a *big.Rat in a format compatible
// with BigQuery SQL. It returns a floating-point literal with 9 digits
// after the decimal point.
func NumericString(r *big.Rat) string {
return r.FloatString(NumericScaleDigits)
}
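// Editor's note: not part of the original source; a short sketch of using the
// helpers above when hand-building a DML statement (table name and values are
// placeholders, and in practice parameterized queries are usually preferable).
func exampleDMLLiterals() string {
	dt := civil.DateTime{
		Date: civil.Date{Year: 2019, Month: time.October, Day: 12},
		Time: civil.Time{Hour: 11, Minute: 30, Second: 0, Nanosecond: 123456789},
	}
	price := big.NewRat(1999, 100) // 19.99 as a NUMERIC value
	return fmt.Sprintf(
		"INSERT `ds.orders` (ordered_at, price) VALUES ('%s', NUMERIC '%s')",
		CivilDateTimeString(dt), // "2019-10-12 11:30:00.123457"
		NumericString(price),    // "19.990000000"
	)
}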
// convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the
// length of each row.
@@ -164,7 +841,6 @@ func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, erro
for i, cell := range record {
// each cell contains a single entry, keyed by "v"
val := cell.(map[string]interface{})["v"]
fs := schema[i]
v, err := convertValue(val, fs.Type, fs.Schema)
if err != nil {
@@ -180,16 +856,37 @@ func convertBasicType(val string, typ FieldType) (Value, error) {
switch typ {
case StringFieldType:
return val, nil
case BytesFieldType:
return base64.StdEncoding.DecodeString(val)
case IntegerFieldType:
return strconv.ParseInt(val, 10, 64)
case FloatFieldType:
return strconv.ParseFloat(val, 64)
case BooleanFieldType:
return strconv.ParseBool(val)
case TimestampFieldType:
f, err := strconv.ParseFloat(val, 64)
if err != nil {
return nil, err
}
secs := math.Trunc(f)
nanos := (f - secs) * 1e9
return Value(time.Unix(int64(secs), int64(nanos)).UTC()), nil
case DateFieldType:
return civil.ParseDate(val)
case TimeFieldType:
return civil.ParseTime(val)
case DateTimeFieldType:
return civil.ParseDateTime(val)
case NumericFieldType:
r, ok := (&big.Rat{}).SetString(val)
if !ok {
return nil, fmt.Errorf("bigquery: invalid NUMERIC value %q", val)
}
return Value(r), nil
case GeographyFieldType:
return val, nil
default:
return nil, errors.New("unrecognized type")
return nil, fmt.Errorf("unrecognized type: %s", typ)
}
}

File diff suppressed because it is too large