Mirror of https://github.com/dutchcoders/transfer.sh.git, synced 2020-11-18 19:53:40 -08:00

Commit: Bump google.golang.org/api

This commit is contained in:
parent 8e39b7fa01
commit ec086b4eb3
vendor/cloud.google.com/go/.travis.yml (generated, vendored, 23 lines deleted)

@@ -1,23 +0,0 @@
-sudo: false
-language: go
-go:
-- 1.6.x
-- 1.7.x
-- 1.8.x
-- 1.9.x
-- 1.10.x
-install:
-- go get -v cloud.google.com/go/...
-script:
-- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d
-- tar xvf keys.tar
-- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762"
-  GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json"
-  GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests"
-  GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json"
-  GCLOUD_TESTS_GOLANG_KEYRING="projects/dulcet-port-762/locations/us/keyRings/go-integration-test"
-  ./run-tests.sh $TRAVIS_COMMIT
-env:
-  matrix:
-    # The GCLOUD_TESTS_API_KEY environment variable.
-    secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI=
vendor/cloud.google.com/go/CHANGES.md (generated, vendored, new file, 1122 lines)

File diff suppressed because it is too large.
vendor/cloud.google.com/go/CODE_OF_CONDUCT.md (generated, vendored, new file, 44 lines)

@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing other's private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+
vendor/cloud.google.com/go/CONTRIBUTING.md (generated, vendored, 179 lines changed)

@@ -1,110 +1,177 @@
 # Contributing
 
 1. Sign one of the contributor license agreements below.
-1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
+1. `go get golang.org/x/review/git-codereview` to install the code reviewing
+   tool.
 1. You will need to ensure that your `GOBIN` directory (by default
    `$GOPATH/bin`) is in your `PATH` so that git can find the command.
 1. If you would like, you may want to set up aliases for git-codereview,
    such that `git codereview change` becomes `git change`. See the
    [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
 1. Should you run into issues with the git-codereview tool, please note
-   that all error messages will assume that you have set up these
-   aliases.
+   that all error messages will assume that you have set up these aliases.
 1. Get the cloud package by running `go get -d cloud.google.com/go`.
-1. If you have already checked out the source, make sure that the remote git
-   origin is https://code.googlesource.com/gocloud:
+1. If you have already checked out the source, make sure that the remote
+   git origin is https://code.googlesource.com/gocloud:
+
+   ```
    git remote set-url origin https://code.googlesource.com/gocloud
+   ```
+
 1. Make sure your auth is configured correctly by visiting
-   https://code.googlesource.com, clicking "Generate Password", and following
-   the directions.
+   https://code.googlesource.com, clicking "Generate Password", and following the
+   directions.
 1. Make changes and create a change by running `git codereview change <name>`,
    provide a commit message, and use `git codereview mail` to create a Gerrit CL.
-1. Keep amending to the change with `git codereview change` and mail as your receive
-   feedback. Each new mailed amendment will create a new patch set for your change in Gerrit.
+1. Keep amending to the change with `git codereview change` and mail as your
+   receive feedback. Each new mailed amendment will create a new patch set for
+   your change in Gerrit.
 
 ## Integration Tests
 
-In addition to the unit tests, you may run the integration test suite.
+In addition to the unit tests, you may run the integration test suite. These
+directions describe setting up your environment to run integration tests for
+_all_ packages: note that many of these instructions may be redundant if you
+intend only to run integration tests on a single package.
 
-To run the integrations tests, creating and configuration of a project in the
-Google Developers Console is required.
+#### GCP Setup
 
-After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
-Ensure the project-level **Owner**
-[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the
-service account. Alternatively, the account can be granted all of the following roles:
-- **Editor**
-- **Logs Configuration Writer**
-- **PubSub Admin**
+To run the integrations tests, creation and configuration of two projects in
+the Google Developers Console is required: one specifically for Firestore
+integration tests, and another for all other integration tests. We'll refer to
+these projects as "general project" and "Firestore project".
 
-Once you create a project, set the following environment variables to be able to
-run the against the actual APIs.
+After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
+for each project. Ensure the project-level **Owner**
+[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to
+each service account. During the creation of the service account, you should
+download the JSON credential file for use later.
 
-- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
-- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
+Next, ensure the following APIs are enabled in the general project:
 
-Some packages require additional environment variables to be set:
+- BigQuery API
+- BigQuery Data Transfer API
+- Cloud Dataproc API
+- Cloud Dataproc Control API Private
+- Cloud Datastore API
+- Cloud Firestore API
+- Cloud Key Management Service (KMS) API
+- Cloud Natural Language API
+- Cloud OS Login API
+- Cloud Pub/Sub API
+- Cloud Resource Manager API
+- Cloud Spanner API
+- Cloud Speech API
+- Cloud Translation API
+- Cloud Video Intelligence API
+- Cloud Vision API
+- Compute Engine API
+- Compute Engine Instance Group Manager API
+- Container Registry API
+- Firebase Rules API
+- Google Cloud APIs
+- Google Cloud Deployment Manager V2 API
+- Google Cloud SQL
+- Google Cloud Storage
+- Google Cloud Storage JSON API
+- Google Compute Engine Instance Group Updater API
+- Google Compute Engine Instance Groups API
+- Kubernetes Engine API
+- Stackdriver Error Reporting API
 
-- firestore
-  - **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: project ID for Firestore.
-  - **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
-- storage
-  - **GCLOUD_TESTS_GOLANG_KEYRING**: The full name of the keyring for the tests, in the
-    form "projects/P/locations/L/keyRings/R".
-- translate
-  - **GCLOUD_TESTS_API_KEY**: API key for using the Translate API.
-- profiler
-  - **GCLOUD_TESTS_GOLANG_ZONE**: Compute Engine zone.
+Next, create a Datastore database in the general project, and a Firestore
+database in the Firestore project.
 
-Install the [gcloud command-line tool][gcloudcli] to your machine and use it
-to create some resources used in integration tests.
+Finally, in the general project, create an API key for the translate API:
+
+- Go to GCP Developer Console.
+- Navigate to APIs & Services > Credentials.
+- Click Create Credentials > API Key.
+- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
+
+#### Local Setup
+
+Once the two projects are created and configured, set the following environment
+variables:
+
+- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
+  bamboo-shift-455) for the general project.
+- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
+  project's service account.
+- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
+  (e.g. doorway-cliff-677) for the Firestore project.
+- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
+  Firestore project's service account.
+- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
+  in the form
+  "projects/P/locations/L/keyRings/R". The creation of this is described below.
+- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API.
+- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
+
+Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
+create some resources used in integration tests.
 
 From the project's root directory:
 
 ``` sh
-# Set the default project in your env.
+# Sets the default project in your env.
 $ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
 
-# Authenticate the gcloud tool with your account.
+# Authenticates the gcloud tool with your account.
 $ gcloud auth login
 
 # Create the indexes used in the datastore integration tests.
-$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
+$ gcloud datastore indexes create datastore/testdata/index.yaml
 
-# Create a Google Cloud storage bucket with the same name as your test project,
+# Creates a Google Cloud storage bucket with the same name as your test project,
 # and with the Stackdriver Logging service account as owner, for the sink
 # integration tests in logging.
 $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
 $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
 
-# Create a PubSub topic for integration tests of storage notifications.
+# Creates a PubSub topic for integration tests of storage notifications.
 $ gcloud beta pubsub topics create go-storage-notification-test
+# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
+# "service-<numberic project id>@gs-project-accounts.iam.gserviceaccount.com"
+# as a publisher to that topic.
 
-# Create a Spanner instance for the spanner integration tests.
-$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test'
+# Creates a Spanner instance for the spanner integration tests.
+$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test'
-# NOTE: Spanner instances are priced by the node-hour, so you may want to delete
-# the instance after testing with 'gcloud beta spanner instances delete'.
+# NOTE: Spanner instances are priced by the node-hour, so you may want to
+# delete the instance after testing with 'gcloud beta spanner instances delete'.
 
-# For Storage integration tests:
-# Enable KMS for your project in the Cloud Console.
-# Create a KMS keyring, in the same location as the default location for your project's buckets.
-$ gcloud kms keyrings create MY_KEYRING --location MY_LOCATION
-# Create two keys in the keyring, named key1 and key2.
-$ gcloud kms keys create key1 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption
-$ gcloud kms keys create key2 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption
-# As mentioned above, set the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
-$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/MY_LOCATION/keyRings/MY_KEYRING
-# Authorize Google Cloud Storage to encrypt and decrypt using key1.
+$ export MY_KEYRING=some-keyring-name
+$ export MY_LOCATION=global
+# Creates a KMS keyring, in the same location as the default location for your
+# project's buckets.
+$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
+# Creates two keys in the keyring, named key1 and key2.
+$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
+$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
+# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
+$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
+# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
 gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
 ```
 
-Once you've done the necessary setup, you can run the integration tests by running:
+#### Running
+
+Once you've done the necessary setup, you can run the integration tests by
+running:
 
 ``` sh
 $ go test -v cloud.google.com/go/...
 ```
 
+#### Replay
+
+Some packages can record the RPCs during integration tests to a file for
+subsequent replay. To record, pass the `-record` flag to `go test`. The
+recording will be saved to the _package_`.replay` file. To replay integration
+tests from a saved recording, the replay file must be present, the `-short`
+flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
+environment variable must have a non-empty value.
+
 ## Contributor License Agreements
 
 Before we can accept your pull requests you'll need to sign a Contributor
vendor/cloud.google.com/go/MIGRATION.md (generated, vendored, 54 lines deleted)

@@ -1,54 +0,0 @@
-# Code Changes
-
-## v0.10.0
-
-- pubsub: Replace
-
-    ```
-    sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
-    ```
-
-  with
-
-    ```
-    sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
-        PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
-    })
-    ```
-
-- trace: traceGRPCServerInterceptor will be provided from *trace.Client.
-  Given an initialized `*trace.Client` named `tc`, instead of
-
-    ```
-    s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
-    ```
-
-  write
-
-    ```
-    s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
-    ```
-
-- trace trace.GRPCClientInterceptor will also provided from *trace.Client.
-  Instead of
-
-    ```
-    conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
-    ```
-
-  write
-
-    ```
-    conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
-    ```
-
-- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
-  interceptor as a dial option as shown below when initializing Cloud package
-  clients:
-
-    ```
-    c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
-    if err != nil {
-        ...
-    }
-    ```
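The deleted MIGRATION notes above show only fragments. For context, here is a minimal, self-contained sketch of the post-v0.10.0 pubsub call they describe; the project and subscription IDs are placeholders, and this illustration is not part of the commit itself:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "project-id") // placeholder project
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	sub := client.Subscription("my-sub") // placeholder subscription

	// Post-v0.10.0 replacement for the removed sub.ModifyPushConfig.
	_, err = sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
		PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```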
vendor/cloud.google.com/go/README.md (generated, vendored, 303 lines changed)

@@ -26,257 +26,61 @@ make backwards-incompatible changes.
 * [Cloud Datastore](#cloud-datastore-)
 * [Cloud Storage](#cloud-storage-)
 * [Cloud Pub/Sub](#cloud-pub-sub-)
-* [Cloud BigQuery](#cloud-bigquery-)
+* [BigQuery](#cloud-bigquery-)
 * [Stackdriver Logging](#stackdriver-logging-)
 * [Cloud Spanner](#cloud-spanner-)
 
 
 ## News
 
-_May 18, 2018_
+_7 August 2018_
 
-*v0.23.0*
-
-- bigquery: Add DDL stats to query statistics.
-- bigtable:
-  - cbt: Add cells-per-column limit for row lookup.
-  - cbt: Make it possible to combine read filters.
-- dlp: v2beta2 client removed. Use the v2 client instead.
-- firestore, spanner: Fix compilation errors due to protobuf changes.
+As of November 1, the code in the repo will no longer support Go versions 1.8
+and earlier. No one other than AppEngine users should be on those old versions,
+and AppEngine
+[Standard](https://groups.google.com/forum/#!topic/google-appengine-go/e7oPNomd7ak)
+and
+[Flex](https://groups.google.com/forum/#!topic/google-appengine-go/wHsYtxvEbXI)
+will stop supporting new deployments with those versions on that date.
 
-_May 8, 2018_
-
-*v0.22.0*
-
-- bigtable:
-  - cbt: Support cells per column limit for row read.
-  - bttest: Correctly handle empty RowSet.
-  - Fix ReadModifyWrite operation in emulator.
-  - Fix API path in GetCluster.
-
-- bigquery:
-  - BEHAVIOR CHANGE: Retry on 503 status code.
-  - Add dataset.DeleteWithContents.
-  - Add SchemaUpdateOptions for query jobs.
-  - Add Timeline to QueryStatistics.
-  - Add more stats to ExplainQueryStage.
-  - Support Parquet data format.
-
-- datastore:
-  - Support omitempty for times.
-
-- dlp:
-  - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client,
-    which is now out of beta.
-  - Add v2 client.
-
-- firestore:
-  - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid.
-
-- iam:
-  - Support JWT signing via SignJwt callopt.
-
-- profiler:
-  - BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done.
-  - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute.
-  - Avoid returning empty serial port output.
-
-- pubsub:
-  - BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy.
-  - BEHAVIOR CHANGE: Don't backoff on EOF.
-  - pstest: Support Acknowledge and ModifyAckDeadline RPCs.
-
-- redis:
-  - Add v1 beta Redis client.
-
-- spanner:
-  - Support SessionLabels.
-
-- speech:
-  - Add api v1 beta1 client.
-
-- storage:
-  - BEHAVIOR CHANGE: Retry reads when retryable error occurs.
-  - Fix delete of object in requester-pays bucket.
-  - Support KMS integration.
-
-_April 9, 2018_
-
-*v0.21.0*
-
-- bigquery:
-  - Add OpenCensus tracing.
-
-- firestore:
-  - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot
-    whose Exists method returns false. DocumentRef.Get and Transaction.Get
-    return the non-nil DocumentSnapshot in addition to a NotFound error.
-    **DocumentRef.GetAll and Transaction.GetAll return a non-nil
-    DocumentSnapshot instead of nil.**
-  - Add DocumentIterator.Stop. **Call Stop whenever you are done with a
-    DocumentIterator.**
-  - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime
-    notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen.
-  - Canceling an RPC now always returns a grpc.Status with codes.Canceled.
-
-- spanner:
-  - Add `CommitTimestamp`, which supports inserting the commit timestamp of a
-    transaction into a column.
-
-_March 22, 2018_
-
-*v0.20.0*
-
-- bigquery: Support SchemaUpdateOptions for load jobs.
-
-- bigtable:
-  - Add SampleRowKeys.
-  - cbt: Support union, intersection GCPolicy.
-  - Retry admin RPCS.
-  - Add trace spans to retries.
-
-- datastore: Add OpenCensus tracing.
-
-- firestore:
-  - Fix queries involving Null and NaN.
-  - Allow Timestamp protobuffers for time values.
-
-- logging: Add a WriteTimeout option.
-
-- spanner: Support Batch API.
-
-- storage: Add OpenCensus tracing.
-
-_February 26, 2018_
-
-*v0.19.0*
-
-- bigquery:
-  - Support customer-managed encryption keys.
-
-- bigtable:
-  - Improved emulator support.
-  - Support GetCluster.
-
-- datastore:
-  - Add general mutations.
-  - Support pointer struct fields.
-  - Support transaction options.
-
-- firestore:
-  - Add Transaction.GetAll.
-  - Support document cursors.
-
-- logging:
-  - Support concurrent RPCs to the service.
-  - Support per-entry resources.
-
-- profiler:
-  - Add config options to disable heap and thread profiling.
-  - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set.
-
-- pubsub:
-  - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the
-    callback returns).
-  - Add SubscriptionInProject.
-  - Add OpenCensus instrumentation for streaming pull.
-
-- storage:
-  - Support CORS.
-
-_January 18, 2018_
-
-*v0.18.0*
-
-- bigquery:
-  - Marked stable.
-  - Schema inference of nullable fields supported.
-  - Added TimePartitioning to QueryConfig.
-
-- firestore: Data provided to DocumentRef.Set with a Merge option can contain
-  Delete sentinels.
-
-- logging: Clients can accept parent resources other than projects.
-
-- pubsub:
-  - pubsub/pstest: A lighweight fake for pubsub. Experimental; feedback welcome.
-  - Support updating more subscription metadata: AckDeadline,
-    RetainAckedMessages and RetentionDuration.
-
-- oslogin/apiv1beta: New client for the Cloud OS Login API.
-
-- rpcreplay: A package for recording and replaying gRPC traffic.
-
-- spanner:
-  - Add a ReadWithOptions that supports a row limit, as well as an index.
-  - Support query plan and execution statistics.
-  - Added [OpenCensus](http://opencensus.io) support.
-
-- storage: Clarify checksum validation for gzipped files (it is not validated
-  when the file is served uncompressed).
-
-_December 11, 2017_
-
-*v0.17.0*
-
-- firestore BREAKING CHANGES:
-  - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update.
-    Change
-    `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})`
-    to
-    `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})`
-
-    Change
-    `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)`
-    to
-    `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})`
-  - Rename MergePaths to Merge; require args to be FieldPaths
-  - A value stored as an integer can be read into a floating-point field, and vice versa.
-- bigtable/cmd/cbt:
-  - Support deleting a column.
-  - Add regex option for row read.
-- spanner: Mark stable.
-- storage:
-  - Add Reader.ContentEncoding method.
-  - Fix handling of SignedURL headers.
-- bigquery:
-  - If Uploader.Put is called with no rows, it returns nil without making a
-    call.
-  - Schema inference supports the "nullable" option in struct tags for
-    non-required fields.
-  - TimePartitioning supports "Field".
-
-
-[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
+Changes have been moved to [CHANGES](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CHANGES.md).
 
 ## Supported APIs
 
 Google API | Status | Package
----------------------------------|--------------|-----------------------------------------------------------
+---------------------------------------------|--------------|-----------------------------------------------------------
+[Asset][cloud-asset] | alpha | [`cloud.google.com/go/asset/v1beta`][cloud-asset-ref]
 [BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
 [Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
-[Container][cloud-container] | alpha | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
-[Data Loss Prevention][cloud-dlp]| alpha | [`cloud.google.com/go/dlp/apiv2beta1`][cloud-dlp-ref]
+[Cloudtasks][cloud-tasks] | beta | [`cloud.google.com/go/cloudtasks/apiv2beta3`][cloud-tasks-ref]
+[Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
+[ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref]
+[Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref]
 [Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
 [Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
+[Dialogflow][cloud-dialogflow] | alpha | [`cloud.google.com/go/dialogflow/apiv2`][cloud-dialogflow-ref]
+[Data Loss Prevention][cloud-dlp] | alpha | [`cloud.google.com/go/dlp/apiv2`][cloud-dlp-ref]
 [ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
 [Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
-[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
+[IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`][cloud-iam-ref]
+[KMS][cloud-kms] | stable | [`cloud.google.com/go/kms`][cloud-kms-ref]
+[Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-natural-language-ref]
 [Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
-[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
+[Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
 [OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref]
 [Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
+[Memorystore][cloud-memorystore] | stable | [`cloud.google.com/go/redis/apiv1beta1`][cloud-memorystore-ref]
 [Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
 [Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
 [Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
+[Text To Speech][cloud-texttospeech] | alpha | [`cloud.google.com/go/texttospeech/apiv1`][cloud-texttospeech-ref]
+[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace/apiv2`][cloud-trace-ref]
 [Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
-[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
+[Video Intelligence][cloud-video] | alpha | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
 [Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
 
 
 > **Alpha status**: the API is still being actively developed. As a
 > result, it might change in backward-incompatible ways and is not recommended
 > for production use.

@@ -298,9 +102,7 @@ for updates on these packages.
 ## Go Versions Supported
 
 We support the two most recent major versions of Go. If Google App Engine uses
-an older version, we support that as well. You can see which versions are
-currently supported by looking at the lines following `go:` in
-[`.travis.yml`](.travis.yml).
+an older version, we support that as well.
 
 ## Authorization
 

@@ -316,12 +118,12 @@ client, err := storage.NewClient(ctx)
 To authorize using a
 [JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
 pass
-[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile)
+[`option.WithCredentialsFile`](https://godoc.org/google.golang.org/api/option#WithCredentialsFile)
 to the `NewClient` function of the desired package. For example:
 
 [snip]:# (auth-JSON)
 ```go
-client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
+client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
 ```
 
 You can exert more control over authorization by using the

@@ -457,9 +259,9 @@ if err != nil {
 }
 ```
 
-## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery)
+## BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery)
 
-- [About Cloud BigQuery][cloud-bigquery]
+- [About BigQuery][cloud-bigquery]
 - [API documentation][cloud-bigquery-docs]
 - [Go client documentation][cloud-bigquery-ref]
 - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery)

@@ -658,3 +460,46 @@ for more information.
 [cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1
 
 [default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
+
+[cloud-dataproc]: https://cloud.google.com/dataproc/
+[cloud-dataproc-docs]: https://cloud.google.com/dataproc/docs
+[cloud-dataproc-ref]: https://godoc.org/cloud.google.com/go/dataproc/apiv1
+
+[cloud-iam]: https://cloud.google.com/iam/
+[cloud-iam-docs]: https://cloud.google.com/iam/docs
+[cloud-iam-ref]: https://godoc.org/cloud.google.com/go/iam
+
+[cloud-kms]: https://cloud.google.com/kms/
+[cloud-kms-docs]: https://cloud.google.com/kms/docs
+[cloud-kms-ref]: https://godoc.org/cloud.google.com/go/kms/apiv1
+
+[cloud-natural-language]: https://cloud.google.com/natural-language/
+[cloud-natural-language-docs]: https://cloud.google.com/natural-language/docs
+[cloud-natural-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
+
+[cloud-memorystore]: https://cloud.google.com/memorystore/
+[cloud-memorystore-docs]: https://cloud.google.com/memorystore/docs
+[cloud-memorystore-ref]: https://godoc.org/cloud.google.com/go/redis/apiv1beta1
+
+[cloud-texttospeech]: https://cloud.google.com/texttospeech/
+[cloud-texttospeech-docs]: https://cloud.google.com/texttospeech/docs
+[cloud-texttospeech-ref]: https://godoc.org/cloud.google.com/go/texttospeech/apiv1
+
+[cloud-trace]: https://cloud.google.com/trace/
+[cloud-trace-docs]: https://cloud.google.com/trace/docs
+[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace/apiv2
+
+[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
+[cloud-dialogflow-docs]: https://cloud.google.com/dialogflow-enterprise/docs/
+[cloud-dialogflow-ref]: https://godoc.org/cloud.google.com/go/dialogflow/apiv2
+
+[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
+[cloud-containeranalysis-docs]: https://cloud.google.com/container-analysis/api/reference/rest/
+[cloud-containeranalysis-ref]: https://godoc.org/cloud.google.com/go/devtools/containeranalysis/apiv1beta1
+
+[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
+[cloud-asset-docs]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
+[cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1
+
+[cloud-tasks]: https://cloud.google.com/tasks/
+[cloud-tasks-ref]: https://godoc.org/cloud.google.com/go/cloudtasks/apiv2beta3
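The README hunk above swaps `option.WithServiceAccountFile` for `option.WithCredentialsFile`. A minimal self-contained sketch of the new call, with placeholder key path and bucket name, not part of the commit itself:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// option.WithCredentialsFile is the replacement this diff documents for
	// the removed option.WithServiceAccountFile; the key path is a placeholder.
	client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// The client is then used exactly as before, e.g. to obtain a bucket handle.
	_ = client.Bucket("my-bucket") // hypothetical bucket name
}
```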
vendor/cloud.google.com/go/RELEASING.md (generated, vendored, 58 lines changed)

@@ -1,13 +1,47 @@
-# How to Release this Repo
+# How to Create a New Release
 
-1. Determine the current release version with `git tag -l`. It should look
-   something like `vX.Y.Z`. We'll call the current
-   version `$CV` and the new version `$NV`.
-1. On master, run `git log $CV..` to list all the changes since the last
-   release.
-1. Edit the News section of `README.md` to include a summary of the changes.
-1. Mail the CL containing the `README.md` changes. When the CL is approved, submit it.
-1. Without submitting any other CLs:
-   a. Switch to master.
-   b. Tag the repo with the next version: `git tag $NV`.
-   c. Push the tag: `git push origin $NV`.
+## Prerequisites
+
+Install [releasetool](https://github.com/googleapis/releasetool).
+
+## Create a release
+
+1. `cd` into the root directory, e.g., `~/go/src/cloud.google.com/go`
+1. Checkout the master branch and ensure a clean and up-to-date state.
+   ```
+   git checkout master
+   git pull --tags origin master
+   ```
+1. Run releasetool to generate a changelog from the last version. Note,
+   releasetool will prompt if the new version is a major, minor, or patch
+   version.
+   ```
+   releasetool start --language go
+   ```
+1. Format the output to match CHANGES.md.
+1. Submit a CL with the changes in CHANGES.md. The commit message should look
+   like this (where `v0.31.0` is instead the correct version number):
+   ```
+   all: Release v0.31.0
+   ```
+1. Wait for approval from all reviewers and then submit the CL.
+1. Return to the master branch and pull the release commit.
+   ```
+   git checkout master
+   git pull origin master
+   ```
+1. Tag the current commit with the new version (e.g., `v0.31.0`)
+   ```
+   releasetool tag --language go
+   ```
+1. Publish the tag to GoogleSource (i.e., origin):
+   ```
+   git push origin $NEW_VERSION
+   ```
+1. Visit the [releases page][releases] on GitHub and click the "Draft a new
+   release" button. For tag version, enter the tag published in the previous
+   step. For the release title, use the version (e.g., `v0.31.0`). For the
+   description, copy the changes added to CHANGES.md.
+
+[releases]: https://github.com/googleapis/google-cloud-go/releases
vendor/cloud.google.com/go/appveyor.yml (generated, vendored, 32 lines deleted)

@@ -1,32 +0,0 @@
-# This file configures AppVeyor (http://www.appveyor.com),
-# a Windows-based CI service similar to Travis.
-
-# Identifier for this run
-version: "{build}"
-
-# Clone the repo into this path, which conforms to the standard
-# Go workspace structure.
-clone_folder: c:\gopath\src\cloud.google.com\go
-
-environment:
-  GOPATH: c:\gopath
-  GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762
-  GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json
-  KEYFILE_CONTENTS:
-    secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje
-
-install:
-  # Info for debugging.
-  - echo %PATH%
-  - go version
-  - go env
-  - go get -v -d -t ./...
-
-# Provide a build script, or AppVeyor will call msbuild.
-build_script:
-  - go install -v ./...
-  - echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY%
-
-test_script:
-  - go test -v ./...
249
vendor/cloud.google.com/go/asset/apiv1beta1/asset_client.go
generated
vendored
Normal file
249
vendor/cloud.google.com/go/asset/apiv1beta1/asset_client.go
generated
vendored
Normal file
@ -0,0 +1,249 @@
|
|||||||
|
// Copyright 2019 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Code generated by gapic-generator. DO NOT EDIT.
|
||||||
|
|
||||||
|
package asset
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"cloud.google.com/go/longrunning"
|
||||||
|
lroauto "cloud.google.com/go/longrunning/autogen"
|
||||||
|
gax "github.com/googleapis/gax-go/v2"
|
||||||
|
"google.golang.org/api/option"
|
||||||
|
"google.golang.org/api/transport"
|
||||||
|
assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
|
||||||
|
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CallOptions contains the retry settings for each method of Client.
|
||||||
|
type CallOptions struct {
|
||||||
|
ExportAssets []gax.CallOption
|
||||||
|
BatchGetAssetsHistory []gax.CallOption
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultClientOptions() []option.ClientOption {
|
||||||
|
return []option.ClientOption{
|
||||||
|
option.WithEndpoint("cloudasset.googleapis.com:443"),
|
||||||
|
option.WithScopes(DefaultAuthScopes()...),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultCallOptions() *CallOptions {
|
||||||
|
retry := map[[2]string][]gax.CallOption{
|
||||||
|
{"default", "idempotent"}: {
|
||||||
|
gax.WithRetry(func() gax.Retryer {
|
||||||
|
return gax.OnCodes([]codes.Code{
|
||||||
|
codes.DeadlineExceeded,
|
||||||
|
codes.Unavailable,
|
||||||
|
}, gax.Backoff{
|
||||||
|
Initial: 100 * time.Millisecond,
|
||||||
|
Max: 60000 * time.Millisecond,
|
||||||
|
Multiplier: 1.3,
|
||||||
|
})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return &CallOptions{
|
||||||
|
ExportAssets: retry[[2]string{"default", "non_idempotent"}],
|
||||||
|
BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client is a client for interacting with Cloud Asset API.
|
||||||
|
//
|
||||||
|
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||||
|
type Client struct {
|
||||||
|
// The connection to the service.
|
||||||
|
conn *grpc.ClientConn
|
||||||
|
|
||||||
|
// The gRPC API client.
|
||||||
|
client assetpb.AssetServiceClient
|
||||||
|
|
||||||
|
// LROClient is used internally to handle longrunning operations.
|
||||||
|
// It is exposed so that its CallOptions can be modified if required.
|
||||||
|
// Users should not Close this client.
|
||||||
|
LROClient *lroauto.OperationsClient
|
||||||
|
|
||||||
|
// The call options for this service.
|
||||||
|
CallOptions *CallOptions
|
||||||
|
|
||||||
|
// The x-goog-* metadata to be sent with each request.
|
||||||
|
xGoogMetadata metadata.MD
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient creates a new asset service client.
|
||||||
|
//
|
||||||
|
// Asset service definition.
|
||||||
|
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
|
||||||
|
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
c := &Client{
|
||||||
|
conn: conn,
|
||||||
|
CallOptions: defaultCallOptions(),
|
||||||
|
|
||||||
|
client: assetpb.NewAssetServiceClient(conn),
|
||||||
|
}
|
||||||
|
c.setGoogleClientInfo()
|
||||||
|
|
||||||
|
c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
|
||||||
|
if err != nil {
|
||||||
|
// This error "should not happen", since we are just reusing old connection
|
||||||
|
// and never actually need to dial.
|
||||||
|
// If this does happen, we could leak conn. However, we cannot close conn:
|
||||||
|
// If the user invoked the function with option.WithGRPCConn,
|
||||||
|
// we would close a connection that's still in use.
|
||||||
|
// TODO(pongad): investigate error conditions.
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connection returns the client's connection to the API service.
|
||||||
|
func (c *Client) Connection() *grpc.ClientConn {
|
||||||
|
return c.conn
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the connection to the API service. The user should invoke this when
|
||||||
|
// the client is no longer required.
|
||||||
|
func (c *Client) Close() error {
|
||||||
|
return c.conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// setGoogleClientInfo sets the name and version of the application in
|
||||||
|
// the `x-goog-api-client` header passed on each request. Intended for
|
||||||
|
// use by Google-written clients.
|
||||||
|
func (c *Client) setGoogleClientInfo(keyval ...string) {
|
||||||
|
kv := append([]string{"gl-go", versionGo()}, keyval...)
|
||||||
|
    kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
    c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ExportAssets exports assets with time and resource types to a given Cloud Storage
// location. The output format is newline-delimited JSON.
// This API implements the
// [google.longrunning.Operation][google.longrunning.Operation] API allowing
// you to keep track of the export.
func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
    var resp *longrunningpb.Operation
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return &ExportAssetsOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, resp),
    }, nil
}

// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
// For RESOURCE content, this API outputs history for assets in both
// non-deleted and deleted states.
// For IAM_POLICY content, this API outputs history only when the asset and its
// attached IAM policy both exist. This can create gaps in the output history.
func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
    var resp *assetpb.BatchGetAssetsHistoryResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return resp, nil
}

// ExportAssetsOperation manages a long-running operation from ExportAssets.
type ExportAssetsOperation struct {
    lro *longrunning.Operation
}

// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
    return &ExportAssetsOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
    }
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
    var resp assetpb.ExportAssetsResponse
    if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
        return nil, err
    }
    return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
    var resp assetpb.ExportAssetsResponse
    if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
        return nil, err
    }
    if !op.Done() {
        return nil, nil
    }
    return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
    var meta assetpb.ExportAssetsRequest
    if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
        return nil, nil
    } else if err != nil {
        return nil, err
    }
    return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *ExportAssetsOperation) Done() bool {
    return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *ExportAssetsOperation) Name() string {
    return op.lro.Name()
}
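ExportAssets above returns a handle rather than a result; the operation's name can be persisted and the export resumed from another process via ExportAssetsOperation. A minimal sketch of that resume pattern, assuming a previously stored name (the helper name and polling cadence are placeholders, not part of this package):

// Sketch: resuming and polling an ExportAssets operation by name.
package example

import (
    "context"
    "time"

    asset "cloud.google.com/go/asset/apiv1beta1"
    assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
)

func resumeExport(ctx context.Context, c *asset.Client, name string) (*assetpb.ExportAssetsResponse, error) {
    op := c.ExportAssetsOperation(name) // rebuild the handle from the stored name
    for {
        resp, err := op.Poll(ctx)
        if err != nil {
            return nil, err // Poll failed, or the operation completed with failure
        }
        if op.Done() {
            return resp, nil // completed successfully
        }
        time.Sleep(5 * time.Second) // placeholder cadence; Wait polls every 5s too
    }
}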
75
vendor/cloud.google.com/go/asset/apiv1beta1/asset_client_example_test.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package asset_test

import (
    "context"

    asset "cloud.google.com/go/asset/apiv1beta1"
    assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
)

func ExampleNewClient() {
    ctx := context.Background()
    c, err := asset.NewClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use client.
    _ = c
}

func ExampleClient_ExportAssets() {
    ctx := context.Background()
    c, err := asset.NewClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &assetpb.ExportAssetsRequest{
        // TODO: Fill request struct fields.
    }
    op, err := c.ExportAssets(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }

    resp, err := op.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}

func ExampleClient_BatchGetAssetsHistory() {
    ctx := context.Background()
    c, err := asset.NewClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &assetpb.BatchGetAssetsHistoryRequest{
        // TODO: Fill request struct fields.
    }
    resp, err := c.BatchGetAssetsHistory(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}
89
vendor/cloud.google.com/go/asset/apiv1beta1/doc.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

// Package asset is an auto-generated package for the
// Cloud Asset API.
//
// NOTE: This package is in beta. It is not stable, and may be subject to changes.
//
// The cloud asset API manages the history and inventory of cloud resources.
package asset // import "cloud.google.com/go/asset/apiv1beta1"

import (
    "context"
    "runtime"
    "strings"
    "unicode"

    "google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
    out, _ := metadata.FromOutgoingContext(ctx)
    out = out.Copy()
    for _, md := range mds {
        for k, v := range md {
            out[k] = append(out[k], v...)
        }
    }
    return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
    return []string{
        "https://www.googleapis.com/auth/cloud-platform",
    }
}

// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
    const develPrefix = "devel +"

    s := runtime.Version()
    if strings.HasPrefix(s, develPrefix) {
        s = s[len(develPrefix):]
        if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
            s = s[:p]
        }
        return s
    }

    notSemverRune := func(r rune) bool {
        return strings.IndexRune("0123456789.", r) < 0
    }

    if strings.HasPrefix(s, "go1") {
        s = s[2:]
        var prerelease string
        if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
            s, prerelease = s[:p], s[p:]
        }
        if strings.HasSuffix(s, ".") {
            s += "0"
        } else if strings.Count(s, ".") < 2 {
            s += ".0"
        }
        if prerelease != "" {
            s += "-" + prerelease
        }
        return s
    }
    return "UNKNOWN"
}

const versionClient = "20190306"
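versionGo normalizes runtime.Version() into a semver-like token ("go1.12" becomes "1.12.0", "go1.12beta1" becomes "1.12.0-beta1"), and setGoogleClientInfo folds it into the x-goog-api-client header. A rough sketch of the resulting header value, using the same gax helper the client uses; the version strings below are illustrative placeholders:

// Sketch: the header assembled by setGoogleClientInfo; values are illustrative.
package main

import (
    "fmt"

    gax "github.com/googleapis/gax-go/v2"
    "google.golang.org/grpc"
)

func main() {
    header := gax.XGoogHeader(
        "gl-go", "1.12.0", // what versionGo() yields for go1.12
        "gapic", "20190306", // versionClient in this package
        "gax", gax.Version,
        "grpc", grpc.Version,
    )
    fmt.Println(header) // e.g. "gl-go/1.12.0 gapic/20190306 gax/2.x.x grpc/1.x.x"
}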
266
vendor/cloud.google.com/go/asset/apiv1beta1/mock_test.go
generated
vendored
Normal file
@ -0,0 +1,266 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package asset

import (
    assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
    longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

import (
    "context"
    "flag"
    "fmt"
    "io"
    "log"
    "net"
    "os"
    "strings"
    "testing"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes"
    "google.golang.org/api/option"
    status "google.golang.org/genproto/googleapis/rpc/status"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
    gstatus "google.golang.org/grpc/status"
)

var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status

type mockAssetServer struct {
    // Embed for forward compatibility.
    // Tests will keep working if more methods are added
    // in the future.
    assetpb.AssetServiceServer

    reqs []proto.Message

    // If set, all calls return this error.
    err error

    // responses to return if err == nil
    resps []proto.Message
}

func (s *mockAssetServer) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest) (*longrunningpb.Operation, error) {
    md, _ := metadata.FromIncomingContext(ctx)
    if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
        return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
    }
    s.reqs = append(s.reqs, req)
    if s.err != nil {
        return nil, s.err
    }
    return s.resps[0].(*longrunningpb.Operation), nil
}

func (s *mockAssetServer) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest) (*assetpb.BatchGetAssetsHistoryResponse, error) {
    md, _ := metadata.FromIncomingContext(ctx)
    if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
        return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
    }
    s.reqs = append(s.reqs, req)
    if s.err != nil {
        return nil, s.err
    }
    return s.resps[0].(*assetpb.BatchGetAssetsHistoryResponse), nil
}

// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption

var (
    mockAsset mockAssetServer
)

func TestMain(m *testing.M) {
    flag.Parse()

    serv := grpc.NewServer()
    assetpb.RegisterAssetServiceServer(serv, &mockAsset)

    lis, err := net.Listen("tcp", "localhost:0")
    if err != nil {
        log.Fatal(err)
    }
    go serv.Serve(lis)

    conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    clientOpt = option.WithGRPCConn(conn)

    os.Exit(m.Run())
}

func TestAssetServiceExportAssets(t *testing.T) {
    var expectedResponse *assetpb.ExportAssetsResponse = &assetpb.ExportAssetsResponse{}

    mockAsset.err = nil
    mockAsset.reqs = nil

    any, err := ptypes.MarshalAny(expectedResponse)
    if err != nil {
        t.Fatal(err)
    }
    mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
        Name:   "longrunning-test",
        Done:   true,
        Result: &longrunningpb.Operation_Response{Response: any},
    })

    var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
    var request = &assetpb.ExportAssetsRequest{
        Parent:       formattedParent,
        OutputConfig: outputConfig,
    }

    c, err := NewClient(context.Background(), clientOpt)
    if err != nil {
        t.Fatal(err)
    }

    respLRO, err := c.ExportAssets(context.Background(), request)
    if err != nil {
        t.Fatal(err)
    }
    resp, err := respLRO.Wait(context.Background())

    if err != nil {
        t.Fatal(err)
    }

    if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
        t.Errorf("wrong request %q, want %q", got, want)
    }

    if want, got := expectedResponse, resp; !proto.Equal(want, got) {
        t.Errorf("wrong response %q, want %q", got, want)
    }
}

func TestAssetServiceExportAssetsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockAsset.err = nil
    mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
        Name: "longrunning-test",
        Done: true,
        Result: &longrunningpb.Operation_Error{
            Error: &status.Status{
                Code:    int32(errCode),
                Message: "test error",
            },
        },
    })

    var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
    var request = &assetpb.ExportAssetsRequest{
        Parent:       formattedParent,
        OutputConfig: outputConfig,
    }

    c, err := NewClient(context.Background(), clientOpt)
    if err != nil {
        t.Fatal(err)
    }

    respLRO, err := c.ExportAssets(context.Background(), request)
    if err != nil {
        t.Fatal(err)
    }
    resp, err := respLRO.Wait(context.Background())

    if st, ok := gstatus.FromError(err); !ok {
        t.Errorf("got error %v, expected grpc error", err)
    } else if c := st.Code(); c != errCode {
        t.Errorf("got error code %q, want %q", c, errCode)
    }
    _ = resp
}

func TestAssetServiceBatchGetAssetsHistory(t *testing.T) {
    var expectedResponse *assetpb.BatchGetAssetsHistoryResponse = &assetpb.BatchGetAssetsHistoryResponse{}

    mockAsset.err = nil
    mockAsset.reqs = nil

    mockAsset.resps = append(mockAsset.resps[:0], expectedResponse)

    var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
    var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
    var request = &assetpb.BatchGetAssetsHistoryRequest{
        Parent:         formattedParent,
        ContentType:    contentType,
        ReadTimeWindow: readTimeWindow,
    }

    c, err := NewClient(context.Background(), clientOpt)
    if err != nil {
        t.Fatal(err)
    }

    resp, err := c.BatchGetAssetsHistory(context.Background(), request)

    if err != nil {
        t.Fatal(err)
    }

    if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
        t.Errorf("wrong request %q, want %q", got, want)
    }

    if want, got := expectedResponse, resp; !proto.Equal(want, got) {
        t.Errorf("wrong response %q, want %q", got, want)
    }
}

func TestAssetServiceBatchGetAssetsHistoryError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockAsset.err = gstatus.Error(errCode, "test error")

    var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
    var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
    var request = &assetpb.BatchGetAssetsHistoryRequest{
        Parent:         formattedParent,
        ContentType:    contentType,
        ReadTimeWindow: readTimeWindow,
    }

    c, err := NewClient(context.Background(), clientOpt)
    if err != nil {
        t.Fatal(err)
    }

    resp, err := c.BatchGetAssetsHistory(context.Background(), request)

    if st, ok := gstatus.FromError(err); !ok {
        t.Errorf("got error %v, expected grpc error", err)
    } else if c := st.Code(); c != errCode {
        t.Errorf("got error code %q, want %q", c, errCode)
    }
    _ = resp
}
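The mock test above wires a real gRPC server on an ephemeral local port and dials the generated client at it through option.WithGRPCConn. A compressed sketch of just that wiring, generalized to any service with a generated registration function (the helper name is a placeholder, not part of this file):

// Sketch (placeholder helper name): in-process gRPC test wiring.
package example

import (
    "log"
    "net"

    "google.golang.org/api/option"
    "google.golang.org/grpc"
)

// newTestConnOption starts a server with the given registrations on an
// ephemeral local port and returns a ClientOption that points a generated
// client at it.
func newTestConnOption(register func(*grpc.Server)) option.ClientOption {
    serv := grpc.NewServer()
    register(serv)

    lis, err := net.Listen("tcp", "localhost:0")
    if err != nil {
        log.Fatal(err)
    }
    go serv.Serve(lis)

    conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    return option.WithGRPCConn(conn)
}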
248
vendor/cloud.google.com/go/asset/v1beta1/asset_client.go
generated
vendored
Normal file
@ -0,0 +1,248 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package asset

import (
    "context"
    "time"

    "cloud.google.com/go/longrunning"
    lroauto "cloud.google.com/go/longrunning/autogen"
    gax "github.com/googleapis/gax-go/v2"
    "google.golang.org/api/option"
    "google.golang.org/api/transport"
    assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
    longrunningpb "google.golang.org/genproto/googleapis/longrunning"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
    ExportAssets          []gax.CallOption
    BatchGetAssetsHistory []gax.CallOption
}

func defaultClientOptions() []option.ClientOption {
    return []option.ClientOption{
        option.WithEndpoint("cloudasset.googleapis.com:443"),
        option.WithScopes(DefaultAuthScopes()...),
    }
}

func defaultCallOptions() *CallOptions {
    retry := map[[2]string][]gax.CallOption{
        {"default", "idempotent"}: {
            gax.WithRetry(func() gax.Retryer {
                return gax.OnCodes([]codes.Code{
                    codes.DeadlineExceeded,
                    codes.Unavailable,
                }, gax.Backoff{
                    Initial:    100 * time.Millisecond,
                    Max:        60000 * time.Millisecond,
                    Multiplier: 1.3,
                })
            }),
        },
    }
    return &CallOptions{
        ExportAssets:          retry[[2]string{"default", "non_idempotent"}],
        BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
    }
}

// Client is a client for interacting with Cloud Asset API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
    // The connection to the service.
    conn *grpc.ClientConn

    // The gRPC API client.
    client assetpb.AssetServiceClient

    // LROClient is used internally to handle longrunning operations.
    // It is exposed so that its CallOptions can be modified if required.
    // Users should not Close this client.
    LROClient *lroauto.OperationsClient

    // The call options for this service.
    CallOptions *CallOptions

    // The x-goog-* metadata to be sent with each request.
    xGoogMetadata metadata.MD
}

// NewClient creates a new asset service client.
//
// Asset service definition.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
    conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
    if err != nil {
        return nil, err
    }
    c := &Client{
        conn:        conn,
        CallOptions: defaultCallOptions(),

        client: assetpb.NewAssetServiceClient(conn),
    }
    c.setGoogleClientInfo()

    c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
    if err != nil {
        // This error "should not happen", since we are just reusing old connection
        // and never actually need to dial.
        // If this does happen, we could leak conn. However, we cannot close conn:
        // If the user invoked the function with option.WithGRPCConn,
        // we would close a connection that's still in use.
        // TODO(pongad): investigate error conditions.
        return nil, err
    }
    return c, nil
}

// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
    return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
    return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
    kv := append([]string{"gl-go", versionGo()}, keyval...)
    kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
    c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ExportAssets exports assets with time and resource types to a given Cloud Storage
// location. The output format is newline-delimited JSON.
// This API implements the [google.longrunning.Operation][google.longrunning.Operation] API allowing you
// to keep track of the export.
func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
    var resp *longrunningpb.Operation
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return &ExportAssetsOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, resp),
    }, nil
}

// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
// For RESOURCE content, this API outputs history for assets in both
// non-deleted and deleted states.
// For IAM_POLICY content, this API outputs history only when the asset and its
// attached IAM policy both exist. This can create gaps in the output history.
func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
    var resp *assetpb.BatchGetAssetsHistoryResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return resp, nil
}

// ExportAssetsOperation manages a long-running operation from ExportAssets.
type ExportAssetsOperation struct {
    lro *longrunning.Operation
}

// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
    return &ExportAssetsOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
    }
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
    var resp assetpb.ExportAssetsResponse
    if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
        return nil, err
    }
    return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
    var resp assetpb.ExportAssetsResponse
    if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
        return nil, err
    }
    if !op.Done() {
        return nil, nil
    }
    return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
    var meta assetpb.ExportAssetsRequest
    if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
        return nil, nil
    } else if err != nil {
        return nil, err
    }
    return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *ExportAssetsOperation) Done() bool {
    return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *ExportAssetsOperation) Name() string {
    return op.lro.Name()
}
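Note that defaultCallOptions above wires BatchGetAssetsHistory to retry on DeadlineExceeded and Unavailable, while ExportAssets looks up the absent "non_idempotent" key and therefore gets no retries. Callers can widen this per call through the variadic options; a hedged sketch, where the codes and backoff are illustrative choices rather than the package defaults:

// Sketch: per-call retry override; values below are illustrative.
package example

import (
    "context"
    "time"

    asset "cloud.google.com/go/asset/v1beta1"
    gax "github.com/googleapis/gax-go/v2"
    assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
    "google.golang.org/grpc/codes"
)

func batchGetWithRetry(ctx context.Context, c *asset.Client, req *assetpb.BatchGetAssetsHistoryRequest) (*assetpb.BatchGetAssetsHistoryResponse, error) {
    // The trailing option overrides the retry policy for this call only.
    return c.BatchGetAssetsHistory(ctx, req, gax.WithRetry(func() gax.Retryer {
        return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
            Initial:    200 * time.Millisecond,
            Max:        10 * time.Second,
            Multiplier: 2.0,
        })
    }))
}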
75
vendor/cloud.google.com/go/asset/v1beta1/asset_client_example_test.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package asset_test

import (
    "context"

    asset "cloud.google.com/go/asset/v1beta1"
    assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
)

func ExampleNewClient() {
    ctx := context.Background()
    c, err := asset.NewClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use client.
    _ = c
}

func ExampleClient_ExportAssets() {
    ctx := context.Background()
    c, err := asset.NewClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &assetpb.ExportAssetsRequest{
        // TODO: Fill request struct fields.
    }
    op, err := c.ExportAssets(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }

    resp, err := op.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}

func ExampleClient_BatchGetAssetsHistory() {
    ctx := context.Background()
    c, err := asset.NewClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &assetpb.BatchGetAssetsHistoryRequest{
        // TODO: Fill request struct fields.
    }
    resp, err := c.BatchGetAssetsHistory(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}
89
vendor/cloud.google.com/go/asset/v1beta1/doc.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package asset is an auto-generated package for the
// Cloud Asset API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// The cloud asset API manages the history and inventory of cloud resources.
package asset // import "cloud.google.com/go/asset/v1beta1"

import (
    "context"
    "runtime"
    "strings"
    "unicode"

    "google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
    out, _ := metadata.FromOutgoingContext(ctx)
    out = out.Copy()
    for _, md := range mds {
        for k, v := range md {
            out[k] = append(out[k], v...)
        }
    }
    return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
    return []string{
        "https://www.googleapis.com/auth/cloud-platform",
    }
}

// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
    const develPrefix = "devel +"

    s := runtime.Version()
    if strings.HasPrefix(s, develPrefix) {
        s = s[len(develPrefix):]
        if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
            s = s[:p]
        }
        return s
    }

    notSemverRune := func(r rune) bool {
        return strings.IndexRune("0123456789.", r) < 0
    }

    if strings.HasPrefix(s, "go1") {
        s = s[2:]
        var prerelease string
        if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
            s, prerelease = s[:p], s[p:]
        }
        if strings.HasSuffix(s, ".") {
            s += "0"
        } else if strings.Count(s, ".") < 2 {
            s += ".0"
        }
        if prerelease != "" {
            s += "-" + prerelease
        }
        return s
    }
    return "UNKNOWN"
}

const versionClient = "20181219"
266
vendor/cloud.google.com/go/asset/v1beta1/mock_test.go
generated
vendored
Normal file
@ -0,0 +1,266 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package asset

import (
    assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
    longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

import (
    "context"
    "flag"
    "fmt"
    "io"
    "log"
    "net"
    "os"
    "strings"
    "testing"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes"
    "google.golang.org/api/option"
    status "google.golang.org/genproto/googleapis/rpc/status"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
    gstatus "google.golang.org/grpc/status"
)

var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status

type mockAssetServer struct {
    // Embed for forward compatibility.
    // Tests will keep working if more methods are added
    // in the future.
    assetpb.AssetServiceServer

    reqs []proto.Message

    // If set, all calls return this error.
    err error

    // responses to return if err == nil
    resps []proto.Message
}

func (s *mockAssetServer) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest) (*longrunningpb.Operation, error) {
    md, _ := metadata.FromIncomingContext(ctx)
    if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
        return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
    }
    s.reqs = append(s.reqs, req)
    if s.err != nil {
        return nil, s.err
    }
    return s.resps[0].(*longrunningpb.Operation), nil
}

func (s *mockAssetServer) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest) (*assetpb.BatchGetAssetsHistoryResponse, error) {
    md, _ := metadata.FromIncomingContext(ctx)
    if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
        return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
    }
    s.reqs = append(s.reqs, req)
    if s.err != nil {
        return nil, s.err
    }
    return s.resps[0].(*assetpb.BatchGetAssetsHistoryResponse), nil
}

// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption

var (
    mockAsset mockAssetServer
)

func TestMain(m *testing.M) {
    flag.Parse()

    serv := grpc.NewServer()
    assetpb.RegisterAssetServiceServer(serv, &mockAsset)

    lis, err := net.Listen("tcp", "localhost:0")
    if err != nil {
        log.Fatal(err)
    }
    go serv.Serve(lis)

    conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    clientOpt = option.WithGRPCConn(conn)

    os.Exit(m.Run())
}

func TestAssetServiceExportAssets(t *testing.T) {
    var expectedResponse *assetpb.ExportAssetsResponse = &assetpb.ExportAssetsResponse{}

    mockAsset.err = nil
    mockAsset.reqs = nil

    any, err := ptypes.MarshalAny(expectedResponse)
    if err != nil {
        t.Fatal(err)
    }
    mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
        Name:   "longrunning-test",
        Done:   true,
        Result: &longrunningpb.Operation_Response{Response: any},
    })

    var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
    var request = &assetpb.ExportAssetsRequest{
        Parent:       formattedParent,
        OutputConfig: outputConfig,
    }

    c, err := NewClient(context.Background(), clientOpt)
    if err != nil {
        t.Fatal(err)
    }

    respLRO, err := c.ExportAssets(context.Background(), request)
    if err != nil {
        t.Fatal(err)
    }
    resp, err := respLRO.Wait(context.Background())

    if err != nil {
        t.Fatal(err)
    }

    if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
        t.Errorf("wrong request %q, want %q", got, want)
    }

    if want, got := expectedResponse, resp; !proto.Equal(want, got) {
        t.Errorf("wrong response %q, want %q", got, want)
    }
}

func TestAssetServiceExportAssetsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockAsset.err = nil
    mockAsset.resps = append(mockAsset.resps[:0], &longrunningpb.Operation{
        Name: "longrunning-test",
        Done: true,
        Result: &longrunningpb.Operation_Error{
            Error: &status.Status{
                Code:    int32(errCode),
                Message: "test error",
            },
        },
    })

    var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var outputConfig *assetpb.OutputConfig = &assetpb.OutputConfig{}
    var request = &assetpb.ExportAssetsRequest{
        Parent:       formattedParent,
        OutputConfig: outputConfig,
    }

    c, err := NewClient(context.Background(), clientOpt)
    if err != nil {
        t.Fatal(err)
    }

    respLRO, err := c.ExportAssets(context.Background(), request)
    if err != nil {
        t.Fatal(err)
    }
    resp, err := respLRO.Wait(context.Background())

    if st, ok := gstatus.FromError(err); !ok {
        t.Errorf("got error %v, expected grpc error", err)
    } else if c := st.Code(); c != errCode {
        t.Errorf("got error code %q, want %q", c, errCode)
    }
    _ = resp
}

func TestAssetServiceBatchGetAssetsHistory(t *testing.T) {
    var expectedResponse *assetpb.BatchGetAssetsHistoryResponse = &assetpb.BatchGetAssetsHistoryResponse{}

    mockAsset.err = nil
    mockAsset.reqs = nil

    mockAsset.resps = append(mockAsset.resps[:0], expectedResponse)

    var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
    var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
    var request = &assetpb.BatchGetAssetsHistoryRequest{
        Parent:         formattedParent,
        ContentType:    contentType,
        ReadTimeWindow: readTimeWindow,
    }

    c, err := NewClient(context.Background(), clientOpt)
    if err != nil {
        t.Fatal(err)
    }

    resp, err := c.BatchGetAssetsHistory(context.Background(), request)

    if err != nil {
        t.Fatal(err)
    }

    if want, got := request, mockAsset.reqs[0]; !proto.Equal(want, got) {
        t.Errorf("wrong request %q, want %q", got, want)
    }

    if want, got := expectedResponse, resp; !proto.Equal(want, got) {
        t.Errorf("wrong response %q, want %q", got, want)
    }
}

func TestAssetServiceBatchGetAssetsHistoryError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockAsset.err = gstatus.Error(errCode, "test error")

    var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var contentType assetpb.ContentType = assetpb.ContentType_CONTENT_TYPE_UNSPECIFIED
    var readTimeWindow *assetpb.TimeWindow = &assetpb.TimeWindow{}
    var request = &assetpb.BatchGetAssetsHistoryRequest{
        Parent:         formattedParent,
        ContentType:    contentType,
        ReadTimeWindow: readTimeWindow,
    }

    c, err := NewClient(context.Background(), clientOpt)
    if err != nil {
        t.Fatal(err)
    }

    resp, err := c.BatchGetAssetsHistory(context.Background(), request)

    if st, ok := gstatus.FromError(err); !ok {
        t.Errorf("got error %v, expected grpc error", err)
    } else if c := st.Code(); c != errCode {
        t.Errorf("got error code %q, want %q", c, errCode)
    }
    _ = resp
}
3
vendor/cloud.google.com/go/authexample_test.go
generated
vendored
@ -15,9 +15,10 @@
 package cloud_test
 
 import (
+    "context"
+
     "cloud.google.com/go/datastore"
     "cloud.google.com/go/pubsub"
-    "golang.org/x/net/context"
     "golang.org/x/oauth2/google"
     "google.golang.org/api/option"
 )
2
vendor/cloud.google.com/go/bigquery/benchmarks/bench.go
generated
vendored
@ -17,6 +17,7 @@
 package main
 
 import (
+    "context"
     "encoding/json"
     "flag"
     "io/ioutil"
@ -24,7 +25,6 @@ import (
     "time"
 
     "cloud.google.com/go/bigquery"
-    "golang.org/x/net/context"
     "google.golang.org/api/iterator"
 )
 
10
vendor/cloud.google.com/go/bigquery/bigquery.go
generated
vendored
@ -15,26 +15,24 @@
 package bigquery
 
 import (
+    "context"
     "fmt"
     "io"
     "net/http"
     "time"
 
-    gax "github.com/googleapis/gax-go"
-
     "cloud.google.com/go/internal"
     "cloud.google.com/go/internal/version"
+    gax "github.com/googleapis/gax-go/v2"
+    bq "google.golang.org/api/bigquery/v2"
     "google.golang.org/api/googleapi"
     "google.golang.org/api/option"
     htransport "google.golang.org/api/transport/http"
-
-    "golang.org/x/net/context"
-    bq "google.golang.org/api/bigquery/v2"
 )
 
 const (
     prodAddr = "https://www.googleapis.com/bigquery/v2/"
+    // Scope is the Oauth2 scope for the service.
     Scope     = "https://www.googleapis.com/auth/bigquery"
     userAgent = "gcloud-golang-bigquery/20160429"
 )
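The recurring change in this hunk and the ones that follow is mechanical: the vendored packages drop "golang.org/x/net/context" in favor of the standard library's "context", and gax-go moves to its /v2 module path. The swap is drop-in because on modern Go the old package is a type alias for the new one; a minimal sketch of the pattern (the function here is illustrative, not from this commit):

// Sketch: the import swap applied across these files.
package example

// Before: import "golang.org/x/net/context"
import "context"

// Signatures stay identical: since Go 1.9, x/net/context.Context is a type
// alias for the standard context.Context, so callers keep compiling unchanged.
func run(ctx context.Context) error { return ctx.Err() }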
26493
vendor/cloud.google.com/go/bigquery/bigquery.replay
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
3
vendor/cloud.google.com/go/bigquery/copy.go
generated
vendored
@ -15,7 +15,8 @@
 package bigquery
 
 import (
-    "golang.org/x/net/context"
+    "context"
+
     bq "google.golang.org/api/bigquery/v2"
 )
 
4
vendor/cloud.google.com/go/bigquery/copy_test.go
generated
vendored
@ -17,10 +17,8 @@ package bigquery
 import (
     "testing"
 
-    "github.com/google/go-cmp/cmp/cmpopts"
-
     "cloud.google.com/go/internal/testutil"
+    "github.com/google/go-cmp/cmp/cmpopts"
     bq "google.golang.org/api/bigquery/v2"
 )
 
22
vendor/cloud.google.com/go/bigquery/dataset.go
generated
vendored
@ -15,14 +15,13 @@
 package bigquery
 
 import (
+    "context"
     "errors"
     "fmt"
     "time"
 
     "cloud.google.com/go/internal/optional"
     "cloud.google.com/go/internal/trace"
-
-    "golang.org/x/net/context"
     bq "google.golang.org/api/bigquery/v2"
     "google.golang.org/api/iterator"
 )
@ -60,7 +59,7 @@ type DatasetMetadataToUpdate struct {
     Description optional.String // The user-friendly description of this table.
     Name        optional.String // The user-friendly name for this dataset.
 
-    // DefaultTableExpiration is the the default expiration time for new tables.
+    // DefaultTableExpiration is the default expiration time for new tables.
     // If set to time.Duration(0), new tables never expire.
     DefaultTableExpiration optional.Duration
 
@ -404,6 +403,9 @@ type DatasetIterator struct {
 // PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
 func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
 
+// Next returns the next Dataset. Its second return value is iterator.Done if
+// there are no more results. Once Next returns Done, all subsequent calls will
+// return Done.
 func (it *DatasetIterator) Next() (*Dataset, error) {
     if err := it.nextFunc(); err != nil {
         return nil, err
@ -461,8 +463,11 @@ type AccessEntry struct {
 type AccessRole string
 
 const (
+    // OwnerRole is the OWNER AccessRole.
     OwnerRole AccessRole = "OWNER"
+    // ReaderRole is the READER AccessRole.
     ReaderRole AccessRole = "READER"
+    // WriterRole is the WRITER AccessRole.
     WriterRole AccessRole = "WRITER"
 )
 
@ -470,19 +475,20 @@ const (
 type EntityType int
 
 const (
-    // A domain (e.g. "example.com")
+    // DomainEntity is a domain (e.g. "example.com").
     DomainEntity EntityType = iota + 1
 
-    // Email address of a Google Group
+    // GroupEmailEntity is an email address of a Google Group.
     GroupEmailEntity
 
-    // Email address of an individual user.
+    // UserEmailEntity is an email address of an individual user.
     UserEmailEntity
 
-    // A special group: one of projectOwners, projectReaders, projectWriters or allAuthenticatedUsers.
+    // SpecialGroupEntity is a special group: one of projectOwners, projectReaders, projectWriters or
+    // allAuthenticatedUsers.
     SpecialGroupEntity
 
-    // A BigQuery view.
+    // ViewEntity is a BigQuery view.
     ViewEntity
 )
 
6
vendor/cloud.google.com/go/bigquery/dataset_test.go
generated
vendored
@ -15,16 +15,14 @@
 package bigquery
 
 import (
+    "context"
     "errors"
     "strconv"
     "testing"
     "time"
 
-    "github.com/google/go-cmp/cmp"
-
     "cloud.google.com/go/internal/testutil"
-    "golang.org/x/net/context"
+    "github.com/google/go-cmp/cmp"
     bq "google.golang.org/api/bigquery/v2"
     itest "google.golang.org/api/iterator/testing"
 )
6
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go
generated
vendored
@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC
+// Copyright 2019 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// AUTO-GENERATED CODE. DO NOT EDIT.
+// Code generated by gapic-generator. DO NOT EDIT.
 
 package datatransfer
 
@ -21,13 +21,13 @@ import (
 )
 
 import (
+    "context"
     "fmt"
     "strconv"
     "testing"
     "time"
 
     "cloud.google.com/go/internal/testutil"
-    "golang.org/x/net/context"
     "google.golang.org/api/iterator"
     "google.golang.org/api/option"
 )
|
68
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go
generated
vendored
@@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC
+// Copyright 2019 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,17 +12,18 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// AUTO-GENERATED CODE. DO NOT EDIT.
+// Code generated by gapic-generator. DO NOT EDIT.
 
 package datatransfer
 
 import (
+    "context"
+    "fmt"
     "math"
     "time"
 
-    "cloud.google.com/go/internal/version"
+    "github.com/golang/protobuf/proto"
-    gax "github.com/googleapis/gax-go"
+    gax "github.com/googleapis/gax-go/v2"
-    "golang.org/x/net/context"
     "google.golang.org/api/iterator"
     "google.golang.org/api/option"
     "google.golang.org/api/transport"
@@ -108,9 +109,9 @@ type Client struct {
 // NewClient creates a new data transfer service client.
 //
 // The Google BigQuery Data Transfer Service API enables BigQuery users to
-// configure the transfer of their data from other Google Products into BigQuery.
-// This service contains methods that are end user exposed. It backs up the
-// frontend.
+// configure the transfer of their data from other Google Products into
+// BigQuery. This service contains methods that are end user exposed. It backs
+// up the frontend.
 func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
     conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
     if err != nil {
@@ -141,15 +142,16 @@ func (c *Client) Close() error {
 // the `x-goog-api-client` header passed on each request. Intended for
 // use by Google-written clients.
 func (c *Client) setGoogleClientInfo(keyval ...string) {
-    kv := append([]string{"gl-go", version.Go()}, keyval...)
+    kv := append([]string{"gl-go", versionGo()}, keyval...)
-    kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+    kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
     c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
 }
 
 // GetDataSource retrieves a supported data source and returns its settings,
 // which can be used for UI rendering.
 func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
     var resp *datatransferpb.DataSource
     err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -166,9 +168,11 @@ func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataS
 // ListDataSources lists supported data sources and returns their settings,
 // which can be used for UI rendering.
 func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
     it := &DataSourceIterator{}
+    req = proto.Clone(req).(*datatransferpb.ListDataSourcesRequest)
     it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
         var resp *datatransferpb.ListDataSourcesResponse
         req.PageToken = pageToken
@@ -196,12 +200,14 @@ func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDa
         return nextPageToken, nil
     }
     it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+    it.pageInfo.MaxSize = int(req.PageSize)
     return it
 }
 
 // CreateTransferConfig creates a new data transfer configuration.
 func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
     var resp *datatransferpb.TransferConfig
     err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -218,7 +224,8 @@ func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.C
 // UpdateTransferConfig updates a data transfer configuration.
 // All fields must be set, even if they are not updated.
 func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "transfer_config.name", req.GetTransferConfig().GetName()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
     var resp *datatransferpb.TransferConfig
     err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -235,7 +242,8 @@ func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.U
 // DeleteTransferConfig deletes a data transfer configuration,
 // including any associated transfer runs and logs.
 func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
     err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
         var err error
@@ -247,7 +255,8 @@ func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.D
 
 // GetTransferConfig returns information about a data transfer config.
 func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
     var resp *datatransferpb.TransferConfig
     err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -263,9 +272,11 @@ func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetT
 
 // ListTransferConfigs returns information about all data transfers in the project.
 func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
     it := &TransferConfigIterator{}
+    req = proto.Clone(req).(*datatransferpb.ListTransferConfigsRequest)
     it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
         var resp *datatransferpb.ListTransferConfigsResponse
         req.PageToken = pageToken
@@ -293,6 +304,7 @@ func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.Li
         return nextPageToken, nil
     }
     it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+    it.pageInfo.MaxSize = int(req.PageSize)
     return it
 }
 
@@ -301,7 +313,8 @@ func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.Li
 // range, one transfer run is created.
 // Note that runs are created per UTC time in the time range.
 func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
     var resp *datatransferpb.ScheduleTransferRunsResponse
     err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -317,7 +330,8 @@ func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.S
 
 // GetTransferRun returns information about the particular transfer run.
 func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
     var resp *datatransferpb.TransferRun
     err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -333,7 +347,8 @@ func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTran
 
 // DeleteTransferRun deletes the specified transfer run.
 func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
     err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
         var err error
@@ -345,9 +360,11 @@ func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.Dele
 
 // ListTransferRuns returns information about running and completed jobs.
 func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
     it := &TransferRunIterator{}
+    req = proto.Clone(req).(*datatransferpb.ListTransferRunsRequest)
     it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
         var resp *datatransferpb.ListTransferRunsResponse
         req.PageToken = pageToken
@@ -375,14 +392,17 @@ func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListT
         return nextPageToken, nil
     }
     it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+    it.pageInfo.MaxSize = int(req.PageSize)
     return it
 }
 
 // ListTransferLogs returns user facing log messages for the data transfer run.
 func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
     it := &TransferMessageIterator{}
+    req = proto.Clone(req).(*datatransferpb.ListTransferLogsRequest)
     it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
         var resp *datatransferpb.ListTransferLogsResponse
         req.PageToken = pageToken
@@ -410,6 +430,7 @@ func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListT
         return nextPageToken, nil
     }
     it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+    it.pageInfo.MaxSize = int(req.PageSize)
     return it
 }
 
@@ -420,7 +441,8 @@ func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListT
 // token for the particular user, which is a pre-requisite before user can
 // create a transfer config.
 func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
-    ctx = insertMetadata(ctx, c.xGoogMetadata)
+    md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+    ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
     var resp *datatransferpb.CheckValidCredsResponse
     err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
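The recurring change in this file is the new x-goog-request-params routing header: each RPC now derives a field=value pair from the request and merges it into the outgoing gRPC metadata. The vendored insertMetadata helper is not shown in this hunk, so the following is a standalone sketch of the same pattern using plain grpc metadata; attachRoutingHeader and the field/value in main are illustrative names, not the generated client's API:

    package main

    import (
        "context"
        "fmt"

        "google.golang.org/grpc/metadata"
    )

    // attachRoutingHeader mirrors, in spirit, what the generated client does:
    // it merges an x-goog-request-params entry into any existing outgoing
    // gRPC metadata on the context.
    func attachRoutingHeader(ctx context.Context, field, value string) context.Context {
        md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", field, value))
        if existing, ok := metadata.FromOutgoingContext(ctx); ok {
            md = metadata.Join(existing, md)
        }
        return metadata.NewOutgoingContext(ctx, md)
    }

    func main() {
        ctx := attachRoutingHeader(context.Background(), "parent", "projects/my-project")
        md, _ := metadata.FromOutgoingContext(ctx)
        fmt.Println(md.Get("x-goog-request-params")) // [parent=projects/my-project]
    }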
@@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC
+// Copyright 2019 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,13 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// AUTO-GENERATED CODE. DO NOT EDIT.
+// Code generated by gapic-generator. DO NOT EDIT.
 
 package datatransfer_test
 
 import (
-    "cloud.google.com/go/bigquery/datatransfer/apiv1"
+    "context"
-    "golang.org/x/net/context"
+    datatransfer "cloud.google.com/go/bigquery/datatransfer/apiv1"
     "google.golang.org/api/iterator"
     datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
 )
49
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go
generated
vendored
@@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC
+// Copyright 2019 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// AUTO-GENERATED CODE. DO NOT EDIT.
+// Code generated by gapic-generator. DO NOT EDIT.
 
 // Package datatransfer is an auto-generated package for the
 // BigQuery Data Transfer API.
@@ -24,7 +24,11 @@
 package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1"
 
 import (
-    "golang.org/x/net/context"
+    "context"
+    "runtime"
+    "strings"
+    "unicode"
 
     "google.golang.org/grpc/metadata"
 )
 
@@ -45,3 +49,42 @@ func DefaultAuthScopes() []string {
         "https://www.googleapis.com/auth/cloud-platform",
     }
 }
+
+// versionGo returns the Go runtime version. The returned string
+// has no whitespace, suitable for reporting in header.
+func versionGo() string {
+    const develPrefix = "devel +"
+
+    s := runtime.Version()
+    if strings.HasPrefix(s, develPrefix) {
+        s = s[len(develPrefix):]
+        if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+            s = s[:p]
+        }
+        return s
+    }
+
+    notSemverRune := func(r rune) bool {
+        return strings.IndexRune("0123456789.", r) < 0
+    }
+
+    if strings.HasPrefix(s, "go1") {
+        s = s[2:]
+        var prerelease string
+        if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+            s, prerelease = s[:p], s[p:]
+        }
+        if strings.HasSuffix(s, ".") {
+            s += "0"
+        } else if strings.Count(s, ".") < 2 {
+            s += ".0"
+        }
+        if prerelease != "" {
+            s += "-" + prerelease
+        }
+        return s
+    }
+    return "UNKNOWN"
+}
+
+const versionClient = "20190306"
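versionGo above canonicalizes runtime.Version() into a semver-like string for the x-goog-api-client header. To make the normalization rules concrete, here is a standalone copy of its release-version branch with a few example inputs; normalize is a local name for illustration only, and the sample versions are hypothetical:

    package main

    import (
        "fmt"
        "strings"
    )

    // normalize duplicates the "go1..." branch of versionGo, purely to show
    // what it produces for typical release strings.
    func normalize(s string) string {
        notSemverRune := func(r rune) bool {
            return strings.IndexRune("0123456789.", r) < 0
        }
        if strings.HasPrefix(s, "go1") {
            s = s[2:] // drop the "go" prefix
            var prerelease string
            if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
                s, prerelease = s[:p], s[p:] // split off e.g. "beta1"
            }
            if strings.HasSuffix(s, ".") {
                s += "0"
            } else if strings.Count(s, ".") < 2 {
                s += ".0" // pad "1.12" to "1.12.0"
            }
            if prerelease != "" {
                s += "-" + prerelease
            }
            return s
        }
        return "UNKNOWN"
    }

    func main() {
        fmt.Println(normalize("go1.12"))      // 1.12.0
        fmt.Println(normalize("go1.12.5"))    // 1.12.5
        fmt.Println(normalize("go1.13beta1")) // 1.13.0-beta1
    }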
6
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go
generated
vendored
@@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC
+// Copyright 2019 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// AUTO-GENERATED CODE. DO NOT EDIT.
+// Code generated by gapic-generator. DO NOT EDIT.
 
 package datatransfer
 
@@ -24,6 +24,7 @@ import (
 )
 
 import (
+    "context"
     "flag"
     "fmt"
     "io"
@@ -35,7 +36,6 @@ import (
 
     "github.com/golang/protobuf/proto"
     "github.com/golang/protobuf/ptypes"
-    "golang.org/x/net/context"
     "google.golang.org/api/option"
     status "google.golang.org/genproto/googleapis/rpc/status"
     "google.golang.org/grpc"
17
vendor/cloud.google.com/go/bigquery/doc.go
generated
vendored
@@ -40,7 +40,7 @@ To query existing tables, create a Query and call its Read method:
 
     q := client.Query(`
         SELECT year, SUM(number) as num
-        FROM [bigquery-public-data:usa_names.usa_1910_2013]
+        FROM ` + "`bigquery-public-data.usa_names.usa_1910_2013`" + `
         WHERE name = "William"
         GROUP BY year
         ORDER BY year
@@ -174,9 +174,9 @@ Or you can infer the schema from a struct:
 Struct inference supports tags like those of the encoding/json package, so you can
 change names, ignore fields, or mark a field as nullable (non-required). Fields
 declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool,
-NullTimestamp, NullDate, NullTime and NullDateTime) are automatically inferred as
-nullable, so the "nullable" tag is only needed for []byte, *big.Rat and
-pointer-to-struct fields.
+NullTimestamp, NullDate, NullTime, NullDateTime, and NullGeography) are
+automatically inferred as nullable, so the "nullable" tag is only needed for []byte,
+*big.Rat and pointer-to-struct fields.
 
 type student2 struct {
     Name string `bigquery:"full_name"`
@@ -297,5 +297,14 @@ Extractor, then optionally configure it, and lastly call its Run method.
     extractor.DisableHeader = true
     job, err = extractor.Run(ctx)
     // Poll the job for completion if desired, as above.
+
+Errors
+
+Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
+These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:
+
+    if e, ok := err.(*googleapi.Error); ok {
+        if e.Code == 409 { ... }
+    }
 */
 package bigquery // import "cloud.google.com/go/bigquery"
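The example in the new Errors section reads more easily when expanded into a complete assertion. A minimal sketch, assuming the 409 Conflict case from the doc comment (classify and the fabricated error in main are illustrative only):

    package main

    import (
        "fmt"

        "google.golang.org/api/googleapi"
    )

    // classify reports whether err is a googleapi.Error with HTTP status 409
    // (Conflict), e.g. from creating a dataset that already exists.
    func classify(err error) string {
        if e, ok := err.(*googleapi.Error); ok && e.Code == 409 {
            return "already exists"
        }
        return "other error"
    }

    func main() {
        err := &googleapi.Error{Code: 409, Message: "Already Exists: Dataset my_dataset"}
        fmt.Println(classify(err)) // already exists
    }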
1
vendor/cloud.google.com/go/bigquery/error.go
generated
vendored
@@ -21,6 +21,7 @@ import (
 )
 
 // An Error contains detailed information about a failed bigquery operation.
+// Detailed description of possible Reasons can be found here: https://cloud.google.com/bigquery/troubleshooting-errors.
 type Error struct {
     // Mirrors bq.ErrorProto, but drops DebugInfo
     Location, Message, Reason string
1
vendor/cloud.google.com/go/bigquery/error_test.go
generated
vendored
@@ -20,7 +20,6 @@ import (
     "testing"
 
     "cloud.google.com/go/internal/testutil"
-
     bq "google.golang.org/api/bigquery/v2"
 )
 
42
vendor/cloud.google.com/go/bigquery/examples_test.go
generated
vendored
@@ -15,12 +15,12 @@
 package bigquery_test
 
 import (
+    "context"
     "fmt"
     "os"
     "time"
 
     "cloud.google.com/go/bigquery"
-    "golang.org/x/net/context"
     "google.golang.org/api/iterator"
 )
 
@@ -521,26 +521,26 @@ func ExampleTable_Metadata() {
     fmt.Println(md)
 }
 
-func ExampleTable_Uploader() {
+func ExampleTable_Inserter() {
     ctx := context.Background()
     client, err := bigquery.NewClient(ctx, "project-id")
     if err != nil {
         // TODO: Handle error.
     }
-    u := client.Dataset("my_dataset").Table("my_table").Uploader()
-    _ = u // TODO: Use u.
+    ins := client.Dataset("my_dataset").Table("my_table").Inserter()
+    _ = ins // TODO: Use ins.
 }
 
-func ExampleTable_Uploader_options() {
+func ExampleTable_Inserter_options() {
     ctx := context.Background()
     client, err := bigquery.NewClient(ctx, "project-id")
     if err != nil {
         // TODO: Handle error.
     }
-    u := client.Dataset("my_dataset").Table("my_table").Uploader()
-    u.SkipInvalidRows = true
-    u.IgnoreUnknownValues = true
-    _ = u // TODO: Use u.
+    ins := client.Dataset("my_dataset").Table("my_table").Inserter()
+    ins.SkipInvalidRows = true
+    ins.IgnoreUnknownValues = true
+    _ = ins // TODO: Use ins.
 }
 
 func ExampleTable_CopierFrom() {
@@ -737,33 +737,33 @@ func (i *Item) Save() (map[string]bigquery.Value, string, error) {
     }, "", nil
 }
 
-func ExampleUploader_Put() {
+func ExampleInserter_Put() {
     ctx := context.Background()
     client, err := bigquery.NewClient(ctx, "project-id")
     if err != nil {
         // TODO: Handle error.
     }
-    u := client.Dataset("my_dataset").Table("my_table").Uploader()
+    ins := client.Dataset("my_dataset").Table("my_table").Inserter()
     // Item implements the ValueSaver interface.
     items := []*Item{
         {Name: "n1", Size: 32.6, Count: 7},
         {Name: "n2", Size: 4, Count: 2},
         {Name: "n3", Size: 101.5, Count: 1},
     }
-    if err := u.Put(ctx, items); err != nil {
+    if err := ins.Put(ctx, items); err != nil {
         // TODO: Handle error.
     }
 }
 
 var schema bigquery.Schema
 
-func ExampleUploader_Put_structSaver() {
+func ExampleInserter_Put_structSaver() {
     ctx := context.Background()
     client, err := bigquery.NewClient(ctx, "project-id")
     if err != nil {
         // TODO: Handle error.
     }
-    u := client.Dataset("my_dataset").Table("my_table").Uploader()
+    ins := client.Dataset("my_dataset").Table("my_table").Inserter()
 
     type score struct {
         Name string
@@ -776,18 +776,18 @@ func ExampleUploader_Put_structSaver() {
         {Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
         {Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
     }
-    if err := u.Put(ctx, savers); err != nil {
+    if err := ins.Put(ctx, savers); err != nil {
         // TODO: Handle error.
     }
 }
 
-func ExampleUploader_Put_struct() {
+func ExampleInserter_Put_struct() {
     ctx := context.Background()
     client, err := bigquery.NewClient(ctx, "project-id")
     if err != nil {
         // TODO: Handle error.
     }
-    u := client.Dataset("my_dataset").Table("my_table").Uploader()
+    ins := client.Dataset("my_dataset").Table("my_table").Inserter()
 
     type score struct {
         Name string
@@ -799,19 +799,19 @@ func ExampleUploader_Put_struct() {
         {Name: "n3", Num: 7},
     }
     // Schema is inferred from the score type.
-    if err := u.Put(ctx, scores); err != nil {
+    if err := ins.Put(ctx, scores); err != nil {
         // TODO: Handle error.
     }
 }
 
-func ExampleUploader_Put_valuesSaver() {
+func ExampleInserter_Put_valuesSaver() {
     ctx := context.Background()
     client, err := bigquery.NewClient(ctx, "project-id")
     if err != nil {
         // TODO: Handle error.
     }
 
-    u := client.Dataset("my_dataset").Table("my_table").Uploader()
+    ins := client.Dataset("my_dataset").Table("my_table").Inserter()
 
     var vss []*bigquery.ValuesSaver
     for i, name := range []string{"n1", "n2", "n3"} {
@@ -823,7 +823,7 @@ func ExampleUploader_Put_valuesSaver() {
         })
     }
 
-    if err := u.Put(ctx, vss); err != nil {
+    if err := ins.Put(ctx, vss); err != nil {
         // TODO: Handle error.
     }
 }
1
vendor/cloud.google.com/go/bigquery/external.go
generated
vendored
@@ -33,6 +33,7 @@ const (
     GoogleSheets DataFormat = "GOOGLE_SHEETS"
     Bigtable     DataFormat = "BIGTABLE"
     Parquet      DataFormat = "PARQUET"
+    ORC          DataFormat = "ORC"
 )
 
 // ExternalData is a table which is stored outside of BigQuery. It is implemented by
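The new ORC DataFormat slots into the existing load path: GCSReference embeds FileConfig, so the constant can be assigned directly as a source format. A hedged sketch of loading an ORC file from Cloud Storage (project, bucket, object, dataset, and table names are placeholders):

    package main

    import (
        "context"
        "log"

        "cloud.google.com/go/bigquery"
    )

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "project-id") // placeholder project
        if err != nil {
            log.Fatal(err)
        }
        // Point at an ORC object and mark its format with the new constant.
        gcsRef := bigquery.NewGCSReference("gs://my-bucket/data.orc") // placeholder URI
        gcsRef.SourceFormat = bigquery.ORC
        loader := client.Dataset("my_dataset").Table("my_table").LoaderFrom(gcsRef)
        job, err := loader.Run(ctx)
        if err != nil {
            log.Fatal(err)
        }
        status, err := job.Wait(ctx)
        if err != nil {
            log.Fatal(err)
        }
        if status.Err() != nil {
            log.Fatal(status.Err())
        }
    }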
3
vendor/cloud.google.com/go/bigquery/extract.go
generated
vendored
@@ -15,8 +15,9 @@
 package bigquery
 
 import (
+    "context"
+
     "cloud.google.com/go/internal/trace"
-    "golang.org/x/net/context"
     bq "google.golang.org/api/bigquery/v2"
 )
 
4
vendor/cloud.google.com/go/bigquery/extract_test.go
generated
vendored
@@ -17,10 +17,8 @@ package bigquery
 import (
     "testing"
 
-    "github.com/google/go-cmp/cmp"
-
     "cloud.google.com/go/internal/testutil"
+    "github.com/google/go-cmp/cmp"
     bq "google.golang.org/api/bigquery/v2"
 )
 
4
vendor/cloud.google.com/go/bigquery/file.go
generated
vendored
@@ -49,7 +49,7 @@ func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader
 // loaded into a table via the Table.LoaderFromReader.
 type FileConfig struct {
     // SourceFormat is the format of the data to be read.
-    // Allowed values are: CSV, Avro, Parquet, JSON, DatastoreBackup. The default is CSV.
+    // Allowed values are: Avro, CSV, DatastoreBackup, JSON, ORC, and Parquet. The default is CSV.
     SourceFormat DataFormat
 
     // Indicates if we should automatically infer the options and
@@ -130,6 +130,8 @@ func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfigurat
 type Encoding string
 
 const (
+    // UTF_8 specifies the UTF-8 encoding type.
     UTF_8 Encoding = "UTF-8"
+    // ISO_8859_1 specifies the ISO-8859-1 encoding type.
     ISO_8859_1 Encoding = "ISO-8859-1"
 )
2
vendor/cloud.google.com/go/bigquery/gcs.go
generated
vendored
@@ -53,7 +53,9 @@ func NewGCSReference(uri ...string) *GCSReference {
 type Compression string
 
 const (
+    // None specifies no compression.
     None Compression = "NONE"
+    // Gzip specifies gzip compression.
     Gzip Compression = "GZIP"
 )
 
238
vendor/cloud.google.com/go/bigquery/inserter.go
generated
vendored
Normal file
@@ -0,0 +1,238 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "reflect"
+
+    "cloud.google.com/go/internal/trace"
+    bq "google.golang.org/api/bigquery/v2"
+)
+
+// An Inserter does streaming inserts into a BigQuery table.
+// It is safe for concurrent use.
+type Inserter struct {
+    t *Table
+
+    // SkipInvalidRows causes rows containing invalid data to be silently
+    // ignored. The default value is false, which causes the entire request to
+    // fail if there is an attempt to insert an invalid row.
+    SkipInvalidRows bool
+
+    // IgnoreUnknownValues causes values not matching the schema to be ignored.
+    // The default value is false, which causes records containing such values
+    // to be treated as invalid records.
+    IgnoreUnknownValues bool
+
+    // A TableTemplateSuffix allows Inserters to create tables automatically.
+    //
+    // Experimental: this option is experimental and may be modified or removed in future versions,
+    // regardless of any other documented package stability guarantees.
+    //
+    // When you specify a suffix, the table you upload data to
+    // will be used as a template for creating a new table, with the same schema,
+    // called <table> + <suffix>.
+    //
+    // More information is available at
+    // https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
+    TableTemplateSuffix string
+}
+
+// Inserter returns an Inserter that can be used to append rows to t.
+// The returned Inserter may optionally be further configured before its Put method is called.
+//
+// To stream rows into a date-partitioned table at a particular date, add the
+// $yyyymmdd suffix to the table name when constructing the Table.
+func (t *Table) Inserter() *Inserter {
+    return &Inserter{t: t}
+}
+
+// Uploader calls Inserter.
+// Deprecated: use Table.Inserter instead.
+func (t *Table) Uploader() *Inserter { return t.Inserter() }
+
+// Put uploads one or more rows to the BigQuery service.
+//
+// If src is ValueSaver, then its Save method is called to produce a row for uploading.
+//
+// If src is a struct or pointer to a struct, then a schema is inferred from it
+// and used to create a StructSaver. The InsertID of the StructSaver will be
+// empty.
+//
+// If src is a slice of ValueSavers, structs, or struct pointers, then each
+// element of the slice is treated as above, and multiple rows are uploaded.
+//
+// Put returns a PutMultiError if one or more rows failed to be uploaded.
+// The PutMultiError contains a RowInsertionError for each failed row.
+//
+// Put will retry on temporary errors (see
+// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
+// in duplicate rows if you do not use insert IDs. Also, if the error persists,
+// the call will run indefinitely. Pass a context with a timeout to prevent
+// hanging calls.
+func (u *Inserter) Put(ctx context.Context, src interface{}) (err error) {
+    ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Inserter.Put")
+    defer func() { trace.EndSpan(ctx, err) }()
+
+    savers, err := valueSavers(src)
+    if err != nil {
+        return err
+    }
+    return u.putMulti(ctx, savers)
+}
+
+func valueSavers(src interface{}) ([]ValueSaver, error) {
+    saver, ok, err := toValueSaver(src)
+    if err != nil {
+        return nil, err
+    }
+    if ok {
+        return []ValueSaver{saver}, nil
+    }
+    srcVal := reflect.ValueOf(src)
+    if srcVal.Kind() != reflect.Slice {
+        return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
+    }
+    var savers []ValueSaver
+    for i := 0; i < srcVal.Len(); i++ {
+        s := srcVal.Index(i).Interface()
+        saver, ok, err := toValueSaver(s)
+        if err != nil {
+            return nil, err
+        }
+        if !ok {
+            return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
+        }
+        savers = append(savers, saver)
+    }
+    return savers, nil
+}
+
+// Make a ValueSaver from x, which must implement ValueSaver already
+// or be a struct or pointer to struct.
+func toValueSaver(x interface{}) (ValueSaver, bool, error) {
+    if _, ok := x.(StructSaver); ok {
+        return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
+    }
+    var insertID string
+    // Handle StructSavers specially so we can infer the schema if necessary.
+    if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
+        x = ss.Struct
+        insertID = ss.InsertID
+        // Fall through so we can infer the schema.
+    }
+    if saver, ok := x.(ValueSaver); ok {
+        return saver, ok, nil
+    }
+    v := reflect.ValueOf(x)
+    // Support Put with []interface{}
+    if v.Kind() == reflect.Interface {
+        v = v.Elem()
+    }
+    if v.Kind() == reflect.Ptr {
+        v = v.Elem()
+    }
+    if v.Kind() != reflect.Struct {
+        return nil, false, nil
+    }
+    schema, err := inferSchemaReflectCached(v.Type())
+    if err != nil {
+        return nil, false, err
+    }
+    return &StructSaver{
+        Struct:   x,
+        InsertID: insertID,
+        Schema:   schema,
+    }, true, nil
+}
+
+func (u *Inserter) putMulti(ctx context.Context, src []ValueSaver) error {
+    req, err := u.newInsertRequest(src)
+    if err != nil {
+        return err
+    }
+    if req == nil {
+        return nil
+    }
+    call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
+    call = call.Context(ctx)
+    setClientHeader(call.Header())
+    var res *bq.TableDataInsertAllResponse
+    err = runWithRetry(ctx, func() (err error) {
+        res, err = call.Do()
+        return err
+    })
+    if err != nil {
+        return err
+    }
+    return handleInsertErrors(res.InsertErrors, req.Rows)
+}
+
+func (u *Inserter) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
+    if savers == nil { // If there are no rows, do nothing.
+        return nil, nil
+    }
+    req := &bq.TableDataInsertAllRequest{
+        TemplateSuffix:      u.TableTemplateSuffix,
+        IgnoreUnknownValues: u.IgnoreUnknownValues,
+        SkipInvalidRows:     u.SkipInvalidRows,
+    }
+    for _, saver := range savers {
+        row, insertID, err := saver.Save()
+        if err != nil {
+            return nil, err
+        }
+        if insertID == "" {
+            insertID = randomIDFn()
+        }
+        m := make(map[string]bq.JsonValue)
+        for k, v := range row {
+            m[k] = bq.JsonValue(v)
+        }
+        req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
+            InsertId: insertID,
+            Json:     m,
+        })
+    }
+    return req, nil
+}
+
+func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
+    if len(ierrs) == 0 {
+        return nil
+    }
+    var errs PutMultiError
+    for _, e := range ierrs {
+        if int(e.Index) > len(rows) {
+            return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
+        }
+        rie := RowInsertionError{
+            InsertID: rows[e.Index].InsertId,
+            RowIndex: int(e.Index),
+        }
+        for _, errp := range e.Errors {
+            rie.Errors = append(rie.Errors, bqToError(errp))
+        }
+        errs = append(errs, rie)
+    }
+    return errs
+}
+
+// Uploader is an obsolete name for Inserter.
+type Uploader = Inserter
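Because Uploader is now a type alias for Inserter, existing callers compile unchanged while new code can use the Inserter name. Per-row failures surface as the PutMultiError built by handleInsertErrors above, with one RowInsertionError per rejected row. A minimal caller-side sketch, assuming placeholder project, dataset, and table IDs:

    package main

    import (
        "context"
        "fmt"
        "log"

        "cloud.google.com/go/bigquery"
    )

    type row struct {
        Name  string
        Count int
    }

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "project-id") // placeholder project
        if err != nil {
            log.Fatal(err)
        }
        ins := client.Dataset("my_dataset").Table("my_table").Inserter()

        rows := []*row{{Name: "n1", Count: 7}, {Name: "n2", Count: 2}}
        if err := ins.Put(ctx, rows); err != nil {
            // Put reports per-row failures as a PutMultiError; other errors
            // (e.g. auth or network) come back as ordinary errors.
            if multi, ok := err.(bigquery.PutMultiError); ok {
                for _, rie := range multi {
                    fmt.Printf("row %d (insert ID %s): %v\n", rie.RowIndex, rie.InsertID, rie.Errors)
                }
            } else {
                log.Fatal(err)
            }
        }
    }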
210
vendor/cloud.google.com/go/bigquery/inserter_test.go
generated
vendored
Normal file
210
vendor/cloud.google.com/go/bigquery/inserter_test.go
generated
vendored
Normal file
@@ -0,0 +1,210 @@
// Copyright 2015 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"strconv"
	"testing"

	"cloud.google.com/go/internal/pretty"
	"cloud.google.com/go/internal/testutil"
	"github.com/google/go-cmp/cmp"
	bq "google.golang.org/api/bigquery/v2"
)

type testSaver struct {
	row      map[string]Value
	insertID string
	err      error
}

func (ts testSaver) Save() (map[string]Value, string, error) {
	return ts.row, ts.insertID, ts.err
}

func TestNewInsertRequest(t *testing.T) {
	prev := randomIDFn
	n := 0
	randomIDFn = func() string { n++; return strconv.Itoa(n) }
	defer func() { randomIDFn = prev }()

	tests := []struct {
		ul     *Uploader
		savers []ValueSaver
		req    *bq.TableDataInsertAllRequest
	}{
		{
			ul:     &Uploader{},
			savers: nil,
			req:    nil,
		},
		{
			ul: &Uploader{},
			savers: []ValueSaver{
				testSaver{row: map[string]Value{"one": 1}},
				testSaver{row: map[string]Value{"two": 2}},
			},
			req: &bq.TableDataInsertAllRequest{
				Rows: []*bq.TableDataInsertAllRequestRows{
					{InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}},
					{InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}},
				},
			},
		},
		{
			ul: &Uploader{
				TableTemplateSuffix: "suffix",
				IgnoreUnknownValues: true,
				SkipInvalidRows:     true,
			},
			savers: []ValueSaver{
				testSaver{insertID: "a", row: map[string]Value{"one": 1}},
				testSaver{insertID: "", row: map[string]Value{"two": 2}},
			},
			req: &bq.TableDataInsertAllRequest{
				Rows: []*bq.TableDataInsertAllRequestRows{
					{InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}},
					{InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}},
				},
				TemplateSuffix:      "suffix",
				SkipInvalidRows:     true,
				IgnoreUnknownValues: true,
			},
		},
	}
	for i, tc := range tests {
		got, err := tc.ul.newInsertRequest(tc.savers)
		if err != nil {
			t.Fatal(err)
		}
		want := tc.req
		if !testutil.Equal(got, want) {
			t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want)
		}
	}
}

func TestNewInsertRequestErrors(t *testing.T) {
	var u Uploader
	_, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("bang")}})
	if err == nil {
		t.Error("got nil, want error")
	}
}

func TestHandleInsertErrors(t *testing.T) {
	rows := []*bq.TableDataInsertAllRequestRows{
		{InsertId: "a"},
		{InsertId: "b"},
	}
	for _, test := range []struct {
		in   []*bq.TableDataInsertAllResponseInsertErrors
		want error
	}{
		{
			in:   nil,
			want: nil,
		},
		{
			in:   []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
			want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
		},
		{
			in:   []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
			want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
		},
		{
			in: []*bq.TableDataInsertAllResponseInsertErrors{
				{Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0},
				{Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1},
			},
			want: PutMultiError{
				RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}},
				RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}},
			},
		},
	} {
		got := handleInsertErrors(test.in, rows)
		if !testutil.Equal(got, test.want) {
			t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want)
		}
	}
}

func TestValueSavers(t *testing.T) {
	ts := &testSaver{}
	type T struct{ I int }
	schema, err := InferSchema(T{})
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range []struct {
		in   interface{}
		want []ValueSaver
	}{
		{[]interface{}(nil), nil},
		{[]interface{}{}, nil},
		{ts, []ValueSaver{ts}},
		{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
		{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
		{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
		{[]T{{I: 1}, {I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: T{I: 2}},
		}},
		{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: &T{I: 2}},
		}},
		{&StructSaver{Struct: T{I: 3}, InsertID: "foo"},
			[]ValueSaver{
				&StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"},
			}},
	} {
		got, err := valueSavers(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) {
			t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
		}
		// Make sure Save is successful.
		for i, vs := range got {
			_, _, err := vs.Save()
			if err != nil {
				t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
			}
		}
	}
}

func TestValueSaversErrors(t *testing.T) {
	inputs := []interface{}{
		nil,
		1,
		[]int{1, 2},
		[]interface{}{
			testSaver{row: map[string]Value{"one": 1}, insertID: "a"},
			1,
		},
		StructSaver{},
	}
	for _, in := range inputs {
		if _, err := valueSavers(in); err == nil {
			t.Errorf("%#v: got nil, want error", in)
		}
	}
}
431
vendor/cloud.google.com/go/bigquery/integration_test.go
generated
vendored
@@ -15,6 +15,8 @@
 package bigquery
 
 import (
+	"context"
+	"encoding/json"
 	"errors"
 	"flag"
 	"fmt"
@@ -27,22 +29,25 @@ import (
 	"testing"
 	"time"
 
-	"github.com/google/go-cmp/cmp"
-	"github.com/google/go-cmp/cmp/cmpopts"
-	gax "github.com/googleapis/gax-go"
-
 	"cloud.google.com/go/civil"
+	"cloud.google.com/go/httpreplay"
 	"cloud.google.com/go/internal"
 	"cloud.google.com/go/internal/pretty"
 	"cloud.google.com/go/internal/testutil"
 	"cloud.google.com/go/internal/uid"
 	"cloud.google.com/go/storage"
-	"golang.org/x/net/context"
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	gax "github.com/googleapis/gax-go/v2"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 )
 
+const replayFilename = "bigquery.replay"
+
+var record = flag.Bool("record", false, "record RPCs")
+
 var (
 	client        *Client
 	storageClient *storage.Client
@@ -55,10 +60,7 @@ var (
 	}},
 	}
 	testTableExpiration time.Time
-	// BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
-	// with underscores.
-	datasetIDs = uid.NewSpace("dataset", &uid.Options{Sep: '_'})
-	tableIDs   = uid.NewSpace("table", &uid.Options{Sep: '_'})
+	datasetIDs, tableIDs *uid.Space
 )
 
 // Note: integration tests cannot be run in parallel, because TestIntegration_Location
@@ -78,34 +80,126 @@ func getClient(t *testing.T) *Client {
 	return client
 }
 
-// If integration tests will be run, create a unique bucket for them.
+// If integration tests will be run, create a unique dataset for them.
+// Return a cleanup function.
 func initIntegrationTest() func() {
-	flag.Parse() // needed for testing.Short()
-	if testing.Short() {
-		return func() {}
-	}
 	ctx := context.Background()
+	flag.Parse() // needed for testing.Short()
+	projID := testutil.ProjID()
+	switch {
+	case testing.Short() && *record:
+		log.Fatal("cannot combine -short and -record")
+		return func() {}
+
+	case testing.Short() && httpreplay.Supported() && testutil.CanReplay(replayFilename) && projID != "":
+		// go test -short with a replay file will replay the integration tests if the
+		// environment variables are set.
+		log.Printf("replaying from %s", replayFilename)
+		httpreplay.DebugHeaders()
+		replayer, err := httpreplay.NewReplayer(replayFilename)
+		if err != nil {
+			log.Fatal(err)
+		}
+		var t time.Time
+		if err := json.Unmarshal(replayer.Initial(), &t); err != nil {
+			log.Fatal(err)
+		}
+		hc, err := replayer.Client(ctx) // no creds needed
+		if err != nil {
+			log.Fatal(err)
+		}
+		client, err = NewClient(ctx, projID, option.WithHTTPClient(hc))
+		if err != nil {
+			log.Fatal(err)
+		}
+		storageClient, err = storage.NewClient(ctx, option.WithHTTPClient(hc))
+		if err != nil {
+			log.Fatal(err)
+		}
+		cleanup := initTestState(client, t)
+		return func() {
+			cleanup()
+			_ = replayer.Close() // No actionable error returned.
+		}
+
+	case testing.Short():
+		// go test -short without a replay file skips the integration tests.
+		if testutil.CanReplay(replayFilename) && projID != "" {
+			log.Print("replay not supported for Go versions before 1.8")
+		}
+		client = nil
+		storageClient = nil
+		return func() {}
+
+	default: // Run integration tests against a real backend.
 		ts := testutil.TokenSource(ctx, Scope)
 		if ts == nil {
 			log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
 			return func() {}
 		}
-		projID := testutil.ProjID()
+		bqOpt := option.WithTokenSource(ts)
+		sOpt := option.WithTokenSource(testutil.TokenSource(ctx, storage.ScopeFullControl))
+		cleanup := func() {}
+		now := time.Now().UTC()
+		if *record {
+			if !httpreplay.Supported() {
+				log.Print("record not supported for Go versions before 1.8")
+			} else {
+				nowBytes, err := json.Marshal(now)
+				if err != nil {
+					log.Fatal(err)
+				}
+				recorder, err := httpreplay.NewRecorder(replayFilename, nowBytes)
+				if err != nil {
+					log.Fatalf("could not record: %v", err)
+				}
+				log.Printf("recording to %s", replayFilename)
+				hc, err := recorder.Client(ctx, bqOpt)
+				if err != nil {
+					log.Fatal(err)
+				}
+				bqOpt = option.WithHTTPClient(hc)
+				hc, err = recorder.Client(ctx, sOpt)
+				if err != nil {
+					log.Fatal(err)
+				}
+				sOpt = option.WithHTTPClient(hc)
+				cleanup = func() {
+					if err := recorder.Close(); err != nil {
+						log.Printf("saving recording: %v", err)
+					}
+				}
+			}
+		}
 		var err error
-		client, err = NewClient(ctx, projID, option.WithTokenSource(ts))
+		client, err = NewClient(ctx, projID, bqOpt)
 		if err != nil {
 			log.Fatalf("NewClient: %v", err)
 		}
-		storageClient, err = storage.NewClient(ctx,
-			option.WithTokenSource(testutil.TokenSource(ctx, storage.ScopeFullControl)))
+		storageClient, err = storage.NewClient(ctx, sOpt)
 		if err != nil {
 			log.Fatalf("storage.NewClient: %v", err)
 		}
+		c := initTestState(client, now)
+		return func() { c(); cleanup() }
+	}
+}
+
+func initTestState(client *Client, t time.Time) func() {
+	// BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
+	// with underscores.
+	ctx := context.Background()
+	opts := &uid.Options{Sep: '_', Time: t}
+	datasetIDs = uid.NewSpace("dataset", opts)
+	tableIDs = uid.NewSpace("table", opts)
+	testTableExpiration = t.Add(10 * time.Minute).Round(time.Second)
+	// For replayability, seed the random source with t.
+	Seed(t.UnixNano())
+
 	dataset = client.Dataset(datasetIDs.New())
 	if err := dataset.Create(ctx, nil); err != nil {
 		log.Fatalf("creating dataset %s: %v", dataset.DatasetID, err)
 	}
-	testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
 	return func() {
 		if err := dataset.DeleteWithContents(ctx); err != nil {
 			log.Printf("could not delete %s", dataset.DatasetID)
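The record/replay wiring above follows the cloud.google.com/go/httpreplay pattern: record real RPCs once with credentials, then replay them deterministically with none. A condensed, hedged sketch of that pattern under the same assumptions as the test; the project ID, the empty initial state, and the omitted token source are placeholders, not part of this change:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigquery"
	"cloud.google.com/go/httpreplay"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Phase 1: record against the real service (credentials required).
	rec, err := httpreplay.NewRecorder("bigquery.replay", []byte("{}")) // initial state; the test stores a timestamp here
	if err != nil {
		log.Fatal(err)
	}
	hc, err := rec.Client(ctx /* plus option.WithTokenSource(...) in real use */)
	if err != nil {
		log.Fatal(err)
	}
	client, err := bigquery.NewClient(ctx, "my-project", option.WithHTTPClient(hc)) // hypothetical project
	if err != nil {
		log.Fatal(err)
	}
	_ = client // ... exercise the API here ...
	if err := rec.Close(); err != nil {
		log.Fatal(err) // Close flushes the replay file
	}

	// Phase 2: replay from the file; no credentials are needed.
	rep, err := httpreplay.NewReplayer("bigquery.replay")
	if err != nil {
		log.Fatal(err)
	}
	defer rep.Close()
	hc, err = rep.Client(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client, err = bigquery.NewClient(ctx, "my-project", option.WithHTTPClient(hc))
	if err != nil {
		log.Fatal(err)
	}
	_ = client // ... rerun the same calls deterministically ...
}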
@@ -124,7 +218,7 @@ func TestIntegration_TableCreate(t *testing.T) {
 	}
 	err := table.Create(context.Background(), &TableMetadata{
 		Schema:         schema,
-		ExpirationTime: time.Now().Add(5 * time.Minute),
+		ExpirationTime: testTableExpiration.Add(5 * time.Minute),
 	})
 	if err == nil {
 		t.Fatal("want error, got nil")
@@ -159,6 +253,8 @@ func TestIntegration_TableCreateView(t *testing.T) {
 }
 
 func TestIntegration_TableMetadata(t *testing.T) {
+	t.Skip("Internal bug 128670231")
+
 	if client == nil {
 		t.Skip("Integration tests skipped")
 	}
@@ -191,14 +287,17 @@ func TestIntegration_TableMetadata(t *testing.T) {
 		timePartitioning TimePartitioning
 		wantExpiration   time.Duration
 		wantField        string
+		wantPruneFilter  bool
 	}{
-		{TimePartitioning{}, time.Duration(0), ""},
-		{TimePartitioning{Expiration: time.Second}, time.Second, ""},
+		{TimePartitioning{}, time.Duration(0), "", false},
+		{TimePartitioning{Expiration: time.Second}, time.Second, "", false},
+		{TimePartitioning{RequirePartitionFilter: true}, time.Duration(0), "", true},
 		{
 			TimePartitioning{
 				Expiration: time.Second,
 				Field:      "date",
-			}, time.Second, "date"},
+				RequirePartitionFilter: true,
+			}, time.Second, "date", true},
 	}
 
 	schema2 := Schema{
@@ -206,30 +305,124 @@ func TestIntegration_TableMetadata(t *testing.T) {
 		{Name: "date", Type: DateFieldType},
 	}
 
+	clustering := &Clustering{
+		Fields: []string{"name"},
+	}
+
+	// Currently, clustering depends on partitioning. Interleave testing of the two features.
 	for i, c := range partitionCases {
-		table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
+		table := dataset.Table(fmt.Sprintf("t_metadata_partition_nocluster_%v", i))
+		clusterTable := dataset.Table(fmt.Sprintf("t_metadata_partition_cluster_%v", i))
+
+		// Create unclustered, partitioned variant and get metadata.
 		err = table.Create(context.Background(), &TableMetadata{
 			Schema:           schema2,
 			TimePartitioning: &c.timePartitioning,
-			ExpirationTime:   time.Now().Add(5 * time.Minute),
+			ExpirationTime:   testTableExpiration,
 		})
 		if err != nil {
 			t.Fatal(err)
 		}
 		defer table.Delete(ctx)
-		md, err = table.Metadata(ctx)
+		md, err := table.Metadata(ctx)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		got := md.TimePartitioning
+		// Create clustered table and get metadata.
+		err = clusterTable.Create(context.Background(), &TableMetadata{
+			Schema:           schema2,
+			TimePartitioning: &c.timePartitioning,
+			ExpirationTime:   testTableExpiration,
+			Clustering:       clustering,
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+		clusterMD, err := clusterTable.Metadata(ctx)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		for _, v := range []*TableMetadata{md, clusterMD} {
+			got := v.TimePartitioning
 			want := &TimePartitioning{
 				Expiration: c.wantExpiration,
 				Field:      c.wantField,
+				RequirePartitionFilter: c.wantPruneFilter,
 			}
 			if !testutil.Equal(got, want) {
 				t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
 			}
+			// check that RequirePartitionFilter can be inverted.
+			mdUpdate := TableMetadataToUpdate{
+				TimePartitioning: &TimePartitioning{
+					Expiration:             v.TimePartitioning.Expiration,
+					RequirePartitionFilter: !want.RequirePartitionFilter,
+				},
+			}
+
+			newmd, err := table.Update(ctx, mdUpdate, "")
+			if err != nil {
+				t.Errorf("failed to invert RequirePartitionFilter on %s: %v", table.FullyQualifiedName(), err)
+			}
+			if newmd.TimePartitioning.RequirePartitionFilter == want.RequirePartitionFilter {
+				t.Errorf("inverting RequirePartitionFilter on %s failed, want %t got %t", table.FullyQualifiedName(), !want.RequirePartitionFilter, newmd.TimePartitioning.RequirePartitionFilter)
+			}
+		}
+
+		if md.Clustering != nil {
+			t.Errorf("metadata.Clustering was not nil on unclustered table %s", table.TableID)
+		}
+		got := clusterMD.Clustering
+		want := clustering
+		if clusterMD.Clustering != clustering {
+			if !testutil.Equal(got, want) {
+				t.Errorf("metadata.Clustering: got %v, want %v", got, want)
+			}
+		}
+	}
+}
+
+func TestIntegration_RemoveTimePartitioning(t *testing.T) {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	ctx := context.Background()
+	table := dataset.Table(tableIDs.New())
+	want := 24 * time.Hour
+	err := table.Create(ctx, &TableMetadata{
+		ExpirationTime: testTableExpiration,
+		TimePartitioning: &TimePartitioning{
+			Expiration: want,
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer table.Delete(ctx)
+
+	md, err := table.Metadata(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got := md.TimePartitioning.Expiration; got != want {
+		t.Fatalf("TimePartitioning expiration want = %v, got = %v", want, got)
+	}
+
+	// Remove time partitioning expiration
+	md, err = table.Update(context.Background(), TableMetadataToUpdate{
+		TimePartitioning: &TimePartitioning{Expiration: 0},
+	}, md.ETag)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	want = time.Duration(0)
+	if got := md.TimePartitioning.Expiration; got != want {
+		t.Fatalf("TimePartitioning expiration want = %v, got = %v", want, got)
 	}
 }
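The partitioning and clustering options exercised above boil down to a small amount of client code. A hedged sketch of creating a day-partitioned, clustered table with the same options; the dataset, table, field names, and expiration are illustrative:

package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/bigquery"
)

// createPartitionedClusteredTable mirrors the options the test exercises.
func createPartitionedClusteredTable(ctx context.Context, client *bigquery.Client) error {
	schema := bigquery.Schema{
		{Name: "name", Type: bigquery.StringFieldType},
		{Name: "date", Type: bigquery.DateFieldType},
	}
	return client.Dataset("my_dataset").Table("events").Create(ctx, &bigquery.TableMetadata{
		Schema: schema,
		TimePartitioning: &bigquery.TimePartitioning{
			Field:                  "date",
			Expiration:             7 * 24 * time.Hour,
			RequirePartitionFilter: true, // queries must filter on the partition column
		},
		// Clustering currently requires partitioning, as the test notes.
		Clustering: &bigquery.Clustering{Fields: []string{"name"}},
	})
}

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	if err := createPartitionedClusteredTable(ctx, client); err != nil {
		log.Fatal(err)
	}
}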
@@ -372,12 +565,12 @@ func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) {
 		t.Skip("Integration tests skipped")
 	}
 	ctx := context.Background()
-	md, err := dataset.Metadata(ctx)
+	_, err := dataset.Metadata(ctx)
 	if err != nil {
 		t.Fatal(err)
 	}
 	// Set the default expiration time.
-	md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Hour}, "")
+	md, err := dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Hour}, "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -439,13 +632,13 @@ func TestIntegration_DatasetUpdateLabels(t *testing.T) {
 		t.Skip("Integration tests skipped")
 	}
 	ctx := context.Background()
-	md, err := dataset.Metadata(ctx)
+	_, err := dataset.Metadata(ctx)
 	if err != nil {
 		t.Fatal(err)
 	}
 	var dm DatasetMetadataToUpdate
 	dm.SetLabel("label", "value")
-	md, err = dataset.Update(ctx, dm, "")
+	md, err := dataset.Update(ctx, dm, "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -532,7 +725,7 @@ func TestIntegration_Tables(t *testing.T) {
 		}
 	}
 
-func TestIntegration_UploadAndRead(t *testing.T) {
+func TestIntegration_InsertAndRead(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
 	}
@@ -541,7 +734,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
 	defer table.Delete(ctx)
 
 	// Populate the table.
-	upl := table.Uploader()
+	ins := table.Inserter()
 	var (
 		wantRows  [][]Value
 		saverRows []*ValuesSaver
@@ -555,7 +748,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
 			Row:    row,
 		})
 	}
-	if err := upl.Put(ctx, saverRows); err != nil {
+	if err := ins.Put(ctx, saverRows); err != nil {
 		t.Fatal(putError(err))
 	}
 
@@ -564,7 +757,6 @@ func TestIntegration_UploadAndRead(t *testing.T) {
 	if err := waitForRow(ctx, table); err != nil {
 		t.Fatal(err)
 	}
-
 	// Read the table.
 	checkRead(t, "upload", table.Read(ctx), wantRows)
 
@@ -651,6 +843,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
 		}
 	}
 }
+
 }
 
 type SubSubTestStruct struct {
@@ -674,6 +867,7 @@ type TestStruct struct {
 	Time     civil.Time
 	DateTime civil.DateTime
 	Numeric  *big.Rat
+	Geography string
 
 	StringArray  []string
 	IntegerArray []int64
@@ -684,6 +878,7 @@ type TestStruct struct {
 	TimeArray     []civil.Time
 	DateTimeArray []civil.DateTime
 	NumericArray  []*big.Rat
+	GeographyArray []string
 
 	Record      SubTestStruct
 	RecordArray []SubTestStruct
@@ -693,7 +888,7 @@ type TestStruct struct {
 var roundToMicros = cmp.Transformer("RoundToMicros",
 	func(t time.Time) time.Time { return t.Round(time.Microsecond) })
 
-func TestIntegration_UploadAndReadStructs(t *testing.T) {
+func TestIntegration_InsertAndReadStructs(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
 	}
@@ -714,9 +909,11 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
 	tm2 := civil.Time{Hour: 1, Minute: 2, Second: 4, Nanosecond: 0}
 	ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC)
 	dtm2 := civil.DateTime{Date: d2, Time: tm2}
+	g := "POINT(-122.350220 47.649154)"
+	g2 := "POINT(-122.0836791 37.421827)"
 
 	// Populate the table.
-	upl := table.Uploader()
+	ins := table.Inserter()
 	want := []*TestStruct{
 		{
 			"a",
@@ -729,6 +926,7 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
 			tm,
 			dtm,
 			big.NewRat(57, 100),
+			g,
 			[]string{"a", "b"},
 			[]int64{1, 2},
 			[]float64{1, 1.41},
@@ -738,6 +936,7 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
 			[]civil.Time{tm, tm2},
 			[]civil.DateTime{dtm, dtm2},
 			[]*big.Rat{big.NewRat(1, 2), big.NewRat(3, 5)},
+			[]string{g, g2},
 			SubTestStruct{
 				"string",
 				SubSubTestStruct{24},
@@ -769,7 +968,7 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
 	for _, s := range want {
 		savers = append(savers, &StructSaver{Schema: schema, Struct: s})
 	}
-	if err := upl.Put(ctx, savers); err != nil {
+	if err := ins.Put(ctx, savers); err != nil {
 		t.Fatal(putError(err))
 	}
 
@@ -811,15 +1010,34 @@ func (b byName) Len() int           { return len(b) }
 func (b byName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
 func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }
 
-func TestIntegration_UploadAndReadNullable(t *testing.T) {
+func TestIntegration_InsertAndReadNullable(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
 	}
 	ctm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000}
 	cdt := civil.DateTime{Date: testDate, Time: ctm}
 	rat := big.NewRat(33, 100)
-	testUploadAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema)))
-	testUploadAndReadNullable(t, testStructNullable{
+	geo := "POINT(-122.198939 47.669865)"
+
+	// Nil fields in the struct.
+	testInsertAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema)))
+
+	// Explicitly invalidate the Null* types within the struct.
+	testInsertAndReadNullable(t, testStructNullable{
+		String:    NullString{Valid: false},
+		Integer:   NullInt64{Valid: false},
+		Float:     NullFloat64{Valid: false},
+		Boolean:   NullBool{Valid: false},
+		Timestamp: NullTimestamp{Valid: false},
+		Date:      NullDate{Valid: false},
+		Time:      NullTime{Valid: false},
+		DateTime:  NullDateTime{Valid: false},
+		Geography: NullGeography{Valid: false},
+	},
+		make([]Value, len(testStructNullableSchema)))
+
+	// Populate the struct with values.
+	testInsertAndReadNullable(t, testStructNullable{
 		String:    NullString{"x", true},
 		Bytes:     []byte{1, 2, 3},
 		Integer:   NullInt64{1, true},
@@ -830,19 +1048,20 @@ func TestIntegration_UploadAndReadNullable(t *testing.T) {
 		Time:      NullTime{ctm, true},
 		DateTime:  NullDateTime{cdt, true},
 		Numeric:   rat,
+		Geography: NullGeography{geo, true},
 		Record:    &subNullable{X: NullInt64{4, true}},
 	},
-		[]Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, ctm, cdt, rat, []Value{int64(4)}})
+		[]Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, ctm, cdt, rat, geo, []Value{int64(4)}})
 }
 
-func testUploadAndReadNullable(t *testing.T, ts testStructNullable, wantRow []Value) {
+func testInsertAndReadNullable(t *testing.T, ts testStructNullable, wantRow []Value) {
 	ctx := context.Background()
 	table := newTable(t, testStructNullableSchema)
 	defer table.Delete(ctx)
 
 	// Populate the table.
-	upl := table.Uploader()
-	if err := upl.Put(ctx, []*StructSaver{{Schema: testStructNullableSchema, Struct: ts}}); err != nil {
+	ins := table.Inserter()
+	if err := ins.Put(ctx, []*StructSaver{{Schema: testStructNullableSchema, Struct: ts}}); err != nil {
 		t.Fatal(putError(err))
 	}
 	// Wait until the data has been uploaded. This can take a few seconds, according
@@ -1058,7 +1277,7 @@ func TestIntegration_DML(t *testing.T) {
 		('b', [1], STRUCT<BOOL>(FALSE)),
 		('c', [2], STRUCT<BOOL>(TRUE))`,
 		table.DatasetID, table.TableID)
-	if err := dmlInsert(ctx, sql); err != nil {
+	if err := runDML(ctx, sql); err != nil {
 		t.Fatal(err)
 	}
 	wantRows := [][]Value{
@@ -1069,25 +1288,29 @@ func TestIntegration_DML(t *testing.T) {
 	checkRead(t, "DML", table.Read(ctx), wantRows)
 }
 
-func dmlInsert(ctx context.Context, sql string) error {
+func runDML(ctx context.Context, sql string) error {
 	// Retry insert; sometimes it fails with INTERNAL.
-	return internal.Retry(ctx, gax.Backoff{}, func() (bool, error) {
-		// Use DML to insert.
-		q := client.Query(sql)
-		job, err := q.Run(ctx)
+	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
+		ri, err := client.Query(sql).Read(ctx)
 		if err != nil {
 			if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
 				return true, err // fail on 4xx
 			}
 			return false, err
 		}
-		if err := wait(ctx, job); err != nil {
+		// It is OK to try to iterate over DML results. The first call to Next
+		// will return iterator.Done.
+		err = ri.Next(nil)
+		if err == nil {
+			return true, errors.New("want iterator.Done on the first call, got nil")
+		}
+		if err == iterator.Done {
+			return true, nil
+		}
 		if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
 			return true, err // fail on 4xx
 		}
 		return false, err
-		}
-		return true, nil
 	})
 }
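The rewritten runDML helper relies on a property worth calling out: Query.Read works for DML statements, and the first RowIterator.Next call returns iterator.Done because a DML statement produces no rows. A hedged sketch of the same pattern outside the test, without the retry wrapper; the project ID and statement are illustrative:

package main

import (
	"context"
	"errors"
	"log"

	"cloud.google.com/go/bigquery"
	"google.golang.org/api/iterator"
)

// execDML runs a DML statement and waits for it to finish by iterating
// its (empty) result set.
func execDML(ctx context.Context, client *bigquery.Client, sql string) error {
	ri, err := client.Query(sql).Read(ctx)
	if err != nil {
		return err
	}
	switch err := ri.Next(nil); {
	case err == iterator.Done:
		return nil // the expected outcome for DML
	case err == nil:
		return errors.New("unexpected row from DML statement")
	default:
		return err
	}
}

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	if err := execDML(ctx, client, "DELETE FROM my_dataset.events WHERE true"); err != nil {
		log.Fatal(err)
	}
}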
@@ -1112,8 +1335,8 @@ func TestIntegration_TimeTypes(t *testing.T) {
 	wantRows := [][]Value{
 		{d, tm, dtm, ts},
 	}
-	upl := table.Uploader()
-	if err := upl.Put(ctx, []*ValuesSaver{
+	ins := table.Inserter()
+	if err := ins.Put(ctx, []*ValuesSaver{
 		{Schema: dtSchema, Row: wantRows[0]},
 	}); err != nil {
 		t.Fatal(putError(err))
@@ -1128,7 +1351,7 @@ func TestIntegration_TimeTypes(t *testing.T) {
 		"VALUES ('%s', '%s', '%s', '%s')",
 		table.DatasetID, table.TableID,
 		d, CivilTimeString(tm), CivilDateTimeString(dtm), ts.Format("2006-01-02 15:04:05"))
-	if err := dmlInsert(ctx, query); err != nil {
+	if err := runDML(ctx, query); err != nil {
 		t.Fatal(err)
 	}
 	wantRows = append(wantRows, wantRows[0])
@@ -1394,6 +1617,9 @@ func TestIntegration_QueryDryRun(t *testing.T) {
 	if s.Statistics.Details.(*QueryStatistics).Schema == nil {
 		t.Fatal("no schema")
 	}
+	if s.Statistics.Details.(*QueryStatistics).TotalBytesProcessedAccuracy == "" {
+		t.Fatal("no cost accuracy")
+	}
 }
 
 func TestIntegration_ExtractExternal(t *testing.T) {
@@ -1413,7 +1639,7 @@ func TestIntegration_ExtractExternal(t *testing.T) {
 	sql := fmt.Sprintf(`INSERT %s.%s (name, num)
 		VALUES ('a', 1), ('b', 2), ('c', 3)`,
 		table.DatasetID, table.TableID)
-	if err := dmlInsert(ctx, sql); err != nil {
+	if err := runDML(ctx, sql); err != nil {
 		t.Fatal(err)
 	}
 	// Extract to a GCS object as CSV.
@@ -1450,7 +1676,12 @@ func TestIntegration_ExtractExternal(t *testing.T) {
 		SourceFormat: CSV,
 		SourceURIs:   []string{uri},
 		Schema:       schema,
-		Options:      &CSVOptions{SkipLeadingRows: 1},
+		Options: &CSVOptions{
+			SkipLeadingRows: 1,
+			// This is the default. Since we use edc as an expectation later on,
+			// let's just be explicit.
+			FieldDelimiter: ",",
+		},
 	}
 	// Query that CSV file directly.
 	q := client.Query("SELECT * FROM csv")
@@ -1506,12 +1737,12 @@ func TestIntegration_ReadNullIntoStruct(t *testing.T) {
 	table := newTable(t, schema)
 	defer table.Delete(ctx)
 
-	upl := table.Uploader()
+	ins := table.Inserter()
 	row := &ValuesSaver{
 		Schema: schema,
 		Row:    []Value{nil, []Value{}, []Value{nil}},
 	}
-	if err := upl.Put(ctx, []*ValuesSaver{row}); err != nil {
+	if err := ins.Put(ctx, []*ValuesSaver{row}); err != nil {
 		t.Fatal(putError(err))
 	}
 	if err := waitForRow(ctx, table); err != nil {
@@ -1539,7 +1770,7 @@ const (
 
 // These tests exploit the fact that the two SQL versions have different syntaxes for
 // fully-qualified table names.
-var useLegacySqlTests = []struct {
+var useLegacySQLTests = []struct {
 	t           string // name of table
 	std, legacy bool   // use standard/legacy SQL
 	err         bool   // do we expect an error?
@@ -1560,7 +1791,7 @@ func TestIntegration_QueryUseLegacySQL(t *testing.T) {
 		t.Skip("Integration tests skipped")
 	}
 	ctx := context.Background()
-	for _, test := range useLegacySqlTests {
+	for _, test := range useLegacySQLTests {
 		q := client.Query(fmt.Sprintf("select word from %s limit 1", test.t))
 		q.UseStandardSQL = test.std
 		q.UseLegacySQL = test.legacy
@@ -1582,7 +1813,7 @@ func TestIntegration_TableUseLegacySQL(t *testing.T) {
 	ctx := context.Background()
 	table := newTable(t, schema)
 	defer table.Delete(ctx)
-	for i, test := range useLegacySqlTests {
+	for i, test := range useLegacySQLTests {
 		view := dataset.Table(fmt.Sprintf("t_view_%d", i))
 		tm := &TableMetadata{
 			ViewQuery: fmt.Sprintf("SELECT word from %s", test.t),
@@ -1756,8 +1987,8 @@ func TestIntegration_NumericErrors(t *testing.T) {
 	if _, ok := tooBigRat.SetString("1e40"); !ok {
 		t.Fatal("big.Rat.SetString failed")
 	}
-	upl := table.Uploader()
-	err := upl.Put(ctx, []*ValuesSaver{{Schema: schema, Row: []Value{tooBigRat}}})
+	ins := table.Inserter()
+	err := ins.Put(ctx, []*ValuesSaver{{Schema: schema, Row: []Value{tooBigRat}}})
 	if err == nil {
 		t.Fatal("got nil, want error")
 	}
@@ -1777,6 +2008,74 @@ func TestIntegration_QueryErrors(t *testing.T) {
 	}
 }
 
+func TestIntegration_Model(t *testing.T) {
+	// Create an ML model.
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	ctx := context.Background()
+	schema := Schema{
+		{Name: "input", Type: IntegerFieldType},
+		{Name: "label", Type: IntegerFieldType},
+	}
+	table := newTable(t, schema)
+	defer table.Delete(ctx)
+
+	// Insert table data.
+	tableName := fmt.Sprintf("%s.%s", table.DatasetID, table.TableID)
+	sql := fmt.Sprintf(`INSERT %s (input, label)
+		VALUES (1, 0), (2, 1), (3, 0), (4, 1)`,
+		tableName)
+	wantNumRows := 4
+
+	if err := runDML(ctx, sql); err != nil {
+		t.Fatal(err)
+	}
+
+	model := dataset.Table("my_model")
+	modelName := fmt.Sprintf("%s.%s", model.DatasetID, model.TableID)
+	sql = fmt.Sprintf(`CREATE MODEL %s OPTIONS (model_type='logistic_reg') AS SELECT input, label FROM %s`,
+		modelName, tableName)
+	if err := runDML(ctx, sql); err != nil {
+		t.Fatal(err)
+	}
+	defer model.Delete(ctx)
+
+	sql = fmt.Sprintf(`SELECT * FROM ml.PREDICT(MODEL %s, TABLE %s)`, modelName, tableName)
+	q := client.Query(sql)
+	ri, err := q.Read(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	rows, _, _, err := readAll(ri)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got := len(rows); got != wantNumRows {
+		t.Fatalf("got %d rows in prediction table, want %d", got, wantNumRows)
+	}
+	iter := dataset.Tables(ctx)
+	seen := false
+	for {
+		tbl, err := iter.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		if tbl.TableID == "my_model" {
+			seen = true
+		}
+	}
+	if !seen {
+		t.Fatal("model not listed in dataset")
+	}
+	if err := model.Delete(ctx); err != nil {
+		t.Fatal(err)
+	}
+}
+
 // Creates a new, temporary table with a unique name and the given schema.
 func newTable(t *testing.T, s Schema) *Table {
 	table := dataset.Table(tableIDs.New())
11
vendor/cloud.google.com/go/bigquery/iterator.go
generated
vendored
@@ -15,24 +15,28 @@
 package bigquery
 
 import (
+	"context"
 	"fmt"
 	"reflect"
 
-	"golang.org/x/net/context"
 	bq "google.golang.org/api/bigquery/v2"
 	"google.golang.org/api/iterator"
 )
 
+// Construct a RowIterator.
+// If pf is nil, there are no rows in the result set.
 func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
 	it := &RowIterator{
 		ctx:   ctx,
 		table: t,
 		pf:    pf,
 	}
+	if pf != nil {
 		it.pageInfo, it.nextFunc = iterator.NewPageInfo(
 			it.fetch,
 			func() int { return len(it.rows) },
 			func() interface{} { r := it.rows; it.rows = nil; return r })
+	}
 	return it
 }
@@ -65,7 +69,7 @@ type RowIterator struct {
 //
 // dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
 //
-// If dst is a *[]Value, it will be set to to new []Value whose i'th element
+// If dst is a *[]Value, it will be set to new []Value whose i'th element
 // will be populated with the i'th column of the row.
 //
 // If dst is a *map[string]Value, a new map will be created if dst is nil. Then
@@ -99,6 +103,9 @@ type RowIterator struct {
 // NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
 // table with NULLs.
 func (it *RowIterator) Next(dst interface{}) error {
+	if it.pf == nil { // There are no rows in the result set.
+		return iterator.Done
+	}
 	var vl ValueLoader
 	switch dst := dst.(type) {
 	case ValueLoader:
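With the change above, a RowIterator built without a pageFetcher reports iterator.Done immediately, so the standard read loop needs no special casing for empty result sets. The canonical loop, for reference; the project ID and query text are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	it, err := client.Query("SELECT name, num FROM my_dataset.my_table").Read(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row) // returns iterator.Done at once if the set is empty
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(row)
	}
}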
3
vendor/cloud.google.com/go/bigquery/iterator_test.go
generated
vendored
@@ -15,13 +15,12 @@
 package bigquery
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"testing"
 
 	"cloud.google.com/go/internal/testutil"
-
-	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
 )
92
vendor/cloud.google.com/go/bigquery/job.go
generated
vendored
@@ -15,17 +15,14 @@
 package bigquery
 
 import (
+	"context"
 	"errors"
 	"fmt"
-	"math/rand"
-	"os"
-	"sync"
 	"time"
 
 	"cloud.google.com/go/internal"
 	"cloud.google.com/go/internal/trace"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
+	gax "github.com/googleapis/gax-go/v2"
 	bq "google.golang.org/api/bigquery/v2"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/api/iterator"
@@ -37,7 +34,7 @@ type Job struct {
 	projectID string
 	jobID     string
 	location  string
+	email     string
 	config     *bq.JobConfiguration
 	lastStatus *JobStatus
 }
@@ -76,13 +73,22 @@ func (j *Job) Location() string {
 	return j.location
 }
 
+// Email returns the email of the job's creator.
+func (j *Job) Email() string {
+	return j.email
+}
+
 // State is one of a sequence of states that a Job progresses through as it is processed.
 type State int
 
 const (
-	StateUnspecified State = iota // used only as a default in JobIterator
+	// StateUnspecified is the default JobIterator state.
+	StateUnspecified State = iota
+	// Pending is a state that describes that the job is pending.
 	Pending
+	// Running is a state that describes that the job is running.
 	Running
+	// Done is a state that describes that the job is done.
 	Done
 )
@@ -164,31 +170,6 @@ func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference {
 	return jr
 }
 
-const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
-
-var (
-	rngMu sync.Mutex
-	rng   = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
-)
-
-// For testing.
-var randomIDFn = randomID
-
-// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
-// suffixes.
-const randomIDLen = 27
-
-func randomID() string {
-	// This is used for both job IDs and insert IDs.
-	var b [randomIDLen]byte
-	rngMu.Lock()
-	for i := 0; i < len(b); i++ {
-		b[i] = alphanum[rng.Intn(len(alphanum))]
-	}
-	rngMu.Unlock()
-	return string(b[:])
-}
-
 // Done reports whether the job has completed.
 // After Done returns true, the Err method will return an error if the job completed unsuccessfully.
 func (s *JobStatus) Done() bool {
@@ -254,7 +235,7 @@ func (j *Job) Wait(ctx context.Context) (js *JobStatus, err error) {
 
 	if j.isQuery() {
 		// We can avoid polling for query jobs.
-		if _, err := j.waitForQuery(ctx, j.projectID); err != nil {
+		if _, _, err := j.waitForQuery(ctx, j.projectID); err != nil {
 			return nil, err
 		}
 		// Note: extra RPC even if you just want to wait for the query to finish.
@@ -290,7 +271,7 @@ func (j *Job) Read(ctx context.Context) (ri *RowIterator, err error) {
 	return j.read(ctx, j.waitForQuery, fetchPage)
 }
 
-func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, error), pf pageFetcher) (*RowIterator, error) {
+func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, uint64, error), pf pageFetcher) (*RowIterator, error) {
 	if !j.isQuery() {
 		return nil, errors.New("bigquery: cannot read from a non-query job")
 	}
@@ -300,7 +281,7 @@ func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, uint64, error), pf pageFetcher) (*RowIterator, error) {
 	if destTable != nil && projectID != destTable.ProjectId {
 		return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId)
 	}
-	schema, err := waitForQuery(ctx, projectID)
+	schema, totalRows, err := waitForQuery(ctx, projectID)
 	if err != nil {
 		return nil, err
 	}
@@ -308,13 +289,18 @@ func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, uint64, error), pf pageFetcher) (*RowIterator, error) {
 		return nil, errors.New("bigquery: query job missing destination table")
 	}
 	dt := bqToTable(destTable, j.c)
+	if totalRows == 0 {
+		pf = nil
+	}
 	it := newRowIterator(ctx, dt, pf)
 	it.Schema = schema
+	it.TotalRows = totalRows
 	return it, nil
 }
 
-// waitForQuery waits for the query job to complete and returns its schema.
-func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) {
+// waitForQuery waits for the query job to complete and returns its schema. It also
+// returns the total number of rows in the result set.
+func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, uint64, error) {
 	// Use GetQueryResults only to wait for completion, not to read results.
 	call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0)
 	setClientHeader(call.Header())
@@ -335,9 +321,9 @@ func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, uint64, error) {
 		return true, nil
 	})
 	if err != nil {
-		return nil, err
+		return nil, 0, err
 	}
-	return bqToSchema(res.Schema), nil
+	return bqToSchema(res.Schema), res.TotalRows, nil
 }
 
 // JobStatistics contains statistics about a job.
@@ -397,6 +383,14 @@ type QueryStatistics struct {
 	// Total bytes processed for the job.
 	TotalBytesProcessed int64
 
+	// For dry run queries, indicates how accurate the TotalBytesProcessed value is.
+	// When indicated, values include:
+	// UNKNOWN: accuracy of the estimate is unknown.
+	// PRECISE: estimate is precise.
+	// LOWER_BOUND: estimate is lower bound of what the query would cost.
+	// UPPER_BOUND: estimate is upper bound of what the query would cost.
+	TotalBytesProcessedAccuracy string
+
 	// Describes execution plan for the query.
 	QueryPlan []*ExplainQueryStage
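TotalBytesProcessedAccuracy is populated on dry-run statistics alongside the byte estimate. A hedged sketch of reading both; the project ID and query text are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	q := client.Query("SELECT name FROM my_dataset.my_table")
	q.DryRun = true
	job, err := q.Run(ctx) // dry-run jobs complete immediately
	if err != nil {
		log.Fatal(err)
	}
	stats := job.LastStatus().Statistics.Details.(*bigquery.QueryStatistics)
	fmt.Printf("%d bytes (%s)\n", stats.TotalBytesProcessed, stats.TotalBytesProcessedAccuracy)
}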
@@ -574,6 +568,8 @@ type JobIterator struct {
 	ProjectID string // Project ID of the jobs to list. Default is the client's project.
 	AllUsers  bool   // Whether to list jobs owned by all users in the project, or just the current caller.
 	State     State  // List only jobs in the given state. Defaults to all states.
+	MinCreationTime time.Time // List only jobs created after this time.
+	MaxCreationTime time.Time // List only jobs created before this time.
 
 	ctx      context.Context
 	c        *Client
@@ -582,8 +578,12 @@ type JobIterator struct {
 	items    []*Job
 }
 
+// PageInfo is a getter for the JobIterator's PageInfo.
 func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
 
+// Next returns the next Job. Its second return value is iterator.Done if
+// there are no more results. Once Next returns Done, all subsequent calls will
+// return Done.
 func (it *JobIterator) Next() (*Job, error) {
 	if err := it.nextFunc(); err != nil {
 		return nil, err
@@ -616,6 +616,12 @@ func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
 	if st != "" {
 		req.StateFilter(st)
 	}
+	if !it.MinCreationTime.IsZero() {
+		req.MinCreationTime(uint64(it.MinCreationTime.UnixNano() / 1e6))
+	}
+	if !it.MaxCreationTime.IsZero() {
+		req.MaxCreationTime(uint64(it.MaxCreationTime.UnixNano() / 1e6))
+	}
 	setClientHeader(req.Header())
 	if pageSize > 0 {
 		req.MaxResults(int64(pageSize))
@ -635,7 +641,7 @@ func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
|
func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
|
||||||
return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c)
|
return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, j.UserEmail, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) {
|
func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) {
|
||||||
@ -659,15 +665,16 @@ func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fie
|
|||||||
}
|
}
|
||||||
|
|
||||||
func bqToJob(q *bq.Job, c *Client) (*Job, error) {
|
func bqToJob(q *bq.Job, c *Client) (*Job, error) {
|
||||||
return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, c)
|
return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, q.UserEmail, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, c *Client) (*Job, error) {
|
func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, email string, c *Client) (*Job, error) {
|
||||||
j := &Job{
|
j := &Job{
|
||||||
projectID: qr.ProjectId,
|
projectID: qr.ProjectId,
|
||||||
jobID: qr.JobId,
|
jobID: qr.JobId,
|
||||||
location: qr.Location,
|
location: qr.Location,
|
||||||
c: c,
|
c: c,
|
||||||
|
email: email,
|
||||||
}
|
}
|
||||||
j.setConfig(qc)
|
j.setConfig(qc)
|
||||||
if err := j.setStatus(qs); err != nil {
|
if err := j.setStatus(qs); err != nil {
|
||||||
@ -750,6 +757,7 @@ func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
|
|||||||
StatementType: s.Query.StatementType,
|
StatementType: s.Query.StatementType,
|
||||||
TotalBytesBilled: s.Query.TotalBytesBilled,
|
TotalBytesBilled: s.Query.TotalBytesBilled,
|
||||||
TotalBytesProcessed: s.Query.TotalBytesProcessed,
|
TotalBytesProcessed: s.Query.TotalBytesProcessed,
|
||||||
|
TotalBytesProcessedAccuracy: s.Query.TotalBytesProcessedAccuracy,
|
||||||
NumDMLAffectedRows: s.Query.NumDmlAffectedRows,
|
NumDMLAffectedRows: s.Query.NumDmlAffectedRows,
|
||||||
QueryPlan: queryPlanFromProto(s.Query.QueryPlan),
|
QueryPlan: queryPlanFromProto(s.Query.QueryPlan),
|
||||||
Schema: bqToSchema(s.Query.Schema),
|
Schema: bqToSchema(s.Query.Schema),
|
||||||
|
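For context on the new MinCreationTime/MaxCreationTime fields above, here is a minimal sketch of filtering a job listing by creation time. The project ID and the surrounding client setup are illustrative placeholders, not part of this commit:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/bigquery"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	it := client.Jobs(ctx)
	// Restrict the listing to jobs created within the last 24 hours,
	// using the fields added in this change.
	it.MinCreationTime = time.Now().Add(-24 * time.Hour)
	it.MaxCreationTime = time.Now()
	for {
		job, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(job.ID())
	}
}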
14 vendor/cloud.google.com/go/bigquery/load.go generated vendored
@@ -15,10 +15,10 @@
 package bigquery
 
 import (
+	"context"
 	"io"
 
 	"cloud.google.com/go/internal/trace"
-	"golang.org/x/net/context"
 	bq "google.golang.org/api/bigquery/v2"
 )
 
@@ -44,12 +44,20 @@ type LoadConfig struct {
 	// If non-nil, the destination table is partitioned by time.
 	TimePartitioning *TimePartitioning
 
+	// Clustering specifies the data clustering configuration for the destination table.
+	Clustering *Clustering
+
 	// Custom encryption configuration (e.g., Cloud KMS keys).
 	DestinationEncryptionConfig *EncryptionConfig
 
 	// Allows the schema of the destination table to be updated as a side effect of
 	// the load job.
 	SchemaUpdateOptions []string
+
+	// For Avro-based loads, controls whether logical type annotations are used.
+	// See https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro#logical_types
+	// for additional information.
+	UseAvroLogicalTypes bool
 }
 
 func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
@@ -60,8 +68,10 @@ func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
 			WriteDisposition:                   string(l.WriteDisposition),
 			DestinationTable:                   l.Dst.toBQ(),
 			TimePartitioning:                   l.TimePartitioning.toBQ(),
+			Clustering:                         l.Clustering.toBQ(),
 			DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(),
 			SchemaUpdateOptions:                l.SchemaUpdateOptions,
+			UseAvroLogicalTypes:                l.UseAvroLogicalTypes,
 		},
 	}
 	media := l.Src.populateLoadConfig(config.Load)
@@ -75,8 +85,10 @@ func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
 		WriteDisposition:            TableWriteDisposition(q.Load.WriteDisposition),
 		Dst:                         bqToTable(q.Load.DestinationTable, c),
 		TimePartitioning:            bqToTimePartitioning(q.Load.TimePartitioning),
+		Clustering:                  bqToClustering(q.Load.Clustering),
 		DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration),
 		SchemaUpdateOptions:         q.Load.SchemaUpdateOptions,
+		UseAvroLogicalTypes:         q.Load.UseAvroLogicalTypes,
 	}
 	var fc *FileConfig
 	if len(q.Load.SourceUris) == 0 {
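A minimal sketch of an Avro load that uses the new UseAvroLogicalTypes knob. The bucket, dataset, table, and project names are placeholders:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/data.avro") // placeholder URI
	gcsRef.SourceFormat = bigquery.Avro

	loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(gcsRef)
	// Honor Avro logical type annotations (e.g. timestamp-micros) instead
	// of loading them as their raw underlying types.
	loader.UseAvroLogicalTypes = true

	job, err := loader.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	status, err := job.Wait(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if status.Err() != nil {
		log.Fatal(status.Err())
	}
}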
40 vendor/cloud.google.com/go/bigquery/load_test.go generated vendored
@@ -22,7 +22,6 @@ import (
 	"cloud.google.com/go/internal/testutil"
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
-
 	bq "google.golang.org/api/bigquery/v2"
 )
 
@@ -104,6 +103,7 @@ func TestLoad(t *testing.T) {
 				WriteDisposition:            WriteTruncate,
 				Labels:                      map[string]string{"a": "b"},
 				TimePartitioning:            &TimePartitioning{Expiration: 1234 * time.Millisecond},
+				Clustering:                  &Clustering{Fields: []string{"cfield1"}},
 				DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
 				SchemaUpdateOptions:         []string{"ALLOW_FIELD_ADDITION"},
 			},
@@ -117,6 +117,9 @@ func TestLoad(t *testing.T) {
 					Type:         "DAY",
 					ExpirationMs: 1234,
 				}
+				j.Configuration.Load.Clustering = &bq.Clustering{
+					Fields: []string{"cfield1"},
+				}
 				j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
 				j.JobReference = &bq.JobReference{
 					JobId: "ajob",
@@ -234,6 +237,41 @@ func TestLoad(t *testing.T) {
 				return j
 			}(),
 		},
+		{
+			dst: c.Dataset("dataset-id").Table("table-id"),
+			src: func() *GCSReference {
+				g := NewGCSReference("uri")
+				g.SourceFormat = Avro
+				return g
+			}(),
+			config: LoadConfig{
+				UseAvroLogicalTypes: true,
+			},
+			want: func() *bq.Job {
+				j := defaultLoadJob()
+				j.Configuration.Load.SourceFormat = "AVRO"
+				j.Configuration.Load.UseAvroLogicalTypes = true
+				return j
+			}(),
+		},
+		{
+			dst: c.Dataset("dataset-id").Table("table-id"),
+			src: func() *ReaderSource {
+				r := NewReaderSource(strings.NewReader("foo"))
+				r.SourceFormat = Avro
+				return r
+			}(),
+			config: LoadConfig{
+				UseAvroLogicalTypes: true,
+			},
+			want: func() *bq.Job {
+				j := defaultLoadJob()
+				j.Configuration.Load.SourceUris = nil
+				j.Configuration.Load.SourceFormat = "AVRO"
+				j.Configuration.Load.UseAvroLogicalTypes = true
+				return j
+			}(),
+		},
 	}
 
 	for i, tc := range testCases {
49 vendor/cloud.google.com/go/bigquery/nulls.go generated vendored
@@ -41,6 +41,14 @@ type NullString struct {
 
 func (n NullString) String() string { return nullstr(n.Valid, n.StringVal) }
 
+// NullGeography represents a BigQuery GEOGRAPHY string that may be NULL.
+type NullGeography struct {
+	GeographyVal string
+	Valid        bool // Valid is true if GeographyVal is not NULL.
+}
+
+func (n NullGeography) String() string { return nullstr(n.Valid, n.GeographyVal) }
+
 // NullFloat64 represents a BigQuery FLOAT64 that may be NULL.
 type NullFloat64 struct {
 	Float64 float64
@@ -99,13 +107,28 @@ func (n NullDateTime) String() string {
 	return CivilDateTimeString(n.DateTime)
 }
 
+// MarshalJSON converts the NullInt64 to JSON.
 func (n NullInt64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Int64) }
+
+// MarshalJSON converts the NullFloat64 to JSON.
 func (n NullFloat64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Float64) }
+
+// MarshalJSON converts the NullBool to JSON.
 func (n NullBool) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Bool) }
+
+// MarshalJSON converts the NullString to JSON.
 func (n NullString) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.StringVal) }
+
+// MarshalJSON converts the NullGeography to JSON.
+func (n NullGeography) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.GeographyVal) }
 
+// MarshalJSON converts the NullTimestamp to JSON.
 func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) }
+
+// MarshalJSON converts the NullDate to JSON.
 func (n NullDate) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Date) }
+
+// MarshalJSON converts the NullTime to JSON.
 func (n NullTime) MarshalJSON() ([]byte, error) {
 	if !n.Valid {
 		return jsonNull, nil
@@ -113,6 +136,7 @@ func (n NullTime) MarshalJSON() ([]byte, error) {
 	return []byte(`"` + CivilTimeString(n.Time) + `"`), nil
 }
 
+// MarshalJSON converts the NullDateTime to JSON.
 func (n NullDateTime) MarshalJSON() ([]byte, error) {
 	if !n.Valid {
 		return jsonNull, nil
@@ -136,6 +160,7 @@ func nulljson(valid bool, v interface{}) ([]byte, error) {
 	return json.Marshal(v)
 }
 
+// UnmarshalJSON converts JSON into a NullInt64.
 func (n *NullInt64) UnmarshalJSON(b []byte) error {
 	n.Valid = false
 	n.Int64 = 0
@@ -150,6 +175,7 @@ func (n *NullInt64) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
+// UnmarshalJSON converts JSON into a NullFloat64.
 func (n *NullFloat64) UnmarshalJSON(b []byte) error {
 	n.Valid = false
 	n.Float64 = 0
@@ -164,6 +190,7 @@ func (n *NullFloat64) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
+// UnmarshalJSON converts JSON into a NullBool.
 func (n *NullBool) UnmarshalJSON(b []byte) error {
 	n.Valid = false
 	n.Bool = false
@@ -178,6 +205,7 @@ func (n *NullBool) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
+// UnmarshalJSON converts JSON into a NullString.
 func (n *NullString) UnmarshalJSON(b []byte) error {
 	n.Valid = false
 	n.StringVal = ""
@@ -192,6 +220,21 @@ func (n *NullString) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
+// UnmarshalJSON converts JSON into a NullGeography.
+func (n *NullGeography) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.GeographyVal = ""
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+	if err := json.Unmarshal(b, &n.GeographyVal); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+// UnmarshalJSON converts JSON into a NullTimestamp.
 func (n *NullTimestamp) UnmarshalJSON(b []byte) error {
 	n.Valid = false
 	n.Timestamp = time.Time{}
@@ -206,6 +249,7 @@ func (n *NullTimestamp) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
+// UnmarshalJSON converts JSON into a NullDate.
 func (n *NullDate) UnmarshalJSON(b []byte) error {
 	n.Valid = false
 	n.Date = civil.Date{}
@@ -220,6 +264,7 @@ func (n *NullDate) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
+// UnmarshalJSON converts JSON into a NullTime.
 func (n *NullTime) UnmarshalJSON(b []byte) error {
 	n.Valid = false
 	n.Time = civil.Time{}
@@ -242,6 +287,7 @@ func (n *NullTime) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
+// UnmarshalJSON converts JSON into a NullDateTime.
 func (n *NullDateTime) UnmarshalJSON(b []byte) error {
 	n.Valid = false
 	n.DateTime = civil.DateTime{}
@@ -269,6 +315,7 @@ var (
 	typeOfNullFloat64   = reflect.TypeOf(NullFloat64{})
 	typeOfNullBool      = reflect.TypeOf(NullBool{})
 	typeOfNullString    = reflect.TypeOf(NullString{})
+	typeOfNullGeography = reflect.TypeOf(NullGeography{})
 	typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{})
 	typeOfNullDate      = reflect.TypeOf(NullDate{})
 	typeOfNullTime      = reflect.TypeOf(NullTime{})
@@ -285,6 +332,8 @@ func nullableFieldType(t reflect.Type) FieldType {
 		return BooleanFieldType
 	case typeOfNullString:
 		return StringFieldType
+	case typeOfNullGeography:
+		return GeographyFieldType
 	case typeOfNullTimestamp:
 		return TimestampFieldType
 	case typeOfNullDate:
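A quick illustrative round trip through the new NullGeography type; the WKT value is an arbitrary example:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	g := bigquery.NullGeography{GeographyVal: "POINT(-122.35 47.65)", Valid: true}
	b, err := json.Marshal(g)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // "POINT(-122.35 47.65)"

	// A JSON null unmarshals to Valid == false.
	var n bigquery.NullGeography
	if err := json.Unmarshal([]byte(`null`), &n); err != nil {
		log.Fatal(err)
	}
	fmt.Println(n.Valid) // false
}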
2 vendor/cloud.google.com/go/bigquery/nulls_test.go generated vendored
@@ -37,6 +37,7 @@ func TestNullsJSON(t *testing.T) {
 		{&NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
 		{&NullBool{Valid: true, Bool: true}, `true`},
 		{&NullString{Valid: true, StringVal: "foo"}, `"foo"`},
+		{&NullGeography{Valid: true, GeographyVal: "ST_GEOPOINT(47.649154, -122.350220)"}, `"ST_GEOPOINT(47.649154, -122.350220)"`},
 		{&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
 		{&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
 		{&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`},
@@ -46,6 +47,7 @@ func TestNullsJSON(t *testing.T) {
 		{&NullFloat64{}, `null`},
 		{&NullBool{}, `null`},
 		{&NullString{}, `null`},
+		{&NullGeography{}, `null`},
 		{&NullTimestamp{}, `null`},
 		{&NullDate{}, `null`},
 		{&NullTime{}, `null`},
4 vendor/cloud.google.com/go/bigquery/oc_test.go generated vendored
@@ -12,15 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build go1.8
-
 package bigquery
 
 import (
+	"context"
 	"testing"
 
 	"cloud.google.com/go/internal/testutil"
-	"golang.org/x/net/context"
 )
 
 func TestOCTracing(t *testing.T) {
17 vendor/cloud.google.com/go/bigquery/params.go generated vendored
@@ -25,7 +25,6 @@ import (
 
 	"cloud.google.com/go/civil"
 	"cloud.google.com/go/internal/fields"
-
 	bq "google.golang.org/api/bigquery/v2"
 )
 
@@ -45,7 +44,7 @@ func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}
 		return "", false, nil, err
 	}
 	if name != "" && !validFieldName.MatchString(name) {
-		return "", false, nil, errInvalidFieldName
+		return "", false, nil, invalidFieldNameError(name)
 	}
 	for _, opt := range opts {
 		if opt != nullableTagOption {
@@ -57,6 +56,12 @@ func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}
 	return name, keep, opts, nil
 }
 
+type invalidFieldNameError string
+
+func (e invalidFieldNameError) Error() string {
+	return fmt.Sprintf("bigquery: invalid name %q of field in struct", string(e))
+}
+
 var fieldCache = fields.NewCache(bqTagParser, nil, nil)
 
 var (
@@ -102,6 +107,10 @@ type QueryParameter struct {
 	//   Arrays and slices of the above.
 	//   Structs of the above. Only the exported fields are used.
 	//
+	// BigQuery does not support params of type GEOGRAPHY. For users wishing
+	// to parameterize Geography values, use string parameters and cast in the
+	// SQL query, e.g. `SELECT ST_GeogFromText(@string_param) as geo`
+	//
 	// When a QueryParameter is returned inside a QueryConfig from a call to
 	// Job.Config:
 	//   Integers are of type int64.
@@ -277,6 +286,10 @@ func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
 	// None of the above: assume a scalar type. (If it's not a valid type,
 	// paramType will catch the error.)
 	res.Value = fmt.Sprint(v.Interface())
+	// Ensure empty string values are sent.
+	if res.Value == "" {
+		res.ForceSendFields = append(res.ForceSendFields, "Value")
+	}
 	return res, nil
 }
 
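The GEOGRAPHY workaround described in the new doc comment looks like this in practice. A minimal sketch, with the project ID and client setup as placeholders:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	// GEOGRAPHY values cannot be bound directly as query parameters, so
	// pass the WKT text as a STRING parameter and cast inside the query.
	q := client.Query(`SELECT ST_GeogFromText(@wkt) AS geo`)
	q.Parameters = []bigquery.QueryParameter{
		{Name: "wkt", Value: "POINT(-122.35 47.65)"},
	}
	if _, err := q.Read(ctx); err != nil {
		log.Fatal(err)
	}
}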
30 vendor/cloud.google.com/go/bigquery/params_test.go generated vendored
@@ -15,6 +15,7 @@
 package bigquery
 
 import (
+	"context"
 	"errors"
 	"math"
 	"math/big"
@@ -22,11 +23,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/google/go-cmp/cmp"
-
 	"cloud.google.com/go/civil"
 	"cloud.google.com/go/internal/testutil"
-	"golang.org/x/net/context"
+	"github.com/google/go-cmp/cmp"
 	bq "google.golang.org/api/bigquery/v2"
 )
 
@@ -62,7 +61,6 @@ type (
 	}
 	S2 struct {
 		D string
-		e int
 	}
 )
 
@@ -361,3 +359,27 @@ func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err
 	}
 	return val[0], conf.(*QueryConfig).Parameters[0].Value, nil
 }
+
+func TestQueryParameter_toBQ(t *testing.T) {
+	tests := []struct {
+		in   QueryParameter
+		want []string
+	}{
+		{
+			in:   QueryParameter{Name: "name", Value: ""},
+			want: []string{"Value"},
+		},
+	}
+
+	for _, test := range tests {
+		q, err := test.in.toBQ()
+		if err != nil {
+			t.Fatalf("expected no error, got %v", err)
+		}
+
+		got := q.ParameterValue.ForceSendFields
+		if !cmp.Equal(test.want, got) {
+			t.Fatalf("want %v, got %v", test.want, got)
+		}
+	}
+}
23 vendor/cloud.google.com/go/bigquery/query.go generated vendored
@@ -15,10 +15,10 @@
 package bigquery
 
 import (
+	"context"
 	"errors"
 
 	"cloud.google.com/go/internal/trace"
-	"golang.org/x/net/context"
 	bq "google.golang.org/api/bigquery/v2"
 )
 
@@ -105,6 +105,9 @@ type QueryConfig struct {
 	// for the destination table.
 	TimePartitioning *TimePartitioning
 
+	// Clustering specifies the data clustering configuration for the destination table.
+	Clustering *Clustering
+
 	// The labels associated with this job.
 	Labels map[string]string
 
@@ -134,6 +137,7 @@ func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
 		Priority:                           string(qc.Priority),
 		MaximumBytesBilled:                 qc.MaxBytesBilled,
 		TimePartitioning:                   qc.TimePartitioning.toBQ(),
+		Clustering:                         qc.Clustering.toBQ(),
 		DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(),
 		SchemaUpdateOptions:                qc.SchemaUpdateOptions,
 	}
@@ -204,6 +208,7 @@ func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
 		MaxBytesBilled:              qq.MaximumBytesBilled,
 		UseLegacySQL:                qq.UseLegacySql == nil || *qq.UseLegacySql,
 		TimePartitioning:            bqToTimePartitioning(qq.TimePartitioning),
+		Clustering:                  bqToClustering(qq.Clustering),
 		DestinationEncryptionConfig: bqToEncryptionConfig(qq.DestinationEncryptionConfiguration),
 		SchemaUpdateOptions:         qq.SchemaUpdateOptions,
 	}
@@ -249,7 +254,23 @@ type QueryPriority string
 
 const (
+	// BatchPriority specifies that the query should be scheduled with the
+	// batch priority. BigQuery queues each batch query on your behalf, and
+	// starts the query as soon as idle resources are available, usually within
+	// a few minutes. If BigQuery hasn't started the query within 24 hours,
+	// BigQuery changes the job priority to interactive. Batch queries don't
+	// count towards your concurrent rate limit, which can make it easier to
+	// start many queries at once.
+	//
+	// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#batchqueries.
 	BatchPriority QueryPriority = "BATCH"
+	// InteractivePriority specifies that the query should be scheduled with
+	// interactive priority, which means that the query is executed as soon as
+	// possible. Interactive queries count towards your concurrent rate limit
+	// and your daily limit. It is the default priority with which queries get
+	// executed.
+	//
+	// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#queries.
 	InteractivePriority QueryPriority = "INTERACTIVE"
 )
 
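A minimal sketch of the documented priorities and the new Clustering field in use. The project, dataset, and table names are placeholders, and note that when this shipped a clustered destination table also had to be time-partitioned:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	q := client.Query("SELECT name, number FROM `bigquery-public-data.usa_names.usa_1910_2013`")
	// Batch queries are queued and started when idle resources are
	// available; they do not count against the concurrent rate limit.
	q.Priority = bigquery.BatchPriority
	// Write to a time-partitioned destination table clustered by name.
	q.Dst = client.Dataset("mydataset").Table("clustered_table")
	q.TimePartitioning = &bigquery.TimePartitioning{}
	q.Clustering = &bigquery.Clustering{Fields: []string{"name"}}
	job, err := q.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := job.Wait(ctx); err != nil {
		log.Fatal(err)
	}
}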
8 vendor/cloud.google.com/go/bigquery/query_test.go generated vendored
@@ -18,10 +18,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/google/go-cmp/cmp"
-
 	"cloud.google.com/go/internal/testutil"
+	"github.com/google/go-cmp/cmp"
 	bq "google.golang.org/api/bigquery/v2"
 )
 
@@ -351,6 +349,9 @@ func TestConfiguringQuery(t *testing.T) {
 	query.DefaultProjectID = "def-project-id"
 	query.DefaultDatasetID = "def-dataset-id"
 	query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
+	query.Clustering = &Clustering{
+		Fields: []string{"cfield1"},
+	}
 	query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"}
 	query.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
 
@@ -368,6 +369,7 @@ func TestConfiguringQuery(t *testing.T) {
 		},
 		UseLegacySql:                       &pfalse,
 		TimePartitioning:                   &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
+		Clustering:                         &bq.Clustering{Fields: []string{"cfield1"}},
 		DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
 		SchemaUpdateOptions:                []string{"ALLOW_FIELD_ADDITION"},
 	},
56 vendor/cloud.google.com/go/bigquery/random.go generated vendored Normal file
@@ -0,0 +1,56 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"math/rand"
+	"os"
+	"sync"
+	"time"
+)
+
+// Support for random values (typically job IDs and insert IDs).
+
+const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+var (
+	rngMu sync.Mutex
+	rng   = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
+)
+
+// For testing.
+var randomIDFn = randomID
+
+// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
+// suffixes.
+const randomIDLen = 27
+
+func randomID() string {
+	// This is used for both job IDs and insert IDs.
+	var b [randomIDLen]byte
+	rngMu.Lock()
+	for i := 0; i < len(b); i++ {
+		b[i] = alphanum[rng.Intn(len(alphanum))]
+	}
+	rngMu.Unlock()
+	return string(b[:])
+}
+
+// Seed seeds this package's random number generator, used for generating job and
+// insert IDs. Use Seed to obtain repeatable, deterministic behavior from bigquery
+// clients. Seed should be called before any clients are created.
+func Seed(s int64) {
+	rng = rand.New(rand.NewSource(s))
+}
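Seed makes generated job and insert IDs deterministic, which is mainly useful for record/replay style tests. A minimal sketch; the seed value and project ID are arbitrary placeholders:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	// Seed must be called before any clients are created.
	bigquery.Seed(42)

	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	_ = client // jobs created from here on get reproducible generated IDs
}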
12 vendor/cloud.google.com/go/bigquery/read_test.go generated vendored
@@ -15,14 +15,12 @@
 package bigquery
 
 import (
+	"context"
 	"errors"
 	"testing"
 
-	"github.com/google/go-cmp/cmp"
-
 	"cloud.google.com/go/internal/testutil"
-	"golang.org/x/net/context"
+	"github.com/google/go-cmp/cmp"
 	bq "google.golang.org/api/bigquery/v2"
 	"google.golang.org/api/iterator"
 )
@@ -56,8 +54,8 @@ func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Sc
 	return result, nil
 }
 
-func waitForQueryStub(context.Context, string) (Schema, error) {
-	return nil, nil
+func waitForQueryStub(context.Context, string) (Schema, uint64, error) {
+	return nil, 1, nil
 }
 
 func TestRead(t *testing.T) {
@@ -156,7 +154,7 @@ func TestNoMoreValues(t *testing.T) {
 	}
 }
 
-var errBang = errors.New("bang!")
+var errBang = errors.New("bang")
 
 func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) {
 	return nil, errBang
161 vendor/cloud.google.com/go/bigquery/schema.go generated vendored
@@ -15,17 +15,19 @@
 package bigquery
 
 import (
+	"encoding/json"
 	"errors"
 	"fmt"
 	"reflect"
+	"sync"
 
-	"cloud.google.com/go/internal/atomiccache"
 	bq "google.golang.org/api/bigquery/v2"
 )
 
 // Schema describes the fields in a table or query result.
 type Schema []*FieldSchema
 
+// FieldSchema describes a single field.
 type FieldSchema struct {
 	// The field name.
 	// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
@@ -102,27 +104,54 @@ func bqToSchema(ts *bq.TableSchema) Schema {
 	return s
 }
 
+// FieldType is the type of field.
 type FieldType string
 
 const (
+	// StringFieldType is a string field type.
 	StringFieldType FieldType = "STRING"
+	// BytesFieldType is a bytes field type.
 	BytesFieldType FieldType = "BYTES"
+	// IntegerFieldType is an integer field type.
 	IntegerFieldType FieldType = "INTEGER"
+	// FloatFieldType is a float field type.
 	FloatFieldType FieldType = "FLOAT"
+	// BooleanFieldType is a boolean field type.
 	BooleanFieldType FieldType = "BOOLEAN"
+	// TimestampFieldType is a timestamp field type.
 	TimestampFieldType FieldType = "TIMESTAMP"
+	// RecordFieldType is a record field type. It is typically used to create columns with repeated or nested data.
 	RecordFieldType FieldType = "RECORD"
+	// DateFieldType is a date field type.
 	DateFieldType FieldType = "DATE"
+	// TimeFieldType is a time field type.
 	TimeFieldType FieldType = "TIME"
+	// DateTimeFieldType is a datetime field type.
 	DateTimeFieldType FieldType = "DATETIME"
+	// NumericFieldType is a numeric field type. Numeric types include integer types, floating point types and the
+	// NUMERIC data type.
 	NumericFieldType FieldType = "NUMERIC"
+	// GeographyFieldType is a string field type. Geography types represent a set of points
+	// on the Earth's surface, represented in Well Known Text (WKT) format.
+	GeographyFieldType FieldType = "GEOGRAPHY"
 )
 
 var (
-	errNoStruct             = errors.New("bigquery: can only infer schema from struct or pointer to struct")
-	errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
-	errInvalidFieldName     = errors.New("bigquery: invalid name of field in struct")
-	errBadNullable          = errors.New(`bigquery: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`)
+	errEmptyJSONSchema = errors.New("bigquery: empty JSON schema")
+	fieldTypes         = map[FieldType]bool{
+		StringFieldType:    true,
+		BytesFieldType:     true,
+		IntegerFieldType:   true,
+		FloatFieldType:     true,
+		BooleanFieldType:   true,
+		TimestampFieldType: true,
+		RecordFieldType:    true,
+		DateFieldType:      true,
+		TimeFieldType:      true,
+		DateTimeFieldType:  true,
+		NumericFieldType:   true,
+		GeographyFieldType: true,
+	}
 )
 
 var typeOfByteSlice = reflect.TypeOf([]byte{})
@@ -153,6 +182,9 @@ var typeOfByteSlice = reflect.TypeOf([]byte{})
 // A Go slice or array type is inferred to be a BigQuery repeated field of the
 // element type. The element type must be one of the above listed types.
 //
+// Due to the lack of a unique native Go type for GEOGRAPHY, there is no schema
+// inference to GEOGRAPHY at this time.
+//
 // Nullable fields are inferred from the NullXXX types, declared in this package:
 //
 //   STRING      NullString
@@ -163,6 +195,7 @@ var typeOfByteSlice = reflect.TypeOf([]byte{})
 //   DATE        NullDate
 //   TIME        NullTime
 //   DATETIME    NullDateTime
+//   GEOGRAPHY   NullGeography
 //
 // For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below).
 // For a nullable NUMERIC field, use the type *big.Rat and tag the field "nullable".
@@ -193,8 +226,7 @@ func InferSchema(st interface{}) (Schema, error) {
 	return inferSchemaReflectCached(reflect.TypeOf(st))
 }
 
-// TODO(jba): replace with sync.Map for Go 1.9.
-var schemaCache atomiccache.Cache
+var schemaCache sync.Map
 
 type cacheVal struct {
 	schema Schema
@@ -202,10 +234,15 @@ type cacheVal struct {
 }
 
 func inferSchemaReflectCached(t reflect.Type) (Schema, error) {
-	cv := schemaCache.Get(t, func() interface{} {
+	var cv cacheVal
+	v, ok := schemaCache.Load(t)
+	if ok {
+		cv = v.(cacheVal)
+	} else {
 		s, err := inferSchemaReflect(t)
-		return cacheVal{s, err}
-	}).(cacheVal)
+		cv = cacheVal{s, err}
+		schemaCache.Store(t, cv)
+	}
 	return cv.schema, cv.err
 }
 
@@ -224,7 +261,7 @@ func inferStruct(t reflect.Type) (Schema, error) {
 	switch t.Kind() {
 	case reflect.Ptr:
 		if t.Elem().Kind() != reflect.Struct {
-			return nil, errNoStruct
+			return nil, noStructError{t}
 		}
 		t = t.Elem()
 		fallthrough
@@ -232,15 +269,15 @@ func inferStruct(t reflect.Type) (Schema, error) {
 	case reflect.Struct:
 		return inferFields(t)
 	default:
-		return nil, errNoStruct
+		return nil, noStructError{t}
 	}
 }
 
 // inferFieldSchema infers the FieldSchema for a Go type
-func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
+func inferFieldSchema(fieldName string, rt reflect.Type, nullable bool) (*FieldSchema, error) {
 	// Only []byte and struct pointers can be tagged nullable.
 	if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
-		return nil, errBadNullable
+		return nil, badNullableError{fieldName, rt}
 	}
 	switch rt {
 	case typeOfByteSlice:
@@ -267,13 +304,13 @@ func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
 		et := rt.Elem()
 		if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
 			// Multi dimensional slices/arrays are not supported by BigQuery
-			return nil, errUnsupportedFieldType
+			return nil, unsupportedFieldTypeError{fieldName, rt}
 		}
 		if nullableFieldType(et) != "" {
 			// Repeated nullable types are not supported by BigQuery.
-			return nil, errUnsupportedFieldType
+			return nil, unsupportedFieldTypeError{fieldName, rt}
 		}
-		f, err := inferFieldSchema(et, false)
+		f, err := inferFieldSchema(fieldName, et, false)
 		if err != nil {
 			return nil, err
 		}
@@ -282,7 +319,7 @@ func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
 		return f, nil
 	case reflect.Ptr:
 		if rt.Elem().Kind() != reflect.Struct {
-			return nil, errUnsupportedFieldType
+			return nil, unsupportedFieldTypeError{fieldName, rt}
 		}
 		fallthrough
 	case reflect.Struct:
@@ -298,7 +335,7 @@ func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
 	case reflect.Float32, reflect.Float64:
 		return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
 	default:
-		return nil, errUnsupportedFieldType
+		return nil, unsupportedFieldTypeError{fieldName, rt}
 	}
 }
 
@@ -317,7 +354,7 @@ func inferFields(rt reflect.Type) (Schema, error) {
 			break
 		}
 	}
-	f, err := inferFieldSchema(field.Type, nullable)
+	f, err := inferFieldSchema(field.Name, field.Type, nullable)
 	if err != nil {
 		return nil, err
 	}
@@ -395,3 +432,87 @@ func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
 	}
 	return false, nil
 }
+
+// bigQueryJSONField is an individual field in a JSON BigQuery table schema definition
+// (as generated by https://github.com/GoogleCloudPlatform/protoc-gen-bq-schema).
+type bigQueryJSONField struct {
+	Description string              `json:"description"`
+	Fields      []bigQueryJSONField `json:"fields"`
+	Mode        string              `json:"mode"`
+	Name        string              `json:"name"`
+	Type        string              `json:"type"`
+}
+
+// convertSchemaFromJSON generates a Schema:
+func convertSchemaFromJSON(fs []bigQueryJSONField) (Schema, error) {
+	convertedSchema := Schema{}
+	for _, f := range fs {
+		convertedFieldSchema := &FieldSchema{
+			Description: f.Description,
+			Name:        f.Name,
+			Required:    f.Mode == "REQUIRED",
+			Repeated:    f.Mode == "REPEATED",
+		}
+		if len(f.Fields) > 0 {
+			convertedNestedFieldSchema, err := convertSchemaFromJSON(f.Fields)
+			if err != nil {
+				return nil, err
+			}
+			convertedFieldSchema.Schema = convertedNestedFieldSchema
+		}
+
+		// Check that the field-type (string) maps to a known FieldType:
+		if _, ok := fieldTypes[FieldType(f.Type)]; !ok {
+			return nil, fmt.Errorf("unknown field type (%v)", f.Type)
+		}
+		convertedFieldSchema.Type = FieldType(f.Type)
+
+		convertedSchema = append(convertedSchema, convertedFieldSchema)
+	}
+	return convertedSchema, nil
+}
+
+// SchemaFromJSON takes a JSON BigQuery table schema definition
+// (as generated by https://github.com/GoogleCloudPlatform/protoc-gen-bq-schema)
+// and returns a fully-populated Schema.
+func SchemaFromJSON(schemaJSON []byte) (Schema, error) {
+
+	var bigQuerySchema []bigQueryJSONField
+
+	// Make sure we actually have some content:
+	if len(schemaJSON) == 0 {
+		return nil, errEmptyJSONSchema
+	}
+
+	if err := json.Unmarshal(schemaJSON, &bigQuerySchema); err != nil {
+		return nil, err
+	}
+
+	return convertSchemaFromJSON(bigQuerySchema)
+}
+
+type noStructError struct {
+	typ reflect.Type
+}
+
+func (e noStructError) Error() string {
+	return fmt.Sprintf("bigquery: can only infer schema from struct or pointer to struct, not %s", e.typ)
+}
+
+type badNullableError struct {
+	name string
+	typ  reflect.Type
+}
+
+func (e badNullableError) Error() string {
+	return fmt.Sprintf(`bigquery: field %q of type %s: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`, e.name, e.typ)
+}
+
+type unsupportedFieldTypeError struct {
+	name string
+	typ  reflect.Type
+}
+
+func (e unsupportedFieldTypeError) Error() string {
+	return fmt.Sprintf("bigquery: field %q: type %s is not supported", e.name, e.typ)
+}
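A short sketch of the new SchemaFromJSON entry point; the schema below is a made-up example in the protoc-gen-bq-schema JSON format:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	schemaJSON := []byte(`[
		{"name": "name", "type": "STRING", "mode": "REQUIRED"},
		{"name": "tags", "type": "STRING", "mode": "REPEATED"},
		{"name": "home", "type": "GEOGRAPHY", "mode": "NULLABLE"}
	]`)
	schema, err := bigquery.SchemaFromJSON(schemaJSON)
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range schema {
		fmt.Println(f.Name, f.Type, f.Required, f.Repeated)
	}
}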
248
vendor/cloud.google.com/go/bigquery/schema_test.go
generated
vendored
248
vendor/cloud.google.com/go/bigquery/schema_test.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
|||||||
"cloud.google.com/go/civil"
|
"cloud.google.com/go/civil"
|
||||||
"cloud.google.com/go/internal/pretty"
|
"cloud.google.com/go/internal/pretty"
|
||||||
"cloud.google.com/go/internal/testutil"
|
"cloud.google.com/go/internal/testutil"
|
||||||
|
|
||||||
bq "google.golang.org/api/bigquery/v2"
|
bq "google.golang.org/api/bigquery/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -170,6 +169,16 @@ func TestSchemaConversion(t *testing.T) {
|
|||||||
fieldSchema("desc", "n", "NUMERIC", false, false),
|
fieldSchema("desc", "n", "NUMERIC", false, false),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
bqSchema: &bq.TableSchema{
|
||||||
|
Fields: []*bq.TableFieldSchema{
|
||||||
|
bqTableFieldSchema("geo", "g", "GEOGRAPHY", ""),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
schema: Schema{
|
||||||
|
fieldSchema("geo", "g", "GEOGRAPHY", false, false),
|
||||||
|
},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
// nested
|
// nested
|
||||||
bqSchema: &bq.TableSchema{
|
bqSchema: &bq.TableSchema{
|
||||||
@ -349,7 +358,6 @@ func TestSimpleInference(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type containsNested struct {
|
type containsNested struct {
|
||||||
hidden string
|
|
||||||
NotNested int
|
NotNested int
|
||||||
Nested struct {
|
Nested struct {
|
||||||
Inside int
|
Inside int
|
||||||
@ -529,6 +537,7 @@ type allNulls struct {
|
|||||||
F NullTime
|
F NullTime
|
||||||
G NullDate
|
G NullDate
|
||||||
H NullDateTime
|
H NullDateTime
|
||||||
|
I NullGeography
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNullInference(t *testing.T) {
|
func TestNullInference(t *testing.T) {
|
||||||
@ -545,6 +554,7 @@ func TestNullInference(t *testing.T) {
|
|||||||
optField("F", "TIME"),
|
optField("F", "TIME"),
|
||||||
optField("G", "DATE"),
|
optField("G", "DATE"),
|
||||||
optField("H", "DATETIME"),
|
optField("H", "DATETIME"),
|
||||||
|
optField("I", "GEOGRAPHY"),
|
||||||
}
|
}
|
||||||
if diff := testutil.Diff(got, want); diff != "" {
|
if diff := testutil.Diff(got, want); diff != "" {
|
||||||
t.Error(diff)
|
t.Error(diff)
|
||||||
@@ -704,52 +714,31 @@ func TestTagInference(t *testing.T) {
 }
 
 func TestTagInferenceErrors(t *testing.T) {
-	testCases := []struct {
-		in  interface{}
-		err error
-	}{
-		{
-			in: struct {
+	testCases := []interface{}{
+		struct {
 			LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"`
 		}{},
-			err: errInvalidFieldName,
-		},
-		{
-			in: struct {
+		struct {
 			UnsupporedStartChar int `bigquery:"øab"`
 		}{},
-			err: errInvalidFieldName,
-		},
-		{
-			in: struct {
+		struct {
 			UnsupportedEndChar int `bigquery:"abø"`
 		}{},
-			err: errInvalidFieldName,
-		},
-		{
-			in: struct {
+		struct {
 			UnsupportedMiddleChar int `bigquery:"aøb"`
 		}{},
-			err: errInvalidFieldName,
-		},
-		{
-			in: struct {
+		struct {
 			StartInt int `bigquery:"1abc"`
 		}{},
-			err: errInvalidFieldName,
-		},
-		{
-			in: struct {
+		struct {
 			Hyphens int `bigquery:"a-b"`
 		}{},
-			err: errInvalidFieldName,
-		},
 	}
 	for i, tc := range testCases {
-		want := tc.err
-		_, got := InferSchema(tc.in)
+		_, got := InferSchema(tc)
-		if got != want {
+		if _, ok := got.(invalidFieldNameError); !ok {
-			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
+			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant invalidFieldNameError", i, got)
 		}
 	}
 
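
The rewrite above reflects a behavioral change: InferSchema now reports bad `bigquery` tags with a dedicated invalidFieldNameError type instead of the errInvalidFieldName sentinel, so the test asserts on the dynamic error type rather than comparing values. The error type is unexported, so outside the package a caller simply sees a non-nil error; a sketch (the struct and field are hypothetical):

	// Hyphens are not legal in BigQuery column names, so inference fails.
	_, err := bigquery.InferSchema(struct {
		Bad int `bigquery:"a-b"` // hypothetical field and tag
	}{})
	fmt.Println(err != nil) // true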
@@ -764,114 +753,113 @@ func TestTagInferenceErrors(t *testing.T) {
 func TestSchemaErrors(t *testing.T) {
 	testCases := []struct {
 		in   interface{}
-		err  error
+		want interface{}
 	}{
 		{
 			in:   []byte{},
-			err:  errNoStruct,
+			want: noStructError{},
 		},
 		{
 			in:   new(int),
-			err:  errNoStruct,
+			want: noStructError{},
 		},
 		{
 			in:   struct{ Uint uint }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ Uint64 uint64 }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ Uintptr uintptr }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ Complex complex64 }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ Map map[string]int }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ Chan chan bool }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ Ptr *int }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ Interface interface{} }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ MultiDimensional [][]int }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ MultiDimensional [][][]byte }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ SliceOfPointer []*int }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ SliceOfNull []NullInt64 }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ ChanSlice []chan bool }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ NestedChan struct{ Chan []chan bool } }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in: struct {
 				X int `bigquery:",nullable"`
 			}{},
-			err:  errBadNullable,
+			want: badNullableError{},
 		},
 		{
 			in: struct {
 				X bool `bigquery:",nullable"`
 			}{},
-			err:  errBadNullable,
+			want: badNullableError{},
 		},
 		{
 			in: struct {
 				X struct{ N int } `bigquery:",nullable"`
 			}{},
-			err:  errBadNullable,
+			want: badNullableError{},
 		},
 		{
 			in: struct {
 				X []int `bigquery:",nullable"`
 			}{},
-			err:  errBadNullable,
+			want: badNullableError{},
 		},
 		{
 			in:   struct{ X *[]byte }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ X *[]int }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 		{
 			in:   struct{ X *int }{},
-			err:  errUnsupportedFieldType,
+			want: unsupportedFieldTypeError{},
 		},
 	}
 	for _, tc := range testCases {
-		want := tc.err
 		_, got := InferSchema(tc.in)
-		if got != want {
+		if reflect.TypeOf(got) != reflect.TypeOf(tc.want) {
-			t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want)
+			t.Errorf("%#v: got:\n%#v\nwant type %T", tc.in, got, tc.want)
 		}
 	}
 }
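
Note the pattern shared by both rewritten tests: the sentinel errors errNoStruct, errUnsupportedFieldType, and errBadNullable give way to distinct error types (noStructError, unsupportedFieldTypeError, badNullableError), presumably so each error can carry field-specific context in its message while tests keep matching on reflect.TypeOf alone.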
@@ -887,7 +875,6 @@ func TestHasRecursiveType(t *testing.T) {
 		}
 		recUnexported struct {
 			A int
-			b *rec
 		}
 		hasRec struct {
 			A int
@@ -918,3 +905,140 @@ func TestHasRecursiveType(t *testing.T) {
 		}
 	}
 }
 
+func TestSchemaFromJSON(t *testing.T) {
+	testCasesExpectingSuccess := []struct {
+		bqSchemaJSON   []byte
+		description    string
+		expectedSchema Schema
+	}{
+		{
+			description: "Flat table with a mixture of NULLABLE and REQUIRED fields",
+			bqSchemaJSON: []byte(`
+[
+	{"name":"flat_string","type":"STRING","mode":"NULLABLE","description":"Flat nullable string"},
+	{"name":"flat_bytes","type":"BYTES","mode":"REQUIRED","description":"Flat required BYTES"},
+	{"name":"flat_integer","type":"INTEGER","mode":"NULLABLE","description":"Flat nullable INTEGER"},
+	{"name":"flat_float","type":"FLOAT","mode":"REQUIRED","description":"Flat required FLOAT"},
+	{"name":"flat_boolean","type":"BOOLEAN","mode":"NULLABLE","description":"Flat nullable BOOLEAN"},
+	{"name":"flat_timestamp","type":"TIMESTAMP","mode":"REQUIRED","description":"Flat required TIMESTAMP"},
+	{"name":"flat_date","type":"DATE","mode":"NULLABLE","description":"Flat required DATE"},
+	{"name":"flat_time","type":"TIME","mode":"REQUIRED","description":"Flat nullable TIME"},
+	{"name":"flat_datetime","type":"DATETIME","mode":"NULLABLE","description":"Flat required DATETIME"},
+	{"name":"flat_numeric","type":"NUMERIC","mode":"REQUIRED","description":"Flat nullable NUMERIC"},
+	{"name":"flat_geography","type":"GEOGRAPHY","mode":"REQUIRED","description":"Flat required GEOGRAPHY"}
+]`),
+			expectedSchema: Schema{
+				fieldSchema("Flat nullable string", "flat_string", "STRING", false, false),
+				fieldSchema("Flat required BYTES", "flat_bytes", "BYTES", false, true),
+				fieldSchema("Flat nullable INTEGER", "flat_integer", "INTEGER", false, false),
+				fieldSchema("Flat required FLOAT", "flat_float", "FLOAT", false, true),
+				fieldSchema("Flat nullable BOOLEAN", "flat_boolean", "BOOLEAN", false, false),
+				fieldSchema("Flat required TIMESTAMP", "flat_timestamp", "TIMESTAMP", false, true),
+				fieldSchema("Flat required DATE", "flat_date", "DATE", false, false),
+				fieldSchema("Flat nullable TIME", "flat_time", "TIME", false, true),
+				fieldSchema("Flat required DATETIME", "flat_datetime", "DATETIME", false, false),
+				fieldSchema("Flat nullable NUMERIC", "flat_numeric", "NUMERIC", false, true),
+				fieldSchema("Flat required GEOGRAPHY", "flat_geography", "GEOGRAPHY", false, true),
+			},
+		},
+		{
+			description: "Table with a nested RECORD",
+			bqSchemaJSON: []byte(`
+[
+	{"name":"flat_string","type":"STRING","mode":"NULLABLE","description":"Flat nullable string"},
+	{"name":"nested_record","type":"RECORD","mode":"NULLABLE","description":"Nested nullable RECORD","fields":[{"name":"record_field_1","type":"STRING","mode":"NULLABLE","description":"First nested record field"},{"name":"record_field_2","type":"INTEGER","mode":"REQUIRED","description":"Second nested record field"}]}
+]`),
+			expectedSchema: Schema{
+				fieldSchema("Flat nullable string", "flat_string", "STRING", false, false),
+				&FieldSchema{
+					Description: "Nested nullable RECORD",
+					Name:        "nested_record",
+					Required:    false,
+					Type:        "RECORD",
+					Schema: Schema{
+						{
+							Description: "First nested record field",
+							Name:        "record_field_1",
+							Required:    false,
+							Type:        "STRING",
+						},
+						{
+							Description: "Second nested record field",
+							Name:        "record_field_2",
+							Required:    true,
+							Type:        "INTEGER",
+						},
+					},
+				},
+			},
+		},
+		{
+			description: "Table with a repeated RECORD",
+			bqSchemaJSON: []byte(`
+[
+	{"name":"flat_string","type":"STRING","mode":"NULLABLE","description":"Flat nullable string"},
+	{"name":"nested_record","type":"RECORD","mode":"REPEATED","description":"Nested nullable RECORD","fields":[{"name":"record_field_1","type":"STRING","mode":"NULLABLE","description":"First nested record field"},{"name":"record_field_2","type":"INTEGER","mode":"REQUIRED","description":"Second nested record field"}]}
+]`),
+			expectedSchema: Schema{
+				fieldSchema("Flat nullable string", "flat_string", "STRING", false, false),
+				&FieldSchema{
+					Description: "Nested nullable RECORD",
+					Name:        "nested_record",
+					Repeated:    true,
+					Required:    false,
+					Type:        "RECORD",
+					Schema: Schema{
+						{
+							Description: "First nested record field",
+							Name:        "record_field_1",
+							Required:    false,
+							Type:        "STRING",
+						},
+						{
+							Description: "Second nested record field",
+							Name:        "record_field_2",
+							Required:    true,
+							Type:        "INTEGER",
+						},
+					},
+				},
+			},
+		},
+	}
+	for _, tc := range testCasesExpectingSuccess {
+		convertedSchema, err := SchemaFromJSON(tc.bqSchemaJSON)
+		if err != nil {
+			t.Errorf("encountered an error when converting JSON table schema (%s): %v", tc.description, err)
+			continue
+		}
+		if !testutil.Equal(convertedSchema, tc.expectedSchema) {
+			t.Errorf("generated JSON table schema (%s) differs from the expected schema", tc.description)
+		}
+	}
+
+	testCasesExpectingFailure := []struct {
+		bqSchemaJSON []byte
+		description  string
+	}{
+		{
+			description:  "Schema with invalid JSON",
+			bqSchemaJSON: []byte(`This is not JSON`),
+		},
+		{
+			description:  "Schema with unknown field type",
+			bqSchemaJSON: []byte(`[{"name":"strange_type","type":"STRANGE","description":"This type should not exist"}]`),
+		},
+		{
+			description:  "Schema with zero length",
+			bqSchemaJSON: []byte(``),
+		},
+	}
+	for _, tc := range testCasesExpectingFailure {
+		_, err := SchemaFromJSON(tc.bqSchemaJSON)
+		if err == nil {
+			t.Errorf("converting this schema should have returned an error (%s): %v", tc.description, err)
+			continue
+		}
+	}
+}
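
The happy-path cases above double as documentation for the entry point under test: SchemaFromJSON parses a JSON array of field definitions (name/type/mode/description, with nested fields for RECORDs) into a Schema. A minimal usage sketch:

	schema, err := bigquery.SchemaFromJSON([]byte(`[
		{"name":"id","type":"INTEGER","mode":"REQUIRED"},
		{"name":"geom","type":"GEOGRAPHY","mode":"NULLABLE"}
	]`))
	if err != nil {
		// invalid JSON or an unknown field type, as exercised above
	}
	fmt.Println(len(schema)) // 2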
255
vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/big_query_storage_client.go
generated
vendored
Normal file
@@ -0,0 +1,255 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package storage

import (
	"context"
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/api/option"
	"google.golang.org/api/transport"
	storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// BigQueryStorageCallOptions contains the retry settings for each method of BigQueryStorageClient.
type BigQueryStorageCallOptions struct {
	CreateReadSession             []gax.CallOption
	ReadRows                      []gax.CallOption
	BatchCreateReadSessionStreams []gax.CallOption
	FinalizeStream                []gax.CallOption
	SplitReadStream               []gax.CallOption
}

func defaultBigQueryStorageClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("bigquerystorage.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}

func defaultBigQueryStorageCallOptions() *BigQueryStorageCallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &BigQueryStorageCallOptions{
		CreateReadSession:             retry[[2]string{"default", "idempotent"}],
		ReadRows:                      retry[[2]string{"default", "idempotent"}],
		BatchCreateReadSessionStreams: retry[[2]string{"default", "idempotent"}],
		FinalizeStream:                retry[[2]string{"default", "idempotent"}],
		SplitReadStream:               retry[[2]string{"default", "idempotent"}],
	}
}

// BigQueryStorageClient is a client for interacting with BigQuery Storage API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type BigQueryStorageClient struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	bigQueryStorageClient storagepb.BigQueryStorageClient

	// The call options for this service.
	CallOptions *BigQueryStorageCallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewBigQueryStorageClient creates a new big query storage client.
//
// BigQuery storage API.
//
// The BigQuery storage API can be used to read data stored in BigQuery.
func NewBigQueryStorageClient(ctx context.Context, opts ...option.ClientOption) (*BigQueryStorageClient, error) {
	conn, err := transport.DialGRPC(ctx, append(defaultBigQueryStorageClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &BigQueryStorageClient{
		conn:        conn,
		CallOptions: defaultBigQueryStorageCallOptions(),

		bigQueryStorageClient: storagepb.NewBigQueryStorageClient(conn),
	}
	c.setGoogleClientInfo()
	return c, nil
}

// Connection returns the client's connection to the API service.
func (c *BigQueryStorageClient) Connection() *grpc.ClientConn {
	return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *BigQueryStorageClient) Close() error {
	return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *BigQueryStorageClient) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", versionGo()}, keyval...)
	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// CreateReadSession creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Read sessions automatically expire 24 hours after they are created and do
// not require manual clean-up by the caller.
func (c *BigQueryStorageClient) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest, opts ...gax.CallOption) (*storagepb.ReadSession, error) {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v", "table_reference.project_id", req.GetTableReference().GetProjectId(), "table_reference.dataset_id", req.GetTableReference().GetDatasetId()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	opts = append(c.CallOptions.CreateReadSession[0:len(c.CallOptions.CreateReadSession):len(c.CallOptions.CreateReadSession)], opts...)
	var resp *storagepb.ReadSession
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryStorageClient.CreateReadSession(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// ReadRows reads rows from the table in the format prescribed by the read session.
// Each response contains one or more table rows, up to a maximum of 10 MiB
// per response; read requests which attempt to read individual rows larger
// than this will fail.
//
// Each request also returns a set of stream statistics reflecting the
// estimated total number of rows in the read stream. This number is computed
// based on the total table size and the number of active streams in the read
// session, and may change as other streams continue to read data.
func (c *BigQueryStorageClient) ReadRows(ctx context.Context, req *storagepb.ReadRowsRequest, opts ...gax.CallOption) (storagepb.BigQueryStorage_ReadRowsClient, error) {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "read_position.stream.name", req.GetReadPosition().GetStream().GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	opts = append(c.CallOptions.ReadRows[0:len(c.CallOptions.ReadRows):len(c.CallOptions.ReadRows)], opts...)
	var resp storagepb.BigQueryStorage_ReadRowsClient
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryStorageClient.ReadRows(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// BatchCreateReadSessionStreams creates additional streams for a ReadSession. This API can be used to
// dynamically adjust the parallelism of a batch processing task upwards by
// adding additional workers.
func (c *BigQueryStorageClient) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest, opts ...gax.CallOption) (*storagepb.BatchCreateReadSessionStreamsResponse, error) {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "session.name", req.GetSession().GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	opts = append(c.CallOptions.BatchCreateReadSessionStreams[0:len(c.CallOptions.BatchCreateReadSessionStreams):len(c.CallOptions.BatchCreateReadSessionStreams)], opts...)
	var resp *storagepb.BatchCreateReadSessionStreamsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryStorageClient.BatchCreateReadSessionStreams(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// FinalizeStream triggers the graceful termination of a single stream in a ReadSession. This
// API can be used to dynamically adjust the parallelism of a batch processing
// task downwards without losing data.
//
// This API does not delete the stream -- it remains visible in the
// ReadSession, and any data processed by the stream is not released to other
// streams. However, no additional data will be assigned to the stream once
// this call completes. Callers must continue reading data on the stream until
// the end of the stream is reached so that data which has already been
// assigned to the stream will be processed.
//
// This method will return an error if there are no other live streams
// in the Session, or if SplitReadStream() has been called on the given
// Stream.
func (c *BigQueryStorageClient) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest, opts ...gax.CallOption) error {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "stream.name", req.GetStream().GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	opts = append(c.CallOptions.FinalizeStream[0:len(c.CallOptions.FinalizeStream):len(c.CallOptions.FinalizeStream)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.bigQueryStorageClient.FinalizeStream(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// SplitReadStream splits a given read stream into two Streams. These streams are referred to
// as the primary and the residual of the split. The original stream can still
// be read from in the same manner as before. Both of the returned streams can
// also be read from, and the total rows return by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back to back in the
// original Stream. Concretely, it is guaranteed that for streams Original,
// Primary, and Residual, that Original[0-j] = Primary[0-j] and
// Original[j-n] = Residual[0-m] once the streams have been read to
// completion.
//
// This method is guaranteed to be idempotent.
func (c *BigQueryStorageClient) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest, opts ...gax.CallOption) (*storagepb.SplitReadStreamResponse, error) {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "original_stream.name", req.GetOriginalStream().GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	opts = append(c.CallOptions.SplitReadStream[0:len(c.CallOptions.SplitReadStream):len(c.CallOptions.SplitReadStream)], opts...)
	var resp *storagepb.SplitReadStreamResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryStorageClient.SplitReadStream(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
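
Because CallOptions is an exported field, callers can swap out the generated retry policy per method. A sketch using only the gax helpers this file already relies on (the codes and backoff values are illustrative, not defaults):

	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// handle error
	}
	defer c.Close()
	// Retry ReadRows only on Unavailable, with a tighter backoff.
	c.CallOptions.ReadRows = []gax.CallOption{
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
				Initial:    50 * time.Millisecond,
				Max:        10 * time.Second,
				Multiplier: 2,
			})
		}),
	}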
132
vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/big_query_storage_client_example_test.go
generated
vendored
Normal file
@@ -0,0 +1,132 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package storage_test

import (
	"context"
	"io"

	storage "cloud.google.com/go/bigquery/storage/apiv1beta1"
	storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

func ExampleNewBigQueryStorageClient() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}

func ExampleBigQueryStorageClient_CreateReadSession() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &storagepb.CreateReadSessionRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateReadSession(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleBigQueryStorageClient_ReadRows() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &storagepb.ReadRowsRequest{
		// TODO: Fill request struct fields.
	}
	stream, err := c.ReadRows(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			// TODO: handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleBigQueryStorageClient_BatchCreateReadSessionStreams() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &storagepb.BatchCreateReadSessionStreamsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.BatchCreateReadSessionStreams(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleBigQueryStorageClient_FinalizeStream() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &storagepb.FinalizeStreamRequest{
		// TODO: Fill request struct fields.
	}
	err = c.FinalizeStream(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleBigQueryStorageClient_SplitReadStream() {
	ctx := context.Background()
	c, err := storage.NewBigQueryStorageClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &storagepb.SplitReadStreamRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SplitReadStream(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
89
vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/doc.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

// Package storage is an auto-generated package for the
// BigQuery Storage API.
//
// NOTE: This package is in beta. It is not stable, and may be subject to changes.
//
package storage // import "cloud.google.com/go/bigquery/storage/apiv1beta1"

import (
	"context"
	"runtime"
	"strings"
	"unicode"

	"google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	return []string{
		"https://www.googleapis.com/auth/bigquery",
		"https://www.googleapis.com/auth/cloud-platform",
	}
}

// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
	const develPrefix = "devel +"

	s := runtime.Version()
	if strings.HasPrefix(s, develPrefix) {
		s = s[len(develPrefix):]
		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
			s = s[:p]
		}
		return s
	}

	notSemverRune := func(r rune) bool {
		return strings.IndexRune("0123456789.", r) < 0
	}

	if strings.HasPrefix(s, "go1") {
		s = s[2:]
		var prerelease string
		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
			s, prerelease = s[:p], s[p:]
		}
		if strings.HasSuffix(s, ".") {
			s += "0"
		} else if strings.Count(s, ".") < 2 {
			s += ".0"
		}
		if prerelease != "" {
			s += "-" + prerelease
		}
		return s
	}
	return "UNKNOWN"
}

const versionClient = "20190306"
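
versionGo's normalization is easiest to read off a few worked inputs (expected outputs derived from the logic above):

	// runtime.Version()          -> versionGo()
	// "go1.12"                   -> "1.12.0"        (patch component padded)
	// "go1.12.1"                 -> "1.12.1"
	// "go1.13beta1"              -> "1.13.0-beta1"  (prerelease split off)
	// "devel +abc123 Mon Jan 1"  -> "abc123"        (devel builds report the commit)
	// anything unrecognized      -> "UNKNOWN"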
452
vendor/cloud.google.com/go/bigquery/storage/apiv1beta1/mock_test.go
generated
vendored
Normal file
@@ -0,0 +1,452 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package storage

import (
	emptypb "github.com/golang/protobuf/ptypes/empty"
	storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

import (
	"context"
	"flag"
	"fmt"
	"io"
	"log"
	"net"
	"os"
	"strings"
	"testing"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"google.golang.org/api/option"
	status "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	gstatus "google.golang.org/grpc/status"
)

var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status

type mockBigQueryStorageServer struct {
	// Embed for forward compatibility.
	// Tests will keep working if more methods are added
	// in the future.
	storagepb.BigQueryStorageServer

	reqs []proto.Message

	// If set, all calls return this error.
	err error

	// responses to return if err == nil
	resps []proto.Message
}

func (s *mockBigQueryStorageServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*storagepb.ReadSession), nil
}

func (s *mockBigQueryStorageServer) ReadRows(req *storagepb.ReadRowsRequest, stream storagepb.BigQueryStorage_ReadRowsServer) error {
	md, _ := metadata.FromIncomingContext(stream.Context())
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return s.err
	}
	for _, v := range s.resps {
		if err := stream.Send(v.(*storagepb.ReadRowsResponse)); err != nil {
			return err
		}
	}
	return nil
}

func (s *mockBigQueryStorageServer) BatchCreateReadSessionStreams(ctx context.Context, req *storagepb.BatchCreateReadSessionStreamsRequest) (*storagepb.BatchCreateReadSessionStreamsResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*storagepb.BatchCreateReadSessionStreamsResponse), nil
}

func (s *mockBigQueryStorageServer) FinalizeStream(ctx context.Context, req *storagepb.FinalizeStreamRequest) (*emptypb.Empty, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*emptypb.Empty), nil
}

func (s *mockBigQueryStorageServer) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest) (*storagepb.SplitReadStreamResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*storagepb.SplitReadStreamResponse), nil
}

// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption

var (
	mockBigQueryStorage mockBigQueryStorageServer
)

func TestMain(m *testing.M) {
	flag.Parse()

	serv := grpc.NewServer()
	storagepb.RegisterBigQueryStorageServer(serv, &mockBigQueryStorage)

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	go serv.Serve(lis)

	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	clientOpt = option.WithGRPCConn(conn)

	os.Exit(m.Run())
}

func TestBigQueryStorageCreateReadSession(t *testing.T) {
	var name string = "name3373707"
	var expectedResponse = &storagepb.ReadSession{
		Name: name,
	}

	mockBigQueryStorage.err = nil
	mockBigQueryStorage.reqs = nil

	mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)

	var tableReference *storagepb.TableReference = &storagepb.TableReference{}
	var parent string = "parent-995424086"
	var request = &storagepb.CreateReadSessionRequest{
		TableReference: tableReference,
		Parent:         parent,
	}

	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.CreateReadSession(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

func TestBigQueryStorageCreateReadSessionError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockBigQueryStorage.err = gstatus.Error(errCode, "test error")

	var tableReference *storagepb.TableReference = &storagepb.TableReference{}
	var parent string = "parent-995424086"
	var request = &storagepb.CreateReadSessionRequest{
		TableReference: tableReference,
		Parent:         parent,
	}

	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.CreateReadSession(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestBigQueryStorageReadRows(t *testing.T) {
	var expectedResponse *storagepb.ReadRowsResponse = &storagepb.ReadRowsResponse{}

	mockBigQueryStorage.err = nil
	mockBigQueryStorage.reqs = nil

	mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)

	var readPosition *storagepb.StreamPosition = &storagepb.StreamPosition{}
	var request = &storagepb.ReadRowsRequest{
		ReadPosition: readPosition,
	}

	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	stream, err := c.ReadRows(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := stream.Recv()

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

func TestBigQueryStorageReadRowsError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockBigQueryStorage.err = gstatus.Error(errCode, "test error")

	var readPosition *storagepb.StreamPosition = &storagepb.StreamPosition{}
	var request = &storagepb.ReadRowsRequest{
		ReadPosition: readPosition,
	}

	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	stream, err := c.ReadRows(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := stream.Recv()

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestBigQueryStorageBatchCreateReadSessionStreams(t *testing.T) {
	var expectedResponse *storagepb.BatchCreateReadSessionStreamsResponse = &storagepb.BatchCreateReadSessionStreamsResponse{}

	mockBigQueryStorage.err = nil
	mockBigQueryStorage.reqs = nil

	mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)

	var session *storagepb.ReadSession = &storagepb.ReadSession{}
	var requestedStreams int32 = 1017221410
	var request = &storagepb.BatchCreateReadSessionStreamsRequest{
		Session:          session,
		RequestedStreams: requestedStreams,
	}

	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.BatchCreateReadSessionStreams(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

func TestBigQueryStorageBatchCreateReadSessionStreamsError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockBigQueryStorage.err = gstatus.Error(errCode, "test error")

	var session *storagepb.ReadSession = &storagepb.ReadSession{}
	var requestedStreams int32 = 1017221410
	var request = &storagepb.BatchCreateReadSessionStreamsRequest{
		Session:          session,
		RequestedStreams: requestedStreams,
	}

	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.BatchCreateReadSessionStreams(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestBigQueryStorageFinalizeStream(t *testing.T) {
	var expectedResponse *emptypb.Empty = &emptypb.Empty{}

	mockBigQueryStorage.err = nil
	mockBigQueryStorage.reqs = nil

	mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)

	var stream *storagepb.Stream = &storagepb.Stream{}
	var request = &storagepb.FinalizeStreamRequest{
		Stream: stream,
	}

	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	err = c.FinalizeStream(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

}

func TestBigQueryStorageFinalizeStreamError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockBigQueryStorage.err = gstatus.Error(errCode, "test error")

	var stream *storagepb.Stream = &storagepb.Stream{}
	var request = &storagepb.FinalizeStreamRequest{
		Stream: stream,
	}

	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	err = c.FinalizeStream(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
}
func TestBigQueryStorageSplitReadStream(t *testing.T) {
	var expectedResponse *storagepb.SplitReadStreamResponse = &storagepb.SplitReadStreamResponse{}

	mockBigQueryStorage.err = nil
	mockBigQueryStorage.reqs = nil

	mockBigQueryStorage.resps = append(mockBigQueryStorage.resps[:0], expectedResponse)

	var originalStream *storagepb.Stream = &storagepb.Stream{}
	var request = &storagepb.SplitReadStreamRequest{
		OriginalStream: originalStream,
	}

	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.SplitReadStream(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockBigQueryStorage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

func TestBigQueryStorageSplitReadStreamError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockBigQueryStorage.err = gstatus.Error(errCode, "test error")

	var originalStream *storagepb.Stream = &storagepb.Stream{}
	var request = &storagepb.SplitReadStreamRequest{
		OriginalStream: originalStream,
	}

	c, err := NewBigQueryStorageClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.SplitReadStream(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
122
vendor/cloud.google.com/go/bigquery/table.go
generated
vendored
@@ -15,14 +15,13 @@
 package bigquery
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"time"
 
-	"cloud.google.com/go/internal/trace"
-	"golang.org/x/net/context"
-
 	"cloud.google.com/go/internal/optional"
+	"cloud.google.com/go/internal/trace"
 	bq "google.golang.org/api/bigquery/v2"
 )
 
@@ -67,8 +66,12 @@ type TableMetadata struct {
 	// If non-nil, the table is partitioned by time.
 	TimePartitioning *TimePartitioning
 
-	// The time when this table expires. If not set, the table will persist
-	// indefinitely. Expired tables will be deleted and their storage reclaimed.
+	// Clustering specifies the data clustering configuration for the table.
+	Clustering *Clustering
+
+	// The time when this table expires. If set, this table will expire at the
+	// specified time. Expired tables will be deleted and their storage
+	// reclaimed. The zero value is ignored.
 	ExpirationTime time.Time
 
 	// User-provided labels.
@@ -91,6 +94,11 @@ type TableMetadata struct {
 	// This does not include data that is being buffered during a streaming insert.
 	NumBytes int64
 
+	// The number of bytes in the table considered "long-term storage" for reduced
+	// billing purposes. See https://cloud.google.com/bigquery/pricing#long-term-storage
+	// for more information.
+	NumLongTermBytes int64
+
 	// The number of rows of data in this table.
 	// This does not include data that is being buffered during a streaming insert.
 	NumRows uint64
@@ -140,8 +148,14 @@
 type TableType string
 
 const (
+	// RegularTable is a regular table.
 	RegularTable TableType = "TABLE"
+	// ViewTable is a table type describing that the table is view. See more
+	// information at https://cloud.google.com/bigquery/docs/views.
 	ViewTable TableType = "VIEW"
+	// ExternalTable is a table type describing that the table is an external
+	// table (also known as a federated data source). See more information at
+	// https://cloud.google.com/bigquery/external-data-sources.
 	ExternalTable TableType = "EXTERNAL"
 )
 
@@ -156,6 +170,10 @@ type TimePartitioning struct {
 	// table is partitioned by this field. The field must be a top-level TIMESTAMP or
 	// DATE field. Its mode must be NULLABLE or REQUIRED.
 	Field string
+
+	// If true, queries that reference this table must include a filter (e.g. a WHERE predicate)
+	// that can be used for partition elimination.
+	RequirePartitionFilter bool
 }
 
 func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
@@ -166,6 +184,7 @@ func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
 		Type:         "DAY",
 		ExpirationMs: int64(p.Expiration / time.Millisecond),
 		Field:        p.Field,
+		RequirePartitionFilter: p.RequirePartitionFilter,
 	}
 }
 
@ -176,6 +195,31 @@ func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
|
|||||||
return &TimePartitioning{
|
return &TimePartitioning{
|
||||||
Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
|
Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
|
||||||
Field: q.Field,
|
Field: q.Field,
|
||||||
|
RequirePartitionFilter: q.RequirePartitionFilter,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clustering governs the organization of data within a partitioned table.
|
||||||
|
// For more information, see https://cloud.google.com/bigquery/docs/clustered-tables
|
||||||
|
type Clustering struct {
|
||||||
|
Fields []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Clustering) toBQ() *bq.Clustering {
|
||||||
|
if c == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &bq.Clustering{
|
||||||
|
Fields: c.Fields,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func bqToClustering(q *bq.Clustering) *Clustering {
|
||||||
|
if q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &Clustering{
|
||||||
|
Fields: q.Fields,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
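(Not part of the diff: a sketch of how the new RequirePartitionFilter and Clustering options might combine at table creation; assumes imports of context and cloud.google.com/go/bigquery, plus a schema containing the named columns.)

func createClusteredTable(ctx context.Context, client *bigquery.Client, schema bigquery.Schema) error {
	return client.Dataset("mydataset").Table("events").Create(ctx, &bigquery.TableMetadata{
		Schema: schema, // assumed to include event_time, origin, destination
		// Partition by event_time and reject queries without a partition filter.
		TimePartitioning: &bigquery.TimePartitioning{
			Field:                  "event_time",
			RequirePartitionFilter: true,
		},
		// Within each partition, co-locate rows by origin, then destination.
		Clustering: &bigquery.Clustering{Fields: []string{"origin", "destination"}},
	})
}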
@@ -240,7 +284,7 @@ func (t *Table) implicitTable() bool {
 // Create creates a table in the BigQuery service.
 // Pass in a TableMetadata value to configure the table.
 // If tm.View.Query is non-empty, the created table will be of type VIEW.
-// Expiration can only be set during table creation.
+// If no ExpirationTime is specified, the table will never expire.
 // After table creation, a view can be modified only if its table was initially created
 // with a view.
 func (t *Table) Create(ctx context.Context, tm *TableMetadata) (err error) {
@@ -291,7 +335,13 @@ func (tm *TableMetadata) toBQ() (*bq.Table, error) {
 		return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
 	}
 	t.TimePartitioning = tm.TimePartitioning.toBQ()
-	if !tm.ExpirationTime.IsZero() {
+	t.Clustering = tm.Clustering.toBQ()
+
+	if !validExpiration(tm.ExpirationTime) {
+		return nil, fmt.Errorf("invalid expiration time: %v.\n"+
+			"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
+	}
+	if !tm.ExpirationTime.IsZero() && tm.ExpirationTime != NeverExpire {
 		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
 	}
 	if tm.ExternalDataConfig != nil {
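(Not part of the diff: the rewritten toBQ path above distinguishes three ExpirationTime cases on create; a sketch under that reading, with hypothetical names.)

// Zero value: the expiration field is simply not sent; the table never expires.
never := &bigquery.TableMetadata{}

// NeverExpire sentinel: accepted by validExpiration and likewise not sent.
alsoNever := &bigquery.TableMetadata{ExpirationTime: bigquery.NeverExpire}

// A concrete time must be representable by Time.UnixNano (after 1678 and
// before 2262), otherwise Create surfaces the "invalid expiration time" error.
twoDays := &bigquery.TableMetadata{ExpirationTime: time.Now().Add(48 * time.Hour)}
err := client.Dataset("mydataset").Table("mytable").Create(ctx, twoDays)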
@@ -314,6 +364,9 @@ func (tm *TableMetadata) toBQ() (*bq.Table, error) {
 	if tm.NumBytes != 0 {
 		return nil, errors.New("cannot set NumBytes on create")
 	}
+	if tm.NumLongTermBytes != 0 {
+		return nil, errors.New("cannot set NumLongTermBytes on create")
+	}
 	if tm.NumRows != 0 {
 		return nil, errors.New("cannot set NumRows on create")
 	}
@@ -352,6 +405,7 @@ func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
 		FullID:           t.Id,
 		Labels:           t.Labels,
 		NumBytes:         t.NumBytes,
+		NumLongTermBytes: t.NumLongTermBytes,
 		NumRows:          t.NumRows,
 		ExpirationTime:   unixMillisToTime(t.ExpirationTime),
 		CreationTime:     unixMillisToTime(t.CreationTime),
@@ -367,6 +421,7 @@ func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
 		md.UseLegacySQL = t.View.UseLegacySql
 	}
 	md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
+	md.Clustering = bqToClustering(t.Clustering)
 	if t.StreamingBuffer != nil {
 		md.StreamingBuffer = &StreamingBuffer{
 			EstimatedBytes: t.StreamingBuffer.EstimatedBytes,
@@ -403,12 +458,18 @@ func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
 	return newRowIterator(ctx, t, pf)
 }

+// NeverExpire is a sentinel value used to remove a table's expiration time.
+var NeverExpire = time.Time{}.Add(-1)
+
 // Update modifies specific Table metadata fields.
 func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (md *TableMetadata, err error) {
 	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Update")
 	defer func() { trace.EndSpan(ctx, err) }()

-	bqt := tm.toBQ()
+	bqt, err := tm.toBQ()
+	if err != nil {
+		return nil, err
+	}
 	call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
 	setClientHeader(call.Header())
 	if etag != "" {
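(Not part of the diff: a sketch of the new NeverExpire sentinel with Update; the dataset and table names are hypothetical.)

// Clearing an existing expiration: NeverExpire becomes a NullFields entry in
// the PATCH request, which nulls out expirationTime on the server.
md, err := client.Dataset("mydataset").Table("mytable").Update(ctx,
	bigquery.TableMetadataToUpdate{ExpirationTime: bigquery.NeverExpire}, "")
if err != nil {
	log.Fatal(err)
}
// After the call, the returned metadata should report no expiration.
fmt.Println(md.ExpirationTime.IsZero())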
@@ -424,7 +485,7 @@ func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag strin
 	return bqToTableMetadata(res)
 }

-func (tm *TableMetadataToUpdate) toBQ() *bq.Table {
+func (tm *TableMetadataToUpdate) toBQ() (*bq.Table, error) {
 	t := &bq.Table{}
 	forceSend := func(field string) {
 		t.ForceSendFields = append(t.ForceSendFields, field)
@@ -442,10 +503,27 @@ func (tm *TableMetadataToUpdate) toBQ() *bq.Table {
 		t.Schema = tm.Schema.toBQ()
 		forceSend("Schema")
 	}
-	if !tm.ExpirationTime.IsZero() {
+	if tm.EncryptionConfig != nil {
+		t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
+	}
+
+	if !validExpiration(tm.ExpirationTime) {
+		return nil, fmt.Errorf("invalid expiration time: %v.\n"+
+			"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
+	}
+	if tm.ExpirationTime == NeverExpire {
+		t.NullFields = append(t.NullFields, "ExpirationTime")
+	} else if !tm.ExpirationTime.IsZero() {
 		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
 		forceSend("ExpirationTime")
 	}
+	if tm.TimePartitioning != nil {
+		t.TimePartitioning = tm.TimePartitioning.toBQ()
+		t.TimePartitioning.ForceSendFields = []string{"RequirePartitionFilter"}
+		if tm.TimePartitioning.Expiration == 0 {
+			t.TimePartitioning.NullFields = []string{"ExpirationMs"}
+		}
+	}
 	if tm.ViewQuery != nil {
 		t.View = &bq.ViewDefinition{
 			Query: optional.ToString(tm.ViewQuery),
@@ -463,7 +541,16 @@ func (tm *TableMetadataToUpdate) toBQ() *bq.Table {
 	t.Labels = labels
 	t.ForceSendFields = append(t.ForceSendFields, forces...)
 	t.NullFields = append(t.NullFields, nulls...)
-	return t
+	return t, nil
+}
+
+// validExpiration ensures a specified time is either the sentinel NeverExpire,
+// the zero value, or within the defined range of UnixNano. Internal
+// representations of expiration times are based upon Time.UnixNano. Any time
+// before 1678 or after 2262 cannot be represented by an int64 and is therefore
+// undefined and invalid. See https://godoc.org/time#Time.UnixNano.
+func validExpiration(t time.Time) bool {
+	return t == NeverExpire || t.IsZero() || time.Unix(0, t.UnixNano()).Equal(t)
 }

 // TableMetadataToUpdate is used when updating a table's metadata.
@@ -479,7 +566,12 @@ type TableMetadataToUpdate struct {
 	// When updating a schema, you can add columns but not remove them.
 	Schema Schema

-	// The time when this table expires.
+	// The table's encryption configuration. When calling Update, ensure that
+	// all mutable fields of EncryptionConfig are populated.
+	EncryptionConfig *EncryptionConfig
+
+	// The time when this table expires. To remove a table's expiration,
+	// set ExpirationTime to NeverExpire. The zero value is ignored.
 	ExpirationTime time.Time

 	// The query to use for a view.
@@ -488,6 +580,12 @@ type TableMetadataToUpdate struct {
 	// Use Legacy SQL for the view query.
 	UseLegacySQL optional.Bool

+	// TimePartitioning allows modification of certain aspects of partition
+	// configuration such as partition expiration and whether partition
+	// filtration is required at query time. When calling Update, ensure
+	// that all mutable fields of TimePartitioning are populated.
+	TimePartitioning *TimePartitioning
+
 	labelUpdater
 }

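(Not part of the diff: a sketch of the new TimePartitioning update path; per the code above, a zero Expiration maps to a NullFields entry for ExpirationMs while RequirePartitionFilter is always force-sent. Names are hypothetical.)

// Drop the partition expiration but keep requiring partition filters.
_, err := client.Dataset("mydataset").Table("events").Update(ctx,
	bigquery.TableMetadataToUpdate{
		TimePartitioning: &bigquery.TimePartitioning{
			Expiration:             0, // becomes NullFields: ["ExpirationMs"]
			RequirePartitionFilter: true,
		},
	}, "")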
79
vendor/cloud.google.com/go/bigquery/table_test.go
generated
vendored
@@ -53,6 +53,9 @@ func TestBQToTableMetadata(t *testing.T) {
 			Type:  "DAY",
 			Field: "pfield",
 		},
+		Clustering: &bq.Clustering{
+			Fields: []string{"cfield1", "cfield2"},
+		},
 		EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
 		Type:                    "EXTERNAL",
 		View:                    &bq.ViewDefinition{Query: "view-query"},
@@ -73,11 +76,15 @@ func TestBQToTableMetadata(t *testing.T) {
 		CreationTime:     aTime.Truncate(time.Millisecond),
 		LastModifiedTime: aTime.Truncate(time.Millisecond),
 		NumBytes:         123,
+		NumLongTermBytes: 23,
 		NumRows:          7,
 		TimePartitioning: &TimePartitioning{
 			Expiration: 7890 * time.Millisecond,
 			Field:      "pfield",
 		},
+		Clustering: &Clustering{
+			Fields: []string{"cfield1", "cfield2"},
+		},
 		StreamingBuffer: &StreamingBuffer{
 			EstimatedBytes: 11,
 			EstimatedRows:  3,
@@ -168,6 +175,9 @@ func TestTableMetadataToBQ(t *testing.T) {
 				Expiration: time.Second,
 				Field:      "ofDreams",
 			},
+			Clustering: &Clustering{
+				Fields: []string{"cfield1"},
+			},
 		},
 		&bq.Table{
 			View: &bq.ViewDefinition{
@@ -180,8 +190,15 @@ func TestTableMetadataToBQ(t *testing.T) {
 				ExpirationMs: 1000,
 				Field:        "ofDreams",
 			},
+			Clustering: &bq.Clustering{
+				Fields: []string{"cfield1"},
 			},
 		},
+	},
+	{
+		&TableMetadata{ExpirationTime: NeverExpire},
+		&bq.Table{ExpirationTime: 0},
+	},
 } {
 		got, err := test.in.toBQ()
 		if err != nil {
@@ -203,9 +220,14 @@ func TestTableMetadataToBQ(t *testing.T) {
 		{CreationTime: aTime},
 		{LastModifiedTime: aTime},
 		{NumBytes: 1},
+		{NumLongTermBytes: 1},
 		{NumRows: 1},
 		{StreamingBuffer: &StreamingBuffer{}},
 		{ETag: "x"},
+		// expiration time outside allowable range is invalid
+		// See https://godoc.org/time#Time.UnixNano
+		{ExpirationTime: time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC).Add(-1)},
+		{ExpirationTime: time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC).Add(1)},
 	} {
 		_, err := in.toBQ()
 		if err == nil {
@@ -286,10 +308,65 @@ func TestTableMetadataToUpdateToBQ(t *testing.T) {
 				NullFields:      []string{"Labels.D"},
 			},
 		},
+		{
+			tm: TableMetadataToUpdate{ExpirationTime: NeverExpire},
+			want: &bq.Table{
+				NullFields: []string{"ExpirationTime"},
+			},
+		},
+		{
+			tm: TableMetadataToUpdate{TimePartitioning: &TimePartitioning{Expiration: 0}},
+			want: &bq.Table{
+				TimePartitioning: &bq.TimePartitioning{
+					Type:            "DAY",
+					ForceSendFields: []string{"RequirePartitionFilter"},
+					NullFields:      []string{"ExpirationMs"},
+				},
+			},
+		},
+		{
+			tm: TableMetadataToUpdate{TimePartitioning: &TimePartitioning{Expiration: time.Duration(time.Hour)}},
+			want: &bq.Table{
+				TimePartitioning: &bq.TimePartitioning{
+					ExpirationMs:    3600000,
+					Type:            "DAY",
+					ForceSendFields: []string{"RequirePartitionFilter"},
+				},
+			},
+		},
 	} {
-		got := test.tm.toBQ()
+		got, _ := test.tm.toBQ()
 		if !testutil.Equal(got, test.want) {
 			t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want)
 		}
 	}
 }
+
+func TestTableMetadataToUpdateToBQErrors(t *testing.T) {
+	// See https://godoc.org/time#Time.UnixNano
+	start := time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC)
+	end := time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC)
+
+	for _, test := range []struct {
+		desc    string
+		aTime   time.Time
+		wantErr bool
+	}{
+		{desc: "ignored zero value", aTime: time.Time{}, wantErr: false},
+		{desc: "earliest valid time", aTime: start, wantErr: false},
+		{desc: "latest valid time", aTime: end, wantErr: false},
+		{desc: "invalid times before 1678", aTime: start.Add(-1), wantErr: true},
+		{desc: "invalid times after 2262", aTime: end.Add(1), wantErr: true},
+		{desc: "valid times after 1678", aTime: start.Add(1), wantErr: false},
+		{desc: "valid times before 2262", aTime: end.Add(-1), wantErr: false},
+	} {
+		tm := &TableMetadataToUpdate{ExpirationTime: test.aTime}
+		_, err := tm.toBQ()
+		if test.wantErr && err == nil {
+			t.Errorf("[%s] got no error, want error", test.desc)
+		}
+		if !test.wantErr && err != nil {
+			t.Errorf("[%s] got error, want no error", test.desc)
+		}
+	}
+}
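(Not part of the diff: a worked check of the 1678/2262 boundaries the tests above use; they are exactly the times whose UnixNano values are math.MinInt64 and math.MaxInt64.)

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	start := time.Date(1677, 9, 21, 0, 12, 43, 145224192, time.UTC)
	end := time.Date(2262, 04, 11, 23, 47, 16, 854775807, time.UTC)
	fmt.Println(start.UnixNano() == math.MinInt64) // true
	fmt.Println(end.UnixNano() == math.MaxInt64)   // true
	// One nanosecond outside either bound overflows int64, which is why
	// validExpiration rejects start.Add(-1) and end.Add(1).
}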
231
vendor/cloud.google.com/go/bigquery/uploader.go
generated
vendored
@@ -1,231 +0,0 @@
-// Copyright 2015 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-
-	"cloud.google.com/go/internal/trace"
-	"golang.org/x/net/context"
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// An Uploader does streaming inserts into a BigQuery table.
-// It is safe for concurrent use.
-type Uploader struct {
-	t *Table
-
-	// SkipInvalidRows causes rows containing invalid data to be silently
-	// ignored. The default value is false, which causes the entire request to
-	// fail if there is an attempt to insert an invalid row.
-	SkipInvalidRows bool
-
-	// IgnoreUnknownValues causes values not matching the schema to be ignored.
-	// The default value is false, which causes records containing such values
-	// to be treated as invalid records.
-	IgnoreUnknownValues bool
-
-	// A TableTemplateSuffix allows Uploaders to create tables automatically.
-	//
-	// Experimental: this option is experimental and may be modified or removed in future versions,
-	// regardless of any other documented package stability guarantees.
-	//
-	// When you specify a suffix, the table you upload data to
-	// will be used as a template for creating a new table, with the same schema,
-	// called <table> + <suffix>.
-	//
-	// More information is available at
-	// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
-	TableTemplateSuffix string
-}
-
-// Uploader returns an Uploader that can be used to append rows to t.
-// The returned Uploader may optionally be further configured before its Put method is called.
-//
-// To stream rows into a date-partitioned table at a particular date, add the
-// $yyyymmdd suffix to the table name when constructing the Table.
-func (t *Table) Uploader() *Uploader {
-	return &Uploader{t: t}
-}
-
-// Put uploads one or more rows to the BigQuery service.
-//
-// If src is ValueSaver, then its Save method is called to produce a row for uploading.
-//
-// If src is a struct or pointer to a struct, then a schema is inferred from it
-// and used to create a StructSaver. The InsertID of the StructSaver will be
-// empty.
-//
-// If src is a slice of ValueSavers, structs, or struct pointers, then each
-// element of the slice is treated as above, and multiple rows are uploaded.
-//
-// Put returns a PutMultiError if one or more rows failed to be uploaded.
-// The PutMultiError contains a RowInsertionError for each failed row.
-//
-// Put will retry on temporary errors (see
-// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
-// in duplicate rows if you do not use insert IDs. Also, if the error persists,
-// the call will run indefinitely. Pass a context with a timeout to prevent
-// hanging calls.
-func (u *Uploader) Put(ctx context.Context, src interface{}) (err error) {
-	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Uploader.Put")
-	defer func() { trace.EndSpan(ctx, err) }()
-
-	savers, err := valueSavers(src)
-	if err != nil {
-		return err
-	}
-	return u.putMulti(ctx, savers)
-}
-
-func valueSavers(src interface{}) ([]ValueSaver, error) {
-	saver, ok, err := toValueSaver(src)
-	if err != nil {
-		return nil, err
-	}
-	if ok {
-		return []ValueSaver{saver}, nil
-	}
-	srcVal := reflect.ValueOf(src)
-	if srcVal.Kind() != reflect.Slice {
-		return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
-
-	}
-	var savers []ValueSaver
-	for i := 0; i < srcVal.Len(); i++ {
-		s := srcVal.Index(i).Interface()
-		saver, ok, err := toValueSaver(s)
-		if err != nil {
-			return nil, err
-		}
-		if !ok {
-			return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
-		}
-		savers = append(savers, saver)
-	}
-	return savers, nil
-}
-
-// Make a ValueSaver from x, which must implement ValueSaver already
-// or be a struct or pointer to struct.
-func toValueSaver(x interface{}) (ValueSaver, bool, error) {
-	if _, ok := x.(StructSaver); ok {
-		return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
-	}
-	var insertID string
-	// Handle StructSavers specially so we can infer the schema if necessary.
-	if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
-		x = ss.Struct
-		insertID = ss.InsertID
-		// Fall through so we can infer the schema.
-	}
-	if saver, ok := x.(ValueSaver); ok {
-		return saver, ok, nil
-	}
-	v := reflect.ValueOf(x)
-	// Support Put with []interface{}
-	if v.Kind() == reflect.Interface {
-		v = v.Elem()
-	}
-	if v.Kind() == reflect.Ptr {
-		v = v.Elem()
-	}
-	if v.Kind() != reflect.Struct {
-		return nil, false, nil
-	}
-	schema, err := inferSchemaReflectCached(v.Type())
-	if err != nil {
-		return nil, false, err
-	}
-	return &StructSaver{
-		Struct:   x,
-		InsertID: insertID,
-		Schema:   schema,
-	}, true, nil
-}
-
-func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
-	req, err := u.newInsertRequest(src)
-	if err != nil {
-		return err
-	}
-	if req == nil {
-		return nil
-	}
-	call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
-	call = call.Context(ctx)
-	setClientHeader(call.Header())
-	var res *bq.TableDataInsertAllResponse
-	err = runWithRetry(ctx, func() (err error) {
-		res, err = call.Do()
-		return err
-	})
-	if err != nil {
-		return err
-	}
-	return handleInsertErrors(res.InsertErrors, req.Rows)
-}
-
-func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
-	if savers == nil { // If there are no rows, do nothing.
-		return nil, nil
-	}
-	req := &bq.TableDataInsertAllRequest{
-		TemplateSuffix:      u.TableTemplateSuffix,
-		IgnoreUnknownValues: u.IgnoreUnknownValues,
-		SkipInvalidRows:     u.SkipInvalidRows,
-	}
-	for _, saver := range savers {
-		row, insertID, err := saver.Save()
-		if err != nil {
-			return nil, err
-		}
-		if insertID == "" {
-			insertID = randomIDFn()
-		}
-		m := make(map[string]bq.JsonValue)
-		for k, v := range row {
-			m[k] = bq.JsonValue(v)
-		}
-		req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
-			InsertId: insertID,
-			Json:     m,
-		})
-	}
-	return req, nil
-}
-
-func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
-	if len(ierrs) == 0 {
-		return nil
-	}
-	var errs PutMultiError
-	for _, e := range ierrs {
-		if int(e.Index) > len(rows) {
-			return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
-		}
-		rie := RowInsertionError{
-			InsertID: rows[e.Index].InsertId,
-			RowIndex: int(e.Index),
-		}
-		for _, errp := range e.Errors {
-			rie.Errors = append(rie.Errors, bqToError(errp))
-		}
-		errs = append(errs, rie)
-	}
-	return errs
-}
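(Not part of the diff: the removed Put documentation warns that retries on temporary errors can run indefinitely; a sketch of bounding a streaming insert with a timeout, using the pre-removal Uploader API as shown above.)

func insertRows(client *bigquery.Client) error {
	// Bound Put: it retries temporary errors and can otherwise hang.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	type event struct{ Name string }
	u := client.Dataset("mydataset").Table("mytable").Uploader()
	err := u.Put(ctx, []event{{"signup"}, {"login"}})
	if pme, ok := err.(bigquery.PutMultiError); ok {
		for _, rie := range pme {
			log.Printf("row %d (insert ID %s) failed: %v", rie.RowIndex, rie.InsertID, rie.Errors)
		}
	}
	return err
}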
211
vendor/cloud.google.com/go/bigquery/uploader_test.go
generated
vendored
@@ -1,211 +0,0 @@
-// Copyright 2015 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"errors"
-	"strconv"
-	"testing"
-
-	"github.com/google/go-cmp/cmp"
-
-	"cloud.google.com/go/internal/pretty"
-	"cloud.google.com/go/internal/testutil"
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-type testSaver struct {
-	row      map[string]Value
-	insertID string
-	err      error
-}
-
-func (ts testSaver) Save() (map[string]Value, string, error) {
-	return ts.row, ts.insertID, ts.err
-}
-
-func TestNewInsertRequest(t *testing.T) {
-	prev := randomIDFn
-	n := 0
-	randomIDFn = func() string { n++; return strconv.Itoa(n) }
-	defer func() { randomIDFn = prev }()
-
-	tests := []struct {
-		ul     *Uploader
-		savers []ValueSaver
-		req    *bq.TableDataInsertAllRequest
-	}{
-		{
-			ul:     &Uploader{},
-			savers: nil,
-			req:    nil,
-		},
-		{
-			ul: &Uploader{},
-			savers: []ValueSaver{
-				testSaver{row: map[string]Value{"one": 1}},
-				testSaver{row: map[string]Value{"two": 2}},
-			},
-			req: &bq.TableDataInsertAllRequest{
-				Rows: []*bq.TableDataInsertAllRequestRows{
-					{InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}},
-					{InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}},
-				},
-			},
-		},
-		{
-			ul: &Uploader{
-				TableTemplateSuffix: "suffix",
-				IgnoreUnknownValues: true,
-				SkipInvalidRows:     true,
-			},
-			savers: []ValueSaver{
-				testSaver{insertID: "a", row: map[string]Value{"one": 1}},
-				testSaver{insertID: "", row: map[string]Value{"two": 2}},
-			},
-			req: &bq.TableDataInsertAllRequest{
-				Rows: []*bq.TableDataInsertAllRequestRows{
-					{InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}},
-					{InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}},
-				},
-				TemplateSuffix:      "suffix",
-				SkipInvalidRows:     true,
-				IgnoreUnknownValues: true,
-			},
-		},
-	}
-	for i, tc := range tests {
-		got, err := tc.ul.newInsertRequest(tc.savers)
-		if err != nil {
-			t.Fatal(err)
-		}
-		want := tc.req
-		if !testutil.Equal(got, want) {
-			t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want)
-		}
-	}
-}
-
-func TestNewInsertRequestErrors(t *testing.T) {
-	var u Uploader
-	_, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("!")}})
-	if err == nil {
-		t.Error("got nil, want error")
-	}
-}
-
-func TestHandleInsertErrors(t *testing.T) {
-	rows := []*bq.TableDataInsertAllRequestRows{
-		{InsertId: "a"},
-		{InsertId: "b"},
-	}
-	for _, test := range []struct {
-		in   []*bq.TableDataInsertAllResponseInsertErrors
-		want error
-	}{
-		{
-			in:   nil,
-			want: nil,
-		},
-		{
-			in:   []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
-			want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
-		},
-		{
-			in:   []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
-			want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
-		},
-		{
-			in: []*bq.TableDataInsertAllResponseInsertErrors{
-				{Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0},
-				{Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1},
-			},
-			want: PutMultiError{
-				RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}},
-				RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}},
-			},
-		},
-	} {
-		got := handleInsertErrors(test.in, rows)
-		if !testutil.Equal(got, test.want) {
-			t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want)
-		}
-	}
-}
-
-func TestValueSavers(t *testing.T) {
-	ts := &testSaver{}
-	type T struct{ I int }
-	schema, err := InferSchema(T{})
-	if err != nil {
-		t.Fatal(err)
-	}
-	for _, test := range []struct {
-		in   interface{}
-		want []ValueSaver
-	}{
-		{[]interface{}(nil), nil},
-		{[]interface{}{}, nil},
-		{ts, []ValueSaver{ts}},
-		{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
-		{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
-		{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
-		{[]T{{I: 1}, {I: 2}}, []ValueSaver{
-			&StructSaver{Schema: schema, Struct: T{I: 1}},
-			&StructSaver{Schema: schema, Struct: T{I: 2}},
-		}},
-		{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
-			&StructSaver{Schema: schema, Struct: T{I: 1}},
-			&StructSaver{Schema: schema, Struct: &T{I: 2}},
-		}},
-		{&StructSaver{Struct: T{I: 3}, InsertID: "foo"},
-			[]ValueSaver{
-				&StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"},
-			}},
-	} {
-		got, err := valueSavers(test.in)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) {
-			t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
-		}
-		// Make sure Save is successful.
-		for i, vs := range got {
-			_, _, err := vs.Save()
-			if err != nil {
-				t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
-			}
-		}
-	}
-}
-
-func TestValueSaversErrors(t *testing.T) {
-	inputs := []interface{}{
-		nil,
-		1,
-		[]int{1, 2},
-		[]interface{}{
-			testSaver{row: map[string]Value{"one": 1}, insertID: "a"},
-			1,
-		},
-		StructSaver{},
-	}
-	for _, in := range inputs {
-		if _, err := valueSavers(in); err == nil {
-			t.Errorf("%#v: got nil, want error", in)
-		}
-	}
-}
37
vendor/cloud.google.com/go/bigquery/value.go
generated
vendored
@@ -26,7 +26,6 @@ import (
 	"time"

 	"cloud.google.com/go/civil"
-
 	bq "google.golang.org/api/bigquery/v2"
 )

@@ -171,6 +170,14 @@ func setString(v reflect.Value, x interface{}) error {
 	return nil
 }

+func setGeography(v reflect.Value, x interface{}) error {
+	if x == nil {
+		return errNoNulls
+	}
+	v.SetString(x.(string))
+	return nil
+}
+
 func setBytes(v reflect.Value, x interface{}) error {
 	if x == nil {
 		v.SetBytes(nil)
@@ -290,6 +297,18 @@ func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
 			}
 		}

+	case GeographyFieldType:
+		if ftype.Kind() == reflect.String {
+			return setGeography
+		}
+		if ftype == typeOfNullGeography {
+			return func(v reflect.Value, x interface{}) error {
+				return setNull(v, x, func() interface{} {
+					return NullGeography{GeographyVal: x.(string), Valid: true}
+				})
+			}
+		}
+
 	case BytesFieldType:
 		if ftype == typeOfByteSlice {
 			return setBytes
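(Not part of the diff: a sketch of the two destination types the new setGeography path supports; assumes a table with matching GEOGRAPHY columns, and iterator is google.golang.org/api/iterator.)

type site struct {
	Name string
	Loc  string                 // REQUIRED GEOGRAPHY: receives the raw WKT text
	Area bigquery.NullGeography // NULLABLE GEOGRAPHY: Valid reports non-NULL
}

func readSites(ctx context.Context, table *bigquery.Table) error {
	it := table.Read(ctx)
	for {
		var s site
		err := it.Next(&s)
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(s.Name, s.Loc, s.Area.GeographyVal)
	}
}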
@@ -498,7 +517,7 @@ func (vls *ValuesSaver) Save() (map[string]Value, string, error) {

 func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
 	if len(vs) != len(schema) {
-		return nil, errors.New("Schema does not match length of row to be inserted")
+		return nil, errors.New("schema does not match length of row to be inserted")
 	}

 	m := make(map[string]Value)
@@ -630,10 +649,11 @@ func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (i
 		return m, nil
 	}
 	// A repeated nested field is converted into a slice of maps.
-	if vfield.Len() == 0 {
+	// If the field is zero-length (but not nil), we return a zero-length []Value.
+	if vfield.IsNil() {
 		return nil, nil
 	}
-	var vals []Value
+	vals := []Value{}
 	for i := 0; i < vfield.Len(); i++ {
 		m, err := structToMap(vfield.Index(i), schemaField.Schema)
 		if err != nil {
@@ -707,11 +727,10 @@ func formatUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value)
 func CivilTimeString(t civil.Time) string {
 	if t.Nanosecond == 0 {
 		return t.String()
-	} else {
+	}
 	micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
 	t.Nanosecond = 0
 	return t.String() + fmt.Sprintf(".%06d", micro)
-	}
 }

 // CivilDateTimeString returns a string representing a civil.DateTime in a format compatible
@@ -735,10 +754,10 @@ func parseCivilDateTime(s string) (civil.DateTime, error) {
 }

 const (
-	// The maximum number of digits in a NUMERIC value.
+	// NumericPrecisionDigits is the maximum number of digits in a NUMERIC value.
 	NumericPrecisionDigits = 38

-	// The maximum number of digits after the decimal point in a NUMERIC value.
+	// NumericScaleDigits is the maximum number of digits after the decimal point in a NUMERIC value.
 	NumericScaleDigits = 9
 )

@@ -865,6 +884,8 @@ func convertBasicType(val string, typ FieldType) (Value, error) {
 			return nil, fmt.Errorf("bigquery: invalid NUMERIC value %q", val)
 		}
 		return Value(r), nil
+	case GeographyFieldType:
+		return val, nil
 	default:
 		return nil, fmt.Errorf("unrecognized type: %s", typ)
 	}
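(Not part of the diff: convertBasicType now passes GEOGRAPHY values through as their WKT text; a sketch of observing that from a query, assuming a configured client.)

q := client.Query("SELECT ST_GEOGPOINT(-122.35, 47.65) AS g")
it, err := q.Read(ctx)
if err != nil {
	log.Fatal(err)
}
var row []bigquery.Value
if err := it.Next(&row); err != nil {
	log.Fatal(err)
}
// row[0] holds the geography as a WKT string, e.g. "POINT(-122.35 47.65)".
fmt.Println(row[0].(string))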
62
vendor/cloud.google.com/go/bigquery/value_test.go
generated
vendored
@@ -22,11 +22,9 @@ import (
 	"testing"
 	"time"

-	"github.com/google/go-cmp/cmp"
-
 	"cloud.google.com/go/civil"
 	"cloud.google.com/go/internal/testutil"
+	"github.com/google/go-cmp/cmp"
 	bq "google.golang.org/api/bigquery/v2"
 )

@@ -38,6 +36,7 @@ func TestConvertBasicValues(t *testing.T) {
 		{Type: BooleanFieldType},
 		{Type: BytesFieldType},
 		{Type: NumericFieldType},
+		{Type: GeographyFieldType},
 	}
 	row := &bq.TableRow{
 		F: []*bq.TableCell{
@@ -47,6 +46,7 @@ func TestConvertBasicValues(t *testing.T) {
 			{V: "true"},
 			{V: base64.StdEncoding.EncodeToString([]byte("foo"))},
 			{V: "123.123456789"},
+			{V: testGeography},
 		},
 	}
 	got, err := convertRow(row, schema)
@@ -54,7 +54,7 @@ func TestConvertBasicValues(t *testing.T) {
 		t.Fatalf("error converting: %v", err)
 	}

-	want := []Value{"a", int64(1), 1.2, true, []byte("foo"), big.NewRat(123123456789, 1e9)}
+	want := []Value{"a", int64(1), 1.2, true, []byte("foo"), big.NewRat(123123456789, 1e9), testGeography}
 	if !testutil.Equal(got, want) {
 		t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want)
 	}
@@ -406,6 +406,7 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
 				{Name: "strField", Type: StringFieldType},
 				{Name: "dtField", Type: DateTimeFieldType},
 				{Name: "nField", Type: NumericFieldType},
+				{Name: "geoField", Type: GeographyFieldType},
 			},
 			InsertID: "iid",
 			Row: []Value{1, "a",
@@ -413,6 +414,7 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
 					Date: civil.Date{Year: 1, Month: 2, Day: 3},
 					Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 7000}},
 				big.NewRat(123456789000, 1e9),
+				testGeography,
 			},
 		},
 		wantInsertID: "iid",
@@ -421,6 +423,7 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
 			"strField": "a",
 			"dtField":  "0001-02-03 04:05:06.000007",
 			"nField":   "123.456789000",
+			"geoField": testGeography,
 		},
 	},
 	{
@@ -539,6 +542,8 @@ func TestStructSaver(t *testing.T) {
 		{Name: "p", Type: IntegerFieldType, Required: false},
 		{Name: "n", Type: NumericFieldType, Required: false},
 		{Name: "nr", Type: NumericFieldType, Repeated: true},
+		{Name: "g", Type: GeographyFieldType, Required: false},
+		{Name: "gr", Type: GeographyFieldType, Repeated: true},
 	}

 	type (
@@ -553,6 +558,8 @@ func TestStructSaver(t *testing.T) {
 		P  NullInt64
 		N  *big.Rat
 		NR []*big.Rat
+		G  NullGeography
+		GR []string // Repeated Geography
 	}
 	)

@@ -586,6 +593,8 @@ func TestStructSaver(t *testing.T) {
 		P:  NullInt64{Valid: true, Int64: 17},
 		N:  big.NewRat(123456, 1000),
 		NR: []*big.Rat{big.NewRat(3, 1), big.NewRat(56789, 1e5)},
+		G:  NullGeography{Valid: true, GeographyVal: "POINT(-122.350220 47.649154)"},
+		GR: []string{"POINT(-122.350220 47.649154)", "POINT(-122.198939 47.669865)"},
 	}
 	want := map[string]Value{
 		"s": "x",
@@ -597,10 +606,12 @@ func TestStructSaver(t *testing.T) {
 		"p":  NullInt64{Valid: true, Int64: 17},
 		"n":  "123.456000000",
 		"nr": []string{"3.000000000", "0.567890000"},
+		"g":  NullGeography{Valid: true, GeographyVal: "POINT(-122.350220 47.649154)"},
+		"gr": []string{"POINT(-122.350220 47.649154)", "POINT(-122.198939 47.669865)"},
 	}
 	check("all values", in, want)
 	check("all values, ptr", &in, want)
-	check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00", "p": NullInt64{}})
+	check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00", "p": NullInt64{}, "g": NullGeography{}})

 	// Missing and extra fields ignored.
 	type T2 struct {
@@ -615,8 +626,18 @@ func TestStructSaver(t *testing.T) {
 		"s": "",
 		"t": "00:00:00",
 		"p": NullInt64{},
+		"g": NullGeography{},
 		"rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}},
 	})
+
+	check("zero-length repeated", T{Rnested: []*N{}},
+		map[string]Value{
+			"rnested": []Value{},
+			"s":       "",
+			"t":       "00:00:00",
+			"p":       NullInt64{},
+			"g":       NullGeography{},
+		})
 }

 func TestStructSaverErrors(t *testing.T) {
@@ -629,7 +650,7 @@ func TestStructSaverErrors(t *testing.T) {
 	)

 	for i, test := range []struct {
-		struct_ interface{}
+		inputStruct interface{}
 		schema  Schema
 	}{
 		{0, nil}, // not a struct
@@ -639,10 +660,10 @@ func TestStructSaverErrors(t *testing.T) {
 		{&badRN{[]int{0}}, // nested repeated field has bad type
 			Schema{{Name: "r", Type: RecordFieldType, Repeated: true}}},
 	} {
-		ss := &StructSaver{Struct: test.struct_, Schema: test.schema}
+		ss := &StructSaver{Struct: test.inputStruct, Schema: test.schema}
 		_, _, err := ss.Save()
 		if err == nil {
-			t.Errorf("#%d, %v, %v: got nil, want error", i, test.struct_, test.schema)
+			t.Errorf("#%d, %v, %v: got nil, want error", i, test.inputStruct, test.schema)
 		}
 	}
 }
@@ -671,6 +692,7 @@ func TestConvertRows(t *testing.T) {
 		{Type: IntegerFieldType},
 		{Type: FloatFieldType},
 		{Type: BooleanFieldType},
+		{Type: GeographyFieldType},
 	}
 	rows := []*bq.TableRow{
 		{F: []*bq.TableCell{
@@ -678,17 +700,19 @@ func TestConvertRows(t *testing.T) {
 			{V: "1"},
 			{V: "1.2"},
 			{V: "true"},
+			{V: "POINT(-122.350220 47.649154)"},
 		}},
 		{F: []*bq.TableCell{
 			{V: "b"},
 			{V: "2"},
 			{V: "2.2"},
 			{V: "false"},
+			{V: "POINT(-122.198939 47.669865)"},
 		}},
 	}
 	want := [][]Value{
-		{"a", int64(1), 1.2, true},
-		{"b", int64(2), 2.2, false},
+		{"a", int64(1), 1.2, true, "POINT(-122.350220 47.649154)"},
+		{"b", int64(2), 2.2, false, "POINT(-122.198939 47.669865)"},
 	}
 	got, err := convertRows(rows, schema)
 	if err != nil {
@@ -724,7 +748,7 @@ func TestValueList(t *testing.T) {
 	}

 	// Load truncates, not appends.
-	// https://github.com/GoogleCloudPlatform/google-cloud-go/issues/437
+	// https://github.com/googleapis/google-cloud-go/issues/437
 	if err := vl.Load(want, schema); err != nil {
 		t.Fatal(err)
 	}
@@ -802,6 +826,7 @@ var (
 	{Name: "T", Type: TimeFieldType},
 	{Name: "DT", Type: DateTimeFieldType},
 	{Name: "N", Type: NumericFieldType},
+	{Name: "G", Type: GeographyFieldType},
 	{Name: "nested", Type: RecordFieldType, Schema: Schema{
 		{Name: "nestS", Type: StringFieldType},
 		{Name: "nestI", Type: IntegerFieldType},
@@ -814,9 +839,11 @@ var (
 	testTime      = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 8}
 	testDateTime  = civil.DateTime{Date: testDate, Time: testTime}
 	testNumeric   = big.NewRat(123, 456)
+	// testGeography is a WKT string representing a single point.
+	testGeography = "POINT(-122.350220 47.649154)"

 	testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), int64(8), 3.14, true,
-		testTimestamp, testDate, testTime, testDateTime, testNumeric,
+		testTimestamp, testDate, testTime, testDateTime, testNumeric, testGeography,
 		[]Value{"nested", int64(17)}, "z"}
 )

@@ -828,9 +855,9 @@ type testStruct1 struct {
 	S      string
 	S2     String
 	By     []byte
-	s      string
 	F      float64
 	N      *big.Rat
+	G      string
 	Nested nested
 	Tagged string `bigquery:"t"`
 }
@@ -864,6 +891,7 @@ func TestStructLoader(t *testing.T) {
 		S2: "y",
 		By: []byte{1, 2, 3},
 		N:  big.NewRat(123, 456),
+		G:  testGeography,
 		Nested: nested{NestS: "nested", NestI: 17},
 		Tagged: "z",
 	}
@@ -964,6 +992,7 @@ type testStructNullable struct {
 	Time      NullTime
 	DateTime  NullDateTime
 	Numeric   *big.Rat
+	Geography NullGeography
 	Record    *subNullable
 }

@@ -982,6 +1011,7 @@ var testStructNullableSchema = Schema{
 	{Name: "Time", Type: TimeFieldType, Required: false},
 	{Name: "DateTime", Type: DateTimeFieldType, Required: false},
 	{Name: "Numeric", Type: NumericFieldType, Required: false},
+	{Name: "Geography", Type: GeographyFieldType, Required: false},
 	{Name: "Record", Type: RecordFieldType, Required: false, Schema: Schema{
 		{Name: "X", Type: IntegerFieldType, Required: false},
 	}},
@@ -997,7 +1027,7 @@ func TestStructLoaderNullable(t *testing.T) {
 	}

 	nonnilVals := []Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, testTime,
-		testDateTime, big.NewRat(1, 2), []Value{int64(4)}}
+		testDateTime, big.NewRat(1, 2), testGeography, []Value{int64(4)}}

 	// All ts fields are nil. Loading non-nil values will cause them all to
 	// be allocated.
@@ -1013,6 +1043,7 @@ func TestStructLoaderNullable(t *testing.T) {
 		Time:      NullTime{Time: testTime, Valid: true},
 		DateTime:  NullDateTime{DateTime: testDateTime, Valid: true},
 		Numeric:   big.NewRat(1, 2),
+		Geography: NullGeography{GeographyVal: testGeography, Valid: true},
 		Record:    &subNullable{X: NullInt64{Int64: 4, Valid: true}},
 	}
 	if diff := testutil.Diff(ts, want); diff != "" {
@@ -1022,7 +1053,7 @@ func TestStructLoaderNullable(t *testing.T) {
 	// Struct pointers are reused, byte slices are not.
 	want = ts
 	want.Bytes = []byte{17}
-	vals2 := []Value{nil, []byte{17}, nil, nil, nil, nil, nil, nil, nil, nil, []Value{int64(7)}}
+	vals2 := []Value{nil, []byte{17}, nil, nil, nil, nil, nil, nil, nil, nil, nil, []Value{int64(7)}}
 	mustLoad(t, &ts, testStructNullableSchema, vals2)
 	if ts.Record != want.Record {
 		t.Error("record pointers not identical")
@@ -1158,7 +1189,6 @@ func TestStructLoaderErrors(t *testing.T) {
 		I int
 		times
 		S string
-		s string
 		Nums []int
 	}

137
vendor/cloud.google.com/go/bigtable/admin.go
generated
vendored
137
vendor/cloud.google.com/go/bigtable/admin.go
generated
vendored
@@ -17,6 +17,7 @@ limitations under the License.
 package bigtable
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"math"
@@ -32,7 +33,6 @@ import (
 	lroauto "cloud.google.com/go/longrunning/autogen"
 	"github.com/golang/protobuf/ptypes"
 	durpb "github.com/golang/protobuf/ptypes/duration"
-	"golang.org/x/net/context"
 	"google.golang.org/api/cloudresourcemanager/v1"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
@@ -154,9 +154,9 @@ func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, sp
 // CreateTableFromConf creates a new table in the instance from the given configuration.
 func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf) error {
 	ctx = mergeOutgoingMetadata(ctx, ac.md)
-	var req_splits []*btapb.CreateTableRequest_Split
+	var reqSplits []*btapb.CreateTableRequest_Split
 	for _, split := range conf.SplitKeys {
-		req_splits = append(req_splits, &btapb.CreateTableRequest_Split{Key: []byte(split)})
+		reqSplits = append(reqSplits, &btapb.CreateTableRequest_Split{Key: []byte(split)})
 	}
 	var tbl btapb.Table
 	if conf.Families != nil {
@@ -170,7 +170,7 @@ func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf)
 		Parent:        prefix,
 		TableId:       conf.TableID,
 		Table:         &tbl,
-		InitialSplits: req_splits,
+		InitialSplits: reqSplits,
 	}
 	_, err := ac.tClient.CreateTable(ctx, req)
 	return err
@@ -312,10 +312,12 @@ func (ac *AdminClient) CreateTableFromSnapshot(ctx context.Context, table, clust
 	return longrunning.InternalNewOperation(ac.lroClient, op).Wait(ctx, &resp)
 }
 
+// DefaultSnapshotDuration is the default TTL for a snapshot.
 const DefaultSnapshotDuration time.Duration = 0
 
-// Creates a new snapshot in the specified cluster from the specified source table.
-// Setting the ttl to `DefaultSnapshotDuration` will use the server side default for the duration.
+// SnapshotTable creates a new snapshot in the specified cluster from the
+// specified source table. Setting the TTL to `DefaultSnapshotDuration` will
+// use the server side default for the duration.
 //
 // This is a private alpha release of Cloud Bigtable snapshots. This feature
 // is not currently available to most Cloud Bigtable customers. This feature
@@ -371,14 +373,19 @@ func (ac *AdminClient) Snapshots(ctx context.Context, cluster string) *SnapshotI
 		req.PageSize = int32(pageSize)
 	}
 
-	resp, err := ac.tClient.ListSnapshots(ctx, req)
+	var resp *btapb.ListSnapshotsResponse
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = ac.tClient.ListSnapshots(ctx, req)
+		return err
+	}, retryOptions...)
 	if err != nil {
 		return "", err
 	}
 	for _, s := range resp.Snapshots {
 		snapshotInfo, err := newSnapshotInfo(s)
 		if err != nil {
-			return "", fmt.Errorf("Failed to parse snapshot proto %v", err)
+			return "", fmt.Errorf("failed to parse snapshot proto %v", err)
 		}
 		it.items = append(it.items, snapshotInfo)
 	}
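The recurring change in admin.go wraps each unary RPC in gax.Invoke so it is retried under retryOptions; because Invoke only returns an error, the response is captured through a variable declared outside the closure. A minimal sketch of that shape, with a simplified stand-in for the vendored gax.Invoke (the real helper's backoff and retry-code handling are assumed, not reproduced):

    package main

    import (
        "context"
        "errors"
        "fmt"
    )

    // invoke is a stand-in for the vendored gax.Invoke: it simply retries f
    // up to three times, where the real helper also applies backoff and
    // inspects gRPC codes via its retry options.
    func invoke(ctx context.Context, f func(context.Context) error) error {
        var err error
        for attempt := 0; attempt < 3; attempt++ {
            if err = f(ctx); err == nil {
                return nil
            }
        }
        return err
    }

    // flakyRPC fails once and then succeeds, standing in for a transient error.
    func flakyRPC(calls *int) (string, error) {
        *calls++
        if *calls < 2 {
            return "", errors.New("transient unavailable")
        }
        return "response", nil
    }

    func main() {
        var calls int
        var resp string // declared outside so the closure can capture the result
        err := invoke(context.Background(), func(ctx context.Context) error {
            var err error
            resp, err = flakyRPC(&calls)
            return err
        })
        fmt.Println(resp, err, calls) // response <nil> 2
    }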
@@ -400,12 +407,12 @@ func newSnapshotInfo(snapshot *btapb.Snapshot) (*SnapshotInfo, error) {
 
 	createTime, err := ptypes.Timestamp(snapshot.CreateTime)
 	if err != nil {
-		return nil, fmt.Errorf("Invalid createTime: %v", err)
+		return nil, fmt.Errorf("invalid createTime: %v", err)
 	}
 
 	deleteTime, err := ptypes.Timestamp(snapshot.DeleteTime)
 	if err != nil {
-		return nil, fmt.Errorf("Invalid deleteTime: %v", err)
+		return nil, fmt.Errorf("invalid deleteTime: %v", err)
 	}
 
 	return &SnapshotInfo{
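Lower-casing "Failed..." and "Invalid..." follows the common Go review convention that error strings are not capitalized, because they usually end up embedded in a longer message. A small illustration:

    package main

    import "fmt"

    func main() {
        // Lower-case error text composes cleanly when a caller wraps it:
        base := fmt.Errorf("invalid createTime: %v", "value out of range")
        fmt.Println(fmt.Errorf("reading snapshot info: %v", base))
        // prints: reading snapshot info: invalid createTime: value out of range
    }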
@@ -417,7 +424,7 @@ func newSnapshotInfo(snapshot *btapb.Snapshot) (*SnapshotInfo, error) {
 	}, nil
 }
 
-// An EntryIterator iterates over log entries.
+// SnapshotIterator is an EntryIterator that iterates over log entries.
 //
 // This is a private alpha release of Cloud Bigtable snapshots. This feature
 // is not currently available to most Cloud Bigtable customers. This feature
@@ -446,6 +453,7 @@ func (it *SnapshotIterator) Next() (*SnapshotInfo, error) {
 	return item, nil
 }
 
+// SnapshotInfo contains snapshot metadata.
 type SnapshotInfo struct {
 	Name        string
 	SourceTable string
@@ -454,7 +462,7 @@ type SnapshotInfo struct {
 	DeleteTime  time.Time
 }
 
-// Get snapshot metadata.
+// SnapshotInfo gets snapshot metadata.
 //
 // This is a private alpha release of Cloud Bigtable snapshots. This feature
 // is not currently available to most Cloud Bigtable customers. This feature
@@ -470,7 +478,12 @@ func (ac *AdminClient) SnapshotInfo(ctx context.Context, cluster, snapshot strin
 		Name: snapshotPath,
 	}
 
-	resp, err := ac.tClient.GetSnapshot(ctx, req)
+	var resp *btapb.Snapshot
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = ac.tClient.GetSnapshot(ctx, req)
+		return err
+	}, retryOptions...)
 	if err != nil {
 		return nil, err
 	}
@@ -478,7 +491,7 @@ func (ac *AdminClient) SnapshotInfo(ctx context.Context, cluster, snapshot strin
 	return newSnapshotInfo(resp)
 }
 
-// Delete a snapshot in a cluster.
+// DeleteSnapshot deletes a snapshot in a cluster.
 //
 // This is a private alpha release of Cloud Bigtable snapshots. This feature
 // is not currently available to most Cloud Bigtable customers. This feature
@@ -530,11 +543,6 @@ func (ac *AdminClient) isConsistent(ctx context.Context, tableName, token string
 }
 
 // WaitForReplication waits until all the writes committed before the call started have been propagated to all the clusters in the instance via replication.
-//
-// This is a private alpha release of Cloud Bigtable replication. This feature
-// is not currently available to most Cloud Bigtable customers. This feature
-// might be changed in backward-incompatible ways and is not recommended for
-// production use. It is not subject to any SLA or deprecation policy.
 func (ac *AdminClient) WaitForReplication(ctx context.Context, table string) error {
 	// Get the token.
 	prefix := ac.instancePrefix()
@@ -684,13 +692,8 @@ func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *Instan
 	return iac.CreateInstanceWithClusters(ctx, &newConfig)
 }
 
-// CreateInstance creates a new instance with configured clusters in the project.
+// CreateInstanceWithClusters creates a new instance with configured clusters in the project.
 // This method will return when the instance has been created or when an error occurs.
-//
-// Instances with multiple clusters are part of a private alpha release of Cloud Bigtable replication.
-// This feature is not currently available to most Cloud Bigtable customers. This feature
-// might be changed in backward-incompatible ways and is not recommended for
-// production use. It is not subject to any SLA or deprecation policy.
 func (iac *InstanceAdminClient) CreateInstanceWithClusters(ctx context.Context, conf *InstanceWithClustersConfig) error {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
 	clusters := make(map[string]*btapb.Cluster)
@@ -727,7 +730,12 @@ func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo,
 	req := &btapb.ListInstancesRequest{
 		Parent: "projects/" + iac.project,
 	}
-	res, err := iac.iClient.ListInstances(ctx, req)
+	var res *btapb.ListInstancesResponse
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		res, err = iac.iClient.ListInstances(ctx, req)
+		return err
+	}, retryOptions...)
 	if err != nil {
 		return nil, err
 	}
@@ -757,7 +765,12 @@ func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceID str
 	req := &btapb.GetInstanceRequest{
 		Name: "projects/" + iac.project + "/instances/" + instanceID,
 	}
-	res, err := iac.iClient.GetInstance(ctx, req)
+	var res *btapb.Instance
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		res, err = iac.iClient.GetInstance(ctx, req)
+		return err
+	}, retryOptions...)
 	if err != nil {
 		return nil, err
 	}
@@ -797,11 +810,6 @@ type ClusterInfo struct {
 
 // CreateCluster creates a new cluster in an instance.
 // This method will return when the cluster has been created or when an error occurs.
-//
-// This is a private alpha release of Cloud Bigtable replication. This feature
-// is not currently available to most Cloud Bigtable customers. This feature
-// might be changed in backward-incompatible ways and is not recommended for
-// production use. It is not subject to any SLA or deprecation policy.
 func (iac *InstanceAdminClient) CreateCluster(ctx context.Context, conf *ClusterConfig) error {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
 
@@ -820,11 +828,6 @@ func (iac *InstanceAdminClient) CreateCluster(ctx context.Context, conf *Cluster
 }
 
 // DeleteCluster deletes a cluster from an instance.
-//
-// This is a private alpha release of Cloud Bigtable replication. This feature
-// is not currently available to most Cloud Bigtable customers. This feature
-// might be changed in backward-incompatible ways and is not recommended for
-// production use. It is not subject to any SLA or deprecation policy.
 func (iac *InstanceAdminClient) DeleteCluster(ctx context.Context, instanceID, clusterID string) error {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
 	req := &btapb.DeleteClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters/" + clusterID}
@@ -849,7 +852,12 @@ func (iac *InstanceAdminClient) UpdateCluster(ctx context.Context, instanceID, c
 func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceID string) ([]*ClusterInfo, error) {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
 	req := &btapb.ListClustersRequest{Parent: "projects/" + iac.project + "/instances/" + instanceID}
-	res, err := iac.iClient.ListClusters(ctx, req)
+	var res *btapb.ListClustersResponse
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		res, err = iac.iClient.ListClusters(ctx, req)
+		return err
+	}, retryOptions...)
 	if err != nil {
 		return nil, err
 	}
@@ -872,7 +880,12 @@ func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceID string)
 func (iac *InstanceAdminClient) GetCluster(ctx context.Context, instanceID, clusterID string) (*ClusterInfo, error) {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
 	req := &btapb.GetClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters/" + clusterID}
-	c, err := iac.iClient.GetCluster(ctx, req)
+	var c *btapb.Cluster
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		c, err = iac.iClient.GetCluster(ctx, req)
+		return err
+	}, retryOptions...)
 	if err != nil {
 		return nil, err
 	}
@@ -888,6 +901,7 @@ func (iac *InstanceAdminClient) GetCluster(ctx context.Context, instanceID, clus
 	return cis, nil
 }
 
+// InstanceIAM returns the instance's IAM handle.
 func (iac *InstanceAdminClient) InstanceIAM(instanceID string) *iam.Handle {
 	return iam.InternalNewHandleGRPCClient(iac.iClient, "projects/"+iac.project+"/instances/"+instanceID)
 
@@ -895,7 +909,15 @@ func (iac *InstanceAdminClient) InstanceIAM(instanceID string) *iam.Handle {
 
 // Routing policies.
 const (
+	// MultiClusterRouting is a policy that allows read/write requests to be
+	// routed to any cluster in the instance. Requests will fail over to
+	// another cluster in the event of transient errors or delays. Choosing
+	// this option sacrifices read-your-writes consistency to improve
+	// availability.
 	MultiClusterRouting = "multi_cluster_routing_use_any"
+	// SingleClusterRouting is a policy that unconditionally routes all
+	// read/write requests to a specific cluster. This option preserves
+	// read-your-writes consistency, but does not improve availability.
 	SingleClusterRouting = "single_cluster_routing"
 )
 
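The two new doc comments spell out the trade-off: multi-cluster routing improves availability by failing over between clusters but gives up read-your-writes consistency, while single-cluster routing keeps that consistency at the cost of availability. A hypothetical helper (not part of the package) that encodes the decision:

    package main

    import "fmt"

    const (
        MultiClusterRouting  = "multi_cluster_routing_use_any"
        SingleClusterRouting = "single_cluster_routing"
    )

    // pickRoutingPolicy is an illustrative helper: callers who need
    // read-your-writes must pin to one cluster; otherwise prefer availability.
    func pickRoutingPolicy(needReadYourWrites bool) string {
        if needReadYourWrites {
            return SingleClusterRouting
        }
        return MultiClusterRouting
    }

    func main() {
        fmt.Println(pickRoutingPolicy(true))  // single_cluster_routing
        fmt.Println(pickRoutingPolicy(false)) // multi_cluster_routing_use_any
    }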
@@ -909,15 +931,19 @@ type ProfileConf struct {
 	RoutingPolicy            string
 	ClusterID                string
 	AllowTransactionalWrites bool
+
+	// If true, warnings are ignored
+	IgnoreWarnings bool
 }
 
+// ProfileIterator iterates over profiles.
 type ProfileIterator struct {
 	items    []*btapb.AppProfile
 	pageInfo *iterator.PageInfo
 	nextFunc func() error
 }
 
-//set this to patch app profile. If unset, no fields will be replaced.
+// ProfileAttrsToUpdate defines the attributes to update during an Update call. If unset, no fields will be replaced.
 type ProfileAttrsToUpdate struct {
 	// If set, updates the description.
 	Description optional.String
@@ -928,8 +954,12 @@ type ProfileAttrsToUpdate struct {
 	// If RoutingPolicy is updated to SingleClusterRouting, set these fields as well.
 	ClusterID                string
 	AllowTransactionalWrites bool
+
+	// If true, warnings are ignored
+	IgnoreWarnings bool
 }
 
+// GetFieldMaskPath returns the field mask path.
 func (p *ProfileAttrsToUpdate) GetFieldMaskPath() []string {
 	path := make([]string, 0)
 	if p.Description != nil {
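GetFieldMaskPath turns whichever optional attributes are set into FieldMask paths, so the update request only replaces the masked fields server-side. A self-contained sketch of the set-fields-to-paths idea (the type and path names below are illustrative, not the package's):

    package main

    import "fmt"

    type profileUpdate struct {
        Description   *string
        RoutingPolicy *string
    }

    // fieldMaskPaths collects a path for each attribute that is set,
    // mirroring how GetFieldMaskPath drives the update mask.
    func (p profileUpdate) fieldMaskPaths() []string {
        var paths []string
        if p.Description != nil {
            paths = append(paths, "description")
        }
        if p.RoutingPolicy != nil {
            paths = append(paths, "routing_policy")
        }
        return paths
    }

    func main() {
        desc := "batch profile"
        fmt.Println(profileUpdate{Description: &desc}.fieldMaskPaths()) // [description]
    }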
@@ -992,7 +1022,7 @@ func (iac *InstanceAdminClient) CreateAppProfile(ctx context.Context, profile Pr
 		Parent:         parent,
 		AppProfile:     appProfile,
 		AppProfileId:   profile.ProfileID,
-		IgnoreWarnings: true,
+		IgnoreWarnings: profile.IgnoreWarnings,
 	})
 }
 
@@ -1002,8 +1032,16 @@ func (iac *InstanceAdminClient) GetAppProfile(ctx context.Context, instanceID, n
 	profileRequest := &btapb.GetAppProfileRequest{
 		Name: "projects/" + iac.project + "/instances/" + instanceID + "/appProfiles/" + name,
 	}
-	return iac.iClient.GetAppProfile(ctx, profileRequest)
+	var ap *btapb.AppProfile
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		ap, err = iac.iClient.GetAppProfile(ctx, profileRequest)
+		return err
+	}, retryOptions...)
+	if err != nil {
+		return nil, err
+	}
+	return ap, err
 }
 
 // ListAppProfiles lists information about app profiles in an instance.
@@ -1016,14 +1054,17 @@ func (iac *InstanceAdminClient) ListAppProfiles(ctx context.Context, instanceID
 	pit := &ProfileIterator{}
 	fetch := func(pageSize int, pageToken string) (string, error) {
 		listRequest.PageToken = pageToken
-		profileRes, err := iac.iClient.ListAppProfiles(ctx, listRequest)
+		var profileRes *btapb.ListAppProfilesResponse
+		err := gax.Invoke(ctx, func(ctx context.Context) error {
+			var err error
+			profileRes, err = iac.iClient.ListAppProfiles(ctx, listRequest)
+			return err
+		}, retryOptions...)
 		if err != nil {
 			return "", err
 		}
 
-		for _, a := range profileRes.AppProfiles {
-			pit.items = append(pit.items, a)
-		}
+		pit.items = append(pit.items, profileRes.AppProfiles...)
 		return profileRes.NextPageToken, nil
 	}
 
@@ -1068,7 +1109,7 @@ func (iac *InstanceAdminClient) UpdateAppProfile(ctx context.Context, instanceID
 		UpdateMask: &field_mask.FieldMask{
 			Paths: updateAttrs.GetFieldMaskPath(),
 		},
-		IgnoreWarnings: true,
+		IgnoreWarnings: updateAttrs.IgnoreWarnings,
 	}
 	updateRequest, err := iac.iClient.UpdateAppProfile(ctx, patchRequest)
 	if err != nil {
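A side note on the ListAppProfiles change: append(pit.items, profileRes.AppProfiles...) with the spread operator is the idiomatic one-line replacement for the removed per-element loop:

    package main

    import "fmt"

    func main() {
        items := []string{"a"}
        page := []string{"b", "c"}
        // A single append with ... replaces the element-by-element loop.
        items = append(items, page...)
        fmt.Println(items) // [a b c]
    }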
4 vendor/cloud.google.com/go/bigtable/admin_test.go generated vendored
@@ -15,6 +15,7 @@
 package bigtable
 
 import (
+	"context"
 	"fmt"
 	"math"
 	"sort"
@@ -24,7 +25,6 @@ import (
 
 	"cloud.google.com/go/internal/testutil"
 	"github.com/golang/protobuf/proto"
-	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
 	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
 )
@@ -173,7 +173,7 @@ func TestAdminIntegration(t *testing.T) {
 
 	var gotRowCount int
 	must(tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
-		gotRowCount += 1
+		gotRowCount++
 		if !strings.HasPrefix(row.Key(), "b") {
 			t.Errorf("Invalid row after dropping range: %v", row)
 		}
59 vendor/cloud.google.com/go/bigtable/bigtable.go generated vendored
@@ -17,6 +17,7 @@ limitations under the License.
 package bigtable // import "cloud.google.com/go/bigtable"
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -26,7 +27,6 @@ import (
 	"cloud.google.com/go/bigtable/internal/gax"
 	btopt "cloud.google.com/go/bigtable/internal/option"
 	"github.com/golang/protobuf/proto"
-	"golang.org/x/net/context"
 	"google.golang.org/api/option"
 	gtransport "google.golang.org/api/transport/grpc"
 	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
@@ -45,11 +45,6 @@ type Client struct {
 	conn              *grpc.ClientConn
 	client            btpb.BigtableClient
 	project, instance string
-	// App Profiles are part of the private alpha release of Cloud Bigtable replication.
-	// This feature
-	// is not currently available to most Cloud Bigtable customers. This feature
-	// might be changed in backward-incompatible ways and is not recommended for
-	// production use. It is not subject to any SLA or deprecation policy.
 	appProfile string
 }
 
@@ -66,6 +61,7 @@ func NewClient(ctx context.Context, project, instance string, opts ...option.Cli
 	return NewClientWithConfig(ctx, project, instance, ClientConfig{}, opts...)
 }
 
+// NewClientWithConfig creates a new client with the given config.
 func NewClientWithConfig(ctx context.Context, project, instance string, config ClientConfig, opts ...option.ClientOption) (*Client, error) {
 	o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
 	if err != nil {
@@ -75,7 +71,7 @@ func NewClientWithConfig(ctx context.Context, project, instance string, config C
 	o = append(o,
 		option.WithGRPCConnectionPool(4),
 		// Set the max size to correspond to server-side limits.
-		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))),
+		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1<<28), grpc.MaxCallRecvMsgSize(1<<28))),
 		// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
 		// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
 		option.WithGRPCDialOption(grpc.WithBlock()))
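The new dial options raise the per-message gRPC limits from 100<<20 bytes (100 MiB) to 1<<28 bytes (256 MiB). The shift arithmetic, spelled out:

    package main

    import "fmt"

    func main() {
        oldLimit := 100 << 20 // 100 * 2^20 = 104857600 bytes (100 MiB)
        newLimit := 1 << 28   // 2^28      = 268435456 bytes (256 MiB)
        fmt.Println(oldLimit, newLimit) // 104857600 268435456
    }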
@@ -468,7 +464,10 @@ func mutationsAreRetryable(muts []*btpb.Mutation) bool {
 	return true
 }
 
-// Apply applies a Mutation to a specific row.
+const maxMutations = 100000
+
+// Apply mutates a row atomically. A mutation must contain at least one
+// operation and at most 100000 operations.
 func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
 	ctx = mergeOutgoingMetadata(ctx, t.md)
 	after := func(res proto.Message) {
@@ -657,23 +656,22 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio
 		origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
 	}
 
-	// entries will be reduced after each invocation to just what needs to be retried.
-	entries := make([]*entryErr, len(rowKeys))
-	copy(entries, origEntries)
 	var err error
 	ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/ApplyBulk")
 	defer func() { traceEndSpan(ctx, err) }()
 
+	for _, group := range groupEntries(origEntries, maxMutations) {
 	attrMap := make(map[string]interface{})
 	err = gax.Invoke(ctx, func(ctx context.Context) error {
-		attrMap["rowCount"] = len(entries)
+		attrMap["rowCount"] = len(group)
 		tracePrintf(ctx, attrMap, "Row count in ApplyBulk")
-		err := t.doApplyBulk(ctx, entries, opts...)
+		err := t.doApplyBulk(ctx, group, opts...)
 		if err != nil {
-			// We want to retry the entire request with the current entries
+			// We want to retry the entire request with the current group
 			return err
 		}
-		entries = t.getApplyBulkRetries(entries)
-		if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
+		group = t.getApplyBulkRetries(group)
+		if len(group) > 0 && len(idempotentRetryCodes) > 0 {
 			// We have at least one mutation that needs to be retried.
 			// Return an arbitrary error that is retryable according to callOptions.
 			return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
@@ -683,6 +681,7 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio
 		if err != nil {
 			return nil, err
 		}
+	}
 
 	// Accumulate all of the errors into an array to return, interspersed with nils for successful
 	// entries. The absence of any errors means we should return nil.
@@ -756,6 +755,32 @@ func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...
 	return nil
 }
 
+// groupEntries groups entries into groups of a specified size without breaking up
+// individual entries.
+func groupEntries(entries []*entryErr, maxSize int) [][]*entryErr {
+	var (
+		res   [][]*entryErr
+		start int
+		gmuts int
+	)
+	addGroup := func(end int) {
+		if end-start > 0 {
+			res = append(res, entries[start:end])
+			start = end
+			gmuts = 0
+		}
+	}
+	for i, e := range entries {
+		emuts := len(e.Entry.Mutations)
+		if gmuts+emuts > maxSize {
+			addGroup(i)
+		}
+		gmuts += emuts
+	}
+	addGroup(len(entries))
+	return res
+}
+
 // Timestamp is in units of microseconds since 1 January 1970.
 type Timestamp int64
 
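groupEntries packs consecutive entries into batches whose running mutation count stays at or below maxSize; because the overflow check runs before an entry is added, a single oversized entry still lands in its own group rather than being split. A worked example that mirrors the same logic on plain integer counts (entryErr is unexported, so this sketch uses its own types):

    package main

    import "fmt"

    // groupSizes mirrors the grouping logic of groupEntries using plain
    // per-entry mutation counts instead of *entryErr values.
    func groupSizes(sizes []int, maxSize int) [][]int {
        var res [][]int
        start, gmuts := 0, 0
        addGroup := func(end int) {
            if end-start > 0 {
                res = append(res, sizes[start:end])
                start, gmuts = end, 0
            }
        }
        for i, n := range sizes {
            if gmuts+n > maxSize {
                addGroup(i)
            }
            gmuts += n
        }
        addGroup(len(sizes))
        return res
    }

    func main() {
        fmt.Println(groupSizes([]int{5, 5, 5}, 10)) // [[5 5] [5]]
        fmt.Println(groupSizes([]int{15, 5}, 10))   // [[15] [5]]
    }

The two printed cases match the "entries together over max size" and "one entry over max size" cases in the test file below.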
@@ -849,6 +874,8 @@ func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context
 	return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md))
 }
 
+// SampleRowKeys returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of
+// the table of approximately equal size, which can be used to break up the data for distributed tasks like mapreduces.
 func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) {
 	ctx = mergeOutgoingMetadata(ctx, t.md)
 	var sampledRowKeys []string
110 vendor/cloud.google.com/go/bigtable/bigtable_test.go generated vendored
@@ -17,6 +17,7 @@ limitations under the License.
 package bigtable
 
 import (
+	"context"
 	"fmt"
 	"math/rand"
 	"strings"
@@ -25,9 +26,9 @@ import (
 	"time"
 
 	"cloud.google.com/go/internal/testutil"
+	"github.com/google/go-cmp/cmp"
-	"golang.org/x/net/context"
 	"google.golang.org/api/option"
+	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
 	"google.golang.org/grpc"
 )
@@ -79,7 +80,86 @@ func TestApplyErrors(t *testing.T) {
 	}
 }
 
+func TestGroupEntries(t *testing.T) {
+	tests := []struct {
+		desc string
+		in   []*entryErr
+		size int
+		want [][]*entryErr
+	}{
+		{
+			desc: "one entry less than max size is one group",
+			in:   []*entryErr{buildEntry(5)},
+			size: 10,
+			want: [][]*entryErr{{buildEntry(5)}},
+		},
+		{
+			desc: "one entry equal to max size is one group",
+			in:   []*entryErr{buildEntry(10)},
+			size: 10,
+			want: [][]*entryErr{{buildEntry(10)}},
+		},
+		{
+			desc: "one entry greater than max size is one group",
+			in:   []*entryErr{buildEntry(15)},
+			size: 10,
+			want: [][]*entryErr{{buildEntry(15)}},
+		},
+		{
+			desc: "all entries fitting within max size are one group",
+			in:   []*entryErr{buildEntry(10), buildEntry(10)},
+			size: 20,
+			want: [][]*entryErr{{buildEntry(10), buildEntry(10)}},
+		},
+		{
+			desc: "entries each under max size and together over max size are grouped separately",
+			in:   []*entryErr{buildEntry(10), buildEntry(10)},
+			size: 15,
+			want: [][]*entryErr{{buildEntry(10)}, {buildEntry(10)}},
+		},
+		{
+			desc: "entries together over max size are grouped by max size",
+			in:   []*entryErr{buildEntry(5), buildEntry(5), buildEntry(5)},
+			size: 10,
+			want: [][]*entryErr{{buildEntry(5), buildEntry(5)}, {buildEntry(5)}},
+		},
+		{
+			desc: "one entry over max size and one entry under max size are two groups",
+			in:   []*entryErr{buildEntry(15), buildEntry(5)},
+			size: 10,
+			want: [][]*entryErr{{buildEntry(15)}, {buildEntry(5)}},
+		},
+	}
+
+	for _, test := range tests {
+		if got, want := groupEntries(test.in, test.size), test.want; !cmp.Equal(mutationCounts(got), mutationCounts(want)) {
+			t.Errorf("[%s] want = %v, got = %v", test.desc, mutationCounts(want), mutationCounts(got))
+		}
+	}
+}
+
+func buildEntry(numMutations int) *entryErr {
+	var muts []*btpb.Mutation
+	for i := 0; i < numMutations; i++ {
+		muts = append(muts, &btpb.Mutation{})
+	}
+	return &entryErr{Entry: &btpb.MutateRowsRequest_Entry{Mutations: muts}}
+}
+
+func mutationCounts(batched [][]*entryErr) []int {
+	var res []int
+	for _, entries := range batched {
+		var count int
+		for _, e := range entries {
+			count += len(e.Entry.Mutations)
+		}
+		res = append(res, count)
+	}
+	return res
+}
+
 func TestClientIntegration(t *testing.T) {
+	// TODO(jba): go1.9: Use subtests.
 	start := time.Now()
 	lastCheckpoint := start
 	checkpoint := func(s string) {
@@ -98,7 +178,7 @@ func TestClientIntegration(t *testing.T) {
 		timeout = 10 * time.Minute
 		t.Logf("Running test against production")
 	} else {
-		timeout = 1 * time.Minute
+		timeout = 5 * time.Minute
 		t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint)
 	}
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
@@ -237,6 +317,12 @@ func TestClientIntegration(t *testing.T) {
 		filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson"
 		want:   "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
 	},
+	{
+		desc:   "read all, with ColumnFilter, prefix",
+		rr:     RowRange{},
+		filter: ColumnFilter("j"), // no matches
+		want:   "",
+	},
 	{
 		desc: "read range, with ColumnRangeFilter",
 		rr:   RowRange{},
@@ -261,6 +347,12 @@ func TestClientIntegration(t *testing.T) {
 		filter: RowKeyFilter(".*wash.*"),
 		want:   "gwashington-jadams-1",
 	},
+	{
+		desc:   "read with RowKeyFilter, prefix",
+		rr:     RowRange{},
+		filter: RowKeyFilter("gwash"),
+		want:   "",
+	},
 	{
 		desc: "read with RowKeyFilter, no matches",
 		rr:   RowRange{},
@@ -369,7 +461,7 @@ func TestClientIntegration(t *testing.T) {
 		opts = append(opts, tc.limit)
 	}
 	var elt []string
-	err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool {
+	err := tbl.ReadRows(ctx, tc.rr, func(r Row) bool {
 		for _, ris := range r {
 			for _, ri := range ris {
 				elt = append(elt, formatReadItem(ri))
@@ -385,6 +477,7 @@ func TestClientIntegration(t *testing.T) {
 		t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want)
 	}
 }
+
 // Read a RowList
 var elt []string
 keys := RowList{"wmckinley", "gwashington", "jadams"}
@@ -494,11 +587,11 @@ func TestClientIntegration(t *testing.T) {
 }
 
 // Check for google-cloud-go/issues/723. RMWs that insert new rows should keep row order sorted in the emulator.
-row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-2", appendRMW([]byte{0}))
+_, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-2", appendRMW([]byte{0}))
 if err != nil {
 	t.Fatalf("ApplyReadModifyWrite null string: %v", err)
 }
-row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-1", appendRMW([]byte{0}))
+_, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-1", appendRMW([]byte{0}))
 if err != nil {
 	t.Fatalf("ApplyReadModifyWrite null string: %v", err)
 }
@@ -713,7 +806,7 @@ func TestClientIntegration(t *testing.T) {
 	},
 }
 if !testutil.Equal(r, wantRow) {
-	t.Errorf("Column family was deleted unexpectly.\n got %v\n want %v", r, wantRow)
+	t.Errorf("Column family was deleted unexpectedly.\n got %v\n want %v", r, wantRow)
 }
 checkpoint("tested family delete")
 
@@ -875,6 +968,9 @@ func TestClientIntegration(t *testing.T) {
 	rc++
 	return true
 }, LimitRows(int64(wantRc)))
+if err != nil {
+	t.Fatal(err)
+}
 if rc != wantRc {
 	t.Errorf("Scan with row limit returned %d rows, want %d", rc, wantRc)
 }
2 vendor/cloud.google.com/go/bigtable/bttest/example_test.go generated vendored
@@ -16,12 +16,12 @@ limitations under the License.
 package bttest_test
 
 import (
+	"context"
 	"fmt"
 	"log"
 
 	"cloud.google.com/go/bigtable"
 	"cloud.google.com/go/bigtable/bttest"
-	"golang.org/x/net/context"
 	"google.golang.org/api/option"
 	"google.golang.org/grpc"
 )
231 vendor/cloud.google.com/go/bigtable/bttest/inmem.go generated vendored
@@ -30,6 +30,8 @@ To use a Server, create it, and then connect to it with no security:
 package bttest // import "cloud.google.com/go/bigtable/bttest"
 
 import (
+	"bytes"
+	"context"
 	"encoding/binary"
 	"fmt"
 	"log"
@@ -41,14 +43,12 @@ import (
 	"sync"
 	"time"
 
-	"bytes"
-
 	emptypb "github.com/golang/protobuf/ptypes/empty"
 	"github.com/golang/protobuf/ptypes/wrappers"
 	"github.com/google/btree"
-	"golang.org/x/net/context"
 	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
 	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
+	"google.golang.org/genproto/googleapis/longrunning"
 	statpb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -63,6 +63,10 @@ const (
 	maxValidMilliSeconds = int64(time.Millisecond) * 253402300800
 )
 
+var (
+	validLabelTransformer = regexp.MustCompile(`[a-z0-9\-]{1,15}`)
+)
+
 // Server is an in-memory Cloud Bigtable fake.
 // It is unauthenticated, and only a rough approximation.
 type Server struct {
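One detail worth noting about validLabelTransformer: the pattern `[a-z0-9\-]{1,15}` carries no ^/$ anchors, so MatchString reports true whenever any substring matches, not only when the whole label is well formed. A quick demonstration of the difference (the anchored variant is shown only for comparison; it is not taken from the diff):

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        unanchored := regexp.MustCompile(`[a-z0-9\-]{1,15}`)
        anchored := regexp.MustCompile(`^[a-z0-9\-]{1,15}$`)

        // The whole string is invalid, but it contains valid runs like "ad".
        label := "Bad_Label!"
        fmt.Println(unanchored.MatchString(label)) // true
        fmt.Println(anchored.MatchString(label))   // false
    }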
@@ -137,6 +141,10 @@ func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest)
 	return &btapb.Table{Name: tbl}, nil
 }
 
+func (s *server) CreateTableFromSnapshot(context.Context, *btapb.CreateTableFromSnapshotRequest) (*longrunning.Operation, error) {
+	return nil, status.Errorf(codes.Unimplemented, "the emulator does not currently support snapshots")
+}
+
 func (s *server) ListTables(ctx context.Context, req *btapb.ListTablesRequest) (*btapb.ListTablesResponse, error) {
 	res := &btapb.ListTablesResponse{}
 	prefix := req.Parent + "/tables/"
@@ -179,8 +187,6 @@ func (s *server) DeleteTable(ctx context.Context, req *btapb.DeleteTableRequest)
 }
 
 func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColumnFamiliesRequest) (*btapb.Table, error) {
-	tblName := req.Name[strings.LastIndex(req.Name, "/")+1:]
-
 	s.mu.Lock()
 	tbl, ok := s.tables[req.Name]
 	s.mu.Unlock()
@@ -224,7 +230,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu
 
 	s.needGC()
 	return &btapb.Table{
-		Name:           tblName,
+		Name:           req.Name,
 		ColumnFamilies: toColumnFamilies(tbl.families),
 		Granularity:    btapb.Table_TimestampGranularity(btapb.Table_MILLIS),
 	}, nil
@@ -257,9 +263,8 @@ func (s *server) DropRowRange(ctx context.Context, req *btapb.DropRowRangeReques
 		if strings.HasPrefix(r.key, prefix) {
 			rowsToDelete = append(rowsToDelete, r)
 			return true
-		} else {
-			return false // stop iteration
 		}
+		return false // stop iteration
 	})
 	for _, r := range rowsToDelete {
 		tbl.rows.Delete(r)
@@ -268,10 +273,6 @@ func (s *server) DropRowRange(ctx context.Context, req *btapb.DropRowRangeReques
 	return &emptypb.Empty{}, nil
 }
 
-// This is a private alpha release of Cloud Bigtable replication. This feature
-// is not currently available to most Cloud Bigtable customers. This feature
-// might be changed in backward-incompatible ways and is not recommended for
-// production use. It is not subject to any SLA or deprecation policy.
 func (s *server) GenerateConsistencyToken(ctx context.Context, req *btapb.GenerateConsistencyTokenRequest) (*btapb.GenerateConsistencyTokenResponse, error) {
 	// Check that the table exists.
 	_, ok := s.tables[req.Name]
@@ -284,10 +285,6 @@ func (s *server) GenerateConsistencyToken(ctx context.Context, req *btapb.Genera
 	}, nil
 }
 
-// This is a private alpha release of Cloud Bigtable replication. This feature
-// is not currently available to most Cloud Bigtable customers. This feature
-// might be changed in backward-incompatible ways and is not recommended for
-// production use. It is not subject to any SLA or deprecation policy.
 func (s *server) CheckConsistency(ctx context.Context, req *btapb.CheckConsistencyRequest) (*btapb.CheckConsistencyResponse, error) {
 	// Check that the table exists.
 	_, ok := s.tables[req.Name]
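The emulator keeps satisfying the generated BigtableTableAdmin server interface by stubbing every snapshot RPC with codes.Unimplemented, the conventional way to let a fake compile against a gRPC service that has grown new methods. Callers can detect the stub cleanly through status.Code:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // snapshotStub has the same shape as the emulator's stubs below.
    func snapshotStub() error {
        return status.Errorf(codes.Unimplemented, "the emulator does not currently support snapshots")
    }

    func main() {
        err := snapshotStub()
        // status.Code extracts the gRPC code (codes.Unknown for non-status errors).
        fmt.Println(status.Code(err) == codes.Unimplemented) // true
    }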
@@ -306,6 +303,20 @@ func (s *server) CheckConsistency(ctx context.Context, req *btapb.CheckConsisten
 	}, nil
 }
 
+func (s *server) SnapshotTable(context.Context, *btapb.SnapshotTableRequest) (*longrunning.Operation, error) {
+	return nil, status.Errorf(codes.Unimplemented, "the emulator does not currently support snapshots")
+}
+
+func (s *server) GetSnapshot(context.Context, *btapb.GetSnapshotRequest) (*btapb.Snapshot, error) {
+	return nil, status.Errorf(codes.Unimplemented, "the emulator does not currently support snapshots")
+}
+
+func (s *server) ListSnapshots(context.Context, *btapb.ListSnapshotsRequest) (*btapb.ListSnapshotsResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "the emulator does not currently support snapshots")
+}
+
+func (s *server) DeleteSnapshot(context.Context, *btapb.DeleteSnapshotRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "the emulator does not currently support snapshots")
+}
+
 func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error {
 	s.mu.Lock()
 	tbl, ok := s.tables[req.TableName]
@@ -369,8 +380,14 @@ func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRo
 
 	rows := make([]*row, 0, len(rowSet))
 	for _, r := range rowSet {
+		r.mu.Lock()
+		fams := len(r.families)
+		r.mu.Unlock()
+
+		if fams != 0 {
 			rows = append(rows, r)
 		}
+	}
 	sort.Sort(byRowKey(rows))
 
 	limit := int(req.RowsLimit)
@@ -398,7 +415,11 @@ func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (
 	r.mu.Unlock()
 	r = nr
 
-	if !filterRow(f, r) {
+	match, err := filterRow(f, r)
+	if err != nil {
+		return false, err
+	}
+	if !match {
 		return false, nil
 	}
 
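Changing filterRow from bool to (bool, error) forces every call site to distinguish "filter did not match" from "filter is invalid", instead of logging and silently returning false as the old regex branch did. The propagation pattern in miniature (the types here are illustrative, not the emulator's):

    package main

    import (
        "errors"
        "fmt"
    )

    type filter struct {
        invalid bool
        match   bool
        chain   []*filter
    }

    // eval mirrors filterRow's shape: an invalid filter surfaces an error,
    // while a valid non-matching filter returns (false, nil).
    func eval(f *filter) (bool, error) {
        if f.invalid {
            return false, errors.New("invalid filter")
        }
        if len(f.chain) > 0 {
            for _, sub := range f.chain {
                match, err := eval(sub)
                if err != nil {
                    return false, err // propagate instead of swallowing
                }
                if !match {
                    return false, nil
                }
            }
            return true, nil
        }
        return f.match, nil
    }

    func main() {
        ok, err := eval(&filter{chain: []*filter{{match: true}, {invalid: true}}})
        fmt.Println(ok, err) // false invalid filter
    }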
@@ -410,7 +431,6 @@ func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (
 		if len(cells) == 0 {
 			continue
 		}
-		// TODO(dsymonds): Apply transformers.
 		for _, cell := range cells {
 			rrr.Chunks = append(rrr.Chunks, &btpb.ReadRowsResponse_CellChunk{
 				RowKey: []byte(r.key),
@@ -418,6 +438,7 @@ func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (
 				Qualifier:       &wrappers.BytesValue{Value: []byte(colName)},
 				TimestampMicros: cell.ts,
 				Value:           cell.value,
+				Labels:          cell.labels,
 			})
 		}
 	}
@@ -432,24 +453,28 @@ func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (
 }
 
 // filterRow modifies a row with the given filter. Returns true if at least one cell from the row matches,
-// false otherwise.
-func filterRow(f *btpb.RowFilter, r *row) bool {
+// false otherwise. If a filter is invalid, filterRow returns false and an error.
+func filterRow(f *btpb.RowFilter, r *row) (bool, error) {
 	if f == nil {
-		return true
+		return true, nil
 	}
 	// Handle filters that apply beyond just including/excluding cells.
 	switch f := f.Filter.(type) {
 	case *btpb.RowFilter_BlockAllFilter:
-		return !f.BlockAllFilter
+		return !f.BlockAllFilter, nil
 	case *btpb.RowFilter_PassAllFilter:
-		return f.PassAllFilter
+		return f.PassAllFilter, nil
 	case *btpb.RowFilter_Chain_:
 		for _, sub := range f.Chain.Filters {
-			if !filterRow(sub, r) {
-				return false
+			match, err := filterRow(sub, r)
+			if err != nil {
+				return false, err
+			}
+			if !match {
+				return false, nil
 			}
 		}
-		return true
+		return true, nil
 	case *btpb.RowFilter_Interleave_:
 		srs := make([]*row, 0, len(f.Interleave.Filters))
 		for _, sub := range f.Interleave.Filters {
@@ -475,7 +500,7 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
 				count += len(cs)
 			}
 		}
-		return count > 0
+		return count > 0, nil
 	case *btpb.RowFilter_CellsPerColumnLimitFilter:
 		lim := int(f.CellsPerColumnLimitFilter)
 		for _, fam := range r.families {
@@ -485,27 +510,29 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
 			}
 		}
-		return true
+		return true, nil
 	case *btpb.RowFilter_Condition_:
-		if filterRow(f.Condition.PredicateFilter, r.copy()) {
+		match, err := filterRow(f.Condition.PredicateFilter, r.copy())
+		if err != nil {
+			return false, err
+		}
+		if match {
 			if f.Condition.TrueFilter == nil {
-				return false
+				return false, nil
 			}
 			return filterRow(f.Condition.TrueFilter, r)
 		}
 		if f.Condition.FalseFilter == nil {
-			return false
+			return false, nil
 		}
 		return filterRow(f.Condition.FalseFilter, r)
 	case *btpb.RowFilter_RowKeyRegexFilter:
-		pat := string(f.RowKeyRegexFilter)
-		rx, err := regexp.Compile(pat)
+		rx, err := newRegexp(f.RowKeyRegexFilter)
 		if err != nil {
-			log.Printf("Bad rowkey_regex_filter pattern %q: %v", pat, err)
-			return false
+			return false, status.Errorf(codes.InvalidArgument, "Error in field 'rowkey_regex_filter' : %v", err)
 		}
 		if !rx.MatchString(r.key) {
-			return false
+			return false, nil
 		}
 	case *btpb.RowFilter_CellsPerRowLimitFilter:
 		// Grab the first n cells in the row.
@@ -521,7 +548,7 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
 				}
 			}
 		}
-		return true
+		return true, nil
 	case *btpb.RowFilter_CellsPerRowOffsetFilter:
 		// Skip the first n cells in the row.
 		offset := int(f.CellsPerRowOffsetFilter)
@ -531,96 +558,122 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
|
|||||||
if len(cs) > offset {
|
if len(cs) > offset {
|
||||||
fam.cells[col] = cs[offset:]
|
fam.cells[col] = cs[offset:]
|
||||||
offset = 0
|
offset = 0
|
||||||
return true
|
return true, nil
|
||||||
} else {
|
}
|
||||||
fam.cells[col] = cs[:0]
|
fam.cells[col] = cs[:0]
|
||||||
offset -= len(cs)
|
offset -= len(cs)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return true, nil
|
||||||
|
case *btpb.RowFilter_RowSampleFilter:
|
||||||
|
// The row sample filter "matches all cells from a row with probability
|
||||||
|
// p, and matches no cells from the row with probability 1-p."
|
||||||
|
// See https://github.com/googleapis/googleapis/blob/master/google/bigtable/v2/data.proto
|
||||||
|
if f.RowSampleFilter <= 0.0 || f.RowSampleFilter >= 1.0 {
|
||||||
|
return false, status.Error(codes.InvalidArgument, "row_sample_filter argument must be between 0.0 and 1.0")
|
||||||
}
|
}
|
||||||
return true
|
return randFloat() < f.RowSampleFilter, nil
|
||||||
}
|
}
|
||||||
 
 // Any other case, operate on a per-cell basis.
 cellCount := 0
 for _, fam := range r.families {
 for colName, cs := range fam.cells {
-fam.cells[colName] = filterCells(f, fam.name, colName, cs)
+filtered, err := filterCells(f, fam.name, colName, cs)
+if err != nil {
+return false, err
+}
+fam.cells[colName] = filtered
 cellCount += len(fam.cells[colName])
 }
 }
-return cellCount > 0
+return cellCount > 0, nil
 }
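
Aside: with this change every row-level filter evaluation can fail, so callers must thread the error out instead of relying on logged warnings. A minimal sketch of the calling pattern; the helper name sendFilteredRows and the send callback are illustrative, not part of this diff:

// Hypothetical caller sketch: consume filterRow's new (bool, error) result.
func sendFilteredRows(f *btpb.RowFilter, rows []*row, send func(*row) error) error {
	for _, r := range rows {
		match, err := filterRow(f, r)
		if err != nil {
			return err // already a gRPC status, e.g. codes.InvalidArgument for a bad regex
		}
		if !match {
			continue // filtered out, not an error
		}
		if err := send(r); err != nil {
			return err
		}
	}
	return nil
}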
 
-func filterCells(f *btpb.RowFilter, fam, col string, cs []cell) []cell {
+var randFloat = rand.Float64
+
+func filterCells(f *btpb.RowFilter, fam, col string, cs []cell) ([]cell, error) {
 var ret []cell
 for _, cell := range cs {
-if includeCell(f, fam, col, cell) {
-cell = modifyCell(f, cell)
+include, err := includeCell(f, fam, col, cell)
+if err != nil {
+return nil, err
+}
+if include {
+cell, err = modifyCell(f, cell)
+if err != nil {
+return nil, err
+}
 ret = append(ret, cell)
 }
 }
-return ret
+return ret, nil
 }
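
Aside: randFloat is a package-level seam rather than a direct call to rand.Float64, which lets tests pin the sampling decision. A sketch of how a test can make RowSampleFilter deterministic; this is exactly what TestFilterRowWithRowSampleFilter later in this diff does:

// Stub the random source, restore it when the test ends.
prev := randFloat
randFloat = func() float64 { return 0.5 }
defer func() { randFloat = prev }()
// filterRow with RowSampleFilter p now returns (0.5 < p) deterministically:
// p = 0.1 -> false, p = 0.5 -> false, p = 0.9 -> true.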
 
-func modifyCell(f *btpb.RowFilter, c cell) cell {
+func modifyCell(f *btpb.RowFilter, c cell) (cell, error) {
 if f == nil {
-return c
+return c, nil
 }
 // Consider filters that may modify the cell contents
-switch f.Filter.(type) {
+switch filter := f.Filter.(type) {
 case *btpb.RowFilter_StripValueTransformer:
-return cell{ts: c.ts}
+return cell{ts: c.ts}, nil
+case *btpb.RowFilter_ApplyLabelTransformer:
+if !validLabelTransformer.MatchString(filter.ApplyLabelTransformer) {
+return cell{}, status.Errorf(
+codes.InvalidArgument,
+`apply_label_transformer must match RE2([a-z0-9\-]+), but found %v`,
+filter.ApplyLabelTransformer,
+)
+}
+return cell{ts: c.ts, value: c.value, labels: []string{filter.ApplyLabelTransformer}}, nil
 default:
-return c
+return c, nil
 }
 }
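
Aside: the new ApplyLabelTransformer arm relies on a validLabelTransformer regexp declared elsewhere in inmem.go; it is not shown in this hunk, so the declaration below is an assumption for illustration:

// Assumed declaration: labels must be non-empty lowercase alphanumerics or '-'.
var validLabelTransformer = regexp.MustCompile(`^[a-z0-9\-]+$`)

// So a cell {ts: 100, value: "val"} filtered with ApplyLabelTransformer "label"
// becomes {ts: 100, value: "val", labels: ["label"]}, while "" or "LABEL"
// yields a codes.InvalidArgument status.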
 
-func includeCell(f *btpb.RowFilter, fam, col string, cell cell) bool {
+func includeCell(f *btpb.RowFilter, fam, col string, cell cell) (bool, error) {
 if f == nil {
-return true
+return true, nil
 }
 // TODO(dsymonds): Implement many more filters.
 switch f := f.Filter.(type) {
 case *btpb.RowFilter_CellsPerColumnLimitFilter:
 // Don't log, row-level filter
-return true
+return true, nil
 case *btpb.RowFilter_RowKeyRegexFilter:
 // Don't log, row-level filter
-return true
+return true, nil
 case *btpb.RowFilter_StripValueTransformer:
 // Don't log, cell-modifying filter
-return true
+return true, nil
+case *btpb.RowFilter_ApplyLabelTransformer:
+// Don't log, cell-modifying filter
+return true, nil
 default:
 log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f)
-return true
+return true, nil
 case *btpb.RowFilter_FamilyNameRegexFilter:
-pat := string(f.FamilyNameRegexFilter)
-rx, err := regexp.Compile(pat)
+rx, err := newRegexp([]byte(f.FamilyNameRegexFilter))
 if err != nil {
-log.Printf("Bad family_name_regex_filter pattern %q: %v", pat, err)
-return false
+return false, status.Errorf(codes.InvalidArgument, "Error in field 'family_name_regex_filter' : %v", err)
 }
-return rx.MatchString(fam)
+return rx.MatchString(fam), nil
 case *btpb.RowFilter_ColumnQualifierRegexFilter:
-pat := string(f.ColumnQualifierRegexFilter)
-rx, err := regexp.Compile(pat)
+rx, err := newRegexp(f.ColumnQualifierRegexFilter)
 if err != nil {
-log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err)
-return false
+return false, status.Errorf(codes.InvalidArgument, "Error in field 'column_qualifier_regex_filter' : %v", err)
 }
-return rx.MatchString(col)
+return rx.MatchString(toUTF8([]byte(col))), nil
 case *btpb.RowFilter_ValueRegexFilter:
-pat := string(f.ValueRegexFilter)
-rx, err := regexp.Compile(pat)
+rx, err := newRegexp(f.ValueRegexFilter)
 if err != nil {
-log.Printf("Bad value_regex_filter pattern %q: %v", pat, err)
-return false
+return false, status.Errorf(codes.InvalidArgument, "Error in field 'value_regex_filter' : %v", err)
 }
-return rx.Match(cell.value)
+return rx.Match(cell.value), nil
 case *btpb.RowFilter_ColumnRangeFilter:
 if fam != f.ColumnRangeFilter.FamilyName {
-return false
+return false, nil
 }
 // Start qualifier defaults to empty string closed
 inRangeStart := func() bool { return col >= "" }
@@ -638,11 +691,11 @@ func includeCell(f *btpb.RowFilter, fam, col string, cell cell) bool {
 case *btpb.ColumnRange_EndQualifierOpen:
 inRangeEnd = func() bool { return col < string(eq.EndQualifierOpen) }
 }
-return inRangeStart() && inRangeEnd()
+return inRangeStart() && inRangeEnd(), nil
 case *btpb.RowFilter_TimestampRangeFilter:
 // Lower bound is inclusive and defaults to 0, upper bound is exclusive and defaults to infinity.
 return cell.ts >= f.TimestampRangeFilter.StartTimestampMicros &&
-(f.TimestampRangeFilter.EndTimestampMicros == 0 || cell.ts < f.TimestampRangeFilter.EndTimestampMicros)
+(f.TimestampRangeFilter.EndTimestampMicros == 0 || cell.ts < f.TimestampRangeFilter.EndTimestampMicros), nil
 case *btpb.RowFilter_ValueRangeFilter:
 v := cell.value
 // Start value defaults to empty string closed
@@ -661,10 +714,27 @@ func includeCell(f *btpb.RowFilter, fam, col string, cell cell) bool {
 case *btpb.ValueRange_EndValueOpen:
 inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueOpen) < 0 }
 }
-return inRangeStart() && inRangeEnd()
+return inRangeStart() && inRangeEnd(), nil
 }
 }
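
Aside: the column and value range cases share one pattern: a pair of boolean closures that default to "no bound" and are narrowed by whichever oneof arm is set. A condensed sketch of the pattern, using the column-range arms visible above:

inRangeStart := func() bool { return col >= "" } // start defaults to empty string, closed
inRangeEnd := func() bool { return true }        // end defaults to unbounded
switch eq := f.ColumnRangeFilter.EndQualifier.(type) {
case *btpb.ColumnRange_EndQualifierClosed:
	inRangeEnd = func() bool { return col <= string(eq.EndQualifierClosed) }
case *btpb.ColumnRange_EndQualifierOpen:
	inRangeEnd = func() bool { return col < string(eq.EndQualifierOpen) }
}
return inRangeStart() && inRangeEnd(), nil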
+
+func toUTF8(bs []byte) string {
+var rs []rune
+for _, b := range bs {
+rs = append(rs, rune(b))
+}
+return string(rs)
+}
+
+func newRegexp(patBytes []byte) (*regexp.Regexp, error) {
+pat := toUTF8(patBytes)
+re, err := regexp.Compile("^" + pat + "$") // match entire target
+if err != nil {
+log.Printf("Bad pattern %q: %v", pat, err)
+}
+return re, err
+}
 
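Aside: toUTF8 maps each byte to the rune of the same value, and newRegexp anchors the pattern with ^...$, so regex filters are whole-string matches that treat arbitrary binary bytes as single characters. A small sketch of the resulting behavior, mirroring TestFilterRowWithBinaryColumnQualifier later in this diff:

rx, err := newRegexp([]byte{128, '*'}) // "zero or more 0x80 bytes"
if err != nil {
	log.Fatal(err)
}
fmt.Println(rx.MatchString(toUTF8([]byte{128, 128}))) // true
fmt.Println(rx.MatchString(toUTF8([]byte{128, 129}))) // false: must match the entire input
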
 func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*btpb.MutateRowResponse, error) {
 s.mu.Lock()
 tbl, ok := s.tables[req.TableName]
@@ -789,9 +859,13 @@ func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]*co
 if !tbl.validTimestamp(tsr.StartTimestampMicros) {
 return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros)
 }
-if !tbl.validTimestamp(tsr.EndTimestampMicros) {
+if !tbl.validTimestamp(tsr.EndTimestampMicros) && tsr.EndTimestampMicros != 0 {
 return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros)
 }
+if tsr.StartTimestampMicros >= tsr.EndTimestampMicros && tsr.EndTimestampMicros != 0 {
+return fmt.Errorf("inverted or invalid timestamp range [%d, %d]", tsr.StartTimestampMicros, tsr.EndTimestampMicros)
+}
+
 // Find half-open interval to remove.
 // Cells are in descending timestamp order,
 // so the predicates to sort.Search are inverted.
@@ -1061,7 +1135,7 @@ func newTable(ctr *btapb.CreateTableRequest) *table {
 }
 
 func (t *table) validTimestamp(ts int64) bool {
-if ts <= minValidMilliSeconds || ts >= maxValidMilliSeconds {
+if ts < minValidMilliSeconds || ts > maxValidMilliSeconds {
 return false
 }
 
@@ -1302,6 +1376,7 @@ func (f *family) cellsByColumn(name string) []cell {
 type cell struct {
 ts int64
 value []byte
+labels []string
 }
 
 type byDescTS []cell
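
Aside: the DeleteFromColumn time range is half-open, [start, end), with end == 0 meaning "unbounded". A hypothetical extraction of the checks added above, for illustration only:

func validateTimeRange(tbl *table, tsr *btpb.TimestampRange) error {
	if !tbl.validTimestamp(tsr.StartTimestampMicros) {
		return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros)
	}
	if !tbl.validTimestamp(tsr.EndTimestampMicros) && tsr.EndTimestampMicros != 0 {
		return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros)
	}
	if tsr.StartTimestampMicros >= tsr.EndTimestampMicros && tsr.EndTimestampMicros != 0 {
		return fmt.Errorf("inverted or invalid timestamp range [%d, %d]", tsr.StartTimestampMicros, tsr.EndTimestampMicros)
	}
	// e.g. {start: 1000, end: 2000} and {start: 1000, end: 0} pass,
	// while {start: 2000, end: 1000} fails as inverted
	// (see Test_Mutation_DeleteFromColumn later in this diff).
	return nil
}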
394 vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go generated vendored
@@ -15,6 +15,7 @@
 package bttest
 
 import (
+"context"
 "fmt"
 "math/rand"
 "strconv"
@@ -23,9 +24,9 @@ import (
 "testing"
 "time"
 
+"github.com/golang/protobuf/proto"
 "github.com/google/go-cmp/cmp"
 "github.com/google/go-cmp/cmp/cmpopts"
-"golang.org/x/net/context"
 btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
 btpb "google.golang.org/genproto/googleapis/bigtable/v2"
 "google.golang.org/grpc"
@@ -420,6 +421,85 @@ func TestReadRows(t *testing.T) {
 }
 }
+
+func TestReadRowsError(t *testing.T) {
+ctx := context.Background()
+s := &server{
+tables: make(map[string]*table),
+}
+newTbl := btapb.Table{
+ColumnFamilies: map[string]*btapb.ColumnFamily{
+"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
+},
+}
+tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
+if err != nil {
+t.Fatalf("Creating table: %v", err)
+}
+mreq := &btpb.MutateRowRequest{
+TableName: tblInfo.Name,
+RowKey: []byte("row"),
+Mutations: []*btpb.Mutation{{
+Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
+FamilyName: "cf0",
+ColumnQualifier: []byte("col"),
+TimestampMicros: 1000,
+Value: []byte{},
+}},
+}},
+}
+if _, err := s.MutateRow(ctx, mreq); err != nil {
+t.Fatalf("Populating table: %v", err)
+}
+
+mock := &MockReadRowsServer{}
+req := &btpb.ReadRowsRequest{TableName: tblInfo.Name, Filter: &btpb.RowFilter{
+Filter: &btpb.RowFilter_RowKeyRegexFilter{RowKeyRegexFilter: []byte("[")}}, // Invalid regex.
+}
+if err = s.ReadRows(req, mock); err == nil {
+t.Fatal("ReadRows got no error, want error")
+}
+}
+
+func TestReadRowsAfterDeletion(t *testing.T) {
+ctx := context.Background()
+s := &server{
+tables: make(map[string]*table),
+}
+newTbl := btapb.Table{
+ColumnFamilies: map[string]*btapb.ColumnFamily{
+"cf0": {},
+},
+}
+tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{
+Parent: "cluster", TableId: "t", Table: &newTbl,
+})
+if err != nil {
+t.Fatalf("Creating table: %v", err)
+}
+populateTable(ctx, s)
+dreq := &btpb.MutateRowRequest{
+TableName: tblInfo.Name,
+RowKey: []byte("row"),
+Mutations: []*btpb.Mutation{{
+Mutation: &btpb.Mutation_DeleteFromRow_{
+DeleteFromRow: &btpb.Mutation_DeleteFromRow{},
+},
+}},
+}
+if _, err := s.MutateRow(ctx, dreq); err != nil {
+t.Fatalf("Deleting from table: %v", err)
+}
+
+mock := &MockReadRowsServer{}
+req := &btpb.ReadRowsRequest{TableName: tblInfo.Name}
+if err = s.ReadRows(req, mock); err != nil {
+t.Fatalf("ReadRows error: %v", err)
+}
+if got, want := len(mock.responses), 0; got != want {
+t.Errorf("response count: got %d, want %d", got, want)
+}
+}
 
 func TestReadRowsOrder(t *testing.T) {
 s := &server{
 tables: make(map[string]*table),
@@ -516,8 +596,8 @@ func TestReadRowsOrder(t *testing.T) {
 
 // Read with interleave filter
 inter := &btpb.RowFilter_Interleave{}
-fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: "1"}}
-cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte("2")}}
+fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: "cf1"}}
+cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte("col2")}}
 inter.Filters = append(inter.Filters, fnr, cqr)
 req = &btpb.ReadRowsRequest{
 TableName: tblInfo.Name,
@@ -573,6 +653,78 @@ func TestReadRowsOrder(t *testing.T) {
 testOrder(mock)
 }
+
+func TestReadRowsWithlabelTransformer(t *testing.T) {
+ctx := context.Background()
+s := &server{
+tables: make(map[string]*table),
+}
+newTbl := btapb.Table{
+ColumnFamilies: map[string]*btapb.ColumnFamily{
+"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
+},
+}
+tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
+if err != nil {
+t.Fatalf("Creating table: %v", err)
+}
+mreq := &btpb.MutateRowRequest{
+TableName: tblInfo.Name,
+RowKey: []byte("row"),
+Mutations: []*btpb.Mutation{{
+Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
+FamilyName: "cf0",
+ColumnQualifier: []byte("col"),
+TimestampMicros: 1000,
+Value: []byte{},
+}},
+}},
+}
+if _, err := s.MutateRow(ctx, mreq); err != nil {
+t.Fatalf("Populating table: %v", err)
+}
+
+mock := &MockReadRowsServer{}
+req := &btpb.ReadRowsRequest{
+TableName: tblInfo.Name,
+Filter: &btpb.RowFilter{
+Filter: &btpb.RowFilter_ApplyLabelTransformer{
+ApplyLabelTransformer: "label",
+},
+},
+}
+if err = s.ReadRows(req, mock); err != nil {
+t.Fatalf("ReadRows error: %v", err)
+}
+
+if got, want := len(mock.responses), 1; got != want {
+t.Fatalf("response count: got %d, want %d", got, want)
+}
+resp := mock.responses[0]
+if got, want := len(resp.Chunks), 1; got != want {
+t.Fatalf("chunks count: got %d, want %d", got, want)
+}
+chunk := resp.Chunks[0]
+if got, want := len(chunk.Labels), 1; got != want {
+t.Fatalf("labels count: got %d, want %d", got, want)
+}
+if got, want := chunk.Labels[0], "label"; got != want {
+t.Fatalf("label: got %s, want %s", got, want)
+}
+
+mock = &MockReadRowsServer{}
+req = &btpb.ReadRowsRequest{
+TableName: tblInfo.Name,
+Filter: &btpb.RowFilter{
+Filter: &btpb.RowFilter_ApplyLabelTransformer{
+ApplyLabelTransformer: "", // invalid label
+},
+},
+}
+if err = s.ReadRows(req, mock); err == nil {
+t.Fatal("ReadRows want invalid label error, got none")
+}
+}
 
 func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
 s := &server{
 tables: make(map[string]*table),
@@ -803,3 +955,239 @@ func TestFilters(t *testing.T) {
 }
 }
 }
+
+func Test_Mutation_DeleteFromColumn(t *testing.T) {
+ctx := context.Background()
+
+s := &server{
+tables: make(map[string]*table),
+}
+
+tblInfo, err := populateTable(ctx, s)
+if err != nil {
+t.Fatal(err)
+}
+
+tests := []struct {
+in *btpb.MutateRowRequest
+fail bool
+}{
+{in: &btpb.MutateRowRequest{
+TableName: tblInfo.Name,
+RowKey: []byte("row"),
+Mutations: []*btpb.Mutation{{
+Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
+FamilyName: "cf1",
+ColumnQualifier: []byte("col1"),
+TimeRange: &btpb.TimestampRange{
+StartTimestampMicros: 2000,
+EndTimestampMicros: 1000,
+},
+}},
+}},
+},
+fail: true,
+},
+{in: &btpb.MutateRowRequest{
+TableName: tblInfo.Name,
+RowKey: []byte("row"),
+Mutations: []*btpb.Mutation{{
+Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
+FamilyName: "cf2",
+ColumnQualifier: []byte("col2"),
+TimeRange: &btpb.TimestampRange{
+StartTimestampMicros: 1000,
+EndTimestampMicros: 2000,
+},
+}},
+}},
+},
+fail: false,
+},
+{in: &btpb.MutateRowRequest{
+TableName: tblInfo.Name,
+RowKey: []byte("row"),
+Mutations: []*btpb.Mutation{{
+Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
+FamilyName: "cf3",
+ColumnQualifier: []byte("col3"),
+TimeRange: &btpb.TimestampRange{
+StartTimestampMicros: 1000,
+EndTimestampMicros: 0,
+},
+}},
+}},
+},
+fail: false,
+},
+{in: &btpb.MutateRowRequest{
+TableName: tblInfo.Name,
+RowKey: []byte("row"),
+Mutations: []*btpb.Mutation{{
+Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
+FamilyName: "cf4",
+ColumnQualifier: []byte("col4"),
+TimeRange: &btpb.TimestampRange{
+StartTimestampMicros: 0,
+EndTimestampMicros: 1000,
+},
+}},
+}},
+},
+fail: true,
+},
+}
+
+for _, tst := range tests {
+_, err = s.MutateRow(ctx, tst.in)
+
+if err != nil && !tst.fail {
+t.Errorf("expected passed got failure for : %v \n with err: %v", tst.in, err)
+}
+
+if err == nil && tst.fail {
+t.Errorf("expected failure got passed for : %v", tst)
+}
+}
+}
+
+func TestFilterRow(t *testing.T) {
+row := &row{
+key: "row",
+families: map[string]*family{
+"fam": {
+name: "fam",
+cells: map[string][]cell{
+"col": {{ts: 100, value: []byte("val")}},
+},
+},
+},
+}
+for _, test := range []struct {
+filter *btpb.RowFilter
+want bool
+}{
+// The regexp-based filters perform whole-string, case-sensitive matches.
+{&btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte("row")}}, true},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte("ro")}}, false},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte("ROW")}}, false},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte("moo")}}, false},
+
+{&btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"fam"}}, true},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"f.*"}}, true},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"[fam]+"}}, true},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"fa"}}, false},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"FAM"}}, false},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"moo"}}, false},
+
+{&btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("col")}}, true},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("co")}}, false},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("COL")}}, false},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("moo")}}, false},
+
+{&btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte("val")}}, true},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte("va")}}, false},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte("VAL")}}, false},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte("moo")}}, false},
+} {
+got, _ := filterRow(test.filter, row.copy())
+if got != test.want {
+t.Errorf("%s: got %t, want %t", proto.CompactTextString(test.filter), got, test.want)
+}
+}
+}
+
+func TestFilterRowWithErrors(t *testing.T) {
+row := &row{
+key: "row",
+families: map[string]*family{
+"fam": {
+name: "fam",
+cells: map[string][]cell{
+"col": {{ts: 100, value: []byte("val")}},
+},
+},
+},
+}
+for _, test := range []struct {
+badRegex *btpb.RowFilter
+}{
+{&btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte("[")}}},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"["}}},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("[")}}},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte("[")}}},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_Chain_{
+Chain: &btpb.RowFilter_Chain{Filters: []*btpb.RowFilter{
+{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte("[")}}},
+},
+}}},
+{&btpb.RowFilter{Filter: &btpb.RowFilter_Condition_{
+Condition: &btpb.RowFilter_Condition{
+PredicateFilter: &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte("[")}},
+},
+}}},
+
+{&btpb.RowFilter{Filter: &btpb.RowFilter_RowSampleFilter{0.0}}}, // 0.0 is invalid.
+{&btpb.RowFilter{Filter: &btpb.RowFilter_RowSampleFilter{1.0}}}, // 1.0 is invalid.
+} {
+got, err := filterRow(test.badRegex, row.copy())
+if got != false {
+t.Errorf("%s: got true, want false", proto.CompactTextString(test.badRegex))
+}
+if err == nil {
+t.Errorf("%s: got no error, want error", proto.CompactTextString(test.badRegex))
+}
+}
+}
+
+func TestFilterRowWithRowSampleFilter(t *testing.T) {
+prev := randFloat
+randFloat = func() float64 { return 0.5 }
+defer func() { randFloat = prev }()
+for _, test := range []struct {
+p float64
+want bool
+}{
+{0.1, false}, // Less than random float. Return no rows.
+{0.5, false}, // Equal to random float. Return no rows.
+{0.9, true}, // Greater than random float. Return all rows.
+} {
+got, err := filterRow(&btpb.RowFilter{Filter: &btpb.RowFilter_RowSampleFilter{test.p}}, &row{})
+if err != nil {
+t.Fatalf("%f: %v", test.p, err)
+}
+if got != test.want {
+t.Errorf("%v: got %t, want %t", test.p, got, test.want)
+}
+}
+}
+
+func TestFilterRowWithBinaryColumnQualifier(t *testing.T) {
+rs := []byte{128, 128}
+row := &row{
+key: string(rs),
+families: map[string]*family{
+"fam": {
+name: "fam",
+cells: map[string][]cell{
+string(rs): {{ts: 100, value: []byte("val")}},
+},
+},
+},
+}
+for _, test := range []struct {
+filter []byte
+want bool
+}{
+{[]byte{128, 128}, true}, // succeeds, exact match
+{[]byte{128, 129}, false}, // fails
+{[]byte{128}, false}, // fails, because the regexp must match the entire input
+{[]byte{128, '*'}, true}, // succeeds: 0 or more 128s
+{[]byte{'[', 127, 128, ']', '{', '2', '}'}, true}, // succeeds: exactly two of either 127 or 128
+} {
+got, _ := filterRow(&btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{test.filter}}, row.copy())
+if got != test.want {
+t.Errorf("%v: got %t, want %t", test.filter, got, test.want)
+}
+}
+}
337 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go generated vendored
@@ -20,6 +20,8 @@ package main
 
 import (
 "bytes"
+"context"
+"encoding/csv"
 "flag"
 "fmt"
 "go/format"
@@ -34,11 +36,8 @@ import (
 "text/template"
 "time"
 
-"encoding/csv"
-
 "cloud.google.com/go/bigtable"
 "cloud.google.com/go/bigtable/internal/cbtconfig"
-"golang.org/x/net/context"
 "google.golang.org/api/iterator"
 "google.golang.org/api/option"
 "google.golang.org/grpc"
@@ -81,7 +80,6 @@ func getClient(clientConf bigtable.ClientConfig) *bigtable.Client {
 if err != nil {
 log.Fatalf("Making bigtable.Client: %v", err)
 }
-opts = append(opts, option.WithUserAgent(cliUserAgent))
 }
 return client
 }
@@ -190,14 +188,20 @@ Alpha features are not currently available to most Cloud Bigtable customers. The
 features might be changed in backward-incompatible ways and are not recommended
 for production use. They are not subject to any SLA or deprecation policy.
 
+Note: cbt does not support specifying arbitrary bytes on the command line for
+any value that Cloud Bigtable otherwise supports (for example, the row key and
+column qualifier).
+
 For convenience, values of the -project, -instance, -creds,
 -admin-endpoint and -data-endpoint flags may be specified in
 ~/.cbtrc in this format:
 
 project = my-project-123
 instance = my-instance
 creds = path-to-account-key.json
 admin-endpoint = hostname:port
 data-endpoint = hostname:port
 
 All values are optional, and all will be overridden by flags.
 `
 
@@ -229,7 +233,7 @@ var commands = []struct {
 },
 {
 Name: "createcluster",
-Desc: "Create a cluster in the configured instance (replication alpha)",
+Desc: "Create a cluster in the configured instance ",
 do: doCreateCluster,
 Usage: "cbt createcluster <cluster-id> <zone> <num-nodes> <storage type>\n" +
 " cluster-id Permanent, unique id for the cluster in the instance\n" +
@@ -249,8 +253,9 @@ var commands = []struct {
 Name: "createtable",
 Desc: "Create a table",
 do: doCreateTable,
-Usage: "cbt createtable <table> [families=family[:(maxage=<d> | maxversions=<n>)],...] [splits=split,...]\n" +
-" families: Column families and their associated GC policies. See \"setgcpolicy\".\n" +
+Usage: "cbt createtable <table> [families=family[:gcpolicy],...] [splits=split,...]\n" +
+" families: Column families and their associated GC policies. For gcpolicy,\n" +
+" see \"setgcpolicy\".\n" +
 " Example: families=family1:maxage=1w,family2:maxversions=1\n" +
 " splits: Row key to be used to initially split the table",
 Required: cbtconfig.ProjectAndInstanceRequired,
@@ -273,7 +278,7 @@ var commands = []struct {
 },
 {
 Name: "deletecluster",
-Desc: "Delete a cluster from the configured instance (replication alpha)",
+Desc: "Delete a cluster from the configured instance ",
 do: doDeleteCluster,
 Usage: "cbt deletecluster <cluster>",
 Required: cbtconfig.ProjectAndInstanceRequired,
@@ -283,7 +288,7 @@ var commands = []struct {
 Desc: "Delete all cells in a column",
 do: doDeleteColumn,
 Usage: "cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>]\n" +
-" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n",
+" app-profile=<app profile id> The app profile id to use for the request\n",
 Required: cbtconfig.ProjectAndInstanceRequired,
 },
 {
@@ -298,7 +303,7 @@ var commands = []struct {
 Desc: "Delete a row",
 do: doDeleteRow,
 Usage: "cbt deleterow <table> <row> [app-profile=<app profile id>]\n" +
-" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n",
+" app-profile=<app profile id> The app profile id to use for the request\n",
 Required: cbtconfig.ProjectAndInstanceRequired,
 },
 {
@@ -344,7 +349,7 @@ var commands = []struct {
 "[app-profile=<app profile id>]\n" +
 " columns=[family]:[qualifier],... Read only these columns, comma-separated\n" +
 " cells-per-column=<n> Read only this many cells per column\n" +
-" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n",
+" app-profile=<app profile id> The app profile id to use for the request\n",
 Required: cbtconfig.ProjectAndInstanceRequired,
 },
 {
@@ -376,7 +381,7 @@ var commands = []struct {
 " columns=[family]:[qualifier],... Read only these columns, comma-separated\n" +
 " count=<n> Read only this many rows\n" +
 " cells-per-column=<n> Read only this many cells per column\n" +
-" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n",
+" app-profile=<app profile id> The app profile id to use for the request\n",
 Required: cbtconfig.ProjectAndInstanceRequired,
 },
 {
@@ -384,7 +389,7 @@ var commands = []struct {
 Desc: "Set value of a cell",
 do: doSet,
 Usage: "cbt set <table> <row> [app-profile=<app profile id>] family:column=val[@ts] ...\n" +
-" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n" +
+" app-profile=<app profile id> The app profile id to use for the request\n" +
 " family:column=val[@ts] may be repeated to set multiple cells.\n" +
 "\n" +
 " ts is an optional integer timestamp.\n" +
@@ -396,7 +401,7 @@ var commands = []struct {
 Name: "setgcpolicy",
 Desc: "Set the GC policy for a column family",
 do: doSetGCPolicy,
-Usage: "cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )\n" +
+Usage: "cbt setgcpolicy <table> <family> ((maxage=<d> | maxversions=<n>) [(and|or) (maxage=<d> | maxversions=<n>),...] | never)\n" +
 "\n" +
 ` maxage=<d> Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" +
 " maxversions=<n> Maximum number of versions to preserve",
@@ -404,7 +409,7 @@ var commands = []struct {
 },
 {
 Name: "waitforreplication",
-Desc: "Block until all the completed writes have been replicated to all the clusters (replication alpha)",
+Desc: "Block until all the completed writes have been replicated to all the clusters",
 do: doWaitForReplicaiton,
 Usage: "cbt waitforreplication <table>",
 Required: cbtconfig.ProjectAndInstanceRequired,
@@ -456,6 +461,45 @@ var commands = []struct {
 Usage: "cbt version",
 Required: cbtconfig.NoneRequired,
 },
+{
+Name: "createappprofile",
+Desc: "Creates app profile for an instance",
+do: doCreateAppProfile,
+Usage: "usage: cbt createappprofile <instance-id> <profile-id> <description> " +
+"(route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag] \n" +
+"optional flags may be `force`",
+Required: cbtconfig.ProjectAndInstanceRequired,
+},
+{
+Name: "getappprofile",
+Desc: "Reads app profile for an instance",
+do: doGetAppProfile,
+Usage: "cbt getappprofile <instance-id> <profile-id>",
+Required: cbtconfig.ProjectAndInstanceRequired,
+},
+{
+Name: "listappprofile",
+Desc: "Lists app profile for an instance",
+do: doListAppProfiles,
+Usage: "cbt listappprofile <instance-id> ",
+Required: cbtconfig.ProjectAndInstanceRequired,
+},
+{
+Name: "updateappprofile",
+Desc: "Updates app profile for an instance",
+do: doUpdateAppProfile,
+Usage: "usage: cbt updateappprofile <instance-id> <profile-id> <description>" +
+"(route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag] \n" +
+"optional flags may be `force`",
+Required: cbtconfig.ProjectAndInstanceRequired,
+},
+{
+Name: "deleteappprofile",
+Desc: "Deletes app profile for an instance",
+do: doDeleteAppProfile,
+Usage: "cbt deleteappprofile <instance-id> <profile-id>",
+Required: cbtconfig.ProjectAndInstanceRequired,
+},
 }
 
 func doCount(ctx context.Context, args ...string) {
@@ -767,12 +811,12 @@ var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{
 
 // DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
 // Run "go generate" to regenerate.
-//go:generate go run cbt.go -o cbtdoc.go doc
+//go:generate go run cbt.go gcpolicy.go -o cbtdoc.go doc
 
 /*
 Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
 install the cbt tool, see the
-[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview).
+[cbt overview](https://cloud.google.com/bigtable/docs/cbt-overview).
 
 Usage:
 
@@ -973,7 +1017,9 @@ var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{
 "indent": indentLines,
 }).
 Parse(`
-Cbt is a tool for doing basic interactions with Cloud Bigtable.
+Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
+install the cbt tool, see the
+[cbt overview](https://cloud.google.com/bigtable/docs/cbt-overview).
 
 Usage:
 
@@ -1114,11 +1160,10 @@ func doSet(ctx context.Context, args ...string) {
 
 func doSetGCPolicy(ctx context.Context, args ...string) {
 if len(args) < 3 {
-log.Fatalf("usage: cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> | maxage=<d> (and|or) maxversions=<n> )")
+log.Fatalf("usage: cbt setgcpolicy <table> <family> ((maxage=<d> | maxversions=<n>) [(and|or) (maxage=<d> | maxversions=<n>),...] | never)")
 }
 table := args[0]
 fam := args[1]
-
 pol, err := parseGCPolicy(strings.Join(args[2:], " "))
 if err != nil {
 log.Fatal(err)
@@ -1140,58 +1185,6 @@ func doWaitForReplicaiton(ctx context.Context, args ...string) {
 }
 }
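
Aside: the extended setgcpolicy grammar composes simple rules with "and"/"or", which in the Go client correspond to intersection and union policies, as the parseGCPolicy code deleted below shows. An illustrative sketch:

// "maxage=1h and maxversions=2" parses to:
p := bigtable.IntersectionPolicy(
	bigtable.MaxAgePolicy(1*time.Hour),
	bigtable.MaxVersionsPolicy(2),
)
// "maxage=1h or maxversions=2" would use bigtable.UnionPolicy instead.
_ = p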
 
-func parseGCPolicy(policyStr string) (bigtable.GCPolicy, error) {
-words := strings.Fields(policyStr)
-switch len(words) {
-case 1:
-return parseSinglePolicy(words[0])
-case 3:
-p1, err := parseSinglePolicy(words[0])
-if err != nil {
-return nil, err
-}
-p2, err := parseSinglePolicy(words[2])
-if err != nil {
-return nil, err
-}
-switch words[1] {
-case "and":
-return bigtable.IntersectionPolicy(p1, p2), nil
-case "or":
-return bigtable.UnionPolicy(p1, p2), nil
-default:
-return nil, fmt.Errorf("Expected 'and' or 'or', saw %q", words[1])
-}
-default:
-return nil, fmt.Errorf("Expected '1' or '3' parameter count, saw %d", len(words))
-}
-return nil, nil
-}
-
-func parseSinglePolicy(s string) (bigtable.GCPolicy, error) {
-words := strings.Split(s, "=")
-if len(words) != 2 {
-return nil, fmt.Errorf("Expected 'name=value', got %q", words)
-}
-switch words[0] {
-case "maxage":
-d, err := parseDuration(words[1])
-if err != nil {
-return nil, err
-}
-return bigtable.MaxAgePolicy(d), nil
-case "maxversions":
-n, err := strconv.ParseUint(words[1], 10, 16)
-if err != nil {
-return nil, err
-}
-return bigtable.MaxVersionsPolicy(int(n)), nil
-default:
-return nil, fmt.Errorf("Expected 'maxage' or 'maxversions', got %q", words[1])
-}
-return nil, nil
-}
-
 func parseStorageType(storageTypeStr string) (bigtable.StorageType, error) {
 switch storageTypeStr {
 case "SSD":
@@ -1310,6 +1303,164 @@ func doDeleteSnapshot(ctx context.Context, args ...string) {
 }
 }
+
+func doCreateAppProfile(ctx context.Context, args ...string) {
+if len(args) < 4 || len(args) > 6 {
+log.Fatal("usage: cbt createappprofile <instance-id> <profile-id> <description> " +
+" (route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag] \n" +
+"optional flags may be `force`")
+}
+
+routingPolicy, clusterID, err := parseProfileRoute(args[3])
+if err != nil {
+log.Fatalln("Exactly one of (route-any | [route-to : transactional-writes]) must be specified.")
+}
+
+config := bigtable.ProfileConf{
+RoutingPolicy: routingPolicy,
+InstanceID: args[0],
+ProfileID: args[1],
+Description: args[2],
+}
+
+opFlags := []string{"force", "transactional-writes"}
+parseValues, err := parseArgs(args[4:], opFlags)
+if err != nil {
+log.Fatalf("optional flags can be specified as (force=<true>|transactional-writes=<true>) got %s ", args[4:])
+}
+
+for _, f := range opFlags {
+fv, err := parseProfileOpts(f, parseValues)
+if err != nil {
+log.Fatalf("optional flags can be specified as (force=<true>|transactional-writes=<true>) got %s ", args[4:])
+}
+
+switch f {
+case opFlags[0]:
+config.IgnoreWarnings = fv
+case opFlags[1]:
+config.AllowTransactionalWrites = fv
+default:
+
+}
+}
+
+if routingPolicy == bigtable.SingleClusterRouting {
+config.ClusterID = clusterID
+}
+
+profile, err := getInstanceAdminClient().CreateAppProfile(ctx, config)
+if err != nil {
+log.Fatalf("Failed to create app profile : %v", err)
+}
+
+fmt.Printf("Name: %s\n", profile.Name)
+fmt.Printf("RoutingPolicy: %v\n", profile.RoutingPolicy)
+}
+
+func doGetAppProfile(ctx context.Context, args ...string) {
+if len(args) != 2 {
+log.Fatalln("usage: cbt getappprofile <instance-id> <profile-id>")
+}
+
+instanceID := args[0]
+profileID := args[1]
+profile, err := getInstanceAdminClient().GetAppProfile(ctx, instanceID, profileID)
+if err != nil {
+log.Fatalf("Failed to get app profile : %v", err)
+}
+
+fmt.Printf("Name: %s\n", profile.Name)
+fmt.Printf("Etag: %s\n", profile.Etag)
+fmt.Printf("Description: %s\n", profile.Description)
+fmt.Printf("RoutingPolicy: %v\n", profile.RoutingPolicy)
+}
+
+func doListAppProfiles(ctx context.Context, args ...string) {
+if len(args) != 1 {
+log.Fatalln("usage: cbt listappprofile <instance-id>")
+}
+
+instance := args[0]
+
+it := getInstanceAdminClient().ListAppProfiles(ctx, instance)
+
+tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
+fmt.Fprintf(tw, "AppProfile\tProfile Description\tProfile Etag\tProfile Routing Policy\n")
+fmt.Fprintf(tw, "-----------\t--------------------\t------------\t----------------------\n")
+
+for {
+profile, err := it.Next()
+if err == iterator.Done {
+break
+}
+if err != nil {
+log.Fatalf("Failed to fetch app profile %v", err)
+}
+fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", profile.Name, profile.Description, profile.Etag, profile.RoutingPolicy)
+}
+tw.Flush()
+}
+
+func doUpdateAppProfile(ctx context.Context, args ...string) {
+
+if len(args) < 4 {
+log.Fatal("usage: cbt updateappprofile <instance-id> <profile-id> <description>" +
+" (route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag] \n" +
+"optional flags may be `force`")
+}
+
+routingPolicy, clusterID, err := parseProfileRoute(args[3])
+if err != nil {
+log.Fatalln("Exactly one of (route-any | [route-to : transactional-writes]) must be specified.")
+}
+InstanceID := args[0]
+ProfileID := args[1]
+config := bigtable.ProfileAttrsToUpdate{
+RoutingPolicy: routingPolicy,
+Description: args[2],
+}
+opFlags := []string{"force", "transactional-writes"}
+parseValues, err := parseArgs(args[4:], opFlags)
+if err != nil {
+log.Fatalf("optional flags can be specified as (force=<true>|transactional-writes=<true>) got %s ", args[4:])
+}
+
+for _, f := range opFlags {
+fv, err := parseProfileOpts(f, parseValues)
+if err != nil {
+log.Fatalf("optional flags can be specified as (force=<true>|transactional-writes=<true>) got %s ", args[4:])
+}
+
+switch f {
+case opFlags[0]:
+config.IgnoreWarnings = fv
+case opFlags[1]:
+config.AllowTransactionalWrites = fv
+default:
+
+}
+}
+if routingPolicy == bigtable.SingleClusterRouting {
+config.ClusterID = clusterID
+}
+
+err = getInstanceAdminClient().UpdateAppProfile(ctx, InstanceID, ProfileID, config)
+if err != nil {
+log.Fatalf("Failed to update app profile : %v", err)
+}
+}
+
+func doDeleteAppProfile(ctx context.Context, args ...string) {
+if len(args) != 2 {
+log.Println("usage: cbt deleteappprofile <instance-id> <profile-id>")
+}
+
+err := getInstanceAdminClient().DeleteAppProfile(ctx, args[0], args[1])
+if err != nil {
+log.Fatalf("Failed to delete app profile : %v", err)
+}
+}
+
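Aside: a condensed sketch of what doCreateAppProfile above builds for "route-to=<cluster-id>"; the field values here are illustrative, not from the diff:

config := bigtable.ProfileConf{
	RoutingPolicy: bigtable.SingleClusterRouting,
	InstanceID:    "my-instance",
	ProfileID:     "my-profile",
	Description:   "profile pinned to one cluster",
	ClusterID:     "my-cluster", // set only for SingleClusterRouting
}
profile, err := getInstanceAdminClient().CreateAppProfile(ctx, config)
// err carries the server's validation failures, e.g. warnings unless force=true.
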
 // parseDuration parses a duration string.
 // It is similar to Go's time.ParseDuration, except with a different set of supported units,
 // and only simple formats supported.
@@ -1392,7 +1543,8 @@ func parseColumnsFilter(columns string) (bigtable.Filter, error) {
 return nil, err
 }
 return filter, nil
-} else {
+}
+
 var columnFilters []bigtable.Filter
 for _, column := range splitColumns {
 filter, err := columnFilter(column)
@@ -1402,7 +1554,6 @@ func parseColumnsFilter(columns string) (bigtable.Filter, error) {
 columnFilters = append(columnFilters, filter)
 }
 return bigtable.InterleaveFilters(columnFilters...), nil
-}
 }
 
 func columnFilter(column string) (bigtable.Filter, error) {
@@ -1423,3 +1574,41 @@ func columnFilter(column string) (bigtable.Filter, error) {
 return nil, fmt.Errorf("Bad format for column %q", column)
 }
 }
+
+func parseProfileRoute(str string) (routingPolicy, clusterID string, err error) {
+
+route := strings.Split(str, "=")
+switch route[0] {
+case "route-any":
+if len(route) > 1 {
+err = fmt.Errorf("got %v", route)
+break
+}
+routingPolicy = bigtable.MultiClusterRouting
+
+case "route-to":
+if len(route) != 2 || route[1] == "" {
+err = fmt.Errorf("got %v", route)
+break
+}
+routingPolicy = bigtable.SingleClusterRouting
+clusterID = route[1]
+default:
+err = fmt.Errorf("got %v", route)
+}
+
+return
+}
+
+func parseProfileOpts(opt string, parsedArgs map[string]string) (bool, error) {
+
+if val, ok := parsedArgs[opt]; ok {
+status, err := strconv.ParseBool(val)
+if err != nil {
+return false, fmt.Errorf("expected %s = <true> got %s ", opt, val)
+}
+
+return status, nil
+}
+return false, nil
+}
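
Aside: illustrative inputs and outputs for the two helpers just added; the values are examples, not taken from the diff:

routingPolicy, clusterID, err := parseProfileRoute("route-to=my-cluster")
// -> bigtable.SingleClusterRouting, "my-cluster", nil
// parseProfileRoute("route-any") -> bigtable.MultiClusterRouting, "", nil
// parseProfileRoute("route-to=") -> error: exactly one routing form is required

parsed, _ := parseArgs([]string{"force=true"}, []string{"force", "transactional-writes"})
force, _ := parseProfileOpts("force", parsed) // -> true; absent flags default to false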
51 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go generated vendored
@@ -62,57 +62,6 @@ func TestParseDuration(t *testing.T) {
 }
 }
 
-func TestParseGCPolicy(t *testing.T) {
-tests := []struct {
-in string
-out bigtable.GCPolicy
-fail bool
-}{
-{in: "maxage=1h", out: bigtable.MaxAgePolicy(time.Hour * 1)},
-{in: "maxversions=2", out: bigtable.MaxVersionsPolicy(int(2))},
-{in: "maxversions=2 and maxage=1h", out: bigtable.IntersectionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)},
-{in: "maxversions=2 or maxage=1h", out: bigtable.UnionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)},
-
-{in: "maxage=1", fail: true},
-{in: "maxage = 1h", fail: true},
-{in: "maxage =1h", fail: true},
-{in: "maxage= 1h", fail: true},
-{in: "foomaxage=1h", fail: true},
-{in: "maxversions=1h", fail: true},
-{in: "maxversions= 1", fail: true},
-{in: "maxversions = 1", fail: true},
-{in: "maxversions =1", fail: true},
-{in: "barmaxversions=1", fail: true},
-{in: "maxage = 1h or maxversions=1h", fail: true},
-{in: "foomaxversions=2 or maxage=1h", fail: true},
-{in: "maxversions=2 or barmaxage=1h", fail: true},
-{in: "foomaxversions=2 or barmaxage=1h", fail: true},
-{in: "maxage = 1h and maxversions=1h", fail: true},
-{in: "foomaxage=1h and maxversions=1", fail: true},
-{in: "maxage=1h and barmaxversions=1", fail: true},
-{in: "foomaxage=1h and barmaxversions=1", fail: true},
-}
-for _, tc := range tests {
-got, err := parseGCPolicy(tc.in)
-if !tc.fail && err != nil {
-t.Errorf("parseGCPolicy(%q) unexpectedly failed: %v", tc.in, err)
-continue
-}
-if tc.fail && err == nil {
-t.Errorf("parseGCPolicy(%q) did not fail", tc.in)
-continue
-}
-if tc.fail {
-continue
-}
-var cmpOpts cmp.Options
-cmpOpts = append(cmpOpts, cmp.AllowUnexported(bigtable.IntersectionPolicy([]bigtable.GCPolicy{}...)), cmp.AllowUnexported(bigtable.UnionPolicy([]bigtable.GCPolicy{}...)))
-if !cmp.Equal(got, tc.out, cmpOpts) {
-t.Errorf("parseGCPolicy(%q) =%v, want %v", tc.in, got, tc.out)
-}
-}
-}
-
 func TestParseArgs(t *testing.T) {
 got, err := parseArgs([]string{"a=1", "b=2"}, []string{"a", "b"})
 if err != nil {
100 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go generated vendored
@@ -14,12 +14,12 @@
 
 // DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
 // Run "go generate" to regenerate.
-//go:generate go run cbt.go -o cbtdoc.go doc
+//go:generate go run cbt.go gcpolicy.go -o cbtdoc.go doc
 
 /*
 Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
 install the cbt tool, see the
-[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview).
+[cbt overview](https://cloud.google.com/bigtable/docs/cbt-overview).
 
 Usage:
 
@@ -29,12 +29,12 @@ The commands are:
 
 	count                     Count rows in a table
 	createinstance            Create an instance with an initial cluster
-	createcluster             Create a cluster in the configured instance (replication alpha)
+	createcluster             Create a cluster in the configured instance
 	createfamily              Create a column family
 	createtable               Create a table
 	updatecluster             Update a cluster in the configured instance
-	deleteinstance            Deletes an instance
+	deleteinstance            Delete an instance
-	deletecluster             Deletes a cluster from the configured instance (replication alpha)
+	deletecluster             Delete a cluster from the configured instance
 	deletecolumn              Delete all cells in a column
 	deletefamily              Delete a column family
 	deleterow                 Delete a row
@@ -42,20 +42,25 @@ The commands are:
 	doc                       Print godoc-suitable documentation for cbt
 	help                      Print help text
 	listinstances             List instances in a project
-	listclusters              List instances in an instance
+	listclusters              List clusters in an instance
 	lookup                    Read from a single row
 	ls                        List tables and column families
 	mddoc                     Print documentation for cbt in Markdown format
 	read                      Read rows
 	set                       Set value of a cell
 	setgcpolicy               Set the GC policy for a column family
-	waitforreplication        Blocks until all the completed writes have been replicated to all the clusters (replication alpha)
+	waitforreplication        Block until all the completed writes have been replicated to all the clusters
 	createtablefromsnapshot   Create a table from a snapshot (snapshots alpha)
 	createsnapshot            Create a snapshot from a source table (snapshots alpha)
 	listsnapshots             List snapshots in a cluster (snapshots alpha)
 	getsnapshot               Get snapshot info (snapshots alpha)
 	deletesnapshot            Delete snapshot in a cluster (snapshots alpha)
 	version                   Print the current cbt version
+	createappprofile          Creates app profile for an instance
+	getappprofile             Reads app profile for an instance
+	listappprofile            Lists app profile for an instance
+	updateappprofile          Updates app profile for an instance
+	deleteappprofile          Deletes app profile for an instance
 
 Use "cbt help <command>" for more information about a command.
 
@@ -73,14 +78,20 @@ Alpha features are not currently available to most Cloud Bigtable customers. The
 features might be changed in backward-incompatible ways and are not recommended
 for production use. They are not subject to any SLA or deprecation policy.
 
+Note: cbt does not support specifying arbitrary bytes on the command line for
+any value that Bigtable otherwise supports (e.g., row key, column qualifier,
+etc.).
+
 For convenience, values of the -project, -instance, -creds,
 -admin-endpoint and -data-endpoint flags may be specified in
 ~/.cbtrc in this format:
 
 	project = my-project-123
 	instance = my-instance
 	creds = path-to-account-key.json
 	admin-endpoint = hostname:port
 	data-endpoint = hostname:port
 
 All values are optional, and all will be overridden by flags.
 
 
@@ -108,7 +119,7 @@ Usage:
 
 
-Create a cluster in the configured instance (replication alpha)
+Create a cluster in the configured instance
 
 Usage:
 	cbt createcluster <cluster-id> <zone> <num-nodes> <storage type>
@@ -132,8 +143,9 @@ Usage:
 Create a table
 
 Usage:
-	cbt createtable <table> [families=family[:(maxage=<d> | maxversions=<n>)],...] [splits=split,...]
-	  families: Column families and their associated GC policies. See "setgcpolicy".
+	cbt createtable <table> [families=family[:gcpolicy],...] [splits=split,...]
+	  families: Column families and their associated GC policies. For gcpolicy,
+	  see "setgcpolicy".
 	  Example: families=family1:maxage=1w,family2:maxversions=1
 	  splits: Row key to be used to initially split the table
 
@@ -150,7 +162,7 @@ Usage:
 
 
-Deletes an instance
+Delete an instance
 
 Usage:
 	cbt deleteinstance <instance>
@@ -158,7 +170,7 @@ Usage:
 
 
-Deletes a cluster from the configured instance (replication alpha)
+Delete a cluster from the configured instance
 
 Usage:
 	cbt deletecluster <cluster>
@@ -170,7 +182,7 @@ Delete all cells in a column
 
 Usage:
 	cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>]
-	  app-profile=<app profile id>          The app profile id to use for the request (replication alpha)
+	  app-profile=<app profile id>          The app profile id to use for the request
 
 
@@ -188,7 +200,7 @@ Delete a row
 
 Usage:
 	cbt deleterow <table> <row> [app-profile=<app profile id>]
-	  app-profile=<app profile id>          The app profile id to use for the request (replication alpha)
+	  app-profile=<app profile id>          The app profile id to use for the request
 
 
@@ -226,7 +238,7 @@ Usage:
 
 
-List instances in an instance
+List clusters in an instance
 
 Usage:
 	cbt listclusters
@@ -237,9 +249,10 @@ Usage:
 Read from a single row
 
 Usage:
-	cbt lookup <table> <row> [cells-per-column=<n>] [app-profile=<app profile id>]
+	cbt lookup <table> <row> [columns=[family]:[qualifier],...] [cells-per-column=<n>] [app-profile=<app profile id>]
+	  columns=[family]:[qualifier],...      Read only these columns, comma-separated
 	  cells-per-column=<n>                  Read only this many cells per column
-	  app-profile=<app profile id>          The app profile id to use for the request (replication alpha)
+	  app-profile=<app profile id>          The app profile id to use for the request
 
 
@@ -265,14 +278,15 @@ Usage:
 Read rows
 
 Usage:
-	cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>] [cells-per-column=<n>] [app-profile=<app profile id>]
+	cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [columns=[family]:[qualifier],...] [count=<n>] [cells-per-column=<n>] [app-profile=<app profile id>]
 	  start=<row>                           Start reading at this row
 	  end=<row>                             Stop reading before this row
 	  prefix=<prefix>                       Read rows with this prefix
 	  regex=<regex>                         Read rows with keys matching this regex
+	  columns=[family]:[qualifier],...      Read only these columns, comma-separated
 	  count=<n>                             Read only this many rows
 	  cells-per-column=<n>                  Read only this many cells per column
-	  app-profile=<app profile id>          The app profile id to use for the request (replication alpha)
+	  app-profile=<app profile id>          The app profile id to use for the request
 
 
@@ -282,7 +296,7 @@ Set value of a cell
 
 Usage:
 	cbt set <table> <row> [app-profile=<app profile id>] family:column=val[@ts] ...
-	  app-profile=<app profile id>          The app profile id to use for the request (replication alpha)
+	  app-profile=<app profile id>          The app profile id to use for the request
 	  family:column=val[@ts] may be repeated to set multiple cells.
 
 	  ts is an optional integer timestamp.
@@ -295,7 +309,7 @@ Usage:
 Set the GC policy for a column family
 
 Usage:
-	cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )
+	cbt setgcpolicy <table> <family> ((maxage=<d> | maxversions=<n>) [(and|or) (maxage=<d> | maxversions=<n>),...] | never)
 
 	  maxage=<d>         Maximum timestamp age to preserve (e.g. "1h", "4d")
 	  maxversions=<n>    Maximum number of versions to preserve
@@ -303,7 +317,7 @@ Usage:
 
 
-Blocks until all the completed writes have been replicated to all the clusters (replication alpha)
+Block until all the completed writes have been replicated to all the clusters
 
 Usage:
 	cbt waitforreplication <table>
@@ -365,5 +379,47 @@ Usage:
 
 
+Creates app profile for an instance
+
+Usage:
+	usage: cbt createappprofile <instance-id> <profile-id> <description> (route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag]
+	  optional flags may be `force`
+
+
+Reads app profile for an instance
+
+Usage:
+	cbt getappprofile <instance-id> <profile-id>
+
+
+Lists app profile for an instance
+
+Usage:
+	cbt listappprofile <instance-id>
+
+
+Updates app profile for an instance
+
+Usage:
+	usage: cbt updateappprofile <instance-id> <profile-id> <description>(route-any | [ route-to=<cluster-id> : transactional-writes]) [optional flag]
+	  optional flags may be `force`
+
+
+Deletes app profile for an instance
+
+Usage:
+	cbt deleteappprofile <instance-id> <profile-id>
+
+
 */
 package main
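Editor's note: the extended setgcpolicy grammar above composes simple rules with "and"/"or" and adds a "never" form. For instance (illustrative table and family names, not from this commit):

	cbt setgcpolicy my-table my-family maxage=7d or maxversions=5
	cbt setgcpolicy my-table my-family never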
215 vendor/cloud.google.com/go/bigtable/cmd/cbt/gcpolicy.go generated vendored Normal file
@@ -0,0 +1,215 @@
+/*
+Copyright 2015 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"cloud.google.com/go/bigtable"
+)
+
+// Parse a GC policy. Valid policies include
+//     never
+//     maxage = 5d
+//     maxversions = 3
+//     maxage = 5d || maxversions = 3
+//     maxage=30d || (maxage=3d && maxversions=100)
+func parseGCPolicy(s string) (bigtable.GCPolicy, error) {
+	if strings.TrimSpace(s) == "never" {
+		return bigtable.NoGcPolicy(), nil
+	}
+	r := strings.NewReader(s)
+	p, err := parsePolicyExpr(r)
+	if err != nil {
+		return nil, fmt.Errorf("invalid GC policy: %v", err)
+	}
+	tok, err := getToken(r)
+	if err != nil {
+		return nil, err
+	}
+	if tok != "" {
+		return nil, fmt.Errorf("invalid GC policy: want end of input, got %q", tok)
+	}
+	return p, nil
+}
+
+// expr ::= term (op term)*
+// op   ::= "and" | "or" | "&&" | "||"
+func parsePolicyExpr(r io.RuneScanner) (bigtable.GCPolicy, error) {
+	policy, err := parsePolicyTerm(r)
+	if err != nil {
+		return nil, err
+	}
+	for {
+		tok, err := getToken(r)
+		if err != nil {
+			return nil, err
+		}
+		var f func(...bigtable.GCPolicy) bigtable.GCPolicy
+		switch tok {
+		case "and", "&&":
+			f = bigtable.IntersectionPolicy
+		case "or", "||":
+			f = bigtable.UnionPolicy
+		default:
+			ungetToken(tok)
+			return policy, nil
+		}
+		p2, err := parsePolicyTerm(r)
+		if err != nil {
+			return nil, err
+		}
+		policy = f(policy, p2)
+	}
+}
+
+// term ::= "maxage" "=" duration | "maxversions" "=" int | "(" policy ")"
+func parsePolicyTerm(r io.RuneScanner) (bigtable.GCPolicy, error) {
+	tok, err := getToken(r)
+	if err != nil {
+		return nil, err
+	}
+	switch tok {
+	case "":
+		return nil, errors.New("empty GC policy term")
+
+	case "maxage", "maxversions":
+		if err := expectToken(r, "="); err != nil {
+			return nil, err
+		}
+		tok2, err := getToken(r)
+		if err != nil {
+			return nil, err
+		}
+		if tok2 == "" {
+			return nil, errors.New("expected a token after '='")
+		}
+		if tok == "maxage" {
+			dur, err := parseDuration(tok2)
+			if err != nil {
+				return nil, err
+			}
+			return bigtable.MaxAgePolicy(dur), nil
+		}
+		n, err := strconv.ParseUint(tok2, 10, 16)
+		if err != nil {
+			return nil, err
+		}
+		return bigtable.MaxVersionsPolicy(int(n)), nil
+
+	case "(":
+		p, err := parsePolicyExpr(r)
+		if err != nil {
+			return nil, err
+		}
+		if err := expectToken(r, ")"); err != nil {
+			return nil, err
+		}
+		return p, nil
+
+	default:
+		return nil, fmt.Errorf("unexpected token: %q", tok)
+	}
+}
+
+func expectToken(r io.RuneScanner, want string) error {
+	got, err := getToken(r)
+	if err != nil {
+		return err
+	}
+	if got != want {
+		return fmt.Errorf("expected %q, saw %q", want, got)
+	}
+	return nil
+}
+
+const noToken = "_" // empty token is valid, so use "_" instead
+
+// If not noToken, getToken will return this instead of reading a new token
+// from the input.
+var ungotToken = noToken
+
+// getToken extracts the first token from the input. Valid tokens include
+// any sequence of letters and digits, and these symbols: &&, ||, =, ( and ).
+// getToken returns ("", nil) at end of input.
+func getToken(r io.RuneScanner) (string, error) {
+	if ungotToken != noToken {
+		t := ungotToken
+		ungotToken = noToken
+		return t, nil
+	}
+	var err error
+	// Skip leading whitespace.
+	c := ' '
+	for unicode.IsSpace(c) {
+		c, _, err = r.ReadRune()
+		if err == io.EOF {
+			return "", nil
+		}
+		if err != nil {
+			return "", err
+		}
+	}
+	switch {
+	case c == '=' || c == '(' || c == ')':
+		return string(c), nil
+
+	case c == '&' || c == '|':
+		c2, _, err := r.ReadRune()
+		if err != nil && err != io.EOF {
+			return "", err
+		}
+		if c != c2 {
+			return "", fmt.Errorf("expected %c%c", c, c)
+		}
+		return string([]rune{c, c}), nil
+
+	case unicode.IsLetter(c) || unicode.IsDigit(c):
+		// Collect an alphanumeric token.
+		var b bytes.Buffer
+		for unicode.IsLetter(c) || unicode.IsDigit(c) {
+			b.WriteRune(c)
+			c, _, err = r.ReadRune()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				return "", err
+			}
+		}
+		r.UnreadRune()
+		return b.String(), nil
+
+	default:
+		return "", fmt.Errorf("bad rune %q", c)
+	}
+}
+
+// "unget" a token so the next call to getToken will return it.
+func ungetToken(tok string) {
+	if ungotToken != noToken {
+		panic("ungetToken called twice")
+	}
+	ungotToken = tok
+}
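Editor's note: a quick sketch of the parser in use, assuming the package context above; the input mirrors the examples in the function comment and is illustrative only.

	p, err := parseGCPolicy("maxage=30d || (maxage=3d && maxversions=100)")
	if err != nil {
		log.Fatal(err) // assumes a log import; illustrative only
	}
	// p is UnionPolicy(MaxAgePolicy(30d), IntersectionPolicy(MaxAgePolicy(3d), MaxVersionsPolicy(100)))
	_ = p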
196 vendor/cloud.google.com/go/bigtable/cmd/cbt/gcpolicy_test.go generated vendored Normal file
@@ -0,0 +1,196 @@
+/*
+Copyright 2015 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"strings"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/bigtable"
+	"github.com/google/go-cmp/cmp"
+)
+
+func TestParseGCPolicy(t *testing.T) {
+	for _, test := range []struct {
+		in   string
+		want bigtable.GCPolicy
+	}{
+		{
+			"never",
+			bigtable.NoGcPolicy(),
+		},
+		{
+			"maxage=3h",
+			bigtable.MaxAgePolicy(3 * time.Hour),
+		},
+		{
+			"maxversions=2",
+			bigtable.MaxVersionsPolicy(2),
+		},
+		{
+			"maxversions=2 and maxage=1h",
+			bigtable.IntersectionPolicy(bigtable.MaxVersionsPolicy(2), bigtable.MaxAgePolicy(time.Hour)),
+		},
+		{
+			"(((maxversions=2 and (maxage=1h))))",
+			bigtable.IntersectionPolicy(bigtable.MaxVersionsPolicy(2), bigtable.MaxAgePolicy(time.Hour)),
+		},
+		{
+			"maxversions=7 or maxage=8h",
+			bigtable.UnionPolicy(bigtable.MaxVersionsPolicy(7), bigtable.MaxAgePolicy(8*time.Hour)),
+		},
+		{
+			"maxversions = 7||maxage = 8h",
+			bigtable.UnionPolicy(bigtable.MaxVersionsPolicy(7), bigtable.MaxAgePolicy(8*time.Hour)),
+		},
+		{
+			"maxversions=7||maxage=8h",
+			bigtable.UnionPolicy(bigtable.MaxVersionsPolicy(7), bigtable.MaxAgePolicy(8*time.Hour)),
+		},
+		{
+			"maxage=30d || (maxage=3d && maxversions=100)",
+			bigtable.UnionPolicy(
+				bigtable.MaxAgePolicy(30*24*time.Hour),
+				bigtable.IntersectionPolicy(
+					bigtable.MaxAgePolicy(3*24*time.Hour),
+					bigtable.MaxVersionsPolicy(100))),
+		},
+		{
+			"maxage=30d || (maxage=3d && maxversions=100) || maxversions=7",
+			bigtable.UnionPolicy(
+				bigtable.UnionPolicy(
+					bigtable.MaxAgePolicy(30*24*time.Hour),
+					bigtable.IntersectionPolicy(
+						bigtable.MaxAgePolicy(3*24*time.Hour),
+						bigtable.MaxVersionsPolicy(100))),
+				bigtable.MaxVersionsPolicy(7)),
+		},
+		{
+			// && and || have same precedence, left associativity
+			"maxage=1h && maxage=2h || maxage=3h",
+			bigtable.UnionPolicy(
+				bigtable.IntersectionPolicy(
+					bigtable.MaxAgePolicy(1*time.Hour),
+					bigtable.MaxAgePolicy(2*time.Hour)),
+				bigtable.MaxAgePolicy(3*time.Hour)),
+		},
+	} {
+		got, err := parseGCPolicy(test.in)
+		if err != nil {
+			t.Errorf("%s: %v", test.in, err)
+			continue
+		}
+		if !cmp.Equal(got, test.want, cmp.AllowUnexported(bigtable.IntersectionPolicy(), bigtable.UnionPolicy())) {
+			t.Errorf("%s: got %+v, want %+v", test.in, got, test.want)
+		}
+	}
+}
+
+func TestParseGCPolicyErrors(t *testing.T) {
+	for _, in := range []string{
+		"",
+		"a",
+		"b = 1h",
+		"c = 1",
+		"maxage=1",       // need duration
+		"maxversions=1h", // need int
+		"maxage",
+		"maxversions",
+		"never=never",
+		"maxversions=1 && never",
+		"(((maxage=1h))",
+		"((maxage=1h)))",
+		"maxage=30d || ((maxage=3d && maxversions=100)",
+		"maxversions = 3 and",
+	} {
+		_, err := parseGCPolicy(in)
+		if err == nil {
+			t.Errorf("%s: got nil, want error", in)
+		}
+	}
+}
+
+func TestTokenizeGCPolicy(t *testing.T) {
+	for _, test := range []struct {
+		in   string
+		want []string
+	}{
+		{
+			"maxage=5d",
+			[]string{"maxage", "=", "5d"},
+		},
+		{
+			"maxage = 5d",
+			[]string{"maxage", "=", "5d"},
+		},
+		{
+			"maxage=5d or maxversions=5",
+			[]string{"maxage", "=", "5d", "or", "maxversions", "=", "5"},
+		},
+		{
+			"maxage=5d || (maxversions=5)",
+			[]string{"maxage", "=", "5d", "||", "(", "maxversions", "=", "5", ")"},
+		},
+		{
+			"maxage=5d||( maxversions=5 )",
+			[]string{"maxage", "=", "5d", "||", "(", "maxversions", "=", "5", ")"},
+		},
+	} {
+		got, err := tokenizeGCPolicy(test.in)
+		if err != nil {
+			t.Errorf("%s: %v", test.in, err)
+			continue
+		}
+		if diff := cmp.Diff(got, test.want); diff != "" {
+			t.Errorf("%s: %s", test.in, diff)
+		}
+	}
+}
+
+func TestTokenizeGCPolicyErrors(t *testing.T) {
+	for _, in := range []string{
+		"a &",
+		"a & b",
+		"a &x b",
+		"a |",
+		"a | b",
+		"a |& b",
+		"a % b",
+	} {
+		_, err := tokenizeGCPolicy(in)
+		if err == nil {
+			t.Errorf("%s: got nil, want error", in)
+		}
+	}
+}
+
+func tokenizeGCPolicy(s string) ([]string, error) {
+	var tokens []string
+	r := strings.NewReader(s)
+	for {
+		tok, err := getToken(r)
+		if err != nil {
+			return nil, err
+		}
+		if tok == "" {
+			break
+		}
+		tokens = append(tokens, tok)
+	}
+	return tokens, nil
+}
10 vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go generated vendored
@@ -31,10 +31,18 @@ var (
 	port = flag.Int("port", 9000, "the port number to bind to on the local machine")
 )
 
+const (
+	maxMsgSize = 256 * 1024 * 1024 // 256 MiB
+)
+
 func main() {
 	grpc.EnableTracing = false
 	flag.Parse()
-	srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port))
+	opts := []grpc.ServerOption{
+		grpc.MaxRecvMsgSize(maxMsgSize),
+		grpc.MaxSendMsgSize(maxMsgSize),
+	}
+	srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port), opts...)
 	if err != nil {
 		log.Fatalf("failed to start emulator: %v", err)
 	}
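Editor's note: a minimal sketch (not part of this commit) of pointing the Go client at the emulator started above. The client honors the BIGTABLE_EMULATOR_HOST environment variable; the project and instance names are illustrative placeholders.

	package main

	import (
		"context"
		"log"
		"os"

		"cloud.google.com/go/bigtable"
	)

	func main() {
		// With the env var set, NewClient dials the emulator instead of the real service.
		os.Setenv("BIGTABLE_EMULATOR_HOST", "localhost:9000")
		client, err := bigtable.NewClient(context.Background(), "fake-project", "fake-instance")
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()
	}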
2 vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go generated vendored
@@ -21,6 +21,7 @@ package main
 
 import (
 	"bytes"
+	"context"
 	"flag"
 	"fmt"
 	"log"
@@ -34,7 +35,6 @@ import (
 	"cloud.google.com/go/bigtable"
 	"cloud.google.com/go/bigtable/internal/cbtconfig"
 	"cloud.google.com/go/bigtable/internal/stat"
-	"golang.org/x/net/context"
 	"google.golang.org/api/option"
 	"google.golang.org/grpc"
 )
2 vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go generated vendored
@@ -22,6 +22,7 @@ package main
 
 import (
 	"bytes"
+	"context"
 	"flag"
 	"fmt"
 	"log"
@@ -35,7 +36,6 @@ import (
 	"cloud.google.com/go/bigtable"
 	"cloud.google.com/go/bigtable/internal/cbtconfig"
 	"cloud.google.com/go/bigtable/internal/stat"
-	"golang.org/x/net/context"
 )
 
 var (
2 vendor/cloud.google.com/go/bigtable/doc.go generated vendored
@@ -35,7 +35,7 @@ is the simplest option. Those credentials will be used by default when NewClient
 
 To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource.
 For instance, you can use service account credentials by visiting
-https://cloud.google.com/console/project/MYPROJECT/apiui/credential,
+https://cloud.google.com/console/project/_/apiui/credential,
 creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing
 	jsonKey, err := ioutil.ReadFile(pathToKeyFile)
 	...
17 vendor/cloud.google.com/go/bigtable/export_test.go generated vendored
@@ -17,6 +17,7 @@ limitations under the License.
 package bigtable
 
 import (
+	"context"
 	"errors"
 	"flag"
 	"fmt"
@@ -24,7 +25,6 @@ import (
 	"time"
 
 	"cloud.google.com/go/bigtable/bttest"
-	"golang.org/x/net/context"
 	"google.golang.org/api/option"
 	"google.golang.org/grpc"
 )
@@ -85,9 +85,8 @@ func NewIntegrationEnv() (IntegrationEnv, error) {
 
 	if integrationConfig.UseProd {
 		return NewProdEnv(c)
-	} else {
-		return NewEmulatedEnv(c)
 	}
+	return NewEmulatedEnv(c)
 }
 
 // EmulatedEnv encapsulates the state of an emulator
@@ -190,33 +189,27 @@ func (e *ProdEnv) Config() IntegrationTestConfig {
 
 // NewAdminClient builds a new connected admin client for this environment
 func (e *ProdEnv) NewAdminClient() (*AdminClient, error) {
-	timeout := 20 * time.Second
-	ctx, _ := context.WithTimeout(context.Background(), timeout)
 	var clientOpts []option.ClientOption
 	if endpoint := e.config.AdminEndpoint; endpoint != "" {
 		clientOpts = append(clientOpts, option.WithEndpoint(endpoint))
 	}
-	return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...)
+	return NewAdminClient(context.Background(), e.config.Project, e.config.Instance, clientOpts...)
 }
 
 // NewInstanceAdminClient returns a new connected instance admin client for this environment
 func (e *ProdEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
-	timeout := 20 * time.Second
-	ctx, _ := context.WithTimeout(context.Background(), timeout)
 	var clientOpts []option.ClientOption
 	if endpoint := e.config.AdminEndpoint; endpoint != "" {
 		clientOpts = append(clientOpts, option.WithEndpoint(endpoint))
 	}
-	return NewInstanceAdminClient(ctx, e.config.Project, clientOpts...)
+	return NewInstanceAdminClient(context.Background(), e.config.Project, clientOpts...)
 }
 
 // NewClient builds a connected data client for this environment
 func (e *ProdEnv) NewClient() (*Client, error) {
-	timeout := 20 * time.Second
-	ctx, _ := context.WithTimeout(context.Background(), timeout)
 	var clientOpts []option.ClientOption
 	if endpoint := e.config.DataEndpoint; endpoint != "" {
 		clientOpts = append(clientOpts, option.WithEndpoint(endpoint))
 	}
-	return NewClient(ctx, e.config.Project, e.config.Instance, clientOpts...)
+	return NewClient(context.Background(), e.config.Project, e.config.Instance, clientOpts...)
}
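Editor's note: the dropped pattern `ctx, _ := context.WithTimeout(...)` discards the CancelFunc, so the timeout's timer is never released early and go vet's lostcancel check flags it. Where a bounded wait is genuinely wanted, a sketch of the idiomatic form, reusing identifiers from the surrounding file:

	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel() // release the timer as soon as the caller returns
	return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...)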
15 vendor/cloud.google.com/go/bigtable/filter.go generated vendored
@@ -314,4 +314,17 @@ func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter {
 	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{CellsPerRowLimitFilter: int32(clf)}}
 }
 
-// TODO(dsymonds): More filters: sampling
+// RowSampleFilter returns a filter that matches a row with a probability of p (must be in the interval (0, 1)).
+func RowSampleFilter(p float64) Filter {
+	return rowSampleFilter(p)
+}
+
+type rowSampleFilter float64
+
+func (rsf rowSampleFilter) String() string {
+	return fmt.Sprintf("filter(%f)", rsf)
+}
+
+func (rsf rowSampleFilter) proto() *btpb.RowFilter {
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_RowSampleFilter{RowSampleFilter: float64(rsf)}}
+}
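Editor's note: a sketch of the new sampling filter applied to a read; the table handle, context, and sampling rate are illustrative, not from this commit.

	// Read roughly 1% of rows, sampled server-side.
	err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(r bigtable.Row) bool {
		return true // process the sampled row
	}, bigtable.RowFilter(bigtable.RowSampleFilter(0.01)))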
11 vendor/cloud.google.com/go/bigtable/gc.go generated vendored
@@ -130,10 +130,19 @@ func (ma maxAgePolicy) proto() *bttdpb.GcRule {
 	}
 }
 
+type noGCPolicy struct{}
+
+func (n noGCPolicy) String() string { return "" }
+
+func (n noGCPolicy) proto() *bttdpb.GcRule { return &bttdpb.GcRule{Rule: nil} }
+
+// NoGcPolicy applies to all cells setting maxage and maxversions to nil implies no gc policies
+func NoGcPolicy() GCPolicy { return noGCPolicy{} }
+
 // GCRuleToString converts the given GcRule proto to a user-visible string.
 func GCRuleToString(rule *bttdpb.GcRule) string {
 	if rule == nil {
-		return "<default>"
+		return "<never>"
 	}
 	switch r := rule.Rule.(type) {
 	case *bttdpb.GcRule_MaxNumVersions:
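Editor's note: a sketch of applying the new never-collect policy through the admin API; the admin client, table, and family names are illustrative.

	// Keep every cell forever: no age- or version-based GC for this family.
	if err := adminClient.SetGCPolicy(ctx, "my-table", "my-family", bigtable.NoGcPolicy()); err != nil {
		log.Fatal(err)
	}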
68 vendor/cloud.google.com/go/bigtable/go18.go generated vendored
@@ -1,68 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.8
-
-package bigtable
-
-import (
-	"fmt"
-
-	"go.opencensus.io/plugin/ocgrpc"
-	"go.opencensus.io/trace"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/grpc"
-)
-
-func openCensusOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})),
-	}
-}
-
-func traceStartSpan(ctx context.Context, name string) context.Context {
-	ctx, _ = trace.StartSpan(ctx, name)
-	return ctx
-}
-
-func traceEndSpan(ctx context.Context, err error) {
-	span := trace.FromContext(ctx)
-	if err != nil {
-		span.SetStatus(trace.Status{Message: err.Error()})
-	}
-
-	span.End()
-}
-
-func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
-	var attrs []trace.Attribute
-	for k, v := range attrMap {
-		var a trace.Attribute
-		switch v := v.(type) {
-		case string:
-			a = trace.StringAttribute(k, v)
-		case bool:
-			a = trace.BoolAttribute(k, v)
-		case int:
-			a = trace.Int64Attribute(k, int64(v))
-		case int64:
-			a = trace.Int64Attribute(k, v)
-		default:
-			a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
-		}
-		attrs = append(attrs, a)
-	}
-	trace.FromContext(ctx).Annotatef(attrs, format, args...)
-}
14 vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go generated vendored
@@ -50,14 +50,19 @@ type Config struct {
 	TLSCreds credentials.TransportCredentials // derived
 }
 
+// RequiredFlags describes the flag requirements for a cbt command.
 type RequiredFlags uint
 
-const NoneRequired RequiredFlags = 0
 const (
+	// NoneRequired specifies that not flags are required.
+	NoneRequired RequiredFlags = 0
+	// ProjectRequired specifies that the -project flag is required.
 	ProjectRequired RequiredFlags = 1 << iota
+	// InstanceRequired specifies that the -instance flag is required.
 	InstanceRequired
+	// ProjectAndInstanceRequired specifies that both -project and -instance is required.
+	ProjectAndInstanceRequired = ProjectRequired | InstanceRequired
 )
-const ProjectAndInstanceRequired RequiredFlags = ProjectRequired | InstanceRequired
 
 // RegisterFlags registers a set of standard flags for this config.
 // It should be called before flag.Parse.
@@ -152,15 +157,18 @@ func Load() (*Config, error) {
 	return c, s.Err()
 }
 
+// GcloudCredential holds gcloud credential information.
 type GcloudCredential struct {
 	AccessToken string    `json:"access_token"`
 	Expiry      time.Time `json:"token_expiry"`
 }
 
+// Token creates an oauth2 token using gcloud credentials.
 func (cred *GcloudCredential) Token() *oauth2.Token {
 	return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry}
 }
 
+// GcloudConfig holds gcloud configuration values.
 type GcloudConfig struct {
 	Configuration struct {
 		Properties struct {
@@ -172,6 +180,8 @@ type GcloudConfig struct {
 	Credential GcloudCredential `json:"credential"`
 }
 
+// GcloudCmdTokenSource holds the comamnd arguments. It is only intended to be set by the program.
+// TODO(deklerk) Can this be unexported?
 type GcloudCmdTokenSource struct {
 	Command string
 	Args    []string
12 vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go generated vendored
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// This is ia snapshot from github.com/googleapis/gax-go with minor modifications.
+// Package gax is a snapshot from github.com/googleapis/gax-go/v2 with minor modifications.
 package gax
 
 import (
@@ -23,12 +23,14 @@ import (
 	"google.golang.org/grpc/codes"
 )
 
+// CallOption is a generic interface for modifying the behavior of outbound calls.
 type CallOption interface {
 	Resolve(*CallSettings)
 }
 
 type callOptions []CallOption
 
+// Resolve resolves all call options individually.
 func (opts callOptions) Resolve(s *CallSettings) *CallSettings {
 	for _, opt := range opts {
 		opt.Resolve(s)
@@ -36,30 +38,32 @@ func (opts callOptions) Resolve(s *CallSettings) *CallSettings {
 	return s
 }
 
-// Encapsulates the call settings for a particular API call.
+// CallSettings encapsulates the call settings for a particular API call.
 type CallSettings struct {
 	Timeout       time.Duration
 	RetrySettings RetrySettings
 }
 
-// Per-call configurable settings for retrying upon transient failure.
+// RetrySettings are per-call configurable settings for retrying upon transient failure.
 type RetrySettings struct {
 	RetryCodes      map[codes.Code]bool
 	BackoffSettings BackoffSettings
 }
 
-// Parameters to the exponential backoff algorithm for retrying.
+// BackoffSettings are parameters to the exponential backoff algorithm for retrying.
 type BackoffSettings struct {
 	DelayTimeoutSettings MultipliableDuration
 	RPCTimeoutSettings   MultipliableDuration
 }
 
+// MultipliableDuration defines parameters for backoff settings.
 type MultipliableDuration struct {
 	Initial    time.Duration
 	Max        time.Duration
 	Multiplier float64
 }
 
+// Resolve merges the receiver CallSettings into the given CallSettings.
 func (w CallSettings) Resolve(s *CallSettings) {
 	s.Timeout = w.Timeout
 	s.RetrySettings = w.RetrySettings
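Editor's note: this gax package is internal to the bigtable client, but for orientation, the settings types documented above compose like this; all values are illustrative.

	settings := gax.CallSettings{
		Timeout: time.Minute,
		RetrySettings: gax.RetrySettings{
			RetryCodes: map[codes.Code]bool{codes.Unavailable: true},
			BackoffSettings: gax.BackoffSettings{
				// Delay between attempts grows from 100ms toward 30s, doubling each retry.
				DelayTimeoutSettings: gax.MultipliableDuration{Initial: 100 * time.Millisecond, Max: 30 * time.Second, Multiplier: 2},
				// Per-RPC timeout stays fixed at 30s (multiplier 1).
				RPCTimeoutSettings: gax.MultipliableDuration{Initial: 30 * time.Second, Max: time.Minute, Multiplier: 1},
			},
		},
	}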
14 vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go generated vendored
@@ -14,24 +14,24 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// This is ia snapshot from github.com/googleapis/gax-go with minor modifications.
+// Package gax is a snapshot from github.com/googleapis/gax-go/v2 with minor modifications.
 package gax
 
 import (
+	"context"
+	"log"
 	"math/rand"
+	"os"
 	"time"
 
-	"log"
-	"os"
-
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 )
 
-var Logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags)
+// Logger is a logger that logs to stderr.
+var Logger = log.New(os.Stderr, "", log.LstdFlags)
 
-// A user defined call stub.
+// APICall is a user defined call stub.
 type APICall func(context.Context) error
 
 // scaleDuration returns the product of a and mult.
2 vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go generated vendored
@@ -16,10 +16,10 @@ limitations under the License.
 package gax
 
 import (
+	"context"
 	"testing"
 	"time"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
1 vendor/cloud.google.com/go/bigtable/internal/stat/stats.go generated vendored
@@ -70,6 +70,7 @@ func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool)
 	return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true
 }
 
+// Aggregate is an aggregate of latencies.
 type Aggregate struct {
 	Name          string
 	Count, Errors int
36 vendor/cloud.google.com/go/bigtable/not_go18.go generated vendored
@@ -1,36 +0,0 @@
-// Copyright 2017 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.8
-
-package bigtable
-
-import (
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-)
-
-// OpenCensus only supports go 1.8 and higher.
-
-func openCensusOptions() []option.ClientOption { return nil }
-
-func traceStartSpan(ctx context.Context, _ string) context.Context {
-	return ctx
-}
-
-func traceEndSpan(context.Context, error) {
-}
-
-func tracePrintf(context.Context, map[string]interface{}, string, ...interface{}) {
-}
3 vendor/cloud.google.com/go/bigtable/reader.go generated vendored
@@ -152,9 +152,8 @@ func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row
 
 		if cc.GetCommitRow() {
 			return cr.commitRow()
-		} else {
-			cr.state = rowInProgress
 		}
+		cr.state = rowInProgress
 	}
 
 	return nil
5 vendor/cloud.google.com/go/bigtable/reader_test.go generated vendored
@@ -24,7 +24,6 @@ import (
 	"testing"
 
 	"cloud.google.com/go/internal/testutil"
-
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes/wrappers"
 	btspb "google.golang.org/genproto/googleapis/bigtable/v2"
@@ -226,13 +225,13 @@ type TestResult struct {
 }
 
 func TestAcceptance(t *testing.T) {
-	testJson, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json")
+	testJSON, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json")
 	if err != nil {
 		t.Fatalf("could not open acceptance test file %v", err)
 	}
 
 	var accTest AcceptanceTest
-	err = json.Unmarshal(testJson, &accTest)
+	err = json.Unmarshal(testJSON, &accTest)
 	if err != nil {
 		t.Fatalf("could not parse acceptance test file: %v", err)
 	}
4 vendor/cloud.google.com/go/bigtable/retry_test.go generated vendored
@@ -16,6 +16,7 @@ limitations under the License.
 package bigtable
 
 import (
+	"context"
 	"strings"
 	"testing"
 	"time"
@@ -25,7 +26,6 @@ import (
 	"cloud.google.com/go/internal/testutil"
 	"github.com/golang/protobuf/ptypes/wrappers"
 	"github.com/google/go-cmp/cmp"
-	"golang.org/x/net/context"
 	"google.golang.org/api/option"
 	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
 	rpcpb "google.golang.org/genproto/googleapis/rpc/status"
@@ -113,7 +113,7 @@ func TestRetryApply(t *testing.T) {
 	mutTrue.DeleteRow()
 	mutFalse := NewMutation()
 	mutFalse.Set("cf", "col", 1000, []byte("val"))
-	condMut := NewCondMutation(ValueFilter("."), mutTrue, mutFalse)
+	condMut := NewCondMutation(ValueFilter(".*"), mutTrue, mutFalse)
 
 	errCount = 0
 	code = codes.Unavailable // Will be retried
57 vendor/cloud.google.com/go/bigtable/trace.go generated vendored Normal file
@@ -0,0 +1,57 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigtable
+
+import (
+    "context"
+    "fmt"
+
+    "go.opencensus.io/trace"
+)
+
+func traceStartSpan(ctx context.Context, name string) context.Context {
+    ctx, _ = trace.StartSpan(ctx, name)
+    return ctx
+}
+
+func traceEndSpan(ctx context.Context, err error) {
+    span := trace.FromContext(ctx)
+    if err != nil {
+        span.SetStatus(trace.Status{Message: err.Error()})
+    }
+
+    span.End()
+}
+
+func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
+    var attrs []trace.Attribute
+    for k, v := range attrMap {
+        var a trace.Attribute
+        switch v := v.(type) {
+        case string:
+            a = trace.StringAttribute(k, v)
+        case bool:
+            a = trace.BoolAttribute(k, v)
+        case int:
+            a = trace.Int64Attribute(k, int64(v))
+        case int64:
+            a = trace.Int64Attribute(k, v)
+        default:
+            a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
+        }
+        attrs = append(attrs, a)
+    }
+    trace.FromContext(ctx).Annotatef(attrs, format, args...)
+}
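The new trace.go wires Bigtable operations into OpenCensus. A small self-contained sketch of the call pattern these helpers enable; doWork and the span name are invented for illustration, using only the public OpenCensus API:

package main

import (
	"context"

	"go.opencensus.io/trace"
)

// doWork mirrors how the bigtable package uses the helpers above:
// start a span, record any error in the span status, then end it.
func doWork(ctx context.Context) (err error) {
	ctx, _ = trace.StartSpan(ctx, "example.com/doWork") // traceStartSpan
	defer func() { // traceEndSpan
		span := trace.FromContext(ctx)
		if err != nil {
			span.SetStatus(trace.Status{Message: err.Error()})
		}
		span.End()
	}()
	// ... operation body ...
	return nil
}

func main() {
	_ = doWork(context.Background())
}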
7 vendor/cloud.google.com/go/cloud.go generated vendored
@@ -17,6 +17,7 @@ Package cloud is the root of the packages used to access Google Cloud
 Services. See https://godoc.org/cloud.google.com/go for a full list
 of sub-packages.
 
+
 Client Options
 
 All clients in sub-packages are configurable via client options. These options are
@@ -54,7 +55,7 @@ underlying HTTP transport to cache connections for later re-use. These are cached
 the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in
 http.DefaultTransport.
 
-For gPRC clients (all others in this repo), connection pooling is configurable. Users
+For gRPC clients (all others in this repo), connection pooling is configurable. Users
 of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client
 option to NewClient calls. This configures the underlying gRPC connections to be
 pooled and addressed in a round robin fashion.
@@ -63,14 +64,16 @@ pooled and addressed in a round robin fashion.
 Using the Libraries with Docker
 
 Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to
-hang, because gRPC retries indefinitely. See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/928
+hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928
 for more information.
 
+
 Debugging
 
 To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See
 https://godoc.org/google.golang.org/grpc/grpclog for more information.
 
 For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".
+
 */
 package cloud // import "cloud.google.com/go"
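The cloud.go doc comment above describes gRPC connection pooling via option.WithGRPCConnectionPool. A hedged sketch of passing that option to a gRPC-based client; pubsub is just one example of such a client, and the project ID is a placeholder:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Pool four gRPC connections, addressed round-robin as described above.
	client, err := pubsub.NewClient(ctx, "my-project", option.WithGRPCConnectionPool(4))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}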
184 vendor/cloud.google.com/go/cloudtasks/apiv2beta2/cloud_tasks_client.go generated vendored
@@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC
+// Copyright 2019 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,18 +12,18 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// AUTO-GENERATED CODE. DO NOT EDIT.
+// Code generated by gapic-generator. DO NOT EDIT.
 
 package cloudtasks
 
 import (
+    "context"
     "fmt"
     "math"
     "time"
 
-    "cloud.google.com/go/internal/version"
-    gax "github.com/googleapis/gax-go"
-    "golang.org/x/net/context"
+    "github.com/golang/protobuf/proto"
+    gax "github.com/googleapis/gax-go/v2"
     "google.golang.org/api/iterator"
     "google.golang.org/api/option"
     "google.golang.org/api/transport"
@@ -85,7 +85,7 @@ func defaultCallOptions() *CallOptions {
         GetQueue:    retry[[2]string{"default", "idempotent"}],
         CreateQueue: retry[[2]string{"default", "non_idempotent"}],
         UpdateQueue: retry[[2]string{"default", "non_idempotent"}],
-        DeleteQueue: retry[[2]string{"default", "non_idempotent"}],
+        DeleteQueue: retry[[2]string{"default", "idempotent"}],
         PurgeQueue:  retry[[2]string{"default", "non_idempotent"}],
        PauseQueue:  retry[[2]string{"default", "non_idempotent"}],
         ResumeQueue: retry[[2]string{"default", "non_idempotent"}],
@@ -155,8 +155,8 @@ func (c *Client) Close() error {
 // the `x-goog-api-client` header passed on each request. Intended for
 // use by Google-written clients.
 func (c *Client) setGoogleClientInfo(keyval ...string) {
-    kv := append([]string{"gl-go", version.Go()}, keyval...)
-    kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+    kv := append([]string{"gl-go", versionGo()}, keyval...)
+    kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
     c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
 }
@@ -168,6 +168,7 @@ func (c *Client) ListQueues(ctx context.Context, req *taskspb.ListQueuesRequest,
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.ListQueues[0:len(c.CallOptions.ListQueues):len(c.CallOptions.ListQueues)], opts...)
     it := &QueueIterator{}
+    req = proto.Clone(req).(*taskspb.ListQueuesRequest)
     it.InternalFetch = func(pageSize int, pageToken string) ([]*taskspb.Queue, string, error) {
         var resp *taskspb.ListQueuesResponse
         req.PageToken = pageToken
@@ -195,6 +196,7 @@ func (c *Client) ListQueues(ctx context.Context, req *taskspb.ListQueuesRequest,
         return nextPageToken, nil
     }
     it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+    it.pageInfo.MaxSize = int(req.PageSize)
     return it
 }
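The proto.Clone and PageInfo.MaxSize additions make ListQueues safe against callers mutating the request after the call, and make the requested page size stick across pages. Consuming the iterator follows the standard google.golang.org/api/iterator pattern; a sketch with a placeholder parent name:

package main

import (
	"context"
	"log"

	cloudtasks "cloud.google.com/go/cloudtasks/apiv2beta2"
	"google.golang.org/api/iterator"
	taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
)

func main() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	it := c.ListQueues(ctx, &taskspb.ListQueuesRequest{
		Parent: "projects/my-project/locations/us-central1", // placeholder
	})
	for {
		q, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		_ = q // use the queue
	}
}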
@@ -218,14 +220,15 @@ func (c *Client) GetQueue(ctx context.Context, req *taskspb.GetQueueRequest, opt
 // CreateQueue creates a queue.
 //
 // Queues created with this method allow tasks to live for a maximum of 31
-// days. After a task is 31 days old, the task will be deleted regardless of whether
-// it was dispatched or not.
+// days. After a task is 31 days old, the task will be deleted regardless of
+// whether it was dispatched or not.
 //
 // WARNING: Using this method may have unintended side effects if you are
 // using an App Engine queue.yaml or queue.xml file to manage your queues.
 // Read
-// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
-// before using this method.
+// Overview of Queue Management and
+// queue.yaml (at https://cloud.google.com/tasks/docs/queue-yaml) before using
+// this method.
 func (c *Client) CreateQueue(ctx context.Context, req *taskspb.CreateQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -248,14 +251,15 @@ func (c *Client) CreateQueue(ctx context.Context, req *taskspb.CreateQueueReques
 // the queue if it does exist.
 //
 // Queues created with this method allow tasks to live for a maximum of 31
-// days. After a task is 31 days old, the task will be deleted regardless of whether
-// it was dispatched or not.
+// days. After a task is 31 days old, the task will be deleted regardless of
+// whether it was dispatched or not.
 //
 // WARNING: Using this method may have unintended side effects if you are
 // using an App Engine queue.yaml or queue.xml file to manage your queues.
 // Read
-// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
-// before using this method.
+// Overview of Queue Management and
+// queue.yaml (at https://cloud.google.com/tasks/docs/queue-yaml) before using
+// this method.
 func (c *Client) UpdateQueue(ctx context.Context, req *taskspb.UpdateQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "queue.name", req.GetQueue().GetName()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -282,8 +286,9 @@ func (c *Client) UpdateQueue(ctx context.Context, req *taskspb.UpdateQueueReques
 // WARNING: Using this method may have unintended side effects if you are
 // using an App Engine queue.yaml or queue.xml file to manage your queues.
 // Read
-// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
-// before using this method.
+// Overview of Queue Management and
+// queue.yaml (at https://cloud.google.com/tasks/docs/queue-yaml) before using
+// this method.
 func (c *Client) DeleteQueue(ctx context.Context, req *taskspb.DeleteQueueRequest, opts ...gax.CallOption) error {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -322,9 +327,10 @@ func (c *Client) PurgeQueue(ctx context.Context, req *taskspb.PurgeQueueRequest,
 //
 // If a queue is paused then the system will stop dispatching tasks
 // until the queue is resumed via
-// [ResumeQueue][google.cloud.tasks.v2beta2.CloudTasks.ResumeQueue]. Tasks can still be added
-// when the queue is paused. A queue is paused if its
-// [state][google.cloud.tasks.v2beta2.Queue.state] is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
+// [ResumeQueue][google.cloud.tasks.v2beta2.CloudTasks.ResumeQueue]. Tasks can
+// still be added when the queue is paused. A queue is paused if its
+// [state][google.cloud.tasks.v2beta2.Queue.state] is
+// [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
 func (c *Client) PauseQueue(ctx context.Context, req *taskspb.PauseQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
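A hedged sketch of calling CreateQueue, following the pattern of the example_test file further down this diff; the parent and queue names are placeholders:

package main

import (
	"context"
	"log"

	cloudtasks "cloud.google.com/go/cloudtasks/apiv2beta2"
	taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
)

func main() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// The queue's Name must live under the Parent location.
	q, err := c.CreateQueue(ctx, &taskspb.CreateQueueRequest{
		Parent: "projects/my-project/locations/us-central1",
		Queue: &taskspb.Queue{
			Name: "projects/my-project/locations/us-central1/queues/my-queue",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = q
}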
@@ -345,14 +351,17 @@ func (c *Client) PauseQueue(ctx context.Context, req *taskspb.PauseQueueRequest,
 //
 // This method resumes a queue after it has been
 // [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED] or
-// [DISABLED][google.cloud.tasks.v2beta2.Queue.State.DISABLED]. The state of a queue is stored
-// in the queue's [state][google.cloud.tasks.v2beta2.Queue.state]; after calling this method it
-// will be set to [RUNNING][google.cloud.tasks.v2beta2.Queue.State.RUNNING].
+// [DISABLED][google.cloud.tasks.v2beta2.Queue.State.DISABLED]. The state of a
+// queue is stored in the queue's
+// [state][google.cloud.tasks.v2beta2.Queue.state]; after calling this method
+// it will be set to
+// [RUNNING][google.cloud.tasks.v2beta2.Queue.State.RUNNING].
 //
 // WARNING: Resuming many high-QPS queues at the same time can
 // lead to target overloading. If you are resuming high-QPS
 // queues, follow the 500/50/5 pattern described in
-// Managing Cloud Tasks Scaling Risks (at /cloud-tasks/pdfs/managing-cloud-tasks-scaling-risks-2017-06-05.pdf).
+// Managing Cloud Tasks Scaling
+// Risks (at https://cloud.google.com/tasks/docs/manage-cloud-task-scaling).
 func (c *Client) ResumeQueue(ctx context.Context, req *taskspb.ResumeQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -369,12 +378,13 @@ func (c *Client) ResumeQueue(ctx context.Context, req *taskspb.ResumeQueueReques
     return resp, nil
 }
 
-// GetIamPolicy gets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue].
-// Returns an empty policy if the resource exists and does not have a policy
-// set.
+// GetIamPolicy gets the access control policy for a
+// [Queue][google.cloud.tasks.v2beta2.Queue]. Returns an empty policy if the
+// resource exists and does not have a policy set.
 //
-// Authorization requires the following Google IAM (at /iam) permission on the
-// specified resource parent:
+// Authorization requires the following
+// Google IAM (at https://cloud.google.com/iam) permission on the specified
+// resource parent:
 //
 // cloudtasks.queues.getIamPolicy
 func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
@@ -393,14 +403,15 @@ func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyReques
     return resp, nil
 }
 
-// SetIamPolicy sets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue]. Replaces any existing
-// policy.
+// SetIamPolicy sets the access control policy for a
+// [Queue][google.cloud.tasks.v2beta2.Queue]. Replaces any existing policy.
 //
 // Note: The Cloud Console does not check queue-level IAM permissions yet.
 // Project-level permissions are required to use the Cloud Console.
 //
-// Authorization requires the following Google IAM (at /iam) permission on the
-// specified resource parent:
+// Authorization requires the following
+// Google IAM (at https://cloud.google.com/iam) permission on the specified
+// resource parent:
 //
 // cloudtasks.queues.setIamPolicy
 func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
@@ -419,9 +430,10 @@ func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReques
     return resp, nil
 }
 
-// TestIamPermissions returns permissions that a caller has on a [Queue][google.cloud.tasks.v2beta2.Queue].
-// If the resource does not exist, this will return an empty set of
-// permissions, not a [NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
+// TestIamPermissions returns permissions that a caller has on a
+// [Queue][google.cloud.tasks.v2beta2.Queue]. If the resource does not exist,
+// this will return an empty set of permissions, not a
+// [NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
 //
 // Note: This operation is designed to be used for building permission-aware
 // UIs and command-line tools, not for authorization checking. This operation
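A sketch of the permission check that the TestIamPermissions doc describes, using the iampb request type this file already imports; the resource and permission strings are placeholders:

package main

import (
	"context"
	"log"

	cloudtasks "cloud.google.com/go/cloudtasks/apiv2beta2"
	iampb "google.golang.org/genproto/googleapis/iam/v1"
)

func main() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := c.TestIamPermissions(ctx, &iampb.TestIamPermissionsRequest{
		Resource:    "projects/my-project/locations/us-central1/queues/my-queue",
		Permissions: []string{"cloudtasks.queues.getIamPolicy"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// resp.Permissions holds the subset the caller actually has;
	// a missing resource yields an empty set rather than NOT_FOUND.
	_ = resp.Permissions
}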
@@ -444,15 +456,19 @@ func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermi
 
 // ListTasks lists the tasks in a queue.
 //
-// By default, only the [BASIC][google.cloud.tasks.v2beta2.Task.View.BASIC] view is retrieved
-// due to performance considerations;
-// [response_view][google.cloud.tasks.v2beta2.ListTasksRequest.response_view] controls the
-// subset of information which is returned.
+// By default, only the [BASIC][google.cloud.tasks.v2beta2.Task.View.BASIC]
+// view is retrieved due to performance considerations;
+// [response_view][google.cloud.tasks.v2beta2.ListTasksRequest.response_view]
+// controls the subset of information which is returned.
+//
+// The tasks may be returned in any order. The ordering may change at any
+// time.
 func (c *Client) ListTasks(ctx context.Context, req *taskspb.ListTasksRequest, opts ...gax.CallOption) *TaskIterator {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
     opts = append(c.CallOptions.ListTasks[0:len(c.CallOptions.ListTasks):len(c.CallOptions.ListTasks)], opts...)
     it := &TaskIterator{}
+    req = proto.Clone(req).(*taskspb.ListTasksRequest)
     it.InternalFetch = func(pageSize int, pageToken string) ([]*taskspb.Task, string, error) {
         var resp *taskspb.ListTasksResponse
         req.PageToken = pageToken
@@ -480,6 +496,7 @@ func (c *Client) ListTasks(ctx context.Context, req *taskspb.ListTasksRequest, o
         return nextPageToken, nil
     }
     it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+    it.pageInfo.MaxSize = int(req.PageSize)
     return it
 }
@@ -502,18 +519,14 @@ func (c *Client) GetTask(ctx context.Context, req *taskspb.GetTaskRequest, opts
 
 // CreateTask creates a task and adds it to a queue.
 //
-// To add multiple tasks at the same time, use
-// HTTP batching (at /storage/docs/json_api/v1/how-tos/batch)
-// or the batching documentation for your client library, for example
-// https://developers.google.com/api-client-library/python/guide/batch.
-//
 // Tasks cannot be updated after creation; there is no UpdateTask command.
 //
-// For App Engine queues (at google.cloud.tasks.v2beta2.AppEngineHttpTarget),
-// the maximum task size is 100KB.
+// For [App Engine queues][google.cloud.tasks.v2beta2.AppEngineHttpTarget],
+// the maximum task size is
+// 100KB.
 //
-// For pull queues (at google.cloud.tasks.v2beta2.PullTarget), this
-// the maximum task size is 1MB.
+// For [pull queues][google.cloud.tasks.v2beta2.PullTarget], the maximum
+// task size is 1MB.
 func (c *Client) CreateTask(ctx context.Context, req *taskspb.CreateTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
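The CreateTask doc above distinguishes App Engine targets (100KB max) from pull targets (1MB max). A hedged sketch of creating a pull task with this v2beta2 surface; the oneof wrapper name Task_PullMessage is my reading of the generated types, and all resource names are placeholders:

package main

import (
	"context"
	"log"

	cloudtasks "cloud.google.com/go/cloudtasks/apiv2beta2"
	taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
)

func main() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// A pull task carries its work in the PullMessage payload (max 1MB).
	task, err := c.CreateTask(ctx, &taskspb.CreateTaskRequest{
		Parent: "projects/my-project/locations/us-central1/queues/my-pull-queue",
		Task: &taskspb.Task{
			PayloadType: &taskspb.Task_PullMessage{
				PullMessage: &taskspb.PullMessage{Payload: []byte("work item")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = task
}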
@@ -552,18 +565,19 @@ func (c *Client) DeleteTask(ctx context.Context, req *taskspb.DeleteTaskRequest,
 //
 // This method is invoked by the worker to obtain a lease. The
 // worker must acknowledge the task via
-// [AcknowledgeTask][google.cloud.tasks.v2beta2.CloudTasks.AcknowledgeTask] after they have
-// performed the work associated with the task.
+// [AcknowledgeTask][google.cloud.tasks.v2beta2.CloudTasks.AcknowledgeTask]
+// after they have performed the work associated with the task.
 //
-// The [payload][google.cloud.tasks.v2beta2.PullMessage.payload] is intended to store data that
-// the worker needs to perform the work associated with the task. To
-// return the payloads in the [response][google.cloud.tasks.v2beta2.LeaseTasksResponse], set
-// [response_view][google.cloud.tasks.v2beta2.LeaseTasksRequest.response_view] to
-// [FULL][google.cloud.tasks.v2beta2.Task.View.FULL].
+// The [payload][google.cloud.tasks.v2beta2.PullMessage.payload] is intended
+// to store data that the worker needs to perform the work associated with the
+// task. To return the payloads in the
+// [response][google.cloud.tasks.v2beta2.LeaseTasksResponse], set
+// [response_view][google.cloud.tasks.v2beta2.LeaseTasksRequest.response_view]
+// to [FULL][google.cloud.tasks.v2beta2.Task.View.FULL].
 //
-// A maximum of 10 qps of [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks]
-// requests are allowed per
-// queue. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
+// A maximum of 10 qps of
+// [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks] requests are
+// allowed per queue. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
 // is returned when this limit is
 // exceeded. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
 // is also returned when
@@ -588,21 +602,17 @@ func (c *Client) LeaseTasks(ctx context.Context, req *taskspb.LeaseTasksRequest,
 // AcknowledgeTask acknowledges a pull task.
 //
 // The worker, that is, the entity that
-// [leased][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks] this task must call this method
-// to indicate that the work associated with the task has finished.
+// [leased][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks] this task must
+// call this method to indicate that the work associated with the task has
+// finished.
 //
 // The worker must acknowledge a task within the
-// [lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration] or the lease
-// will expire and the task will become available to be leased
+// [lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration]
+// or the lease will expire and the task will become available to be leased
 // again. After the task is acknowledged, it will not be returned
 // by a later [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks],
 // [GetTask][google.cloud.tasks.v2beta2.CloudTasks.GetTask], or
 // [ListTasks][google.cloud.tasks.v2beta2.CloudTasks.ListTasks].
-//
-// To acknowledge multiple tasks at the same time, use
-// HTTP batching (at /storage/docs/json_api/v1/how-tos/batch)
-// or the batching documentation for your client library, for example
-// https://developers.google.com/api-client-library/python/guide/batch.
 func (c *Client) AcknowledgeTask(ctx context.Context, req *taskspb.AcknowledgeTaskRequest, opts ...gax.CallOption) error {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
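The LeaseTasks/AcknowledgeTask pair above is the pull-queue worker protocol: lease a batch, do the work, acknowledge within the lease duration. A condensed, hedged sketch of one cycle; the queue name, lease duration, and the work itself are placeholders:

package main

import (
	"context"
	"log"

	cloudtasks "cloud.google.com/go/cloudtasks/apiv2beta2"
	"github.com/golang/protobuf/ptypes/duration"
	taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
)

func main() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := c.LeaseTasks(ctx, &taskspb.LeaseTasksRequest{
		Parent:        "projects/my-project/locations/us-central1/queues/my-pull-queue",
		MaxTasks:      10,
		LeaseDuration: &duration.Duration{Seconds: 60},
		ResponseView:  taskspb.Task_FULL, // include payloads, per the doc above
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, task := range resp.Tasks {
		// ... perform the work encoded in the task's pull message payload ...
		err := c.AcknowledgeTask(ctx, &taskspb.AcknowledgeTaskRequest{
			Name:         task.Name,
			ScheduleTime: task.ScheduleTime, // must match the leased task
		})
		if err != nil {
			log.Printf("ack %s: %v", task.Name, err)
		}
	}
}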
@@ -619,7 +629,8 @@ func (c *Client) AcknowledgeTask(ctx context.Context, req *taskspb.AcknowledgeTa
 //
 // The worker can use this method to extend the lease by a new
 // duration, starting from now. The new task lease will be
-// returned in the task's [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time].
+// returned in the task's
+// [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time].
 func (c *Client) RenewLease(ctx context.Context, req *taskspb.RenewLeaseRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -639,9 +650,9 @@ func (c *Client) RenewLease(ctx context.Context, req *taskspb.RenewLeaseRequest,
 // CancelLease cancel a pull task's lease.
 //
 // The worker can use this method to cancel a task's lease by
-// setting its [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] to now. This will
-// make the task available to be leased to the next caller of
-// [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks].
+// setting its [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time]
+// to now. This will make the task available to be leased to the next caller
+// of [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks].
 func (c *Client) CancelLease(ctx context.Context, req *taskspb.CancelLeaseRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -661,30 +672,33 @@ func (c *Client) CancelLease(ctx context.Context, req *taskspb.CancelLeaseReques
 // RunTask forces a task to run now.
 //
 // When this method is called, Cloud Tasks will dispatch the task, even if
-// the task is already running, the queue has reached its [RateLimits][google.cloud.tasks.v2beta2.RateLimits] or
-// is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
+// the task is already running, the queue has reached its
+// [RateLimits][google.cloud.tasks.v2beta2.RateLimits] or is
+// [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
 //
 // This command is meant to be used for manual debugging. For
-// example, [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] can be used to retry a failed
-// task after a fix has been made or to manually force a task to be
-// dispatched now.
+// example, [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] can be
+// used to retry a failed task after a fix has been made or to manually force
+// a task to be dispatched now.
 //
 // The dispatched task is returned. That is, the task that is returned
-// contains the [status][google.cloud.tasks.v2beta2.Task.status] after the task is dispatched but
-// before the task is received by its target.
+// contains the [status][google.cloud.tasks.v2beta2.Task.status] after the
+// task is dispatched but before the task is received by its target.
 //
 // If Cloud Tasks receives a successful response from the task's
 // target, then the task will be deleted; otherwise the task's
-// [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] will be reset to the time that
-// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] was called plus the retry delay specified
-// in the queue's [RetryConfig][google.cloud.tasks.v2beta2.RetryConfig].
+// [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] will be
+// reset to the time that
+// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] was called plus
+// the retry delay specified in the queue's
+// [RetryConfig][google.cloud.tasks.v2beta2.RetryConfig].
 //
 // [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] returns
 // [NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
 // task that has already succeeded or permanently failed.
 //
-// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] cannot be called on a
-// [pull task][google.cloud.tasks.v2beta2.PullMessage].
+// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] cannot be called
+// on a [pull task][google.cloud.tasks.v2beta2.PullMessage].
 func (c *Client) RunTask(ctx context.Context, req *taskspb.RunTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
     md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
     ctx = insertMetadata(ctx, c.xGoogMetadata, md)
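And a sketch of the manual-debugging call that the RunTask doc describes; the task name is a placeholder:

package main

import (
	"context"
	"log"

	cloudtasks "cloud.google.com/go/cloudtasks/apiv2beta2"
	taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
)

func main() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Force-dispatch one task now, e.g. to retry it after a fix.
	task, err := c.RunTask(ctx, &taskspb.RunTaskRequest{
		Name: "projects/my-project/locations/us-central1/queues/my-queue/tasks/my-task",
	})
	if err != nil {
		log.Fatal(err) // NOT_FOUND if the task already succeeded or permanently failed
	}
	_ = task
}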
9 vendor/cloud.google.com/go/cloudtasks/apiv2beta2/cloud_tasks_client_example_test.go generated vendored
@@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC
+// Copyright 2019 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,13 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// AUTO-GENERATED CODE. DO NOT EDIT.
+// Code generated by gapic-generator. DO NOT EDIT.
 
 package cloudtasks_test
 
 import (
-    "cloud.google.com/go/cloudtasks/apiv2beta2"
-    "golang.org/x/net/context"
+    "context"
+
+    cloudtasks "cloud.google.com/go/cloudtasks/apiv2beta2"
     "google.golang.org/api/iterator"
     taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
     iampb "google.golang.org/genproto/googleapis/iam/v1"
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user