diff --git a/go.mod b/go.mod
index 63807a2..a4ee90d 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/mchmarny/vimp
 go 1.20
 
 require (
-	cloud.google.com/go/bigquery v1.52.0
+	cloud.google.com/go/bigquery v1.53.0
 	github.com/Jeffail/gabs/v2 v2.7.0
 	github.com/google/go-containerregistry v0.15.2
 	github.com/jackc/pgx/v5 v5.4.2
diff --git a/go.sum b/go.sum
index 6d23ec5..cba21f7 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q=
 cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
-cloud.google.com/go/bigquery v1.52.0 h1:JKLNdxI0N+TIUWD6t9KN646X27N5dQWq9dZbbTWZ8hc=
-cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4=
+cloud.google.com/go/bigquery v1.53.0 h1:K3wLbjbnSlxhuG5q4pntHv5AEbQM1QqHKGYgwFIqOTg=
+cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4=
 cloud.google.com/go/compute v1.22.0 h1:cB8R6FtUtT1TYGl5R3xuxnW6OUIc/DrT2aiR16TTG7Y=
 cloud.google.com/go/compute v1.22.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
 cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
diff --git a/vendor/cloud.google.com/go/bigquery/CHANGES.md b/vendor/cloud.google.com/go/bigquery/CHANGES.md
index e406bc3..8c0088d 100644
--- a/vendor/cloud.google.com/go/bigquery/CHANGES.md
+++ b/vendor/cloud.google.com/go/bigquery/CHANGES.md
@@ -2,6 +2,21 @@
 
+## [1.53.0](https://github.com/googleapis/google-cloud-go/compare/bigquery/v1.52.0...bigquery/v1.53.0) (2023-07-24)
+
+
+### Features
+
+* **bigquery/analyticshub:** Promote to GA ([130c571](https://github.com/googleapis/google-cloud-go/commit/130c5713dcbac7f670cb92ea113dd53d8029c960))
+* **bigquery/connection:** Add support for Salesforce connections, which are usable only by allowlisted partners ([bac978a](https://github.com/googleapis/google-cloud-go/commit/bac978ace43bb58db7c0b1475e41c8fdf8c49a29))
+* **bigquery/datapolicies:** Promote to GA ([130c571](https://github.com/googleapis/google-cloud-go/commit/130c5713dcbac7f670cb92ea113dd53d8029c960))
+* **bigquery/storage:** Add ResourceExhausted to retryable error for Write API unary calls ([#8214](https://github.com/googleapis/google-cloud-go/issues/8214)) ([8ff13bf](https://github.com/googleapis/google-cloud-go/commit/8ff13bf87397ad524019268c1146e44f3c1cd0e6))
+
+
+### Bug Fixes
+
+* **bigquery/storage/managedwriter:** Context refactoring ([#8275](https://github.com/googleapis/google-cloud-go/issues/8275)) ([c4104ea](https://github.com/googleapis/google-cloud-go/commit/c4104eaab0d7291c15aba37b78e71ce3cbb9f77a))
+
 ## [1.52.0](https://github.com/googleapis/google-cloud-go/compare/bigquery/v1.51.2...bigquery/v1.52.0) (2023-06-23)
-const Version = "1.52.0" +const Version = "1.53.0" diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_write_client.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_write_client.go index 8d5e039..1fe053f 100644 --- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_write_client.go +++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_write_client.go @@ -90,6 +90,7 @@ func defaultBigQueryWriteCallOptions() *BigQueryWriteCallOptions { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, + codes.ResourceExhausted, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, @@ -103,6 +104,7 @@ func defaultBigQueryWriteCallOptions() *BigQueryWriteCallOptions { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, + codes.ResourceExhausted, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, @@ -116,6 +118,7 @@ func defaultBigQueryWriteCallOptions() *BigQueryWriteCallOptions { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, + codes.ResourceExhausted, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, @@ -129,6 +132,7 @@ func defaultBigQueryWriteCallOptions() *BigQueryWriteCallOptions { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, + codes.ResourceExhausted, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go index 16e6569..4ed84b9 100644 --- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go +++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go @@ -43,10 +43,9 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// An enum to indicate how to interpret missing values. Missing values are -// fields present in user schema but missing in rows. A missing value can -// represent a NULL or a column default value defined in BigQuery table -// schema. +// An enum to indicate how to interpret missing values of fields that are +// present in user schema but missing in rows. A missing value can represent a +// NULL or a column default value defined in BigQuery table schema. type AppendRowsRequest_MissingValueInterpretation int32 const ( @@ -857,9 +856,10 @@ func (x *CreateWriteStreamRequest) GetWriteStream() *WriteStream { // Request message for `AppendRows`. // -// Due to the nature of AppendRows being a bidirectional streaming RPC, certain -// parts of the AppendRowsRequest need only be specified for the first request -// sent each time the gRPC network connection is opened/reopened. +// Because AppendRows is a bidirectional streaming RPC, certain parts of the +// AppendRowsRequest need only be specified for the first request before +// switching table destinations. You can also switch table destinations within +// the same connection for the default stream. // // The size of a single AppendRowsRequest must be less than 10 MB in size. // Requests larger than this return an error, typically `INVALID_ARGUMENT`. @@ -868,10 +868,14 @@ type AppendRowsRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The write_stream identifies the target of the append operation, - // and only needs to be specified as part of the first request on the gRPC - // connection. 
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go
index 16e6569..4ed84b9 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go
@@ -43,10 +43,9 @@ const (
 	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
 )
 
-// An enum to indicate how to interpret missing values. Missing values are
-// fields present in user schema but missing in rows. A missing value can
-// represent a NULL or a column default value defined in BigQuery table
-// schema.
+// An enum to indicate how to interpret missing values of fields that are
+// present in user schema but missing in rows. A missing value can represent a
+// NULL or a column default value defined in BigQuery table schema.
 type AppendRowsRequest_MissingValueInterpretation int32
 
 const (
@@ -857,9 +856,10 @@ func (x *CreateWriteStreamRequest) GetWriteStream() *WriteStream {
 
 // Request message for `AppendRows`.
 //
-// Due to the nature of AppendRows being a bidirectional streaming RPC, certain
-// parts of the AppendRowsRequest need only be specified for the first request
-// sent each time the gRPC network connection is opened/reopened.
+// Because AppendRows is a bidirectional streaming RPC, certain parts of the
+// AppendRowsRequest need only be specified for the first request before
+// switching table destinations. You can also switch table destinations within
+// the same connection for the default stream.
 //
 // The size of a single AppendRowsRequest must be less than 10 MB in size.
 // Requests larger than this return an error, typically `INVALID_ARGUMENT`.
@@ -868,10 +868,14 @@ type AppendRowsRequest struct {
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	// Required. The write_stream identifies the target of the append operation,
-	// and only needs to be specified as part of the first request on the gRPC
-	// connection. If provided for subsequent requests, it must match the value of
-	// the first request.
+	// Required. The write_stream identifies the append operation. It must be
+	// provided in the following scenarios:
+	//
+	// * In the first request to an AppendRows connection.
+	//
+	// * In all subsequent requests to an AppendRows connection, if you use the
+	// same connection to write to multiple tables or change the input schema for
+	// default streams.
 	//
 	// For explicitly created write streams, the format is:
 	//
@@ -880,6 +884,22 @@ type AppendRowsRequest struct {
 	// For the special default stream, the format is:
 	//
 	// * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+	//
+	// An example of a possible sequence of requests with write_stream fields
+	// within a single connection:
+	//
+	// * r1: {write_stream: stream_name_1}
+	//
+	// * r2: {write_stream: /*omit*/}
+	//
+	// * r3: {write_stream: /*omit*/}
+	//
+	// * r4: {write_stream: stream_name_2}
+	//
+	// * r5: {write_stream: stream_name_2}
+	//
+	// The destination changed in request_4, so the write_stream field must be
+	// populated in all subsequent requests in this stream.
 	WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
 	// If present, the write is only performed if the next append offset is same
 	// as the provided value. If not present, the write is performed at the
@@ -1740,9 +1760,14 @@ type AppendRowsRequest_ProtoData struct {
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	// Proto schema used to serialize the data. This value only needs to be
-	// provided as part of the first request on a gRPC network connection,
-	// and will be ignored for subsequent requests on the connection.
+	// The protocol buffer schema used to serialize the data. Provide this value
+	// whenever:
+	//
+	// * You send the first request of an RPC connection.
+	//
+	// * You change the input schema.
+	//
+	// * You specify a new destination table.
 	WriterSchema *ProtoSchema `protobuf:"bytes,1,opt,name=writer_schema,json=writerSchema,proto3" json:"writer_schema,omitempty"`
 	// Serialized row data in protobuf message format.
 	// Currently, the backend expects the serialized rows to adhere to
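The reworked write_stream and writer_schema comments spell out when each field must be resent on an AppendRows connection. A sketch of the documented sequence against the apiv1 client, under the assumption that the destination and input schema stay fixed for the life of the connection; the package name, the appendBatches helper, and the streamName, desc, and batches parameters are illustrative, not part of this diff:

package bqwrite

import (
	"context"
	"log"

	storage "cloud.google.com/go/bigquery/storage/apiv1"
	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/protobuf/types/descriptorpb"
)

// appendBatches illustrates the documented request sequence: the first
// request on the connection names the destination stream and carries the
// writer schema; later requests to the same destination may omit both.
func appendBatches(ctx context.Context, client *storage.BigQueryWriteClient,
	streamName string, desc *descriptorpb.DescriptorProto, batches [][][]byte) error {

	arc, err := client.AppendRows(ctx)
	if err != nil {
		return err
	}

	for i, rows := range batches {
		req := &storagepb.AppendRowsRequest{
			Rows: &storagepb.AppendRowsRequest_ProtoRows{
				ProtoRows: &storagepb.AppendRowsRequest_ProtoData{
					Rows: &storagepb.ProtoRows{SerializedRows: rows},
				},
			},
		}
		if i == 0 {
			// r1: {write_stream: streamName, writer_schema: desc}
			req.WriteStream = streamName
			req.GetProtoRows().WriterSchema = &storagepb.ProtoSchema{ProtoDescriptor: desc}
		}
		// r2..rN to the same destination omit write_stream and writer_schema.
		if err := arc.Send(req); err != nil {
			return err
		}
		resp, err := arc.Recv()
		if err != nil {
			return err
		}
		log.Printf("append %d: %v", i, resp.GetAppendResult())
	}
	return arc.CloseSend()
}

Switching to a different table mid-connection, as in the r4/r5 example in the comment, would mean setting WriteStream (and WriterSchema) again on that request and on every request after it.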
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/stream.pb.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/stream.pb.go
index 95ed2af..5082d47 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/stream.pb.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/stream.pb.go
@@ -302,10 +302,10 @@ type ReadSession struct {
 	// all streams are completely consumed. This estimate is based on
 	// metadata from the table which might be incomplete or stale.
 	EstimatedTotalBytesScanned int64 `protobuf:"varint,12,opt,name=estimated_total_bytes_scanned,json=estimatedTotalBytesScanned,proto3" json:"estimated_total_bytes_scanned,omitempty"`
-	// Output only. A pre-projected estimate of the total physical size (in bytes)
-	// of files this session will scan when all streams are completely consumed.
-	// This estimate does not depend on the selected columns and can be based on
-	// metadata from the table which might be incomplete or stale. Only set for
+	// Output only. A pre-projected estimate of the total physical size of files
+	// (in bytes) that this session will scan when all streams are consumed. This
+	// estimate is independent of the selected columns and can be based on
+	// incomplete or stale metadata from the table. This field is only set for
 	// BigLake tables.
 	EstimatedTotalPhysicalFileSize int64 `protobuf:"varint,15,opt,name=estimated_total_physical_file_size,json=estimatedTotalPhysicalFileSize,proto3" json:"estimated_total_physical_file_size,omitempty"`
 	// Output only. An estimate on the number of rows present in this session's
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5d4e6ca..85972d5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -8,7 +8,7 @@ cloud.google.com/go/internal/optional
 cloud.google.com/go/internal/trace
 cloud.google.com/go/internal/uid
 cloud.google.com/go/internal/version
-# cloud.google.com/go/bigquery v1.52.0
+# cloud.google.com/go/bigquery v1.53.0
 ## explicit; go 1.19
 cloud.google.com/go/bigquery
 cloud.google.com/go/bigquery/internal
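The stream.pb.go hunk only clarifies wording, but it pins down the semantics of EstimatedTotalPhysicalFileSize: the estimate ignores column selection, may rest on incomplete or stale table metadata, and is populated only for BigLake tables. A sketch of where these fields surface, assuming a read session created against placeholder project and table names:

package main

import (
	"context"
	"log"

	storage "cloud.google.com/go/bigquery/storage/apiv1"
	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

func main() {
	ctx := context.Background()

	client, err := storage.NewBigQueryReadClient(ctx)
	if err != nil {
		log.Fatalf("NewBigQueryReadClient: %v", err)
	}
	defer client.Close()

	// "my-project", "my_dataset", and "my_table" are placeholders.
	session, err := client.CreateReadSession(ctx, &storagepb.CreateReadSessionRequest{
		Parent: "projects/my-project",
		ReadSession: &storagepb.ReadSession{
			Table:      "projects/my-project/datasets/my_dataset/tables/my_table",
			DataFormat: storagepb.DataFormat_AVRO,
		},
		MaxStreamCount: 1,
	})
	if err != nil {
		log.Fatalf("CreateReadSession: %v", err)
	}

	// Both values are estimates from table metadata that may be incomplete
	// or stale; the physical file size is zero unless the table is BigLake.
	log.Printf("estimated logical bytes scanned: %d", session.GetEstimatedTotalBytesScanned())
	log.Printf("estimated physical file size: %d", session.GetEstimatedTotalPhysicalFileSize())
}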