{
"clientLibrary": {
"name": "cloud.google.com/go/bigquery/storage/apiv1beta2",
"version": "1.43.0",
"language": "GO",
"apis": [
{
"id": "google.cloud.bigquery.storage.v1beta2",
"version": "v1beta2"
}
]
},
"snippets": [
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryRead_CreateReadSession_sync",
"title": "bigquerystorage CreateReadSession Sample",
"description": "CreateReadSession creates a new read session. A read session divides the contents of a\nBigQuery table into one or more streams, which can then be used to read\ndata from the table. The read session also specifies properties of the\ndata to be read, such as a list of columns or a push-down filter describing\nthe rows to be returned.\n\nA particular row can be read by at most one stream. When the caller has\nreached the end of each stream in the session, then all the data in the\ntable has been read.\n\nData is assigned to each stream such that roughly the same number of\nrows can be read from each stream. Because the server-side unit for\nassigning data is collections of rows, the API does not guarantee that\neach stream will return the same number or rows. Additionally, the\nlimits are enforced based on the number of pre-filtered rows, so some\nfilters can lead to lopsided assignments.\n\nRead sessions automatically expire 6 hours after they are created and do\nnot require manual clean-up by the caller.",
"file": "BigQueryReadClient/CreateReadSession/main.go",
"language": "GO",
"clientMethod": {
"shortName": "CreateReadSession",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryReadClient.CreateReadSession",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.CreateReadSessionRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "storagepb.ReadSession",
"client": {
"shortName": "BigQueryReadClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryReadClient"
},
"method": {
"shortName": "CreateReadSession",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.CreateReadSession",
"service": {
"shortName": "BigQueryRead",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryRead_ReadRows_sync",
"title": "bigquerystorage ReadRows Sample",
"description": "ReadRows reads rows from the stream in the format prescribed by the ReadSession.\nEach response contains one or more table rows, up to a maximum of 100 MiB\nper response; read requests which attempt to read individual rows larger\nthan 100 MiB will fail.\n\nEach request also returns a set of stream statistics reflecting the current\nstate of the stream.",
"file": "BigQueryReadClient/ReadRows/main.go",
"language": "GO",
"clientMethod": {
"shortName": "ReadRows",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryReadClient.ReadRows",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.ReadRowsRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"client": {
"shortName": "BigQueryReadClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryReadClient"
},
"method": {
"shortName": "ReadRows",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.ReadRows",
"service": {
"shortName": "BigQueryRead",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 1,
"end": -1,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryRead_SplitReadStream_sync",
"title": "bigquerystorage SplitReadStream Sample",
"description": "SplitReadStream splits a given ReadStream into two ReadStream objects. These\nReadStream objects are referred to as the primary and the residual\nstreams of the split. The original ReadStream can still be read from in\nthe same manner as before. Both of the returned ReadStream objects can\nalso be read from, and the rows returned by both child streams will be\nthe same as the rows read from the original stream.\n\nMoreover, the two child streams will be allocated back-to-back in the\noriginal ReadStream. Concretely, it is guaranteed that for streams\noriginal, primary, and residual, that original[0-j] = primary[0-j] and\noriginal[j-n] = residual[0-m] once the streams have been read to\ncompletion.",
"file": "BigQueryReadClient/SplitReadStream/main.go",
"language": "GO",
"clientMethod": {
"shortName": "SplitReadStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryReadClient.SplitReadStream",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.SplitReadStreamRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "storagepb.SplitReadStreamResponse",
"client": {
"shortName": "BigQueryReadClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryReadClient"
},
"method": {
"shortName": "SplitReadStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.SplitReadStream",
"service": {
"shortName": "BigQueryRead",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_AppendRows_sync",
"title": "bigquerystorage AppendRows Sample",
"description": "AppendRows appends data to the given stream.\n\nIf offset is specified, the offset is checked against the end of\nstream. The server returns OUT_OF_RANGE in AppendRowsResponse if an\nattempt is made to append to an offset beyond the current end of the stream\nor ALREADY_EXISTS if user provids an offset that has already been\nwritten to. User can retry with adjusted offset within the same RPC\nstream. If offset is not specified, append happens at the end of the\nstream.\n\nThe response contains the offset at which the append happened. Responses\nare received in the same order in which requests are sent. There will be\none response for each successful request. If the offset is not set in\nresponse, it means append didn’t happen due to some errors. If one request\nfails, all the subsequent requests will also fail until a success request\nis made again.\n\nIf the stream is of PENDING type, data will only be available for read\noperations after the stream is committed.",
"file": "BigQueryWriteClient/AppendRows/main.go",
"language": "GO",
"clientMethod": {
"shortName": "AppendRows",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.AppendRows",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "AppendRows",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.AppendRows",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 69,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_BatchCommitWriteStreams_sync",
"title": "bigquerystorage BatchCommitWriteStreams Sample",
"description": "BatchCommitWriteStreams atomically commits a group of PENDING streams that belong to the same\nparent table.\nStreams must be finalized before commit and cannot be committed multiple\ntimes. Once a stream is committed, data in the stream becomes available\nfor read operations.",
"file": "BigQueryWriteClient/BatchCommitWriteStreams/main.go",
"language": "GO",
"clientMethod": {
"shortName": "BatchCommitWriteStreams",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.BatchCommitWriteStreams",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.BatchCommitWriteStreamsRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "storagepb.BatchCommitWriteStreamsResponse",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "BatchCommitWriteStreams",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.BatchCommitWriteStreams",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_CreateWriteStream_sync",
"title": "bigquerystorage CreateWriteStream Sample",
"description": "CreateWriteStream creates a write stream to the given table.\nAdditionally, every table has a special COMMITTED stream named ‘_default’\nto which data can be written. This stream doesn’t need to be created using\nCreateWriteStream. It is a stream that can be used simultaneously by any\nnumber of clients. Data written to this stream is considered committed as\nsoon as an acknowledgement is received.",
"file": "BigQueryWriteClient/CreateWriteStream/main.go",
"language": "GO",
"clientMethod": {
"shortName": "CreateWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.CreateWriteStream",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.CreateWriteStreamRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "storagepb.WriteStream",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "CreateWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.CreateWriteStream",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_FinalizeWriteStream_sync",
"title": "bigquerystorage FinalizeWriteStream Sample",
"description": "FinalizeWriteStream finalize a write stream so that no new data can be appended to the\nstream. Finalize is not supported on the ‘_default’ stream.",
"file": "BigQueryWriteClient/FinalizeWriteStream/main.go",
"language": "GO",
"clientMethod": {
"shortName": "FinalizeWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.FinalizeWriteStream",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.FinalizeWriteStreamRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "storagepb.FinalizeWriteStreamResponse",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "FinalizeWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.FinalizeWriteStream",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_FlushRows_sync",
"title": "bigquerystorage FlushRows Sample",
"description": "FlushRows flushes rows to a BUFFERED stream.\nIf users are appending rows to BUFFERED stream, flush operation is\nrequired in order for the rows to become available for reading. A\nFlush operation flushes up to any previously flushed offset in a BUFFERED\nstream, to the offset specified in the request.\nFlush is not supported on the _default stream, since it is not BUFFERED.",
"file": "BigQueryWriteClient/FlushRows/main.go",
"language": "GO",
"clientMethod": {
"shortName": "FlushRows",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.FlushRows",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.FlushRowsRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "storagepb.FlushRowsResponse",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "FlushRows",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.FlushRows",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_GetWriteStream_sync",
"title": "bigquerystorage GetWriteStream Sample",
"description": "GetWriteStream gets a write stream.",
"file": "BigQueryWriteClient/GetWriteStream/main.go",
"language": "GO",
"clientMethod": {
"shortName": "GetWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.GetWriteStream",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.GetWriteStreamRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "storagepb.WriteStream",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "GetWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.GetWriteStream",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
}
]
}