{
"auth": {
"oauth2": {
"scopes": {
"https://www.googleapis.com/auth/bigquery": {
"description": "View and manage your data in Google BigQuery"
},
"https://www.googleapis.com/auth/bigquery.insertdata": {
"description": "Insert data into Google BigQuery"
},
"https://www.googleapis.com/auth/bigquery.readonly": {
"description": "View your data in Google BigQuery"
},
"https://www.googleapis.com/auth/cloud-platform": {
"description": "View and manage your data across Google Cloud Platform services"
},
"https://www.googleapis.com/auth/cloud-platform.read-only": {
"description": "View your data across Google Cloud Platform services"
},
"https://www.googleapis.com/auth/devstorage.full_control": {
"description": "Manage your data and permissions in Google Cloud Storage"
},
"https://www.googleapis.com/auth/devstorage.read_only": {
"description": "View your data in Google Cloud Storage"
},
"https://www.googleapis.com/auth/devstorage.read_write": {
"description": "Manage your data in Google Cloud Storage"
}
}
}
},
"basePath": "/bigquery/v2/",
"baseUrl": "https://bigquery.googleapis.com/bigquery/v2/",
"batchPath": "batch/bigquery/v2",
"description": "A data platform for customers to create, manage, share and query data.",
"discoveryVersion": "v1",
"documentationLink": "https://cloud.google.com/bigquery/",
"icons": {
"x16": "https://www.google.com/images/icons/product/search-16.gif",
"x32": "https://www.google.com/images/icons/product/search-32.gif"
},
"id": "bigquery:v2",
"kind": "discovery#restDescription",
"name": "bigquery",
"ownerDomain": "google.com",
"ownerName": "Google",
"parameters": {
"alt": {
"default": "json",
"description": "Data format for the response.",
"enum": [
"json"
],
"enumDescriptions": [
"Responses with Content-Type of application/json"
],
"location": "query",
"type": "string"
},
"fields": {
"description": "Selector specifying which fields to include in a partial response.",
"location": "query",
"type": "string"
},
"key": {
"description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
"location": "query",
"type": "string"
},
"oauth_token": {
"description": "OAuth 2.0 token for the current user.",
"location": "query",
"type": "string"
},
"prettyPrint": {
"default": "true",
"description": "Returns response with indentations and line breaks.",
"location": "query",
"type": "boolean"
},
"quotaUser": {
"description": "An opaque string that represents a user for quota purposes. Must not exceed 40 characters.",
"location": "query",
"type": "string"
},
"userIp": {
"description": "Deprecated. Please use quotaUser instead.",
"location": "query",
"type": "string"
}
},
"protocol": "rest",
"resources": {
"datasets": {
"methods": {
"delete": {
"description": "Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.",
"httpMethod": "DELETE",
"id": "bigquery.datasets.delete",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of dataset being deleted",
"location": "path",
"required": true,
"type": "string"
},
"deleteContents": {
"description": "If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False",
"location": "query",
"type": "boolean"
},
"projectId": {
"description": "Project ID of the dataset being deleted",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}",
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"get": {
"description": "Returns the dataset specified by datasetID.",
"httpMethod": "GET",
"id": "bigquery.datasets.get",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the requested dataset",
"location": "path",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the requested dataset",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}",
"response": {
"$ref": "Dataset"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"insert": {
"description": "Creates a new empty dataset.",
"httpMethod": "POST",
"id": "bigquery.datasets.insert",
"parameterOrder": [
"projectId"
],
"parameters": {
"projectId": {
"description": "Project ID of the new dataset",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets",
"request": {
"$ref": "Dataset"
},
"response": {
"$ref": "Dataset"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"list": {
"description": "Lists all datasets in the specified project to which you have been granted the READER dataset role.",
"httpMethod": "GET",
"id": "bigquery.datasets.list",
"parameterOrder": [
"projectId"
],
"parameters": {
"all": {
"description": "Whether to list all datasets, including hidden ones",
"location": "query",
"type": "boolean"
},
"filter": {
"description": "An expression for filtering the results of the request by label. The syntax is \"labels.\u003cname\u003e[:\u003cvalue\u003e]\". Multiple filters can be ANDed together by connecting with a space. Example: \"labels.department:receiving labels.active\". See Filtering datasets using labels for details.",
"location": "query",
"type": "string"
},
"maxResults": {
"description": "The maximum number of results to return",
"format": "uint32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of results",
"location": "query",
"type": "string"
},
"projectId": {
"description": "Project ID of the datasets to be listed",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets",
"response": {
"$ref": "DatasetList"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"patch": {
"description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports patch semantics.",
"httpMethod": "PATCH",
"id": "bigquery.datasets.patch",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the dataset being updated",
"location": "path",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the dataset being updated",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}",
"request": {
"$ref": "Dataset"
},
"response": {
"$ref": "Dataset"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"update": {
"description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.",
"httpMethod": "PUT",
"id": "bigquery.datasets.update",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the dataset being updated",
"location": "path",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the dataset being updated",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}",
"request": {
"$ref": "Dataset"
},
"response": {
"$ref": "Dataset"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
}
}
},
"jobs": {
"methods": {
"cancel": {
"description": "Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully. Cancelled jobs may still incur costs.",
"httpMethod": "POST",
"id": "bigquery.jobs.cancel",
"parameterOrder": [
"projectId",
"jobId"
],
"parameters": {
"jobId": {
"description": "[Required] Job ID of the job to cancel",
"location": "path",
"required": true,
"type": "string"
},
"location": {
"description": "The geographic location of the job. Required except for US and EU. See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location.",
"location": "query",
"type": "string"
},
"projectId": {
"description": "[Required] Project ID of the job to cancel",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/jobs/{jobId}/cancel",
"response": {
"$ref": "JobCancelResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"get": {
"description": "Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.",
"httpMethod": "GET",
"id": "bigquery.jobs.get",
"parameterOrder": [
"projectId",
"jobId"
],
"parameters": {
"jobId": {
"description": "[Required] Job ID of the requested job",
"location": "path",
"required": true,
"type": "string"
},
"location": {
"description": "The geographic location of the job. Required except for US and EU. See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location.",
"location": "query",
"type": "string"
},
"projectId": {
"description": "[Required] Project ID of the requested job",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/jobs/{jobId}",
"response": {
"$ref": "Job"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"getQueryResults": {
"description": "Retrieves the results of a query job.",
"httpMethod": "GET",
"id": "bigquery.jobs.getQueryResults",
"parameterOrder": [
"projectId",
"jobId"
],
"parameters": {
"jobId": {
"description": "[Required] Job ID of the query job",
"location": "path",
"required": true,
"type": "string"
},
"location": {
"description": "The geographic location where the job should run. Required except for US and EU. See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location.",
"location": "query",
"type": "string"
},
"maxResults": {
"description": "Maximum number of results to read",
"format": "uint32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of results",
"location": "query",
"type": "string"
},
"projectId": {
"description": "[Required] Project ID of the query job",
"location": "path",
"required": true,
"type": "string"
},
"startIndex": {
"description": "Zero-based index of the starting row",
"format": "uint64",
"location": "query",
"type": "string"
},
"timeoutMs": {
"description": "How long to wait for the query to complete, in milliseconds, before returning. Default is 10 seconds. If the timeout passes before the job completes, the 'jobComplete' field in the response will be false",
"format": "uint32",
"location": "query",
"type": "integer"
}
},
"path": "projects/{projectId}/queries/{jobId}",
"response": {
"$ref": "GetQueryResultsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"insert": {
"description": "Starts a new asynchronous job. Requires the Can View project role.",
"httpMethod": "POST",
"id": "bigquery.jobs.insert",
"mediaUpload": {
"accept": [
"*/*"
],
"protocols": {
"resumable": {
"multipart": true,
"path": "/resumable/upload/bigquery/v2/projects/{projectId}/jobs"
},
"simple": {
"multipart": true,
"path": "/upload/bigquery/v2/projects/{projectId}/jobs"
}
}
},
"parameterOrder": [
"projectId"
],
"parameters": {
"projectId": {
"description": "Project ID of the project that will be billed for the job",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/jobs",
"request": {
"$ref": "Job"
},
"response": {
"$ref": "Job"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/devstorage.read_write"
],
"supportsMediaUpload": true
},
"list": {
"description": "Lists all jobs that you started in the specified project. Job information is available for a six month period after creation. The job list is sorted in reverse chronological order, by job creation time. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.",
"httpMethod": "GET",
"id": "bigquery.jobs.list",
"parameterOrder": [
"projectId"
],
"parameters": {
"allUsers": {
"description": "Whether to display jobs owned by all users in the project. Default false",
"location": "query",
"type": "boolean"
},
"maxCreationTime": {
"description": "Max value for job creation time, in milliseconds since the POSIX epoch. If set, only jobs created before or at this timestamp are returned",
"format": "uint64",
"location": "query",
"type": "string"
},
"maxResults": {
"description": "Maximum number of results to return",
"format": "uint32",
"location": "query",
"type": "integer"
},
"minCreationTime": {
"description": "Min value for job creation time, in milliseconds since the POSIX epoch. If set, only jobs created after or at this timestamp are returned",
"format": "uint64",
"location": "query",
"type": "string"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of results",
"location": "query",
"type": "string"
},
"parentJobId": {
"description": "If set, retrieves only jobs whose parent is this job. Otherwise, retrieves only jobs which have no parent",
"location": "query",
"type": "string"
},
"projectId": {
"description": "Project ID of the jobs to list",
"location": "path",
"required": true,
"type": "string"
},
"projection": {
"description": "Restrict information returned to a set of selected fields",
"enum": [
"full",
"minimal"
],
"enumDescriptions": [
"Includes all job data",
"Does not include the job configuration"
],
"location": "query",
"type": "string"
},
"stateFilter": {
"description": "Filter for job state",
"enum": [
"done",
"pending",
"running"
],
"enumDescriptions": [
"Finished jobs",
"Pending jobs",
"Running jobs"
],
"location": "query",
"repeated": true,
"type": "string"
}
},
"path": "projects/{projectId}/jobs",
"response": {
"$ref": "JobList"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"query": {
"description": "Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.",
"httpMethod": "POST",
"id": "bigquery.jobs.query",
"parameterOrder": [
"projectId"
],
"parameters": {
"projectId": {
"description": "Project ID of the project billed for the query",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/queries",
"request": {
"$ref": "QueryRequest"
},
"response": {
"$ref": "QueryResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
}
}
},
"models": {
"methods": {
"delete": {
"description": "Deletes the model specified by modelId from the dataset.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/models/{modelsId}",
"httpMethod": "DELETE",
"id": "bigquery.models.delete",
"parameterOrder": [
"projectId",
"datasetId",
"modelId"
],
"parameters": {
"datasetId": {
"description": "Required. Dataset ID of the model to delete.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"modelId": {
"description": "Required. Model ID of the model to delete.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"projectId": {
"description": "Required. Project ID of the model to delete.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/models/{+modelId}",
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"get": {
"description": "Gets the specified model resource by model ID.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/models/{modelsId}",
"httpMethod": "GET",
"id": "bigquery.models.get",
"parameterOrder": [
"projectId",
"datasetId",
"modelId"
],
"parameters": {
"datasetId": {
"description": "Required. Dataset ID of the requested model.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"modelId": {
"description": "Required. Model ID of the requested model.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"projectId": {
"description": "Required. Project ID of the requested model.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/models/{+modelId}",
"response": {
"$ref": "Model"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"list": {
"description": "Lists all models in the specified dataset. Requires the READER dataset\nrole.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/models",
"httpMethod": "GET",
"id": "bigquery.models.list",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Required. Dataset ID of the models to list.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"maxResults": {
"description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.",
"format": "uint32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "Page token, returned by a previous call to request the next page of\nresults",
"location": "query",
"type": "string"
},
"projectId": {
"description": "Required. Project ID of the models to list.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/models",
"response": {
"$ref": "ListModelsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"patch": {
"description": "Patch specific fields in the specified model.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/models/{modelsId}",
"httpMethod": "PATCH",
"id": "bigquery.models.patch",
"parameterOrder": [
"projectId",
"datasetId",
"modelId"
],
"parameters": {
"datasetId": {
"description": "Required. Dataset ID of the model to patch.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"modelId": {
"description": "Required. Model ID of the model to patch.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"projectId": {
"description": "Required. Project ID of the model to patch.",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/models/{+modelId}",
"request": {
"$ref": "Model"
},
"response": {
"$ref": "Model"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
}
}
},
"projects": {
"methods": {
"getServiceAccount": {
"description": "Returns the email address of the service account for your project used for interactions with Google Cloud KMS.",
"httpMethod": "GET",
"id": "bigquery.projects.getServiceAccount",
"parameterOrder": [
"projectId"
],
"parameters": {
"projectId": {
"description": "Project ID for which the service account is requested.",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/serviceAccount",
"response": {
"$ref": "GetServiceAccountResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"list": {
"description": "Lists all projects to which you have been granted any project role.",
"httpMethod": "GET",
"id": "bigquery.projects.list",
"parameters": {
"maxResults": {
"description": "Maximum number of results to return",
"format": "uint32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of results",
"location": "query",
"type": "string"
}
},
"path": "projects",
"response": {
"$ref": "ProjectList"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
}
}
},
"routines": {
"methods": {
"delete": {
"description": "Deletes the routine specified by routineId from the dataset.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}",
"httpMethod": "DELETE",
"id": "bigquery.routines.delete",
"parameterOrder": [
"projectId",
"datasetId",
"routineId"
],
"parameters": {
"datasetId": {
"description": "Required. Dataset ID of the routine to delete",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"projectId": {
"description": "Required. Project ID of the routine to delete",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"routineId": {
"description": "Required. Routine ID of the routine to delete",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}",
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"get": {
"description": "Gets the specified routine resource by routine ID.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}",
"httpMethod": "GET",
"id": "bigquery.routines.get",
"parameterOrder": [
"projectId",
"datasetId",
"routineId"
],
"parameters": {
"datasetId": {
"description": "Required. Dataset ID of the requested routine",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"fieldMask": {
"description": "If set, only the Routine fields in the field mask are returned in the\nresponse. If unset, all Routine fields are returned.",
"format": "google-fieldmask",
"location": "query",
"type": "string"
},
"projectId": {
"description": "Required. Project ID of the requested routine",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"routineId": {
"description": "Required. Routine ID of the requested routine",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}",
"response": {
"$ref": "Routine"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"insert": {
"description": "Creates a new routine in the dataset.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines",
"httpMethod": "POST",
"id": "bigquery.routines.insert",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Required. Dataset ID of the new routine",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"projectId": {
"description": "Required. Project ID of the new routine",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/routines",
"request": {
"$ref": "Routine"
},
"response": {
"$ref": "Routine"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"list": {
"description": "Lists all routines in the specified dataset. Requires the READER dataset\nrole.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines",
"httpMethod": "GET",
"id": "bigquery.routines.list",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Required. Dataset ID of the routines to list",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"maxResults": {
"description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.",
"format": "uint32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of\nresults",
"location": "query",
"type": "string"
},
"projectId": {
"description": "Required. Project ID of the routines to list",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/routines",
"response": {
"$ref": "ListRoutinesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"update": {
"description": "Updates information in an existing routine. The update method replaces the\nentire Routine resource.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}",
"httpMethod": "PUT",
"id": "bigquery.routines.update",
"parameterOrder": [
"projectId",
"datasetId",
"routineId"
],
"parameters": {
"datasetId": {
"description": "Required. Dataset ID of the routine to update",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"projectId": {
"description": "Required. Project ID of the routine to update",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"routineId": {
"description": "Required. Routine ID of the routine to update",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}",
"request": {
"$ref": "Routine"
},
"response": {
"$ref": "Routine"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
}
}
},
"tabledata": {
"methods": {
"insertAll": {
"description": "Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.",
"httpMethod": "POST",
"id": "bigquery.tabledata.insertAll",
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the destination table.",
"location": "path",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the destination table.",
"location": "path",
"required": true,
"type": "string"
},
"tableId": {
"description": "Table ID of the destination table.",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll",
"request": {
"$ref": "TableDataInsertAllRequest"
},
"response": {
"$ref": "TableDataInsertAllResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.insertdata",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"list": {
"description": "Retrieves table data from a specified set of rows. Requires the READER dataset role.",
"httpMethod": "GET",
"id": "bigquery.tabledata.list",
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the table to read",
"location": "path",
"required": true,
"type": "string"
},
"maxResults": {
"description": "Maximum number of results to return",
"format": "uint32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "Page token, returned by a previous call, identifying the result set",
"location": "query",
"type": "string"
},
"projectId": {
"description": "Project ID of the table to read",
"location": "path",
"required": true,
"type": "string"
},
"selectedFields": {
"description": "List of fields to return (comma-separated). If unspecified, all fields are returned",
"location": "query",
"type": "string"
},
"startIndex": {
"description": "Zero-based index of the starting row to read",
"format": "uint64",
"location": "query",
"type": "string"
},
"tableId": {
"description": "Table ID of the table to read",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data",
"response": {
"$ref": "TableDataList"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
}
}
},
"tables": {
"methods": {
"delete": {
"description": "Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.",
"httpMethod": "DELETE",
"id": "bigquery.tables.delete",
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the table to delete",
"location": "path",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the table to delete",
"location": "path",
"required": true,
"type": "string"
},
"tableId": {
"description": "Table ID of the table to delete",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"get": {
"description": "Gets the specified table resource by table ID. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.",
"httpMethod": "GET",
"id": "bigquery.tables.get",
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the requested table",
"location": "path",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the requested table",
"location": "path",
"required": true,
"type": "string"
},
"selectedFields": {
"description": "List of fields to return (comma-separated). If unspecified, all fields are returned",
"location": "query",
"type": "string"
},
"tableId": {
"description": "Table ID of the requested table",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
"response": {
"$ref": "Table"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"insert": {
"description": "Creates a new, empty table in the dataset.",
"httpMethod": "POST",
"id": "bigquery.tables.insert",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the new table",
"location": "path",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the new table",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}/tables",
"request": {
"$ref": "Table"
},
"response": {
"$ref": "Table"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"list": {
"description": "Lists all tables in the specified dataset. Requires the READER dataset role.",
"httpMethod": "GET",
"id": "bigquery.tables.list",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the tables to list",
"location": "path",
"required": true,
"type": "string"
},
"maxResults": {
"description": "Maximum number of results to return",
"format": "uint32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of results",
"location": "query",
"type": "string"
},
"projectId": {
"description": "Project ID of the tables to list",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}/tables",
"response": {
"$ref": "TableList"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery.readonly",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"patch": {
"description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics.",
"httpMethod": "PATCH",
"id": "bigquery.tables.patch",
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the table to update",
"location": "path",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the table to update",
"location": "path",
"required": true,
"type": "string"
},
"tableId": {
"description": "Table ID of the table to update",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
"request": {
"$ref": "Table"
},
"response": {
"$ref": "Table"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"update": {
"description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.",
"httpMethod": "PUT",
"id": "bigquery.tables.update",
"parameterOrder": [
"projectId",
"datasetId",
"tableId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the table to update",
"location": "path",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the table to update",
"location": "path",
"required": true,
"type": "string"
},
"tableId": {
"description": "Table ID of the table to update",
"location": "path",
"required": true,
"type": "string"
}
},
"path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
"request": {
"$ref": "Table"
},
"response": {
"$ref": "Table"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
}
}
}
},
"revision": "20190917",
"rootUrl": "https://bigquery.googleapis.com/",
"schemas": {
"AggregateClassificationMetrics": {
"description": "Aggregate metrics for classification/classifier models. For multi-class\nmodels, the metrics are either macro-averaged or micro-averaged. When\nmacro-averaged, the metrics are calculated for each label and then an\nunweighted average is taken of those values. When micro-averaged, the\nmetric is calculated globally by counting the total number of correctly\npredicted rows.",
"id": "AggregateClassificationMetrics",
"properties": {
"accuracy": {
"description": "Accuracy is the fraction of predictions given the correct label. For\nmulticlass this is a micro-averaged metric.",
"format": "double",
"type": "number"
},
"f1Score": {
"description": "The F1 score is an average of recall and precision. For multiclass\nthis is a macro-averaged metric.",
"format": "double",
"type": "number"
},
"logLoss": {
"description": "Logarithmic Loss. For multiclass this is a macro-averaged metric.",
"format": "double",
"type": "number"
},
"precision": {
"description": "Precision is the fraction of actual positive predictions that had\npositive actual labels. For multiclass this is a macro-averaged\nmetric treating each class as a binary classifier.",
"format": "double",
"type": "number"
},
"recall": {
"description": "Recall is the fraction of actual positive labels that were given a\npositive prediction. For multiclass this is a macro-averaged metric.",
"format": "double",
"type": "number"
},
"rocAuc": {
"description": "Area Under a ROC Curve. For multiclass this is a macro-averaged\nmetric.",
"format": "double",
"type": "number"
},
"threshold": {
"description": "Threshold at which the metrics are computed. For binary\nclassification models this is the positive class threshold.\nFor multi-class classfication models this is the confidence\nthreshold.",
"format": "double",
"type": "number"
}
},
"type": "object"
},
"Argument": {
"description": "Input/output argument of a function or a stored procedure.",
"id": "Argument",
"properties": {
"argumentKind": {
"description": "Optional. Defaults to FIXED_TYPE.",
"enum": [
"ARGUMENT_KIND_UNSPECIFIED",
"FIXED_TYPE",
"ANY_TYPE"
],
"enumDescriptions": [
"",
"The argument is a variable with fully specified type, which can be a\nstruct or an array, but not a table.",
"The argument is any type, including struct or array, but not a table.\nTo be added: FIXED_TABLE, ANY_TABLE"
],
"type": "string"
},
"dataType": {
"$ref": "StandardSqlDataType",
"description": "Required unless argument_kind = ANY_TYPE."
},
"mode": {
"description": "Optional. Specifies whether the argument is input or output.\nCan be set for procedures only.",
"enum": [
"MODE_UNSPECIFIED",
"IN",
"OUT",
"INOUT"
],
"enumDescriptions": [
"",
"The argument is input-only.",
"The argument is output-only.",
"The argument is both an input and an output."
],
"type": "string"
},
"name": {
"description": "Optional. The name of this argument. Can be absent for function return argument.",
"type": "string"
}
},
"type": "object"
},
"BigQueryModelTraining": {
"id": "BigQueryModelTraining",
"properties": {
"currentIteration": {
"description": "[Output-only, Beta] Index of current ML training iteration. Updated during create model query job to show job progress.",
"format": "int32",
"type": "integer"
},
"expectedTotalIterations": {
"description": "[Output-only, Beta] Expected number of iterations for the create model query job specified as num_iterations in the input query. The actual total number of iterations may be less than this number due to early stop.",
"format": "int64",
"type": "string"
}
},
"type": "object"
},
"BigtableColumn": {
"id": "BigtableColumn",
"properties": {
"encoding": {
"description": "[Optional] The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.",
"type": "string"
},
"fieldName": {
"description": "[Optional] If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.",
"type": "string"
},
"onlyReadLatest": {
"description": "[Optional] If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.",
"type": "boolean"
},
"qualifierEncoded": {
"description": "[Required] Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifier_string field. Otherwise, a base-64 encoded value must be set to qualifier_encoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as field_name.",
"format": "byte",
"type": "string"
},
"qualifierString": {
"type": "string"
},
"type": {
"description": "[Optional] The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is BYTES. 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.",
"type": "string"
}
},
"type": "object"
},
"BigtableColumnFamily": {
"id": "BigtableColumnFamily",
"properties": {
"columns": {
"description": "[Optional] Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as .. Other columns can be accessed as a list through .Column field.",
"items": {
"$ref": "BigtableColumn"
},
"type": "array"
},
"encoding": {
"description": "[Optional] The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.",
"type": "string"
},
"familyId": {
"description": "Identifier of the column family.",
"type": "string"
},
"onlyReadLatest": {
"description": "[Optional] If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.",
"type": "boolean"
},
"type": {
"description": "[Optional] The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.",
"type": "string"
}
},
"type": "object"
},
"BigtableOptions": {
"id": "BigtableOptions",
"properties": {
"columnFamilies": {
"description": "[Optional] List of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable.",
"items": {
"$ref": "BigtableColumnFamily"
},
"type": "array"
},
"ignoreUnspecifiedColumnFamilies": {
"description": "[Optional] If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.",
"type": "boolean"
},
"readRowkeyAsString": {
"description": "[Optional] If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.",
"type": "boolean"
}
},
"type": "object"
},
"BinaryClassificationMetrics": {
"description": "Evaluation metrics for binary classification/classifier models.",
"id": "BinaryClassificationMetrics",
"properties": {
"aggregateClassificationMetrics": {
"$ref": "AggregateClassificationMetrics",
"description": "Aggregate classification metrics."
},
"binaryConfusionMatrixList": {
"description": "Binary confusion matrix at multiple thresholds.",
"items": {
"$ref": "BinaryConfusionMatrix"
},
"type": "array"
},
"negativeLabel": {
"description": "Label representing the negative class.",
"type": "string"
},
"positiveLabel": {
"description": "Label representing the positive class.",
"type": "string"
}
},
"type": "object"
},
"BinaryConfusionMatrix": {
"description": "Confusion matrix for binary classification models.",
"id": "BinaryConfusionMatrix",
"properties": {
"accuracy": {
"description": "The fraction of predictions given the correct label.",
"format": "double",
"type": "number"
},
"f1Score": {
"description": "The equally weighted average of recall and precision.",
"format": "double",
"type": "number"
},
"falseNegatives": {
"description": "Number of false samples predicted as false.",
"format": "int64",
"type": "string"
},
"falsePositives": {
"description": "Number of false samples predicted as true.",
"format": "int64",
"type": "string"
},
"positiveClassThreshold": {
"description": "Threshold value used when computing each of the following metric.",
"format": "double",
"type": "number"
},
"precision": {
"description": "The fraction of actual positive predictions that had positive actual\nlabels.",
"format": "double",
"type": "number"
},
"recall": {
"description": "The fraction of actual positive labels that were given a positive\nprediction.",
"format": "double",
"type": "number"
},
"trueNegatives": {
"description": "Number of true samples predicted as false.",
"format": "int64",
"type": "string"
},
"truePositives": {
"description": "Number of true samples predicted as true.",
"format": "int64",
"type": "string"
}
},
"type": "object"
},
"BqmlIterationResult": {
"id": "BqmlIterationResult",
"properties": {
"durationMs": {
"description": "[Output-only, Beta] Time taken to run the training iteration in milliseconds.",
"format": "int64",
"type": "string"
},
"evalLoss": {
"description": "[Output-only, Beta] Eval loss computed on the eval data at the end of the iteration. The eval loss is used for early stopping to avoid overfitting. No eval loss if eval_split_method option is specified as no_split or auto_split with input data size less than 500 rows.",
"format": "double",
"type": "number"
},
"index": {
"description": "[Output-only, Beta] Index of the ML training iteration, starting from zero for each training run.",
"format": "int32",
"type": "integer"
},
"learnRate": {
"description": "[Output-only, Beta] Learning rate used for this iteration, it varies for different training iterations if learn_rate_strategy option is not constant.",
"format": "double",
"type": "number"
},
"trainingLoss": {
"description": "[Output-only, Beta] Training loss computed on the training data at the end of the iteration. The training loss function is defined by model type.",
"format": "double",
"type": "number"
}
},
"type": "object"
},
"BqmlTrainingRun": {
"id": "BqmlTrainingRun",
"properties": {
"iterationResults": {
"description": "[Output-only, Beta] List of each iteration results.",
"items": {
"$ref": "BqmlIterationResult"
},
"type": "array"
},
"startTime": {
"description": "[Output-only, Beta] Training run start time in milliseconds since the epoch.",
"format": "date-time",
"type": "string"
},
"state": {
"description": "[Output-only, Beta] Different state applicable for a training run. IN PROGRESS: Training run is in progress. FAILED: Training run ended due to a non-retryable failure. SUCCEEDED: Training run successfully completed. CANCELLED: Training run cancelled by the user.",
"type": "string"
},
"trainingOptions": {
"description": "[Output-only, Beta] Training options used by this training run. These options are mutable for subsequent training runs. Default values are explicitly stored for options not specified in the input query of the first training run. For subsequent training runs, any option not explicitly specified in the input query will be copied from the previous training run.",
"properties": {
"earlyStop": {
"type": "boolean"
},
"l1Reg": {
"format": "double",
"type": "number"
},
"l2Reg": {
"format": "double",
"type": "number"
},
"learnRate": {
"format": "double",
"type": "number"
},
"learnRateStrategy": {
"type": "string"
},
"lineSearchInitLearnRate": {
"format": "double",
"type": "number"
},
"maxIteration": {
"format": "int64",
"type": "string"
},
"minRelProgress": {
"format": "double",
"type": "number"
},
"warmStart": {
"type": "boolean"
}
},
"type": "object"
}
},
"type": "object"
},
"CategoricalValue": {
"description": "Representative value of a categorical feature.",
"id": "CategoricalValue",
"properties": {
"categoryCounts": {
"description": "Counts of all categories for the categorical feature. If there are\nmore than ten categories, we return top ten (by count) and return\none more CategoryCount with category \"_OTHER_\" and count as\naggregate counts of remaining categories.",
"items": {
"$ref": "CategoryCount"
},
"type": "array"
}
},
"type": "object"
},
"CategoryCount": {
"description": "Represents the count of a single category within the cluster.",
"id": "CategoryCount",
"properties": {
"category": {
"description": "The name of category.",
"type": "string"
},
"count": {
"description": "The count of training samples matching the category within the\ncluster.",
"format": "int64",
"type": "string"
}
},
"type": "object"
},
"Cluster": {
"description": "Message containing the information about one cluster.",
"id": "Cluster",
"properties": {
"centroidId": {
"description": "Centroid id.",
"format": "int64",
"type": "string"
},
"count": {
"description": "Count of training data rows that were assigned to this cluster.",
"format": "int64",
"type": "string"
},
"featureValues": {
"description": "Values of highly variant features for this cluster.",
"items": {
"$ref": "FeatureValue"
},
"type": "array"
}
},
"type": "object"
},
"ClusterInfo": {
"description": "Information about a single cluster for clustering model.",
"id": "ClusterInfo",
"properties": {
"centroidId": {
"description": "Centroid id.",
"format": "int64",
"type": "string"
},
"clusterRadius": {
"description": "Cluster radius, the average distance from centroid\nto each point assigned to the cluster.",
"format": "double",
"type": "number"
},
"clusterSize": {
"description": "Cluster size, the total number of points assigned to the cluster.",
"format": "int64",
"type": "string"
}
},
"type": "object"
},
"Clustering": {
"id": "Clustering",
"properties": {
"fields": {
"description": "[Repeated] One or more fields on which data should be clustered. Only top-level, non-repeated, simple-type fields are supported. When you cluster a table using multiple columns, the order of columns you specify is important. The order of the specified columns determines the sort order of the data.",
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"ClusteringMetrics": {
"description": "Evaluation metrics for clustering models.",
"id": "ClusteringMetrics",
"properties": {
"clusters": {
"description": "[Beta] Information for all clusters.",
"items": {
"$ref": "Cluster"
},
"type": "array"
},
"daviesBouldinIndex": {
"description": "Davies-Bouldin index.",
"format": "double",
"type": "number"
},
"meanSquaredDistance": {
"description": "Mean of squared distances between each sample to its cluster centroid.",
"format": "double",
"type": "number"
}
},
"type": "object"
},
"ConfusionMatrix": {
"description": "Confusion matrix for multi-class classification models.",
"id": "ConfusionMatrix",
"properties": {
"confidenceThreshold": {
"description": "Confidence threshold used when computing the entries of the\nconfusion matrix.",
"format": "double",
"type": "number"
},
"rows": {
"description": "One row per actual label.",
"items": {
"$ref": "Row"
},
"type": "array"
}
},
"type": "object"
},
"CsvOptions": {
"id": "CsvOptions",
"properties": {
"allowJaggedRows": {
"description": "[Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.",
"type": "boolean"
},
"allowQuotedNewlines": {
"description": "[Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.",
"type": "boolean"
},
"encoding": {
"description": "[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.",
"type": "string"
},
"fieldDelimiter": {
"description": "[Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',').",
"type": "string"
},
"quote": {
"default": "\"",
"description": "[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.",
"pattern": ".?",
"type": "string"
},
"skipLeadingRows": {
"description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.",
"format": "int64",
"type": "string"
}
},
"type": "object"
},
"Dataset": {
"id": "Dataset",
"properties": {
"access": {
"description": "[Optional] An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER;",
"items": {
"properties": {
"domain": {
"description": "[Pick one] A domain to grant access to. Any users signed in with the domain specified will be granted the specified access. Example: \"example.com\". Maps to IAM policy member \"domain:DOMAIN\".",
"type": "string"
},
"groupByEmail": {
"description": "[Pick one] An email address of a Google Group to grant access to. Maps to IAM policy member \"group:GROUP\".",
"type": "string"
},
"iamMember": {
"description": "[Pick one] Some other type of member that appears in the IAM Policy but isn't a user, group, domain, or special group.",
"type": "string"
},
"role": {
"description": "[Required] An IAM role ID that should be granted to the user, group, or domain specified in this access entry. The following legacy mappings will be applied: OWNER roles/bigquery.dataOwner WRITER roles/bigquery.dataEditor READER roles/bigquery.dataViewer This field will accept any of the above formats, but will return only the legacy format. For example, if you set this field to \"roles/bigquery.dataOwner\", it will be returned back as \"OWNER\".",
"type": "string"
},
"specialGroup": {
"description": "[Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. allAuthenticatedUsers: All authenticated BigQuery users. Maps to similarly-named IAM members.",
"type": "string"
},
"userByEmail": {
"description": "[Pick one] An email address of a user to grant access to. For example: fred@example.com. Maps to IAM policy member \"user:EMAIL\" or \"serviceAccount:EMAIL\".",
"type": "string"
},
"view": {
"$ref": "TableReference",
"description": "[Pick one] A view from a different dataset to grant access to. Queries executed against that view will have read access to tables in this dataset. The role field is not required when this field is set. If that view is updated by any user, access to the view needs to be granted again via an update operation."
}
},
"type": "object"
},
"type": "array"
},
"creationTime": {
"description": "[Output-only] The time when this dataset was created, in milliseconds since the epoch.",
"format": "int64",
"type": "string"
},
"datasetReference": {
"$ref": "DatasetReference",
"description": "[Required] A reference that identifies the dataset."
},
"defaultEncryptionConfiguration": {
"$ref": "EncryptionConfiguration"
},
"defaultPartitionExpirationMs": {
"description": "[Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property.",
"format": "int64",
"type": "string"
},
"defaultTableExpirationMs": {
"description": "[Optional] The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an expirationTime property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the expirationTime for a given table is reached, that table will be deleted automatically. If a table's expirationTime is modified or removed before the table expires, or if you provide an explicit expirationTime when creating a table, that value takes precedence over the default expiration time indicated by this property.",
"format": "int64",
"type": "string"
},
"description": {
"description": "[Optional] A user-friendly description of the dataset.",
"type": "string"
},
"etag": {
"description": "[Output-only] A hash of the resource.",
"type": "string"
},
"friendlyName": {
"description": "[Optional] A descriptive name for the dataset.",
"type": "string"
},
"id": {
"description": "[Output-only] The fully-qualified unique name of the dataset in the format projectId:datasetId. The dataset name without the project name is given in the datasetId field. When creating a new dataset, leave this field blank, and instead specify the datasetId field.",
"type": "string"
},
"kind": {
"default": "bigquery#dataset",
"description": "[Output-only] The resource type.",
"type": "string"
},
"labels": {
"additionalProperties": {
"type": "string"
},
"description": "The labels associated with this dataset. You can use these to organize and group your datasets. You can set this property when inserting or updating a dataset. See Creating and Updating Dataset Labels for more information.",
"type": "object"
},
"lastModifiedTime": {
"description": "[Output-only] The date when this dataset or any of its tables was last modified, in milliseconds since the epoch.",
"format": "int64",
"type": "string"
},
"location": {
"description": "The geographic location where the dataset should reside. The default value is US. See details at https://cloud.google.com/bigquery/docs/locations.",
"type": "string"
},
"selfLink": {
"description": "[Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.",
"type": "string"
}
},
"type": "object"
},
"DatasetList": {
"id": "DatasetList",
"properties": {
"datasets": {
"description": "An array of the dataset resources in the project. Each resource contains basic information. For full information about a particular dataset resource, use the Datasets: get method. This property is omitted when there are no datasets in the project.",
"items": {
"properties": {
"datasetReference": {
"$ref": "DatasetReference",
"description": "The dataset reference. Use this property to access specific parts of the dataset's ID, such as project ID or dataset ID."
},
"friendlyName": {
"description": "A descriptive name for the dataset, if one exists.",
"type": "string"
},
"id": {
"description": "The fully-qualified, unique, opaque ID of the dataset.",
"type": "string"
},
"kind": {
"default": "bigquery#dataset",
"description": "The resource type. This property always returns the value \"bigquery#dataset\".",
"type": "string"
},
"labels": {
"additionalProperties": {
"type": "string"
},
"description": "The labels associated with this dataset. You can use these to organize and group your datasets.",
"type": "object"
},
"location": {
"description": "The geographic location where the data resides.",
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"etag": {
"description": "A hash value of the results page. You can use this property to determine if the page has changed since the last request.",
"type": "string"
},
"kind": {
"default": "bigquery#datasetList",
"description": "The list type. This property always returns the value \"bigquery#datasetList\".",
"type": "string"
},
"nextPageToken": {
"description": "A token that can be used to request the next results page. This property is omitted on the final results page.",
"type": "string"
}
},
"type": "object"
},
"DatasetReference": {
"id": "DatasetReference",
"properties": {
"datasetId": {
"annotations": {
"required": [
"bigquery.datasets.update"
]
},
"description": "[Required] A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.",
"type": "string"
},
"projectId": {
"annotations": {
"required": [
"bigquery.datasets.update"
]
},
"description": "[Optional] The ID of the project containing this dataset.",
"type": "string"
}
},
"type": "object"
},
"DestinationTableProperties": {
"id": "DestinationTableProperties",
"properties": {
"description": {
"description": "[Optional] The description for the destination table. This will only be used if the destination table is newly created. If the table already exists and a value different than the current description is provided, the job will fail.",
"type": "string"
},
"friendlyName": {
"description": "[Optional] The friendly name for the destination table. This will only be used if the destination table is newly created. If the table already exists and a value different than the current friendly name is provided, the job will fail.",
"type": "string"
},
"labels": {
"additionalProperties": {
"type": "string"
},
"description": "[Optional] The labels associated with this table. You can use these to organize and group your tables. This will only be used if the destination table is newly created. If the table already exists and labels are different than the current labels are provided, the job will fail.",
"type": "object"
}
},
"type": "object"
},
"EncryptionConfiguration": {
"id": "EncryptionConfiguration",
"properties": {
"kmsKeyName": {
"description": "[Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.",
"type": "string"
}
},
"type": "object"
},
"Entry": {
"description": "A single entry in the confusion matrix.",
"id": "Entry",
"properties": {
"itemCount": {
"description": "Number of items being predicted as this label.",
"format": "int64",
"type": "string"
},
"predictedLabel": {
"description": "The predicted label. For confidence_threshold \u003e 0, we will\nalso add an entry indicating the number of items under the\nconfidence threshold.",
"type": "string"
}
},
"type": "object"
},
"ErrorProto": {
"id": "ErrorProto",
"properties": {
"debugInfo": {
"description": "Debugging information. This property is internal to Google and should not be used.",
"type": "string"
},
"location": {
"description": "Specifies where the error occurred, if present.",
"type": "string"
},
"message": {
"description": "A human-readable description of the error.",
"type": "string"
},
"reason": {
"description": "A short error code that summarizes the error.",
"type": "string"
}
},
"type": "object"
},
"EvaluationMetrics": {
"description": "Evaluation metrics of a model. These are either computed on all training\ndata or just the eval data based on whether eval data was used during\ntraining. These are not present for imported models.",
"id": "EvaluationMetrics",
"properties": {
"binaryClassificationMetrics": {
"$ref": "BinaryClassificationMetrics",
"description": "Populated for binary classification/classifier models."
},
"clusteringMetrics": {
"$ref": "ClusteringMetrics",
"description": "Populated for clustering models."
},
"multiClassClassificationMetrics": {
"$ref": "MultiClassClassificationMetrics",
"description": "Populated for multi-class classification/classifier models."
},
"regressionMetrics": {
"$ref": "RegressionMetrics",
"description": "Populated for regression models and explicit feedback type matrix\nfactorization models."
}
},
"type": "object"
},
"ExplainQueryStage": {
"id": "ExplainQueryStage",
"properties": {
"completedParallelInputs": {
"description": "Number of parallel input segments completed.",
"format": "int64",
"type": "string"
},
"computeMsAvg": {
"description": "Milliseconds the average shard spent on CPU-bound tasks.",
"format": "int64",
"type": "string"
},
"computeMsMax": {
"description": "Milliseconds the slowest shard spent on CPU-bound tasks.",
"format": "int64",
"type": "string"
},
"computeRatioAvg": {
"description": "Relative amount of time the average shard spent on CPU-bound tasks.",
"format": "double",
"type": "number"
},
"computeRatioMax": {
"description": "Relative amount of time the slowest shard spent on CPU-bound tasks.",
"format": "double",
"type": "number"
},
"endMs": {
"description": "Stage end time represented as milliseconds since epoch.",
"format": "int64",
"type": "string"
},
"id": {
"description": "Unique ID for stage within plan.",
"format": "int64",
"type": "string"
},
"inputStages": {
"description": "IDs for stages that are inputs to this stage.",
"items": {
"format": "int64",
"type": "string"
},
"type": "array"
},
"name": {
"description": "Human-readable name for stage.",
"type": "string"
},
"parallelInputs": {
"description": "Number of parallel input segments to be processed.",
"format": "int64",
"type": "string"
},
"readMsAvg": {
"description": "Milliseconds the average shard spent reading input.",
"format": "int64",
"type": "string"
},
"readMsMax": {
"description": "Milliseconds the slowest shard spent reading input.",
"format": "int64",
"type": "string"
},
"readRatioAvg": {
"description": "Relative amount of time the average shard spent reading input.",
"format": "double",
"type": "number"
},
"readRatioMax": {
"description": "Relative amount of time the slowest shard spent reading input.",
"format": "double",
"type": "number"
},
"recordsRead": {
"description": "Number of records read into the stage.",
"format": "int64",
"type": "string"
},
"recordsWritten": {
"description": "Number of records written by the stage.",
"format": "int64",
"type": "string"
},
"shuffleOutputBytes": {
"description": "Total number of bytes written to shuffle.",
"format": "int64",
"type": "string"
},
"shuffleOutputBytesSpilled": {
"description": "Total number of bytes written to shuffle and spilled to disk.",
"format": "int64",
"type": "string"
},
"startMs": {
"description": "Stage start time represented as milliseconds since epoch.",
"format": "int64",
"type": "string"
},
"status": {
"description": "Current status for the stage.",
"type": "string"
},
"steps": {
"description": "List of operations within the stage in dependency order (approximately chronological).",
"items": {
"$ref": "ExplainQueryStep"
},
"type": "array"
},
"waitMsAvg": {
"description": "Milliseconds the average shard spent waiting to be scheduled.",
"format": "int64",
"type": "string"
},
"waitMsMax": {
"description": "Milliseconds the slowest shard spent waiting to be scheduled.",
"format": "int64",
"type": "string"
},
"waitRatioAvg": {
"description": "Relative amount of time the average shard spent waiting to be scheduled.",
"format": "double",
"type": "number"
},
"waitRatioMax": {
"description": "Relative amount of time the slowest shard spent waiting to be scheduled.",
"format": "double",
"type": "number"
},
"writeMsAvg": {
"description": "Milliseconds the average shard spent on writing output.",
"format": "int64",
"type": "string"
},
"writeMsMax": {
"description": "Milliseconds the slowest shard spent on writing output.",
"format": "int64",
"type": "string"
},
"writeRatioAvg": {
"description": "Relative amount of time the average shard spent on writing output.",
"format": "double",
"type": "number"
},
"writeRatioMax": {
"description": "Relative amount of time the slowest shard spent on writing output.",
"format": "double",
"type": "number"
}
},
"type": "object"
},
"ExplainQueryStep": {
"id": "ExplainQueryStep",
"properties": {
"kind": {
"description": "Machine-readable operation type.",
"type": "string"
},
"substeps": {
"description": "Human-readable stage descriptions.",
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"ExternalDataConfiguration": {
"id": "ExternalDataConfiguration",
"properties": {
"autodetect": {
"description": "Try to detect schema and format options automatically. Any option specified explicitly will be honored.",
"type": "boolean"
},
"bigtableOptions": {
"$ref": "BigtableOptions",
"description": "[Optional] Additional options if sourceFormat is set to BIGTABLE."
},
"compression": {
"description": "[Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.",
"type": "string"
},
"csvOptions": {
"$ref": "CsvOptions",
"description": "Additional properties to set if sourceFormat is set to CSV."
},
"googleSheetsOptions": {
"$ref": "GoogleSheetsOptions",
"description": "[Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS."
},
"hivePartitioningMode": {
"description": "[Optional, Trusted Tester] If hive partitioning is enabled, which mode to use. Two modes are supported: - AUTO: automatically infer partition key name(s) and type(s). - STRINGS: automatic infer partition key name(s). All types are strings. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error. Note: this setting is in the process of being deprecated in favor of hivePartitioningOptions.",
"type": "string"
},
"hivePartitioningOptions": {
"$ref": "HivePartitioningOptions",
"description": "[Optional, Trusted Tester] Options to configure hive partitioning support."
},
"ignoreUnknownValues": {
"description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.",
"type": "boolean"
},
"maxBadRecords": {
"description": "[Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.",
"format": "int32",
"type": "integer"
},
"schema": {
"$ref": "TableSchema",
"description": "[Optional] The schema for the data. Schema is required for CSV and JSON formats. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats."
},
"sourceFormat": {
"description": "[Required] The data format. For CSV files, specify \"CSV\". For Google sheets, specify \"GOOGLE_SHEETS\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". For Avro files, specify \"AVRO\". For Google Cloud Datastore backups, specify \"DATASTORE_BACKUP\". [Beta] For Google Cloud Bigtable, specify \"BIGTABLE\".",
"type": "string"
},
"sourceUris": {
"description": "[Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified. Also, the '*' wildcard character is not allowed.",
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"FeatureValue": {
"description": "Representative value of a single feature within the cluster.",
"id": "FeatureValue",
"properties": {
"categoricalValue": {
"$ref": "CategoricalValue",
"description": "The categorical feature value."
},
"featureColumn": {
"description": "The feature column name.",
"type": "string"
},
"numericalValue": {
"description": "The numerical feature value. This is the centroid value for this\nfeature.",
"format": "double",
"type": "number"
}
},
"type": "object"
},
"GetQueryResultsResponse": {
"id": "GetQueryResultsResponse",
"properties": {
"cacheHit": {
"description": "Whether the query result was fetched from the query cache.",
"type": "boolean"
},
"errors": {
"description": "[Output-only] The first errors or warnings encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has completed or was unsuccessful.",
"items": {
"$ref": "ErrorProto"
},
"type": "array"
},
"etag": {
"description": "A hash of this response.",
"type": "string"
},
"jobComplete": {
"description": "Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available.",
"type": "boolean"
},
"jobReference": {
"$ref": "JobReference",
"description": "Reference to the BigQuery Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults)."
},
"kind": {
"default": "bigquery#getQueryResultsResponse",
"description": "The resource type of the response.",
"type": "string"
},
"numDmlAffectedRows": {
"description": "[Output-only] The number of rows affected by a DML statement. Present only for DML statements INSERT, UPDATE or DELETE.",
"format": "int64",
"type": "string"
},
"pageToken": {
"description": "A token used for paging results.",
"type": "string"
},
"rows": {
"description": "An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above. Present only when the query completes successfully.",
"items": {
"$ref": "TableRow"
},
"type": "array"
},
"schema": {
"$ref": "TableSchema",
"description": "The schema of the results. Present only when the query completes successfully."
},
"totalBytesProcessed": {
"description": "The total number of bytes processed for this query.",
"format": "int64",
"type": "string"
},
"totalRows": {
"description": "The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results. Present only when the query completes successfully.",
"format": "uint64",
"type": "string"
}
},
"type": "object"
},
"GetServiceAccountResponse": {
"id": "GetServiceAccountResponse",
"properties": {
"email": {
"description": "The service account email address.",
"type": "string"
},
"kind": {
"default": "bigquery#getServiceAccountResponse",
"description": "The resource type of the response.",
"type": "string"
}
},
"type": "object"
},
"GoogleSheetsOptions": {
"id": "GoogleSheetsOptions",
"properties": {
"range": {
"description": "[Optional] Range of a sheet to query from. Only used when non-empty. Typical format: sheet_name!top_left_cell_id:bottom_right_cell_id For example: sheet1!A1:B20",
"type": "string"
},
"skipLeadingRows": {
"description": "[Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N \u003e 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.",
"format": "int64",
"type": "string"
}
},
"type": "object"
},
"HivePartitioningOptions": {
"id": "HivePartitioningOptions",
"properties": {
"mode": {
"description": "[Optional, Trusted Tester] When set, what mode of hive partitioning to use when reading data. Two modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet.",
"type": "string"
},
"sourceUriPrefix": {
"description": "[Optional, Trusted Tester] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter).",
"type": "string"
}
},
"type": "object"
},
"IterationResult": {
"description": "Information about a single iteration of the training run.",
"id": "IterationResult",
"properties": {
"clusterInfos": {
"description": "Information about top clusters for clustering models.",
"items": {
"$ref": "ClusterInfo"
},
"type": "array"
},
"durationMs": {
"description": "Time taken to run the iteration in milliseconds.",
"format": "int64",
"type": "string"
},
"evalLoss": {
"description": "Loss computed on the eval data at the end of iteration.",
"format": "double",
"type": "number"
},
"index": {
"description": "Index of the iteration, 0 based.",
"format": "int32",
"type": "integer"
},
"learnRate": {
"description": "Learn rate used for this iteration.",
"format": "double",
"type": "number"
},
"trainingLoss": {
"description": "Loss computed on the training data at the end of iteration.",
"format": "double",
"type": "number"
}
},
"type": "object"
},
"Job": {
"id": "Job",
"properties": {
"configuration": {
"$ref": "JobConfiguration",
"description": "[Required] Describes the job configuration."
},
"etag": {
"description": "[Output-only] A hash of this resource.",
"type": "string"
},
"id": {
"description": "[Output-only] Opaque ID field of the job",
"type": "string"
},
"jobReference": {
"$ref": "JobReference",
"description": "[Optional] Reference describing the unique-per-user name of the job."
},
"kind": {
"default": "bigquery#job",
"description": "[Output-only] The type of the resource.",
"type": "string"
},
"selfLink": {
"description": "[Output-only] A URL that can be used to access this resource again.",
"type": "string"
},
"statistics": {
"$ref": "JobStatistics",
"description": "[Output-only] Information about the job, including starting time and ending time of the job."
},
"status": {
"$ref": "JobStatus",
"description": "[Output-only] The status of this job. Examine this value when polling an asynchronous job to see if the job is complete."
},
"user_email": {
"description": "[Output-only] Email address of the user who ran the job.",
"type": "string"
}
},
"type": "object"
},
"JobCancelResponse": {
"id": "JobCancelResponse",
"properties": {
"job": {
"$ref": "Job",
"description": "The final state of the job."
},
"kind": {
"default": "bigquery#jobCancelResponse",
"description": "The resource type of the response.",
"type": "string"
}
},
"type": "object"
},
"JobConfiguration": {
"id": "JobConfiguration",
"properties": {
"copy": {
"$ref": "JobConfigurationTableCopy",
"description": "[Pick one] Copies a table."
},
"dryRun": {
"description": "[Optional] If set, don't actually run this job. A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run. Behavior of non-query jobs is undefined.",