{
"auth": {
"oauth2": {
"scopes": {
"https://www.googleapis.com/auth/cloud-platform": {
"description": "View and manage your data across Google Cloud Platform services"
},
"https://www.googleapis.com/auth/cloud-vision": {
"description": "Apply machine learning models to understand and label images"
}
}
}
},
"basePath": "",
"baseUrl": "https://vision.googleapis.com/",
"batchPath": "batch",
"canonicalName": "Vision",
"description": "Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications.",
"discoveryVersion": "v1",
"documentationLink": "https://cloud.google.com/vision/",
"fullyEncodeReservedExpansion": true,
"icons": {
"x16": "http://www.google.com/images/icons/product/search-16.gif",
"x32": "http://www.google.com/images/icons/product/search-32.gif"
},
"id": "vision:v1",
"kind": "discovery#restDescription",
"name": "vision",
"ownerDomain": "google.com",
"ownerName": "Google",
"parameters": {
"$.xgafv": {
"description": "V1 error format.",
"enum": [
"1",
"2"
],
"enumDescriptions": [
"v1 error format",
"v2 error format"
],
"location": "query",
"type": "string"
},
"access_token": {
"description": "OAuth access token.",
"location": "query",
"type": "string"
},
"alt": {
"default": "json",
"description": "Data format for response.",
"enum": [
"json",
"media",
"proto"
],
"enumDescriptions": [
"Responses with Content-Type of application/json",
"Media download with context-dependent Content-Type",
"Responses with Content-Type of application/x-protobuf"
],
"location": "query",
"type": "string"
},
"callback": {
"description": "JSONP",
"location": "query",
"type": "string"
},
"fields": {
"description": "Selector specifying which fields to include in a partial response.",
"location": "query",
"type": "string"
},
"key": {
"description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
"location": "query",
"type": "string"
},
"oauth_token": {
"description": "OAuth 2.0 token for the current user.",
"location": "query",
"type": "string"
},
"prettyPrint": {
"default": "true",
"description": "Returns response with indentations and line breaks.",
"location": "query",
"type": "boolean"
},
"quotaUser": {
"description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
"location": "query",
"type": "string"
},
"uploadType": {
"description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").",
"location": "query",
"type": "string"
},
"upload_protocol": {
"description": "Upload protocol for media (e.g. \"raw\", \"multipart\").",
"location": "query",
"type": "string"
}
},
"protocol": "rest",
"resources": {
"files": {
"methods": {
"annotate": {
"description": "Service that performs image detection and annotation for a batch of files.\nNow only \"application/pdf\", \"image/tiff\" and \"image/gif\" are supported.\n\nThis service will extract at most 5 (customers can specify which 5 in\nAnnotateFileRequest.pages) frames (gif) or pages (pdf or tiff) from each\nfile provided and perform detection and annotation for each image\nextracted.",
"flatPath": "v1/files:annotate",
"httpMethod": "POST",
"id": "vision.files.annotate",
"parameterOrder": [],
"parameters": {},
"path": "v1/files:annotate",
"request": {
"$ref": "BatchAnnotateFilesRequest"
},
"response": {
"$ref": "BatchAnnotateFilesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"asyncBatchAnnotate": {
"description": "Run asynchronous image detection and annotation for a list of generic\nfiles, such as PDF files, which may contain multiple pages and multiple\nimages per page. Progress and results can be retrieved through the\n`google.longrunning.Operations` interface.\n`Operation.metadata` contains `OperationMetadata` (metadata).\n`Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).",
"flatPath": "v1/files:asyncBatchAnnotate",
"httpMethod": "POST",
"id": "vision.files.asyncBatchAnnotate",
"parameterOrder": [],
"parameters": {},
"path": "v1/files:asyncBatchAnnotate",
"request": {
"$ref": "AsyncBatchAnnotateFilesRequest"
},
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
},
"images": {
"methods": {
"annotate": {
"description": "Run image detection and annotation for a batch of images.",
"flatPath": "v1/images:annotate",
"httpMethod": "POST",
"id": "vision.images.annotate",
"parameterOrder": [],
"parameters": {},
"path": "v1/images:annotate",
"request": {
"$ref": "BatchAnnotateImagesRequest"
},
"response": {
"$ref": "BatchAnnotateImagesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"asyncBatchAnnotate": {
"description": "Run asynchronous image detection and annotation for a list of images.\n\nProgress and results can be retrieved through the\n`google.longrunning.Operations` interface.\n`Operation.metadata` contains `OperationMetadata` (metadata).\n`Operation.response` contains `AsyncBatchAnnotateImagesResponse` (results).\n\nThis service will write image annotation outputs to json files in customer\nGCS bucket, each json file containing BatchAnnotateImagesResponse proto.",
"flatPath": "v1/images:asyncBatchAnnotate",
"httpMethod": "POST",
"id": "vision.images.asyncBatchAnnotate",
"parameterOrder": [],
"parameters": {},
"path": "v1/images:asyncBatchAnnotate",
"request": {
"$ref": "AsyncBatchAnnotateImagesRequest"
},
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
},
"locations": {
"resources": {
"operations": {
"methods": {
"get": {
"description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.",
"flatPath": "v1/locations/{locationsId}/operations/{operationsId}",
"httpMethod": "GET",
"id": "vision.locations.operations.get",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "The name of the operation resource.",
"location": "path",
"pattern": "^locations/[^/]+/operations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
}
}
},
"operations": {
"methods": {
"cancel": {
"description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.",
"flatPath": "v1/operations/{operationsId}:cancel",
"httpMethod": "POST",
"id": "vision.operations.cancel",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "The name of the operation resource to be cancelled.",
"location": "path",
"pattern": "^operations/.+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}:cancel",
"request": {
"$ref": "CancelOperationRequest"
},
"response": {
"$ref": "Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"delete": {
"description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.",
"flatPath": "v1/operations/{operationsId}",
"httpMethod": "DELETE",
"id": "vision.operations.delete",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "The name of the operation resource to be deleted.",
"location": "path",
"pattern": "^operations/.+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"get": {
"description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.",
"flatPath": "v1/operations/{operationsId}",
"httpMethod": "GET",
"id": "vision.operations.get",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "The name of the operation resource.",
"location": "path",
"pattern": "^operations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"list": {
"description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.",
"flatPath": "v1/operations",
"httpMethod": "GET",
"id": "vision.operations.list",
"parameterOrder": [
"name"
],
"parameters": {
"filter": {
"description": "The standard list filter.",
"location": "query",
"type": "string"
},
"name": {
"description": "The name of the operation's parent resource.",
"location": "path",
"pattern": "^operations$",
"required": true,
"type": "string"
},
"pageSize": {
"description": "The standard list page size.",
"format": "int32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "The standard list page token.",
"location": "query",
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "ListOperationsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
},
"projects": {
"resources": {
"files": {
"methods": {
"annotate": {
"description": "Service that performs image detection and annotation for a batch of files.\nNow only \"application/pdf\", \"image/tiff\" and \"image/gif\" are supported.\n\nThis service will extract at most 5 (customers can specify which 5 in\nAnnotateFileRequest.pages) frames (gif) or pages (pdf or tiff) from each\nfile provided and perform detection and annotation for each image\nextracted.",
"flatPath": "v1/projects/{projectsId}/files:annotate",
"httpMethod": "POST",
"id": "vision.projects.files.annotate",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"location": "path",
"pattern": "^projects/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/files:annotate",
"request": {
"$ref": "BatchAnnotateFilesRequest"
},
"response": {
"$ref": "BatchAnnotateFilesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"asyncBatchAnnotate": {
"description": "Run asynchronous image detection and annotation for a list of generic\nfiles, such as PDF files, which may contain multiple pages and multiple\nimages per page. Progress and results can be retrieved through the\n`google.longrunning.Operations` interface.\n`Operation.metadata` contains `OperationMetadata` (metadata).\n`Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).",
"flatPath": "v1/projects/{projectsId}/files:asyncBatchAnnotate",
"httpMethod": "POST",
"id": "vision.projects.files.asyncBatchAnnotate",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"location": "path",
"pattern": "^projects/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/files:asyncBatchAnnotate",
"request": {
"$ref": "AsyncBatchAnnotateFilesRequest"
},
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
},
"images": {
"methods": {
"annotate": {
"description": "Run image detection and annotation for a batch of images.",
"flatPath": "v1/projects/{projectsId}/images:annotate",
"httpMethod": "POST",
"id": "vision.projects.images.annotate",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"location": "path",
"pattern": "^projects/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/images:annotate",
"request": {
"$ref": "BatchAnnotateImagesRequest"
},
"response": {
"$ref": "BatchAnnotateImagesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"asyncBatchAnnotate": {
"description": "Run asynchronous image detection and annotation for a list of images.\n\nProgress and results can be retrieved through the\n`google.longrunning.Operations` interface.\n`Operation.metadata` contains `OperationMetadata` (metadata).\n`Operation.response` contains `AsyncBatchAnnotateImagesResponse` (results).\n\nThis service will write image annotation outputs to json files in customer\nGCS bucket, each json file containing BatchAnnotateImagesResponse proto.",
"flatPath": "v1/projects/{projectsId}/images:asyncBatchAnnotate",
"httpMethod": "POST",
"id": "vision.projects.images.asyncBatchAnnotate",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"location": "path",
"pattern": "^projects/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/images:asyncBatchAnnotate",
"request": {
"$ref": "AsyncBatchAnnotateImagesRequest"
},
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
},
"locations": {
"resources": {
"files": {
"methods": {
"annotate": {
"description": "Service that performs image detection and annotation for a batch of files.\nNow only \"application/pdf\", \"image/tiff\" and \"image/gif\" are supported.\n\nThis service will extract at most 5 (customers can specify which 5 in\nAnnotateFileRequest.pages) frames (gif) or pages (pdf or tiff) from each\nfile provided and perform detection and annotation for each image\nextracted.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/files:annotate",
"httpMethod": "POST",
"id": "vision.projects.locations.files.annotate",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/files:annotate",
"request": {
"$ref": "BatchAnnotateFilesRequest"
},
"response": {
"$ref": "BatchAnnotateFilesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"asyncBatchAnnotate": {
"description": "Run asynchronous image detection and annotation for a list of generic\nfiles, such as PDF files, which may contain multiple pages and multiple\nimages per page. Progress and results can be retrieved through the\n`google.longrunning.Operations` interface.\n`Operation.metadata` contains `OperationMetadata` (metadata).\n`Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/files:asyncBatchAnnotate",
"httpMethod": "POST",
"id": "vision.projects.locations.files.asyncBatchAnnotate",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/files:asyncBatchAnnotate",
"request": {
"$ref": "AsyncBatchAnnotateFilesRequest"
},
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
},
"images": {
"methods": {
"annotate": {
"description": "Run image detection and annotation for a batch of images.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/images:annotate",
"httpMethod": "POST",
"id": "vision.projects.locations.images.annotate",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/images:annotate",
"request": {
"$ref": "BatchAnnotateImagesRequest"
},
"response": {
"$ref": "BatchAnnotateImagesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"asyncBatchAnnotate": {
"description": "Run asynchronous image detection and annotation for a list of images.\n\nProgress and results can be retrieved through the\n`google.longrunning.Operations` interface.\n`Operation.metadata` contains `OperationMetadata` (metadata).\n`Operation.response` contains `AsyncBatchAnnotateImagesResponse` (results).\n\nThis service will write image annotation outputs to json files in customer\nGCS bucket, each json file containing BatchAnnotateImagesResponse proto.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/images:asyncBatchAnnotate",
"httpMethod": "POST",
"id": "vision.projects.locations.images.asyncBatchAnnotate",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/images:asyncBatchAnnotate",
"request": {
"$ref": "AsyncBatchAnnotateImagesRequest"
},
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
},
"operations": {
"methods": {
"get": {
"description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}",
"httpMethod": "GET",
"id": "vision.projects.locations.operations.get",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "The name of the operation resource.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
},
"productSets": {
"methods": {
"addProduct": {
"description": "Adds a Product to the specified ProductSet. If the Product is already\npresent, no change is made.\n\nOne Product can be added to at most 100 ProductSets.\n\nPossible errors:\n\n* Returns NOT_FOUND if the Product or the ProductSet doesn't exist.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/productSets/{productSetsId}:addProduct",
"httpMethod": "POST",
"id": "vision.projects.locations.productSets.addProduct",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "Required. The resource name for the ProductSet to modify.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/productSets/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}:addProduct",
"request": {
"$ref": "AddProductToProductSetRequest"
},
"response": {
"$ref": "Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"create": {
"description": "Creates and returns a new ProductSet resource.\n\nPossible errors:\n\n* Returns INVALID_ARGUMENT if display_name is missing, or is longer than\n 4096 characters.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/productSets",
"httpMethod": "POST",
"id": "vision.projects.locations.productSets.create",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Required. The project in which the ProductSet should be created.\n\nFormat is `projects/PROJECT_ID/locations/LOC_ID`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+$",
"required": true,
"type": "string"
},
"productSetId": {
"description": "A user-supplied resource id for this ProductSet. If set, the server will\nattempt to use this value as the resource id. If it is already in use, an\nerror is returned with code ALREADY_EXISTS. Must be at most 128 characters\nlong. It cannot contain the character `/`.",
"location": "query",
"type": "string"
}
},
"path": "v1/{+parent}/productSets",
"request": {
"$ref": "ProductSet"
},
"response": {
"$ref": "ProductSet"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"delete": {
"description": "Permanently deletes a ProductSet. Products and ReferenceImages in the\nProductSet are not deleted.\n\nThe actual image files are not deleted from Google Cloud Storage.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/productSets/{productSetsId}",
"httpMethod": "DELETE",
"id": "vision.projects.locations.productSets.delete",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "Required. Resource name of the ProductSet to delete.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/productSets/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"get": {
"description": "Gets information associated with a ProductSet.\n\nPossible errors:\n\n* Returns NOT_FOUND if the ProductSet does not exist.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/productSets/{productSetsId}",
"httpMethod": "GET",
"id": "vision.projects.locations.productSets.get",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "Required. Resource name of the ProductSet to get.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOG_ID/productSets/PRODUCT_SET_ID`",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/productSets/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "ProductSet"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"import": {
"description": "Asynchronous API that imports a list of reference images to specified\nproduct sets based on a list of image information.\n\nThe google.longrunning.Operation API can be used to keep track of the\nprogress and results of the request.\n`Operation.metadata` contains `BatchOperationMetadata`. (progress)\n`Operation.response` contains `ImportProductSetsResponse`. (results)\n\nThe input source of this method is a csv file on Google Cloud Storage.\nFor the format of the csv file please see\nImportProductSetsGcsSource.csv_file_uri.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/productSets:import",
"httpMethod": "POST",
"id": "vision.projects.locations.productSets.import",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Required. The project in which the ProductSets should be imported.\n\nFormat is `projects/PROJECT_ID/locations/LOC_ID`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/productSets:import",
"request": {
"$ref": "ImportProductSetsRequest"
},
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"list": {
"description": "Lists ProductSets in an unspecified order.\n\nPossible errors:\n\n* Returns INVALID_ARGUMENT if page_size is greater than 100, or less\n than 1.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/productSets",
"httpMethod": "GET",
"id": "vision.projects.locations.productSets.list",
"parameterOrder": [
"parent"
],
"parameters": {
"pageSize": {
"description": "The maximum number of items to return. Default 10, maximum 100.",
"format": "int32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "The next_page_token returned from a previous List request, if any.",
"location": "query",
"type": "string"
},
"parent": {
"description": "Required. The project from which ProductSets should be listed.\n\nFormat is `projects/PROJECT_ID/locations/LOC_ID`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/productSets",
"response": {
"$ref": "ListProductSetsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"patch": {
"description": "Makes changes to a ProductSet resource.\nOnly display_name can be updated currently.\n\nPossible errors:\n\n* Returns NOT_FOUND if the ProductSet does not exist.\n* Returns INVALID_ARGUMENT if display_name is present in update_mask but\n missing from the request or longer than 4096 characters.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/productSets/{productSetsId}",
"httpMethod": "PATCH",
"id": "vision.projects.locations.productSets.patch",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "The resource name of the ProductSet.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.\n\nThis field is ignored when creating a ProductSet.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/productSets/[^/]+$",
"required": true,
"type": "string"
},
"updateMask": {
"description": "The FieldMask that specifies which fields to\nupdate.\nIf update_mask isn't specified, all mutable fields are to be updated.\nValid mask path is `display_name`.",
"format": "google-fieldmask",
"location": "query",
"type": "string"
}
},
"path": "v1/{+name}",
"request": {
"$ref": "ProductSet"
},
"response": {
"$ref": "ProductSet"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"removeProduct": {
"description": "Removes a Product from the specified ProductSet.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/productSets/{productSetsId}:removeProduct",
"httpMethod": "POST",
"id": "vision.projects.locations.productSets.removeProduct",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "Required. The resource name for the ProductSet to modify.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/productSets/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}:removeProduct",
"request": {
"$ref": "RemoveProductFromProductSetRequest"
},
"response": {
"$ref": "Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
},
"resources": {
"products": {
"methods": {
"list": {
"description": "Lists the Products in a ProductSet, in an unspecified order. If the\nProductSet does not exist, the products field of the response will be\nempty.\n\nPossible errors:\n\n* Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/productSets/{productSetsId}/products",
"httpMethod": "GET",
"id": "vision.projects.locations.productSets.products.list",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "Required. The ProductSet resource for which to retrieve Products.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/productSets/[^/]+$",
"required": true,
"type": "string"
},
"pageSize": {
"description": "The maximum number of items to return. Default 10, maximum 100.",
"format": "int32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "The next_page_token returned from a previous List request, if any.",
"location": "query",
"type": "string"
}
},
"path": "v1/{+name}/products",
"response": {
"$ref": "ListProductsInProductSetResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
}
}
},
"products": {
"methods": {
"create": {
"description": "Creates and returns a new product resource.\n\nPossible errors:\n\n* Returns INVALID_ARGUMENT if display_name is missing or longer than 4096\n characters.\n* Returns INVALID_ARGUMENT if description is longer than 4096 characters.\n* Returns INVALID_ARGUMENT if product_category is missing or invalid.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/products",
"httpMethod": "POST",
"id": "vision.projects.locations.products.create",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Required. The project in which the Product should be created.\n\nFormat is\n`projects/PROJECT_ID/locations/LOC_ID`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+$",
"required": true,
"type": "string"
},
"productId": {
"description": "A user-supplied resource id for this Product. If set, the server will\nattempt to use this value as the resource id. If it is already in use, an\nerror is returned with code ALREADY_EXISTS. Must be at most 128 characters\nlong. It cannot contain the character `/`.",
"location": "query",
"type": "string"
}
},
"path": "v1/{+parent}/products",
"request": {
"$ref": "Product"
},
"response": {
"$ref": "Product"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"delete": {
"description": "Permanently deletes a product and its reference images.\n\nMetadata of the product and all its images will be deleted right away, but\nsearch queries against ProductSets containing the product may still work\nuntil all related caches are refreshed.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/products/{productsId}",
"httpMethod": "DELETE",
"id": "vision.projects.locations.products.delete",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "Required. Resource name of product to delete.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/products/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"get": {
"description": "Gets information associated with a Product.\n\nPossible errors:\n\n* Returns NOT_FOUND if the Product does not exist.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/products/{productsId}",
"httpMethod": "GET",
"id": "vision.projects.locations.products.get",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "Required. Resource name of the Product to get.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/products/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "Product"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"list": {
"description": "Lists products in an unspecified order.\n\nPossible errors:\n\n* Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/products",
"httpMethod": "GET",
"id": "vision.projects.locations.products.list",
"parameterOrder": [
"parent"
],
"parameters": {
"pageSize": {
"description": "The maximum number of items to return. Default 10, maximum 100.",
"format": "int32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "The next_page_token returned from a previous List request, if any.",
"location": "query",
"type": "string"
},
"parent": {
"description": "Required. The project OR ProductSet from which Products should be listed.\n\nFormat:\n`projects/PROJECT_ID/locations/LOC_ID`",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/products",
"response": {
"$ref": "ListProductsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"patch": {
"description": "Makes changes to a Product resource.\nOnly the `display_name`, `description`, and `labels` fields can be updated\nright now.\n\nIf labels are updated, the change will not be reflected in queries until\nthe next index time.\n\nPossible errors:\n\n* Returns NOT_FOUND if the Product does not exist.\n* Returns INVALID_ARGUMENT if display_name is present in update_mask but is\n missing from the request or longer than 4096 characters.\n* Returns INVALID_ARGUMENT if description is present in update_mask but is\n longer than 4096 characters.\n* Returns INVALID_ARGUMENT if product_category is present in update_mask.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/products/{productsId}",
"httpMethod": "PATCH",
"id": "vision.projects.locations.products.patch",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "The resource name of the product.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.\n\nThis field is ignored when creating a product.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/products/[^/]+$",
"required": true,
"type": "string"
},
"updateMask": {
"description": "The FieldMask that specifies which fields\nto update.\nIf update_mask isn't specified, all mutable fields are to be updated.\nValid mask paths include `product_labels`, `display_name`, and\n`description`.",
"format": "google-fieldmask",
"location": "query",
"type": "string"
}
},
"path": "v1/{+name}",
"request": {
"$ref": "Product"
},
"response": {
"$ref": "Product"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"purge": {
"description": "Asynchronous API to delete all Products in a ProductSet or all Products\nthat are in no ProductSet.\n\nIf a Product is a member of the specified ProductSet in addition to other\nProductSets, the Product will still be deleted.\n\nIt is recommended to not delete the specified ProductSet until after this\noperation has completed. It is also recommended to not add any of the\nProducts involved in the batch delete to a new ProductSet while this\noperation is running because those Products may still end up deleted.\n\nIt's not possible to undo the PurgeProducts operation. Therefore, it is\nrecommended to keep the csv files used in ImportProductSets (if that was\nhow you originally built the Product Set) before starting PurgeProducts, in\ncase you need to re-import the data after deletion.\n\nIf the plan is to purge all of the Products from a ProductSet and then\nre-use the empty ProductSet to re-import new Products into the empty\nProductSet, you must wait until the PurgeProducts operation has finished\nfor that ProductSet.\n\nThe google.longrunning.Operation API can be used to keep track of the\nprogress and results of the request.\n`Operation.metadata` contains `BatchOperationMetadata`. (progress)",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/products:purge",
"httpMethod": "POST",
"id": "vision.projects.locations.products.purge",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Required. The project and location in which the Products should be deleted.\n\nFormat is `projects/PROJECT_ID/locations/LOC_ID`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/products:purge",
"request": {
"$ref": "PurgeProductsRequest"
},
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
},
"resources": {
"referenceImages": {
"methods": {
"create": {
"description": "Creates and returns a new ReferenceImage resource.\n\nThe `bounding_poly` field is optional. If `bounding_poly` is not specified,\nthe system will try to detect regions of interest in the image that are\ncompatible with the product_category on the parent product. If it is\nspecified, detection is ALWAYS skipped. The system converts polygons into\nnon-rotated rectangles.\n\nNote that the pipeline will resize the image if the image resolution is too\nlarge to process (above 50MP).\n\nPossible errors:\n\n* Returns INVALID_ARGUMENT if the image_uri is missing or longer than 4096\n characters.\n* Returns INVALID_ARGUMENT if the product does not exist.\n* Returns INVALID_ARGUMENT if bounding_poly is not provided, and nothing\n compatible with the parent product's product_category is detected.\n* Returns INVALID_ARGUMENT if bounding_poly contains more than 10 polygons.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/products/{productsId}/referenceImages",
"httpMethod": "POST",
"id": "vision.projects.locations.products.referenceImages.create",
"parameterOrder": [
"parent"
],
"parameters": {
"parent": {
"description": "Required. Resource name of the product in which to create the reference image.\n\nFormat is\n`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/products/[^/]+$",
"required": true,
"type": "string"
},
"referenceImageId": {
"description": "A user-supplied resource id for the ReferenceImage to be added. If set,\nthe server will attempt to use this value as the resource id. If it is\nalready in use, an error is returned with code ALREADY_EXISTS. Must be at\nmost 128 characters long. It cannot contain the character `/`.",
"location": "query",
"type": "string"
}
},
"path": "v1/{+parent}/referenceImages",
"request": {
"$ref": "ReferenceImage"
},
"response": {
"$ref": "ReferenceImage"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"delete": {
"description": "Permanently deletes a reference image.\n\nThe image metadata will be deleted right away, but search queries\nagainst ProductSets containing the image may still work until all related\ncaches are refreshed.\n\nThe actual image files are not deleted from Google Cloud Storage.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/products/{productsId}/referenceImages/{referenceImagesId}",
"httpMethod": "DELETE",
"id": "vision.projects.locations.products.referenceImages.delete",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "Required. The resource name of the reference image to delete.\n\nFormat is:\n\n`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/products/[^/]+/referenceImages/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"get": {
"description": "Gets information associated with a ReferenceImage.\n\nPossible errors:\n\n* Returns NOT_FOUND if the specified image does not exist.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/products/{productsId}/referenceImages/{referenceImagesId}",
"httpMethod": "GET",
"id": "vision.projects.locations.products.referenceImages.get",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "Required. The resource name of the ReferenceImage to get.\n\nFormat is:\n\n`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/products/[^/]+/referenceImages/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "ReferenceImage"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
},
"list": {
"description": "Lists reference images.\n\nPossible errors:\n\n* Returns NOT_FOUND if the parent product does not exist.\n* Returns INVALID_ARGUMENT if the page_size is greater than 100, or less\n than 1.",
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/products/{productsId}/referenceImages",
"httpMethod": "GET",
"id": "vision.projects.locations.products.referenceImages.list",
"parameterOrder": [
"parent"
],
"parameters": {
"pageSize": {
"description": "The maximum number of items to return. Default 10, maximum 100.",
"format": "int32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "A token identifying a page of results to be returned. This is the value\nof `nextPageToken` returned in a previous reference image list request.\n\nDefaults to the first page if not specified.",
"location": "query",
"type": "string"
},
"parent": {
"description": "Required. Resource name of the product containing the reference images.\n\nFormat is\n`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.",
"location": "path",
"pattern": "^projects/[^/]+/locations/[^/]+/products/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+parent}/referenceImages",
"response": {
"$ref": "ListReferenceImagesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
}
}
}
}
},
"operations": {
"methods": {
"get": {
"description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.",
"flatPath": "v1/projects/{projectsId}/operations/{operationsId}",
"httpMethod": "GET",
"id": "vision.projects.operations.get",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "The name of the operation resource.",
"location": "path",
"pattern": "^projects/[^/]+/operations/[^/]+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision"
]
}
}
}
}
}
},
"revision": "20190927",
"rootUrl": "https://vision.googleapis.com/",
"schemas": {
"AddProductToProductSetRequest": {
"description": "Request message for the `AddProductToProductSet` method.",
"id": "AddProductToProductSetRequest",
"properties": {
"product": {
"description": "Required. The resource name for the Product to be added to this ProductSet.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`",
"type": "string"
}
},
"type": "object"
},
"AnnotateFileRequest": {
"description": "A request to annotate one single file, e.g. a PDF, TIFF or GIF file.",
"id": "AnnotateFileRequest",
"properties": {
"features": {
"description": "Required. Requested features.",
"items": {
"$ref": "Feature"
},
"type": "array"
},
"imageContext": {
"$ref": "ImageContext",
"description": "Additional context that may accompany the image(s) in the file."
},
"inputConfig": {
"$ref": "InputConfig",
"description": "Required. Information about the input file."
},
"pages": {
"description": "Pages of the file to perform image annotation.\n\nPages starts from 1, we assume the first page of the file is page 1.\nAt most 5 pages are supported per request. Pages can be negative.\n\nPage 1 means the first page.\nPage 2 means the second page.\nPage -1 means the last page.\nPage -2 means the second to the last page.\n\nIf the file is GIF instead of PDF or TIFF, page refers to GIF frames.\n\nIf this field is empty, by default the service performs image annotation\nfor the first 5 pages of the file.",
"items": {
"format": "int32",
"type": "integer"
},
"type": "array"
}
},
"type": "object"
},
"AnnotateFileResponse": {
"description": "Response to a single file annotation request. A file may contain one or more\nimages, which individually have their own responses.",
"id": "AnnotateFileResponse",
"properties": {
"error": {
"$ref": "Status",
"description": "If set, represents the error message for the failed request. The\n`responses` field will not be set in this case."
},
"inputConfig": {
"$ref": "InputConfig",
"description": "Information about the file for which this response is generated."
},
"responses": {
"description": "Individual responses to images found within the file. This field will be\nempty if the `error` field is set.",
"items": {
"$ref": "AnnotateImageResponse"
},
"type": "array"
},
"totalPages": {
"description": "This field gives the total number of pages in the file.",
"format": "int32",
"type": "integer"
}
},
"type": "object"
},
"AnnotateImageRequest": {
"description": "Request for performing Google Cloud Vision API tasks over a user-provided\nimage, with user-requested features, and with context information.",
"id": "AnnotateImageRequest",
"properties": {
"features": {
"description": "Requested features.",
"items": {
"$ref": "Feature"
},
"type": "array"
},
"image": {
"$ref": "Image",
"description": "The image to be processed."
},
"imageContext": {
"$ref": "ImageContext",
"description": "Additional context that may accompany the image."
}
},
"type": "object"
},
"AnnotateImageResponse": {
"description": "Response to an image annotation request.",
"id": "AnnotateImageResponse",
"properties": {
"context": {
"$ref": "ImageAnnotationContext",
"description": "If present, contextual information is needed to understand where this image\ncomes from."
},
"cropHintsAnnotation": {
"$ref": "CropHintsAnnotation",
"description": "If present, crop hints have completed successfully."
},
"error": {
"$ref": "Status",
"description": "If set, represents the error message for the operation.\nNote that filled-in image annotations are guaranteed to be\ncorrect, even when `error` is set."
},
"faceAnnotations": {
"description": "If present, face detection has completed successfully.",
"items": {
"$ref": "FaceAnnotation"
},
"type": "array"
},
"fullTextAnnotation": {
"$ref": "TextAnnotation",
"description": "If present, text (OCR) detection or document (OCR) text detection has\ncompleted successfully.\nThis annotation provides the structural hierarchy for the OCR detected\ntext."
},
"imagePropertiesAnnotation": {
"$ref": "ImageProperties",
"description": "If present, image properties were extracted successfully."
},
"labelAnnotations": {
"description": "If present, label detection has completed successfully.",
"items": {
"$ref": "EntityAnnotation"
},
"type": "array"
},
"landmarkAnnotations": {
"description": "If present, landmark detection has completed successfully.",
"items": {
"$ref": "EntityAnnotation"
},
"type": "array"
},
"localizedObjectAnnotations": {
"description": "If present, localized object detection has completed successfully.\nThis will be sorted descending by confidence score.",
"items": {
"$ref": "LocalizedObjectAnnotation"
},
"type": "array"
},
"logoAnnotations": {
"description": "If present, logo detection has completed successfully.",
"items": {
"$ref": "EntityAnnotation"
},
"type": "array"
},
"productSearchResults": {
"$ref": "ProductSearchResults",
"description": "If present, product search has completed successfully."
},
"safeSearchAnnotation": {
"$ref": "SafeSearchAnnotation",
"description": "If present, safe-search annotation has completed successfully."
},
"textAnnotations": {
"description": "If present, text (OCR) detection has completed successfully.",
"items": {
"$ref": "EntityAnnotation"
},
"type": "array"
},
"webDetection": {
"$ref": "WebDetection",
"description": "If present, web detection has completed successfully."
}
},
"type": "object"
},
"AsyncAnnotateFileRequest": {
"description": "An offline file annotation request.",
"id": "AsyncAnnotateFileRequest",
"properties": {
"features": {
"description": "Required. Requested features.",
"items": {
"$ref": "Feature"
},
"type": "array"
},
"imageContext": {
"$ref": "ImageContext",
"description": "Additional context that may accompany the image(s) in the file."
},
"inputConfig": {
"$ref": "InputConfig",
"description": "Required. Information about the input file."
},
"outputConfig": {
"$ref": "OutputConfig",
"description": "Required. The desired output location and metadata (e.g. format)."
}
},
"type": "object"
},
"AsyncAnnotateFileResponse": {
"description": "The response for a single offline file annotation request.",
"id": "AsyncAnnotateFileResponse",
"properties": {
"outputConfig": {
"$ref": "OutputConfig",
"description": "The output location and metadata from AsyncAnnotateFileRequest."
}
},
"type": "object"
},
"AsyncBatchAnnotateFilesRequest": {
"description": "Multiple async file annotation requests are batched into a single service\ncall.",
"id": "AsyncBatchAnnotateFilesRequest",
"properties": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"type": "string"
},
"requests": {
"description": "Required. Individual async file annotation requests for this batch.",
"items": {
"$ref": "AsyncAnnotateFileRequest"
},
"type": "array"
}
},
"type": "object"
},
"AsyncBatchAnnotateFilesResponse": {
"description": "Response to an async batch file annotation request.",
"id": "AsyncBatchAnnotateFilesResponse",
"properties": {
"responses": {
"description": "The list of file annotation responses, one for each request in\nAsyncBatchAnnotateFilesRequest.",
"items": {
"$ref": "AsyncAnnotateFileResponse"
},
"type": "array"
}
},
"type": "object"
},
"AsyncBatchAnnotateImagesRequest": {
"description": "Request for async image annotation for a list of images.",
"id": "AsyncBatchAnnotateImagesRequest",
"properties": {
"outputConfig": {
"$ref": "OutputConfig",
"description": "Required. The desired output location and metadata (e.g. format)."
},
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"type": "string"
},
"requests": {
"description": "Required. Individual image annotation requests for this batch.",
"items": {
"$ref": "AnnotateImageRequest"
},
"type": "array"
}
},
"type": "object"
},
"AsyncBatchAnnotateImagesResponse": {
"description": "Response to an async batch image annotation request.",
"id": "AsyncBatchAnnotateImagesResponse",
"properties": {
"outputConfig": {
"$ref": "OutputConfig",
"description": "The output location and metadata from AsyncBatchAnnotateImagesRequest."
}
},
"type": "object"
},
"BatchAnnotateFilesRequest": {
"description": "A list of requests to annotate files using the BatchAnnotateFiles API.",
"id": "BatchAnnotateFilesRequest",
"properties": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"type": "string"
},
"requests": {
"description": "Required. The list of file annotation requests. Right now we support only one\nAnnotateFileRequest in BatchAnnotateFilesRequest.",
"items": {
"$ref": "AnnotateFileRequest"
},
"type": "array"
}
},
"type": "object"
},
"BatchAnnotateFilesResponse": {
"description": "A list of file annotation responses.",
"id": "BatchAnnotateFilesResponse",
"properties": {
"responses": {
"description": "The list of file annotation responses, each response corresponding to each\nAnnotateFileRequest in BatchAnnotateFilesRequest.",
"items": {
"$ref": "AnnotateFileResponse"
},
"type": "array"
}
},
"type": "object"
},
"BatchAnnotateImagesRequest": {
"description": "Multiple image annotation requests are batched into a single service call.",
"id": "BatchAnnotateImagesRequest",
"properties": {
"parent": {
"description": "Optional. Target project and location to make a call.\n\nFormat: `projects/{project-id}/locations/{location-id}`.\n\nIf no parent is specified, a region will be chosen automatically.\n\nSupported location-ids:\n `us`: USA country only,\n `asia`: East asia areas, like Japan, Taiwan,\n `eu`: The European Union.\n\nExample: `projects/project-A/locations/eu`.",
"type": "string"
},
"requests": {
"description": "Required. Individual image annotation requests for this batch.",
"items": {
"$ref": "AnnotateImageRequest"
},
"type": "array"
}
},
"type": "object"
},
"BatchAnnotateImagesResponse": {
"description": "Response to a batch image annotation request.",
"id": "BatchAnnotateImagesResponse",
"properties": {
"responses": {
"description": "Individual responses to image annotation requests within the batch.",
"items": {
"$ref": "AnnotateImageResponse"
},
"type": "array"
}
},
"type": "object"
},
"BatchOperationMetadata": {
"description": "Metadata for the batch operations such as the current state.\n\nThis is included in the `metadata` field of the `Operation` returned by the\n`GetOperation` call of the `google::longrunning::Operations` service.",
"id": "BatchOperationMetadata",
"properties": {
"endTime": {
"description": "The time when the batch request is finished and\ngoogle.longrunning.Operation.done is set to true.",
"format": "google-datetime",
"type": "string"
},
"state": {
"description": "The current state of the batch operation.",
"enum": [
"STATE_UNSPECIFIED",
"PROCESSING",
"SUCCESSFUL",
"FAILED",
"CANCELLED"
],
"enumDescriptions": [
"Invalid.",
"Request is actively being processed.",
"The request is done and at least one item has been successfully\nprocessed.",
"The request is done and no item has been successfully processed.",
"The request is done after the longrunning.Operations.CancelOperation has\nbeen called by the user. Any records that were processed before the\ncancel command are output as specified in the request."
],
"type": "string"
},
"submitTime": {
"description": "The time when the batch request was submitted to the server.",
"format": "google-datetime",
"type": "string"
}
},
"type": "object"
},
"Block": {
"description": "Logical element on the page.",
"id": "Block",
"properties": {
"blockType": {
"description": "Detected block type (text, image etc) for this block.",
"enum": [
"UNKNOWN",
"TEXT",
"TABLE",
"PICTURE",
"RULER",
"BARCODE"
],
"enumDescriptions": [
"Unknown block type.",
"Regular text block.",
"Table block.",
"Image block.",
"Horizontal/vertical line box.",
"Barcode block."
],
"type": "string"
},
"boundingBox": {
"$ref": "BoundingPoly",
"description": "The bounding box for the block.\nThe vertices are in the order of top-left, top-right, bottom-right,\nbottom-left. When a rotation of the bounding box is detected the rotation\nis represented as around the top-left corner as defined when the text is\nread in the 'natural' orientation.\nFor example:\n\n* when the text is horizontal it might look like:\n\n 0----1\n | |\n 3----2\n\n* when it's rotated 180 degrees around the top-left corner it becomes:\n\n 2----3\n | |\n 1----0\n\n and the vertex order will still be (0, 1, 2, 3)."
},
"confidence": {
"description": "Confidence of the OCR results on the block. Range [0, 1].",
"format": "float",
"type": "number"
},
"paragraphs": {
"description": "List of paragraphs in this block (if this blocks is of type text).",
"items": {
"$ref": "Paragraph"
},
"type": "array"
},
"property": {
"$ref": "TextProperty",
"description": "Additional information detected for the block."
}
},
"type": "object"
},
"BoundingPoly": {
"description": "A bounding polygon for the detected image annotation.",
"id": "BoundingPoly",
"properties": {
"normalizedVertices": {
"description": "The bounding polygon normalized vertices.",
"items": {
"$ref": "NormalizedVertex"
},
"type": "array"
},
"vertices": {
"description": "The bounding polygon vertices.",
"items": {
"$ref": "Vertex"
},
"type": "array"
}
},
"type": "object"
},
"CancelOperationRequest": {
"description": "The request message for Operations.CancelOperation.",
"id": "CancelOperationRequest",
"properties": {},
"type": "object"
},
"Color": {
"description": "Represents a color in the RGBA color space. This representation is designed\nfor simplicity of conversion to/from color representations in various\nlanguages over compactness; for example, the fields of this representation\ncan be trivially provided to the constructor of \"java.awt.Color\" in Java; it\ncan also be trivially provided to UIColor's \"+colorWithRed:green:blue:alpha\"\nmethod in iOS; and, with just a little work, it can be easily formatted into\na CSS \"rgba()\" string in JavaScript, as well.\n\nNote: this proto does not carry information about the absolute color space\nthat should be used to interpret the RGB value (e.g. sRGB, Adobe RGB,\nDCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color\nspace.\n\nExample (Java):\n\n import com.google.type.Color;\n\n // ...\n public static java.awt.Color fromProto(Color protocolor) {\n float alpha = protocolor.hasAlpha()\n ? protocolor.getAlpha().getValue()\n : 1.0;\n\n return new java.awt.Color(\n protocolor.getRed(),\n protocolor.getGreen(),\n protocolor.getBlue(),\n alpha);\n }\n\n public static Color toProto(java.awt.Color color) {\n float red = (float) color.getRed();\n float green = (float) color.getGreen();\n float blue = (float) color.getBlue();\n float denominator = 255.0;\n Color.Builder resultBuilder =\n Color\n .newBuilder()\n .setRed(red / denominator)\n .setGreen(green / denominator)\n .setBlue(blue / denominator);\n int alpha = color.getAlpha();\n if (alpha != 255) {\n result.setAlpha(\n FloatValue\n .newBuilder()\n .setValue(((float) alpha) / denominator)\n .build());\n }\n return resultBuilder.build();\n }\n // ...\n\nExample (iOS / Obj-C):\n\n // ...\n static UIColor* fromProto(Color* protocolor) {\n float red = [protocolor red];\n float green = [protocolor green];\n float blue = [protocolor blue];\n FloatValue* alpha_wrapper = [protocolor alpha];\n float alpha = 1.0;\n if (alpha_wrapper != nil) {\n alpha = [alpha_wrapper value];\n }\n return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];\n }\n\n static Color* toProto(UIColor* color) {\n CGFloat red, green, blue, alpha;\n if (![color getRed:\u0026red green:\u0026green blue:\u0026blue alpha:\u0026alpha]) {\n return nil;\n }\n Color* result = [[Color alloc] init];\n [result setRed:red];\n [result setGreen:green];\n [result setBlue:blue];\n if (alpha \u003c= 0.9999) {\n [result setAlpha:floatWrapperWithValue(alpha)];\n }\n [result autorelease];\n return result;\n }\n // ...\n\n Example (JavaScript):\n\n // ...\n\n var protoToCssColor = function(rgb_color) {\n var redFrac = rgb_color.red || 0.0;\n var greenFrac = rgb_color.green || 0.0;\n var blueFrac = rgb_color.blue || 0.0;\n var red = Math.floor(redFrac * 255);\n var green = Math.floor(greenFrac * 255);\n var blue = Math.floor(blueFrac * 255);\n\n if (!('alpha' in rgb_color)) {\n return rgbToCssColor_(red, green, blue);\n }\n\n var alphaFrac = rgb_color.alpha.value || 0.0;\n var rgbParams = [red, green, blue].join(',');\n return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');\n };\n\n var rgbToCssColor_ = function(red, green, blue) {\n var rgbNumber = new Number((red \u003c\u003c 16) | (green \u003c\u003c 8) | blue);\n var hexString = rgbNumber.toString(16);\n var missingZeros = 6 - hexString.length;\n var resultBuilder = ['#'];\n for (var i = 0; i \u003c missingZeros; i++) {\n resultBuilder.push('0');\n }\n resultBuilder.push(hexString);\n return resultBuilder.join('');\n };\n\n // ...",
"id": "Color",
"properties": {
"alpha": {
"description": "The fraction of this color that should be applied to the pixel. That is,\nthe final pixel color is defined by the equation:\n\n pixel color = alpha * (this color) + (1.0 - alpha) * (background color)\n\nThis means that a value of 1.0 corresponds to a solid color, whereas\na value of 0.0 corresponds to a completely transparent color. This\nuses a wrapper message rather than a simple float scalar so that it is\npossible to distinguish between a default value and the value being unset.\nIf omitted, this color object is to be rendered as a solid color\n(as if the alpha value had been explicitly given with a value of 1.0).",
"format": "float",
"type": "number"
},
"blue": {
"description": "The amount of blue in the color as a value in the interval [0, 1].",
"format": "float",
"type": "number"
},
"green": {
"description": "The amount of green in the color as a value in the interval [0, 1].",
"format": "float",
"type": "number"
},
"red": {
"description": "The amount of red in the color as a value in the interval [0, 1].",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"ColorInfo": {
"description": "Color information consists of RGB channels, score, and the fraction of\nthe image that the color occupies in the image.",
"id": "ColorInfo",
"properties": {
"color": {
"$ref": "Color",
"description": "RGB components of the color."
},
"pixelFraction": {
"description": "The fraction of pixels the color occupies in the image.\nValue in range [0, 1].",
"format": "float",
"type": "number"
},
"score": {
"description": "Image-specific score for this color. Value in range [0, 1].",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"CropHint": {
"description": "Single crop hint that is used to generate a new crop when serving an image.",
"id": "CropHint",
"properties": {
"boundingPoly": {
"$ref": "BoundingPoly",
"description": "The bounding polygon for the crop region. The coordinates of the bounding\nbox are in the original image's scale."
},
"confidence": {
"description": "Confidence of this being a salient region. Range [0, 1].",
"format": "float",
"type": "number"
},
"importanceFraction": {
"description": "Fraction of importance of this salient region with respect to the original\nimage.",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"CropHintsAnnotation": {
"description": "Set of crop hints that are used to generate new crops when serving images.",
"id": "CropHintsAnnotation",
"properties": {
"cropHints": {
"description": "Crop hint results.",
"items": {
"$ref": "CropHint"
},
"type": "array"
}
},
"type": "object"
},
"CropHintsParams": {
"description": "Parameters for crop hints annotation request.",
"id": "CropHintsParams",
"properties": {
"aspectRatios": {
"description": "Aspect ratios in floats, representing the ratio of the width to the height\nof the image. For example, if the desired aspect ratio is 4/3, the\ncorresponding float value should be 1.33333. If not specified, the\nbest possible crop is returned. The number of provided aspect ratios is\nlimited to a maximum of 16; any aspect ratios provided after the 16th are\nignored.",
"items": {
"format": "float",
"type": "number"
},
"type": "array"
}
},
"type": "object"
},
"DetectedBreak": {
"description": "Detected start or end of a structural component.",
"id": "DetectedBreak",
"properties": {
"isPrefix": {
"description": "True if break prepends the element.",
"type": "boolean"
},
"type": {
"description": "Detected break type.",
"enum": [
"UNKNOWN",
"SPACE",
"SURE_SPACE",
"EOL_SURE_SPACE",
"HYPHEN",
"LINE_BREAK"
],
"enumDescriptions": [
"Unknown break label type.",
"Regular space.",
"Sure space (very wide).",
"Line-wrapping break.",
"End-line hyphen that is not present in text; does not co-occur with\n`SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.",
"Line break that ends a paragraph."
],
"type": "string"
}
},
"type": "object"
},
"DetectedLanguage": {
"description": "Detected language for a structural component.",
"id": "DetectedLanguage",
"properties": {
"confidence": {
"description": "Confidence of detected language. Range [0, 1].",
"format": "float",
"type": "number"
},
"languageCode": {
"description": "The BCP-47 language code, such as \"en-US\" or \"sr-Latn\". For more\ninformation, see\nhttp://www.unicode.org/reports/tr35/#Unicode_locale_identifier.",
"type": "string"
}
},
"type": "object"
},
"DominantColorsAnnotation": {
"description": "Set of dominant colors and their corresponding scores.",
"id": "DominantColorsAnnotation",
"properties": {
"colors": {
"description": "RGB color values with their score and pixel fraction.",
"items": {
"$ref": "ColorInfo"
},
"type": "array"
}
},
"type": "object"
},
"Empty": {
"description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.",
"id": "Empty",
"properties": {},
"type": "object"
},
"EntityAnnotation": {
"description": "Set of detected entity features.",
"id": "EntityAnnotation",
"properties": {
"boundingPoly": {
"$ref": "BoundingPoly",
"description": "Image region to which this entity belongs. Not produced\nfor `LABEL_DETECTION` features."
},
"confidence": {
"description": "**Deprecated. Use `score` instead.**\nThe accuracy of the entity detection in an image.\nFor example, for an image in which the \"Eiffel Tower\" entity is detected,\nthis field represents the confidence that there is a tower in the query\nimage. Range [0, 1].",
"format": "float",
"type": "number"
},
"description": {
"description": "Entity textual description, expressed in its `locale` language.",
"type": "string"
},
"locale": {
"description": "The language code for the locale in which the entity textual\n`description` is expressed.",
"type": "string"
},
"locations": {
"description": "The location information for the detected entity. Multiple\n`LocationInfo` elements can be present because one location may\nindicate the location of the scene in the image, and another location\nmay indicate the location of the place where the image was taken.\nLocation information is usually present for landmarks.",
"items": {
"$ref": "LocationInfo"
},
"type": "array"
},
"mid": {
"description": "Opaque entity ID. Some IDs may be available in\n[Google Knowledge Graph Search\nAPI](https://developers.google.com/knowledge-graph/).",
"type": "string"
},
"properties": {
"description": "Some entities may have optional user-supplied `Property` (name/value)\nfields, such a score or string that qualifies the entity.",
"items": {
"$ref": "Property"
},
"type": "array"
},
"score": {
"description": "Overall score of the result. Range [0, 1].",
"format": "float",
"type": "number"
},
"topicality": {
"description": "The relevancy of the ICA (Image Content Annotation) label to the\nimage. For example, the relevancy of \"tower\" is likely higher to an image\ncontaining the detected \"Eiffel Tower\" than to an image containing a\ndetected distant towering building, even though the confidence that\nthere is a tower in each image may be the same. Range [0, 1].",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"FaceAnnotation": {
"description": "A face annotation object contains the results of face detection.",
"id": "FaceAnnotation",
"properties": {
"angerLikelihood": {
"description": "Anger likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"blurredLikelihood": {
"description": "Blurred likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"boundingPoly": {
"$ref": "BoundingPoly",
"description": "The bounding polygon around the face. The coordinates of the bounding box\nare in the original image's scale.\nThe bounding box is computed to \"frame\" the face in accordance with human\nexpectations. It is based on the landmarker results.\nNote that one or more x and/or y coordinates may not be generated in the\n`BoundingPoly` (the polygon will be unbounded) if only a partial face\nappears in the image to be annotated."
},
"detectionConfidence": {
"description": "Detection confidence. Range [0, 1].",
"format": "float",
"type": "number"
},
"fdBoundingPoly": {
"$ref": "BoundingPoly",
"description": "The `fd_bounding_poly` bounding polygon is tighter than the\n`boundingPoly`, and encloses only the skin part of the face. Typically, it\nis used to eliminate the face from any image analysis that detects the\n\"amount of skin\" visible in an image. It is not based on the\nlandmarker results, only on the initial face detection, hence\nthe \u003ccode\u003efd\u003c/code\u003e (face detection) prefix."
},
"headwearLikelihood": {
"description": "Headwear likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"joyLikelihood": {
"description": "Joy likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"landmarkingConfidence": {
"description": "Face landmarking confidence. Range [0, 1].",
"format": "float",
"type": "number"
},
"landmarks": {
"description": "Detected face landmarks.",
"items": {
"$ref": "Landmark"
},
"type": "array"
},
"panAngle": {
"description": "Yaw angle, which indicates the leftward/rightward angle that the face is\npointing relative to the vertical plane perpendicular to the image. Range\n[-180,180].",
"format": "float",
"type": "number"
},
"rollAngle": {
"description": "Roll angle, which indicates the amount of clockwise/anti-clockwise rotation\nof the face relative to the image vertical about the axis perpendicular to\nthe face. Range [-180,180].",
"format": "float",
"type": "number"
},
"sorrowLikelihood": {
"description": "Sorrow likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"surpriseLikelihood": {
"description": "Surprise likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"tiltAngle": {
"description": "Pitch angle, which indicates the upwards/downwards angle that the face is\npointing relative to the image's horizontal plane. Range [-180,180].",
"format": "float",
"type": "number"
},
"underExposedLikelihood": {
"description": "Under-exposed likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
}
},
"type": "object"
},
"Feature": {
"description": "The type of Google Cloud Vision API detection to perform, and the maximum\nnumber of results to return for that type. Multiple `Feature` objects can\nbe specified in the `features` list.",
"id": "Feature",
"properties": {
"maxResults": {
"description": "Maximum number of results of this type. Does not apply to\n`TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.",
"format": "int32",
"type": "integer"
},
"model": {
"description": "Model to use for the feature.\nSupported values: \"builtin/stable\" (the default if unset) and\n\"builtin/latest\".",
"type": "string"
},
"type": {
"description": "The feature type.",
"enum": [
"TYPE_UNSPECIFIED",
"FACE_DETECTION",
"LANDMARK_DETECTION",
"LOGO_DETECTION",
"LABEL_DETECTION",
"TEXT_DETECTION",
"DOCUMENT_TEXT_DETECTION",
"SAFE_SEARCH_DETECTION",
"IMAGE_PROPERTIES",
"CROP_HINTS",
"WEB_DETECTION",
"PRODUCT_SEARCH",
"OBJECT_LOCALIZATION"
],
"enumDescriptions": [
"Unspecified feature type.",
"Run face detection.",
"Run landmark detection.",
"Run logo detection.",
"Run label detection.",
"Run text detection / optical character recognition (OCR). Text detection\nis optimized for areas of text within a larger image; if the image is\na document, use `DOCUMENT_TEXT_DETECTION` instead.",
"Run dense text document OCR. Takes precedence when both\n`DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present.",
"Run Safe Search to detect potentially unsafe\nor undesirable content.",
"Compute a set of image properties, such as the\nimage's dominant colors.",
"Run crop hints.",
"Run web detection.",
"Run Product Search.",
"Run localizer for object detection."
],
"type": "string"
}
},
"type": "object"
},
"GcsDestination": {
"description": "The Google Cloud Storage location where the output will be written to.",
"id": "GcsDestination",
"properties": {
"uri": {
"description": "Google Cloud Storage URI prefix where the results will be stored. Results\nwill be in JSON format and preceded by its corresponding input URI prefix.\nThis field can either represent a gcs file prefix or gcs directory. In\neither case, the uri should be unique because in order to get all of the\noutput files, you will need to do a wildcard gcs search on the uri prefix\nyou provide.\n\nExamples:\n\n* File Prefix: gs://bucket-name/here/filenameprefix The output files\nwill be created in gs://bucket-name/here/ and the names of the\noutput files will begin with \"filenameprefix\".\n\n* Directory Prefix: gs://bucket-name/some/location/ The output files\nwill be created in gs://bucket-name/some/location/ and the names of the\noutput files could be anything because there was no filename prefix\nspecified.\n\nIf multiple outputs, each response is still AnnotateFileResponse, each of\nwhich contains some subset of the full list of AnnotateImageResponse.\nMultiple outputs can happen if, for example, the output JSON is too large\nand overflows into multiple sharded files.",
"type": "string"
}
},
"type": "object"
},
"GcsSource": {
"description": "The Google Cloud Storage location where the input will be read from.",
"id": "GcsSource",
"properties": {
"uri": {
"description": "Google Cloud Storage URI for the input file. This must only be a\nGoogle Cloud Storage object. Wildcards are not currently supported.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1AnnotateFileResponse": {
"description": "Response to a single file annotation request. A file may contain one or more\nimages, which individually have their own responses.",
"id": "GoogleCloudVisionV1p1beta1AnnotateFileResponse",
"properties": {
"error": {
"$ref": "Status",
"description": "If set, represents the error message for the failed request. The\n`responses` field will not be set in this case."
},
"inputConfig": {
"$ref": "GoogleCloudVisionV1p1beta1InputConfig",
"description": "Information about the file for which this response is generated."
},
"responses": {
"description": "Individual responses to images found within the file. This field will be\nempty if the `error` field is set.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1AnnotateImageResponse"
},
"type": "array"
},
"totalPages": {
"description": "This field gives the total number of pages in the file.",
"format": "int32",
"type": "integer"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1AnnotateImageResponse": {
"description": "Response to an image annotation request.",
"id": "GoogleCloudVisionV1p1beta1AnnotateImageResponse",
"properties": {
"context": {
"$ref": "GoogleCloudVisionV1p1beta1ImageAnnotationContext",
"description": "If present, contextual information is needed to understand where this image\ncomes from."
},
"cropHintsAnnotation": {
"$ref": "GoogleCloudVisionV1p1beta1CropHintsAnnotation",
"description": "If present, crop hints have completed successfully."
},
"error": {
"$ref": "Status",
"description": "If set, represents the error message for the operation.\nNote that filled-in image annotations are guaranteed to be\ncorrect, even when `error` is set."
},
"faceAnnotations": {
"description": "If present, face detection has completed successfully.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1FaceAnnotation"
},
"type": "array"
},
"fullTextAnnotation": {
"$ref": "GoogleCloudVisionV1p1beta1TextAnnotation",
"description": "If present, text (OCR) detection or document (OCR) text detection has\ncompleted successfully.\nThis annotation provides the structural hierarchy for the OCR detected\ntext."
},
"imagePropertiesAnnotation": {
"$ref": "GoogleCloudVisionV1p1beta1ImageProperties",
"description": "If present, image properties were extracted successfully."
},
"labelAnnotations": {
"description": "If present, label detection has completed successfully.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1EntityAnnotation"
},
"type": "array"
},
"landmarkAnnotations": {
"description": "If present, landmark detection has completed successfully.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1EntityAnnotation"
},
"type": "array"
},
"localizedObjectAnnotations": {
"description": "If present, localized object detection has completed successfully.\nThis will be sorted descending by confidence score.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1LocalizedObjectAnnotation"
},
"type": "array"
},
"logoAnnotations": {
"description": "If present, logo detection has completed successfully.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1EntityAnnotation"
},
"type": "array"
},
"productSearchResults": {
"$ref": "GoogleCloudVisionV1p1beta1ProductSearchResults",
"description": "If present, product search has completed successfully."
},
"safeSearchAnnotation": {
"$ref": "GoogleCloudVisionV1p1beta1SafeSearchAnnotation",
"description": "If present, safe-search annotation has completed successfully."
},
"textAnnotations": {
"description": "If present, text (OCR) detection has completed successfully.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1EntityAnnotation"
},
"type": "array"
},
"webDetection": {
"$ref": "GoogleCloudVisionV1p1beta1WebDetection",
"description": "If present, web detection has completed successfully."
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1AsyncAnnotateFileResponse": {
"description": "The response for a single offline file annotation request.",
"id": "GoogleCloudVisionV1p1beta1AsyncAnnotateFileResponse",
"properties": {
"outputConfig": {
"$ref": "GoogleCloudVisionV1p1beta1OutputConfig",
"description": "The output location and metadata from AsyncAnnotateFileRequest."
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1AsyncBatchAnnotateFilesResponse": {
"description": "Response to an async batch file annotation request.",
"id": "GoogleCloudVisionV1p1beta1AsyncBatchAnnotateFilesResponse",
"properties": {
"responses": {
"description": "The list of file annotation responses, one for each request in\nAsyncBatchAnnotateFilesRequest.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1AsyncAnnotateFileResponse"
},
"type": "array"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1Block": {
"description": "Logical element on the page.",
"id": "GoogleCloudVisionV1p1beta1Block",
"properties": {
"blockType": {
"description": "Detected block type (text, image etc) for this block.",
"enum": [
"UNKNOWN",
"TEXT",
"TABLE",
"PICTURE",
"RULER",
"BARCODE"
],
"enumDescriptions": [
"Unknown block type.",
"Regular text block.",
"Table block.",
"Image block.",
"Horizontal/vertical line box.",
"Barcode block."
],
"type": "string"
},
"boundingBox": {
"$ref": "GoogleCloudVisionV1p1beta1BoundingPoly",
"description": "The bounding box for the block.\nThe vertices are in the order of top-left, top-right, bottom-right,\nbottom-left. When a rotation of the bounding box is detected the rotation\nis represented as around the top-left corner as defined when the text is\nread in the 'natural' orientation.\nFor example:\n\n* when the text is horizontal it might look like:\n\n 0----1\n | |\n 3----2\n\n* when it's rotated 180 degrees around the top-left corner it becomes:\n\n 2----3\n | |\n 1----0\n\n and the vertex order will still be (0, 1, 2, 3)."
},
"confidence": {
"description": "Confidence of the OCR results on the block. Range [0, 1].",
"format": "float",
"type": "number"
},
"paragraphs": {
"description": "List of paragraphs in this block (if this blocks is of type text).",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1Paragraph"
},
"type": "array"
},
"property": {
"$ref": "GoogleCloudVisionV1p1beta1TextAnnotationTextProperty",
"description": "Additional information detected for the block."
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1BoundingPoly": {
"description": "A bounding polygon for the detected image annotation.",
"id": "GoogleCloudVisionV1p1beta1BoundingPoly",
"properties": {
"normalizedVertices": {
"description": "The bounding polygon normalized vertices.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1NormalizedVertex"
},
"type": "array"
},
"vertices": {
"description": "The bounding polygon vertices.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1Vertex"
},
"type": "array"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1ColorInfo": {
"description": "Color information consists of RGB channels, score, and the fraction of\nthe image that the color occupies in the image.",
"id": "GoogleCloudVisionV1p1beta1ColorInfo",
"properties": {
"color": {
"$ref": "Color",
"description": "RGB components of the color."
},
"pixelFraction": {
"description": "The fraction of pixels the color occupies in the image.\nValue in range [0, 1].",
"format": "float",
"type": "number"
},
"score": {
"description": "Image-specific score for this color. Value in range [0, 1].",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1CropHint": {
"description": "Single crop hint that is used to generate a new crop when serving an image.",
"id": "GoogleCloudVisionV1p1beta1CropHint",
"properties": {
"boundingPoly": {
"$ref": "GoogleCloudVisionV1p1beta1BoundingPoly",
"description": "The bounding polygon for the crop region. The coordinates of the bounding\nbox are in the original image's scale."
},
"confidence": {
"description": "Confidence of this being a salient region. Range [0, 1].",
"format": "float",
"type": "number"
},
"importanceFraction": {
"description": "Fraction of importance of this salient region with respect to the original\nimage.",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1CropHintsAnnotation": {
"description": "Set of crop hints that are used to generate new crops when serving images.",
"id": "GoogleCloudVisionV1p1beta1CropHintsAnnotation",
"properties": {
"cropHints": {
"description": "Crop hint results.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1CropHint"
},
"type": "array"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1DominantColorsAnnotation": {
"description": "Set of dominant colors and their corresponding scores.",
"id": "GoogleCloudVisionV1p1beta1DominantColorsAnnotation",
"properties": {
"colors": {
"description": "RGB color values with their score and pixel fraction.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1ColorInfo"
},
"type": "array"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1EntityAnnotation": {
"description": "Set of detected entity features.",
"id": "GoogleCloudVisionV1p1beta1EntityAnnotation",
"properties": {
"boundingPoly": {
"$ref": "GoogleCloudVisionV1p1beta1BoundingPoly",
"description": "Image region to which this entity belongs. Not produced\nfor `LABEL_DETECTION` features."
},
"confidence": {
"description": "**Deprecated. Use `score` instead.**\nThe accuracy of the entity detection in an image.\nFor example, for an image in which the \"Eiffel Tower\" entity is detected,\nthis field represents the confidence that there is a tower in the query\nimage. Range [0, 1].",
"format": "float",
"type": "number"
},
"description": {
"description": "Entity textual description, expressed in its `locale` language.",
"type": "string"
},
"locale": {
"description": "The language code for the locale in which the entity textual\n`description` is expressed.",
"type": "string"
},
"locations": {
"description": "The location information for the detected entity. Multiple\n`LocationInfo` elements can be present because one location may\nindicate the location of the scene in the image, and another location\nmay indicate the location of the place where the image was taken.\nLocation information is usually present for landmarks.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1LocationInfo"
},
"type": "array"
},
"mid": {
"description": "Opaque entity ID. Some IDs may be available in\n[Google Knowledge Graph Search\nAPI](https://developers.google.com/knowledge-graph/).",
"type": "string"
},
"properties": {
"description": "Some entities may have optional user-supplied `Property` (name/value)\nfields, such a score or string that qualifies the entity.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1Property"
},
"type": "array"
},
"score": {
"description": "Overall score of the result. Range [0, 1].",
"format": "float",
"type": "number"
},
"topicality": {
"description": "The relevancy of the ICA (Image Content Annotation) label to the\nimage. For example, the relevancy of \"tower\" is likely higher to an image\ncontaining the detected \"Eiffel Tower\" than to an image containing a\ndetected distant towering building, even though the confidence that\nthere is a tower in each image may be the same. Range [0, 1].",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1FaceAnnotation": {
"description": "A face annotation object contains the results of face detection.",
"id": "GoogleCloudVisionV1p1beta1FaceAnnotation",
"properties": {
"angerLikelihood": {
"description": "Anger likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"blurredLikelihood": {
"description": "Blurred likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"boundingPoly": {
"$ref": "GoogleCloudVisionV1p1beta1BoundingPoly",
"description": "The bounding polygon around the face. The coordinates of the bounding box\nare in the original image's scale.\nThe bounding box is computed to \"frame\" the face in accordance with human\nexpectations. It is based on the landmarker results.\nNote that one or more x and/or y coordinates may not be generated in the\n`BoundingPoly` (the polygon will be unbounded) if only a partial face\nappears in the image to be annotated."
},
"detectionConfidence": {
"description": "Detection confidence. Range [0, 1].",
"format": "float",
"type": "number"
},
"fdBoundingPoly": {
"$ref": "GoogleCloudVisionV1p1beta1BoundingPoly",
"description": "The `fd_bounding_poly` bounding polygon is tighter than the\n`boundingPoly`, and encloses only the skin part of the face. Typically, it\nis used to eliminate the face from any image analysis that detects the\n\"amount of skin\" visible in an image. It is not based on the\nlandmarker results, only on the initial face detection, hence\nthe \u003ccode\u003efd\u003c/code\u003e (face detection) prefix."
},
"headwearLikelihood": {
"description": "Headwear likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"joyLikelihood": {
"description": "Joy likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"landmarkingConfidence": {
"description": "Face landmarking confidence. Range [0, 1].",
"format": "float",
"type": "number"
},
"landmarks": {
"description": "Detected face landmarks.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1FaceAnnotationLandmark"
},
"type": "array"
},
"panAngle": {
"description": "Yaw angle, which indicates the leftward/rightward angle that the face is\npointing relative to the vertical plane perpendicular to the image. Range\n[-180,180].",
"format": "float",
"type": "number"
},
"rollAngle": {
"description": "Roll angle, which indicates the amount of clockwise/anti-clockwise rotation\nof the face relative to the image vertical about the axis perpendicular to\nthe face. Range [-180,180].",
"format": "float",
"type": "number"
},
"sorrowLikelihood": {
"description": "Sorrow likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"surpriseLikelihood": {
"description": "Surprise likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"tiltAngle": {
"description": "Pitch angle, which indicates the upwards/downwards angle that the face is\npointing relative to the image's horizontal plane. Range [-180,180].",
"format": "float",
"type": "number"
},
"underExposedLikelihood": {
"description": "Under-exposed likelihood.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1FaceAnnotationLandmark": {
"description": "A face-specific landmark (for example, a face feature).",
"id": "GoogleCloudVisionV1p1beta1FaceAnnotationLandmark",
"properties": {
"position": {
"$ref": "GoogleCloudVisionV1p1beta1Position",
"description": "Face landmark position."
},
"type": {
"description": "Face landmark type.",
"enum": [
"UNKNOWN_LANDMARK",
"LEFT_EYE",
"RIGHT_EYE",
"LEFT_OF_LEFT_EYEBROW",
"RIGHT_OF_LEFT_EYEBROW",
"LEFT_OF_RIGHT_EYEBROW",
"RIGHT_OF_RIGHT_EYEBROW",
"MIDPOINT_BETWEEN_EYES",
"NOSE_TIP",
"UPPER_LIP",
"LOWER_LIP",
"MOUTH_LEFT",
"MOUTH_RIGHT",
"MOUTH_CENTER",
"NOSE_BOTTOM_RIGHT",
"NOSE_BOTTOM_LEFT",
"NOSE_BOTTOM_CENTER",
"LEFT_EYE_TOP_BOUNDARY",
"LEFT_EYE_RIGHT_CORNER",
"LEFT_EYE_BOTTOM_BOUNDARY",
"LEFT_EYE_LEFT_CORNER",
"RIGHT_EYE_TOP_BOUNDARY",
"RIGHT_EYE_RIGHT_CORNER",
"RIGHT_EYE_BOTTOM_BOUNDARY",
"RIGHT_EYE_LEFT_CORNER",
"LEFT_EYEBROW_UPPER_MIDPOINT",
"RIGHT_EYEBROW_UPPER_MIDPOINT",
"LEFT_EAR_TRAGION",
"RIGHT_EAR_TRAGION",
"LEFT_EYE_PUPIL",
"RIGHT_EYE_PUPIL",
"FOREHEAD_GLABELLA",
"CHIN_GNATHION",
"CHIN_LEFT_GONION",
"CHIN_RIGHT_GONION"
],
"enumDescriptions": [
"Unknown face landmark detected. Should not be filled.",
"Left eye.",
"Right eye.",
"Left of left eyebrow.",
"Right of left eyebrow.",
"Left of right eyebrow.",
"Right of right eyebrow.",
"Midpoint between eyes.",
"Nose tip.",
"Upper lip.",
"Lower lip.",
"Mouth left.",
"Mouth right.",
"Mouth center.",
"Nose, bottom right.",
"Nose, bottom left.",
"Nose, bottom center.",
"Left eye, top boundary.",
"Left eye, right corner.",
"Left eye, bottom boundary.",
"Left eye, left corner.",
"Right eye, top boundary.",
"Right eye, right corner.",
"Right eye, bottom boundary.",
"Right eye, left corner.",
"Left eyebrow, upper midpoint.",
"Right eyebrow, upper midpoint.",
"Left ear tragion.",
"Right ear tragion.",
"Left eye pupil.",
"Right eye pupil.",
"Forehead glabella.",
"Chin gnathion.",
"Chin left gonion.",
"Chin right gonion."
],
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1GcsDestination": {
"description": "The Google Cloud Storage location where the output will be written to.",
"id": "GoogleCloudVisionV1p1beta1GcsDestination",
"properties": {
"uri": {
"description": "Google Cloud Storage URI prefix where the results will be stored. Results\nwill be in JSON format and preceded by its corresponding input URI prefix.\nThis field can either represent a gcs file prefix or gcs directory. In\neither case, the uri should be unique because in order to get all of the\noutput files, you will need to do a wildcard gcs search on the uri prefix\nyou provide.\n\nExamples:\n\n* File Prefix: gs://bucket-name/here/filenameprefix The output files\nwill be created in gs://bucket-name/here/ and the names of the\noutput files will begin with \"filenameprefix\".\n\n* Directory Prefix: gs://bucket-name/some/location/ The output files\nwill be created in gs://bucket-name/some/location/ and the names of the\noutput files could be anything because there was no filename prefix\nspecified.\n\nIf multiple outputs, each response is still AnnotateFileResponse, each of\nwhich contains some subset of the full list of AnnotateImageResponse.\nMultiple outputs can happen if, for example, the output JSON is too large\nand overflows into multiple sharded files.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1GcsSource": {
"description": "The Google Cloud Storage location where the input will be read from.",
"id": "GoogleCloudVisionV1p1beta1GcsSource",
"properties": {
"uri": {
"description": "Google Cloud Storage URI for the input file. This must only be a\nGoogle Cloud Storage object. Wildcards are not currently supported.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1ImageAnnotationContext": {
"description": "If an image was produced from a file (e.g. a PDF), this message gives\ninformation about the source of that image.",
"id": "GoogleCloudVisionV1p1beta1ImageAnnotationContext",
"properties": {
"pageNumber": {
"description": "If the file was a PDF or TIFF, this field gives the page number within\nthe file used to produce the image.",
"format": "int32",
"type": "integer"
},
"uri": {
"description": "The URI of the file used to produce the image.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1ImageProperties": {
"description": "Stores image properties, such as dominant colors.",
"id": "GoogleCloudVisionV1p1beta1ImageProperties",
"properties": {
"dominantColors": {
"$ref": "GoogleCloudVisionV1p1beta1DominantColorsAnnotation",
"description": "If present, dominant colors completed successfully."
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1InputConfig": {
"description": "The desired input location and metadata.",
"id": "GoogleCloudVisionV1p1beta1InputConfig",
"properties": {
"content": {
"description": "File content, represented as a stream of bytes.\nNote: As with all `bytes` fields, protobuffers use a pure binary\nrepresentation, whereas JSON representations use base64.\n\nCurrently, this field only works for BatchAnnotateFiles requests. It does\nnot work for AsyncBatchAnnotateFiles requests.",
"format": "byte",
"type": "string"
},
"gcsSource": {
"$ref": "GoogleCloudVisionV1p1beta1GcsSource",
"description": "The Google Cloud Storage location to read the input from."
},
"mimeType": {
"description": "The type of the file. Currently only \"application/pdf\", \"image/tiff\" and\n\"image/gif\" are supported. Wildcards are not supported.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1LocalizedObjectAnnotation": {
"description": "Set of detected objects with bounding boxes.",
"id": "GoogleCloudVisionV1p1beta1LocalizedObjectAnnotation",
"properties": {
"boundingPoly": {
"$ref": "GoogleCloudVisionV1p1beta1BoundingPoly",
"description": "Image region to which this object belongs. This must be populated."
},
"languageCode": {
"description": "The BCP-47 language code, such as \"en-US\" or \"sr-Latn\". For more\ninformation, see\nhttp://www.unicode.org/reports/tr35/#Unicode_locale_identifier.",
"type": "string"
},
"mid": {
"description": "Object ID that should align with EntityAnnotation mid.",
"type": "string"
},
"name": {
"description": "Object name, expressed in its `language_code` language.",
"type": "string"
},
"score": {
"description": "Score of the result. Range [0, 1].",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1LocationInfo": {
"description": "Detected entity location information.",
"id": "GoogleCloudVisionV1p1beta1LocationInfo",
"properties": {
"latLng": {
"$ref": "LatLng",
"description": "lat/long location coordinates."
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1NormalizedVertex": {
"description": "A vertex represents a 2D point in the image.\nNOTE: the normalized vertex coordinates are relative to the original image\nand range from 0 to 1.",
"id": "GoogleCloudVisionV1p1beta1NormalizedVertex",
"properties": {
"x": {
"description": "X coordinate.",
"format": "float",
"type": "number"
},
"y": {
"description": "Y coordinate.",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1OperationMetadata": {
"description": "Contains metadata for the BatchAnnotateImages operation.",
"id": "GoogleCloudVisionV1p1beta1OperationMetadata",
"properties": {
"createTime": {
"description": "The time when the batch request was received.",
"format": "google-datetime",
"type": "string"
},
"state": {
"description": "Current state of the batch operation.",
"enum": [
"STATE_UNSPECIFIED",
"CREATED",
"RUNNING",
"DONE",
"CANCELLED"
],
"enumDescriptions": [
"Invalid.",
"Request is received.",
"Request is actively being processed.",
"The batch processing is done.",
"The batch processing was cancelled."
],
"type": "string"
},
"updateTime": {
"description": "The time when the operation result was last updated.",
"format": "google-datetime",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1OutputConfig": {
"description": "The desired output location and metadata.",
"id": "GoogleCloudVisionV1p1beta1OutputConfig",
"properties": {
"batchSize": {
"description": "The max number of response protos to put into each output JSON file on\nGoogle Cloud Storage.\nThe valid range is [1, 100]. If not specified, the default value is 20.\n\nFor example, for one pdf file with 100 pages, 100 response protos will\nbe generated. If `batch_size` = 20, then 5 json files each\ncontaining 20 response protos will be written under the prefix\n`gcs_destination`.`uri`.\n\nCurrently, batch_size only applies to GcsDestination, with potential future\nsupport for other output configurations.",
"format": "int32",
"type": "integer"
},
"gcsDestination": {
"$ref": "GoogleCloudVisionV1p1beta1GcsDestination",
"description": "The Google Cloud Storage location to write the output(s) to."
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1Page": {
"description": "Detected page from OCR.",
"id": "GoogleCloudVisionV1p1beta1Page",
"properties": {
"blocks": {
"description": "List of blocks of text, images etc on this page.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1Block"
},
"type": "array"
},
"confidence": {
"description": "Confidence of the OCR results on the page. Range [0, 1].",
"format": "float",
"type": "number"
},
"height": {
"description": "Page height. For PDFs the unit is points. For images (including\nTIFFs) the unit is pixels.",
"format": "int32",
"type": "integer"
},
"property": {
"$ref": "GoogleCloudVisionV1p1beta1TextAnnotationTextProperty",
"description": "Additional information detected on the page."
},
"width": {
"description": "Page width. For PDFs the unit is points. For images (including\nTIFFs) the unit is pixels.",
"format": "int32",
"type": "integer"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1Paragraph": {
"description": "Structural unit of text representing a number of words in certain order.",
"id": "GoogleCloudVisionV1p1beta1Paragraph",
"properties": {
"boundingBox": {
"$ref": "GoogleCloudVisionV1p1beta1BoundingPoly",
"description": "The bounding box for the paragraph.\nThe vertices are in the order of top-left, top-right, bottom-right,\nbottom-left. When a rotation of the bounding box is detected the rotation\nis represented as around the top-left corner as defined when the text is\nread in the 'natural' orientation.\nFor example:\n * when the text is horizontal it might look like:\n 0----1\n | |\n 3----2\n * when it's rotated 180 degrees around the top-left corner it becomes:\n 2----3\n | |\n 1----0\n and the vertex order will still be (0, 1, 2, 3)."
},
"confidence": {
"description": "Confidence of the OCR results for the paragraph. Range [0, 1].",
"format": "float",
"type": "number"
},
"property": {
"$ref": "GoogleCloudVisionV1p1beta1TextAnnotationTextProperty",
"description": "Additional information detected for the paragraph."
},
"words": {
"description": "List of all words in this paragraph.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1Word"
},
"type": "array"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1Position": {
"description": "A 3D position in the image, used primarily for Face detection landmarks.\nA valid Position must have both x and y coordinates.\nThe position coordinates are in the same scale as the original image.",
"id": "GoogleCloudVisionV1p1beta1Position",
"properties": {
"x": {
"description": "X coordinate.",
"format": "float",
"type": "number"
},
"y": {
"description": "Y coordinate.",
"format": "float",
"type": "number"
},
"z": {
"description": "Z coordinate (or depth).",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1Product": {
"description": "A Product contains ReferenceImages.",
"id": "GoogleCloudVisionV1p1beta1Product",
"properties": {
"description": {
"description": "User-provided metadata to be stored with this product. Must be at most 4096\ncharacters long.",
"type": "string"
},
"displayName": {
"description": "The user-provided name for this Product. Must not be empty. Must be at most\n4096 characters long.",
"type": "string"
},
"name": {
"description": "The resource name of the product.\n\nFormat is:\n`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.\n\nThis field is ignored when creating a product.",
"type": "string"
},
"productCategory": {
"description": "Immutable. The category for the product identified by the reference image. This should\nbe either \"homegoods-v2\", \"apparel-v2\", or \"toys-v2\". The legacy categories\n\"homegoods\", \"apparel\", and \"toys\" are still supported, but these should\nnot be used for new products.",
"type": "string"
},
"productLabels": {
"description": "Key-value pairs that can be attached to a product. At query time,\nconstraints can be specified based on the product_labels.\n\nNote that integer values can be provided as strings, e.g. \"1199\". Only\nstrings with integer values can match a range-based restriction which is\nto be supported soon.\n\nMultiple values can be assigned to the same key. One product may have up to\n500 product_labels.\n\nNotice that the total number of distinct product_labels over all products\nin one ProductSet cannot exceed 1M, otherwise the product search pipeline\nwill refuse to work for that ProductSet.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1ProductKeyValue"
},
"type": "array"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1ProductKeyValue": {
"description": "A product label represented as a key-value pair.",
"id": "GoogleCloudVisionV1p1beta1ProductKeyValue",
"properties": {
"key": {
"description": "The key of the label attached to the product. Cannot be empty and cannot\nexceed 128 bytes.",
"type": "string"
},
"value": {
"description": "The value of the label attached to the product. Cannot be empty and\ncannot exceed 128 bytes.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1ProductSearchResults": {
"description": "Results for a product search request.",
"id": "GoogleCloudVisionV1p1beta1ProductSearchResults",
"properties": {
"indexTime": {
"description": "Timestamp of the index which provided these results. Products added to the\nproduct set and products removed from the product set after this time are\nnot reflected in the current results.",
"format": "google-datetime",
"type": "string"
},
"productGroupedResults": {
"description": "List of results grouped by products detected in the query image. Each entry\ncorresponds to one bounding polygon in the query image, and contains the\nmatching products specific to that region. There may be duplicate product\nmatches in the union of all the per-product results.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1ProductSearchResultsGroupedResult"
},
"type": "array"
},
"results": {
"description": "List of results, one for each product match.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1ProductSearchResultsResult"
},
"type": "array"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1ProductSearchResultsGroupedResult": {
"description": "Information about the products similar to a single product in a query\nimage.",
"id": "GoogleCloudVisionV1p1beta1ProductSearchResultsGroupedResult",
"properties": {
"boundingPoly": {
"$ref": "GoogleCloudVisionV1p1beta1BoundingPoly",
"description": "The bounding polygon around the product detected in the query image."
},
"objectAnnotations": {
"description": "List of generic predictions for the object in the bounding box.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1ProductSearchResultsObjectAnnotation"
},
"type": "array"
},
"results": {
"description": "List of results, one for each product match.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1ProductSearchResultsResult"
},
"type": "array"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1ProductSearchResultsObjectAnnotation": {
"description": "Prediction for what the object in the bounding box is.",
"id": "GoogleCloudVisionV1p1beta1ProductSearchResultsObjectAnnotation",
"properties": {
"languageCode": {
"description": "The BCP-47 language code, such as \"en-US\" or \"sr-Latn\". For more\ninformation, see\nhttp://www.unicode.org/reports/tr35/#Unicode_locale_identifier.",
"type": "string"
},
"mid": {
"description": "Object ID that should align with EntityAnnotation mid.",
"type": "string"
},
"name": {
"description": "Object name, expressed in its `language_code` language.",
"type": "string"
},
"score": {
"description": "Score of the result. Range [0, 1].",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1ProductSearchResultsResult": {
"description": "Information about a product.",
"id": "GoogleCloudVisionV1p1beta1ProductSearchResultsResult",
"properties": {
"image": {
"description": "The resource name of the image from the product that is the closest match\nto the query.",
"type": "string"
},
"product": {
"$ref": "GoogleCloudVisionV1p1beta1Product",
"description": "The Product."
},
"score": {
"description": "A confidence level on the match, ranging from 0 (no confidence) to\n1 (full confidence).",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1Property": {
"description": "A `Property` consists of a user-supplied name/value pair.",
"id": "GoogleCloudVisionV1p1beta1Property",
"properties": {
"name": {
"description": "Name of the property.",
"type": "string"
},
"uint64Value": {
"description": "Value of numeric properties.",
"format": "uint64",
"type": "string"
},
"value": {
"description": "Value of the property.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1SafeSearchAnnotation": {
"description": "Set of features pertaining to the image, computed by computer vision\nmethods over safe-search verticals (for example, adult, spoof, medical,\nviolence).",
"id": "GoogleCloudVisionV1p1beta1SafeSearchAnnotation",
"properties": {
"adult": {
"description": "Represents the adult content likelihood for the image. Adult content may\ncontain elements such as nudity, pornographic images or cartoons, or\nsexual activities.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"medical": {
"description": "Likelihood that this is a medical image.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"racy": {
"description": "Likelihood that the request image contains racy content. Racy content may\ninclude (but is not limited to) skimpy or sheer clothing, strategically\ncovered nudity, lewd or provocative poses, or close-ups of sensitive\nbody areas.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"spoof": {
"description": "Spoof likelihood. The likelihood that an modification\nwas made to the image's canonical version to make it appear\nfunny or offensive.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
},
"violence": {
"description": "Likelihood that this image contains violent content.",
"enum": [
"UNKNOWN",
"VERY_UNLIKELY",
"UNLIKELY",
"POSSIBLE",
"LIKELY",
"VERY_LIKELY"
],
"enumDescriptions": [
"Unknown likelihood.",
"It is very unlikely.",
"It is unlikely.",
"It is possible.",
"It is likely.",
"It is very likely."
],
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1Symbol": {
"description": "A single symbol representation.",
"id": "GoogleCloudVisionV1p1beta1Symbol",
"properties": {
"boundingBox": {
"$ref": "GoogleCloudVisionV1p1beta1BoundingPoly",
"description": "The bounding box for the symbol.\nThe vertices are in the order of top-left, top-right, bottom-right,\nbottom-left. When a rotation of the bounding box is detected the rotation\nis represented as around the top-left corner as defined when the text is\nread in the 'natural' orientation.\nFor example:\n * when the text is horizontal it might look like:\n 0----1\n | |\n 3----2\n * when it's rotated 180 degrees around the top-left corner it becomes:\n 2----3\n | |\n 1----0\n and the vertex order will still be (0, 1, 2, 3)."
},
"confidence": {
"description": "Confidence of the OCR results for the symbol. Range [0, 1].",
"format": "float",
"type": "number"
},
"property": {
"$ref": "GoogleCloudVisionV1p1beta1TextAnnotationTextProperty",
"description": "Additional information detected for the symbol."
},
"text": {
"description": "The actual UTF-8 representation of the symbol.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1TextAnnotation": {
"description": "TextAnnotation contains a structured representation of OCR extracted text.\nThe hierarchy of an OCR extracted text structure is like this:\n TextAnnotation -\u003e Page -\u003e Block -\u003e Paragraph -\u003e Word -\u003e Symbol\nEach structural component, starting from Page, may further have their own\nproperties. Properties describe detected languages, breaks etc.. Please refer\nto the TextAnnotation.TextProperty message definition below for more\ndetail.",
"id": "GoogleCloudVisionV1p1beta1TextAnnotation",
"properties": {
"pages": {
"description": "List of pages detected by OCR.",
"items": {
"$ref": "GoogleCloudVisionV1p1beta1Page"
},
"type": "array"
},
"text": {
"description": "UTF-8 text detected on the pages.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak": {
"description": "Detected start or end of a structural component.",
"id": "GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak",
"properties": {
"isPrefix": {
"description": "True if break prepends the element.",
"type": "boolean"
},
"type": {
"description": "Detected break type.",