all: autogenerated update (2019-01-21)

Update:
- alertcenter/v1beta1
- bigquery/v2
- container/v1
- logging/v2
- logging/v2beta1
- ml/v1
- remotebuildexecution/v1
- remotebuildexecution/v1alpha
- remotebuildexecution/v2
- servicecontrol/v1
- serviceusage/v1
- videointelligence/v1
- videointelligence/v1beta2
- videointelligence/v1p1beta1
diff --git a/alertcenter/v1beta1/alertcenter-api.json b/alertcenter/v1beta1/alertcenter-api.json
index c5f5330..708062d 100644
--- a/alertcenter/v1beta1/alertcenter-api.json
+++ b/alertcenter/v1beta1/alertcenter-api.json
@@ -357,7 +357,7 @@
       }
     }
   },
-  "revision": "20190110",
+  "revision": "20190116",
   "rootUrl": "https://alertcenter.googleapis.com/",
   "schemas": {
     "AccountWarning": {
@@ -370,7 +370,7 @@
         },
         "loginDetails": {
           "$ref": "LoginDetails",
-          "description": "Optional. Details of the login action associated with the warning event.\nThis is only available for:\n\n* Suspicious login\n* Suspicious login (less secure app)\n* User suspended (suspicious activity)"
+          "description": "Optional. Details of the login action associated with the warning event.\nThis is only available for:\n\n* Suspicious login\n* Suspicious login (less secure app)\n* Suspicious programmatic login\n* User suspended (suspicious activity)"
         }
       },
       "type": "object"
diff --git a/alertcenter/v1beta1/alertcenter-gen.go b/alertcenter/v1beta1/alertcenter-gen.go
index 7786a30..29ec0a2 100644
--- a/alertcenter/v1beta1/alertcenter-gen.go
+++ b/alertcenter/v1beta1/alertcenter-gen.go
@@ -124,6 +124,7 @@
 	//
 	// * Suspicious login
 	// * Suspicious login (less secure app)
+	// * Suspicious programmatic login
 	// * User suspended (suspicious activity)
 	LoginDetails *LoginDetails `json:"loginDetails,omitempty"`
 
diff --git a/api-list.json b/api-list.json
index 71546d0..c28cd0b 100644
--- a/api-list.json
+++ b/api-list.json
@@ -2218,21 +2218,6 @@
   },
   {
    "kind": "discovery#directoryItem",
-   "id": "monitoring:v1",
-   "name": "monitoring",
-   "version": "v1",
-   "title": "Stackdriver Monitoring API",
-   "description": "Manages your Stackdriver Monitoring data and configurations. Most projects must be associated with a Stackdriver account, with a few exceptions as noted on the individual method pages.",
-   "discoveryRestUrl": "https://monitoring.googleapis.com/$discovery/rest?version=v1",
-   "icons": {
-    "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png",
-    "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png"
-   },
-   "documentationLink": "https://cloud.google.com/monitoring/api/",
-   "preferred": false
-  },
-  {
-   "kind": "discovery#directoryItem",
    "id": "monitoring:v3",
    "name": "monitoring",
    "version": "v3",
diff --git a/bigquery/v2/bigquery-api.json b/bigquery/v2/bigquery-api.json
index a93e75b..5c5d134 100644
--- a/bigquery/v2/bigquery-api.json
+++ b/bigquery/v2/bigquery-api.json
@@ -32,7 +32,7 @@
   "description": "A data platform for customers to create, manage, share and query data.",
   "discoveryVersion": "v1",
   "documentationLink": "https://cloud.google.com/bigquery/",
-  "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/JOw8AGQ14WBAuCZq4gmK7QO5KVM\"",
+  "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/Sc1cBz8bm3nVS0YgBFltaKSnt9g\"",
   "icons": {
     "x16": "https://www.google.com/images/icons/product/search-16.gif",
     "x32": "https://www.google.com/images/icons/product/search-32.gif"
@@ -998,7 +998,7 @@
       }
     }
   },
-  "revision": "20181230",
+  "revision": "20190108",
   "rootUrl": "https://www.googleapis.com/",
   "schemas": {
     "BigQueryModelTraining": {
@@ -1566,6 +1566,10 @@
           "$ref": "GoogleSheetsOptions",
           "description": "[Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS."
         },
+        "hivePartitioningMode": {
+          "description": "[Optional, Experimental] If hive partitioning is enabled, which mode to use. Two modes are supported: - AUTO: automatically infer partition key name(s) and type(s). - STRINGS: automatic infer partition key name(s). All types are strings. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error.",
+          "type": "string"
+        },
         "ignoreUnknownValues": {
           "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.",
           "type": "boolean"
@@ -1899,6 +1903,10 @@
           "description": "[Optional] The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',').",
           "type": "string"
         },
+        "hivePartitioningMode": {
+          "description": "[Optional, Experimental] If hive partitioning is enabled, which mode to use. Two modes are supported: - AUTO: automatically infer partition key name(s) and type(s). - STRINGS: automatic infer partition key name(s). All types are strings. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error.",
+          "type": "string"
+        },
         "ignoreUnknownValues": {
           "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names",
           "type": "boolean"
diff --git a/bigquery/v2/bigquery-gen.go b/bigquery/v2/bigquery-gen.go
index 2d4c557..6ed5893 100644
--- a/bigquery/v2/bigquery-gen.go
+++ b/bigquery/v2/bigquery-gen.go
@@ -1070,6 +1070,14 @@
 	// set to GOOGLE_SHEETS.
 	GoogleSheetsOptions *GoogleSheetsOptions `json:"googleSheetsOptions,omitempty"`
 
+	// HivePartitioningMode: [Optional, Experimental] If hive partitioning
+	// is enabled, which mode to use. Two modes are supported: - AUTO:
+	// automatically infer partition key name(s) and type(s). - STRINGS:
+	// automatically infer partition key name(s). All types are strings. Not all
+	// storage formats support hive partitioning -- requesting hive
+	// partitioning on an unsupported format will lead to an error.
+	HivePartitioningMode string `json:"hivePartitioningMode,omitempty"`
+
 	// IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow
 	// extra values that are not represented in the table schema. If true,
 	// the extra values are ignored. If false, records with extra columns
@@ -1635,6 +1643,14 @@
 	// specify a tab separator. The default value is a comma (',').
 	FieldDelimiter string `json:"fieldDelimiter,omitempty"`
 
+	// HivePartitioningMode: [Optional, Experimental] If hive partitioning
+	// is enabled, which mode to use. Two modes are supported: - AUTO:
+	// automatically infer partition key name(s) and type(s). - STRINGS:
+	// automatically infer partition key name(s). All types are strings. Not all
+	// storage formats support hive partitioning -- requesting hive
+	// partitioning on an unsupported format will lead to an error.
+	HivePartitioningMode string `json:"hivePartitioningMode,omitempty"`
+
 	// IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow
 	// extra values that are not represented in the table schema. If true,
 	// the extra values are ignored. If false, records with extra columns
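
Note: a minimal usage sketch of the new hivePartitioningMode field from Go. The
hunks above do not show the struct names, but the neighboring fields
(googleSheetsOptions, fieldDelimiter, ignoreUnknownValues) suggest they land on
ExternalDataConfiguration and JobConfigurationLoad; the bucket path below is a
placeholder.

	package main

	import (
		"fmt"

		bigquery "google.golang.org/api/bigquery/v2"
	)

	func main() {
		// Assumes the new field lands on ExternalDataConfiguration, as the
		// surrounding fields in the hunk above suggest.
		ext := &bigquery.ExternalDataConfiguration{
			SourceFormat: "PARQUET",
			SourceUris:   []string{"gs://my-bucket/logs/*"}, // placeholder
			// New in this revision: "AUTO" infers partition key names and
			// types, "STRINGS" infers names only (all values are strings).
			HivePartitioningMode: "AUTO",
		}
		fmt.Println(ext.HivePartitioningMode)
	}
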
diff --git a/container/v1/container-api.json b/container/v1/container-api.json
index 242fbe2..6bdb8c3 100644
--- a/container/v1/container-api.json
+++ b/container/v1/container-api.json
@@ -283,6 +283,31 @@
                     "https://www.googleapis.com/auth/cloud-platform"
                   ]
                 },
+                "getJwks": {
+                  "description": "GetJSONWebKeys gets the public component of the cluster signing keys in\nJSON Web Key format.\nThis API is not yet intended for general use, and is not available for all\nclusters.",
+                  "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/jwks",
+                  "httpMethod": "GET",
+                  "id": "container.projects.locations.clusters.getJwks",
+                  "parameterOrder": [
+                    "parent"
+                  ],
+                  "parameters": {
+                    "parent": {
+                      "description": "The cluster (project, location, cluster id) to get keys for. Specified in\nthe format 'projects/*/locations/*/clusters/*'.",
+                      "location": "path",
+                      "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$",
+                      "required": true,
+                      "type": "string"
+                    }
+                  },
+                  "path": "v1/{+parent}/jwks",
+                  "response": {
+                    "$ref": "GetJSONWebKeysResponse"
+                  },
+                  "scopes": [
+                    "https://www.googleapis.com/auth/cloud-platform"
+                  ]
+                },
                 "list": {
                   "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.",
                   "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters",
@@ -957,6 +982,35 @@
                       ]
                     }
                   }
+                },
+                "well-known": {
+                  "methods": {
+                    "getOpenid-configuration": {
+                      "description": "GetOpenIDConfig gets the OIDC discovery document for the cluster.\nSee the OpenID Connect Discovery 1.0 specification for details.\nhttps://openid.net/specs/openid-connect-discovery-1_0.html\nThis API is not yet intended for general use, and is not available for all\nclusters.",
+                      "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/.well-known/openid-configuration",
+                      "httpMethod": "GET",
+                      "id": "container.projects.locations.clusters.well-known.getOpenid-configuration",
+                      "parameterOrder": [
+                        "parent"
+                      ],
+                      "parameters": {
+                        "parent": {
+                          "description": "The cluster (project, location, cluster id) to get the discovery document\nfor. Specified in the format 'projects/*/locations/*/clusters/*'.",
+                          "location": "path",
+                          "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$",
+                          "required": true,
+                          "type": "string"
+                        }
+                      },
+                      "path": "v1/{+parent}/.well-known/openid-configuration",
+                      "response": {
+                        "$ref": "GetOpenIDConfigResponse"
+                      },
+                      "scopes": [
+                        "https://www.googleapis.com/auth/cloud-platform"
+                      ]
+                    }
+                  }
                 }
               }
             },
@@ -2361,7 +2415,7 @@
       }
     }
   },
-  "revision": "20181207",
+  "revision": "20190102",
   "rootUrl": "https://container.googleapis.com/",
   "schemas": {
     "AcceleratorConfig": {
@@ -2806,6 +2860,70 @@
       "properties": {},
       "type": "object"
     },
+    "GetJSONWebKeysResponse": {
+      "description": "GetJSONWebKeysResponse is a valid JSON Web Key Set as specififed in rfc 7517",
+      "id": "GetJSONWebKeysResponse",
+      "properties": {
+        "keys": {
+          "description": "The public component of the keys used by the cluster to sign token\nrequests.",
+          "items": {
+            "$ref": "Jwk"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
+    "GetOpenIDConfigResponse": {
+      "description": "GetOpenIDConfigResponse is an OIDC discovery document for the cluster.\nSee the OpenID Connect Discovery 1.0 specification for details.",
+      "id": "GetOpenIDConfigResponse",
+      "properties": {
+        "claims_supported": {
+          "description": "NOLINT",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "grant_types": {
+          "description": "NOLINT",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "id_token_signing_alg_values_supported": {
+          "description": "NOLINT",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "issuer": {
+          "description": "NOLINT",
+          "type": "string"
+        },
+        "jwks_uri": {
+          "description": "NOLINT",
+          "type": "string"
+        },
+        "response_types_supported": {
+          "description": "NOLINT",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
+        "subject_types_supported": {
+          "description": "NOLINT",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
     "HorizontalPodAutoscaling": {
       "description": "Configuration options for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods.",
       "id": "HorizontalPodAutoscaling",
@@ -2879,6 +2997,49 @@
       },
       "type": "object"
     },
+    "Jwk": {
+      "description": "Jwk is a JSON Web Key as specified in RFC 7517",
+      "id": "Jwk",
+      "properties": {
+        "alg": {
+          "description": "NOLINT",
+          "type": "string"
+        },
+        "crv": {
+          "description": "NOLINT",
+          "type": "string"
+        },
+        "e": {
+          "description": "NOLINT",
+          "type": "string"
+        },
+        "kid": {
+          "description": "NOLINT",
+          "type": "string"
+        },
+        "kty": {
+          "description": "NOLINT",
+          "type": "string"
+        },
+        "n": {
+          "description": "Fields for RSA keys.\nNOLINT",
+          "type": "string"
+        },
+        "use": {
+          "description": "NOLINT",
+          "type": "string"
+        },
+        "x": {
+          "description": "Fields for ECDSA keys.\nNOLINT",
+          "type": "string"
+        },
+        "y": {
+          "description": "NOLINT",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "KubernetesDashboard": {
       "description": "Configuration for the Kubernetes Dashboard.",
       "id": "KubernetesDashboard",
diff --git a/container/v1/container-gen.go b/container/v1/container-gen.go
index c218da8..6157920 100644
--- a/container/v1/container-gen.go
+++ b/container/v1/container-gen.go
@@ -113,6 +113,7 @@
 func NewProjectsLocationsClustersService(s *Service) *ProjectsLocationsClustersService {
 	rs := &ProjectsLocationsClustersService{s: s}
 	rs.NodePools = NewProjectsLocationsClustersNodePoolsService(s)
+	rs.WellKnown = NewProjectsLocationsClustersWellKnownService(s)
 	return rs
 }
 
@@ -120,6 +121,8 @@
 	s *Service
 
 	NodePools *ProjectsLocationsClustersNodePoolsService
+
+	WellKnown *ProjectsLocationsClustersWellKnownService
 }
 
 func NewProjectsLocationsClustersNodePoolsService(s *Service) *ProjectsLocationsClustersNodePoolsService {
@@ -131,6 +134,15 @@
 	s *Service
 }
 
+func NewProjectsLocationsClustersWellKnownService(s *Service) *ProjectsLocationsClustersWellKnownService {
+	rs := &ProjectsLocationsClustersWellKnownService{s: s}
+	return rs
+}
+
+type ProjectsLocationsClustersWellKnownService struct {
+	s *Service
+}
+
 func NewProjectsLocationsOperationsService(s *Service) *ProjectsLocationsOperationsService {
 	rs := &ProjectsLocationsOperationsService{s: s}
 	return rs
@@ -1074,6 +1086,94 @@
 	googleapi.ServerResponse `json:"-"`
 }
 
+// GetJSONWebKeysResponse: GetJSONWebKeysResponse is a valid JSON Web
+// Key Set as specified in RFC 7517
+type GetJSONWebKeysResponse struct {
+	// Keys: The public component of the keys used by the cluster to sign
+	// token
+	// requests.
+	Keys []*Jwk `json:"keys,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "Keys") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "Keys") to include in API
+	// requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *GetJSONWebKeysResponse) MarshalJSON() ([]byte, error) {
+	type NoMethod GetJSONWebKeysResponse
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// GetOpenIDConfigResponse: GetOpenIDConfigResponse is an OIDC discovery
+// document for the cluster.
+// See the OpenID Connect Discovery 1.0 specification for details.
+type GetOpenIDConfigResponse struct {
+	// ClaimsSupported: NOLINT
+	ClaimsSupported []string `json:"claims_supported,omitempty"`
+
+	// GrantTypes: NOLINT
+	GrantTypes []string `json:"grant_types,omitempty"`
+
+	// IdTokenSigningAlgValuesSupported: NOLINT
+	IdTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported,omitempty"`
+
+	// Issuer: NOLINT
+	Issuer string `json:"issuer,omitempty"`
+
+	// JwksUri: NOLINT
+	JwksUri string `json:"jwks_uri,omitempty"`
+
+	// ResponseTypesSupported: NOLINT
+	ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
+
+	// SubjectTypesSupported: NOLINT
+	SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "ClaimsSupported") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "ClaimsSupported") to
+	// include in API requests with the JSON null value. By default, fields
+	// with empty values are omitted from API requests. However, any field
+	// with an empty value appearing in NullFields will be sent to the
+	// server as null. It is an error if a field in this list has a
+	// non-empty value. This may be used to include null fields in Patch
+	// requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *GetOpenIDConfigResponse) MarshalJSON() ([]byte, error) {
+	type NoMethod GetOpenIDConfigResponse
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
 // HorizontalPodAutoscaling: Configuration options for the horizontal
 // pod autoscaling feature, which
 // increases or decreases the number of replica pods a replication
@@ -1290,6 +1390,60 @@
 	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 }
 
+// Jwk: Jwk is a JSON Web Key as specified in RFC 7517
+type Jwk struct {
+	// Alg: NOLINT
+	Alg string `json:"alg,omitempty"`
+
+	// Crv: NOLINT
+	Crv string `json:"crv,omitempty"`
+
+	// E: NOLINT
+	E string `json:"e,omitempty"`
+
+	// Kid: NOLINT
+	Kid string `json:"kid,omitempty"`
+
+	// Kty: NOLINT
+	Kty string `json:"kty,omitempty"`
+
+	// N: Fields for RSA keys.
+	// NOLINT
+	N string `json:"n,omitempty"`
+
+	// Use: NOLINT
+	Use string `json:"use,omitempty"`
+
+	// X: Fields for ECDSA keys.
+	// NOLINT
+	X string `json:"x,omitempty"`
+
+	// Y: NOLINT
+	Y string `json:"y,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "Alg") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "Alg") to include in API
+	// requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *Jwk) MarshalJSON() ([]byte, error) {
+	type NoMethod Jwk
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
 // KubernetesDashboard: Configuration for the Kubernetes Dashboard.
 type KubernetesDashboard struct {
 	// Disabled: Whether the Kubernetes Dashboard is enabled for this
@@ -4291,6 +4445,154 @@
 
 }
 
+// method id "container.projects.locations.clusters.getJwks":
+
+type ProjectsLocationsClustersGetJwksCall struct {
+	s            *Service
+	parent       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// GetJwks: GetJSONWebKeys gets the public component of the cluster
+// signing keys in
+// JSON Web Key format.
+// This API is not yet intended for general use, and is not available
+// for all
+// clusters.
+func (r *ProjectsLocationsClustersService) GetJwks(parent string) *ProjectsLocationsClustersGetJwksCall {
+	c := &ProjectsLocationsClustersGetJwksCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.parent = parent
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLocationsClustersGetJwksCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersGetJwksCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLocationsClustersGetJwksCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersGetJwksCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLocationsClustersGetJwksCall) Context(ctx context.Context) *ProjectsLocationsClustersGetJwksCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ProjectsLocationsClustersGetJwksCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ProjectsLocationsClustersGetJwksCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/jwks")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"parent": c.parent,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "container.projects.locations.clusters.getJwks" call.
+// Exactly one of *GetJSONWebKeysResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *GetJSONWebKeysResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsLocationsClustersGetJwksCall) Do(opts ...googleapi.CallOption) (*GetJSONWebKeysResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &GetJSONWebKeysResponse{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "GetJSONWebKeys gets the public component of the cluster signing keys in\nJSON Web Key format.\nThis API is not yet intended for general use, and is not available for all\nclusters.",
+	//   "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/jwks",
+	//   "httpMethod": "GET",
+	//   "id": "container.projects.locations.clusters.getJwks",
+	//   "parameterOrder": [
+	//     "parent"
+	//   ],
+	//   "parameters": {
+	//     "parent": {
+	//       "description": "The cluster (project, location, cluster id) to get keys for. Specified in\nthe format 'projects/*/locations/*/clusters/*'.",
+	//       "location": "path",
+	//       "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "v1/{+parent}/jwks",
+	//   "response": {
+	//     "$ref": "GetJSONWebKeysResponse"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform"
+	//   ]
+	// }
+
+}
+
 // method id "container.projects.locations.clusters.list":
 
 type ProjectsLocationsClustersListCall struct {
@@ -7554,6 +7856,157 @@
 
 }
 
+// method id "container.projects.locations.clusters.well-known.getOpenid-configuration":
+
+type ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall struct {
+	s            *Service
+	parent       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// GetOpenidConfiguration: GetOpenIDConfig gets the OIDC discovery
+// document for the cluster.
+// See the OpenID Connect Discovery 1.0 specification for
+// details.
+// https://openid.net/specs/openid-connect-discovery-1_0.html
+// This API is not yet intended for general use, and is not available
+// for all clusters.
+func (r *ProjectsLocationsClustersWellKnownService) GetOpenidConfiguration(parent string) *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall {
+	c := &ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.parent = parent
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Context(ctx context.Context) *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/.well-known/openid-configuration")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"parent": c.parent,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "container.projects.locations.clusters.well-known.getOpenid-configuration" call.
+// Exactly one of *GetOpenIDConfigResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *GetOpenIDConfigResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Do(opts ...googleapi.CallOption) (*GetOpenIDConfigResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &GetOpenIDConfigResponse{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "GetOpenIDConfig gets the OIDC discovery document for the cluster.\nSee the OpenID Connect Discovery 1.0 specification for details.\nhttps://openid.net/specs/openid-connect-discovery-1_0.html\nThis API is not yet intended for general use, and is not available for all\nclusters.",
+	//   "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/.well-known/openid-configuration",
+	//   "httpMethod": "GET",
+	//   "id": "container.projects.locations.clusters.well-known.getOpenid-configuration",
+	//   "parameterOrder": [
+	//     "parent"
+	//   ],
+	//   "parameters": {
+	//     "parent": {
+	//       "description": "The cluster (project, location, cluster id) to get the discovery document\nfor. Specified in the format 'projects/*/locations/*/clusters/*'.",
+	//       "location": "path",
+	//       "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "v1/{+parent}/.well-known/openid-configuration",
+	//   "response": {
+	//     "$ref": "GetOpenIDConfigResponse"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform"
+	//   ]
+	// }
+
+}
+
 // method id "container.projects.locations.operations.cancel":
 
 type ProjectsLocationsOperationsCancelCall struct {
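
With a *container.Service built as in the earlier sketch, the cluster's public
signing keys come back as a JSON Web Key Set via the new getJwks call:

	package clusterkeys

	import (
		"context"
		"fmt"

		container "google.golang.org/api/container/v1"
	)

	// printClusterJwks prints the public signing keys for the given cluster,
	// where cluster is a "projects/*/locations/*/clusters/*" resource name.
	func printClusterJwks(ctx context.Context, svc *container.Service, cluster string) error {
		resp, err := svc.Projects.Locations.Clusters.GetJwks(cluster).Context(ctx).Do()
		if err != nil {
			return err
		}
		for _, k := range resp.Keys {
			fmt.Printf("kid=%s kty=%s alg=%s use=%s\n", k.Kid, k.Kty, k.Alg, k.Use)
		}
		return nil
	}
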
diff --git a/logging/v2/logging-api.json b/logging/v2/logging-api.json
index 20b51d4..1999d6d 100644
--- a/logging/v2/logging-api.json
+++ b/logging/v2/logging-api.json
@@ -478,7 +478,7 @@
               ]
             },
             "patch": {
-              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
               "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}",
               "httpMethod": "PATCH",
               "id": "logging.billingAccounts.sinks.patch",
@@ -518,7 +518,7 @@
               ]
             },
             "update": {
-              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
               "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}",
               "httpMethod": "PUT",
               "id": "logging.billingAccounts.sinks.update",
@@ -1129,7 +1129,7 @@
               ]
             },
             "patch": {
-              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
               "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}",
               "httpMethod": "PATCH",
               "id": "logging.folders.sinks.patch",
@@ -1169,7 +1169,7 @@
               ]
             },
             "update": {
-              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
               "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}",
               "httpMethod": "PUT",
               "id": "logging.folders.sinks.update",
@@ -1677,7 +1677,7 @@
               ]
             },
             "patch": {
-              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
               "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}",
               "httpMethod": "PATCH",
               "id": "logging.organizations.sinks.patch",
@@ -1717,7 +1717,7 @@
               ]
             },
             "update": {
-              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
               "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}",
               "httpMethod": "PUT",
               "id": "logging.organizations.sinks.update",
@@ -2280,7 +2280,7 @@
               ]
             },
             "patch": {
-              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
               "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}",
               "httpMethod": "PATCH",
               "id": "logging.projects.sinks.patch",
@@ -2320,7 +2320,7 @@
               ]
             },
             "update": {
-              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
               "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}",
               "httpMethod": "PUT",
               "id": "logging.projects.sinks.update",
@@ -2493,7 +2493,7 @@
           ]
         },
         "update": {
-          "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+          "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
           "flatPath": "v2/{v2Id}/{v2Id1}/sinks/{sinksId}",
           "httpMethod": "PUT",
           "id": "logging.sinks.update",
@@ -2535,7 +2535,7 @@
       }
     }
   },
-  "revision": "20181215",
+  "revision": "20190115",
   "rootUrl": "https://logging.googleapis.com/",
   "schemas": {
     "BucketOptions": {
@@ -2897,12 +2897,12 @@
           "type": "object"
         },
         "logName": {
-          "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\nA project number may optionally be used in place of PROJECT_ID. The  project number is translated to its corresponding PROJECT_ID internally  and the log_name field will contain PROJECT_ID in queries and exports.[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.",
+          "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\nA project number may optionally be used in place of PROJECT_ID. The project number is translated to its corresponding PROJECT_ID internally and the log_name field will contain PROJECT_ID in queries and exports.[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.",
           "type": "string"
         },
         "metadata": {
           "$ref": "MonitoredResourceMetadata",
-          "description": "Output only. Additional metadata about the monitored resource. Only k8s_container, k8s_pod, and k8s_node MonitoredResources have this field populated."
+          "description": "Output only. Additional metadata about the monitored resource.Only k8s_container, k8s_pod, and k8s_node MonitoredResources have this field populated."
         },
         "operation": {
           "$ref": "LogEntryOperation",
@@ -2923,7 +2923,7 @@
         },
         "resource": {
           "$ref": "MonitoredResource",
-          "description": "Required. The primary monitored resource associated with this log entry. Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error."
+          "description": "Required. The primary monitored resource associated with this log entry.Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error."
         },
         "severity": {
           "description": "Optional. The severity of the log entry. The default value is LogSeverity.DEFAULT.",
@@ -2956,7 +2956,7 @@
           "description": "Optional. Source code location information associated with the log entry, if any."
         },
         "spanId": {
-          "description": "Optional. The span ID within the trace associated with the log entry. For Trace spans, this is the same format that the Trace API v2 uses: a 16-character hexadecimal encoding of an 8-byte array, such as \u003ccode\u003e\"000000000000004a\"\u003c/code\u003e.",
+          "description": "Optional. The span ID within the trace associated with the log entry.For Trace spans, this is the same format that the Trace API v2 uses: a 16-character hexadecimal encoding of an 8-byte array, such as \u003ccode\u003e\"000000000000004a\"\u003c/code\u003e.",
           "type": "string"
         },
         "textPayload": {
@@ -2973,7 +2973,7 @@
           "type": "string"
         },
         "traceSampled": {
-          "description": "Optional. The sampling decision of the trace associated with the log entry. True means that the trace resource name in the trace field was sampled for storage in a trace backend. False means that the trace was not sampled for storage when this log entry was written, or the sampling decision was unknown at the time. A non-sampled trace value is still useful as a request correlation identifier. The default is False.",
+          "description": "Optional. The sampling decision of the trace associated with the log entry.True means that the trace resource name in the trace field was sampled for storage in a trace backend. False means that the trace was not sampled for storage when this log entry was written, or the sampling decision was unknown at the time. A non-sampled trace value is still useful as a request correlation identifier. The default is False.",
           "type": "boolean"
         }
       },
@@ -3026,12 +3026,17 @@
       "description": "Specifies a set of log entries that are not to be stored in Logging. If your project receives a large volume of logs, you might be able to use exclusions to reduce your chargeable logs. Exclusions are processed after log sinks, so you can export log entries before they are excluded. Audit log entries and log entries from Amazon Web Services are never excluded.",
       "id": "LogExclusion",
       "properties": {
+        "createTime": {
+          "description": "Output only. The creation timestamp of the exclusion.This field may not be present for older exclusions.",
+          "format": "google-datetime",
+          "type": "string"
+        },
         "description": {
           "description": "Optional. A description of this exclusion.",
           "type": "string"
         },
         "disabled": {
-          "description": "Optional. If set to True, then this exclusion is disabled and it does not exclude any log entries. You can use exclusions.patch to change the value of this field.",
+          "description": "Optional. If set to True, then this exclusion is disabled and it does not exclude any log entries. You can update an exclusion to change the value of this field.",
           "type": "boolean"
         },
         "filter": {
@@ -3041,6 +3046,11 @@
         "name": {
           "description": "Required. A client-assigned identifier, such as \"load-balancer-exclusion\". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods.",
           "type": "string"
+        },
+        "updateTime": {
+          "description": "Output only. The last update timestamp of the exclusion.This field may not be present for older exclusions.",
+          "format": "google-datetime",
+          "type": "string"
         }
       },
       "type": "object"
@@ -3099,6 +3109,11 @@
           "$ref": "BucketOptions",
           "description": "Optional. The bucket_options are required when the logs-based metric is using a DISTRIBUTION value type and it describes the bucket boundaries used to create a histogram of the extracted values."
         },
+        "createTime": {
+          "description": "Output only. The creation timestamp of the metric.This field may not be present for older metrics.",
+          "format": "google-datetime",
+          "type": "string"
+        },
         "description": {
           "description": "Optional. A description of this metric, which is used in documentation. The maximum length of the description is 8000 characters.",
           "type": "string"
@@ -3122,6 +3137,11 @@
           "description": "Required. The client-assigned metric identifier. Examples: \"error_count\", \"nginx/requests\".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.The metric identifier in this field must not be URL-encoded (https://en.wikipedia.org/wiki/Percent-encoding). However, when the metric identifier appears as the [METRIC_ID] part of a metric_name API parameter, then the metric identifier must be URL-encoded. Example: \"projects/my-project/metrics/nginx%2Frequests\".",
           "type": "string"
         },
+        "updateTime": {
+          "description": "Output only. The last update timestamp of the metric.This field may not be present for older metrics.",
+          "format": "google-datetime",
+          "type": "string"
+        },
         "valueExtractor": {
           "description": "Optional. A value_extractor is required when using a distribution logs-based metric to extract the values to record from a log entry. Two functions are supported for value extraction: EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument are:  1. field: The name of the log entry field from which the value is to be  extracted.  2. regex: A regular expression using the Google RE2 syntax  (https://github.com/google/re2/wiki/Syntax) with a single capture  group to extract data from the specified log entry field. The value  of the field is converted to a string before applying the regex.  It is an error to specify a regex that does not include exactly one  capture group.The result of the extraction must be convertible to a double type, as the distribution always records double values. If either the extraction or the conversion to double fails, then those values are not recorded in the distribution.Example: REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(\\d+).*\")",
           "type": "string"
@@ -3145,8 +3165,13 @@
       "description": "Describes a sink used to export log entries to one of the following destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls which log entries are exported. The sink must be created within a project, organization, billing account, or folder.",
       "id": "LogSink",
       "properties": {
+        "createTime": {
+          "description": "Output only. The creation timestamp of the sink.This field may not be present for older sinks.",
+          "format": "google-datetime",
+          "type": "string"
+        },
         "destination": {
-          "description": "Required. The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs With Sinks.",
+          "description": "Required. The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs with Sinks.",
           "type": "string"
         },
         "filter": {
@@ -3175,8 +3200,13 @@
           ],
           "type": "string"
         },
+        "updateTime": {
+          "description": "Output only. The last update timestamp of the sink.This field may not be present for older sinks.",
+          "format": "google-datetime",
+          "type": "string"
+        },
         "writerIdentity": {
-          "description": "Output only. An IAM identity\u0026mdash;a service account or group\u0026mdash;under which Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update, based on the setting of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting access for a resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.",
+          "description": "Output only. An IAM identity\u0026mdash;a service account or group\u0026mdash;under which Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update based on the value of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting Access for a Resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.",
           "type": "string"
         }
       },
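
A short sketch of reading the new output-only timestamps; it assumes the
regenerated LogSink struct picks up the createTime/updateTime fields shown in
the schema change above (the LogExclusion equivalents are visible in the Go
diff below), and "my-project" is a placeholder:

	package main

	import (
		"context"
		"fmt"
		"log"

		"golang.org/x/oauth2/google"
		logging "google.golang.org/api/logging/v2"
	)

	func main() {
		ctx := context.Background()
		client, err := google.DefaultClient(ctx, logging.LoggingReadScope)
		if err != nil {
			log.Fatal(err)
		}
		svc, err := logging.New(client)
		if err != nil {
			log.Fatal(err)
		}

		resp, err := svc.Projects.Sinks.List("projects/my-project").Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range resp.Sinks {
			fmt.Printf("%s created=%s updated=%s\n", s.Name, s.CreateTime, s.UpdateTime)
		}
	}
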
diff --git a/logging/v2/logging-gen.go b/logging/v2/logging-gen.go
index ebadc6d..318bfbe 100644
--- a/logging/v2/logging-gen.go
+++ b/logging/v2/logging-gen.go
@@ -1040,7 +1040,7 @@
 	// "folders/[FOLDER_ID]/logs/[LOG_ID]"
 	// A project number may optionally be used in place of PROJECT_ID. The
 	// project number is translated to its corresponding PROJECT_ID
-	// internally  and the log_name field will contain PROJECT_ID in queries
+	// internally and the log_name field will contain PROJECT_ID in queries
 	// and exports.[LOG_ID] must be URL-encoded within log_name. Example:
 	// "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa
 	// ctivity". [LOG_ID] must be less than 512 characters long and can only
@@ -1054,8 +1054,8 @@
 	LogName string `json:"logName,omitempty"`
 
 	// Metadata: Output only. Additional metadata about the monitored
-	// resource. Only k8s_container, k8s_pod, and k8s_node
-	// MonitoredResources have this field populated.
+	// resource.Only k8s_container, k8s_pod, and k8s_node MonitoredResources
+	// have this field populated.
 	Metadata *MonitoredResourceMetadata `json:"metadata,omitempty"`
 
 	// Operation: Optional. Information about an operation associated with
@@ -1072,7 +1072,7 @@
 	ReceiveTimestamp string `json:"receiveTimestamp,omitempty"`
 
 	// Resource: Required. The primary monitored resource associated with
-	// this log entry. Example: a log entry that reports a database error
+	// this log entry.Example: a log entry that reports a database error
 	// would be associated with the monitored resource designating the
 	// particular database that reported the error.
 	Resource *MonitoredResource `json:"resource,omitempty"`
@@ -1100,9 +1100,9 @@
 	SourceLocation *LogEntrySourceLocation `json:"sourceLocation,omitempty"`
 
 	// SpanId: Optional. The span ID within the trace associated with the
-	// log entry. For Trace spans, this is the same format that the Trace
-	// API v2 uses: a 16-character hexadecimal encoding of an 8-byte array,
-	// such as <code>"000000000000004a"</code>.
+	// log entry.For Trace spans, this is the same format that the Trace API
+	// v2 uses: a 16-character hexadecimal encoding of an 8-byte array, such
+	// as <code>"000000000000004a"</code>.
 	SpanId string `json:"spanId,omitempty"`
 
 	// TextPayload: The log entry payload, represented as a Unicode string
@@ -1129,7 +1129,7 @@
 	Trace string `json:"trace,omitempty"`
 
 	// TraceSampled: Optional. The sampling decision of the trace associated
-	// with the log entry. True means that the trace resource name in the
+	// with the log entry.True means that the trace resource name in the
 	// trace field was sampled for storage in a trace backend. False means
 	// that the trace was not sampled for storage when this log entry was
 	// written, or the sampling decision was unknown at the time. A
@@ -1252,11 +1252,15 @@
 // entries before they are excluded. Audit log entries and log entries
 // from Amazon Web Services are never excluded.
 type LogExclusion struct {
+	// CreateTime: Output only. The creation timestamp of the exclusion.This
+	// field may not be present for older exclusions.
+	CreateTime string `json:"createTime,omitempty"`
+
 	// Description: Optional. A description of this exclusion.
 	Description string `json:"description,omitempty"`
 
 	// Disabled: Optional. If set to True, then this exclusion is disabled
-	// and it does not exclude any log entries. You can use exclusions.patch
+	// and it does not exclude any log entries. You can update an exclusion
 	// to change the value of this field.
 	Disabled bool `json:"disabled,omitempty"`
 
@@ -1274,11 +1278,15 @@
 	// periods.
 	Name string `json:"name,omitempty"`
 
+	// UpdateTime: Output only. The last update timestamp of the
+	// exclusion.This field may not be present for older exclusions.
+	UpdateTime string `json:"updateTime,omitempty"`
+
 	// ServerResponse contains the HTTP response code and headers from the
 	// server.
 	googleapi.ServerResponse `json:"-"`
 
-	// ForceSendFields is a list of field names (e.g. "Description") to
+	// ForceSendFields is a list of field names (e.g. "CreateTime") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
 	// non-interface field appearing in ForceSendFields will be sent to the
@@ -1286,10 +1294,10 @@
 	// used to include empty fields in Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "Description") to include
-	// in API requests with the JSON null value. By default, fields with
-	// empty values are omitted from API requests. However, any field with
-	// an empty value appearing in NullFields will be sent to the server as
+	// NullFields is a list of field names (e.g. "CreateTime") to include in
+	// API requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
 	// null. It is an error if a field in this list has a non-empty value.
 	// This may be used to include null fields in Patch requests.
 	NullFields []string `json:"-"`
@@ -1366,6 +1374,10 @@
 	// values.
 	BucketOptions *BucketOptions `json:"bucketOptions,omitempty"`
 
+	// CreateTime: Output only. The creation timestamp of the metric.This
+	// field may not be present for older metrics.
+	CreateTime string `json:"createTime,omitempty"`
+
 	// Description: Optional. A description of this metric, which is used in
 	// documentation. The maximum length of the description is 8000
 	// characters.
@@ -1422,6 +1434,10 @@
 	// Example: "projects/my-project/metrics/nginx%2Frequests".
 	Name string `json:"name,omitempty"`
 
+	// UpdateTime: Output only. The last update timestamp of the metric.This
+	// field may not be present for older metrics.
+	UpdateTime string `json:"updateTime,omitempty"`
+
 	// ValueExtractor: Optional. A value_extractor is required when using a
 	// distribution logs-based metric to extract the values to record from a
 	// log entry. Two functions are supported for value extraction:
@@ -1481,6 +1497,10 @@
 // which log entries are exported. The sink must be created within a
 // project, organization, billing account, or folder.
 type LogSink struct {
+	// CreateTime: Output only. The creation timestamp of the sink.This
+	// field may not be present for older sinks.
+	CreateTime string `json:"createTime,omitempty"`
+
 	// Destination: Required. The export
 	// destination:
 	// "storage.googleapis.com/[GCS_BUCKET]"
@@ -1490,7 +1510,7 @@
 	// /projects/[PROJECT_ID]/topics/[TOPIC_ID]"
 	// The sink's writer_identity, set when the sink is created, must have
 	// permission to write to the destination or else the log entries are
-	// not exported. For more information, see Exporting Logs With Sinks.
+	// not exported. For more information, see Exporting Logs with Sinks.
 	Destination string `json:"destination,omitempty"`
 
 	// Filter: Optional. An advanced logs filter. The only exported log
@@ -1537,13 +1557,17 @@
 	//   "V1" - LogEntry version 1 format.
 	OutputVersionFormat string `json:"outputVersionFormat,omitempty"`
 
+	// UpdateTime: Output only. The last update timestamp of the sink.This
+	// field may not be present for older sinks.
+	UpdateTime string `json:"updateTime,omitempty"`
+
 	// WriterIdentity: Output only. An IAM identity&mdash;a service account
 	// or group&mdash;under which Logging writes the exported log entries to
 	// the sink's destination. This field is set by sinks.create and
-	// sinks.update, based on the setting of unique_writer_identity in those
+	// sinks.update based on the value of unique_writer_identity in those
 	// methods.Until you grant this identity write-access to the
 	// destination, log entry exports from this sink will fail. For more
-	// information, see Granting access for a resource. Consult the
+	// information, see Granting Access for a Resource. Consult the
 	// destination service's documentation to determine the appropriate IAM
 	// roles to assign to the identity.
 	WriterIdentity string `json:"writerIdentity,omitempty"`
@@ -1552,7 +1576,7 @@
 	// server.
 	googleapi.ServerResponse `json:"-"`
 
-	// ForceSendFields is a list of field names (e.g. "Destination") to
+	// ForceSendFields is a list of field names (e.g. "CreateTime") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
 	// non-interface field appearing in ForceSendFields will be sent to the
@@ -1560,10 +1584,10 @@
 	// used to include empty fields in Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "Destination") to include
-	// in API requests with the JSON null value. By default, fields with
-	// empty values are omitted from API requests. However, any field with
-	// an empty value appearing in NullFields will be sent to the server as
+	// NullFields is a list of field names (e.g. "CreateTime") to include in
+	// API requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
 	// null. It is an error if a field in this list has a non-empty value.
 	// This may be used to include null fields in Patch requests.
 	NullFields []string `json:"-"`
@@ -4043,7 +4067,7 @@
 
 // Patch: Updates a sink. This method replaces the following fields in
 // the existing sink with values from the new sink: destination, and
-// filter. The updated sink might also have a new writer_identity; see
+// filter.The updated sink might also have a new writer_identity; see
 // the unique_writer_identity field.
 func (r *BillingAccountsSinksService) Patch(sinkNameid string, logsink *LogSink) *BillingAccountsSinksPatchCall {
 	c := &BillingAccountsSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -4173,7 +4197,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
 	//   "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}",
 	//   "httpMethod": "PATCH",
 	//   "id": "logging.billingAccounts.sinks.patch",
@@ -4228,7 +4252,7 @@
 
 // Update: Updates a sink. This method replaces the following fields in
 // the existing sink with values from the new sink: destination, and
-// filter. The updated sink might also have a new writer_identity; see
+// filter.The updated sink might also have a new writer_identity; see
 // the unique_writer_identity field.
 func (r *BillingAccountsSinksService) Update(sinkNameid string, logsink *LogSink) *BillingAccountsSinksUpdateCall {
 	c := &BillingAccountsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -4358,7 +4382,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
 	//   "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}",
 	//   "httpMethod": "PUT",
 	//   "id": "logging.billingAccounts.sinks.update",
@@ -7209,7 +7233,7 @@
 
 // Patch: Updates a sink. This method replaces the following fields in
 // the existing sink with values from the new sink: destination, and
-// filter. The updated sink might also have a new writer_identity; see
+// filter.The updated sink might also have a new writer_identity; see
 // the unique_writer_identity field.
 func (r *FoldersSinksService) Patch(sinkNameid string, logsink *LogSink) *FoldersSinksPatchCall {
 	c := &FoldersSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -7339,7 +7363,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
 	//   "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}",
 	//   "httpMethod": "PATCH",
 	//   "id": "logging.folders.sinks.patch",
@@ -7394,7 +7418,7 @@
 
 // Update: Updates a sink. This method replaces the following fields in
 // the existing sink with values from the new sink: destination, and
-// filter. The updated sink might also have a new writer_identity; see
+// filter.The updated sink might also have a new writer_identity; see
 // the unique_writer_identity field.
 func (r *FoldersSinksService) Update(sinkNameid string, logsink *LogSink) *FoldersSinksUpdateCall {
 	c := &FoldersSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -7524,7 +7548,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
 	//   "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}",
 	//   "httpMethod": "PUT",
 	//   "id": "logging.folders.sinks.update",
@@ -9837,7 +9861,7 @@
 
 // Patch: Updates a sink. This method replaces the following fields in
 // the existing sink with values from the new sink: destination, and
-// filter. The updated sink might also have a new writer_identity; see
+// filter.The updated sink might also have a new writer_identity; see
 // the unique_writer_identity field.
 func (r *OrganizationsSinksService) Patch(sinkNameid string, logsink *LogSink) *OrganizationsSinksPatchCall {
 	c := &OrganizationsSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -9967,7 +9991,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
 	//   "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}",
 	//   "httpMethod": "PATCH",
 	//   "id": "logging.organizations.sinks.patch",
@@ -10022,7 +10046,7 @@
 
 // Update: Updates a sink. This method replaces the following fields in
 // the existing sink with values from the new sink: destination, and
-// filter. The updated sink might also have a new writer_identity; see
+// filter.The updated sink might also have a new writer_identity; see
 // the unique_writer_identity field.
 func (r *OrganizationsSinksService) Update(sinkNameid string, logsink *LogSink) *OrganizationsSinksUpdateCall {
 	c := &OrganizationsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -10152,7 +10176,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
 	//   "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}",
 	//   "httpMethod": "PUT",
 	//   "id": "logging.organizations.sinks.update",
@@ -12705,7 +12729,7 @@
 
 // Patch: Updates a sink. This method replaces the following fields in
 // the existing sink with values from the new sink: destination, and
-// filter. The updated sink might also have a new writer_identity; see
+// filter.The updated sink might also have a new writer_identity; see
 // the unique_writer_identity field.
 func (r *ProjectsSinksService) Patch(sinkNameid string, logsink *LogSink) *ProjectsSinksPatchCall {
 	c := &ProjectsSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -12835,7 +12859,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
 	//   "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}",
 	//   "httpMethod": "PATCH",
 	//   "id": "logging.projects.sinks.patch",
@@ -12890,7 +12914,7 @@
 
 // Update: Updates a sink. This method replaces the following fields in
 // the existing sink with values from the new sink: destination, and
-// filter. The updated sink might also have a new writer_identity; see
+// filter.The updated sink might also have a new writer_identity; see
 // the unique_writer_identity field.
 func (r *ProjectsSinksService) Update(sinkNameid string, logsink *LogSink) *ProjectsSinksUpdateCall {
 	c := &ProjectsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -13020,7 +13044,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
 	//   "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}",
 	//   "httpMethod": "PUT",
 	//   "id": "logging.projects.sinks.update",
@@ -13714,7 +13738,7 @@
 
 // Update: Updates a sink. This method replaces the following fields in
 // the existing sink with values from the new sink: destination, and
-// filter. The updated sink might also have a new writer_identity; see
+// filter.The updated sink might also have a new writer_identity; see
 // the unique_writer_identity field.
 func (r *SinksService) Update(sinkNameid string, logsink *LogSink) *SinksUpdateCall {
 	c := &SinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -13844,7 +13868,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
 	//   "flatPath": "v2/{v2Id}/{v2Id1}/sinks/{sinksId}",
 	//   "httpMethod": "PUT",
 	//   "id": "logging.sinks.update",
diff --git a/logging/v2beta1/logging-api.json b/logging/v2beta1/logging-api.json
index a882ba5..eb32766 100644
--- a/logging/v2beta1/logging-api.json
+++ b/logging/v2beta1/logging-api.json
@@ -485,7 +485,7 @@
               ]
             },
             "update": {
-              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+              "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
               "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}",
               "httpMethod": "PUT",
               "id": "logging.projects.sinks.update",
@@ -529,7 +529,7 @@
       }
     }
   },
-  "revision": "20181215",
+  "revision": "20190115",
   "rootUrl": "https://logging.googleapis.com/",
   "schemas": {
     "BucketOptions": {
@@ -855,12 +855,12 @@
           "type": "object"
         },
         "logName": {
-          "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\nA project number may optionally be used in place of PROJECT_ID. The  project number is translated to its corresponding PROJECT_ID internally  and the log_name field will contain PROJECT_ID in queries and exports.[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.",
+          "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\nA project number may optionally be used in place of PROJECT_ID. The project number is translated to its corresponding PROJECT_ID internally and the log_name field will contain PROJECT_ID in queries and exports.[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.",
           "type": "string"
         },
         "metadata": {
           "$ref": "MonitoredResourceMetadata",
-          "description": "Output only. Additional metadata about the monitored resource. Only k8s_container, k8s_pod, and k8s_node MonitoredResources have this field populated."
+          "description": "Output only. Additional metadata about the monitored resource.Only k8s_container, k8s_pod, and k8s_node MonitoredResources have this field populated."
         },
         "operation": {
           "$ref": "LogEntryOperation",
@@ -881,7 +881,7 @@
         },
         "resource": {
           "$ref": "MonitoredResource",
-          "description": "Required. The primary monitored resource associated with this log entry. Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error."
+          "description": "Required. The primary monitored resource associated with this log entry.Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error."
         },
         "severity": {
           "description": "Optional. The severity of the log entry. The default value is LogSeverity.DEFAULT.",
@@ -914,7 +914,7 @@
           "description": "Optional. Source code location information associated with the log entry, if any."
         },
         "spanId": {
-          "description": "Optional. The span ID within the trace associated with the log entry. For Trace spans, this is the same format that the Trace API v2 uses: a 16-character hexadecimal encoding of an 8-byte array, such as \u003ccode\u003e\"000000000000004a\"\u003c/code\u003e.",
+          "description": "Optional. The span ID within the trace associated with the log entry.For Trace spans, this is the same format that the Trace API v2 uses: a 16-character hexadecimal encoding of an 8-byte array, such as \u003ccode\u003e\"000000000000004a\"\u003c/code\u003e.",
           "type": "string"
         },
         "textPayload": {
@@ -931,7 +931,7 @@
           "type": "string"
         },
         "traceSampled": {
-          "description": "Optional. The sampling decision of the trace associated with the log entry. True means that the trace resource name in the trace field was sampled for storage in a trace backend. False means that the trace was not sampled for storage when this log entry was written, or the sampling decision was unknown at the time. A non-sampled trace value is still useful as a request correlation identifier. The default is False.",
+          "description": "Optional. The sampling decision of the trace associated with the log entry.True means that the trace resource name in the trace field was sampled for storage in a trace backend. False means that the trace was not sampled for storage when this log entry was written, or the sampling decision was unknown at the time. A non-sampled trace value is still useful as a request correlation identifier. The default is False.",
           "type": "boolean"
         }
       },
@@ -1034,6 +1034,11 @@
           "$ref": "BucketOptions",
           "description": "Optional. The bucket_options are required when the logs-based metric is using a DISTRIBUTION value type and it describes the bucket boundaries used to create a histogram of the extracted values."
         },
+        "createTime": {
+          "description": "Output only. The creation timestamp of the metric.This field may not be present for older metrics.",
+          "format": "google-datetime",
+          "type": "string"
+        },
         "description": {
           "description": "Optional. A description of this metric, which is used in documentation. The maximum length of the description is 8000 characters.",
           "type": "string"
@@ -1057,6 +1062,11 @@
           "description": "Required. The client-assigned metric identifier. Examples: \"error_count\", \"nginx/requests\".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.The metric identifier in this field must not be URL-encoded (https://en.wikipedia.org/wiki/Percent-encoding). However, when the metric identifier appears as the [METRIC_ID] part of a metric_name API parameter, then the metric identifier must be URL-encoded. Example: \"projects/my-project/metrics/nginx%2Frequests\".",
           "type": "string"
         },
+        "updateTime": {
+          "description": "Output only. The last update timestamp of the metric.This field may not be present for older metrics.",
+          "format": "google-datetime",
+          "type": "string"
+        },
         "valueExtractor": {
           "description": "Optional. A value_extractor is required when using a distribution logs-based metric to extract the values to record from a log entry. Two functions are supported for value extraction: EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument are:  1. field: The name of the log entry field from which the value is to be  extracted.  2. regex: A regular expression using the Google RE2 syntax  (https://github.com/google/re2/wiki/Syntax) with a single capture  group to extract data from the specified log entry field. The value  of the field is converted to a string before applying the regex.  It is an error to specify a regex that does not include exactly one  capture group.The result of the extraction must be convertible to a double type, as the distribution always records double values. If either the extraction or the conversion to double fails, then those values are not recorded in the distribution.Example: REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(\\d+).*\")",
           "type": "string"
@@ -1080,8 +1090,13 @@
       "description": "Describes a sink used to export log entries to one of the following destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls which log entries are exported. The sink must be created within a project, organization, billing account, or folder.",
       "id": "LogSink",
       "properties": {
+        "createTime": {
+          "description": "Output only. The creation timestamp of the sink.This field may not be present for older sinks.",
+          "format": "google-datetime",
+          "type": "string"
+        },
         "destination": {
-          "description": "Required. The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs With Sinks.",
+          "description": "Required. The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs with Sinks.",
           "type": "string"
         },
         "filter": {
@@ -1110,8 +1125,13 @@
           ],
           "type": "string"
         },
+        "updateTime": {
+          "description": "Output only. The last update timestamp of the sink.This field may not be present for older sinks.",
+          "format": "google-datetime",
+          "type": "string"
+        },
         "writerIdentity": {
-          "description": "Output only. An IAM identity\u0026mdash;a service account or group\u0026mdash;under which Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update, based on the setting of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting access for a resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.",
+          "description": "Output only. An IAM identity\u0026mdash;a service account or group\u0026mdash;under which Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update based on the value of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting Access for a Resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.",
           "type": "string"
         }
       },
diff --git a/logging/v2beta1/logging-gen.go b/logging/v2beta1/logging-gen.go
index 83e0c16..983d202 100644
--- a/logging/v2beta1/logging-gen.go
+++ b/logging/v2beta1/logging-gen.go
@@ -758,7 +758,7 @@
 	// "folders/[FOLDER_ID]/logs/[LOG_ID]"
 	// A project number may optionally be used in place of PROJECT_ID. The
 	// project number is translated to its corresponding PROJECT_ID
-	// internally  and the log_name field will contain PROJECT_ID in queries
+	// internally and the log_name field will contain PROJECT_ID in queries
 	// and exports.[LOG_ID] must be URL-encoded within log_name. Example:
 	// "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa
 	// ctivity". [LOG_ID] must be less than 512 characters long and can only
@@ -772,8 +772,8 @@
 	LogName string `json:"logName,omitempty"`
 
 	// Metadata: Output only. Additional metadata about the monitored
-	// resource. Only k8s_container, k8s_pod, and k8s_node
-	// MonitoredResources have this field populated.
+	// resource.Only k8s_container, k8s_pod, and k8s_node MonitoredResources
+	// have this field populated.
 	Metadata *MonitoredResourceMetadata `json:"metadata,omitempty"`
 
 	// Operation: Optional. Information about an operation associated with
@@ -790,7 +790,7 @@
 	ReceiveTimestamp string `json:"receiveTimestamp,omitempty"`
 
 	// Resource: Required. The primary monitored resource associated with
-	// this log entry. Example: a log entry that reports a database error
+	// this log entry.Example: a log entry that reports a database error
 	// would be associated with the monitored resource designating the
 	// particular database that reported the error.
 	Resource *MonitoredResource `json:"resource,omitempty"`
@@ -818,9 +818,9 @@
 	SourceLocation *LogEntrySourceLocation `json:"sourceLocation,omitempty"`
 
 	// SpanId: Optional. The span ID within the trace associated with the
-	// log entry. For Trace spans, this is the same format that the Trace
-	// API v2 uses: a 16-character hexadecimal encoding of an 8-byte array,
-	// such as <code>"000000000000004a"</code>.
+	// log entry.For Trace spans, this is the same format that the Trace API
+	// v2 uses: a 16-character hexadecimal encoding of an 8-byte array, such
+	// as <code>"000000000000004a"</code>.
 	SpanId string `json:"spanId,omitempty"`
 
 	// TextPayload: The log entry payload, represented as a Unicode string
@@ -847,7 +847,7 @@
 	Trace string `json:"trace,omitempty"`
 
 	// TraceSampled: Optional. The sampling decision of the trace associated
-	// with the log entry. True means that the trace resource name in the
+	// with the log entry.True means that the trace resource name in the
 	// trace field was sampled for storage in a trace backend. False means
 	// that the trace was not sampled for storage when this log entry was
 	// written, or the sampling decision was unknown at the time. A
@@ -1028,6 +1028,10 @@
 	// values.
 	BucketOptions *BucketOptions `json:"bucketOptions,omitempty"`
 
+	// CreateTime: Output only. The creation timestamp of the metric.This
+	// field may not be present for older metrics.
+	CreateTime string `json:"createTime,omitempty"`
+
 	// Description: Optional. A description of this metric, which is used in
 	// documentation. The maximum length of the description is 8000
 	// characters.
@@ -1084,6 +1088,10 @@
 	// Example: "projects/my-project/metrics/nginx%2Frequests".
 	Name string `json:"name,omitempty"`
 
+	// UpdateTime: Output only. The last update timestamp of the metric.This
+	// field may not be present for older metrics.
+	UpdateTime string `json:"updateTime,omitempty"`
+
 	// ValueExtractor: Optional. A value_extractor is required when using a
 	// distribution logs-based metric to extract the values to record from a
 	// log entry. Two functions are supported for value extraction:
@@ -1143,6 +1151,10 @@
 // which log entries are exported. The sink must be created within a
 // project, organization, billing account, or folder.
 type LogSink struct {
+	// CreateTime: Output only. The creation timestamp of the sink.This
+	// field may not be present for older sinks.
+	CreateTime string `json:"createTime,omitempty"`
+
 	// Destination: Required. The export
 	// destination:
 	// "storage.googleapis.com/[GCS_BUCKET]"
@@ -1152,7 +1164,7 @@
 	// /projects/[PROJECT_ID]/topics/[TOPIC_ID]"
 	// The sink's writer_identity, set when the sink is created, must have
 	// permission to write to the destination or else the log entries are
-	// not exported. For more information, see Exporting Logs With Sinks.
+	// not exported. For more information, see Exporting Logs with Sinks.
 	Destination string `json:"destination,omitempty"`
 
 	// Filter: Optional. An advanced logs filter. The only exported log
@@ -1199,13 +1211,17 @@
 	//   "V1" - LogEntry version 1 format.
 	OutputVersionFormat string `json:"outputVersionFormat,omitempty"`
 
+	// UpdateTime: Output only. The last update timestamp of the sink.This
+	// field may not be present for older sinks.
+	UpdateTime string `json:"updateTime,omitempty"`
+
 	// WriterIdentity: Output only. An IAM identity&mdash;a service account
 	// or group&mdash;under which Logging writes the exported log entries to
 	// the sink's destination. This field is set by sinks.create and
-	// sinks.update, based on the setting of unique_writer_identity in those
+	// sinks.update based on the value of unique_writer_identity in those
 	// methods.Until you grant this identity write-access to the
 	// destination, log entry exports from this sink will fail. For more
-	// information, see Granting access for a resource. Consult the
+	// information, see Granting Access for a Resource. Consult the
 	// destination service's documentation to determine the appropriate IAM
 	// roles to assign to the identity.
 	WriterIdentity string `json:"writerIdentity,omitempty"`
@@ -1214,7 +1230,7 @@
 	// server.
 	googleapi.ServerResponse `json:"-"`
 
-	// ForceSendFields is a list of field names (e.g. "Destination") to
+	// ForceSendFields is a list of field names (e.g. "CreateTime") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
 	// non-interface field appearing in ForceSendFields will be sent to the
@@ -1222,10 +1238,10 @@
 	// used to include empty fields in Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "Destination") to include
-	// in API requests with the JSON null value. By default, fields with
-	// empty values are omitted from API requests. However, any field with
-	// an empty value appearing in NullFields will be sent to the server as
+	// NullFields is a list of field names (e.g. "CreateTime") to include in
+	// API requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
 	// null. It is an error if a field in this list has a non-empty value.
 	// This may be used to include null fields in Patch requests.
 	NullFields []string `json:"-"`
@@ -3825,7 +3841,7 @@
 
 // Update: Updates a sink. This method replaces the following fields in
 // the existing sink with values from the new sink: destination, and
-// filter. The updated sink might also have a new writer_identity; see
+// filter.The updated sink might also have a new writer_identity; see
 // the unique_writer_identity field.
 func (r *ProjectsSinksService) Update(sinkNameid string, logsink *LogSink) *ProjectsSinksUpdateCall {
 	c := &ProjectsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
@@ -3955,7 +3971,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
+	//   "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter.The updated sink might also have a new writer_identity; see the unique_writer_identity field.",
 	//   "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}",
 	//   "httpMethod": "PUT",
 	//   "id": "logging.projects.sinks.update",
diff --git a/ml/v1/ml-api.json b/ml/v1/ml-api.json
index d4f9a12..e283b7b 100644
--- a/ml/v1/ml-api.json
+++ b/ml/v1/ml-api.json
@@ -1015,7 +1015,7 @@
       }
     }
   },
-  "revision": "20181215",
+  "revision": "20190115",
   "rootUrl": "https://ml.googleapis.com/",
   "schemas": {
     "GoogleApi__HttpBody": {
@@ -1023,11 +1023,11 @@
       "id": "GoogleApi__HttpBody",
       "properties": {
         "contentType": {
-          "description": "The HTTP Content-Type string representing the content type of the body.",
+          "description": "The HTTP Content-Type header value specifying the content type of the body.",
           "type": "string"
         },
         "data": {
-          "description": "HTTP body binary data.",
+          "description": "The HTTP request/response body as raw binary.",
           "format": "byte",
           "type": "string"
         },
@@ -1230,7 +1230,7 @@
             "RANDOM_SEARCH"
           ],
           "enumDescriptions": [
-            "The default algorithm used by hyperparameter tuning service.",
+            "The default algorithm used by the hyperparameter tuning service. This is\na Bayesian optimization algorithm.",
             "Simple grid search within the feasible space. To use grid search,\nall parameters must be `INTEGER`, `CATEGORICAL`, or `DISCRETE`.",
             "Simple random search within the feasible space."
           ],
@@ -1655,7 +1655,7 @@
       "type": "object"
     },
     "GoogleCloudMlV1__PredictionInput": {
-      "description": "Represents input parameters for a prediction job. Next field: 20",
+      "description": "Represents input parameters for a prediction job.",
       "id": "GoogleCloudMlV1__PredictionInput",
       "properties": {
         "accelerator": {
@@ -1802,7 +1802,7 @@
           "type": "string"
         },
         "masterType": {
-          "description": "Optional. Specifies the type of virtual machine to use for your training\njob's master worker.\n\nThe following types are supported:\n\n\u003cdl\u003e\n  \u003cdt\u003estandard\u003c/dt\u003e\n  \u003cdd\u003e\n  A basic machine configuration suitable for training simple models with\n  small to moderate datasets.\n  \u003c/dd\u003e\n  \u003cdt\u003elarge_model\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with a lot of memory, specially suited for parameter servers\n  when your model is large (having many hidden layers or layers with very\n  large numbers of nodes).\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_s\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine suitable for the master and workers of the cluster when your\n  model requires more computation than the standard machine can handle\n  satisfactorily.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with roughly twice the number of cores and roughly double the\n  memory of \u003ci\u003ecomplex_model_s\u003c/i\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_l\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with roughly twice the number of cores and roughly double the\n  memory of \u003ci\u003ecomplex_model_m\u003c/i\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003estandard\u003c/i\u003e that\n  also includes a single NVIDIA Tesla K80 GPU. See more about\n  \u003ca href=\"/ml-engine/docs/tensorflow/using-gpus\"\u003eusing GPUs to\n  train your model\u003c/a\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_m\u003c/i\u003e that also includes\n  four NVIDIA Tesla K80 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_l_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_l\u003c/i\u003e that also includes\n  eight NVIDIA Tesla K80 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_p100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003estandard\u003c/i\u003e that\n  also includes a single NVIDIA Tesla P100 GPU.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m_p100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_m\u003c/i\u003e that also includes\n  four NVIDIA Tesla P100 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_v100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003estandard\u003c/i\u003e that\n  also includes a single NVIDIA Tesla V100 GPU. The availability of these\n  GPUs is in the \u003ci\u003eBeta\u003c/i\u003e launch stage.\n  \u003c/dd\u003e\n  \u003cdt\u003elarge_model_v100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003elarge_model\u003c/i\u003e that\n  also includes a single NVIDIA Tesla V100 GPU. The availability of these\n  GPUs is in the \u003ci\u003eBeta\u003c/i\u003e launch stage.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m_v100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_m\u003c/i\u003e that\n  also includes four NVIDIA Tesla V100 GPUs. The availability of these\n  GPUs is in the \u003ci\u003eBeta\u003c/i\u003e launch stage.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_l_v100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_l\u003c/i\u003e that\n  also includes eight NVIDIA Tesla V100 GPUs. 
The availability of these\n  GPUs is in the \u003ci\u003eBeta\u003c/i\u003e launch stage.\n  \u003c/dd\u003e\n  \u003cdt\u003ecloud_tpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A TPU VM including one Cloud TPU. See more about\n  \u003ca href=\"/ml-engine/docs/tensorflow/using-tpus\"\u003eusing TPUs to train\n  your model\u003c/a\u003e.\n  \u003c/dd\u003e\n\u003c/dl\u003e\n\nYou must set this value when `scaleTier` is set to `CUSTOM`.",
+          "description": "Optional. Specifies the type of virtual machine to use for your training\njob's master worker.\n\nThe following types are supported:\n\n\u003cdl\u003e\n  \u003cdt\u003estandard\u003c/dt\u003e\n  \u003cdd\u003e\n  A basic machine configuration suitable for training simple models with\n  small to moderate datasets.\n  \u003c/dd\u003e\n  \u003cdt\u003elarge_model\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with a lot of memory, specially suited for parameter servers\n  when your model is large (having many hidden layers or layers with very\n  large numbers of nodes).\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_s\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine suitable for the master and workers of the cluster when your\n  model requires more computation than the standard machine can handle\n  satisfactorily.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with roughly twice the number of cores and roughly double the\n  memory of \u003ci\u003ecomplex_model_s\u003c/i\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_l\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with roughly twice the number of cores and roughly double the\n  memory of \u003ci\u003ecomplex_model_m\u003c/i\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003estandard\u003c/i\u003e that\n  also includes a single NVIDIA Tesla K80 GPU. See more about\n  \u003ca href=\"/ml-engine/docs/tensorflow/using-gpus\"\u003eusing GPUs to\n  train your model\u003c/a\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_m\u003c/i\u003e that also includes\n  four NVIDIA Tesla K80 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_l_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_l\u003c/i\u003e that also includes\n  eight NVIDIA Tesla K80 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_p100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003estandard\u003c/i\u003e that\n  also includes a single NVIDIA Tesla P100 GPU.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m_p100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_m\u003c/i\u003e that also includes\n  four NVIDIA Tesla P100 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_v100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003estandard\u003c/i\u003e that\n  also includes a single NVIDIA Tesla V100 GPU.\n  \u003c/dd\u003e\n  \u003cdt\u003elarge_model_v100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003elarge_model\u003c/i\u003e that\n  also includes a single NVIDIA Tesla V100 GPU.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m_v100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_m\u003c/i\u003e that\n  also includes four NVIDIA Tesla V100 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_l_v100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_l\u003c/i\u003e that\n  also includes eight NVIDIA Tesla V100 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003ecloud_tpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A TPU VM including one Cloud TPU. 
See more about\n  \u003ca href=\"/ml-engine/docs/tensorflow/using-tpus\"\u003eusing TPUs to train\n  your model\u003c/a\u003e.\n  \u003c/dd\u003e\n\u003c/dl\u003e\n\nYou must set this value when `scaleTier` is set to `CUSTOM`.",
           "type": "string"
         },
         "packageUris": {
@@ -1813,7 +1813,7 @@
           "type": "array"
         },
         "parameterServerCount": {
-          "description": "Optional. The number of parameter server replicas to use for the training\njob. Each replica in the cluster will be of the type specified in\n`parameter_server_type`.\n\nThis value can only be used when `scale_tier` is set to `CUSTOM`.If you\nset this value, you must also set `parameter_server_type`.",
+          "description": "Optional. The number of parameter server replicas to use for the training\njob. Each replica in the cluster will be of the type specified in\n`parameter_server_type`.\n\nThis value can only be used when `scale_tier` is set to `CUSTOM`.If you\nset this value, you must also set `parameter_server_type`.\n\nThe default value is zero.",
           "format": "int64",
           "type": "string"
         },
@@ -1858,7 +1858,7 @@
           "type": "string"
         },
         "workerCount": {
-          "description": "Optional. The number of worker replicas to use for the training job. Each\nreplica in the cluster will be of the type specified in `worker_type`.\n\nThis value can only be used when `scale_tier` is set to `CUSTOM`. If you\nset this value, you must also set `worker_type`.",
+          "description": "Optional. The number of worker replicas to use for the training job. Each\nreplica in the cluster will be of the type specified in `worker_type`.\n\nThis value can only be used when `scale_tier` is set to `CUSTOM`. If you\nset this value, you must also set `worker_type`.\n\nThe default value is zero.",
           "format": "int64",
           "type": "string"
         },
@@ -1936,7 +1936,7 @@
             "XGBOOST"
           ],
           "enumDescriptions": [
-            "Unspecified framework. Defaults to TensorFlow.",
+            "Unspecified framework. Assigns a value based on the file suffix.",
             "Tensorflow framework.",
             "Scikit-learn framework.",
             "XGBoost framework."
diff --git a/ml/v1/ml-gen.go b/ml/v1/ml-gen.go
index 7c1a921..08112fb 100644
--- a/ml/v1/ml-gen.go
+++ b/ml/v1/ml-gen.go
@@ -196,11 +196,11 @@
 // are
 // handled, all other features will continue to work unchanged.
 type GoogleApi__HttpBody struct {
-	// ContentType: The HTTP Content-Type string representing the content
-	// type of the body.
+	// ContentType: The HTTP Content-Type header value specifying the
+	// content type of the body.
 	ContentType string `json:"contentType,omitempty"`
 
-	// Data: HTTP body binary data.
+	// Data: The HTTP request/response body as raw binary.
 	Data string `json:"data,omitempty"`
 
 	// Extensions: Application specific response metadata. Must be set in
@@ -575,8 +575,9 @@
 	// algorithm if unspecified.
 	//
 	// Possible values:
-	//   "ALGORITHM_UNSPECIFIED" - The default algorithm used by
-	// hyperparameter tuning service.
+	//   "ALGORITHM_UNSPECIFIED" - The default algorithm used by the
+	// hyperparameter tuning service. This is
+	// a Bayesian optimization algorithm.
 	//   "GRID_SEARCH" - Simple grid search within the feasible space. To
 	// use grid search,
 	// all parameters must be `INTEGER`, `CATEGORICAL`, or `DISCRETE`.
@@ -1300,7 +1301,7 @@
 }
 
 // GoogleCloudMlV1__PredictionInput: Represents input parameters for a
-// prediction job. Next field: 20
+// prediction job.
 type GoogleCloudMlV1__PredictionInput struct {
 	// Accelerator: Optional. The type and number of accelerators to be
 	// attached to each
@@ -1595,30 +1596,22 @@
 	//   <dt>standard_v100</dt>
 	//   <dd>
 	//   A machine equivalent to <i>standard</i> that
-	//   also includes a single NVIDIA Tesla V100 GPU. The availability of
-	// these
-	//   GPUs is in the <i>Beta</i> launch stage.
+	//   also includes a single NVIDIA Tesla V100 GPU.
 	//   </dd>
 	//   <dt>large_model_v100</dt>
 	//   <dd>
 	//   A machine equivalent to <i>large_model</i> that
-	//   also includes a single NVIDIA Tesla V100 GPU. The availability of
-	// these
-	//   GPUs is in the <i>Beta</i> launch stage.
+	//   also includes a single NVIDIA Tesla V100 GPU.
 	//   </dd>
 	//   <dt>complex_model_m_v100</dt>
 	//   <dd>
 	//   A machine equivalent to <i>complex_model_m</i> that
-	//   also includes four NVIDIA Tesla V100 GPUs. The availability of
-	// these
-	//   GPUs is in the <i>Beta</i> launch stage.
+	//   also includes four NVIDIA Tesla V100 GPUs.
 	//   </dd>
 	//   <dt>complex_model_l_v100</dt>
 	//   <dd>
 	//   A machine equivalent to <i>complex_model_l</i> that
-	//   also includes eight NVIDIA Tesla V100 GPUs. The availability of
-	// these
-	//   GPUs is in the <i>Beta</i> launch stage.
+	//   also includes eight NVIDIA Tesla V100 GPUs.
 	//   </dd>
 	//   <dt>cloud_tpu</dt>
 	//   <dd>
@@ -1647,6 +1640,8 @@
 	// This value can only be used when `scale_tier` is set to `CUSTOM`.If
 	// you
 	// set this value, you must also set `parameter_server_type`.
+	//
+	// The default value is zero.
 	ParameterServerCount int64 `json:"parameterServerCount,omitempty,string"`
 
 	// ParameterServerType: Optional. Specifies the type of virtual machine
@@ -1753,6 +1748,8 @@
 	// This value can only be used when `scale_tier` is set to `CUSTOM`. If
 	// you
 	// set this value, you must also set `worker_type`.
+	//
+	// The default value is zero.
 	WorkerCount int64 `json:"workerCount,omitempty,string"`
 
 	// WorkerType: Optional. Specifies the type of virtual machine to use
@@ -1925,8 +1922,8 @@
 	// of the model to 1.4 or greater.
 	//
 	// Possible values:
-	//   "FRAMEWORK_UNSPECIFIED" - Unspecified framework. Defaults to
-	// TensorFlow.
+	//   "FRAMEWORK_UNSPECIFIED" - Unspecified framework. Assigns a value
+	// based on the file suffix.
 	//   "TENSORFLOW" - Tensorflow framework.
 	//   "SCIKIT_LEARN" - Scikit-learn framework.
 	//   "XGBOOST" - XGBoost framework.
diff --git a/remotebuildexecution/v1/remotebuildexecution-api.json b/remotebuildexecution/v1/remotebuildexecution-api.json
index aed23e0..186190b 100644
--- a/remotebuildexecution/v1/remotebuildexecution-api.json
+++ b/remotebuildexecution/v1/remotebuildexecution-api.json
@@ -337,7 +337,7 @@
       }
     }
   },
-  "revision": "20190103",
+  "revision": "20190116",
   "rootUrl": "https://remotebuildexecution.googleapis.com/",
   "schemas": {
     "BuildBazelRemoteExecutionV2Action": {
@@ -438,21 +438,21 @@
           "type": "array"
         },
         "environmentVariables": {
-          "description": "The environment variables to set when running the program. The worker may\nprovide its own default environment variables; these defaults can be\noverridden using this field. Additional variables can also be specified.\n\nIn order to ensure that equivalent `Command`s always hash to the same\nvalue, the environment variables MUST be lexicographically sorted by name.\nSorting of strings is done by code point, equivalently, by the UTF-8 bytes.",
+          "description": "The environment variables to set when running the program. The worker may\nprovide its own default environment variables; these defaults can be\noverridden using this field. Additional variables can also be specified.\n\nIn order to ensure that equivalent\nCommands always hash to the same\nvalue, the environment variables MUST be lexicographically sorted by name.\nSorting of strings is done by code point, equivalently, by the UTF-8 bytes.",
           "items": {
             "$ref": "BuildBazelRemoteExecutionV2CommandEnvironmentVariable"
           },
           "type": "array"
         },
         "outputDirectories": {
-          "description": "A list of the output directories that the client expects to retrieve from\nthe action. Only the contents of the indicated directories (recursively\nincluding the contents of their subdirectories) will be\nreturned, as well as files listed in `output_files`. Other files that may\nbe created during command execution are discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path. The special value of empty string is allowed,\nalthough not recommended, and can be used to capture the entire working\ndirectory tree, including inputs.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output directory cannot be duplicated, be a parent of another output\ndirectory, be a parent of a listed output file, or have the same path as\nany of the listed output files.",
+          "description": "A list of the output directories that the client expects to retrieve from\nthe action. Only the listed directories will be returned (an entire\ndirectory structure will be returned as a\nTree message digest, see\nOutputDirectory), as\nwell as files listed in `output_files`. Other files or directories that\nmay be created during command execution are discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path. The special value of empty string is allowed,\nalthough not recommended, and can be used to capture the entire working\ndirectory tree, including inputs.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output directory cannot be duplicated or have the same path as any of\nthe listed output files.\n\nDirectories leading up to the output directories (but not the output\ndirectories themselves) are created by the worker prior to execution, even\nif they are not explicitly part of the input root.",
           "items": {
             "type": "string"
           },
           "type": "array"
         },
         "outputFiles": {
-          "description": "A list of the output files that the client expects to retrieve from the\naction. Only the listed files, as well as directories listed in\n`output_directories`, will be returned to the client as output.\nOther files that may be created during command execution are discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output file cannot be duplicated, be a parent of another output file, be\na child of a listed output directory, or have the same path as any of the\nlisted output directories.",
+          "description": "A list of the output files that the client expects to retrieve from the\naction. Only the listed files, as well as directories listed in\n`output_directories`, will be returned to the client as output.\nOther files or directories that may be created during command execution\nare discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output file cannot be duplicated, be a parent of another output file, or\nhave the same path as any of the listed output directories.\n\nDirectories leading up to the output files are created by the worker prior\nto execution, even if they are not explicitly part of the input root.",
           "items": {
             "type": "string"
           },
@@ -485,7 +485,7 @@
       "type": "object"
     },
     "BuildBazelRemoteExecutionV2Digest": {
-      "description": "A content digest. A digest for a given blob consists of the size of the blob\nand its hash. The hash algorithm to use is defined by the server, but servers\nSHOULD use SHA-256.\n\nThe size is considered to be an integral part of the digest and cannot be\nseparated. That is, even if the `hash` field is correctly specified but\n`size_bytes` is not, the server MUST reject the request.\n\nThe reason for including the size in the digest is as follows: in a great\nmany cases, the server needs to know the size of the blob it is about to work\nwith prior to starting an operation with it, such as flattening Merkle tree\nstructures or streaming it to a worker. Technically, the server could\nimplement a separate metadata store, but this results in a significantly more\ncomplicated implementation as opposed to having the client specify the size\nup-front (or storing the size along with the digest in every message where\ndigests are embedded). This does mean that the API leaks some implementation\ndetails of (what we consider to be) a reasonable server implementation, but\nwe consider this to be a worthwhile tradeoff.\n\nWhen a `Digest` is used to refer to a proto message, it always refers to the\nmessage in binary encoded form. To ensure consistent hashing, clients and\nservers MUST ensure that they serialize messages according to the following\nrules, even if there are alternate valid encodings for the same message.\n- Fields are serialized in tag order.\n- There are no unknown fields.\n- There are no duplicate fields.\n- Fields are serialized according to the default semantics for their type.\n\nMost protocol buffer implementations will always follow these rules when\nserializing, but care should be taken to avoid shortcuts. For instance,\nconcatenating two messages to merge them may produce duplicate fields.",
+      "description": "A content digest. A digest for a given blob consists of the size of the blob\nand its hash. The hash algorithm to use is defined by the server, but servers\nSHOULD use SHA-256.\n\nThe size is considered to be an integral part of the digest and cannot be\nseparated. That is, even if the `hash` field is correctly specified but\n`size_bytes` is not, the server MUST reject the request.\n\nThe reason for including the size in the digest is as follows: in a great\nmany cases, the server needs to know the size of the blob it is about to work\nwith prior to starting an operation with it, such as flattening Merkle tree\nstructures or streaming it to a worker. Technically, the server could\nimplement a separate metadata store, but this results in a significantly more\ncomplicated implementation as opposed to having the client specify the size\nup-front (or storing the size along with the digest in every message where\ndigests are embedded). This does mean that the API leaks some implementation\ndetails of (what we consider to be) a reasonable server implementation, but\nwe consider this to be a worthwhile tradeoff.\n\nWhen a `Digest` is used to refer to a proto message, it always refers to the\nmessage in binary encoded form. To ensure consistent hashing, clients and\nservers MUST ensure that they serialize messages according to the following\nrules, even if there are alternate valid encodings for the same message:\n\n* Fields are serialized in tag order.\n* There are no unknown fields.\n* There are no duplicate fields.\n* Fields are serialized according to the default semantics for their type.\n\nMost protocol buffer implementations will always follow these rules when\nserializing, but care should be taken to avoid shortcuts. For instance,\nconcatenating two messages to merge them may produce duplicate fields.",
       "id": "BuildBazelRemoteExecutionV2Digest",
       "properties": {
         "hash": {
@@ -501,7 +501,7 @@
       "type": "object"
     },
     "BuildBazelRemoteExecutionV2Directory": {
-      "description": "A `Directory` represents a directory node in a file tree, containing zero or\nmore children FileNodes,\nDirectoryNodes and\nSymlinkNodes.\nEach `Node` contains its name in the directory, either the digest of its\ncontent (either a file blob or a `Directory` proto) or a symlink target, as\nwell as possibly some metadata about the file or directory.\n\nIn order to ensure that two equivalent directory trees hash to the same\nvalue, the following restrictions MUST be obeyed when constructing a\na `Directory`:\n  - Every child in the directory must have a path of exactly one segment.\n    Multiple levels of directory hierarchy may not be collapsed.\n  - Each child in the directory must have a unique path segment (file name).\n  - The files, directories and symlinks in the directory must each be sorted\n    in lexicographical order by path. The path strings must be sorted by code\n    point, equivalently, by UTF-8 bytes.\n\nA `Directory` that obeys the restrictions is said to be in canonical form.\n\nAs an example, the following could be used for a file named `bar` and a\ndirectory named `foo` with an executable file named `baz` (hashes shortened\nfor readability):\n\n```json\n// (Directory proto)\n{\n  files: [\n    {\n      name: \"bar\",\n      digest: {\n        hash: \"4a73bc9d03...\",\n        size: 65534\n      }\n    }\n  ],\n  directories: [\n    {\n      name: \"foo\",\n      digest: {\n        hash: \"4cf2eda940...\",\n        size: 43\n      }\n    }\n  ]\n}\n\n// (Directory proto with hash \"4cf2eda940...\" and size 43)\n{\n  files: [\n    {\n      name: \"baz\",\n      digest: {\n        hash: \"b2c941073e...\",\n        size: 1294,\n      },\n      is_executable: true\n    }\n  ]\n}\n```",
+      "description": "A `Directory` represents a directory node in a file tree, containing zero or\nmore children FileNodes,\nDirectoryNodes and\nSymlinkNodes.\nEach `Node` contains its name in the directory, either the digest of its\ncontent (either a file blob or a `Directory` proto) or a symlink target, as\nwell as possibly some metadata about the file or directory.\n\nIn order to ensure that two equivalent directory trees hash to the same\nvalue, the following restrictions MUST be obeyed when constructing a\na `Directory`:\n\n* Every child in the directory must have a path of exactly one segment.\n  Multiple levels of directory hierarchy may not be collapsed.\n* Each child in the directory must have a unique path segment (file name).\n* The files, directories and symlinks in the directory must each be sorted\n  in lexicographical order by path. The path strings must be sorted by code\n  point, equivalently, by UTF-8 bytes.\n\nA `Directory` that obeys the restrictions is said to be in canonical form.\n\nAs an example, the following could be used for a file named `bar` and a\ndirectory named `foo` with an executable file named `baz` (hashes shortened\nfor readability):\n\n```json\n// (Directory proto)\n{\n  files: [\n    {\n      name: \"bar\",\n      digest: {\n        hash: \"4a73bc9d03...\",\n        size: 65534\n      }\n    }\n  ],\n  directories: [\n    {\n      name: \"foo\",\n      digest: {\n        hash: \"4cf2eda940...\",\n        size: 43\n      }\n    }\n  ]\n}\n\n// (Directory proto with hash \"4cf2eda940...\" and size 43)\n{\n  files: [\n    {\n      name: \"baz\",\n      digest: {\n        hash: \"b2c941073e...\",\n        size: 1294,\n      },\n      is_executable: true\n    }\n  ]\n}\n```",
       "id": "BuildBazelRemoteExecutionV2Directory",
       "properties": {
         "directories": {
@@ -587,6 +587,10 @@
           "description": "True if the result was served from cache, false if it was executed.",
           "type": "boolean"
         },
+        "message": {
+          "description": "Freeform informational message with details on the execution of the action\nthat may be displayed to the user upon failure or when requested explicitly.",
+          "type": "string"
+        },
         "result": {
           "$ref": "BuildBazelRemoteExecutionV2ActionResult",
           "description": "The result of the action."
@@ -774,7 +778,7 @@
       "type": "object"
     },
     "BuildBazelRemoteExecutionV2RequestMetadata": {
-      "description": "An optional Metadata to attach to any RPC request to tell the server about an\nexternal context of the request. The server may use this for logging or other\npurposes. To use it, the client attaches the header to the call using the\ncanonical proto serialization:\nname: build.bazel.remote.execution.v2.requestmetadata-bin\ncontents: the base64 encoded binary RequestMetadata message.",
+      "description": "An optional Metadata to attach to any RPC request to tell the server about an\nexternal context of the request. The server may use this for logging or other\npurposes. To use it, the client attaches the header to the call using the\ncanonical proto serialization:\n\n* name: `build.bazel.remote.execution.v2.requestmetadata-bin`\n* contents: the base64 encoded binary `RequestMetadata` message.",
       "id": "BuildBazelRemoteExecutionV2RequestMetadata",
       "properties": {
         "actionId": {
@@ -1008,6 +1012,10 @@
           "description": "The location is a GCP region. Currently only `us-central1` is supported.",
           "type": "string"
         },
+        "loggingEnabled": {
+          "description": "Output only. Whether stack driver logging is enabled for the instance.",
+          "type": "boolean"
+        },
         "name": {
           "description": "Output only. Instance resource name formatted as:\n`projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.\nName should not be populated when creating an instance since it is provided\nin the `instance_id` field.",
           "type": "string"
@@ -1115,7 +1123,7 @@
           "type": "string"
         },
         "reserved": {
-          "description": "Output only. `reserved=true` means the worker is reserved and won't be\npreempted.",
+          "description": "Determines whether the worker is reserved (and therefore won't be\npreempted).\nSee [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for more\ndetails.",
           "type": "boolean"
         }
       },
@@ -1580,17 +1588,8 @@
           "format": "int32",
           "type": "integer"
         },
-        "outputs": {
-          "$ref": "GoogleDevtoolsRemoteworkersV1test2Digest",
-          "description": "The output files. The blob referenced by the digest should contain\none of the following (implementation-dependent):\n   * A marshalled DirectoryMetadata of the returned filesystem\n   * A LUCI-style .isolated file"
-        },
-        "overhead": {
-          "description": "The amount of time *not* spent executing the command (ie\nuploading/downloading files).",
-          "format": "google-duration",
-          "type": "string"
-        },
-        "statistics": {
-          "description": "Implementation-dependent statistics about the task. Both servers and bots\nmay define messages which can be encoded here; bots are free to provide\nstatistics in multiple formats, and servers are free to choose one or more\nof the values to process and ignore others. In particular, it is *not*\nconsidered an error for the bot to provide the server with a field that it\ndoesn't know about.",
+        "metadata": {
+          "description": "Implementation-dependent metadata about the task. Both servers and bots\nmay define messages which can be encoded here; bots are free to provide\nmetadata in multiple formats, and servers are free to choose one or more\nof the values to process and ignore others. In particular, it is *not*\nconsidered an error for the bot to provide the server with a field that it\ndoesn't know about.",
           "items": {
             "additionalProperties": {
               "description": "Properties of the object. Contains field @type with type URL.",
@@ -1600,6 +1599,15 @@
           },
           "type": "array"
         },
+        "outputs": {
+          "$ref": "GoogleDevtoolsRemoteworkersV1test2Digest",
+          "description": "The output files. The blob referenced by the digest should contain\none of the following (implementation-dependent):\n   * A marshalled DirectoryMetadata of the returned filesystem\n   * A LUCI-style .isolated file"
+        },
+        "overhead": {
+          "description": "The amount of time *not* spent executing the command (ie\nuploading/downloading files).",
+          "format": "google-duration",
+          "type": "string"
+        },
         "status": {
           "$ref": "GoogleRpcStatus",
           "description": "An overall status for the command. For example, if the command timed out,\nthis might have a code of DEADLINE_EXCEEDED; if it was killed by the OS for\nmemory exhaustion, it might have a code of RESOURCE_EXHAUSTED."
diff --git a/remotebuildexecution/v1/remotebuildexecution-gen.go b/remotebuildexecution/v1/remotebuildexecution-gen.go
index 1cdcf4f..fb5583e 100644
--- a/remotebuildexecution/v1/remotebuildexecution-gen.go
+++ b/remotebuildexecution/v1/remotebuildexecution-gen.go
@@ -482,8 +482,8 @@
 	// overridden using this field. Additional variables can also be
 	// specified.
 	//
-	// In order to ensure that equivalent `Command`s always hash to the
-	// same
+	// In order to ensure that equivalent
+	// Commands always hash to the same
 	// value, the environment variables MUST be lexicographically sorted by
 	// name.
 	// Sorting of strings is done by code point, equivalently, by the UTF-8
@@ -492,12 +492,14 @@
 
 	// OutputDirectories: A list of the output directories that the client
 	// expects to retrieve from
-	// the action. Only the contents of the indicated directories
-	// (recursively
-	// including the contents of their subdirectories) will be
-	// returned, as well as files listed in `output_files`. Other files that
-	// may
-	// be created during command execution are discarded.
+	// the action. Only the listed directories will be returned (an
+	// entire
+	// directory structure will be returned as a
+	// Tree message digest, see
+	// OutputDirectory), as
+	// well as files listed in `output_files`. Other files or directories
+	// that
+	// may be created during command execution are discarded.
 	//
 	// The paths are relative to the working directory of the action
 	// execution.
@@ -519,11 +521,15 @@
 	// UTF-8
 	// bytes).
 	//
-	// An output directory cannot be duplicated, be a parent of another
+	// An output directory cannot be duplicated or have the same path as any
+	// of
+	// the listed output files.
+	//
+	// Directories leading up to the output directories (but not the
 	// output
-	// directory, be a parent of a listed output file, or have the same path
-	// as
-	// any of the listed output files.
+	// directories themselves) are created by the worker prior to execution,
+	// even
+	// if they are not explicitly part of the input root.
 	OutputDirectories []string `json:"outputDirectories,omitempty"`
 
 	// OutputFiles: A list of the output files that the client expects to
@@ -531,8 +537,9 @@
 	// action. Only the listed files, as well as directories listed
 	// in
 	// `output_directories`, will be returned to the client as output.
-	// Other files that may be created during command execution are
-	// discarded.
+	// Other files or directories that may be created during command
+	// execution
+	// are discarded.
 	//
 	// The paths are relative to the working directory of the action
 	// execution.
@@ -551,10 +558,13 @@
 	// bytes).
 	//
 	// An output file cannot be duplicated, be a parent of another output
-	// file, be
-	// a child of a listed output directory, or have the same path as any of
-	// the
-	// listed output directories.
+	// file, or
+	// have the same path as any of the listed output
+	// directories.
+	//
+	// Directories leading up to the output files are created by the worker
+	// prior
+	// to execution, even if they are not explicitly part of the input root.
 	OutputFiles []string `json:"outputFiles,omitempty"`
 
 	// Platform: The platform requirements for the execution environment.
@@ -669,11 +679,12 @@
 // servers MUST ensure that they serialize messages according to the
 // following
 // rules, even if there are alternate valid encodings for the same
-// message.
-// - Fields are serialized in tag order.
-// - There are no unknown fields.
-// - There are no duplicate fields.
-// - Fields are serialized according to the default semantics for their
+// message:
+//
+// * Fields are serialized in tag order.
+// * There are no unknown fields.
+// * There are no duplicate fields.
+// * Fields are serialized according to the default semantics for their
 // type.
 //
 // Most protocol buffer implementations will always follow these rules
@@ -730,16 +741,17 @@
 // value, the following restrictions MUST be obeyed when constructing
 // a
 // a `Directory`:
-//   - Every child in the directory must have a path of exactly one
+//
+// * Every child in the directory must have a path of exactly one
 // segment.
-//     Multiple levels of directory hierarchy may not be collapsed.
-//   - Each child in the directory must have a unique path segment (file
+//   Multiple levels of directory hierarchy may not be collapsed.
+// * Each child in the directory must have a unique path segment (file
 // name).
-//   - The files, directories and symlinks in the directory must each be
+// * The files, directories and symlinks in the directory must each be
 // sorted
-//     in lexicographical order by path. The path strings must be sorted
+//   in lexicographical order by path. The path strings must be sorted
 // by code
-//     point, equivalently, by UTF-8 bytes.
+//   point, equivalently, by UTF-8 bytes.
 //
 // A `Directory` that obeys the restrictions is said to be in canonical
 // form.
@@ -922,6 +934,12 @@
 	// was executed.
 	CachedResult bool `json:"cachedResult,omitempty"`
 
+	// Message: Freeform informational message with details on the execution
+	// of the action
+	// that may be displayed to the user upon failure or when requested
+	// explicitly.
+	Message string `json:"message,omitempty"`
+
 	// Result: The result of the action.
 	Result *BuildBazelRemoteExecutionV2ActionResult `json:"result,omitempty"`
 
@@ -1362,8 +1380,9 @@
 // purposes. To use it, the client attaches the header to the call using
 // the
 // canonical proto serialization:
-// name: build.bazel.remote.execution.v2.requestmetadata-bin
-// contents: the base64 encoded binary RequestMetadata message.
+//
+// * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
+// * contents: the base64 encoded binary `RequestMetadata` message.
 type BuildBazelRemoteExecutionV2RequestMetadata struct {
 	// ActionId: An identifier that ties multiple requests to the same
 	// action.
@@ -1873,6 +1892,10 @@
 	// is supported.
 	Location string `json:"location,omitempty"`
 
+	// LoggingEnabled: Output only. Whether Stackdriver logging is enabled
+	// for the instance.
+	LoggingEnabled bool `json:"loggingEnabled,omitempty"`
+
 	// Name: Output only. Instance resource name formatted
 	// as:
 	// `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
@@ -2104,9 +2127,12 @@
 	// Platforms](https://cloud.google.com/compute/docs/cpu-platforms).
 	MinCpuPlatform string `json:"minCpuPlatform,omitempty"`
 
-	// Reserved: Output only. `reserved=true` means the worker is reserved
-	// and won't be
-	// preempted.
+	// Reserved: Determines whether the worker is reserved (and therefore
+	// won't be
+	// preempted).
+	// See [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for
+	// more
+	// details.
 	Reserved bool `json:"reserved,omitempty"`
 
 	// ForceSendFields is a list of field names (e.g. "DiskSizeGb") to
@@ -3307,6 +3333,19 @@
 	// `status` has a code of OK (otherwise it may simply be unset).
 	ExitCode int64 `json:"exitCode,omitempty"`
 
+	// Metadata: Implementation-dependent metadata about the task. Both
+	// servers and bots
+	// may define messages which can be encoded here; bots are free to
+	// provide
+	// metadata in multiple formats, and servers are free to choose one or
+	// more
+	// of the values to process and ignore others. In particular, it is
+	// *not*
+	// considered an error for the bot to provide the server with a field
+	// that it
+	// doesn't know about.
+	Metadata []googleapi.RawMessage `json:"metadata,omitempty"`
+
 	// Outputs: The output files. The blob referenced by the digest should
 	// contain
 	// one of the following (implementation-dependent):
@@ -3319,19 +3358,6 @@
 	// uploading/downloading files).
 	Overhead string `json:"overhead,omitempty"`
 
-	// Statistics: Implementation-dependent statistics about the task. Both
-	// servers and bots
-	// may define messages which can be encoded here; bots are free to
-	// provide
-	// statistics in multiple formats, and servers are free to choose one or
-	// more
-	// of the values to process and ignore others. In particular, it is
-	// *not*
-	// considered an error for the bot to provide the server with a field
-	// that it
-	// doesn't know about.
-	Statistics []googleapi.RawMessage `json:"statistics,omitempty"`
-
 	// Status: An overall status for the command. For example, if the
 	// command timed out,
 	// this might have a code of DEADLINE_EXCEEDED; if it was killed by the
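The reworded `output_files`/`output_directories` comments keep the path rules: relative paths, `/` as the separator, no leading or trailing slash, and sorting by code point (equivalently, UTF-8 bytes). A small client-side normalization sketch using only the standard library:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// normalizeOutputPaths checks the slash rules and sorts the paths so that
// equivalent Commands hash identically. For valid UTF-8, Go's byte-wise
// string ordering (sort.Strings) matches the required code-point order.
func normalizeOutputPaths(paths []string) ([]string, error) {
	out := make([]string, 0, len(paths))
	for _, p := range paths {
		if strings.HasPrefix(p, "/") || strings.HasSuffix(p, "/") {
			return nil, fmt.Errorf("path %q must be relative with no trailing slash", p)
		}
		out = append(out, p)
	}
	sort.Strings(out)
	return out, nil
}

func main() {
	got, _ := normalizeOutputPaths([]string{"out/b.txt", "out/a.txt"})
	fmt.Println(got) // [out/a.txt out/b.txt]
}
```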
diff --git a/remotebuildexecution/v1alpha/remotebuildexecution-api.json b/remotebuildexecution/v1alpha/remotebuildexecution-api.json
index c683632..4c0ca8c 100644
--- a/remotebuildexecution/v1alpha/remotebuildexecution-api.json
+++ b/remotebuildexecution/v1alpha/remotebuildexecution-api.json
@@ -382,7 +382,7 @@
       }
     }
   },
-  "revision": "20190103",
+  "revision": "20190116",
   "rootUrl": "https://admin-remotebuildexecution.googleapis.com/",
   "schemas": {
     "BuildBazelRemoteExecutionV2Action": {
@@ -483,21 +483,21 @@
           "type": "array"
         },
         "environmentVariables": {
-          "description": "The environment variables to set when running the program. The worker may\nprovide its own default environment variables; these defaults can be\noverridden using this field. Additional variables can also be specified.\n\nIn order to ensure that equivalent `Command`s always hash to the same\nvalue, the environment variables MUST be lexicographically sorted by name.\nSorting of strings is done by code point, equivalently, by the UTF-8 bytes.",
+          "description": "The environment variables to set when running the program. The worker may\nprovide its own default environment variables; these defaults can be\noverridden using this field. Additional variables can also be specified.\n\nIn order to ensure that equivalent\nCommands always hash to the same\nvalue, the environment variables MUST be lexicographically sorted by name.\nSorting of strings is done by code point, equivalently, by the UTF-8 bytes.",
           "items": {
             "$ref": "BuildBazelRemoteExecutionV2CommandEnvironmentVariable"
           },
           "type": "array"
         },
         "outputDirectories": {
-          "description": "A list of the output directories that the client expects to retrieve from\nthe action. Only the contents of the indicated directories (recursively\nincluding the contents of their subdirectories) will be\nreturned, as well as files listed in `output_files`. Other files that may\nbe created during command execution are discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path. The special value of empty string is allowed,\nalthough not recommended, and can be used to capture the entire working\ndirectory tree, including inputs.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output directory cannot be duplicated, be a parent of another output\ndirectory, be a parent of a listed output file, or have the same path as\nany of the listed output files.",
+          "description": "A list of the output directories that the client expects to retrieve from\nthe action. Only the listed directories will be returned (an entire\ndirectory structure will be returned as a\nTree message digest, see\nOutputDirectory), as\nwell as files listed in `output_files`. Other files or directories that\nmay be created during command execution are discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path. The special value of empty string is allowed,\nalthough not recommended, and can be used to capture the entire working\ndirectory tree, including inputs.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output directory cannot be duplicated or have the same path as any of\nthe listed output files.\n\nDirectories leading up to the output directories (but not the output\ndirectories themselves) are created by the worker prior to execution, even\nif they are not explicitly part of the input root.",
           "items": {
             "type": "string"
           },
           "type": "array"
         },
         "outputFiles": {
-          "description": "A list of the output files that the client expects to retrieve from the\naction. Only the listed files, as well as directories listed in\n`output_directories`, will be returned to the client as output.\nOther files that may be created during command execution are discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output file cannot be duplicated, be a parent of another output file, be\na child of a listed output directory, or have the same path as any of the\nlisted output directories.",
+          "description": "A list of the output files that the client expects to retrieve from the\naction. Only the listed files, as well as directories listed in\n`output_directories`, will be returned to the client as output.\nOther files or directories that may be created during command execution\nare discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output file cannot be duplicated, be a parent of another output file, or\nhave the same path as any of the listed output directories.\n\nDirectories leading up to the output files are created by the worker prior\nto execution, even if they are not explicitly part of the input root.",
           "items": {
             "type": "string"
           },
@@ -530,7 +530,7 @@
       "type": "object"
     },
     "BuildBazelRemoteExecutionV2Digest": {
-      "description": "A content digest. A digest for a given blob consists of the size of the blob\nand its hash. The hash algorithm to use is defined by the server, but servers\nSHOULD use SHA-256.\n\nThe size is considered to be an integral part of the digest and cannot be\nseparated. That is, even if the `hash` field is correctly specified but\n`size_bytes` is not, the server MUST reject the request.\n\nThe reason for including the size in the digest is as follows: in a great\nmany cases, the server needs to know the size of the blob it is about to work\nwith prior to starting an operation with it, such as flattening Merkle tree\nstructures or streaming it to a worker. Technically, the server could\nimplement a separate metadata store, but this results in a significantly more\ncomplicated implementation as opposed to having the client specify the size\nup-front (or storing the size along with the digest in every message where\ndigests are embedded). This does mean that the API leaks some implementation\ndetails of (what we consider to be) a reasonable server implementation, but\nwe consider this to be a worthwhile tradeoff.\n\nWhen a `Digest` is used to refer to a proto message, it always refers to the\nmessage in binary encoded form. To ensure consistent hashing, clients and\nservers MUST ensure that they serialize messages according to the following\nrules, even if there are alternate valid encodings for the same message.\n- Fields are serialized in tag order.\n- There are no unknown fields.\n- There are no duplicate fields.\n- Fields are serialized according to the default semantics for their type.\n\nMost protocol buffer implementations will always follow these rules when\nserializing, but care should be taken to avoid shortcuts. For instance,\nconcatenating two messages to merge them may produce duplicate fields.",
+      "description": "A content digest. A digest for a given blob consists of the size of the blob\nand its hash. The hash algorithm to use is defined by the server, but servers\nSHOULD use SHA-256.\n\nThe size is considered to be an integral part of the digest and cannot be\nseparated. That is, even if the `hash` field is correctly specified but\n`size_bytes` is not, the server MUST reject the request.\n\nThe reason for including the size in the digest is as follows: in a great\nmany cases, the server needs to know the size of the blob it is about to work\nwith prior to starting an operation with it, such as flattening Merkle tree\nstructures or streaming it to a worker. Technically, the server could\nimplement a separate metadata store, but this results in a significantly more\ncomplicated implementation as opposed to having the client specify the size\nup-front (or storing the size along with the digest in every message where\ndigests are embedded). This does mean that the API leaks some implementation\ndetails of (what we consider to be) a reasonable server implementation, but\nwe consider this to be a worthwhile tradeoff.\n\nWhen a `Digest` is used to refer to a proto message, it always refers to the\nmessage in binary encoded form. To ensure consistent hashing, clients and\nservers MUST ensure that they serialize messages according to the following\nrules, even if there are alternate valid encodings for the same message:\n\n* Fields are serialized in tag order.\n* There are no unknown fields.\n* There are no duplicate fields.\n* Fields are serialized according to the default semantics for their type.\n\nMost protocol buffer implementations will always follow these rules when\nserializing, but care should be taken to avoid shortcuts. For instance,\nconcatenating two messages to merge them may produce duplicate fields.",
       "id": "BuildBazelRemoteExecutionV2Digest",
       "properties": {
         "hash": {
@@ -546,7 +546,7 @@
       "type": "object"
     },
     "BuildBazelRemoteExecutionV2Directory": {
-      "description": "A `Directory` represents a directory node in a file tree, containing zero or\nmore children FileNodes,\nDirectoryNodes and\nSymlinkNodes.\nEach `Node` contains its name in the directory, either the digest of its\ncontent (either a file blob or a `Directory` proto) or a symlink target, as\nwell as possibly some metadata about the file or directory.\n\nIn order to ensure that two equivalent directory trees hash to the same\nvalue, the following restrictions MUST be obeyed when constructing a\na `Directory`:\n  - Every child in the directory must have a path of exactly one segment.\n    Multiple levels of directory hierarchy may not be collapsed.\n  - Each child in the directory must have a unique path segment (file name).\n  - The files, directories and symlinks in the directory must each be sorted\n    in lexicographical order by path. The path strings must be sorted by code\n    point, equivalently, by UTF-8 bytes.\n\nA `Directory` that obeys the restrictions is said to be in canonical form.\n\nAs an example, the following could be used for a file named `bar` and a\ndirectory named `foo` with an executable file named `baz` (hashes shortened\nfor readability):\n\n```json\n// (Directory proto)\n{\n  files: [\n    {\n      name: \"bar\",\n      digest: {\n        hash: \"4a73bc9d03...\",\n        size: 65534\n      }\n    }\n  ],\n  directories: [\n    {\n      name: \"foo\",\n      digest: {\n        hash: \"4cf2eda940...\",\n        size: 43\n      }\n    }\n  ]\n}\n\n// (Directory proto with hash \"4cf2eda940...\" and size 43)\n{\n  files: [\n    {\n      name: \"baz\",\n      digest: {\n        hash: \"b2c941073e...\",\n        size: 1294,\n      },\n      is_executable: true\n    }\n  ]\n}\n```",
+      "description": "A `Directory` represents a directory node in a file tree, containing zero or\nmore children FileNodes,\nDirectoryNodes and\nSymlinkNodes.\nEach `Node` contains its name in the directory, either the digest of its\ncontent (either a file blob or a `Directory` proto) or a symlink target, as\nwell as possibly some metadata about the file or directory.\n\nIn order to ensure that two equivalent directory trees hash to the same\nvalue, the following restrictions MUST be obeyed when constructing a\na `Directory`:\n\n* Every child in the directory must have a path of exactly one segment.\n  Multiple levels of directory hierarchy may not be collapsed.\n* Each child in the directory must have a unique path segment (file name).\n* The files, directories and symlinks in the directory must each be sorted\n  in lexicographical order by path. The path strings must be sorted by code\n  point, equivalently, by UTF-8 bytes.\n\nA `Directory` that obeys the restrictions is said to be in canonical form.\n\nAs an example, the following could be used for a file named `bar` and a\ndirectory named `foo` with an executable file named `baz` (hashes shortened\nfor readability):\n\n```json\n// (Directory proto)\n{\n  files: [\n    {\n      name: \"bar\",\n      digest: {\n        hash: \"4a73bc9d03...\",\n        size: 65534\n      }\n    }\n  ],\n  directories: [\n    {\n      name: \"foo\",\n      digest: {\n        hash: \"4cf2eda940...\",\n        size: 43\n      }\n    }\n  ]\n}\n\n// (Directory proto with hash \"4cf2eda940...\" and size 43)\n{\n  files: [\n    {\n      name: \"baz\",\n      digest: {\n        hash: \"b2c941073e...\",\n        size: 1294,\n      },\n      is_executable: true\n    }\n  ]\n}\n```",
       "id": "BuildBazelRemoteExecutionV2Directory",
       "properties": {
         "directories": {
@@ -632,6 +632,10 @@
           "description": "True if the result was served from cache, false if it was executed.",
           "type": "boolean"
         },
+        "message": {
+          "description": "Freeform informational message with details on the execution of the action\nthat may be displayed to the user upon failure or when requested explicitly.",
+          "type": "string"
+        },
         "result": {
           "$ref": "BuildBazelRemoteExecutionV2ActionResult",
           "description": "The result of the action."
@@ -819,7 +823,7 @@
       "type": "object"
     },
     "BuildBazelRemoteExecutionV2RequestMetadata": {
-      "description": "An optional Metadata to attach to any RPC request to tell the server about an\nexternal context of the request. The server may use this for logging or other\npurposes. To use it, the client attaches the header to the call using the\ncanonical proto serialization:\nname: build.bazel.remote.execution.v2.requestmetadata-bin\ncontents: the base64 encoded binary RequestMetadata message.",
+      "description": "An optional Metadata to attach to any RPC request to tell the server about an\nexternal context of the request. The server may use this for logging or other\npurposes. To use it, the client attaches the header to the call using the\ncanonical proto serialization:\n\n* name: `build.bazel.remote.execution.v2.requestmetadata-bin`\n* contents: the base64 encoded binary `RequestMetadata` message.",
       "id": "BuildBazelRemoteExecutionV2RequestMetadata",
       "properties": {
         "actionId": {
@@ -1042,6 +1046,10 @@
           "description": "The location is a GCP region. Currently only `us-central1` is supported.",
           "type": "string"
         },
+        "loggingEnabled": {
+          "description": "Output only. Whether stack driver logging is enabled for the instance.",
+          "type": "boolean"
+        },
         "name": {
           "description": "Output only. Instance resource name formatted as:\n`projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.\nName should not be populated when creating an instance since it is provided\nin the `instance_id` field.",
           "type": "string"
@@ -1149,7 +1157,7 @@
           "type": "string"
         },
         "reserved": {
-          "description": "Output only. `reserved=true` means the worker is reserved and won't be\npreempted.",
+          "description": "Determines whether the worker is reserved (and therefore won't be\npreempted).\nSee [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for more\ndetails.",
           "type": "boolean"
         }
       },
@@ -1614,17 +1622,8 @@
           "format": "int32",
           "type": "integer"
         },
-        "outputs": {
-          "$ref": "GoogleDevtoolsRemoteworkersV1test2Digest",
-          "description": "The output files. The blob referenced by the digest should contain\none of the following (implementation-dependent):\n   * A marshalled DirectoryMetadata of the returned filesystem\n   * A LUCI-style .isolated file"
-        },
-        "overhead": {
-          "description": "The amount of time *not* spent executing the command (ie\nuploading/downloading files).",
-          "format": "google-duration",
-          "type": "string"
-        },
-        "statistics": {
-          "description": "Implementation-dependent statistics about the task. Both servers and bots\nmay define messages which can be encoded here; bots are free to provide\nstatistics in multiple formats, and servers are free to choose one or more\nof the values to process and ignore others. In particular, it is *not*\nconsidered an error for the bot to provide the server with a field that it\ndoesn't know about.",
+        "metadata": {
+          "description": "Implementation-dependent metadata about the task. Both servers and bots\nmay define messages which can be encoded here; bots are free to provide\nmetadata in multiple formats, and servers are free to choose one or more\nof the values to process and ignore others. In particular, it is *not*\nconsidered an error for the bot to provide the server with a field that it\ndoesn't know about.",
           "items": {
             "additionalProperties": {
               "description": "Properties of the object. Contains field @type with type URL.",
@@ -1634,6 +1633,15 @@
           },
           "type": "array"
         },
+        "outputs": {
+          "$ref": "GoogleDevtoolsRemoteworkersV1test2Digest",
+          "description": "The output files. The blob referenced by the digest should contain\none of the following (implementation-dependent):\n   * A marshalled DirectoryMetadata of the returned filesystem\n   * A LUCI-style .isolated file"
+        },
+        "overhead": {
+          "description": "The amount of time *not* spent executing the command (ie\nuploading/downloading files).",
+          "format": "google-duration",
+          "type": "string"
+        },
         "status": {
           "$ref": "GoogleRpcStatus",
           "description": "An overall status for the command. For example, if the command timed out,\nthis might have a code of DEADLINE_EXCEEDED; if it was killed by the OS for\nmemory exhaustion, it might have a code of RESOURCE_EXHAUSTED."
diff --git a/remotebuildexecution/v1alpha/remotebuildexecution-gen.go b/remotebuildexecution/v1alpha/remotebuildexecution-gen.go
index c45f3d1..7f61995 100644
--- a/remotebuildexecution/v1alpha/remotebuildexecution-gen.go
+++ b/remotebuildexecution/v1alpha/remotebuildexecution-gen.go
@@ -470,8 +470,8 @@
 	// overridden using this field. Additional variables can also be
 	// specified.
 	//
-	// In order to ensure that equivalent `Command`s always hash to the
-	// same
+	// In order to ensure that equivalent
+	// Commands always hash to the same
 	// value, the environment variables MUST be lexicographically sorted by
 	// name.
 	// Sorting of strings is done by code point, equivalently, by the UTF-8
@@ -480,12 +480,14 @@
 
 	// OutputDirectories: A list of the output directories that the client
 	// expects to retrieve from
-	// the action. Only the contents of the indicated directories
-	// (recursively
-	// including the contents of their subdirectories) will be
-	// returned, as well as files listed in `output_files`. Other files that
-	// may
-	// be created during command execution are discarded.
+	// the action. Only the listed directories will be returned (an
+	// entire
+	// directory structure will be returned as a
+	// Tree message digest, see
+	// OutputDirectory), as
+	// well as files listed in `output_files`. Other files or directories
+	// that
+	// may be created during command execution are discarded.
 	//
 	// The paths are relative to the working directory of the action
 	// execution.
@@ -507,11 +509,15 @@
 	// UTF-8
 	// bytes).
 	//
-	// An output directory cannot be duplicated, be a parent of another
+	// An output directory cannot be duplicated or have the same path as any
+	// of
+	// the listed output files.
+	//
+	// Directories leading up to the output directories (but not the
 	// output
-	// directory, be a parent of a listed output file, or have the same path
-	// as
-	// any of the listed output files.
+	// directories themselves) are created by the worker prior to execution,
+	// even
+	// if they are not explicitly part of the input root.
 	OutputDirectories []string `json:"outputDirectories,omitempty"`
 
 	// OutputFiles: A list of the output files that the client expects to
@@ -519,8 +525,9 @@
 	// action. Only the listed files, as well as directories listed
 	// in
 	// `output_directories`, will be returned to the client as output.
-	// Other files that may be created during command execution are
-	// discarded.
+	// Other files or directories that may be created during command
+	// execution
+	// are discarded.
 	//
 	// The paths are relative to the working directory of the action
 	// execution.
@@ -539,10 +546,13 @@
 	// bytes).
 	//
 	// An output file cannot be duplicated, be a parent of another output
-	// file, be
-	// a child of a listed output directory, or have the same path as any of
-	// the
-	// listed output directories.
+	// file, or
+	// have the same path as any of the listed output
+	// directories.
+	//
+	// Directories leading up to the output files are created by the worker
+	// prior
+	// to execution, even if they are not explicitly part of the input root.
 	OutputFiles []string `json:"outputFiles,omitempty"`
 
 	// Platform: The platform requirements for the execution environment.
@@ -657,11 +667,12 @@
 // servers MUST ensure that they serialize messages according to the
 // following
 // rules, even if there are alternate valid encodings for the same
-// message.
-// - Fields are serialized in tag order.
-// - There are no unknown fields.
-// - There are no duplicate fields.
-// - Fields are serialized according to the default semantics for their
+// message:
+//
+// * Fields are serialized in tag order.
+// * There are no unknown fields.
+// * There are no duplicate fields.
+// * Fields are serialized according to the default semantics for their
 // type.
 //
 // Most protocol buffer implementations will always follow these rules
@@ -718,16 +729,17 @@
 // value, the following restrictions MUST be obeyed when constructing
 // a
 // a `Directory`:
-//   - Every child in the directory must have a path of exactly one
+//
+// * Every child in the directory must have a path of exactly one
 // segment.
-//     Multiple levels of directory hierarchy may not be collapsed.
-//   - Each child in the directory must have a unique path segment (file
+//   Multiple levels of directory hierarchy may not be collapsed.
+// * Each child in the directory must have a unique path segment (file
 // name).
-//   - The files, directories and symlinks in the directory must each be
+// * The files, directories and symlinks in the directory must each be
 // sorted
-//     in lexicographical order by path. The path strings must be sorted
+//   in lexicographical order by path. The path strings must be sorted
 // by code
-//     point, equivalently, by UTF-8 bytes.
+//   point, equivalently, by UTF-8 bytes.
 //
 // A `Directory` that obeys the restrictions is said to be in canonical
 // form.
@@ -910,6 +922,12 @@
 	// was executed.
 	CachedResult bool `json:"cachedResult,omitempty"`
 
+	// Message: Freeform informational message with details on the execution
+	// of the action
+	// that may be displayed to the user upon failure or when requested
+	// explicitly.
+	Message string `json:"message,omitempty"`
+
 	// Result: The result of the action.
 	Result *BuildBazelRemoteExecutionV2ActionResult `json:"result,omitempty"`
 
@@ -1350,8 +1368,9 @@
 // purposes. To use it, the client attaches the header to the call using
 // the
 // canonical proto serialization:
-// name: build.bazel.remote.execution.v2.requestmetadata-bin
-// contents: the base64 encoded binary RequestMetadata message.
+//
+// * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
+// * contents: the base64 encoded binary `RequestMetadata` message.
 type BuildBazelRemoteExecutionV2RequestMetadata struct {
 	// ActionId: An identifier that ties multiple requests to the same
 	// action.
@@ -1829,6 +1848,10 @@
 	// is supported.
 	Location string `json:"location,omitempty"`
 
+	// LoggingEnabled: Output only. Whether Stackdriver logging is enabled
+	// for the instance.
+	LoggingEnabled bool `json:"loggingEnabled,omitempty"`
+
 	// Name: Output only. Instance resource name formatted
 	// as:
 	// `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
@@ -2072,9 +2095,12 @@
 	// Platforms](https://cloud.google.com/compute/docs/cpu-platforms).
 	MinCpuPlatform string `json:"minCpuPlatform,omitempty"`
 
-	// Reserved: Output only. `reserved=true` means the worker is reserved
-	// and won't be
-	// preempted.
+	// Reserved: Determines whether the worker is reserved (and therefore
+	// won't be
+	// preempted).
+	// See [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for
+	// more
+	// details.
 	Reserved bool `json:"reserved,omitempty"`
 
 	// ForceSendFields is a list of field names (e.g. "DiskSizeGb") to
@@ -3279,6 +3305,19 @@
 	// `status` has a code of OK (otherwise it may simply be unset).
 	ExitCode int64 `json:"exitCode,omitempty"`
 
+	// Metadata: Implementation-dependent metadata about the task. Both
+	// servers and bots
+	// may define messages which can be encoded here; bots are free to
+	// provide
+	// metadata in multiple formats, and servers are free to choose one or
+	// more
+	// of the values to process and ignore others. In particular, it is
+	// *not*
+	// considered an error for the bot to provide the server with a field
+	// that it
+	// doesn't know about.
+	Metadata []googleapi.RawMessage `json:"metadata,omitempty"`
+
 	// Outputs: The output files. The blob referenced by the digest should
 	// contain
 	// one of the following (implementation-dependent):
@@ -3291,19 +3330,6 @@
 	// uploading/downloading files).
 	Overhead string `json:"overhead,omitempty"`
 
-	// Statistics: Implementation-dependent statistics about the task. Both
-	// servers and bots
-	// may define messages which can be encoded here; bots are free to
-	// provide
-	// statistics in multiple formats, and servers are free to choose one or
-	// more
-	// of the values to process and ignore others. In particular, it is
-	// *not*
-	// considered an error for the bot to provide the server with a field
-	// that it
-	// doesn't know about.
-	Statistics []googleapi.RawMessage `json:"statistics,omitempty"`
-
 	// Status: An overall status for the command. For example, if the
 	// command timed out,
 	// this might have a code of DEADLINE_EXCEEDED; if it was killed by the
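Both remotebuildexecution versions replace the `statistics` field on the remote-workers `CommandResult` with `metadata`, a list of `googleapi.RawMessage` values whose entries carry an `@type` URL. A sketch of building one such entry; the `BotStats` shape and its type URL are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

// BotStats is a hypothetical bot-defined message carried in `metadata`.
type BotStats struct {
	Type      string `json:"@type"` // type URL, as the schema describes
	CacheHits int64  `json:"cacheHits"`
}

func main() {
	b, err := json.Marshal(BotStats{
		Type:      "type.example.com/example.BotStats", // hypothetical type URL
		CacheHits: 42,
	})
	if err != nil {
		panic(err)
	}
	metadata := []googleapi.RawMessage{googleapi.RawMessage(b)}
	fmt.Println(string(metadata[0]))
}
```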
diff --git a/remotebuildexecution/v2/remotebuildexecution-api.json b/remotebuildexecution/v2/remotebuildexecution-api.json
index 33fa955..77d1d25 100644
--- a/remotebuildexecution/v2/remotebuildexecution-api.json
+++ b/remotebuildexecution/v2/remotebuildexecution-api.json
@@ -107,7 +107,7 @@
     "actionResults": {
       "methods": {
         "get": {
-          "description": "Retrieve a cached execution result.\n\nErrors:\n* `NOT_FOUND`: The requested `ActionResult` is not in the cache.",
+          "description": "Retrieve a cached execution result.\n\nErrors:\n\n* `NOT_FOUND`: The requested `ActionResult` is not in the cache.",
           "flatPath": "v2/{v2Id}/actionResults/{hash}/{sizeBytes}",
           "httpMethod": "GET",
           "id": "remotebuildexecution.actionResults.get",
@@ -147,7 +147,7 @@
           ]
         },
         "update": {
-          "description": "Upload a new execution result.\n\nThis method is intended for servers which implement the distributed cache\nindependently of the\nExecution API. As a\nresult, it is OPTIONAL for servers to implement.\n\nIn order to allow the server to perform access control based on the type of\naction, and to assist with client debugging, the client MUST first upload\nthe Action that produced the\nresult, along with its\nCommand, into the\n`ContentAddressableStorage`.\n\nErrors:\n* `UNIMPLEMENTED`: This method is not supported by the server.\n* `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the\n  entry to the cache.",
+          "description": "Upload a new execution result.\n\nIn order to allow the server to perform access control based on the type of\naction, and to assist with client debugging, the client MUST first upload\nthe Action that produced the\nresult, along with its\nCommand, into the\n`ContentAddressableStorage`.\n\nErrors:\n\n* `INVALID_ARGUMENT`: One or more arguments are invalid.\n* `FAILED_PRECONDITION`: One or more errors occurred in updating the\n  action result, such as a missing command or action.\n* `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the\n  entry to the cache.",
           "flatPath": "v2/{v2Id}/actionResults/{hash}/{sizeBytes}",
           "httpMethod": "PUT",
           "id": "remotebuildexecution.actionResults.update",
@@ -200,7 +200,7 @@
     "actions": {
       "methods": {
         "execute": {
-          "description": "Execute an action remotely.\n\nIn order to execute an action, the client must first upload all of the\ninputs, the\nCommand to run, and the\nAction into the\nContentAddressableStorage.\nIt then calls `Execute` with an `action_digest` referring to them. The\nserver will run the action and eventually return the result.\n\nThe input `Action`'s fields MUST meet the various canonicalization\nrequirements specified in the documentation for their types so that it has\nthe same digest as other logically equivalent `Action`s. The server MAY\nenforce the requirements and return errors if a non-canonical input is\nreceived. It MAY also proceed without verifying some or all of the\nrequirements, such as for performance reasons. If the server does not\nverify the requirement, then it will treat the `Action` as distinct from\nanother logically equivalent action if they hash differently.\n\nReturns a stream of\ngoogle.longrunning.Operation messages\ndescribing the resulting execution, with eventual `response`\nExecuteResponse. The\n`metadata` on the operation is of type\nExecuteOperationMetadata.\n\nIf the client remains connected after the first response is returned after\nthe server, then updates are streamed as if the client had called\nWaitExecution\nuntil the execution completes or the request reaches an error. The\noperation can also be queried using Operations\nAPI.\n\nThe server NEED NOT implement other methods or functionality of the\nOperations API.\n\nErrors discovered during creation of the `Operation` will be reported\nas gRPC Status errors, while errors that occurred while running the\naction will be reported in the `status` field of the `ExecuteResponse`. The\nserver MUST NOT set the `error` field of the `Operation` proto.\nThe possible errors include:\n* `INVALID_ARGUMENT`: One or more arguments are invalid.\n* `FAILED_PRECONDITION`: One or more errors occurred in setting up the\n  action requested, such as a missing input or command or no worker being\n  available. The client may be able to fix the errors and retry.\n* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run\n  the action.\n* `UNAVAILABLE`: Due to a transient condition, such as all workers being\n  occupied (and the server does not support a queue), the action could not\n  be started. The client should retry.\n* `INTERNAL`: An internal error occurred in the execution engine or the\n  worker.\n* `DEADLINE_EXCEEDED`: The execution timed out.\n\nIn the case of a missing input or command, the server SHOULD additionally\nsend a PreconditionFailure error detail\nwhere, for each requested blob not present in the CAS, there is a\n`Violation` with a `type` of `MISSING` and a `subject` of\n`\"blobs/{hash}/{size}\"` indicating the digest of the missing blob.",
+          "description": "Execute an action remotely.\n\nIn order to execute an action, the client must first upload all of the\ninputs, the\nCommand to run, and the\nAction into the\nContentAddressableStorage.\nIt then calls `Execute` with an `action_digest` referring to them. The\nserver will run the action and eventually return the result.\n\nThe input `Action`'s fields MUST meet the various canonicalization\nrequirements specified in the documentation for their types so that it has\nthe same digest as other logically equivalent `Action`s. The server MAY\nenforce the requirements and return errors if a non-canonical input is\nreceived. It MAY also proceed without verifying some or all of the\nrequirements, such as for performance reasons. If the server does not\nverify the requirement, then it will treat the `Action` as distinct from\nanother logically equivalent action if they hash differently.\n\nReturns a stream of\ngoogle.longrunning.Operation messages\ndescribing the resulting execution, with eventual `response`\nExecuteResponse. The\n`metadata` on the operation is of type\nExecuteOperationMetadata.\n\nIf the client remains connected after the first response is returned after\nthe server, then updates are streamed as if the client had called\nWaitExecution\nuntil the execution completes or the request reaches an error. The\noperation can also be queried using Operations\nAPI.\n\nThe server NEED NOT implement other methods or functionality of the\nOperations API.\n\nErrors discovered during creation of the `Operation` will be reported\nas gRPC Status errors, while errors that occurred while running the\naction will be reported in the `status` field of the `ExecuteResponse`. The\nserver MUST NOT set the `error` field of the `Operation` proto.\nThe possible errors include:\n\n* `INVALID_ARGUMENT`: One or more arguments are invalid.\n* `FAILED_PRECONDITION`: One or more errors occurred in setting up the\n  action requested, such as a missing input or command or no worker being\n  available. The client may be able to fix the errors and retry.\n* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run\n  the action.\n* `UNAVAILABLE`: Due to a transient condition, such as all workers being\n  occupied (and the server does not support a queue), the action could not\n  be started. The client should retry.\n* `INTERNAL`: An internal error occurred in the execution engine or the\n  worker.\n* `DEADLINE_EXCEEDED`: The execution timed out.\n* `CANCELLED`: The operation was cancelled by the client. This status is\n  only possible if the server implements the Operations API CancelOperation\n  method, and it was called for the current execution.\n\nIn the case of a missing input or command, the server SHOULD additionally\nsend a PreconditionFailure error detail\nwhere, for each requested blob not present in the CAS, there is a\n`Violation` with a `type` of `MISSING` and a `subject` of\n`\"blobs/{hash}/{size}\"` indicating the digest of the missing blob.",
           "flatPath": "v2/{v2Id}/actions:execute",
           "httpMethod": "POST",
           "id": "remotebuildexecution.actions.execute",
@@ -232,7 +232,7 @@
     "blobs": {
       "methods": {
         "batchRead": {
-          "description": "Download many blobs at once.\n\nThe server may enforce a limit of the combined total size of blobs\nto be downloaded using this API. This limit may be obtained using the\nCapabilities API.\nRequests exceeding the limit should either be split into smaller\nchunks or downloaded using the\nByteStream API, as appropriate.\n\nThis request is equivalent to calling a Bytestream `Read` request\non each individual blob, in parallel. The requests may succeed or fail\nindependently.\n\nErrors:\n* `INVALID_ARGUMENT`: The client attempted to read more than the\n  server supported limit.\n\nEvery error on individual read will be returned in the corresponding digest\nstatus.",
+          "description": "Download many blobs at once.\n\nThe server may enforce a limit of the combined total size of blobs\nto be downloaded using this API. This limit may be obtained using the\nCapabilities API.\nRequests exceeding the limit should either be split into smaller\nchunks or downloaded using the\nByteStream API, as appropriate.\n\nThis request is equivalent to calling a Bytestream `Read` request\non each individual blob, in parallel. The requests may succeed or fail\nindependently.\n\nErrors:\n\n* `INVALID_ARGUMENT`: The client attempted to read more than the\n  server supported limit.\n\nEvery error on individual read will be returned in the corresponding digest\nstatus.",
           "flatPath": "v2/{v2Id}/blobs:batchRead",
           "httpMethod": "POST",
           "id": "remotebuildexecution.blobs.batchRead",
@@ -260,7 +260,7 @@
           ]
         },
         "batchUpdate": {
-          "description": "Upload many blobs at once.\n\nThe server may enforce a limit of the combined total size of blobs\nto be uploaded using this API. This limit may be obtained using the\nCapabilities API.\nRequests exceeding the limit should either be split into smaller\nchunks or uploaded using the\nByteStream API, as appropriate.\n\nThis request is equivalent to calling a Bytestream `Write` request\non each individual blob, in parallel. The requests may succeed or fail\nindependently.\n\nErrors:\n* `INVALID_ARGUMENT`: The client attempted to upload more than the\n  server supported limit.\n\nIndividual requests may return the following errors, additionally:\n* `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.\n* `INVALID_ARGUMENT`: The\nDigest does not match the\nprovided data.",
+          "description": "Upload many blobs at once.\n\nThe server may enforce a limit of the combined total size of blobs\nto be uploaded using this API. This limit may be obtained using the\nCapabilities API.\nRequests exceeding the limit should either be split into smaller\nchunks or uploaded using the\nByteStream API, as appropriate.\n\nThis request is equivalent to calling a Bytestream `Write` request\non each individual blob, in parallel. The requests may succeed or fail\nindependently.\n\nErrors:\n\n* `INVALID_ARGUMENT`: The client attempted to upload more than the\n  server supported limit.\n\nIndividual requests may return the following errors, additionally:\n\n* `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.\n* `INVALID_ARGUMENT`: The\nDigest does not match the\nprovided data.",
           "flatPath": "v2/{v2Id}/blobs:batchUpdate",
           "httpMethod": "POST",
           "id": "remotebuildexecution.blobs.batchUpdate",
@@ -430,7 +430,7 @@
       }
     }
   },
-  "revision": "20190103",
+  "revision": "20190116",
   "rootUrl": "https://remotebuildexecution.googleapis.com/",
   "schemas": {
     "BuildBazelRemoteExecutionV2Action": {
@@ -651,10 +651,10 @@
         "digestFunction": {
           "description": "All the digest functions supported by the remote cache.\nRemote cache may support multiple digest functions simultaneously.",
           "enumDescriptions": [
-            "",
-            "",
-            "",
-            ""
+            "It is an error for the server to return this value.",
+            "The Sha-256 digest function.",
+            "The Sha-1 digest function.",
+            "The MD5 digest function."
           ],
           "items": {
             "enum": [
@@ -681,7 +681,7 @@
           ],
           "enumDescriptions": [
             "",
-            "Server will return an INVALID_ARGUMENT on input symlinks with absolute targets.\nIf an action tries to create an output symlink with an absolute target, a\nFAILED_PRECONDITION will be returned.",
+            "Server will return an `INVALID_ARGUMENT` on input symlinks with absolute\ntargets.\nIf an action tries to create an output symlink with an absolute target, a\n`FAILED_PRECONDITION` will be returned.",
             "Server will allow symlink targets to escape the input root tree, possibly\nresulting in non-hermetic builds."
           ],
           "type": "string"
@@ -701,21 +701,21 @@
           "type": "array"
         },
         "environmentVariables": {
-          "description": "The environment variables to set when running the program. The worker may\nprovide its own default environment variables; these defaults can be\noverridden using this field. Additional variables can also be specified.\n\nIn order to ensure that equivalent `Command`s always hash to the same\nvalue, the environment variables MUST be lexicographically sorted by name.\nSorting of strings is done by code point, equivalently, by the UTF-8 bytes.",
+          "description": "The environment variables to set when running the program. The worker may\nprovide its own default environment variables; these defaults can be\noverridden using this field. Additional variables can also be specified.\n\nIn order to ensure that equivalent\nCommands always hash to the same\nvalue, the environment variables MUST be lexicographically sorted by name.\nSorting of strings is done by code point, equivalently, by the UTF-8 bytes.",
           "items": {
             "$ref": "BuildBazelRemoteExecutionV2CommandEnvironmentVariable"
           },
           "type": "array"
         },
         "outputDirectories": {
-          "description": "A list of the output directories that the client expects to retrieve from\nthe action. Only the contents of the indicated directories (recursively\nincluding the contents of their subdirectories) will be\nreturned, as well as files listed in `output_files`. Other files that may\nbe created during command execution are discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path. The special value of empty string is allowed,\nalthough not recommended, and can be used to capture the entire working\ndirectory tree, including inputs.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output directory cannot be duplicated, be a parent of another output\ndirectory, be a parent of a listed output file, or have the same path as\nany of the listed output files.",
+          "description": "A list of the output directories that the client expects to retrieve from\nthe action. Only the listed directories will be returned (an entire\ndirectory structure will be returned as a\nTree message digest, see\nOutputDirectory), as\nwell as files listed in `output_files`. Other files or directories that\nmay be created during command execution are discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path. The special value of empty string is allowed,\nalthough not recommended, and can be used to capture the entire working\ndirectory tree, including inputs.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output directory cannot be duplicated or have the same path as any of\nthe listed output files.\n\nDirectories leading up to the output directories (but not the output\ndirectories themselves) are created by the worker prior to execution, even\nif they are not explicitly part of the input root.",
           "items": {
             "type": "string"
           },
           "type": "array"
         },
         "outputFiles": {
-          "description": "A list of the output files that the client expects to retrieve from the\naction. Only the listed files, as well as directories listed in\n`output_directories`, will be returned to the client as output.\nOther files that may be created during command execution are discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output file cannot be duplicated, be a parent of another output file, be\na child of a listed output directory, or have the same path as any of the\nlisted output directories.",
+          "description": "A list of the output files that the client expects to retrieve from the\naction. Only the listed files, as well as directories listed in\n`output_directories`, will be returned to the client as output.\nOther files or directories that may be created during command execution\nare discarded.\n\nThe paths are relative to the working directory of the action execution.\nThe paths are specified using a single forward slash (`/`) as a path\nseparator, even if the execution platform natively uses a different\nseparator. The path MUST NOT include a trailing slash, nor a leading slash,\nbeing a relative path.\n\nIn order to ensure consistent hashing of the same Action, the output paths\nMUST be sorted lexicographically by code point (or, equivalently, by UTF-8\nbytes).\n\nAn output file cannot be duplicated, be a parent of another output file, or\nhave the same path as any of the listed output directories.\n\nDirectories leading up to the output files are created by the worker prior\nto execution, even if they are not explicitly part of the input root.",
           "items": {
             "type": "string"
           },
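The sorting requirements above (environment variables by name, output paths by code point) reduce to ordinary byte-wise string sorting in Go, since sorting valid UTF-8 strings by bytes is the same as sorting them by code point. A small illustrative sketch using the generated structs from this package (the values are placeholders):

```go
package main

import (
	"fmt"
	"sort"

	remotebuildexecution "google.golang.org/api/remotebuildexecution/v2"
)

func main() {
	cmd := &remotebuildexecution.BuildBazelRemoteExecutionV2Command{
		Arguments: []string{"/bin/cc", "-o", "out/app", "src/app.c"},
		EnvironmentVariables: []*remotebuildexecution.BuildBazelRemoteExecutionV2CommandEnvironmentVariable{
			{Name: "PATH", Value: "/usr/bin"},
			{Name: "LANG", Value: "C"},
		},
		OutputFiles:       []string{"out/app", "out/app.map"},
		OutputDirectories: []string{"out/gen"},
	}
	// Canonicalization: environment variables MUST be sorted by name, and the
	// output path lists MUST be sorted by code point (equivalently, UTF-8 bytes).
	sort.Slice(cmd.EnvironmentVariables, func(i, j int) bool {
		return cmd.EnvironmentVariables[i].Name < cmd.EnvironmentVariables[j].Name
	})
	sort.Strings(cmd.OutputFiles)
	sort.Strings(cmd.OutputDirectories)
	fmt.Println("first env var:", cmd.EnvironmentVariables[0].Name) // LANG
}
```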
@@ -748,7 +748,7 @@
       "type": "object"
     },
     "BuildBazelRemoteExecutionV2Digest": {
-      "description": "A content digest. A digest for a given blob consists of the size of the blob\nand its hash. The hash algorithm to use is defined by the server, but servers\nSHOULD use SHA-256.\n\nThe size is considered to be an integral part of the digest and cannot be\nseparated. That is, even if the `hash` field is correctly specified but\n`size_bytes` is not, the server MUST reject the request.\n\nThe reason for including the size in the digest is as follows: in a great\nmany cases, the server needs to know the size of the blob it is about to work\nwith prior to starting an operation with it, such as flattening Merkle tree\nstructures or streaming it to a worker. Technically, the server could\nimplement a separate metadata store, but this results in a significantly more\ncomplicated implementation as opposed to having the client specify the size\nup-front (or storing the size along with the digest in every message where\ndigests are embedded). This does mean that the API leaks some implementation\ndetails of (what we consider to be) a reasonable server implementation, but\nwe consider this to be a worthwhile tradeoff.\n\nWhen a `Digest` is used to refer to a proto message, it always refers to the\nmessage in binary encoded form. To ensure consistent hashing, clients and\nservers MUST ensure that they serialize messages according to the following\nrules, even if there are alternate valid encodings for the same message.\n- Fields are serialized in tag order.\n- There are no unknown fields.\n- There are no duplicate fields.\n- Fields are serialized according to the default semantics for their type.\n\nMost protocol buffer implementations will always follow these rules when\nserializing, but care should be taken to avoid shortcuts. For instance,\nconcatenating two messages to merge them may produce duplicate fields.",
+      "description": "A content digest. A digest for a given blob consists of the size of the blob\nand its hash. The hash algorithm to use is defined by the server, but servers\nSHOULD use SHA-256.\n\nThe size is considered to be an integral part of the digest and cannot be\nseparated. That is, even if the `hash` field is correctly specified but\n`size_bytes` is not, the server MUST reject the request.\n\nThe reason for including the size in the digest is as follows: in a great\nmany cases, the server needs to know the size of the blob it is about to work\nwith prior to starting an operation with it, such as flattening Merkle tree\nstructures or streaming it to a worker. Technically, the server could\nimplement a separate metadata store, but this results in a significantly more\ncomplicated implementation as opposed to having the client specify the size\nup-front (or storing the size along with the digest in every message where\ndigests are embedded). This does mean that the API leaks some implementation\ndetails of (what we consider to be) a reasonable server implementation, but\nwe consider this to be a worthwhile tradeoff.\n\nWhen a `Digest` is used to refer to a proto message, it always refers to the\nmessage in binary encoded form. To ensure consistent hashing, clients and\nservers MUST ensure that they serialize messages according to the following\nrules, even if there are alternate valid encodings for the same message:\n\n* Fields are serialized in tag order.\n* There are no unknown fields.\n* There are no duplicate fields.\n* Fields are serialized according to the default semantics for their type.\n\nMost protocol buffer implementations will always follow these rules when\nserializing, but care should be taken to avoid shortcuts. For instance,\nconcatenating two messages to merge them may produce duplicate fields.",
       "id": "BuildBazelRemoteExecutionV2Digest",
       "properties": {
         "hash": {
@@ -764,7 +764,7 @@
       "type": "object"
     },
     "BuildBazelRemoteExecutionV2Directory": {
-      "description": "A `Directory` represents a directory node in a file tree, containing zero or\nmore children FileNodes,\nDirectoryNodes and\nSymlinkNodes.\nEach `Node` contains its name in the directory, either the digest of its\ncontent (either a file blob or a `Directory` proto) or a symlink target, as\nwell as possibly some metadata about the file or directory.\n\nIn order to ensure that two equivalent directory trees hash to the same\nvalue, the following restrictions MUST be obeyed when constructing a\na `Directory`:\n  - Every child in the directory must have a path of exactly one segment.\n    Multiple levels of directory hierarchy may not be collapsed.\n  - Each child in the directory must have a unique path segment (file name).\n  - The files, directories and symlinks in the directory must each be sorted\n    in lexicographical order by path. The path strings must be sorted by code\n    point, equivalently, by UTF-8 bytes.\n\nA `Directory` that obeys the restrictions is said to be in canonical form.\n\nAs an example, the following could be used for a file named `bar` and a\ndirectory named `foo` with an executable file named `baz` (hashes shortened\nfor readability):\n\n```json\n// (Directory proto)\n{\n  files: [\n    {\n      name: \"bar\",\n      digest: {\n        hash: \"4a73bc9d03...\",\n        size: 65534\n      }\n    }\n  ],\n  directories: [\n    {\n      name: \"foo\",\n      digest: {\n        hash: \"4cf2eda940...\",\n        size: 43\n      }\n    }\n  ]\n}\n\n// (Directory proto with hash \"4cf2eda940...\" and size 43)\n{\n  files: [\n    {\n      name: \"baz\",\n      digest: {\n        hash: \"b2c941073e...\",\n        size: 1294,\n      },\n      is_executable: true\n    }\n  ]\n}\n```",
+      "description": "A `Directory` represents a directory node in a file tree, containing zero or\nmore children FileNodes,\nDirectoryNodes and\nSymlinkNodes.\nEach `Node` contains its name in the directory, either the digest of its\ncontent (either a file blob or a `Directory` proto) or a symlink target, as\nwell as possibly some metadata about the file or directory.\n\nIn order to ensure that two equivalent directory trees hash to the same\nvalue, the following restrictions MUST be obeyed when constructing a\na `Directory`:\n\n* Every child in the directory must have a path of exactly one segment.\n  Multiple levels of directory hierarchy may not be collapsed.\n* Each child in the directory must have a unique path segment (file name).\n* The files, directories and symlinks in the directory must each be sorted\n  in lexicographical order by path. The path strings must be sorted by code\n  point, equivalently, by UTF-8 bytes.\n\nA `Directory` that obeys the restrictions is said to be in canonical form.\n\nAs an example, the following could be used for a file named `bar` and a\ndirectory named `foo` with an executable file named `baz` (hashes shortened\nfor readability):\n\n```json\n// (Directory proto)\n{\n  files: [\n    {\n      name: \"bar\",\n      digest: {\n        hash: \"4a73bc9d03...\",\n        size: 65534\n      }\n    }\n  ],\n  directories: [\n    {\n      name: \"foo\",\n      digest: {\n        hash: \"4cf2eda940...\",\n        size: 43\n      }\n    }\n  ]\n}\n\n// (Directory proto with hash \"4cf2eda940...\" and size 43)\n{\n  files: [\n    {\n      name: \"baz\",\n      digest: {\n        hash: \"b2c941073e...\",\n        size: 1294,\n      },\n      is_executable: true\n    }\n  ]\n}\n```",
       "id": "BuildBazelRemoteExecutionV2Directory",
       "properties": {
         "directories": {
@@ -873,6 +873,10 @@
           "description": "True if the result was served from cache, false if it was executed.",
           "type": "boolean"
         },
+        "message": {
+          "description": "Freeform informational message with details on the execution of the action\nthat may be displayed to the user upon failure or when requested explicitly.",
+          "type": "string"
+        },
         "result": {
           "$ref": "BuildBazelRemoteExecutionV2ActionResult",
           "description": "The result of the action."
@@ -960,10 +964,10 @@
             "MD5"
           ],
           "enumDescriptions": [
-            "",
-            "",
-            "",
-            ""
+            "It is an error for the server to return this value.",
+            "The Sha-256 digest function.",
+            "The Sha-1 digest function.",
+            "The MD5 digest function."
           ],
           "type": "string"
         },
@@ -1177,7 +1181,7 @@
       "type": "object"
     },
     "BuildBazelRemoteExecutionV2RequestMetadata": {
-      "description": "An optional Metadata to attach to any RPC request to tell the server about an\nexternal context of the request. The server may use this for logging or other\npurposes. To use it, the client attaches the header to the call using the\ncanonical proto serialization:\nname: build.bazel.remote.execution.v2.requestmetadata-bin\ncontents: the base64 encoded binary RequestMetadata message.",
+      "description": "An optional Metadata to attach to any RPC request to tell the server about an\nexternal context of the request. The server may use this for logging or other\npurposes. To use it, the client attaches the header to the call using the\ncanonical proto serialization:\n\n* name: `build.bazel.remote.execution.v2.requestmetadata-bin`\n* contents: the base64 encoded binary `RequestMetadata` message.",
       "id": "BuildBazelRemoteExecutionV2RequestMetadata",
       "properties": {
         "actionId": {
@@ -1293,21 +1297,26 @@
       "type": "object"
     },
     "BuildBazelSemverSemVer": {
+      "description": "The full version of a given tool.",
       "id": "BuildBazelSemverSemVer",
       "properties": {
         "major": {
+          "description": "The major version, e.g 10 for 10.2.3.",
           "format": "int32",
           "type": "integer"
         },
         "minor": {
+          "description": "The minor version, e.g. 2 for 10.2.3.",
           "format": "int32",
           "type": "integer"
         },
         "patch": {
+          "description": "The patch version, e.g 3 for 10.2.3.",
           "format": "int32",
           "type": "integer"
         },
         "prerelease": {
+          "description": "The pre-release version. Either this field or major/minor/patch fields\nmust be filled. They are mutually exclusive. Pre-release versions are\nassumed to be earlier than any released versions.",
           "type": "string"
         }
       },
@@ -1466,6 +1475,10 @@
           "description": "The location is a GCP region. Currently only `us-central1` is supported.",
           "type": "string"
         },
+        "loggingEnabled": {
+          "description": "Output only. Whether stack driver logging is enabled for the instance.",
+          "type": "boolean"
+        },
         "name": {
           "description": "Output only. Instance resource name formatted as:\n`projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.\nName should not be populated when creating an instance since it is provided\nin the `instance_id` field.",
           "type": "string"
@@ -1573,7 +1586,7 @@
           "type": "string"
         },
         "reserved": {
-          "description": "Output only. `reserved=true` means the worker is reserved and won't be\npreempted.",
+          "description": "Determines whether the worker is reserved (and therefore won't be\npreempted).\nSee [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for more\ndetails.",
           "type": "boolean"
         }
       },
@@ -2038,17 +2051,8 @@
           "format": "int32",
           "type": "integer"
         },
-        "outputs": {
-          "$ref": "GoogleDevtoolsRemoteworkersV1test2Digest",
-          "description": "The output files. The blob referenced by the digest should contain\none of the following (implementation-dependent):\n   * A marshalled DirectoryMetadata of the returned filesystem\n   * A LUCI-style .isolated file"
-        },
-        "overhead": {
-          "description": "The amount of time *not* spent executing the command (ie\nuploading/downloading files).",
-          "format": "google-duration",
-          "type": "string"
-        },
-        "statistics": {
-          "description": "Implementation-dependent statistics about the task. Both servers and bots\nmay define messages which can be encoded here; bots are free to provide\nstatistics in multiple formats, and servers are free to choose one or more\nof the values to process and ignore others. In particular, it is *not*\nconsidered an error for the bot to provide the server with a field that it\ndoesn't know about.",
+        "metadata": {
+          "description": "Implementation-dependent metadata about the task. Both servers and bots\nmay define messages which can be encoded here; bots are free to provide\nmetadata in multiple formats, and servers are free to choose one or more\nof the values to process and ignore others. In particular, it is *not*\nconsidered an error for the bot to provide the server with a field that it\ndoesn't know about.",
           "items": {
             "additionalProperties": {
               "description": "Properties of the object. Contains field @type with type URL.",
@@ -2058,6 +2062,15 @@
           },
           "type": "array"
         },
+        "outputs": {
+          "$ref": "GoogleDevtoolsRemoteworkersV1test2Digest",
+          "description": "The output files. The blob referenced by the digest should contain\none of the following (implementation-dependent):\n   * A marshalled DirectoryMetadata of the returned filesystem\n   * A LUCI-style .isolated file"
+        },
+        "overhead": {
+          "description": "The amount of time *not* spent executing the command (ie\nuploading/downloading files).",
+          "format": "google-duration",
+          "type": "string"
+        },
         "status": {
           "$ref": "GoogleRpcStatus",
           "description": "An overall status for the command. For example, if the command timed out,\nthis might have a code of DEADLINE_EXCEEDED; if it was killed by the OS for\nmemory exhaustion, it might have a code of RESOURCE_EXHAUSTED."
diff --git a/remotebuildexecution/v2/remotebuildexecution-gen.go b/remotebuildexecution/v2/remotebuildexecution-gen.go
index 2cbc3a4..4ceb87b 100644
--- a/remotebuildexecution/v2/remotebuildexecution-gen.go
+++ b/remotebuildexecution/v2/remotebuildexecution-gen.go
@@ -732,10 +732,10 @@
 	// Remote cache may support multiple digest functions simultaneously.
 	//
 	// Possible values:
-	//   "UNKNOWN"
-	//   "SHA256"
-	//   "SHA1"
-	//   "MD5"
+	//   "UNKNOWN" - It is an error for the server to return this value.
+	//   "SHA256" - The Sha-256 digest function.
+	//   "SHA1" - The Sha-1 digest function.
+	//   "MD5" - The MD5 digest function.
 	DigestFunction []string `json:"digestFunction,omitempty"`
 
 	// MaxBatchTotalSizeBytes: Maximum total size of blobs to be
@@ -750,11 +750,12 @@
 	//
 	// Possible values:
 	//   "UNKNOWN"
-	//   "DISALLOWED" - Server will return an INVALID_ARGUMENT on input
-	// symlinks with absolute targets.
+	//   "DISALLOWED" - Server will return an `INVALID_ARGUMENT` on input
+	// symlinks with absolute
+	// targets.
 	// If an action tries to create an output symlink with an absolute
 	// target, a
-	// FAILED_PRECONDITION will be returned.
+	// `FAILED_PRECONDITION` will be returned.
 	//   "ALLOWED" - Server will allow symlink targets to escape the input
 	// root tree, possibly
 	// resulting in non-hermetic builds.
@@ -811,8 +812,8 @@
 	// overridden using this field. Additional variables can also be
 	// specified.
 	//
-	// In order to ensure that equivalent `Command`s always hash to the
-	// same
+	// In order to ensure that equivalent
+	// Commands always hash to the same
 	// value, the environment variables MUST be lexicographically sorted by
 	// name.
 	// Sorting of strings is done by code point, equivalently, by the UTF-8
@@ -821,12 +822,14 @@
 
 	// OutputDirectories: A list of the output directories that the client
 	// expects to retrieve from
-	// the action. Only the contents of the indicated directories
-	// (recursively
-	// including the contents of their subdirectories) will be
-	// returned, as well as files listed in `output_files`. Other files that
-	// may
-	// be created during command execution are discarded.
+	// the action. Only the listed directories will be returned (an
+	// entire
+	// directory structure will be returned as a
+	// Tree message digest, see
+	// OutputDirectory), as
+	// well as files listed in `output_files`. Other files or directories
+	// that
+	// may be created during command execution are discarded.
 	//
 	// The paths are relative to the working directory of the action
 	// execution.
@@ -848,11 +851,15 @@
 	// UTF-8
 	// bytes).
 	//
-	// An output directory cannot be duplicated, be a parent of another
+	// An output directory cannot be duplicated or have the same path as any
+	// of
+	// the listed output files.
+	//
+	// Directories leading up to the output directories (but not the
 	// output
-	// directory, be a parent of a listed output file, or have the same path
-	// as
-	// any of the listed output files.
+	// directories themselves) are created by the worker prior to execution,
+	// even
+	// if they are not explicitly part of the input root.
 	OutputDirectories []string `json:"outputDirectories,omitempty"`
 
 	// OutputFiles: A list of the output files that the client expects to
@@ -860,8 +867,9 @@
 	// action. Only the listed files, as well as directories listed
 	// in
 	// `output_directories`, will be returned to the client as output.
-	// Other files that may be created during command execution are
-	// discarded.
+	// Other files or directories that may be created during command
+	// execution
+	// are discarded.
 	//
 	// The paths are relative to the working directory of the action
 	// execution.
@@ -880,10 +888,13 @@
 	// bytes).
 	//
 	// An output file cannot be duplicated, be a parent of another output
-	// file, be
-	// a child of a listed output directory, or have the same path as any of
-	// the
-	// listed output directories.
+	// file, or
+	// have the same path as any of the listed output
+	// directories.
+	//
+	// Directories leading up to the output files are created by the worker
+	// prior
+	// to execution, even if they are not explicitly part of the input root.
 	OutputFiles []string `json:"outputFiles,omitempty"`
 
 	// Platform: The platform requirements for the execution environment.
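The new guarantee spelled out here and in `OutputDirectories` (leading directories of declared outputs exist before the command runs) is a worker-side concern rather than part of this client, but it amounts to little more than the following sketch, assuming the execution root has already been populated from the input root:

```go
package main

import (
	"log"
	"os"
	"path/filepath"
)

// prepareOutputPaths creates the parent directories of every declared output
// file, and the parents of every declared output directory (but not the output
// directories themselves), relative to the execution root.
func prepareOutputPaths(execRoot string, outputFiles, outputDirs []string) error {
	for _, f := range outputFiles {
		if err := os.MkdirAll(filepath.Join(execRoot, filepath.Dir(f)), 0755); err != nil {
			return err
		}
	}
	for _, d := range outputDirs {
		if err := os.MkdirAll(filepath.Join(execRoot, filepath.Dir(d)), 0755); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// Placeholder execution root and output paths (API paths always use "/").
	if err := prepareOutputPaths("/tmp/exec-root", []string{"out/bin/app"}, []string{"out/gen"}); err != nil {
		log.Fatal(err)
	}
}
```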
@@ -998,11 +1009,12 @@
 // servers MUST ensure that they serialize messages according to the
 // following
 // rules, even if there are alternate valid encodings for the same
-// message.
-// - Fields are serialized in tag order.
-// - There are no unknown fields.
-// - There are no duplicate fields.
-// - Fields are serialized according to the default semantics for their
+// message:
+//
+// * Fields are serialized in tag order.
+// * There are no unknown fields.
+// * There are no duplicate fields.
+// * Fields are serialized according to the default semantics for their
 // type.
 //
 // Most protocol buffer implementations will always follow these rules
@@ -1059,16 +1071,17 @@
 // value, the following restrictions MUST be obeyed when constructing
 // a
 // a `Directory`:
-//   - Every child in the directory must have a path of exactly one
+//
+// * Every child in the directory must have a path of exactly one
 // segment.
-//     Multiple levels of directory hierarchy may not be collapsed.
-//   - Each child in the directory must have a unique path segment (file
+//   Multiple levels of directory hierarchy may not be collapsed.
+// * Each child in the directory must have a unique path segment (file
 // name).
-//   - The files, directories and symlinks in the directory must each be
+// * The files, directories and symlinks in the directory must each be
 // sorted
-//     in lexicographical order by path. The path strings must be sorted
+//   in lexicographical order by path. The path strings must be sorted
 // by code
-//     point, equivalently, by UTF-8 bytes.
+//   point, equivalently, by UTF-8 bytes.
 //
 // A `Directory` that obeys the restrictions is said to be in canonical
 // form.
@@ -1300,6 +1313,12 @@
 	// was executed.
 	CachedResult bool `json:"cachedResult,omitempty"`
 
+	// Message: Freeform informational message with details on the execution
+	// of the action
+	// that may be displayed to the user upon failure or when requested
+	// explicitly.
+	Message string `json:"message,omitempty"`
+
 	// Result: The result of the action.
 	Result *BuildBazelRemoteExecutionV2ActionResult `json:"result,omitempty"`
 
@@ -1433,10 +1452,10 @@
 	// function.
 	//
 	// Possible values:
-	//   "UNKNOWN"
-	//   "SHA256"
-	//   "SHA1"
-	//   "MD5"
+	//   "UNKNOWN" - It is an error for the server to return this value.
+	//   "SHA256" - The Sha-256 digest function.
+	//   "SHA1" - The Sha-1 digest function.
+	//   "MD5" - The MD5 digest function.
 	DigestFunction string `json:"digestFunction,omitempty"`
 
 	// ExecEnabled: Whether remote execution is enabled for the particular
@@ -1997,8 +2016,9 @@
 // purposes. To use it, the client attaches the header to the call using
 // the
 // canonical proto serialization:
-// name: build.bazel.remote.execution.v2.requestmetadata-bin
-// contents: the base64 encoded binary RequestMetadata message.
+//
+// * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
+// * contents: the base64 encoded binary `RequestMetadata` message.
 type BuildBazelRemoteExecutionV2RequestMetadata struct {
 	// ActionId: An identifier that ties multiple requests to the same
 	// action.
@@ -2252,13 +2272,22 @@
 type BuildBazelRemoteExecutionV2WaitExecutionRequest struct {
 }
 
+// BuildBazelSemverSemVer: The full version of a given tool.
 type BuildBazelSemverSemVer struct {
+	// Major: The major version, e.g. 10 for 10.2.3.
 	Major int64 `json:"major,omitempty"`
 
+	// Minor: The minor version, e.g. 2 for 10.2.3.
 	Minor int64 `json:"minor,omitempty"`
 
+	// Patch: The patch version, e.g. 3 for 10.2.3.
 	Patch int64 `json:"patch,omitempty"`
 
+	// Prerelease: The pre-release version. Either this field or
+	// major/minor/patch fields
+	// must be filled. They are mutually exclusive. Pre-release versions
+	// are
+	// assumed to be earlier than any released versions.
 	Prerelease string `json:"prerelease,omitempty"`
 
 	// ForceSendFields is a list of field names (e.g. "Major") to
@@ -2604,6 +2633,10 @@
 	// is supported.
 	Location string `json:"location,omitempty"`
 
+	// LoggingEnabled: Output only. Whether Stackdriver logging is enabled
+	// for the instance.
+	LoggingEnabled bool `json:"loggingEnabled,omitempty"`
+
 	// Name: Output only. Instance resource name formatted
 	// as:
 	// `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
@@ -2835,9 +2868,12 @@
 	// Platforms](https://cloud.google.com/compute/docs/cpu-platforms).
 	MinCpuPlatform string `json:"minCpuPlatform,omitempty"`
 
-	// Reserved: Output only. `reserved=true` means the worker is reserved
-	// and won't be
-	// preempted.
+	// Reserved: Determines whether the worker is reserved (and therefore
+	// won't be
+	// preempted).
+	// See [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for
+	// more
+	// details.
 	Reserved bool `json:"reserved,omitempty"`
 
 	// ForceSendFields is a list of field names (e.g. "DiskSizeGb") to
@@ -4038,6 +4074,19 @@
 	// `status` has a code of OK (otherwise it may simply be unset).
 	ExitCode int64 `json:"exitCode,omitempty"`
 
+	// Metadata: Implementation-dependent metadata about the task. Both
+	// servers and bots
+	// may define messages which can be encoded here; bots are free to
+	// provide
+	// metadata in multiple formats, and servers are free to choose one or
+	// more
+	// of the values to process and ignore others. In particular, it is
+	// *not*
+	// considered an error for the bot to provide the server with a field
+	// that it
+	// doesn't know about.
+	Metadata []googleapi.RawMessage `json:"metadata,omitempty"`
+
 	// Outputs: The output files. The blob referenced by the digest should
 	// contain
 	// one of the following (implementation-dependent):
@@ -4050,19 +4099,6 @@
 	// uploading/downloading files).
 	Overhead string `json:"overhead,omitempty"`
 
-	// Statistics: Implementation-dependent statistics about the task. Both
-	// servers and bots
-	// may define messages which can be encoded here; bots are free to
-	// provide
-	// statistics in multiple formats, and servers are free to choose one or
-	// more
-	// of the values to process and ignore others. In particular, it is
-	// *not*
-	// considered an error for the bot to provide the server with a field
-	// that it
-	// doesn't know about.
-	Statistics []googleapi.RawMessage `json:"statistics,omitempty"`
-
 	// Status: An overall status for the command. For example, if the
 	// command timed out,
 	// this might have a code of DEADLINE_EXCEEDED; if it was killed by the
@@ -4737,6 +4773,7 @@
 // Get: Retrieve a cached execution result.
 //
 // Errors:
+//
 // * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
 func (r *ActionResultsService) Get(instanceName string, hash string, sizeBytes int64) *ActionResultsGetCall {
 	c := &ActionResultsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
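A hedged sketch of that cache lookup with the generated Go surface; the auth setup shown is just one common arrangement, the instance name and digest are placeholders, and over REST the `NOT_FOUND` error surfaces as a `*googleapi.Error` with HTTP status 404:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	"google.golang.org/api/googleapi"
	remotebuildexecution "google.golang.org/api/remotebuildexecution/v2"
)

// lookupCachedResult asks the action cache for a previously stored result.
// A 404 (the REST mapping of NOT_FOUND) simply means a cache miss, so it is
// reported as (nil, nil); the caller would then fall back to Execute.
func lookupCachedResult(svc *remotebuildexecution.Service, instance, hash string, sizeBytes int64) (*remotebuildexecution.BuildBazelRemoteExecutionV2ActionResult, error) {
	res, err := svc.ActionResults.Get(instance, hash, sizeBytes).Do()
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return res, nil
}

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, remotebuildexecution.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := remotebuildexecution.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder instance name and action digest.
	res, err := lookupCachedResult(svc, "projects/my-project/instances/default_instance", "4a73bc9d03...", 65534)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cache hit:", res != nil)
}
```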
@@ -4847,7 +4884,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Retrieve a cached execution result.\n\nErrors:\n* `NOT_FOUND`: The requested `ActionResult` is not in the cache.",
+	//   "description": "Retrieve a cached execution result.\n\nErrors:\n\n* `NOT_FOUND`: The requested `ActionResult` is not in the cache.",
 	//   "flatPath": "v2/{v2Id}/actionResults/{hash}/{sizeBytes}",
 	//   "httpMethod": "GET",
 	//   "id": "remotebuildexecution.actionResults.get",
@@ -4904,12 +4941,6 @@
 
 // Update: Upload a new execution result.
 //
-// This method is intended for servers which implement the distributed
-// cache
-// independently of the
-// Execution API. As a
-// result, it is OPTIONAL for servers to implement.
-//
 // In order to allow the server to perform access control based on the
 // type of
 // action, and to assist with client debugging, the client MUST first
@@ -4920,7 +4951,10 @@
 // `ContentAddressableStorage`.
 //
 // Errors:
-// * `UNIMPLEMENTED`: This method is not supported by the server.
+//
+// * `INVALID_ARGUMENT`: One or more arguments are invalid.
+// * `FAILED_PRECONDITION`: One or more errors occurred in updating the
+//   action result, such as a missing command or action.
 // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add
 // the
 //   entry to the cache.
@@ -5045,7 +5079,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Upload a new execution result.\n\nThis method is intended for servers which implement the distributed cache\nindependently of the\nExecution API. As a\nresult, it is OPTIONAL for servers to implement.\n\nIn order to allow the server to perform access control based on the type of\naction, and to assist with client debugging, the client MUST first upload\nthe Action that produced the\nresult, along with its\nCommand, into the\n`ContentAddressableStorage`.\n\nErrors:\n* `UNIMPLEMENTED`: This method is not supported by the server.\n* `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the\n  entry to the cache.",
+	//   "description": "Upload a new execution result.\n\nIn order to allow the server to perform access control based on the type of\naction, and to assist with client debugging, the client MUST first upload\nthe Action that produced the\nresult, along with its\nCommand, into the\n`ContentAddressableStorage`.\n\nErrors:\n\n* `INVALID_ARGUMENT`: One or more arguments are invalid.\n* `FAILED_PRECONDITION`: One or more errors occurred in updating the\n  action result, such as a missing command or action.\n* `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the\n  entry to the cache.",
 	//   "flatPath": "v2/{v2Id}/actionResults/{hash}/{sizeBytes}",
 	//   "httpMethod": "PUT",
 	//   "id": "remotebuildexecution.actionResults.update",
@@ -5166,6 +5200,7 @@
 // `ExecuteResponse`. The
 // server MUST NOT set the `error` field of the `Operation` proto.
 // The possible errors include:
+//
 // * `INVALID_ARGUMENT`: One or more arguments are invalid.
 // * `FAILED_PRECONDITION`: One or more errors occurred in setting up
 // the
@@ -5184,6 +5219,11 @@
 // the
 //   worker.
 // * `DEADLINE_EXCEEDED`: The execution timed out.
+// * `CANCELLED`: The operation was cancelled by the client. This status
+// is
+//   only possible if the server implements the Operations API
+// CancelOperation
+//   method, and it was called for the current execution.
 //
 // In the case of a missing input or command, the server SHOULD
 // additionally
@@ -5290,7 +5330,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Execute an action remotely.\n\nIn order to execute an action, the client must first upload all of the\ninputs, the\nCommand to run, and the\nAction into the\nContentAddressableStorage.\nIt then calls `Execute` with an `action_digest` referring to them. The\nserver will run the action and eventually return the result.\n\nThe input `Action`'s fields MUST meet the various canonicalization\nrequirements specified in the documentation for their types so that it has\nthe same digest as other logically equivalent `Action`s. The server MAY\nenforce the requirements and return errors if a non-canonical input is\nreceived. It MAY also proceed without verifying some or all of the\nrequirements, such as for performance reasons. If the server does not\nverify the requirement, then it will treat the `Action` as distinct from\nanother logically equivalent action if they hash differently.\n\nReturns a stream of\ngoogle.longrunning.Operation messages\ndescribing the resulting execution, with eventual `response`\nExecuteResponse. The\n`metadata` on the operation is of type\nExecuteOperationMetadata.\n\nIf the client remains connected after the first response is returned after\nthe server, then updates are streamed as if the client had called\nWaitExecution\nuntil the execution completes or the request reaches an error. The\noperation can also be queried using Operations\nAPI.\n\nThe server NEED NOT implement other methods or functionality of the\nOperations API.\n\nErrors discovered during creation of the `Operation` will be reported\nas gRPC Status errors, while errors that occurred while running the\naction will be reported in the `status` field of the `ExecuteResponse`. The\nserver MUST NOT set the `error` field of the `Operation` proto.\nThe possible errors include:\n* `INVALID_ARGUMENT`: One or more arguments are invalid.\n* `FAILED_PRECONDITION`: One or more errors occurred in setting up the\n  action requested, such as a missing input or command or no worker being\n  available. The client may be able to fix the errors and retry.\n* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run\n  the action.\n* `UNAVAILABLE`: Due to a transient condition, such as all workers being\n  occupied (and the server does not support a queue), the action could not\n  be started. The client should retry.\n* `INTERNAL`: An internal error occurred in the execution engine or the\n  worker.\n* `DEADLINE_EXCEEDED`: The execution timed out.\n\nIn the case of a missing input or command, the server SHOULD additionally\nsend a PreconditionFailure error detail\nwhere, for each requested blob not present in the CAS, there is a\n`Violation` with a `type` of `MISSING` and a `subject` of\n`\"blobs/{hash}/{size}\"` indicating the digest of the missing blob.",
+	//   "description": "Execute an action remotely.\n\nIn order to execute an action, the client must first upload all of the\ninputs, the\nCommand to run, and the\nAction into the\nContentAddressableStorage.\nIt then calls `Execute` with an `action_digest` referring to them. The\nserver will run the action and eventually return the result.\n\nThe input `Action`'s fields MUST meet the various canonicalization\nrequirements specified in the documentation for their types so that it has\nthe same digest as other logically equivalent `Action`s. The server MAY\nenforce the requirements and return errors if a non-canonical input is\nreceived. It MAY also proceed without verifying some or all of the\nrequirements, such as for performance reasons. If the server does not\nverify the requirement, then it will treat the `Action` as distinct from\nanother logically equivalent action if they hash differently.\n\nReturns a stream of\ngoogle.longrunning.Operation messages\ndescribing the resulting execution, with eventual `response`\nExecuteResponse. The\n`metadata` on the operation is of type\nExecuteOperationMetadata.\n\nIf the client remains connected after the first response is returned after\nthe server, then updates are streamed as if the client had called\nWaitExecution\nuntil the execution completes or the request reaches an error. The\noperation can also be queried using Operations\nAPI.\n\nThe server NEED NOT implement other methods or functionality of the\nOperations API.\n\nErrors discovered during creation of the `Operation` will be reported\nas gRPC Status errors, while errors that occurred while running the\naction will be reported in the `status` field of the `ExecuteResponse`. The\nserver MUST NOT set the `error` field of the `Operation` proto.\nThe possible errors include:\n\n* `INVALID_ARGUMENT`: One or more arguments are invalid.\n* `FAILED_PRECONDITION`: One or more errors occurred in setting up the\n  action requested, such as a missing input or command or no worker being\n  available. The client may be able to fix the errors and retry.\n* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run\n  the action.\n* `UNAVAILABLE`: Due to a transient condition, such as all workers being\n  occupied (and the server does not support a queue), the action could not\n  be started. The client should retry.\n* `INTERNAL`: An internal error occurred in the execution engine or the\n  worker.\n* `DEADLINE_EXCEEDED`: The execution timed out.\n* `CANCELLED`: The operation was cancelled by the client. This status is\n  only possible if the server implements the Operations API CancelOperation\n  method, and it was called for the current execution.\n\nIn the case of a missing input or command, the server SHOULD additionally\nsend a PreconditionFailure error detail\nwhere, for each requested blob not present in the CAS, there is a\n`Violation` with a `type` of `MISSING` and a `subject` of\n`\"blobs/{hash}/{size}\"` indicating the digest of the missing blob.",
 	//   "flatPath": "v2/{v2Id}/actions:execute",
 	//   "httpMethod": "POST",
 	//   "id": "remotebuildexecution.actions.execute",
@@ -5348,6 +5388,7 @@
 // independently.
 //
 // Errors:
+//
 // * `INVALID_ARGUMENT`: The client attempted to read more than the
 //   server supported limit.
 //
@@ -5453,7 +5494,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Download many blobs at once.\n\nThe server may enforce a limit of the combined total size of blobs\nto be downloaded using this API. This limit may be obtained using the\nCapabilities API.\nRequests exceeding the limit should either be split into smaller\nchunks or downloaded using the\nByteStream API, as appropriate.\n\nThis request is equivalent to calling a Bytestream `Read` request\non each individual blob, in parallel. The requests may succeed or fail\nindependently.\n\nErrors:\n* `INVALID_ARGUMENT`: The client attempted to read more than the\n  server supported limit.\n\nEvery error on individual read will be returned in the corresponding digest\nstatus.",
+	//   "description": "Download many blobs at once.\n\nThe server may enforce a limit of the combined total size of blobs\nto be downloaded using this API. This limit may be obtained using the\nCapabilities API.\nRequests exceeding the limit should either be split into smaller\nchunks or downloaded using the\nByteStream API, as appropriate.\n\nThis request is equivalent to calling a Bytestream `Read` request\non each individual blob, in parallel. The requests may succeed or fail\nindependently.\n\nErrors:\n\n* `INVALID_ARGUMENT`: The client attempted to read more than the\n  server supported limit.\n\nEvery error on individual read will be returned in the corresponding digest\nstatus.",
 	//   "flatPath": "v2/{v2Id}/blobs:batchRead",
 	//   "httpMethod": "POST",
 	//   "id": "remotebuildexecution.blobs.batchRead",
@@ -5511,10 +5552,12 @@
 // independently.
 //
 // Errors:
+//
 // * `INVALID_ARGUMENT`: The client attempted to upload more than the
 //   server supported limit.
 //
 // Individual requests may return the following errors, additionally:
+//
 // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the
 // blob.
 // * `INVALID_ARGUMENT`: The
@@ -5619,7 +5662,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Upload many blobs at once.\n\nThe server may enforce a limit of the combined total size of blobs\nto be uploaded using this API. This limit may be obtained using the\nCapabilities API.\nRequests exceeding the limit should either be split into smaller\nchunks or uploaded using the\nByteStream API, as appropriate.\n\nThis request is equivalent to calling a Bytestream `Write` request\non each individual blob, in parallel. The requests may succeed or fail\nindependently.\n\nErrors:\n* `INVALID_ARGUMENT`: The client attempted to upload more than the\n  server supported limit.\n\nIndividual requests may return the following errors, additionally:\n* `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.\n* `INVALID_ARGUMENT`: The\nDigest does not match the\nprovided data.",
+	//   "description": "Upload many blobs at once.\n\nThe server may enforce a limit of the combined total size of blobs\nto be uploaded using this API. This limit may be obtained using the\nCapabilities API.\nRequests exceeding the limit should either be split into smaller\nchunks or uploaded using the\nByteStream API, as appropriate.\n\nThis request is equivalent to calling a Bytestream `Write` request\non each individual blob, in parallel. The requests may succeed or fail\nindependently.\n\nErrors:\n\n* `INVALID_ARGUMENT`: The client attempted to upload more than the\n  server supported limit.\n\nIndividual requests may return the following errors, additionally:\n\n* `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.\n* `INVALID_ARGUMENT`: The\nDigest does not match the\nprovided data.",
 	//   "flatPath": "v2/{v2Id}/blobs:batchUpdate",
 	//   "httpMethod": "POST",
 	//   "id": "remotebuildexecution.blobs.batchUpdate",
diff --git a/servicecontrol/v1/servicecontrol-api.json b/servicecontrol/v1/servicecontrol-api.json
index a42b8c2..3e3601b 100644
--- a/servicecontrol/v1/servicecontrol-api.json
+++ b/servicecontrol/v1/servicecontrol-api.json
@@ -196,7 +196,7 @@
       }
     }
   },
-  "revision": "20181229",
+  "revision": "20190114",
   "rootUrl": "https://servicecontrol.googleapis.com/",
   "schemas": {
     "AllocateInfo": {
@@ -564,7 +564,7 @@
           "type": "string"
         },
         "skipActivationCheck": {
-          "description": "Indicates if service activation check should be skipped for this request.\nDefault behavior is to perform the check and apply relevant quota.",
+          "description": "Indicates if service activation check should be skipped for this request.\nDefault behavior is to perform the check and apply relevant quota.\nWARNING: Setting this flag to \"true\" will disable quota enforcement.",
           "type": "boolean"
         }
       },
diff --git a/servicecontrol/v1/servicecontrol-gen.go b/servicecontrol/v1/servicecontrol-gen.go
index e7377a1..26da88e 100644
--- a/servicecontrol/v1/servicecontrol-gen.go
+++ b/servicecontrol/v1/servicecontrol-gen.go
@@ -734,7 +734,9 @@
 
 	// SkipActivationCheck: Indicates if service activation check should be
 	// skipped for this request.
-	// Default behavior is to perform the check and apply relevant quota.
+	// Default behavior is to perform the check and apply relevant
+	// quota.
+	// WARNING: Setting this flag to "true" will disable quota enforcement.
 	SkipActivationCheck bool `json:"skipActivationCheck,omitempty"`
 
 	// ForceSendFields is a list of field names (e.g. "Operation") to
diff --git a/serviceusage/v1/serviceusage-api.json b/serviceusage/v1/serviceusage-api.json
index 703633e..bd2222a 100644
--- a/serviceusage/v1/serviceusage-api.json
+++ b/serviceusage/v1/serviceusage-api.json
@@ -236,7 +236,7 @@
     "services": {
       "methods": {
         "batchEnable": {
-          "description": "Enable multiple services on a project. The operation is atomic: if enabling\nany service fails, then the entire batch fails, and no state changes occur.",
+          "description": "Enable multiple services on a project. The operation is atomic: if enabling\nany service fails, then the entire batch fails, and no state changes occur.\n\nOperation\u003cresponse: BatchEnableServicesResponse\u003e",
           "flatPath": "v1/{v1Id}/{v1Id1}/services:batchEnable",
           "httpMethod": "POST",
           "id": "serviceusage.services.batchEnable",
@@ -265,7 +265,7 @@
           ]
         },
         "disable": {
-          "description": "Disable a service so that it can no longer be used with a project.\nThis prevents unintended usage that may cause unexpected billing\ncharges or security leaks.\n\nIt is not valid to call the disable method on a service that is not\ncurrently enabled. Callers will receive a `FAILED_PRECONDITION` status if\nthe target service is not currently enabled.",
+          "description": "Disable a service so that it can no longer be used with a project.\nThis prevents unintended usage that may cause unexpected billing\ncharges or security leaks.\n\nIt is not valid to call the disable method on a service that is not\ncurrently enabled. Callers will receive a `FAILED_PRECONDITION` status if\nthe target service is not currently enabled.\n\nOperation\u003cresponse: DisableServiceResponse\u003e",
           "flatPath": "v1/{v1Id}/{v1Id1}/services/{servicesId}:disable",
           "httpMethod": "POST",
           "id": "serviceusage.services.disable",
@@ -294,7 +294,7 @@
           ]
         },
         "enable": {
-          "description": "Enable a service so that it can be used with a project.",
+          "description": "Enable a service so that it can be used with a project.\n\nOperation\u003cresponse: EnableServiceResponse\u003e",
           "flatPath": "v1/{v1Id}/{v1Id1}/services/{servicesId}:enable",
           "httpMethod": "POST",
           "id": "serviceusage.services.enable",
@@ -393,7 +393,7 @@
       }
     }
   },
-  "revision": "20190115",
+  "revision": "20190114",
   "rootUrl": "https://serviceusage.googleapis.com/",
   "schemas": {
     "Api": {
diff --git a/serviceusage/v1/serviceusage-gen.go b/serviceusage/v1/serviceusage-gen.go
index 534b575..5eb15bf 100644
--- a/serviceusage/v1/serviceusage-gen.go
+++ b/serviceusage/v1/serviceusage-gen.go
@@ -5232,6 +5232,8 @@
 // atomic: if enabling
 // any service fails, then the entire batch fails, and no state changes
 // occur.
+//
+// Operation<response: BatchEnableServicesResponse>
 func (r *ServicesService) BatchEnable(parent string, batchenableservicesrequest *BatchEnableServicesRequest) *ServicesBatchEnableCall {
 	c := &ServicesBatchEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.parent = parent
@@ -5329,7 +5331,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Enable multiple services on a project. The operation is atomic: if enabling\nany service fails, then the entire batch fails, and no state changes occur.",
+	//   "description": "Enable multiple services on a project. The operation is atomic: if enabling\nany service fails, then the entire batch fails, and no state changes occur.\n\nOperation\u003cresponse: BatchEnableServicesResponse\u003e",
 	//   "flatPath": "v1/{v1Id}/{v1Id1}/services:batchEnable",
 	//   "httpMethod": "POST",
 	//   "id": "serviceusage.services.batchEnable",
@@ -5382,6 +5384,8 @@
 // currently enabled. Callers will receive a `FAILED_PRECONDITION`
 // status if
 // the target service is not currently enabled.
+//
+// Operation<response: DisableServiceResponse>
 func (r *ServicesService) Disable(name string, disableservicerequest *DisableServiceRequest) *ServicesDisableCall {
 	c := &ServicesDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.name = name
@@ -5479,7 +5483,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Disable a service so that it can no longer be used with a project.\nThis prevents unintended usage that may cause unexpected billing\ncharges or security leaks.\n\nIt is not valid to call the disable method on a service that is not\ncurrently enabled. Callers will receive a `FAILED_PRECONDITION` status if\nthe target service is not currently enabled.",
+	//   "description": "Disable a service so that it can no longer be used with a project.\nThis prevents unintended usage that may cause unexpected billing\ncharges or security leaks.\n\nIt is not valid to call the disable method on a service that is not\ncurrently enabled. Callers will receive a `FAILED_PRECONDITION` status if\nthe target service is not currently enabled.\n\nOperation\u003cresponse: DisableServiceResponse\u003e",
 	//   "flatPath": "v1/{v1Id}/{v1Id1}/services/{servicesId}:disable",
 	//   "httpMethod": "POST",
 	//   "id": "serviceusage.services.disable",
@@ -5521,7 +5525,10 @@
 	header_              http.Header
 }
 
-// Enable: Enable a service so that it can be used with a project.
+// Enable: Enable a service so that it can be used with a
+// project.
+//
+// Operation<response: EnableServiceResponse>
 func (r *ServicesService) Enable(name string, enableservicerequest *EnableServiceRequest) *ServicesEnableCall {
 	c := &ServicesEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.name = name
@@ -5619,7 +5626,7 @@
 	}
 	return ret, nil
 	// {
-	//   "description": "Enable a service so that it can be used with a project.",
+	//   "description": "Enable a service so that it can be used with a project.\n\nOperation\u003cresponse: EnableServiceResponse\u003e",
 	//   "flatPath": "v1/{v1Id}/{v1Id1}/services/{servicesId}:enable",
 	//   "httpMethod": "POST",
 	//   "id": "serviceusage.services.enable",
diff --git a/videointelligence/v1/videointelligence-api.json b/videointelligence/v1/videointelligence-api.json
index 340da18..f357604 100644
--- a/videointelligence/v1/videointelligence-api.json
+++ b/videointelligence/v1/videointelligence-api.json
@@ -246,7 +246,7 @@
       }
     }
   },
-  "revision": "20181027",
+  "revision": "20190112",
   "rootUrl": "https://videointelligence.googleapis.com/",
   "schemas": {
     "GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
@@ -1658,7 +1658,7 @@
           "description": "Entity to specify the object category that this track is labeled as."
         },
         "frames": {
-          "description": "Information corresponding to all frames where this object track appears.",
+          "description": "Information corresponding to all frames where this object track appears.\nNon-streaming batch mode: it may be one or multiple ObjectTrackingFrame\nmessages in frames.\nStreaming mode: it can only be one ObjectTrackingFrame message in frames.",
           "items": {
             "$ref": "GoogleCloudVideointelligenceV1p2beta1_ObjectTrackingFrame"
           },
@@ -1666,7 +1666,12 @@
         },
         "segment": {
           "$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
-          "description": "Each object track corresponds to one video segment where it appears."
+          "description": "Non-streaming batch mode ONLY.\nEach object track corresponds to one video segment where it appears."
+        },
+        "trackId": {
+          "description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
+          "format": "int64",
+          "type": "string"
         }
       },
       "type": "object"
diff --git a/videointelligence/v1/videointelligence-gen.go b/videointelligence/v1/videointelligence-gen.go
index e9a5796..580982b 100644
--- a/videointelligence/v1/videointelligence-gen.go
+++ b/videointelligence/v1/videointelligence-gen.go
@@ -2917,12 +2917,29 @@
 
 	// Frames: Information corresponding to all frames where this object
 	// track appears.
+	// Non-streaming batch mode: it may be one or multiple
+	// ObjectTrackingFrame
+	// messages in frames.
+	// Streaming mode: it can only be one ObjectTrackingFrame message in
+	// frames.
 	Frames []*GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame `json:"frames,omitempty"`
 
-	// Segment: Each object track corresponds to one video segment where it
-	// appears.
+	// Segment: Non-streaming batch mode ONLY.
+	// Each object track corresponds to one video segment where it appears.
 	Segment *GoogleCloudVideointelligenceV1p2beta1VideoSegment `json:"segment,omitempty"`
 
+	// TrackId: Streaming mode ONLY.
+	// In streaming mode, we do not know the end time of a tracked
+	// object
+	// before it is completed. Hence, there is no VideoSegment info
+	// returned.
+	// Instead, we provide a unique identifiable integer track_id so
+	// that
+	// the customers can correlate the results of the
+	// ongoing
+	// ObjectTrackAnnotation of the same track_id over time.
+	TrackId int64 `json:"trackId,omitempty,string"`
+
 	// ForceSendFields is a list of field names (e.g. "Confidence") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
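
With this change the same annotation carries either a Segment (non-streaming batch results) or a TrackId (streaming results), so readers of the generated struct need to branch on which one is populated. A small sketch against the v1 package; the annotation type name follows the field types shown above, and using a zero TrackId as the "not streaming" signal is an assumption of this sketch, not something the API guarantees:

package main

import (
	"fmt"

	videointelligence "google.golang.org/api/videointelligence/v1"
)

// describeTrack reports where an object track was observed, handling both
// result shapes documented above: batch results carry a VideoSegment,
// streaming results carry only a track_id.
func describeTrack(a *videointelligence.GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation) string {
	label := "object"
	if a.Entity != nil && a.Entity.Description != "" {
		label = a.Entity.Description
	}
	switch {
	case a.Segment != nil:
		// Non-streaming batch mode: one segment per track.
		return fmt.Sprintf("%q tracked from %s to %s over %d frame(s)",
			label, a.Segment.StartTimeOffset, a.Segment.EndTimeOffset, len(a.Frames))
	case a.TrackId != 0:
		// Streaming mode: correlate ongoing updates by track_id.
		return fmt.Sprintf("%q streaming update, track_id=%d, %d frame(s)",
			label, a.TrackId, len(a.Frames))
	default:
		return fmt.Sprintf("%q with neither segment nor track_id", label)
	}
}

func main() {
	a := &videointelligence.GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation{
		Entity:  &videointelligence.GoogleCloudVideointelligenceV1p2beta1Entity{Description: "cat"},
		TrackId: 7,
		Frames:  []*videointelligence.GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame{{}},
	}
	fmt.Println(describeTrack(a))
}

The identical field additions land in the v1beta2 and v1p1beta1 surfaces below.
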
diff --git a/videointelligence/v1beta2/videointelligence-api.json b/videointelligence/v1beta2/videointelligence-api.json
index 31218e5..8202255 100644
--- a/videointelligence/v1beta2/videointelligence-api.json
+++ b/videointelligence/v1beta2/videointelligence-api.json
@@ -127,7 +127,7 @@
       }
     }
   },
-  "revision": "20181027",
+  "revision": "20190112",
   "rootUrl": "https://videointelligence.googleapis.com/",
   "schemas": {
     "GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
@@ -1539,7 +1539,7 @@
           "description": "Entity to specify the object category that this track is labeled as."
         },
         "frames": {
-          "description": "Information corresponding to all frames where this object track appears.",
+          "description": "Information corresponding to all frames where this object track appears.\nNon-streaming batch mode: it may be one or multiple ObjectTrackingFrame\nmessages in frames.\nStreaming mode: it can only be one ObjectTrackingFrame message in frames.",
           "items": {
             "$ref": "GoogleCloudVideointelligenceV1p2beta1_ObjectTrackingFrame"
           },
@@ -1547,7 +1547,12 @@
         },
         "segment": {
           "$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
-          "description": "Each object track corresponds to one video segment where it appears."
+          "description": "Non-streaming batch mode ONLY.\nEach object track corresponds to one video segment where it appears."
+        },
+        "trackId": {
+          "description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
+          "format": "int64",
+          "type": "string"
         }
       },
       "type": "object"
diff --git a/videointelligence/v1beta2/videointelligence-gen.go b/videointelligence/v1beta2/videointelligence-gen.go
index d85aa63..c5c8f48 100644
--- a/videointelligence/v1beta2/videointelligence-gen.go
+++ b/videointelligence/v1beta2/videointelligence-gen.go
@@ -2905,12 +2905,29 @@
 
 	// Frames: Information corresponding to all frames where this object
 	// track appears.
+	// Non-streaming batch mode: it may be one or multiple
+	// ObjectTrackingFrame
+	// messages in frames.
+	// Streaming mode: it can only be one ObjectTrackingFrame message in
+	// frames.
 	Frames []*GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame `json:"frames,omitempty"`
 
-	// Segment: Each object track corresponds to one video segment where it
-	// appears.
+	// Segment: Non-streaming batch mode ONLY.
+	// Each object track corresponds to one video segment where it appears.
 	Segment *GoogleCloudVideointelligenceV1p2beta1VideoSegment `json:"segment,omitempty"`
 
+	// TrackId: Streaming mode ONLY.
+	// In streaming mode, we do not know the end time of a tracked
+	// object
+	// before it is completed. Hence, there is no VideoSegment info
+	// returned.
+	// Instead, we provide a unique identifiable integer track_id so
+	// that
+	// the customers can correlate the results of the
+	// ongoing
+	// ObjectTrackAnnotation of the same track_id over time.
+	TrackId int64 `json:"trackId,omitempty,string"`
+
 	// ForceSendFields is a list of field names (e.g. "Confidence") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
diff --git a/videointelligence/v1p1beta1/videointelligence-api.json b/videointelligence/v1p1beta1/videointelligence-api.json
index 6c6fc35..70ec90e 100644
--- a/videointelligence/v1p1beta1/videointelligence-api.json
+++ b/videointelligence/v1p1beta1/videointelligence-api.json
@@ -127,7 +127,7 @@
       }
     }
   },
-  "revision": "20181027",
+  "revision": "20190112",
   "rootUrl": "https://videointelligence.googleapis.com/",
   "schemas": {
     "GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
@@ -1539,7 +1539,7 @@
           "description": "Entity to specify the object category that this track is labeled as."
         },
         "frames": {
-          "description": "Information corresponding to all frames where this object track appears.",
+          "description": "Information corresponding to all frames where this object track appears.\nNon-streaming batch mode: it may be one or multiple ObjectTrackingFrame\nmessages in frames.\nStreaming mode: it can only be one ObjectTrackingFrame message in frames.",
           "items": {
             "$ref": "GoogleCloudVideointelligenceV1p2beta1_ObjectTrackingFrame"
           },
@@ -1547,7 +1547,12 @@
         },
         "segment": {
           "$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
-          "description": "Each object track corresponds to one video segment where it appears."
+          "description": "Non-streaming batch mode ONLY.\nEach object track corresponds to one video segment where it appears."
+        },
+        "trackId": {
+          "description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
+          "format": "int64",
+          "type": "string"
         }
       },
       "type": "object"
diff --git a/videointelligence/v1p1beta1/videointelligence-gen.go b/videointelligence/v1p1beta1/videointelligence-gen.go
index e95e733..a113fa6 100644
--- a/videointelligence/v1p1beta1/videointelligence-gen.go
+++ b/videointelligence/v1p1beta1/videointelligence-gen.go
@@ -2905,12 +2905,29 @@
 
 	// Frames: Information corresponding to all frames where this object
 	// track appears.
+	// Non-streaming batch mode: it may be one or multiple
+	// ObjectTrackingFrame
+	// messages in frames.
+	// Streaming mode: it can only be one ObjectTrackingFrame message in
+	// frames.
 	Frames []*GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame `json:"frames,omitempty"`
 
-	// Segment: Each object track corresponds to one video segment where it
-	// appears.
+	// Segment: Non-streaming batch mode ONLY.
+	// Each object track corresponds to one video segment where it appears.
 	Segment *GoogleCloudVideointelligenceV1p2beta1VideoSegment `json:"segment,omitempty"`
 
+	// TrackId: Streaming mode ONLY.
+	// In streaming mode, we do not know the end time of a tracked
+	// object
+	// before it is completed. Hence, there is no VideoSegment info
+	// returned.
+	// Instead, we provide a unique identifiable integer track_id so
+	// that
+	// the customers can correlate the results of the
+	// ongoing
+	// ObjectTrackAnnotation of the same track_id over time.
+	TrackId int64 `json:"trackId,omitempty,string"`
+
 	// ForceSendFields is a list of field names (e.g. "Confidence") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
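
In all three versions the new trackId field is declared with "format": "int64" and "type": "string" in the discovery documents and generated as an int64 with a ",string" JSON tag in Go, so it travels on the wire as a quoted decimal string. A self-contained illustration of that encoding; the local struct only mirrors the tag and is not the generated type:

package main

import (
	"encoding/json"
	"fmt"
)

// annotation mirrors how the generated struct tags TrackId: int64 in Go,
// serialized as a JSON string so large values survive JavaScript clients.
type annotation struct {
	TrackId int64 `json:"trackId,omitempty,string"`
}

func main() {
	out, _ := json.Marshal(annotation{TrackId: 9007199254740993}) // larger than 2^53
	fmt.Println(string(out)) // {"trackId":"9007199254740993"}

	var in annotation
	_ = json.Unmarshal([]byte(`{"trackId":"42"}`), &in)
	fmt.Println(in.TrackId) // 42
}
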