all: autogenerated update (2018-10-15)

Update:
- bigquery/v2
- videointelligence/v1
- videointelligence/v1beta2
- videointelligence/v1p1beta1

Notable changes:
- bigquery/v2: adds the useAvroLogicalTypes option to
  JobConfigurationLoad for Avro load jobs.
- videointelligence (all three versions): adds an output-only
  languageCode field to
  GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription, and moves
  the confidence and frames fields from
  GoogleCloudVideointelligenceV1p2beta1_TextAnnotation to
  GoogleCloudVideointelligenceV1p2beta1_TextSegment.
- videointelligence/v1p1beta1: adds alternativeLanguageCodes to
  SpeechTranscriptionConfig and corrects the maxAlternatives
  documentation to reference SpeechTranscription rather than
  SpeechRecognitionResult.
diff --git a/bigquery/v2/bigquery-api.json b/bigquery/v2/bigquery-api.json
index 850ceb6..baa5c2a 100644
--- a/bigquery/v2/bigquery-api.json
+++ b/bigquery/v2/bigquery-api.json
@@ -32,7 +32,7 @@
   "description": "A data platform for customers to create, manage, share and query data.",
   "discoveryVersion": "v1",
   "documentationLink": "https://cloud.google.com/bigquery/",
-  "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/QF07rEPTnP4y6xu7pxBI9RDU640\"",
+  "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/kNaWPiDorJ4a-XoVjJgcrtZLb_0\"",
   "icons": {
     "x16": "https://www.google.com/images/icons/product/search-16.gif",
     "x32": "https://www.google.com/images/icons/product/search-32.gif"
@@ -998,7 +998,7 @@
       }
     }
   },
-  "revision": "20180826",
+  "revision": "20181005",
   "rootUrl": "https://www.googleapis.com/",
   "schemas": {
     "BigQueryModelTraining": {
@@ -1960,6 +1960,10 @@
           "$ref": "TimePartitioning",
           "description": "Time-based partitioning specification for the destination table."
         },
+        "useAvroLogicalTypes": {
+          "description": "If sourceFormat is set to \"AVRO\", indicates whether to enable interpreting logical types into their corresponding types (ie. TIMESTAMP), instead of only using their raw types (ie. INTEGER). The default value will be true once this feature launches, but can be set now in preparation.",
+          "type": "boolean"
+        },
         "writeDisposition": {
           "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_APPEND. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion.",
           "type": "string"
diff --git a/bigquery/v2/bigquery-gen.go b/bigquery/v2/bigquery-gen.go
index 61a9cb9..99d6624 100644
--- a/bigquery/v2/bigquery-gen.go
+++ b/bigquery/v2/bigquery-gen.go
@@ -1723,6 +1723,13 @@
 	// destination table.
 	TimePartitioning *TimePartitioning `json:"timePartitioning,omitempty"`
 
+	// UseAvroLogicalTypes: If sourceFormat is set to "AVRO", indicates
+	// whether to enable interpreting logical types into their corresponding
+	// types (ie. TIMESTAMP), instead of only using their raw types (ie.
+	// INTEGER). The default value will be true once this feature launches,
+	// but can be set now in preparation.
+	UseAvroLogicalTypes bool `json:"useAvroLogicalTypes,omitempty"`
+
 	// WriteDisposition: [Optional] Specifies the action that occurs if the
 	// destination table already exists. The following values are supported:
 	// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
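Example (not part of the generated code): a minimal sketch of an Avro load job that sets the new UseAvroLogicalTypes field. It assumes Application Default Credentials; the project, dataset, table, and GCS URI are placeholders.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	bigquery "google.golang.org/api/bigquery/v2"
)

func main() {
	ctx := context.Background()

	// Assumes Application Default Credentials are available.
	httpClient, err := google.DefaultClient(ctx, bigquery.BigqueryScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := bigquery.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// Project, dataset, table, and source URI below are placeholders.
	job := &bigquery.Job{
		Configuration: &bigquery.JobConfiguration{
			Load: &bigquery.JobConfigurationLoad{
				SourceFormat: "AVRO",
				SourceUris:   []string{"gs://my-bucket/data.avro"},
				DestinationTable: &bigquery.TableReference{
					ProjectId: "my-project",
					DatasetId: "my_dataset",
					TableId:   "my_table",
				},
				// New in this update: interpret Avro logical types
				// (e.g. timestamp-micros) as their corresponding
				// BigQuery types instead of the raw underlying types.
				UseAvroLogicalTypes: true,
			},
		},
	}

	if _, err := svc.Jobs.Insert("my-project", job).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}

Because UseAvroLogicalTypes is a bool tagged omitempty, an explicit false would have to be listed in ForceSendFields to be sent on the wire.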
diff --git a/videointelligence/v1/videointelligence-api.json b/videointelligence/v1/videointelligence-api.json
index cc94053..e45bd4a 100644
--- a/videointelligence/v1/videointelligence-api.json
+++ b/videointelligence/v1/videointelligence-api.json
@@ -246,7 +246,7 @@
       }
     }
   },
-  "revision": "20180920",
+  "revision": "20181003",
   "rootUrl": "https://videointelligence.googleapis.com/",
   "schemas": {
     "GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
@@ -1062,6 +1062,10 @@
             "$ref": "GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative"
           },
           "type": "array"
+        },
+        "languageCode": {
+          "description": "Output only. The\n[BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the\nlanguage in this result. This language code was detected to have the most\nlikelihood of being spoken in the audio.",
+          "type": "string"
         }
       },
       "type": "object"
@@ -1457,18 +1461,6 @@
       "description": "Annotations related to one detected OCR text snippet. This will contain the\ncorresponding text, confidence value, and frame level information for each\ndetection.",
       "id": "GoogleCloudVideointelligenceV1p2beta1_TextAnnotation",
       "properties": {
-        "confidence": {
-          "description": "Confidence for the track of detected text. It is calculated as the highest\nover all frames where OCR detected text appears.",
-          "format": "float",
-          "type": "number"
-        },
-        "frames": {
-          "description": "Information related to the frames where OCR detected text appears.",
-          "items": {
-            "$ref": "GoogleCloudVideointelligenceV1p2beta1_TextFrame"
-          },
-          "type": "array"
-        },
         "segments": {
           "description": "All video segments where OCR detected text appears.",
           "items": {
@@ -1503,6 +1495,18 @@
       "description": "Video segment level annotation results for text detection.",
       "id": "GoogleCloudVideointelligenceV1p2beta1_TextSegment",
       "properties": {
+        "confidence": {
+          "description": "Confidence for the track of detected text. It is calculated as the highest\nover all frames where OCR detected text appears.",
+          "format": "float",
+          "type": "number"
+        },
+        "frames": {
+          "description": "Information related to the frames where OCR detected text appears.",
+          "items": {
+            "$ref": "GoogleCloudVideointelligenceV1p2beta1_TextFrame"
+          },
+          "type": "array"
+        },
         "segment": {
           "$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
           "description": "Video segment where a text snippet was detected."
diff --git a/videointelligence/v1/videointelligence-gen.go b/videointelligence/v1/videointelligence-gen.go
index f6ae28b..d62efe5 100644
--- a/videointelligence/v1/videointelligence-gen.go
+++ b/videointelligence/v1/videointelligence-gen.go
@@ -1633,6 +1633,15 @@
 	// ranked by the recognizer.
 	Alternatives []*GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative `json:"alternatives,omitempty"`
 
+	// LanguageCode: Output only.
+	// The
+	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+	// of the
+	// language in this result. This language code was detected to have the
+	// most
+	// likelihood of being spoken in the audio.
+	LanguageCode string `json:"languageCode,omitempty"`
+
 	// ForceSendFields is a list of field names (e.g. "Alternatives") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
@@ -2455,22 +2464,13 @@
 // each
 // detection.
 type GoogleCloudVideointelligenceV1p2beta1TextAnnotation struct {
-	// Confidence: Confidence for the track of detected text. It is
-	// calculated as the highest
-	// over all frames where OCR detected text appears.
-	Confidence float64 `json:"confidence,omitempty"`
-
-	// Frames: Information related to the frames where OCR detected text
-	// appears.
-	Frames []*GoogleCloudVideointelligenceV1p2beta1TextFrame `json:"frames,omitempty"`
-
 	// Segments: All video segments where OCR detected text appears.
 	Segments []*GoogleCloudVideointelligenceV1p2beta1TextSegment `json:"segments,omitempty"`
 
 	// Text: The detected text.
 	Text string `json:"text,omitempty"`
 
-	// ForceSendFields is a list of field names (e.g. "Confidence") to
+	// ForceSendFields is a list of field names (e.g. "Segments") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
 	// non-interface field appearing in ForceSendFields will be sent to the
@@ -2478,7 +2478,7 @@
 	// used to include empty fields in Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "Confidence") to include in
+	// NullFields is a list of field names (e.g. "Segments") to include in
 	// API requests with the JSON null value. By default, fields with empty
 	// values are omitted from API requests. However, any field with an
 	// empty value appearing in NullFields will be sent to the server as
@@ -2493,20 +2493,6 @@
 	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 }
 
-func (s *GoogleCloudVideointelligenceV1p2beta1TextAnnotation) UnmarshalJSON(data []byte) error {
-	type NoMethod GoogleCloudVideointelligenceV1p2beta1TextAnnotation
-	var s1 struct {
-		Confidence gensupport.JSONFloat64 `json:"confidence"`
-		*NoMethod
-	}
-	s1.NoMethod = (*NoMethod)(s)
-	if err := json.Unmarshal(data, &s1); err != nil {
-		return err
-	}
-	s.Confidence = float64(s1.Confidence)
-	return nil
-}
-
 // GoogleCloudVideointelligenceV1p2beta1TextFrame: Video frame level
 // annotation results for text annotation (OCR).
 // Contains information regarding timestamp and bounding box locations
@@ -2547,10 +2533,19 @@
 // GoogleCloudVideointelligenceV1p2beta1TextSegment: Video segment level
 // annotation results for text detection.
 type GoogleCloudVideointelligenceV1p2beta1TextSegment struct {
+	// Confidence: Confidence for the track of detected text. It is
+	// calculated as the highest
+	// over all frames where OCR detected text appears.
+	Confidence float64 `json:"confidence,omitempty"`
+
+	// Frames: Information related to the frames where OCR detected text
+	// appears.
+	Frames []*GoogleCloudVideointelligenceV1p2beta1TextFrame `json:"frames,omitempty"`
+
 	// Segment: Video segment where a text snippet was detected.
 	Segment *GoogleCloudVideointelligenceV1p2beta1VideoSegment `json:"segment,omitempty"`
 
-	// ForceSendFields is a list of field names (e.g. "Segment") to
+	// ForceSendFields is a list of field names (e.g. "Confidence") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
 	// non-interface field appearing in ForceSendFields will be sent to the
@@ -2558,7 +2553,7 @@
 	// used to include empty fields in Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "Segment") to include in
+	// NullFields is a list of field names (e.g. "Confidence") to include in
 	// API requests with the JSON null value. By default, fields with empty
 	// values are omitted from API requests. However, any field with an
 	// empty value appearing in NullFields will be sent to the server as
@@ -2573,6 +2568,20 @@
 	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 }
 
+func (s *GoogleCloudVideointelligenceV1p2beta1TextSegment) UnmarshalJSON(data []byte) error {
+	type NoMethod GoogleCloudVideointelligenceV1p2beta1TextSegment
+	var s1 struct {
+		Confidence gensupport.JSONFloat64 `json:"confidence"`
+		*NoMethod
+	}
+	s1.NoMethod = (*NoMethod)(s)
+	if err := json.Unmarshal(data, &s1); err != nil {
+		return err
+	}
+	s.Confidence = float64(s1.Confidence)
+	return nil
+}
+
 // GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress:
 // Annotation progress for a single video.
 type GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress struct {
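Example (not part of the generated code): a minimal sketch showing where the new and moved fields live in the v1 package; the identical schema change also lands in v1beta2 and v1p1beta1 below. It assumes the results have already been decoded from a long-running Annotate operation's response.

package main

import (
	"fmt"

	videointelligence "google.golang.org/api/videointelligence/v1"
)

func printResults(
	transcriptions []*videointelligence.GoogleCloudVideointelligenceV1p1beta1SpeechTranscription,
	annotations []*videointelligence.GoogleCloudVideointelligenceV1p2beta1TextAnnotation,
) {
	for _, t := range transcriptions {
		// New in this update: the detected BCP-47 language tag is
		// reported per transcription result.
		fmt.Println("language:", t.LanguageCode)
		for _, alt := range t.Alternatives {
			fmt.Println("  transcript:", alt.Transcript)
		}
	}

	for _, a := range annotations {
		fmt.Println("text:", a.Text)
		for _, seg := range a.Segments {
			// Confidence and Frames now live on TextSegment rather
			// than TextAnnotation, so per-track values are read here.
			fmt.Printf("  confidence %.2f over %d frames\n",
				seg.Confidence, len(seg.Frames))
		}
	}
}

func main() {
	// Placeholder call with empty inputs; in practice these come from
	// the decoded response of a videos.annotate operation.
	printResults(nil, nil)
}

Callers that previously read Confidence or Frames from GoogleCloudVideointelligenceV1p2beta1TextAnnotation will need to move to the per-segment fields.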
diff --git a/videointelligence/v1beta2/videointelligence-api.json b/videointelligence/v1beta2/videointelligence-api.json
index 5314bb2..4dee910 100644
--- a/videointelligence/v1beta2/videointelligence-api.json
+++ b/videointelligence/v1beta2/videointelligence-api.json
@@ -127,7 +127,7 @@
       }
     }
   },
-  "revision": "20180920",
+  "revision": "20181003",
   "rootUrl": "https://videointelligence.googleapis.com/",
   "schemas": {
     "GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
@@ -943,6 +943,10 @@
             "$ref": "GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative"
           },
           "type": "array"
+        },
+        "languageCode": {
+          "description": "Output only. The\n[BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the\nlanguage in this result. This language code was detected to have the most\nlikelihood of being spoken in the audio.",
+          "type": "string"
         }
       },
       "type": "object"
@@ -1338,18 +1342,6 @@
       "description": "Annotations related to one detected OCR text snippet. This will contain the\ncorresponding text, confidence value, and frame level information for each\ndetection.",
       "id": "GoogleCloudVideointelligenceV1p2beta1_TextAnnotation",
       "properties": {
-        "confidence": {
-          "description": "Confidence for the track of detected text. It is calculated as the highest\nover all frames where OCR detected text appears.",
-          "format": "float",
-          "type": "number"
-        },
-        "frames": {
-          "description": "Information related to the frames where OCR detected text appears.",
-          "items": {
-            "$ref": "GoogleCloudVideointelligenceV1p2beta1_TextFrame"
-          },
-          "type": "array"
-        },
         "segments": {
           "description": "All video segments where OCR detected text appears.",
           "items": {
@@ -1384,6 +1376,18 @@
       "description": "Video segment level annotation results for text detection.",
       "id": "GoogleCloudVideointelligenceV1p2beta1_TextSegment",
       "properties": {
+        "confidence": {
+          "description": "Confidence for the track of detected text. It is calculated as the highest\nover all frames where OCR detected text appears.",
+          "format": "float",
+          "type": "number"
+        },
+        "frames": {
+          "description": "Information related to the frames where OCR detected text appears.",
+          "items": {
+            "$ref": "GoogleCloudVideointelligenceV1p2beta1_TextFrame"
+          },
+          "type": "array"
+        },
         "segment": {
           "$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
           "description": "Video segment where a text snippet was detected."
diff --git a/videointelligence/v1beta2/videointelligence-gen.go b/videointelligence/v1beta2/videointelligence-gen.go
index 325d360..5a9dbe7 100644
--- a/videointelligence/v1beta2/videointelligence-gen.go
+++ b/videointelligence/v1beta2/videointelligence-gen.go
@@ -1621,6 +1621,15 @@
 	// ranked by the recognizer.
 	Alternatives []*GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative `json:"alternatives,omitempty"`
 
+	// LanguageCode: Output only.
+	// The
+	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+	// of the
+	// language in this result. This language code was detected to have the
+	// most
+	// likelihood of being spoken in the audio.
+	LanguageCode string `json:"languageCode,omitempty"`
+
 	// ForceSendFields is a list of field names (e.g. "Alternatives") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
@@ -2443,22 +2452,13 @@
 // each
 // detection.
 type GoogleCloudVideointelligenceV1p2beta1TextAnnotation struct {
-	// Confidence: Confidence for the track of detected text. It is
-	// calculated as the highest
-	// over all frames where OCR detected text appears.
-	Confidence float64 `json:"confidence,omitempty"`
-
-	// Frames: Information related to the frames where OCR detected text
-	// appears.
-	Frames []*GoogleCloudVideointelligenceV1p2beta1TextFrame `json:"frames,omitempty"`
-
 	// Segments: All video segments where OCR detected text appears.
 	Segments []*GoogleCloudVideointelligenceV1p2beta1TextSegment `json:"segments,omitempty"`
 
 	// Text: The detected text.
 	Text string `json:"text,omitempty"`
 
-	// ForceSendFields is a list of field names (e.g. "Confidence") to
+	// ForceSendFields is a list of field names (e.g. "Segments") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
 	// non-interface field appearing in ForceSendFields will be sent to the
@@ -2466,7 +2466,7 @@
 	// used to include empty fields in Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "Confidence") to include in
+	// NullFields is a list of field names (e.g. "Segments") to include in
 	// API requests with the JSON null value. By default, fields with empty
 	// values are omitted from API requests. However, any field with an
 	// empty value appearing in NullFields will be sent to the server as
@@ -2481,20 +2481,6 @@
 	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 }
 
-func (s *GoogleCloudVideointelligenceV1p2beta1TextAnnotation) UnmarshalJSON(data []byte) error {
-	type NoMethod GoogleCloudVideointelligenceV1p2beta1TextAnnotation
-	var s1 struct {
-		Confidence gensupport.JSONFloat64 `json:"confidence"`
-		*NoMethod
-	}
-	s1.NoMethod = (*NoMethod)(s)
-	if err := json.Unmarshal(data, &s1); err != nil {
-		return err
-	}
-	s.Confidence = float64(s1.Confidence)
-	return nil
-}
-
 // GoogleCloudVideointelligenceV1p2beta1TextFrame: Video frame level
 // annotation results for text annotation (OCR).
 // Contains information regarding timestamp and bounding box locations
@@ -2535,10 +2521,19 @@
 // GoogleCloudVideointelligenceV1p2beta1TextSegment: Video segment level
 // annotation results for text detection.
 type GoogleCloudVideointelligenceV1p2beta1TextSegment struct {
+	// Confidence: Confidence for the track of detected text. It is
+	// calculated as the highest
+	// over all frames where OCR detected text appears.
+	Confidence float64 `json:"confidence,omitempty"`
+
+	// Frames: Information related to the frames where OCR detected text
+	// appears.
+	Frames []*GoogleCloudVideointelligenceV1p2beta1TextFrame `json:"frames,omitempty"`
+
 	// Segment: Video segment where a text snippet was detected.
 	Segment *GoogleCloudVideointelligenceV1p2beta1VideoSegment `json:"segment,omitempty"`
 
-	// ForceSendFields is a list of field names (e.g. "Segment") to
+	// ForceSendFields is a list of field names (e.g. "Confidence") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
 	// non-interface field appearing in ForceSendFields will be sent to the
@@ -2546,7 +2541,7 @@
 	// used to include empty fields in Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "Segment") to include in
+	// NullFields is a list of field names (e.g. "Confidence") to include in
 	// API requests with the JSON null value. By default, fields with empty
 	// values are omitted from API requests. However, any field with an
 	// empty value appearing in NullFields will be sent to the server as
@@ -2561,6 +2556,20 @@
 	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 }
 
+func (s *GoogleCloudVideointelligenceV1p2beta1TextSegment) UnmarshalJSON(data []byte) error {
+	type NoMethod GoogleCloudVideointelligenceV1p2beta1TextSegment
+	var s1 struct {
+		Confidence gensupport.JSONFloat64 `json:"confidence"`
+		*NoMethod
+	}
+	s1.NoMethod = (*NoMethod)(s)
+	if err := json.Unmarshal(data, &s1); err != nil {
+		return err
+	}
+	s.Confidence = float64(s1.Confidence)
+	return nil
+}
+
 // GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress:
 // Annotation progress for a single video.
 type GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress struct {
diff --git a/videointelligence/v1p1beta1/videointelligence-api.json b/videointelligence/v1p1beta1/videointelligence-api.json
index 96ea172..fd4ded7 100644
--- a/videointelligence/v1p1beta1/videointelligence-api.json
+++ b/videointelligence/v1p1beta1/videointelligence-api.json
@@ -127,7 +127,7 @@
       }
     }
   },
-  "revision": "20180920",
+  "revision": "20181003",
   "rootUrl": "https://videointelligence.googleapis.com/",
   "schemas": {
     "GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
@@ -933,6 +933,10 @@
             "$ref": "GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative"
           },
           "type": "array"
+        },
+        "languageCode": {
+          "description": "Output only. The\n[BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the\nlanguage in this result. This language code was detected to have the most\nlikelihood of being spoken in the audio.",
+          "type": "string"
         }
       },
       "type": "object"
@@ -941,6 +945,13 @@
       "description": "Config for SPEECH_TRANSCRIPTION.",
       "id": "GoogleCloudVideointelligenceV1p1beta1_SpeechTranscriptionConfig",
       "properties": {
+        "alternativeLanguageCodes": {
+          "description": "*Optional* A list of up to 3 additional\n[BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,\nlisting possible alternative languages of the supplied video.\nSee [Language Support](/speech-to-text/docs/languages)\nfor a list of the currently supported language codes.\nIf alternative languages are listed, transcription result will contain\ntranscription in the most likely language detected, including the main\nlanguage_code. The transcription result will include the language tag\nof the language detected in the video.\nNote: This feature is only supported for Voice Command and Voice Search\nuse cases and performance may vary for other use cases (e.g., phone call\ntranscription).",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        },
         "audioTracks": {
           "description": "*Optional* For file formats, such as MXF or MKV, supporting multiple audio\ntracks, specify up to two tracks. Default: track 0.",
           "items": {
@@ -975,7 +986,7 @@
           "type": "string"
         },
         "maxAlternatives": {
-          "description": "*Optional* Maximum number of recognition hypotheses to be returned.\nSpecifically, the maximum number of `SpeechRecognitionAlternative` messages\nwithin each `SpeechRecognitionResult`. The server may return fewer than\n`max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will\nreturn a maximum of one. If omitted, will return a maximum of one.",
+          "description": "*Optional* Maximum number of recognition hypotheses to be returned.\nSpecifically, the maximum number of `SpeechRecognitionAlternative` messages\nwithin each `SpeechTranscription`. The server may return fewer than\n`max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will\nreturn a maximum of one. If omitted, will return a maximum of one.",
           "format": "int32",
           "type": "integer"
         },
@@ -1410,18 +1421,6 @@
       "description": "Annotations related to one detected OCR text snippet. This will contain the\ncorresponding text, confidence value, and frame level information for each\ndetection.",
       "id": "GoogleCloudVideointelligenceV1p2beta1_TextAnnotation",
       "properties": {
-        "confidence": {
-          "description": "Confidence for the track of detected text. It is calculated as the highest\nover all frames where OCR detected text appears.",
-          "format": "float",
-          "type": "number"
-        },
-        "frames": {
-          "description": "Information related to the frames where OCR detected text appears.",
-          "items": {
-            "$ref": "GoogleCloudVideointelligenceV1p2beta1_TextFrame"
-          },
-          "type": "array"
-        },
         "segments": {
           "description": "All video segments where OCR detected text appears.",
           "items": {
@@ -1456,6 +1455,18 @@
       "description": "Video segment level annotation results for text detection.",
       "id": "GoogleCloudVideointelligenceV1p2beta1_TextSegment",
       "properties": {
+        "confidence": {
+          "description": "Confidence for the track of detected text. It is calculated as the highest\nover all frames where OCR detected text appears.",
+          "format": "float",
+          "type": "number"
+        },
+        "frames": {
+          "description": "Information related to the frames where OCR detected text appears.",
+          "items": {
+            "$ref": "GoogleCloudVideointelligenceV1p2beta1_TextFrame"
+          },
+          "type": "array"
+        },
         "segment": {
           "$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
           "description": "Video segment where a text snippet was detected."
diff --git a/videointelligence/v1p1beta1/videointelligence-gen.go b/videointelligence/v1p1beta1/videointelligence-gen.go
index 2ffbafe..89a4354 100644
--- a/videointelligence/v1p1beta1/videointelligence-gen.go
+++ b/videointelligence/v1p1beta1/videointelligence-gen.go
@@ -1617,6 +1617,15 @@
 	// ranked by the recognizer.
 	Alternatives []*GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative `json:"alternatives,omitempty"`
 
+	// LanguageCode: Output only.
+	// The
+	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+	// of the
+	// language in this result. This language code was detected to have the
+	// most
+	// likelihood of being spoken in the audio.
+	LanguageCode string `json:"languageCode,omitempty"`
+
 	// ForceSendFields is a list of field names (e.g. "Alternatives") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
@@ -1643,6 +1652,27 @@
 // GoogleCloudVideointelligenceV1p1beta1SpeechTranscriptionConfig:
 // Config for SPEECH_TRANSCRIPTION.
 type GoogleCloudVideointelligenceV1p1beta1SpeechTranscriptionConfig struct {
+	// AlternativeLanguageCodes: *Optional* A list of up to 3
+	// additional
+	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language
+	// tags,
+	// listing possible alternative languages of the supplied video.
+	// See [Language Support](/speech-to-text/docs/languages)
+	// for a list of the currently supported language codes.
+	// If alternative languages are listed, transcription result will
+	// contain
+	// transcription in the most likely language detected, including the
+	// main
+	// language_code. The transcription result will include the language
+	// tag
+	// of the language detected in the video.
+	// Note: This feature is only supported for Voice Command and Voice
+	// Search
+	// use cases and performance may vary for other use cases (e.g., phone
+	// call
+	// transcription).
+	AlternativeLanguageCodes []string `json:"alternativeLanguageCodes,omitempty"`
+
 	// AudioTracks: *Optional* For file formats, such as MXF or MKV,
 	// supporting multiple audio
 	// tracks, specify up to two tracks. Default: track 0.
@@ -1711,7 +1741,7 @@
 	// to be returned.
 	// Specifically, the maximum number of `SpeechRecognitionAlternative`
 	// messages
-	// within each `SpeechRecognitionResult`. The server may return fewer
+	// within each `SpeechTranscription`. The server may return fewer
 	// than
 	// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1`
 	// will
@@ -1722,20 +1752,22 @@
 	// speech recognition.
 	SpeechContexts []*GoogleCloudVideointelligenceV1p1beta1SpeechContext `json:"speechContexts,omitempty"`
 
-	// ForceSendFields is a list of field names (e.g. "AudioTracks") to
-	// unconditionally include in API requests. By default, fields with
-	// empty values are omitted from API requests. However, any non-pointer,
-	// non-interface field appearing in ForceSendFields will be sent to the
-	// server regardless of whether the field is empty or not. This may be
-	// used to include empty fields in Patch requests.
+	// ForceSendFields is a list of field names (e.g.
+	// "AlternativeLanguageCodes") to unconditionally include in API
+	// requests. By default, fields with empty values are omitted from API
+	// requests. However, any non-pointer, non-interface field appearing in
+	// ForceSendFields will be sent to the server regardless of whether the
+	// field is empty or not. This may be used to include empty fields in
+	// Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "AudioTracks") to include
-	// in API requests with the JSON null value. By default, fields with
-	// empty values are omitted from API requests. However, any field with
-	// an empty value appearing in NullFields will be sent to the server as
-	// null. It is an error if a field in this list has a non-empty value.
-	// This may be used to include null fields in Patch requests.
+	// NullFields is a list of field names (e.g. "AlternativeLanguageCodes")
+	// to include in API requests with the JSON null value. By default,
+	// fields with empty values are omitted from API requests. However, any
+	// field with an empty value appearing in NullFields will be sent to the
+	// server as null. It is an error if a field in this list has a
+	// non-empty value. This may be used to include null fields in Patch
+	// requests.
 	NullFields []string `json:"-"`
 }
 
@@ -2592,22 +2624,13 @@
 // each
 // detection.
 type GoogleCloudVideointelligenceV1p2beta1TextAnnotation struct {
-	// Confidence: Confidence for the track of detected text. It is
-	// calculated as the highest
-	// over all frames where OCR detected text appears.
-	Confidence float64 `json:"confidence,omitempty"`
-
-	// Frames: Information related to the frames where OCR detected text
-	// appears.
-	Frames []*GoogleCloudVideointelligenceV1p2beta1TextFrame `json:"frames,omitempty"`
-
 	// Segments: All video segments where OCR detected text appears.
 	Segments []*GoogleCloudVideointelligenceV1p2beta1TextSegment `json:"segments,omitempty"`
 
 	// Text: The detected text.
 	Text string `json:"text,omitempty"`
 
-	// ForceSendFields is a list of field names (e.g. "Confidence") to
+	// ForceSendFields is a list of field names (e.g. "Segments") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
 	// non-interface field appearing in ForceSendFields will be sent to the
@@ -2615,7 +2638,7 @@
 	// used to include empty fields in Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "Confidence") to include in
+	// NullFields is a list of field names (e.g. "Segments") to include in
 	// API requests with the JSON null value. By default, fields with empty
 	// values are omitted from API requests. However, any field with an
 	// empty value appearing in NullFields will be sent to the server as
@@ -2630,20 +2653,6 @@
 	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 }
 
-func (s *GoogleCloudVideointelligenceV1p2beta1TextAnnotation) UnmarshalJSON(data []byte) error {
-	type NoMethod GoogleCloudVideointelligenceV1p2beta1TextAnnotation
-	var s1 struct {
-		Confidence gensupport.JSONFloat64 `json:"confidence"`
-		*NoMethod
-	}
-	s1.NoMethod = (*NoMethod)(s)
-	if err := json.Unmarshal(data, &s1); err != nil {
-		return err
-	}
-	s.Confidence = float64(s1.Confidence)
-	return nil
-}
-
 // GoogleCloudVideointelligenceV1p2beta1TextFrame: Video frame level
 // annotation results for text annotation (OCR).
 // Contains information regarding timestamp and bounding box locations
@@ -2684,10 +2693,19 @@
 // GoogleCloudVideointelligenceV1p2beta1TextSegment: Video segment level
 // annotation results for text detection.
 type GoogleCloudVideointelligenceV1p2beta1TextSegment struct {
+	// Confidence: Confidence for the track of detected text. It is
+	// calculated as the highest
+	// over all frames where OCR detected text appears.
+	Confidence float64 `json:"confidence,omitempty"`
+
+	// Frames: Information related to the frames where OCR detected text
+	// appears.
+	Frames []*GoogleCloudVideointelligenceV1p2beta1TextFrame `json:"frames,omitempty"`
+
 	// Segment: Video segment where a text snippet was detected.
 	Segment *GoogleCloudVideointelligenceV1p2beta1VideoSegment `json:"segment,omitempty"`
 
-	// ForceSendFields is a list of field names (e.g. "Segment") to
+	// ForceSendFields is a list of field names (e.g. "Confidence") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
 	// non-interface field appearing in ForceSendFields will be sent to the
@@ -2695,7 +2713,7 @@
 	// used to include empty fields in Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "Segment") to include in
+	// NullFields is a list of field names (e.g. "Confidence") to include in
 	// API requests with the JSON null value. By default, fields with empty
 	// values are omitted from API requests. However, any field with an
 	// empty value appearing in NullFields will be sent to the server as
@@ -2710,6 +2728,20 @@
 	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 }
 
+func (s *GoogleCloudVideointelligenceV1p2beta1TextSegment) UnmarshalJSON(data []byte) error {
+	type NoMethod GoogleCloudVideointelligenceV1p2beta1TextSegment
+	var s1 struct {
+		Confidence gensupport.JSONFloat64 `json:"confidence"`
+		*NoMethod
+	}
+	s1.NoMethod = (*NoMethod)(s)
+	if err := json.Unmarshal(data, &s1); err != nil {
+		return err
+	}
+	s.Confidence = float64(s1.Confidence)
+	return nil
+}
+
 // GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress:
 // Annotation progress for a single video.
 type GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress struct {
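Example (not part of the generated code): a minimal sketch of a v1p1beta1 SPEECH_TRANSCRIPTION request that uses the new AlternativeLanguageCodes field. It assumes Application Default Credentials; the input URI and language choices are placeholders.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	videointelligence "google.golang.org/api/videointelligence/v1p1beta1"
)

func main() {
	ctx := context.Background()

	// Assumes Application Default Credentials are available.
	httpClient, err := google.DefaultClient(ctx, videointelligence.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := videointelligence.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	req := &videointelligence.GoogleCloudVideointelligenceV1p1beta1AnnotateVideoRequest{
		InputUri: "gs://my-bucket/interview.mp4",
		Features: []string{"SPEECH_TRANSCRIPTION"},
		VideoContext: &videointelligence.GoogleCloudVideointelligenceV1p1beta1VideoContext{
			SpeechTranscriptionConfig: &videointelligence.GoogleCloudVideointelligenceV1p1beta1SpeechTranscriptionConfig{
				LanguageCode: "en-US",
				// New in this update: up to three additional BCP-47
				// tags the audio might be in; the detected language is
				// reported back in each SpeechTranscription's
				// LanguageCode field.
				AlternativeLanguageCodes: []string{"es-ES", "fr-FR"},
			},
		},
	}

	op, err := svc.Videos.Annotate(req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("started operation:", op.Name)
}

The transcription results are fetched later from the long-running operation; each result's new LanguageCode field (see the earlier sketch) identifies which of the supplied languages was detected.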