Merge branch 'master' into profiler-test

Change-Id: I4e83680309651ecf22c6dcd6de928ac8b51e2bad
diff --git a/.travis.yml b/.travis.yml
index 59594d4..7dd0be2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -14,6 +14,7 @@
   GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json"
   GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests"
   GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json"
+  GCLOUD_TESTS_GOLANG_KEYRING="projects/dulcet-port-762/locations/us/keyRings/go-integration-test"
   ./run-tests.sh $TRAVIS_COMMIT
 env:
   matrix:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 95c94a4..09ded8f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -31,11 +31,11 @@
 Google Developers Console is required.
 
 After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
-Ensure the project-level **Owner** 
+Ensure the project-level **Owner**
 [IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the
 service account. Alternatively, the account can be granted all of the following roles:
-- **Editor** 
-- **Logs Configuration Writer** 
+- **Editor**
+- **Logs Configuration Writer**
 - **PubSub Admin**
 
 Once you create a project, set the following environment variables to be able to
@@ -43,13 +43,19 @@
 
 - **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
 - **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
-- **GCLOUD_TESTS_API_KEY**: Your API key.
 
-Firestore requires a different project and key:
+Some packages require additional environment variables to be set:
 
-- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: Developers Console project's ID
-  supporting Firestore
-- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
+- firestore
+  - **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: project ID for Firestore.
+  - **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
+- storage
+  - **GCLOUD_TESTS_GOLANG_KEYRING**: The full name of the keyring for the tests, in the
+    form "projects/P/locations/L/keyRings/R".
+- translate
+  - **GCLOUD_TESTS_API_KEY**: API key for using the Translate API.
+- profiler
+  - **GCLOUD_TESTS_GOLANG_ZONE**: Compute Engine zone.
 
 Install the [gcloud command-line tool][gcloudcli] to your machine and use it
 to create some resources used in integration tests.
@@ -80,11 +86,20 @@
 # NOTE: Spanner instances are priced by the node-hour, so you may want to delete
 # the instance after testing with 'gcloud beta spanner instances delete'.
 
-
+# For Storage integration tests:
+# Enable KMS for your project in the Cloud Console.
+# Create a KMS keyring, in the same location as the default location for your project's buckets.
+$ gcloud kms keyrings create MY_KEYRING --location MY_LOCATION
+# Create two keys in the keyring, named key1 and key2.
+$ gcloud kms keys create key1 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption
+$ gcloud kms keys create key2 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption
+# As mentioned above, set the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
+$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/MY_LOCATION/keyRings/MY_KEYRING
+# Authorize Google Cloud Storage to encrypt and decrypt using key1.
+$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
 ```
 
-Once you've set the environment variables, you can run the integration tests by
-running:
+Once you've done the necessary setup, you can run the integration tests by running:
 
 ``` sh
 $ go test -v cloud.google.com/go/...
diff --git a/README.md b/README.md
index 769a731..c762b16 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@
 import "cloud.google.com/go"
 ```
 
-To install the packages on your system,
+To install the packages on your system, *do not clone the repo*. Instead use
 
 ```
 $ go get -u cloud.google.com/go/...
@@ -33,6 +33,73 @@
 
 ## News
 
+_May 18, 2018_
+
+*v0.23.0*
+
+- bigquery: Add DDL stats to query statistics.
+- bigtable:
+  - cbt: Add cells-per-column limit for row lookup.
+  - cbt: Make it possible to combine read filters.
+- dlp: v2beta2 client removed. Use the v2 client instead.
+- firestore, spanner: Fix compilation errors due to protobuf changes.
+
+_May 8, 2018_
+
+*v0.22.0*
+
+- bigtable:
+  - cbt: Support cells per column limit for row read.
+  - bttest: Correctly handle empty RowSet.
+  - Fix ReadModifyWrite operation in emulator.
+  - Fix API path in GetCluster.
+
+- bigquery:
+  - BEHAVIOR CHANGE: Retry on 503 status code.
+  - Add dataset.DeleteWithContents.
+  - Add SchemaUpdateOptions for query jobs.
+  - Add Timeline to QueryStatistics.
+  - Add more stats to ExplainQueryStage.
+  - Support Parquet data format.
+
+- datastore:
+  - Support omitempty for times.
+
+- dlp:
+  - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client,
+  which is now out of beta.
+  - Add v2 client.
+
+- firestore:
+  - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid.
+
+- iam:
+  - Support JWT signing via SignJwt callopt.
+
+- profiler:
+  - BEHAVIOR CHANGE: PollForSerialOutput returns an error when the context is done.
+  - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute.
+  - Avoid returning empty serial port output.
+
+- pubsub:
+  - BEHAVIOR CHANGE: Don't back off during next retryable error once stream is healthy.
+  - BEHAVIOR CHANGE: Don't back off on EOF.
+  - pstest: Support Acknowledge and ModifyAckDeadline RPCs.
+
+- redis:
+  - Add v1 beta Redis client.
+
+- spanner:
+  - Support SessionLabels.
+
+- speech:
+  - Add apiv1beta1 client.
+
+- storage:
+  - BEHAVIOR CHANGE: Retry reads when a retryable error occurs.
+  - Fix delete of object in requester-pays bucket.
+  - Support KMS integration.
+
 _April 9, 2018_
 
 *v0.21.0*
diff --git a/bigquery/bigquery.go b/bigquery/bigquery.go
index 6427a5c..a32f2ba 100644
--- a/bigquery/bigquery.go
+++ b/bigquery/bigquery.go
@@ -147,7 +147,10 @@
 	})
 }
 
-// This is the correct definition of retryable according to the BigQuery team.
+// This is the correct definition of retryable according to the BigQuery team. It
+// also considers 502 ("Bad Gateway") and 503 ("Service Unavailable") errors
+// retryable; these are returned by systems between the client and the BigQuery
+// service.
 func retryableError(err error) bool {
 	e, ok := err.(*googleapi.Error)
 	if !ok {
@@ -157,5 +160,5 @@
 	if len(e.Errors) > 0 {
 		reason = e.Errors[0].Reason
 	}
-	return e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
+	return e.Code == http.StatusServiceUnavailable || e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
 }
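
The broadened retry classification above can be exercised directly. A minimal sketch, assuming it sits in the bigquery package (so the unexported retryableError is reachable); the test name is hypothetical:

```go
package bigquery

import (
	"net/http"
	"testing"

	"google.golang.org/api/googleapi"
)

// A 503 from a system in front of BigQuery is now retried, just like a 502.
func TestRetryableErrorSketch(t *testing.T) {
	for _, tc := range []struct {
		err  error
		want bool
	}{
		{&googleapi.Error{Code: http.StatusServiceUnavailable}, true}, // 503: newly retryable
		{&googleapi.Error{Code: http.StatusBadGateway}, true},         // 502
		{&googleapi.Error{Code: http.StatusBadRequest}, false},        // 400: not retried
	} {
		if got := retryableError(tc.err); got != tc.want {
			t.Errorf("retryableError(%v) = %v, want %v", tc.err, got, tc.want)
		}
	}
}
```
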
diff --git a/bigquery/dataset.go b/bigquery/dataset.go
index 6a4e56d..a993821 100644
--- a/bigquery/dataset.go
+++ b/bigquery/dataset.go
@@ -147,12 +147,21 @@
 	return q, nil
 }
 
-// Delete deletes the dataset.
+// Delete deletes the dataset.  Delete will fail if the dataset is not empty.
 func (d *Dataset) Delete(ctx context.Context) (err error) {
+	return d.deleteInternal(ctx, false)
+}
+
+// DeleteWithContents deletes the dataset, as well as contained resources.
+func (d *Dataset) DeleteWithContents(ctx context.Context) (err error) {
+	return d.deleteInternal(ctx, true)
+}
+
+func (d *Dataset) deleteInternal(ctx context.Context, deleteContents bool) (err error) {
 	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Delete")
 	defer func() { trace.EndSpan(ctx, err) }()
 
-	call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx)
+	call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx).DeleteContents(deleteContents)
 	setClientHeader(call.Header())
 	return call.Do()
 }
@@ -336,6 +345,9 @@
 }
 
 func bqToTable(tr *bq.TableReference, c *Client) *Table {
+	if tr == nil {
+		return nil
+	}
 	return &Table{
 		ProjectID: tr.ProjectId,
 		DatasetID: tr.DatasetId,
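
A minimal usage sketch of the new DeleteWithContents from client code; the project and dataset IDs are placeholders:

```go
package main

import (
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project-id")
	if err != nil {
		log.Fatal(err)
	}
	ds := client.Dataset("scratch_dataset")
	// Delete fails if the dataset still contains tables; DeleteWithContents
	// removes the dataset along with everything inside it.
	if err := ds.DeleteWithContents(ctx); err != nil {
		log.Fatal(err)
	}
}
```
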
diff --git a/bigquery/doc.go b/bigquery/doc.go
index 5269af5..3028294 100644
--- a/bigquery/doc.go
+++ b/bigquery/doc.go
@@ -86,7 +86,7 @@
 
 You can also start the query running and get the results later.
 Create the query as above, but call Run instead of Read. This returns a Job,
-which represents an asychronous operation.
+which represents an asynchronous operation.
 
     job, err := q.Run(ctx)
     if err != nil {
diff --git a/bigquery/integration_test.go b/bigquery/integration_test.go
index 622998e..12bdeba 100644
--- a/bigquery/integration_test.go
+++ b/bigquery/integration_test.go
@@ -34,6 +34,7 @@
 	"cloud.google.com/go/internal"
 	"cloud.google.com/go/internal/pretty"
 	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/internal/uid"
 	"cloud.google.com/go/storage"
 	"golang.org/x/net/context"
 	"google.golang.org/api/googleapi"
@@ -55,8 +56,8 @@
 	testTableExpiration time.Time
 	// BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
 	// with underscores.
-	datasetIDs = testutil.NewUIDSpaceSep("dataset", '_')
-	tableIDs   = testutil.NewUIDSpaceSep("table", '_')
+	datasetIDs = uid.NewSpace("dataset", &uid.Options{Sep: '_'})
+	tableIDs   = uid.NewSpace("table", &uid.Options{Sep: '_'})
 )
 
 // Note: integration tests cannot be run in parallel, because TestIntegration_Location
@@ -105,28 +106,12 @@
 	}
 	testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
 	return func() {
-		if err := deleteDataset(ctx, dataset); err != nil {
+		if err := dataset.DeleteWithContents(ctx); err != nil {
 			log.Printf("could not delete %s", dataset.DatasetID)
 		}
 	}
 }
 
-func deleteDataset(ctx context.Context, ds *Dataset) error {
-	it := ds.Tables(ctx)
-	for {
-		tbl, err := it.Next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			return err
-		}
-		if err := tbl.Delete(ctx); err != nil {
-			return err
-		}
-	}
-	return ds.Delete(ctx)
-}
 func TestIntegration_TableCreate(t *testing.T) {
 	// Check that creating a record field with an empty schema is an error.
 	if client == nil {
@@ -167,7 +152,9 @@
 	if err != nil {
 		t.Fatalf("table.create: Did not expect an error, got: %v", err)
 	}
-	view.Delete(ctx)
+	if err := view.Delete(ctx); err != nil {
+		t.Fatal(err)
+	}
 }
 
 func TestIntegration_TableMetadata(t *testing.T) {
@@ -312,6 +299,28 @@
 	}
 }
 
+func TestIntegration_DatasetDeleteWithContents(t *testing.T) {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	ctx := context.Background()
+	ds := client.Dataset(datasetIDs.New())
+	if err := ds.Create(ctx, nil); err != nil {
+		t.Fatalf("creating dataset %s: %v", ds.DatasetID, err)
+	}
+	table := ds.Table(tableIDs.New())
+	if err := table.Create(ctx, nil); err != nil {
+		t.Fatalf("creating table %s in dataset %s: %v", table.TableID, table.DatasetID, err)
+	}
+	// We expect failure here, since the dataset still contains a table.
+	if err := ds.Delete(ctx); err == nil {
+		t.Fatalf("non-recursive delete of dataset %s succeeded unexpectedly.", ds.DatasetID)
+	}
+	if err := ds.DeleteWithContents(ctx); err != nil {
+		t.Fatalf("recursively deleting dataset %s: %v", ds.DatasetID, err)
+	}
+}
+
 func TestIntegration_DatasetUpdateETags(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
@@ -1570,7 +1579,7 @@
 		} else if !gotErr && test.err {
 			t.Errorf("%+v:\nsucceeded, but want error", test)
 		}
-		view.Delete(ctx)
+		_ = view.Delete(ctx)
 	}
 }
 
diff --git a/bigquery/job.go b/bigquery/job.go
index fc6686d..7dfa786 100644
--- a/bigquery/job.go
+++ b/bigquery/job.go
@@ -190,12 +190,12 @@
 }
 
 // Done reports whether the job has completed.
-// After Done returns true, the Err method will return an error if the job completed unsuccesfully.
+// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
 func (s *JobStatus) Done() bool {
 	return s.State == Done
 }
 
-// Err returns the error that caused the job to complete unsuccesfully (if any).
+// Err returns the error that caused the job to complete unsuccessfully (if any).
 func (s *JobStatus) Err() error {
 	return s.err
 }
@@ -416,9 +416,19 @@
 	// non-legacy SQL queries.
 	Schema Schema
 
+	// Slot-milliseconds consumed by this query job.
+	SlotMillis int64
+
 	// Standard SQL: list of undeclared query parameter names detected during a
 	// dry run validation.
 	UndeclaredQueryParameterNames []string
+
+	// DDL target table.
+	DDLTargetTable *Table
+
+	// DDL Operation performed on the target table.  Used to report how the
+	// query impacted the DDL target table.
+	DDLOperationPerformed string
 }
 
 // ExplainQueryStage describes one stage of a query.
@@ -735,12 +745,15 @@
 		js.Details = &QueryStatistics{
 			BillingTier:                   s.Query.BillingTier,
 			CacheHit:                      s.Query.CacheHit,
+			DDLTargetTable:                bqToTable(s.Query.DdlTargetTable, c),
+			DDLOperationPerformed:         s.Query.DdlOperationPerformed,
 			StatementType:                 s.Query.StatementType,
 			TotalBytesBilled:              s.Query.TotalBytesBilled,
 			TotalBytesProcessed:           s.Query.TotalBytesProcessed,
 			NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
 			QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
 			Schema:                        bqToSchema(s.Query.Schema),
+			SlotMillis:                    s.Query.TotalSlotMs,
 			Timeline:                      timelineFromProto(s.Query.Timeline),
 			ReferencedTables:              tables,
 			UndeclaredQueryParameterNames: names,
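
A short sketch of reading the new DDL and slot-millisecond statistics once a query job finishes; the project, dataset, and SQL below are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project-id")
	if err != nil {
		log.Fatal(err)
	}
	q := client.Query("CREATE TABLE mydataset.t (x INT64)")
	job, err := q.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	status, err := job.Wait(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Query jobs report their statistics in *QueryStatistics.
	if qs, ok := status.Statistics.Details.(*bigquery.QueryStatistics); ok {
		fmt.Println("DDL operation:", qs.DDLOperationPerformed)
		fmt.Println("slot-milliseconds:", qs.SlotMillis)
		if qs.DDLTargetTable != nil {
			fmt.Println("DDL target:", qs.DDLTargetTable.FullyQualifiedName())
		}
	}
}
```
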
diff --git a/bigquery/load.go b/bigquery/load.go
index c09d4cc..d833002 100644
--- a/bigquery/load.go
+++ b/bigquery/load.go
@@ -47,8 +47,8 @@
 	// Custom encryption configuration (e.g., Cloud KMS keys).
 	DestinationEncryptionConfig *EncryptionConfig
 
-	// SchemaUpdateOptions allows the schema of the destination table to be
-	// updated as a side effect of the load job.
+	// Allows the schema of the destination table to be updated as a side effect of
+	// the load job.
 	SchemaUpdateOptions []string
 }
 
diff --git a/bigquery/query.go b/bigquery/query.go
index b819be4..96c2893 100644
--- a/bigquery/query.go
+++ b/bigquery/query.go
@@ -119,6 +119,10 @@
 
 	// Custom encryption configuration (e.g., Cloud KMS keys).
 	DestinationEncryptionConfig *EncryptionConfig
+
+	// Allows the schema of the destination table to be updated as a side effect of
+	// the query job.
+	SchemaUpdateOptions []string
 }
 
 func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
@@ -131,6 +135,7 @@
 		MaximumBytesBilled:                 qc.MaxBytesBilled,
 		TimePartitioning:                   qc.TimePartitioning.toBQ(),
 		DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(),
+		SchemaUpdateOptions:                qc.SchemaUpdateOptions,
 	}
 	if len(qc.TableDefinitions) > 0 {
 		qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
@@ -189,16 +194,18 @@
 func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
 	qq := q.Query
 	qc := &QueryConfig{
-		Labels:            q.Labels,
-		DryRun:            q.DryRun,
-		Q:                 qq.Query,
-		CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
-		WriteDisposition:  TableWriteDisposition(qq.WriteDisposition),
-		AllowLargeResults: qq.AllowLargeResults,
-		Priority:          QueryPriority(qq.Priority),
-		MaxBytesBilled:    qq.MaximumBytesBilled,
-		UseLegacySQL:      qq.UseLegacySql == nil || *qq.UseLegacySql,
-		TimePartitioning:  bqToTimePartitioning(qq.TimePartitioning),
+		Labels:                      q.Labels,
+		DryRun:                      q.DryRun,
+		Q:                           qq.Query,
+		CreateDisposition:           TableCreateDisposition(qq.CreateDisposition),
+		WriteDisposition:            TableWriteDisposition(qq.WriteDisposition),
+		AllowLargeResults:           qq.AllowLargeResults,
+		Priority:                    QueryPriority(qq.Priority),
+		MaxBytesBilled:              qq.MaximumBytesBilled,
+		UseLegacySQL:                qq.UseLegacySql == nil || *qq.UseLegacySql,
+		TimePartitioning:            bqToTimePartitioning(qq.TimePartitioning),
+		DestinationEncryptionConfig: bqToEncryptionConfig(qq.DestinationEncryptionConfiguration),
+		SchemaUpdateOptions:         qq.SchemaUpdateOptions,
 	}
 	qc.UseStandardSQL = !qc.UseLegacySQL
 
diff --git a/bigquery/query_test.go b/bigquery/query_test.go
index 4f4ed54..bfeb3ff 100644
--- a/bigquery/query_test.go
+++ b/bigquery/query_test.go
@@ -352,6 +352,8 @@
 	query.DefaultDatasetID = "def-dataset-id"
 	query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
 	query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"}
+	query.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
+
 	// Note: Other configuration fields are tested in other tests above.
 	// A lot of that can be consolidated once Client.Copy is gone.
 
@@ -367,6 +369,7 @@
 				UseLegacySql:                       &pfalse,
 				TimePartitioning:                   &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
 				DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
+				SchemaUpdateOptions:                []string{"ALLOW_FIELD_ADDITION"},
 			},
 		},
 		JobReference: &bq.JobReference{
diff --git a/bigtable/admin.go b/bigtable/admin.go
index 2d81b32..397e44d 100644
--- a/bigtable/admin.go
+++ b/bigtable/admin.go
@@ -30,7 +30,7 @@
 	"github.com/golang/protobuf/ptypes"
 	durpb "github.com/golang/protobuf/ptypes/duration"
 	"golang.org/x/net/context"
-	"google.golang.org/api/cloudresourcemanager/v1"
+	cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 	gtransport "google.golang.org/api/transport/grpc"
@@ -342,14 +342,14 @@
 	return longrunning.InternalNewOperation(ac.lroClient, op).Wait(ctx, &resp)
 }
 
-// Returns a SnapshotIterator for iterating over the snapshots in a cluster.
+// Snapshots returns a SnapshotIterator for iterating over the snapshots in a cluster.
 // To list snapshots across all of the clusters in the instance specify "-" as the cluster.
 //
-// This is a private alpha release of Cloud Bigtable snapshots. This feature
-// is not currently available to most Cloud Bigtable customers. This feature
-// might be changed in backward-incompatible ways and is not recommended for
-// production use. It is not subject to any SLA or deprecation policy.
-func (ac *AdminClient) ListSnapshots(ctx context.Context, cluster string) *SnapshotIterator {
+// This is a private alpha release of Cloud Bigtable snapshots. This feature is not
+// currently available to most Cloud Bigtable customers. This feature might be
+// changed in backward-incompatible ways and is not recommended for production use.
+// It is not subject to any SLA or deprecation policy.
+func (ac *AdminClient) Snapshots(ctx context.Context, cluster string) *SnapshotIterator {
 	ctx = mergeOutgoingMetadata(ctx, ac.md)
 	prefix := ac.instancePrefix()
 	clusterPath := prefix + "/clusters/" + cluster
@@ -392,7 +392,7 @@
 	nameParts := strings.Split(snapshot.Name, "/")
 	name := nameParts[len(nameParts)-1]
 	tablePathParts := strings.Split(snapshot.SourceTable.Name, "/")
-	tableId := tablePathParts[len(tablePathParts)-1]
+	tableID := tablePathParts[len(tablePathParts)-1]
 
 	createTime, err := ptypes.Timestamp(snapshot.CreateTime)
 	if err != nil {
@@ -406,7 +406,7 @@
 
 	return &SnapshotInfo{
 		Name:        name,
-		SourceTable: tableId,
+		SourceTable: tableID,
 		DataSize:    snapshot.DataSizeBytes,
 		CreateTime:  createTime,
 		DeleteTime:  deleteTime,
@@ -710,9 +710,9 @@
 }
 
 // DeleteInstance deletes an instance from the project.
-func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error {
+func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceID string) error {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
-	req := &btapb.DeleteInstanceRequest{Name: "projects/" + iac.project + "/instances/" + instanceId}
+	req := &btapb.DeleteInstanceRequest{Name: "projects/" + iac.project + "/instances/" + instanceID}
 	_, err := iac.iClient.DeleteInstance(ctx, req)
 	return err
 }
@@ -748,10 +748,10 @@
 }
 
 // InstanceInfo returns information about an instance.
-func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceId string) (*InstanceInfo, error) {
+func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceID string) (*InstanceInfo, error) {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
 	req := &btapb.GetInstanceRequest{
-		Name: "projects/" + iac.project + "/instances/" + instanceId,
+		Name: "projects/" + iac.project + "/instances/" + instanceID,
 	}
 	res, err := iac.iClient.GetInstance(ctx, req)
 	if err != nil {
@@ -821,18 +821,18 @@
 // is not currently available to most Cloud Bigtable customers. This feature
 // might be changed in backward-incompatible ways and is not recommended for
 // production use. It is not subject to any SLA or deprecation policy.
-func (iac *InstanceAdminClient) DeleteCluster(ctx context.Context, instanceId, clusterId string) error {
+func (iac *InstanceAdminClient) DeleteCluster(ctx context.Context, instanceID, clusterID string) error {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
-	req := &btapb.DeleteClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId}
+	req := &btapb.DeleteClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters/" + clusterID}
 	_, err := iac.iClient.DeleteCluster(ctx, req)
 	return err
 }
 
 // UpdateCluster updates attributes of a cluster
-func (iac *InstanceAdminClient) UpdateCluster(ctx context.Context, instanceId, clusterId string, serveNodes int32) error {
+func (iac *InstanceAdminClient) UpdateCluster(ctx context.Context, instanceID, clusterID string, serveNodes int32) error {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
 	cluster := &btapb.Cluster{
-		Name:       "projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId,
+		Name:       "projects/" + iac.project + "/instances/" + instanceID + "/clusters/" + clusterID,
 		ServeNodes: serveNodes}
 	lro, err := iac.iClient.UpdateCluster(ctx, cluster)
 	if err != nil {
@@ -842,9 +842,9 @@
 }
 
 // Clusters lists the clusters in an instance.
-func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceId string) ([]*ClusterInfo, error) {
+func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceID string) ([]*ClusterInfo, error) {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
-	req := &btapb.ListClustersRequest{Parent: "projects/" + iac.project + "/instances/" + instanceId}
+	req := &btapb.ListClustersRequest{Parent: "projects/" + iac.project + "/instances/" + instanceID}
 	res, err := iac.iClient.ListClusters(ctx, req)
 	if err != nil {
 		return nil, err
@@ -867,7 +867,7 @@
 // GetCluster fetches a cluster in an instance
 func (iac *InstanceAdminClient) GetCluster(ctx context.Context, instanceID, clusterID string) (*ClusterInfo, error) {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
-	req := &btapb.GetClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters" + clusterID}
+	req := &btapb.GetClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters/" + clusterID}
 	c, err := iac.iClient.GetCluster(ctx, req)
 	if err != nil {
 		return nil, err
diff --git a/bigtable/admin_test.go b/bigtable/admin_test.go
index 0528441..fd6a198 100644
--- a/bigtable/admin_test.go
+++ b/bigtable/admin_test.go
@@ -15,18 +15,17 @@
 package bigtable
 
 import (
+	"fmt"
 	"math"
 	"sort"
+	"strings"
 	"testing"
 	"time"
 
 	"cloud.google.com/go/internal/testutil"
-
-	"fmt"
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
 	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
-	"strings"
 )
 
 func TestAdminIntegration(t *testing.T) {
@@ -102,7 +101,7 @@
 		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
 	}
 
-	adminClient.WaitForReplication(ctx, "mytable")
+	must(adminClient.WaitForReplication(ctx, "mytable"))
 
 	if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
 		t.Fatalf("Deleting table: %v", err)
@@ -172,13 +171,13 @@
 	}
 
 	var gotRowCount int
-	tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
+	must(tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
 		gotRowCount += 1
 		if !strings.HasPrefix(row.Key(), "b") {
 			t.Errorf("Invalid row after dropping range: %v", row)
 		}
 		return true
-	})
+	}))
 	if gotRowCount != 5 {
 		t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5)
 	}
@@ -274,7 +273,7 @@
 	list := func(cluster string) ([]*SnapshotInfo, error) {
 		infos := []*SnapshotInfo(nil)
 
-		it := adminClient.ListSnapshots(ctx, cluster)
+		it := adminClient.Snapshots(ctx, cluster)
 		for {
 			s, err := it.Next()
 			if err == iterator.Done {
diff --git a/bigtable/bttest/inmem.go b/bigtable/bttest/inmem.go
index c925e25..92fcd0c 100644
--- a/bigtable/bttest/inmem.go
+++ b/bigtable/bttest/inmem.go
@@ -317,7 +317,8 @@
 		return true
 	}
 
-	if req.Rows != nil {
+	if req.Rows != nil &&
+		len(req.Rows.RowKeys)+len(req.Rows.RowRanges) > 0 {
 		// Add the explicitly given keys
 		for _, key := range req.Rows.RowKeys {
 			k := string(key)
@@ -698,8 +699,7 @@
 		}
 		r.mu.Unlock()
 	}
-	stream.Send(res)
-	return nil
+	return stream.Send(res)
 }
 
 func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) {
@@ -861,12 +861,13 @@
 	if !ok {
 		return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName)
 	}
-	updates := make(map[string]cell) // copy of updated cells; keyed by full column name
 
 	fs := tbl.columnFamilies()
 
 	rowKey := string(req.RowKey)
 	r := tbl.mutableRow(rowKey)
+	resultRow := newRow(rowKey) // copy of updated cells
+
 	// This must be done before the row lock, acquired below, is released.
 	r.mu.Lock()
 	defer r.mu.Unlock()
@@ -914,35 +915,37 @@
 			binary.BigEndian.PutUint64(val[:], uint64(v))
 			newCell = cell{ts: ts, value: val[:]}
 		}
-		key := strings.Join([]string{fam, col}, ":")
-		updates[key] = newCell
+
+		// Store the new cell
 		f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell)
+
+		// Store a copy for the result row
+		resultFamily := resultRow.getOrCreateFamily(fam, fs[fam].order)
+		resultFamily.cellsByColumn(col)           // create the column
+		resultFamily.cells[col] = []cell{newCell} // overwrite the cells
 	}
 
+	// Build the response using the result row
 	res := &btpb.Row{
-		Key: req.RowKey,
+		Key:      req.RowKey,
+		Families: make([]*btpb.Family, len(resultRow.families)),
 	}
-	for col, cell := range updates {
-		i := strings.Index(col, ":")
-		fam, qual := col[:i], col[i+1:]
-		var f *btpb.Family
-		for _, ff := range res.Families {
-			if ff.Name == fam {
-				f = ff
-				break
+
+	for i, family := range resultRow.sortedFamilies() {
+		res.Families[i] = &btpb.Family{
+			Name:    family.name,
+			Columns: make([]*btpb.Column, len(family.colNames)),
+		}
+
+		for j, colName := range family.colNames {
+			res.Families[i].Columns[j] = &btpb.Column{
+				Qualifier: []byte(colName),
+				Cells: []*btpb.Cell{{
+					TimestampMicros: family.cells[colName][0].ts,
+					Value:           family.cells[colName][0].value,
+				}},
 			}
 		}
-		if f == nil {
-			f = &btpb.Family{Name: fam}
-			res.Families = append(res.Families, f)
-		}
-		f.Columns = append(f.Columns, &btpb.Column{
-			Qualifier: []byte(qual),
-			Cells: []*btpb.Cell{{
-				TimestampMicros: cell.ts,
-				Value:           cell.value,
-			}},
-		})
 	}
 	return &btpb.ReadModifyWriteRowResponse{Row: res}, nil
 }
@@ -1264,8 +1267,8 @@
 type family struct {
 	name     string            // Column family name
 	order    uint64            // Creation order of column family
-	colNames []string          // Collumn names are sorted in lexicographical ascending order
-	cells    map[string][]cell // Keyed by collumn name; cells are in descending timestamp order
+	colNames []string          // Column names are sorted in lexicographical ascending order
+	cells    map[string][]cell // Keyed by column name; cells are in descending timestamp order
 }
 
 type byCreationOrder []*family
diff --git a/bigtable/bttest/inmem_test.go b/bigtable/bttest/inmem_test.go
index 8b5c6d8..e63da22 100644
--- a/bigtable/bttest/inmem_test.go
+++ b/bigtable/bttest/inmem_test.go
@@ -23,6 +23,8 @@
 	"testing"
 	"time"
 
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
 	"golang.org/x/net/context"
 	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
 	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
@@ -99,7 +101,9 @@
 					RowKey:    []byte(fmt.Sprint(rand.Intn(100))),
 					Mutations: ms(),
 				}
-				s.MutateRow(ctx, req)
+				if _, err := s.MutateRow(ctx, req); err != nil {
+					panic(err) // can't use t.Fatal in goroutine
+				}
 			}
 		}()
 		wg.Add(1)
@@ -548,7 +552,9 @@
 		}
 	}
 	for i := count; i > 0; i-- {
-		s.ReadModifyWriteRow(ctx, rmw(i))
+		if _, err := s.ReadModifyWriteRow(ctx, rmw(i)); err != nil {
+			t.Fatal(err)
+		}
 	}
 	req = &btpb.ReadRowsRequest{
 		TableName: tblInfo.Name,
@@ -621,6 +627,87 @@
 	}
 }
 
+func TestServer_ReadModifyWriteRow(t *testing.T) {
+	s := &server{
+		tables: make(map[string]*table),
+	}
+
+	ctx := context.Background()
+	newTbl := btapb.Table{
+		ColumnFamilies: map[string]*btapb.ColumnFamily{
+			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
+		},
+	}
+	tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
+	if err != nil {
+		t.Fatalf("Creating table: %v", err)
+	}
+
+	req := &btpb.ReadModifyWriteRowRequest{
+		TableName: tbl.Name,
+		RowKey:    []byte("row-key"),
+		Rules: []*btpb.ReadModifyWriteRule{
+			{
+				FamilyName:      "cf",
+				ColumnQualifier: []byte("q1"),
+				Rule: &btpb.ReadModifyWriteRule_AppendValue{
+					AppendValue: []byte("a"),
+				},
+			},
+			// multiple ops for same cell
+			{
+				FamilyName:      "cf",
+				ColumnQualifier: []byte("q1"),
+				Rule: &btpb.ReadModifyWriteRule_AppendValue{
+					AppendValue: []byte("b"),
+				},
+			},
+			// different cell whose qualifier should sort before the prior rules
+			{
+				FamilyName:      "cf",
+				ColumnQualifier: []byte("q0"),
+				Rule: &btpb.ReadModifyWriteRule_IncrementAmount{
+					IncrementAmount: 1,
+				},
+			},
+		},
+	}
+
+	got, err := s.ReadModifyWriteRow(ctx, req)
+
+	if err != nil {
+		t.Fatalf("ReadModifyWriteRow error: %v", err)
+	}
+
+	want := &btpb.ReadModifyWriteRowResponse{
+		Row: &btpb.Row{
+			Key: []byte("row-key"),
+			Families: []*btpb.Family{{
+				Name: "cf",
+				Columns: []*btpb.Column{
+					{
+						Qualifier: []byte("q0"),
+						Cells: []*btpb.Cell{{
+							Value: []byte{0, 0, 0, 0, 0, 0, 0, 1},
+						}},
+					},
+					{
+						Qualifier: []byte("q1"),
+						Cells: []*btpb.Cell{{
+							Value: []byte("ab"),
+						}},
+					},
+				},
+			}},
+		},
+	}
+
+	diff := cmp.Diff(got, want, cmpopts.IgnoreFields(btpb.Cell{}, "TimestampMicros"))
+	if diff != "" {
+		t.Errorf("unexpected response: %s", diff)
+	}
+}
+
 // helper function to populate table data
 func populateTable(ctx context.Context, s *server) (*btapb.Table, error) {
 	newTbl := btapb.Table{
diff --git a/bigtable/cmd/cbt/cbt.go b/bigtable/cmd/cbt/cbt.go
index 2035647..8dcaa6c 100644
--- a/bigtable/cmd/cbt/cbt.go
+++ b/bigtable/cmd/cbt/cbt.go
@@ -184,7 +184,7 @@
 
 For convenience, values of the -project, -instance, -creds,
 -admin-endpoint and -data-endpoint flags may be specified in
-` + cbtconfig.Filename() + ` in this format:
+~/.cbtrc in this format:
 	project = my-project-123
 	instance = my-instance
 	creds = path-to-account-key.json
@@ -332,7 +332,10 @@
 		Name: "lookup",
 		Desc: "Read from a single row",
 		do:   doLookup,
-		Usage: "cbt lookup <table> <row> [app-profile=<app profile id>]\n" +
+		Usage: "cbt lookup <table> <row> [columns=<family:qualifier>,...] [cells-per-column=<n>] " +
+			"[app-profile=<app profile id>]\n" +
+			"  columns                          Read only these columns. Format <column-family>:<column-qualifier>, comma-separated\n" +
+			"  cells-per-column=<n> 			Read only this many cells per column\n" +
 			"  app-profile=<app profile id>		The app profile id to use for the request (replication alpha)\n",
 		Required: cbtconfig.ProjectAndInstanceRequired,
 	},
@@ -356,12 +359,15 @@
 		Desc: "Read rows",
 		do:   doRead,
 		Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>]" +
-			" [regex=<regex>] [count=<n>] [app-profile=<app profile id>]\n" +
+			" [regex=<regex>] [columns=<family:qualifier>,...] [count=<n>] [cells-per-column=<n>]" +
+			" [app-profile=<app profile id>]\n" +
 			"  start=<row>		Start reading at this row\n" +
 			"  end=<row>		Stop reading before this row\n" +
 			"  prefix=<prefix>	Read rows with this prefix\n" +
 			"  regex=<regex> 	Read rows with keys matching this regex\n" +
+			"  columns          Read only these columns. Format <column-family>:<column-qualifier>, comma-separated\n" +
 			"  count=<n>		Read only this many rows\n" +
+			"  cells-per-column=<n>	Read only this many cells per column\n" +
 			"  app-profile=<app profile id>		The app profile id to use for the request (replication alpha)\n",
 		Required: cbtconfig.ProjectAndInstanceRequired,
 	},
@@ -467,19 +473,16 @@
 	}
 
 	tblConf := bigtable.TableConf{TableID: args[0]}
-	for _, arg := range args[1:] {
-		i := strings.Index(arg, "=")
-		if i < 0 {
-			log.Fatalf("Bad arg %q", arg)
-		}
-		key, val := arg[:i], arg[i+1:]
+	parsed, err := parseArgs(args[1:], []string{"families", "splits"})
+	if err != nil {
+		log.Fatal(err)
+	}
+	for key, val := range parsed {
 		chunks, err := csv.NewReader(strings.NewReader(val)).Read()
 		if err != nil {
-			log.Fatalf("Invalid families arg format: %v", err)
+			log.Fatalf("Invalid %s arg format: %v", key, err)
 		}
 		switch key {
-		default:
-			log.Fatalf("Unknown arg key %q", key)
 		case "families":
 			tblConf.Families = make(map[string]bigtable.GCPolicy)
 			for _, family := range chunks {
@@ -581,21 +584,14 @@
 	}
 
 	numNodes := int64(0)
-	var err error
-	for _, arg := range args[1:] {
-		i := strings.Index(arg, "=")
-		if i < 0 {
-			log.Fatalf("Bad arg %q", arg)
-		}
-		key, val := arg[:i], arg[i+1:]
-		switch key {
-		default:
-			log.Fatalf("Unknown arg key %q", key)
-		case "num-nodes":
-			numNodes, err = strconv.ParseInt(val, 0, 32)
-			if err != nil {
-				log.Fatalf("Bad num-nodes %q: %v", val, err)
-			}
+	parsed, err := parseArgs(args[1:], []string{"num-nodes"})
+	if err != nil {
+		log.Fatal(err)
+	}
+	if val, ok := parsed["num-nodes"]; ok {
+		numNodes, err = strconv.ParseInt(val, 0, 32)
+		if err != nil {
+			log.Fatalf("Bad num-nodes %q: %v", val, err)
 		}
 	}
 	if numNodes > 0 {
@@ -850,19 +846,40 @@
 
 func doLookup(ctx context.Context, args ...string) {
 	if len(args) < 2 {
-		log.Fatalf("usage: cbt lookup <table> <row> [app-profile=<app profile id>]")
+		log.Fatalf("usage: cbt lookup <table> <row> [columns=<family:qualifier>...] [cells-per-column=<n>] " +
+			"[app-profile=<app profile id>]")
 	}
-	var appProfile string
-	if len(args) > 2 {
-		i := strings.Index(args[2], "=")
-		if i < 0 {
-			log.Fatalf("Bad arg %q", args[2])
+
+	parsed, err := parseArgs(args[2:], []string{"columns", "cells-per-column", "app-profile"})
+	if err != nil {
+		log.Fatal(err)
+	}
+	var opts []bigtable.ReadOption
+	var filters []bigtable.Filter
+	if cellsPerColumn := parsed["cells-per-column"]; cellsPerColumn != "" {
+		n, err := strconv.Atoi(cellsPerColumn)
+		if err != nil {
+			log.Fatalf("Bad number of cells per column %q: %v", cellsPerColumn, err)
 		}
-		appProfile = strings.Split(args[2], "=")[1]
+		filters = append(filters, bigtable.LatestNFilter(n))
 	}
+	if columns := parsed["columns"]; columns != "" {
+		columnFilters, err := parseColumnsFilter(columns)
+		if err != nil {
+			log.Fatal(err)
+		}
+		filters = append(filters, columnFilters)
+	}
+
+	if len(filters) > 1 {
+		opts = append(opts, bigtable.RowFilter(bigtable.ChainFilters(filters...)))
+	} else if len(filters) == 1 {
+		opts = append(opts, bigtable.RowFilter(filters[0]))
+	}
+
 	table, row := args[0], args[1]
-	tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(table)
-	r, err := tbl.ReadRow(ctx, row)
+	tbl := getClient(bigtable.ClientConfig{AppProfile: parsed["app-profile"]}).Open(table)
+	r, err := tbl.ReadRow(ctx, row, opts...)
 	if err != nil {
 		log.Fatalf("Reading row: %v", err)
 	}
@@ -982,22 +999,15 @@
 		log.Fatalf("usage: cbt read <table> [args ...]")
 	}
 
-	parsed := make(map[string]string)
-	for _, arg := range args[1:] {
-		i := strings.Index(arg, "=")
-		if i < 0 {
-			log.Fatalf("Bad arg %q", arg)
-		}
-		key, val := arg[:i], arg[i+1:]
-		switch key {
-		default:
-			log.Fatalf("Unknown arg key %q", key)
-		case "limit":
-			// Be nicer; we used to support this, but renamed it to "end".
-			log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
-		case "start", "end", "prefix", "count", "regex", "app-profile":
-			parsed[key] = val
-		}
+	parsed, err := parseArgs(args[1:], []string{
+		"start", "end", "prefix", "columns", "count", "cells-per-column", "regex", "app-profile", "limit",
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	if _, ok := parsed["limit"]; ok {
+		// Be nicer; we used to support this, but renamed it to "end".
+		log.Fatal("Unknown arg key 'limit'; did you mean 'end'?")
 	}
 	if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" {
 		log.Fatal(`"start"/"end" may not be mixed with "prefix"`)
@@ -1021,13 +1031,35 @@
 		}
 		opts = append(opts, bigtable.LimitRows(n))
 	}
+
+	var filters []bigtable.Filter
+	if cellsPerColumn := parsed["cells-per-column"]; cellsPerColumn != "" {
+		n, err := strconv.Atoi(cellsPerColumn)
+		if err != nil {
+			log.Fatalf("Bad number of cells per column %q: %v", cellsPerColumn, err)
+		}
+		filters = append(filters, bigtable.LatestNFilter(n))
+	}
 	if regex := parsed["regex"]; regex != "" {
-		opts = append(opts, bigtable.RowFilter(bigtable.RowKeyFilter(regex)))
+		filters = append(filters, bigtable.RowKeyFilter(regex))
+	}
+	if columns := parsed["columns"]; columns != "" {
+		columnFilters, err := parseColumnsFilter(columns)
+		if err != nil {
+			log.Fatal(err)
+		}
+		filters = append(filters, columnFilters)
+	}
+
+	if len(filters) > 1 {
+		opts = append(opts, bigtable.RowFilter(bigtable.ChainFilters(filters...)))
+	} else if len(filters) == 1 {
+		opts = append(opts, bigtable.RowFilter(filters[0]))
 	}
 
 	// TODO(dsymonds): Support filters.
 	tbl := getClient(bigtable.ClientConfig{AppProfile: parsed["app-profile"]}).Open(args[0])
-	err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
+	err = tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
 		printRow(r)
 		return true
 	}, opts...)
@@ -1185,25 +1217,19 @@
 	tableName := args[2]
 	ttl := bigtable.DefaultSnapshotDuration
 
-	for _, arg := range args[3:] {
-		i := strings.Index(arg, "=")
-		if i < 0 {
-			log.Fatalf("Bad arg %q", arg)
-		}
-		key, val := arg[:i], arg[i+1:]
-		switch key {
-		default:
-			log.Fatalf("Unknown arg key %q", key)
-		case "ttl":
-			var err error
-			ttl, err = parseDuration(val)
-			if err != nil {
-				log.Fatalf("Invalid snapshot ttl value %q: %v", val, err)
-			}
+	parsed, err := parseArgs(args[3:], []string{"ttl"})
+	if err != nil {
+		log.Fatal(err)
+	}
+	if val, ok := parsed["ttl"]; ok {
+		var err error
+		ttl, err = parseDuration(val)
+		if err != nil {
+			log.Fatalf("Invalid snapshot ttl value %q: %v", val, err)
 		}
 	}
 
-	err := getAdminClient().SnapshotTable(ctx, tableName, clusterName, snapshotName, ttl)
+	err = getAdminClient().SnapshotTable(ctx, tableName, clusterName, snapshotName, ttl)
 	if err != nil {
 		log.Fatalf("Failed to create Snapshot: %v", err)
 	}
@@ -1222,7 +1248,7 @@
 		cluster = args[0]
 	}
 
-	it := getAdminClient().ListSnapshots(ctx, cluster)
+	it := getAdminClient().Snapshots(ctx, cluster)
 
 	tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
 	fmt.Fprintf(tw, "Snapshot\tSource Table\tCreated At\tExpires At\n")
@@ -1321,3 +1347,71 @@
 func doVersion(ctx context.Context, args ...string) {
 	fmt.Printf("%s %s %s\n", version, revision, revisionDate)
 }
+
+// parseArgs takes a slice of arguments of the form key=value and returns a map from
+// key to value. It returns an error if an argument is malformed or a key is not in
+// the valid slice.
+func parseArgs(args []string, valid []string) (map[string]string, error) {
+	parsed := make(map[string]string)
+	for _, arg := range args {
+		i := strings.Index(arg, "=")
+		if i < 0 {
+			return nil, fmt.Errorf("Bad arg %q", arg)
+		}
+		key, val := arg[:i], arg[i+1:]
+		if !stringInSlice(key, valid) {
+			return nil, fmt.Errorf("Unknown arg key %q", key)
+		}
+		parsed[key] = val
+	}
+	return parsed, nil
+}
+
+func stringInSlice(s string, list []string) bool {
+	for _, e := range list {
+		if s == e {
+			return true
+		}
+	}
+	return false
+}
+
+func parseColumnsFilter(columns string) (bigtable.Filter, error) {
+	splitColumns := strings.FieldsFunc(columns, func(c rune) bool { return c == ',' })
+	if len(splitColumns) == 1 {
+		filter, err := columnFilter(splitColumns[0])
+		if err != nil {
+			return nil, err
+		}
+		return filter, nil
+	} else {
+		var columnFilters []bigtable.Filter
+		for _, column := range splitColumns {
+			filter, err := columnFilter(column)
+			if err != nil {
+				return nil, err
+			}
+			columnFilters = append(columnFilters, filter)
+		}
+		return bigtable.InterleaveFilters(columnFilters...), nil
+	}
+}
+
+func columnFilter(column string) (bigtable.Filter, error) {
+	splitColumn := strings.Split(column, ":")
+	if len(splitColumn) == 1 {
+		return bigtable.ColumnFilter(splitColumn[0]), nil
+	} else if len(splitColumn) == 2 {
+		if strings.HasSuffix(column, ":") {
+			return bigtable.FamilyFilter(splitColumn[0]), nil
+		} else if strings.HasPrefix(column, ":") {
+			return bigtable.ColumnFilter(splitColumn[1]), nil
+		} else {
+			familyFilter := bigtable.FamilyFilter(splitColumn[0])
+			qualifierFilter := bigtable.ColumnFilter(splitColumn[1])
+			return bigtable.ChainFilters(familyFilter, qualifierFilter), nil
+		}
+	} else {
+		return nil, fmt.Errorf("Bad format for column %q", column)
+	}
+}
diff --git a/bigtable/cmd/cbt/cbt_test.go b/bigtable/cmd/cbt/cbt_test.go
index 2616fb4..fd060a4 100644
--- a/bigtable/cmd/cbt/cbt_test.go
+++ b/bigtable/cmd/cbt/cbt_test.go
@@ -19,6 +19,7 @@
 	"time"
 
 	"cloud.google.com/go/bigtable"
+	"cloud.google.com/go/internal/testutil"
 	"github.com/google/go-cmp/cmp"
 )
 
@@ -111,3 +112,113 @@
 		}
 	}
 }
+
+func TestParseArgs(t *testing.T) {
+	got, err := parseArgs([]string{"a=1", "b=2"}, []string{"a", "b"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := map[string]string{"a": "1", "b": "2"}
+	if !testutil.Equal(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+
+	if _, err := parseArgs([]string{"a1"}, []string{"a1"}); err == nil {
+		t.Error("malformed: got nil, want error")
+	}
+	if _, err := parseArgs([]string{"a=1"}, []string{"b"}); err == nil {
+		t.Error("invalid: got nil, want error")
+	}
+}
+
+func TestParseColumnsFilter(t *testing.T) {
+	tests := []struct {
+		in   string
+		out  bigtable.Filter
+		fail bool
+	}{
+		{
+			in:  "columnA",
+			out: bigtable.ColumnFilter("columnA"),
+		},
+		{
+			in:  "familyA:columnA",
+			out: bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")),
+		},
+		{
+			in:  "columnA,columnB",
+			out: bigtable.InterleaveFilters(bigtable.ColumnFilter("columnA"), bigtable.ColumnFilter("columnB")),
+		},
+		{
+			in: "familyA:columnA,columnB",
+			out: bigtable.InterleaveFilters(
+				bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")),
+				bigtable.ColumnFilter("columnB"),
+			),
+		},
+		{
+			in: "columnA,familyB:columnB",
+			out: bigtable.InterleaveFilters(
+				bigtable.ColumnFilter("columnA"),
+				bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")),
+			),
+		},
+		{
+			in: "familyA:columnA,familyB:columnB",
+			out: bigtable.InterleaveFilters(
+				bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")),
+				bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")),
+			),
+		},
+		{
+			in:  "familyA:",
+			out: bigtable.FamilyFilter("familyA"),
+		},
+		{
+			in:  ":columnA",
+			out: bigtable.ColumnFilter("columnA"),
+		},
+		{
+			in: ",:columnA,,familyB:columnB,",
+			out: bigtable.InterleaveFilters(
+				bigtable.ColumnFilter("columnA"),
+				bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")),
+			),
+		},
+		{
+			in:   "familyA:columnA:cellA",
+			fail: true,
+		},
+		{
+			in:   "familyA::columnA",
+			fail: true,
+		},
+	}
+
+	for _, tc := range tests {
+		got, err := parseColumnsFilter(tc.in)
+
+		if !tc.fail && err != nil {
+			t.Errorf("parseColumnsFilter(%q) unexpectedly failed: %v", tc.in, err)
+			continue
+		}
+		if tc.fail && err == nil {
+			t.Errorf("parseColumnsFilter(%q) did not fail", tc.in)
+			continue
+		}
+		if tc.fail {
+			continue
+		}
+
+		cmpOpts := cmp.Options{
+			cmp.AllowUnexported(bigtable.ChainFilters([]bigtable.Filter{}...)),
+			cmp.AllowUnexported(bigtable.InterleaveFilters([]bigtable.Filter{}...)),
+		}
+
+		if !cmp.Equal(got, tc.out, cmpOpts) {
+			t.Errorf("parseColumnsFilter(%q) = %v, want %v", tc.in, got, tc.out)
+		}
+	}
+}
diff --git a/bigtable/cmd/cbt/cbtdoc.go b/bigtable/cmd/cbt/cbtdoc.go
index 317ea08..544a1d4 100644
--- a/bigtable/cmd/cbt/cbtdoc.go
+++ b/bigtable/cmd/cbt/cbtdoc.go
@@ -50,6 +50,11 @@
 	set                       Set value of a cell
 	setgcpolicy               Set the GC policy for a column family
 	waitforreplication        Blocks until all the completed writes have been replicated to all the clusters (replication alpha)
+	createtablefromsnapshot   Create a table from a snapshot (snapshots alpha)
+	createsnapshot            Create a snapshot from a source table (snapshots alpha)
+	listsnapshots             List snapshots in a cluster (snapshots alpha)
+	getsnapshot               Get snapshot info (snapshots alpha)
+	deletesnapshot            Delete snapshot in a cluster (snapshots alpha)
 	version                   Print the current cbt version
 
 Use "cbt help <command>" for more information about a command.
@@ -70,7 +75,7 @@
 
 For convenience, values of the -project, -instance, -creds,
 -admin-endpoint and -data-endpoint flags may be specified in
-/usr/local/google/home/igorbernstein/.cbtrc in this format:
+~/.cbtrc in this format:
 	project = my-project-123
 	instance = my-instance
 	creds = path-to-account-key.json
@@ -232,7 +237,8 @@
 Read from a single row
 
 Usage:
-	cbt lookup <table> <row> [app-profile=<app profile id>]
+	cbt lookup <table> <row> [cells-per-column=<n>] [app-profile=<app profile id>]
+	  cells-per-column=<n> 			Read only this many cells per column
 	  app-profile=<app profile id>		The app profile id to use for the request (replication alpha)
 
 
@@ -259,12 +265,13 @@
 Read rows
 
 Usage:
-	cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>] [app-profile=<app profile id>]
+	cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>] [cells-per-column=<n>] [app-profile=<app profile id>]
 	  start=<row>		Start reading at this row
 	  end=<row>		Stop reading before this row
 	  prefix=<prefix>	Read rows with this prefix
 	  regex=<regex> 	Read rows with keys matching this regex
 	  count=<n>		Read only this many rows
+	  cells-per-column=<n>	Read only this many cells per column
 	  app-profile=<app profile id>		The app profile id to use for the request (replication alpha)
 
 
@@ -304,6 +311,52 @@
 
 
 
+Create a table from a snapshot (snapshots alpha)
+
+Usage:
+	cbt createtablefromsnapshot <table> <cluster> <snapshot>
+	  table	The name of the table to create
+	  cluster	The cluster where the snapshot is located
+	  snapshot	The snapshot to restore
+
+
+
+
+Create a snapshot from a source table (snapshots alpha)
+
+Usage:
+	cbt createsnapshot <cluster> <snapshot> <table> [ttl=<d>]
+
+	  [ttl=<d>]		Lifespan of the snapshot (e.g. "1h", "4d")
+
+
+
+
+
+List snapshots in a cluster (snapshots alpha)
+
+Usage:
+	cbt listsnapshots [<cluster>]
+
+
+
+
+Get snapshot info (snapshots alpha)
+
+Usage:
+	cbt getsnapshot <cluster> <snapshot>
+
+
+
+
+Delete snapshot in a cluster (snapshots alpha)
+
+Usage:
+	cbt deletesnapshot <cluster> <snapshot>
+
+
+
+
 Print the current cbt version
 
 Usage:
diff --git a/bigtable/cmd/loadtest/loadtest.go b/bigtable/cmd/loadtest/loadtest.go
index 1c86ed8..daafb35 100644
--- a/bigtable/cmd/loadtest/loadtest.go
+++ b/bigtable/cmd/loadtest/loadtest.go
@@ -123,7 +123,7 @@
 	go func() {
 		s := <-c
 		log.Printf("Caught %v, cleaning scratch table.", s)
-		adminClient.DeleteTable(context.Background(), *scratchTable)
+		_ = adminClient.DeleteTable(context.Background(), *scratchTable)
 		os.Exit(1)
 	}()
 
diff --git a/bigtable/internal/gax/invoke.go b/bigtable/internal/gax/invoke.go
index b7be7d4..8ff75ab 100644
--- a/bigtable/internal/gax/invoke.go
+++ b/bigtable/internal/gax/invoke.go
@@ -21,14 +21,15 @@
 	"math/rand"
 	"time"
 
+	"log"
+	"os"
+
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
-	"log"
-	"os"
 )
 
-var logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags)
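+// Logger is used to log retryable errors between attempts; setting it to nil
+// disables retry logging.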
+var Logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags)
 
 // A user defined call stub.
 type APICall func(context.Context) error
@@ -63,7 +64,9 @@
 		// Sleep a random amount up to the current delay
 		d := time.Duration(rand.Int63n(int64(delay)))
 		delayCtx, _ := context.WithTimeout(ctx, delay)
-		logger.Printf("Retryable error: %v, retrying in %v", err, d)
+		if Logger != nil {
+			Logger.Printf("Retryable error: %v, retrying in %v", err, d)
+		}
 		<-delayCtx.Done()
 
 		delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier)
diff --git a/bigtable/internal/gax/invoke_test.go b/bigtable/internal/gax/invoke_test.go
index 6d3c67e..b10ec59 100644
--- a/bigtable/internal/gax/invoke_test.go
+++ b/bigtable/internal/gax/invoke_test.go
@@ -34,7 +34,7 @@
 	deadline := time.Now().Add(1 * time.Second)
 	ctx, _ := context.WithDeadline(context.Background(), deadline)
 	var invokeTime time.Time
-	Invoke(ctx, func(childCtx context.Context) error {
+	_ = Invoke(ctx, func(childCtx context.Context) error {
 		// Keep failing, make sure we never slept more than max (plus a fudge factor)
 		if !invokeTime.IsZero() {
 			if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) {
diff --git a/bigtable/internal/stat/stats.go b/bigtable/internal/stat/stats.go
index 5fb047f..9baa542 100644
--- a/bigtable/internal/stat/stats.go
+++ b/bigtable/internal/stat/stats.go
@@ -123,10 +123,15 @@
 
 // WriteCSV writes a csv file to the given Writer,
 // with a header row and one row per aggregate.
-func WriteCSV(aggs []*Aggregate, iow io.Writer) error {
+func WriteCSV(aggs []*Aggregate, iow io.Writer) (err error) {
 	w := csv.NewWriter(iow)
-	defer w.Flush()
-	err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"})
+	defer func() {
+		w.Flush()
+		if err == nil {
+			err = w.Error()
+		}
+	}()
+	err = w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"})
 	if err != nil {
 		return err
 	}
diff --git a/bigtable/reader_test.go b/bigtable/reader_test.go
index f202891..e1a7f06 100644
--- a/bigtable/reader_test.go
+++ b/bigtable/reader_test.go
@@ -60,10 +60,10 @@
 func TestMultipleCells(t *testing.T) {
 	cr := newChunkReader()
 
-	cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
-	cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false))
-	cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false))
-	cr.Process(cc("rs", "fm2", "col1", 0, "val4", 0, false))
+	mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "val1", 0, false))
+	mustProcess(t, cr, cc("rs", "fm1", "col1", 1, "val2", 0, false))
+	mustProcess(t, cr, cc("rs", "fm1", "col2", 0, "val3", 0, false))
+	mustProcess(t, cr, cc("rs", "fm2", "col1", 0, "val4", 0, false))
 	row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true))
 	if err != nil {
 		t.Fatalf("Processing chunk: %v", err)
@@ -95,8 +95,8 @@
 func TestSplitCells(t *testing.T) {
 	cr := newChunkReader()
 
-	cr.Process(cc("rs", "fm1", "col1", 0, "hello ", 11, false))
-	cr.Process(ccData("world", 0, false))
+	mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "hello ", 11, false))
+	mustProcess(t, cr, ccData("world", 0, false))
 	row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true))
 	if err != nil {
 		t.Fatalf("Processing chunk: %v", err)
@@ -171,12 +171,11 @@
 
 func TestReset(t *testing.T) {
 	cr := newChunkReader()
-
-	cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
-	cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false))
-	cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false))
-	cr.Process(ccReset())
-	row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
+	mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "val1", 0, false))
+	mustProcess(t, cr, cc("rs", "fm1", "col1", 1, "val2", 0, false))
+	mustProcess(t, cr, cc("rs", "fm1", "col2", 0, "val3", 0, false))
+	mustProcess(t, cr, ccReset())
+	row := mustProcess(t, cr, cc("rs1", "fm1", "col1", 1, "val1", 0, true))
 	want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
 	if !testutil.Equal(row["fm1"], want) {
 		t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want)
@@ -189,13 +188,21 @@
 func TestNewFamEmptyQualifier(t *testing.T) {
 	cr := newChunkReader()
 
-	cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
+	mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "val1", 0, false))
 	_, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true))
 	if err == nil {
 		t.Fatalf("Expected error on second chunk with no qualifier set")
 	}
 }
 
+func mustProcess(t *testing.T, cr *chunkReader, cc *btspb.ReadRowsResponse_CellChunk) Row {
+	row, err := cr.Process(cc)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return row
+}
+
 // The read rows acceptance test reads a json file specifying a number of tests,
 // each consisting of one or more cell chunk text protos and one or more resulting
 // cells or errors.
diff --git a/bigtable/retry_test.go b/bigtable/retry_test.go
index 03a9389..83d77af 100644
--- a/bigtable/retry_test.go
+++ b/bigtable/retry_test.go
@@ -21,6 +21,7 @@
 	"time"
 
 	"cloud.google.com/go/bigtable/bttest"
+	"cloud.google.com/go/bigtable/internal/gax"
 	"cloud.google.com/go/internal/testutil"
 	"github.com/golang/protobuf/ptypes/wrappers"
 	"github.com/google/go-cmp/cmp"
@@ -69,6 +70,7 @@
 }
 
 func TestRetryApply(t *testing.T) {
+	gax.Logger = nil
 	ctx := context.Background()
 
 	errCount := 0
@@ -135,6 +137,7 @@
 
 func TestRetryApplyBulk(t *testing.T) {
 	ctx := context.Background()
+	gax.Logger = nil
 
 	// Intercept requests and delegate to an interceptor defined by the test case
 	errCount := 0
@@ -179,28 +182,28 @@
 	f = func(ss grpc.ServerStream) error {
 		var err error
 		req := new(btpb.MutateRowsRequest)
-		ss.RecvMsg(req)
+		must(ss.RecvMsg(req))
 		switch errCount {
 		case 0:
 			// Retryable request failure
 			err = status.Errorf(codes.Unavailable, "")
 		case 1:
 			// Two mutations fail
-			writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted)
+			must(writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted))
 			err = nil
 		case 2:
 			// Two failures were retried. One will succeed.
 			if want, got := 2, len(req.Entries); want != got {
 				t.Errorf("2 bulk retries, got: %d, want %d", got, want)
 			}
-			writeMutateRowsResponse(ss, codes.OK, codes.Aborted)
+			must(writeMutateRowsResponse(ss, codes.OK, codes.Aborted))
 			err = nil
 		case 3:
 			// One failure was retried and will succeed.
 			if want, got := 1, len(req.Entries); want != got {
 				t.Errorf("1 bulk retry, got: %d, want %d", got, want)
 			}
-			writeMutateRowsResponse(ss, codes.OK)
+			must(writeMutateRowsResponse(ss, codes.OK))
 			err = nil
 		}
 		errCount++
@@ -218,12 +221,12 @@
 	f = func(ss grpc.ServerStream) error {
 		var err error
 		req := new(btpb.MutateRowsRequest)
-		ss.RecvMsg(req)
+		must(ss.RecvMsg(req))
 		switch errCount {
 		case 0:
 			// Give non-idempotent mutation a retryable error code.
 			// Nothing should be retried.
-			writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted)
+			must(writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted))
 			err = nil
 		case 1:
 			t.Errorf("unretryable errors: got one retry, want no retries")
@@ -245,8 +248,7 @@
 
 	// Test individual errors and a deadline exceeded
 	f = func(ss grpc.ServerStream) error {
-		writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted)
-		return nil
+		return writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted)
 	}
 	ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond)
 	errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3})
@@ -298,6 +300,7 @@
 
 func TestRetryReadRows(t *testing.T) {
 	ctx := context.Background()
+	gax.Logger = nil
 
 	// Intercept requests and delegate to an interceptor defined by the test case
 	errCount := 0
@@ -320,7 +323,7 @@
 	f = func(ss grpc.ServerStream) error {
 		var err error
 		req := new(btpb.ReadRowsRequest)
-		ss.RecvMsg(req)
+		must(ss.RecvMsg(req))
 		switch errCount {
 		case 0:
 			// Retryable request failure
@@ -330,7 +333,7 @@
 			if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
 				t.Errorf("first retry, no data received yet: got %q, want %q", got, want)
 			}
-			writeReadRowsResponse(ss, "a", "b")
+			must(writeReadRowsResponse(ss, "a", "b"))
 			err = status.Errorf(codes.Unavailable, "")
 		case 2:
 			// Retryable request failure
@@ -340,7 +343,7 @@
 			err = status.Errorf(codes.Unavailable, "")
 		case 3:
 			// Write two more rows
-			writeReadRowsResponse(ss, "c", "d")
+			must(writeReadRowsResponse(ss, "c", "d"))
 			err = nil
 		}
 		errCount++
@@ -348,10 +351,10 @@
 	}
 
 	var got []string
-	tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool {
+	must(tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool {
 		got = append(got, r.Key())
 		return true
-	})
+	}))
 	want := []string{"a", "b", "c", "d"}
 	if !testutil.Equal(got, want) {
 		t.Errorf("retry range integration: got %v, want %v", got, want)
@@ -370,3 +373,9 @@
 	}
 	return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks})
 }
+
+// must panics on a non-nil error; the tests use it for calls whose errors they do not otherwise inspect.
+func must(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/cloud.go b/cloud.go
index 3bd06a5..2754c53 100644
--- a/cloud.go
+++ b/cloud.go
@@ -17,6 +17,11 @@
 Services. See https://godoc.org/cloud.google.com/go for a full list
 of sub-packages.
 
+Client Options
+
+All clients in sub-packages are configurable via client options. These options are
+described here: https://godoc.org/google.golang.org/api/option.
+
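+For example, a minimal sketch of passing an option to a client constructor (the
+pubsub package, project ID, and key file path below are illustrative placeholders,
+not part of this change):
+
+    ctx := context.Background()
+    client, err := pubsub.NewClient(ctx, "my-project-id",
+        option.WithCredentialsFile("/path/to/keyfile.json"))
+    if err != nil {
+        // TODO: Handle error.
+    }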
 
 Authentication and Authorization
 
@@ -54,5 +59,18 @@
 option to NewClient calls. This configures the underlying gRPC connections to be
 pooled and addressed in a round robin fashion.
 
+
+Using the Libraries with Docker
+
+Minimal Docker images such as Alpine lack CA certificates. This causes RPCs to appear to
+hang, because gRPC retries indefinitely. See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/928
+for more information.
+
+Debugging
+
+To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See
+https://godoc.org/google.golang.org/grpc/grpclog for more information.
+
+For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".
 */
 package cloud // import "cloud.google.com/go"
diff --git a/cloudtasks/apiv2beta2/cloud_tasks_client.go b/cloudtasks/apiv2beta2/cloud_tasks_client.go
new file mode 100644
index 0000000..c68cb59
--- /dev/null
+++ b/cloudtasks/apiv2beta2/cloud_tasks_client.go
@@ -0,0 +1,763 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package cloudtasks
+
+import (
+	"math"
+	"time"
+
+	"cloud.google.com/go/internal/version"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
+	iampb "google.golang.org/genproto/googleapis/iam/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+// CallOptions contains the retry settings for each method of Client.
+type CallOptions struct {
+	ListQueues         []gax.CallOption
+	GetQueue           []gax.CallOption
+	CreateQueue        []gax.CallOption
+	UpdateQueue        []gax.CallOption
+	DeleteQueue        []gax.CallOption
+	PurgeQueue         []gax.CallOption
+	PauseQueue         []gax.CallOption
+	ResumeQueue        []gax.CallOption
+	GetIamPolicy       []gax.CallOption
+	SetIamPolicy       []gax.CallOption
+	TestIamPermissions []gax.CallOption
+	ListTasks          []gax.CallOption
+	GetTask            []gax.CallOption
+	CreateTask         []gax.CallOption
+	DeleteTask         []gax.CallOption
+	LeaseTasks         []gax.CallOption
+	AcknowledgeTask    []gax.CallOption
+	RenewLease         []gax.CallOption
+	CancelLease        []gax.CallOption
+	RunTask            []gax.CallOption
+}
+
+func defaultClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("cloudtasks.googleapis.com:443"),
+		option.WithScopes(DefaultAuthScopes()...),
+	}
+}
+
+func defaultCallOptions() *CallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+	}
+	return &CallOptions{
+		ListQueues:         retry[[2]string{"default", "idempotent"}],
+		GetQueue:           retry[[2]string{"default", "idempotent"}],
+		CreateQueue:        retry[[2]string{"default", "non_idempotent"}],
+		UpdateQueue:        retry[[2]string{"default", "non_idempotent"}],
+		DeleteQueue:        retry[[2]string{"default", "non_idempotent"}],
+		PurgeQueue:         retry[[2]string{"default", "non_idempotent"}],
+		PauseQueue:         retry[[2]string{"default", "non_idempotent"}],
+		ResumeQueue:        retry[[2]string{"default", "non_idempotent"}],
+		GetIamPolicy:       retry[[2]string{"default", "idempotent"}],
+		SetIamPolicy:       retry[[2]string{"default", "non_idempotent"}],
+		TestIamPermissions: retry[[2]string{"default", "idempotent"}],
+		ListTasks:          retry[[2]string{"default", "idempotent"}],
+		GetTask:            retry[[2]string{"default", "idempotent"}],
+		CreateTask:         retry[[2]string{"default", "non_idempotent"}],
+		DeleteTask:         retry[[2]string{"default", "idempotent"}],
+		LeaseTasks:         retry[[2]string{"default", "non_idempotent"}],
+		AcknowledgeTask:    retry[[2]string{"default", "non_idempotent"}],
+		RenewLease:         retry[[2]string{"default", "non_idempotent"}],
+		CancelLease:        retry[[2]string{"default", "non_idempotent"}],
+		RunTask:            retry[[2]string{"default", "non_idempotent"}],
+	}
+}
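+
+// The per-method settings above can be overridden after NewClient by replacing the
+// corresponding CallOptions slice. A hedged sketch (the retry codes and backoff
+// values below are illustrative, not the defaults of this package):
+//
+//	c, err := NewClient(ctx)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	c.CallOptions.GetQueue = []gax.CallOption{
+//		gax.WithRetry(func() gax.Retryer {
+//			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//				Initial:    200 * time.Millisecond,
+//				Max:        10 * time.Second,
+//				Multiplier: 2,
+//			})
+//		}),
+//	}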
+
+// Client is a client for interacting with Cloud Tasks API.
+type Client struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	client taskspb.CloudTasksClient
+
+	// The call options for this service.
+	CallOptions *CallOptions
+
+	// The x-goog-* metadata to be sent with each request.
+	xGoogMetadata metadata.MD
+}
+
+// NewClient creates a new cloud tasks client.
+//
+// Cloud Tasks allows developers to manage the execution of background
+// work in their applications.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &Client{
+		conn:        conn,
+		CallOptions: defaultCallOptions(),
+
+		client: taskspb.NewCloudTasksClient(conn),
+	}
+	c.setGoogleClientInfo()
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *Client) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *Client) Close() error {
+	return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *Client) setGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", version.Go()}, keyval...)
+	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListQueues lists queues.
+//
+// Queues are returned in lexicographical order.
+func (c *Client) ListQueues(ctx context.Context, req *taskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListQueues[0:len(c.CallOptions.ListQueues):len(c.CallOptions.ListQueues)], opts...)
+	it := &QueueIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*taskspb.Queue, string, error) {
+		var resp *taskspb.ListQueuesResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListQueues(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Queues, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// GetQueue gets a queue.
+func (c *Client) GetQueue(ctx context.Context, req *taskspb.GetQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.GetQueue[0:len(c.CallOptions.GetQueue):len(c.CallOptions.GetQueue)], opts...)
+	var resp *taskspb.Queue
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetQueue(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// CreateQueue creates a queue.
+//
+// Queues created with this method allow tasks to live for a maximum of 31
+// days. After a task is 31 days old, the task will be deleted regardless of whether
+// it was dispatched or not.
+//
+// WARNING: Using this method may have unintended side effects if you are
+// using an App Engine queue.yaml or queue.xml file to manage your queues.
+// Read
+// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
+// before using this method.
+func (c *Client) CreateQueue(ctx context.Context, req *taskspb.CreateQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.CreateQueue[0:len(c.CallOptions.CreateQueue):len(c.CallOptions.CreateQueue)], opts...)
+	var resp *taskspb.Queue
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.CreateQueue(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateQueue updates a queue.
+//
+// This method creates the queue if it does not exist and updates
+// the queue if it does exist.
+//
+// Queues created with this method allow tasks to live for a maximum of 31
+// days. After a task is 31 days old, the task will be deleted regardless of whether
+// it was dispatched or not.
+//
+// WARNING: Using this method may have unintended side effects if you are
+// using an App Engine queue.yaml or queue.xml file to manage your queues.
+// Read
+// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
+// before using this method.
+func (c *Client) UpdateQueue(ctx context.Context, req *taskspb.UpdateQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.UpdateQueue[0:len(c.CallOptions.UpdateQueue):len(c.CallOptions.UpdateQueue)], opts...)
+	var resp *taskspb.Queue
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.UpdateQueue(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DeleteQueue deletes a queue.
+//
+// This command will delete the queue even if it has tasks in it.
+//
+// Note: If you delete a queue, a queue with the same name can't be created
+// for 7 days.
+//
+// WARNING: Using this method may have unintended side effects if you are
+// using an App Engine queue.yaml or queue.xml file to manage your queues.
+// Read
+// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
+// before using this method.
+func (c *Client) DeleteQueue(ctx context.Context, req *taskspb.DeleteQueueRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.DeleteQueue[0:len(c.CallOptions.DeleteQueue):len(c.CallOptions.DeleteQueue)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.DeleteQueue(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// PurgeQueue purges a queue by deleting all of its tasks.
+//
+// All tasks created before this method is called are permanently deleted.
+//
+// Purge operations can take up to one minute to take effect. Tasks
+// might be dispatched before the purge takes effect. A purge is irreversible.
+func (c *Client) PurgeQueue(ctx context.Context, req *taskspb.PurgeQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.PurgeQueue[0:len(c.CallOptions.PurgeQueue):len(c.CallOptions.PurgeQueue)], opts...)
+	var resp *taskspb.Queue
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.PurgeQueue(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// PauseQueue pauses the queue.
+//
+// If a queue is paused, the system will stop dispatching tasks
+// until the queue is resumed via
+// [ResumeQueue][google.cloud.tasks.v2beta2.CloudTasks.ResumeQueue]. Tasks can still be added
+// when the queue is paused. A queue is paused if its
+// [state][google.cloud.tasks.v2beta2.Queue.state] is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
+func (c *Client) PauseQueue(ctx context.Context, req *taskspb.PauseQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.PauseQueue[0:len(c.CallOptions.PauseQueue):len(c.CallOptions.PauseQueue)], opts...)
+	var resp *taskspb.Queue
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.PauseQueue(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ResumeQueue resumes a queue.
+//
+// This method resumes a queue after it has been
+// [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED] or
+// [DISABLED][google.cloud.tasks.v2beta2.Queue.State.DISABLED]. The state of a queue is stored
+// in the queue's [state][google.cloud.tasks.v2beta2.Queue.state]; after calling this method it
+// will be set to [RUNNING][google.cloud.tasks.v2beta2.Queue.State.RUNNING].
+//
+// WARNING: Resuming many high-QPS queues at the same time can
+// lead to target overloading. If you are resuming high-QPS
+// queues, follow the 500/50/5 pattern described in
+// Managing Cloud Tasks Scaling Risks (at /cloud-tasks/pdfs/managing-cloud-tasks-scaling-risks-2017-06-05.pdf).
+func (c *Client) ResumeQueue(ctx context.Context, req *taskspb.ResumeQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ResumeQueue[0:len(c.CallOptions.ResumeQueue):len(c.CallOptions.ResumeQueue)], opts...)
+	var resp *taskspb.Queue
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.ResumeQueue(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// GetIamPolicy gets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue].
+// Returns an empty policy if the resource exists and does not have a policy
+// set.
+//
+// Authorization requires the following Google IAM (at /iam) permission on the
+// specified resource parent:
+//
+//   cloudtasks.queues.getIamPolicy
+func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...)
+	var resp *iampb.Policy
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// SetIamPolicy sets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue]. Replaces any existing
+// policy.
+//
+// Note: The Cloud Console does not check queue-level IAM permissions yet.
+// Project-level permissions are required to use the Cloud Console.
+//
+// Authorization requires the following Google IAM (at /iam) permission on the
+// specified resource parent:
+//
+//   cloudtasks.queues.setIamPolicy
+func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...)
+	var resp *iampb.Policy
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// TestIamPermissions returns permissions that a caller has on a [Queue][google.cloud.tasks.v2beta2.Queue].
+// If the resource does not exist, this will return an empty set of
+// permissions, not a [NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
+//
+// Note: This operation is designed to be used for building permission-aware
+// UIs and command-line tools, not for authorization checking. This operation
+// may "fail open" without warning.
+func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...)
+	var resp *iampb.TestIamPermissionsResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListTasks lists the tasks in a queue.
+//
+// By default, only the [BASIC][google.cloud.tasks.v2beta2.Task.View.BASIC] view is retrieved
+// due to performance considerations;
+// [response_view][google.cloud.tasks.v2beta2.ListTasksRequest.response_view] controls the
+// subset of information which is returned.
+func (c *Client) ListTasks(ctx context.Context, req *taskspb.ListTasksRequest, opts ...gax.CallOption) *TaskIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListTasks[0:len(c.CallOptions.ListTasks):len(c.CallOptions.ListTasks)], opts...)
+	it := &TaskIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*taskspb.Task, string, error) {
+		var resp *taskspb.ListTasksResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListTasks(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Tasks, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// GetTask gets a task.
+func (c *Client) GetTask(ctx context.Context, req *taskspb.GetTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.GetTask[0:len(c.CallOptions.GetTask):len(c.CallOptions.GetTask)], opts...)
+	var resp *taskspb.Task
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetTask(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// CreateTask creates a task and adds it to a queue.
+//
+// To add multiple tasks at the same time, use
+// HTTP batching (at /storage/docs/json_api/v1/how-tos/batch)
+// or the batching documentation for your client library, for example
+// https://developers.google.com/api-client-library/python/guide/batch.
+//
+// Tasks cannot be updated after creation; there is no UpdateTask command.
+//
+//   For App Engine queues (at google.cloud.tasks.v2beta2.AppEngineHttpTarget),
+//   the maximum task size is 100KB.
+//
+//   For pull queues (at google.cloud.tasks.v2beta2.PullTarget),
+//   the maximum task size is 1MB.
+func (c *Client) CreateTask(ctx context.Context, req *taskspb.CreateTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.CreateTask[0:len(c.CallOptions.CreateTask):len(c.CallOptions.CreateTask)], opts...)
+	var resp *taskspb.Task
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.CreateTask(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DeleteTask deletes a task.
+//
+// A task can be deleted if it is scheduled or dispatched. A task
+// cannot be deleted if it has completed successfully or permanently
+// failed.
+func (c *Client) DeleteTask(ctx context.Context, req *taskspb.DeleteTaskRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.DeleteTask[0:len(c.CallOptions.DeleteTask):len(c.CallOptions.DeleteTask)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.DeleteTask(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// LeaseTasks leases tasks from a pull queue for
+// [lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration].
+//
+// This method is invoked by the worker to obtain a lease. The
+// worker must acknowledge the task via
+// [AcknowledgeTask][google.cloud.tasks.v2beta2.CloudTasks.AcknowledgeTask] after it has
+// performed the work associated with the task.
+//
+// The [payload][google.cloud.tasks.v2beta2.PullMessage.payload] is intended to store data that
+// the worker needs to perform the work associated with the task. To
+// return the payloads in the [response][google.cloud.tasks.v2beta2.LeaseTasksResponse], set
+// [response_view][google.cloud.tasks.v2beta2.LeaseTasksRequest.response_view] to
+// [FULL][google.cloud.tasks.v2beta2.Task.View.FULL].
+//
+// A maximum of 10 qps of [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks]
+// requests are allowed per
+// queue. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
+// is returned when this limit is
+// exceeded. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
+// is also returned when
+// [max_tasks_dispatched_per_second][google.cloud.tasks.v2beta2.RateLimits.max_tasks_dispatched_per_second]
+// is exceeded.
+func (c *Client) LeaseTasks(ctx context.Context, req *taskspb.LeaseTasksRequest, opts ...gax.CallOption) (*taskspb.LeaseTasksResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.LeaseTasks[0:len(c.CallOptions.LeaseTasks):len(c.CallOptions.LeaseTasks)], opts...)
+	var resp *taskspb.LeaseTasksResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.LeaseTasks(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// AcknowledgeTask acknowledges a pull task.
+//
+// The worker, that is, the entity that
+// [leased][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks] this task, must call this method
+// to indicate that the work associated with the task has finished.
+//
+// The worker must acknowledge a task within the
+// [lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration] or the lease
+// will expire and the task will become available to be leased
+// again. After the task is acknowledged, it will not be returned
+// by a later [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks],
+// [GetTask][google.cloud.tasks.v2beta2.CloudTasks.GetTask], or
+// [ListTasks][google.cloud.tasks.v2beta2.CloudTasks.ListTasks].
+//
+// To acknowledge multiple tasks at the same time, use
+// HTTP batching (at /storage/docs/json_api/v1/how-tos/batch)
+// or the batching documentation for your client library, for example
+// https://developers.google.com/api-client-library/python/guide/batch.
+func (c *Client) AcknowledgeTask(ctx context.Context, req *taskspb.AcknowledgeTaskRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.AcknowledgeTask[0:len(c.CallOptions.AcknowledgeTask):len(c.CallOptions.AcknowledgeTask)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.AcknowledgeTask(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
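+
+// Taken together, LeaseTasks and AcknowledgeTask form the pull workflow. A hedged
+// sketch of that loop (the queue name, lease duration, and field values are
+// placeholders; see the taskspb request types for the authoritative fields):
+//
+//	resp, err := c.LeaseTasks(ctx, &taskspb.LeaseTasksRequest{
+//		Parent:        "projects/P/locations/L/queues/Q",
+//		MaxTasks:      10,
+//		LeaseDuration: ptypes.DurationProto(30 * time.Second),
+//		ResponseView:  taskspb.Task_FULL,
+//	})
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	for _, task := range resp.Tasks {
+//		// Perform the work described by the task's pull message payload, then:
+//		if err := c.AcknowledgeTask(ctx, &taskspb.AcknowledgeTaskRequest{
+//			Name:         task.Name,
+//			ScheduleTime: task.ScheduleTime,
+//		}); err != nil {
+//			// TODO: Handle error.
+//		}
+//	}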
+
+// RenewLease renews the current lease of a pull task.
+//
+// The worker can use this method to extend the lease by a new
+// duration, starting from now. The new task lease will be
+// returned in the task's [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time].
+func (c *Client) RenewLease(ctx context.Context, req *taskspb.RenewLeaseRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.RenewLease[0:len(c.CallOptions.RenewLease):len(c.CallOptions.RenewLease)], opts...)
+	var resp *taskspb.Task
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.RenewLease(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// CancelLease cancels a pull task's lease.
+//
+// The worker can use this method to cancel a task's lease by
+// setting its [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] to now. This will
+// make the task available to be leased to the next caller of
+// [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks].
+func (c *Client) CancelLease(ctx context.Context, req *taskspb.CancelLeaseRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.CancelLease[0:len(c.CallOptions.CancelLease):len(c.CallOptions.CancelLease)], opts...)
+	var resp *taskspb.Task
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.CancelLease(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// RunTask forces a task to run now.
+//
+// When this method is called, Cloud Tasks will dispatch the task, even if
+// the task is already running, the queue has reached its [RateLimits][google.cloud.tasks.v2beta2.RateLimits] or
+// is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
+//
+// This command is meant to be used for manual debugging. For
+// example, [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] can be used to retry a failed
+// task after a fix has been made or to manually force a task to be
+// dispatched now.
+//
+// The dispatched task is returned. That is, the task that is returned
+// contains the [status][google.cloud.tasks.v2beta2.Task.status] after the task is dispatched but
+// before the task is received by its target.
+//
+// If Cloud Tasks receives a successful response from the task's
+// target, then the task will be deleted; otherwise the task's
+// [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] will be reset to the time that
+// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] was called plus the retry delay specified
+// in the queue's [RetryConfig][google.cloud.tasks.v2beta2.RetryConfig].
+//
+// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] returns
+// [NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
+// task that has already succeeded or permanently failed.
+//
+// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] cannot be called on a
+// [pull task][google.cloud.tasks.v2beta2.PullMessage].
+func (c *Client) RunTask(ctx context.Context, req *taskspb.RunTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.RunTask[0:len(c.CallOptions.RunTask):len(c.CallOptions.RunTask)], opts...)
+	var resp *taskspb.Task
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.RunTask(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// QueueIterator manages a stream of *taskspb.Queue.
+type QueueIterator struct {
+	items    []*taskspb.Queue
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*taskspb.Queue, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *QueueIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *QueueIterator) Next() (*taskspb.Queue, error) {
+	var item *taskspb.Queue
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *QueueIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *QueueIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
+
+// TaskIterator manages a stream of *taskspb.Task.
+type TaskIterator struct {
+	items    []*taskspb.Task
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*taskspb.Task, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TaskIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TaskIterator) Next() (*taskspb.Task, error) {
+	var item *taskspb.Task
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *TaskIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *TaskIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
diff --git a/cloudtasks/apiv2beta2/cloud_tasks_client_example_test.go b/cloudtasks/apiv2beta2/cloud_tasks_client_example_test.go
new file mode 100644
index 0000000..b954225
--- /dev/null
+++ b/cloudtasks/apiv2beta2/cloud_tasks_client_example_test.go
@@ -0,0 +1,401 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package cloudtasks_test
+
+import (
+	"cloud.google.com/go/cloudtasks/apiv2beta2"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
+	iampb "google.golang.org/genproto/googleapis/iam/v1"
+)
+
+func ExampleNewClient() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use client.
+	_ = c
+}
+
+func ExampleClient_ListQueues() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.ListQueuesRequest{
+		// TODO: Fill request struct fields.
+	}
+	it := c.ListQueues(ctx, req)
+	for {
+		resp, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		// TODO: Use resp.
+		_ = resp
+	}
+}
+
+func ExampleClient_GetQueue() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.GetQueueRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.GetQueue(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_CreateQueue() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.CreateQueueRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.CreateQueue(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_UpdateQueue() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.UpdateQueueRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.UpdateQueue(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_DeleteQueue() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.DeleteQueueRequest{
+		// TODO: Fill request struct fields.
+	}
+	err = c.DeleteQueue(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+}
+
+func ExampleClient_PurgeQueue() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.PurgeQueueRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.PurgeQueue(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_PauseQueue() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.PauseQueueRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.PauseQueue(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_ResumeQueue() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.ResumeQueueRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.ResumeQueue(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_GetIamPolicy() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &iampb.GetIamPolicyRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.GetIamPolicy(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_SetIamPolicy() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &iampb.SetIamPolicyRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.SetIamPolicy(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_TestIamPermissions() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &iampb.TestIamPermissionsRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.TestIamPermissions(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_ListTasks() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.ListTasksRequest{
+		// TODO: Fill request struct fields.
+	}
+	it := c.ListTasks(ctx, req)
+	for {
+		resp, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		// TODO: Use resp.
+		_ = resp
+	}
+}
+
+func ExampleClient_GetTask() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.GetTaskRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.GetTask(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_CreateTask() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.CreateTaskRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.CreateTask(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_DeleteTask() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.DeleteTaskRequest{
+		// TODO: Fill request struct fields.
+	}
+	err = c.DeleteTask(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+}
+
+func ExampleClient_LeaseTasks() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.LeaseTasksRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.LeaseTasks(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_AcknowledgeTask() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.AcknowledgeTaskRequest{
+		// TODO: Fill request struct fields.
+	}
+	err = c.AcknowledgeTask(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+}
+
+func ExampleClient_RenewLease() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.RenewLeaseRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.RenewLease(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_CancelLease() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.CancelLeaseRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.CancelLease(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_RunTask() {
+	ctx := context.Background()
+	c, err := cloudtasks.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &taskspb.RunTaskRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.RunTask(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
diff --git a/dlp/apiv2beta1/doc.go b/cloudtasks/apiv2beta2/doc.go
similarity index 80%
copy from dlp/apiv2beta1/doc.go
copy to cloudtasks/apiv2beta2/doc.go
index 28a2d82..43946f4 100644
--- a/dlp/apiv2beta1/doc.go
+++ b/cloudtasks/apiv2beta2/doc.go
@@ -14,15 +14,15 @@
 
 // AUTO-GENERATED CODE. DO NOT EDIT.
 
-// Package dlp is an auto-generated package for the
-// DLP API.
+// Package cloudtasks is an auto-generated package for the
+// Cloud Tasks API.
 //
 //   NOTE: This package is in alpha. It is not stable, and is likely to change.
 //
-// The Google Data Loss Prevention API provides methods for detection of
-// privacy-sensitive fragments in text, images, and Google Cloud Platform
-// storage repositories.
-package dlp // import "cloud.google.com/go/dlp/apiv2beta1"
+// Manages the execution of large numbers of distributed requests.
+// Cloud Tasks is in Alpha.
+package cloudtasks // import "cloud.google.com/go/cloudtasks/apiv2beta2"
 
 import (
 	"golang.org/x/net/context"
diff --git a/cloudtasks/apiv2beta2/mock_test.go b/cloudtasks/apiv2beta2/mock_test.go
new file mode 100644
index 0000000..9119fa9
--- /dev/null
+++ b/cloudtasks/apiv2beta2/mock_test.go
@@ -0,0 +1,1554 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package cloudtasks
+
+import (
+	durationpb "github.com/golang/protobuf/ptypes/duration"
+	emptypb "github.com/golang/protobuf/ptypes/empty"
+	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
+	taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
+	iampb "google.golang.org/genproto/googleapis/iam/v1"
+)
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	status "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	gstatus "google.golang.org/grpc/status"
+)
+
+var _ = io.EOF
+var _ = ptypes.MarshalAny
+var _ status.Status
+
+type mockCloudTasksServer struct {
+	// Embed for forward compatibility.
+	// Tests will keep working if more methods are added
+	// in the future.
+	taskspb.CloudTasksServer
+
+	reqs []proto.Message
+
+	// If set, all calls return this error.
+	err error
+
+	// responses to return if err == nil
+	resps []proto.Message
+}
+
+func (s *mockCloudTasksServer) ListQueues(ctx context.Context, req *taskspb.ListQueuesRequest) (*taskspb.ListQueuesResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.ListQueuesResponse), nil
+}
+
+func (s *mockCloudTasksServer) GetQueue(ctx context.Context, req *taskspb.GetQueueRequest) (*taskspb.Queue, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Queue), nil
+}
+
+func (s *mockCloudTasksServer) CreateQueue(ctx context.Context, req *taskspb.CreateQueueRequest) (*taskspb.Queue, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Queue), nil
+}
+
+func (s *mockCloudTasksServer) UpdateQueue(ctx context.Context, req *taskspb.UpdateQueueRequest) (*taskspb.Queue, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Queue), nil
+}
+
+func (s *mockCloudTasksServer) DeleteQueue(ctx context.Context, req *taskspb.DeleteQueueRequest) (*emptypb.Empty, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*emptypb.Empty), nil
+}
+
+func (s *mockCloudTasksServer) PurgeQueue(ctx context.Context, req *taskspb.PurgeQueueRequest) (*taskspb.Queue, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Queue), nil
+}
+
+func (s *mockCloudTasksServer) PauseQueue(ctx context.Context, req *taskspb.PauseQueueRequest) (*taskspb.Queue, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Queue), nil
+}
+
+func (s *mockCloudTasksServer) ResumeQueue(ctx context.Context, req *taskspb.ResumeQueueRequest) (*taskspb.Queue, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Queue), nil
+}
+
+func (s *mockCloudTasksServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*iampb.Policy), nil
+}
+
+func (s *mockCloudTasksServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*iampb.Policy), nil
+}
+
+func (s *mockCloudTasksServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*iampb.TestIamPermissionsResponse), nil
+}
+
+func (s *mockCloudTasksServer) ListTasks(ctx context.Context, req *taskspb.ListTasksRequest) (*taskspb.ListTasksResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.ListTasksResponse), nil
+}
+
+func (s *mockCloudTasksServer) GetTask(ctx context.Context, req *taskspb.GetTaskRequest) (*taskspb.Task, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Task), nil
+}
+
+func (s *mockCloudTasksServer) CreateTask(ctx context.Context, req *taskspb.CreateTaskRequest) (*taskspb.Task, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Task), nil
+}
+
+func (s *mockCloudTasksServer) DeleteTask(ctx context.Context, req *taskspb.DeleteTaskRequest) (*emptypb.Empty, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*emptypb.Empty), nil
+}
+
+func (s *mockCloudTasksServer) LeaseTasks(ctx context.Context, req *taskspb.LeaseTasksRequest) (*taskspb.LeaseTasksResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.LeaseTasksResponse), nil
+}
+
+func (s *mockCloudTasksServer) AcknowledgeTask(ctx context.Context, req *taskspb.AcknowledgeTaskRequest) (*emptypb.Empty, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*emptypb.Empty), nil
+}
+
+func (s *mockCloudTasksServer) RenewLease(ctx context.Context, req *taskspb.RenewLeaseRequest) (*taskspb.Task, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Task), nil
+}
+
+func (s *mockCloudTasksServer) CancelLease(ctx context.Context, req *taskspb.CancelLeaseRequest) (*taskspb.Task, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Task), nil
+}
+
+func (s *mockCloudTasksServer) RunTask(ctx context.Context, req *taskspb.RunTaskRequest) (*taskspb.Task, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*taskspb.Task), nil
+}
+
+// clientOpt is the option tests should use to connect to the test server.
+// It is initialized by TestMain.
+var clientOpt option.ClientOption
+
+var (
+	mockCloudTasks mockCloudTasksServer
+)
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	serv := grpc.NewServer()
+	taskspb.RegisterCloudTasksServer(serv, &mockCloudTasks)
+
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		log.Fatal(err)
+	}
+	go serv.Serve(lis)
+
+	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
+	if err != nil {
+		log.Fatal(err)
+	}
+	clientOpt = option.WithGRPCConn(conn)
+
+	os.Exit(m.Run())
+}
+
+func TestCloudTasksListQueues(t *testing.T) {
+	var nextPageToken string = ""
+	var queuesElement *taskspb.Queue = &taskspb.Queue{}
+	var queues = []*taskspb.Queue{queuesElement}
+	var expectedResponse = &taskspb.ListQueuesResponse{
+		NextPageToken: nextPageToken,
+		Queues:        queues,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var request = &taskspb.ListQueuesRequest{
+		Parent: formattedParent,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListQueues(context.Background(), request).Next()
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	want := (interface{})(expectedResponse.Queues[0])
+	got := (interface{})(resp)
+	var ok bool
+
+	switch want := (want).(type) {
+	case proto.Message:
+		ok = proto.Equal(want, got.(proto.Message))
+	default:
+		ok = want == got
+	}
+	if !ok {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksListQueuesError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var request = &taskspb.ListQueuesRequest{
+		Parent: formattedParent,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListQueues(context.Background(), request).Next()
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksGetQueue(t *testing.T) {
+	var name2 string = "name2-1052831874"
+	var expectedResponse = &taskspb.Queue{
+		Name: name2,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.GetQueueRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetQueue(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksGetQueueError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.GetQueueRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetQueue(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksCreateQueue(t *testing.T) {
+	var name string = "name3373707"
+	var expectedResponse = &taskspb.Queue{
+		Name: name,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var queue *taskspb.Queue = &taskspb.Queue{}
+	var request = &taskspb.CreateQueueRequest{
+		Parent: formattedParent,
+		Queue:  queue,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CreateQueue(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksCreateQueueError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var queue *taskspb.Queue = &taskspb.Queue{}
+	var request = &taskspb.CreateQueueRequest{
+		Parent: formattedParent,
+		Queue:  queue,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CreateQueue(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksUpdateQueue(t *testing.T) {
+	var name string = "name3373707"
+	var expectedResponse = &taskspb.Queue{
+		Name: name,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var queue *taskspb.Queue = &taskspb.Queue{}
+	var request = &taskspb.UpdateQueueRequest{
+		Queue: queue,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.UpdateQueue(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksUpdateQueueError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var queue *taskspb.Queue = &taskspb.Queue{}
+	var request = &taskspb.UpdateQueueRequest{
+		Queue: queue,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.UpdateQueue(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksDeleteQueue(t *testing.T) {
+	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.DeleteQueueRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = c.DeleteQueue(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+}
+
+func TestCloudTasksDeleteQueueError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.DeleteQueueRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = c.DeleteQueue(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+}
+func TestCloudTasksPurgeQueue(t *testing.T) {
+	var name2 string = "name2-1052831874"
+	var expectedResponse = &taskspb.Queue{
+		Name: name2,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.PurgeQueueRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.PurgeQueue(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksPurgeQueueError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.PurgeQueueRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.PurgeQueue(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksPauseQueue(t *testing.T) {
+	var name2 string = "name2-1052831874"
+	var expectedResponse = &taskspb.Queue{
+		Name: name2,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.PauseQueueRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.PauseQueue(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksPauseQueueError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.PauseQueueRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.PauseQueue(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksResumeQueue(t *testing.T) {
+	var name2 string = "name2-1052831874"
+	var expectedResponse = &taskspb.Queue{
+		Name: name2,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.ResumeQueueRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ResumeQueue(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksResumeQueueError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.ResumeQueueRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ResumeQueue(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksGetIamPolicy(t *testing.T) {
+	var version int32 = 351608024
+	var etag []byte = []byte("21")
+	var expectedResponse = &iampb.Policy{
+		Version: version,
+		Etag:    etag,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedResource string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &iampb.GetIamPolicyRequest{
+		Resource: formattedResource,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetIamPolicy(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksGetIamPolicyError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedResource string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &iampb.GetIamPolicyRequest{
+		Resource: formattedResource,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetIamPolicy(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksSetIamPolicy(t *testing.T) {
+	var version int32 = 351608024
+	var etag []byte = []byte("21")
+	var expectedResponse = &iampb.Policy{
+		Version: version,
+		Etag:    etag,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedResource string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var policy *iampb.Policy = &iampb.Policy{}
+	var request = &iampb.SetIamPolicyRequest{
+		Resource: formattedResource,
+		Policy:   policy,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetIamPolicy(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksSetIamPolicyError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedResource string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var policy *iampb.Policy = &iampb.Policy{}
+	var request = &iampb.SetIamPolicyRequest{
+		Resource: formattedResource,
+		Policy:   policy,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetIamPolicy(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksTestIamPermissions(t *testing.T) {
+	var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedResource string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var permissions []string = nil
+	var request = &iampb.TestIamPermissionsRequest{
+		Resource:    formattedResource,
+		Permissions: permissions,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.TestIamPermissions(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksTestIamPermissionsError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedResource string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var permissions []string = nil
+	var request = &iampb.TestIamPermissionsRequest{
+		Resource:    formattedResource,
+		Permissions: permissions,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.TestIamPermissions(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksListTasks(t *testing.T) {
+	var nextPageToken string = ""
+	var tasksElement *taskspb.Task = &taskspb.Task{}
+	var tasks = []*taskspb.Task{tasksElement}
+	var expectedResponse = &taskspb.ListTasksResponse{
+		NextPageToken: nextPageToken,
+		Tasks:         tasks,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.ListTasksRequest{
+		Parent: formattedParent,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListTasks(context.Background(), request).Next()
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	want := (interface{})(expectedResponse.Tasks[0])
+	got := (interface{})(resp)
+	var ok bool
+
+	switch want := (want).(type) {
+	case proto.Message:
+		ok = proto.Equal(want, got.(proto.Message))
+	default:
+		ok = want == got
+	}
+	if !ok {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksListTasksError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var request = &taskspb.ListTasksRequest{
+		Parent: formattedParent,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListTasks(context.Background(), request).Next()
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksGetTask(t *testing.T) {
+	var name2 string = "name2-1052831874"
+	var expectedResponse = &taskspb.Task{
+		Name: name2,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var request = &taskspb.GetTaskRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetTask(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksGetTaskError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var request = &taskspb.GetTaskRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetTask(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksCreateTask(t *testing.T) {
+	var name string = "name3373707"
+	var expectedResponse = &taskspb.Task{
+		Name: name,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var task *taskspb.Task = &taskspb.Task{}
+	var request = &taskspb.CreateTaskRequest{
+		Parent: formattedParent,
+		Task:   task,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CreateTask(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksCreateTaskError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var task *taskspb.Task = &taskspb.Task{}
+	var request = &taskspb.CreateTaskRequest{
+		Parent: formattedParent,
+		Task:   task,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CreateTask(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksDeleteTask(t *testing.T) {
+	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var request = &taskspb.DeleteTaskRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = c.DeleteTask(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+}
+
+func TestCloudTasksDeleteTaskError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var request = &taskspb.DeleteTaskRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = c.DeleteTask(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+}
+func TestCloudTasksLeaseTasks(t *testing.T) {
+	var expectedResponse *taskspb.LeaseTasksResponse = &taskspb.LeaseTasksResponse{}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var leaseDuration *durationpb.Duration = &durationpb.Duration{}
+	var request = &taskspb.LeaseTasksRequest{
+		Parent:        formattedParent,
+		LeaseDuration: leaseDuration,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.LeaseTasks(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksLeaseTasksError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/queues/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]")
+	var leaseDuration *durationpb.Duration = &durationpb.Duration{}
+	var request = &taskspb.LeaseTasksRequest{
+		Parent:        formattedParent,
+		LeaseDuration: leaseDuration,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.LeaseTasks(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksAcknowledgeTask(t *testing.T) {
+	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var scheduleTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
+	var request = &taskspb.AcknowledgeTaskRequest{
+		Name:         formattedName,
+		ScheduleTime: scheduleTime,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = c.AcknowledgeTask(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+}
+
+func TestCloudTasksAcknowledgeTaskError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var scheduleTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
+	var request = &taskspb.AcknowledgeTaskRequest{
+		Name:         formattedName,
+		ScheduleTime: scheduleTime,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = c.AcknowledgeTask(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+}
+func TestCloudTasksRenewLease(t *testing.T) {
+	var name2 string = "name2-1052831874"
+	var expectedResponse = &taskspb.Task{
+		Name: name2,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var scheduleTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
+	var leaseDuration *durationpb.Duration = &durationpb.Duration{}
+	var request = &taskspb.RenewLeaseRequest{
+		Name:          formattedName,
+		ScheduleTime:  scheduleTime,
+		LeaseDuration: leaseDuration,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.RenewLease(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksRenewLeaseError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var scheduleTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
+	var leaseDuration *durationpb.Duration = &durationpb.Duration{}
+	var request = &taskspb.RenewLeaseRequest{
+		Name:          formattedName,
+		ScheduleTime:  scheduleTime,
+		LeaseDuration: leaseDuration,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.RenewLease(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksCancelLease(t *testing.T) {
+	var name2 string = "name2-1052831874"
+	var expectedResponse = &taskspb.Task{
+		Name: name2,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var scheduleTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
+	var request = &taskspb.CancelLeaseRequest{
+		Name:         formattedName,
+		ScheduleTime: scheduleTime,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CancelLease(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksCancelLeaseError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var scheduleTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
+	var request = &taskspb.CancelLeaseRequest{
+		Name:         formattedName,
+		ScheduleTime: scheduleTime,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CancelLease(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudTasksRunTask(t *testing.T) {
+	var name2 string = "name2-1052831874"
+	var expectedResponse = &taskspb.Task{
+		Name: name2,
+	}
+
+	mockCloudTasks.err = nil
+	mockCloudTasks.reqs = nil
+
+	mockCloudTasks.resps = append(mockCloudTasks.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var request = &taskspb.RunTaskRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.RunTask(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudTasks.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudTasksRunTaskError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudTasks.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/queues/%s/tasks/%s", "[PROJECT]", "[LOCATION]", "[QUEUE]", "[TASK]")
+	var request = &taskspb.RunTaskRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.RunTask(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
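
Every mock test above uses the same in-process harness: TestMain registers mockCloudTasks on a grpc.Server bound to localhost:0 and hands the resulting connection to the generated client through option.WithGRPCConn. The sketch below shows that pattern in isolation, using the stock gRPC health service as a stand-in for Cloud Tasks; the package and test names are illustrative and not part of this change.

package example_test

import (
	"context"
	"net"
	"testing"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func TestInProcessGRPCServer(t *testing.T) {
	// Serve a real gRPC server on an ephemeral local port, as TestMain does.
	serv := grpc.NewServer()
	healthpb.RegisterHealthServer(serv, health.NewServer())
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	go serv.Serve(lis)
	defer serv.Stop()

	// Dial the listener's address; the resulting *grpc.ClientConn is what
	// the mock tests wrap in option.WithGRPCConn(conn).
	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	// Any client built on conn now talks to the in-process server.
	if _, err := healthpb.NewHealthClient(conn).Check(context.Background(), &healthpb.HealthCheckRequest{}); err != nil {
		t.Fatal(err)
	}
}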
diff --git a/datastore/datastore_test.go b/datastore/datastore_test.go
index 7b184fd..e59a27a 100644
--- a/datastore/datastore_test.go
+++ b/datastore/datastore_test.go
@@ -152,17 +152,19 @@
 }
 
 type OmitAll struct {
-	A string `datastore:",omitempty"`
-	B int    `datastore:"Bb,omitempty"`
-	C bool   `datastore:",omitempty,noindex"`
-	F []int  `datastore:",omitempty"`
+	A string    `datastore:",omitempty"`
+	B int       `datastore:"Bb,omitempty"`
+	C bool      `datastore:",omitempty,noindex"`
+	D time.Time `datastore:",omitempty"`
+	F []int     `datastore:",omitempty"`
 }
 
 type Omit struct {
-	A string `datastore:",omitempty"`
-	B int    `datastore:"Bb,omitempty"`
-	C bool   `datastore:",omitempty,noindex"`
-	F []int  `datastore:",omitempty"`
+	A string    `datastore:",omitempty"`
+	B int       `datastore:"Bb,omitempty"`
+	C bool      `datastore:",omitempty,noindex"`
+	D time.Time `datastore:",omitempty"`
+	F []int     `datastore:",omitempty"`
 	S `datastore:",omitempty"`
 }
 
diff --git a/datastore/example_test.go b/datastore/example_test.go
index 88edbe9..b411c52 100644
--- a/datastore/example_test.go
+++ b/datastore/example_test.go
@@ -218,7 +218,7 @@
 }
 
 func ExampleNewQuery_options() {
-	// Query to order the posts by the number of comments they have recieved.
+	// Query to order the posts by the number of comments they have received.
 	q := datastore.NewQuery("Post").Order("-Comments")
 	// Start listing from an offset and limit the results.
 	q = q.Offset(20).Limit(10)
diff --git a/datastore/query.go b/datastore/query.go
index 7bf5840..a6ddfd0 100644
--- a/datastore/query.go
+++ b/datastore/query.go
@@ -443,7 +443,7 @@
 // Count returns the number of results for the given query.
 //
 // The running time and number of API calls made by Count scale linearly with
-// with the sum of the query's offset and limit. Unless the result count is
+// the sum of the query's offset and limit. Unless the result count is
 // expected to be small, it is best to specify a limit; otherwise Count will
 // continue until it finishes counting or the provided context expires.
 func (c *Client) Count(ctx context.Context, q *Query) (n int, err error) {
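
The doc comment above recommends bounding Count with a limit, since its cost scales with the sum of the query's offset and limit. A minimal usage sketch of that advice; the project ID and kind are placeholders, not part of this change.

package datastore_test

import (
	"context"
	"fmt"

	"cloud.google.com/go/datastore"
)

func Example_countWithLimit() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "my-project-id")
	if err != nil {
		// TODO: Handle error.
	}
	// Count at most 1000 matching entities. Without the limit, Count keeps
	// issuing API calls until every result is counted or ctx expires.
	n, err := client.Count(ctx, datastore.NewQuery("Post").Limit(1000))
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(n)
}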
diff --git a/datastore/query_test.go b/datastore/query_test.go
index 795fa6d..14abaf8 100644
--- a/datastore/query_test.go
+++ b/datastore/query_test.go
@@ -475,6 +475,7 @@
 
 	var gs []Gopher
 
+	// Ignore errors for the rest of this test.
 	client.GetAll(ctx, NewQuery("gopher"), &gs)
 	if got, want := <-gotNamespace, ""; got != want {
 		t.Errorf("GetAll: got namespace %q, want %q", got, want)
diff --git a/datastore/save.go b/datastore/save.go
index b96d07c..b23421e 100644
--- a/datastore/save.go
+++ b/datastore/save.go
@@ -438,6 +438,10 @@
 		return v.Float() == 0
 	case reflect.Interface, reflect.Ptr:
 		return v.IsNil()
+	case reflect.Struct:
+		if t, ok := v.Interface().(time.Time); ok {
+			return t.IsZero()
+		}
 	}
 	return false
 }
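
The new reflect.Struct case above makes a zero time.Time count as empty, which is what the added D fields in the datastore struct-tag tests exercise. A small illustrative sketch of the effect; the kind, field, and function names are placeholders, not part of this change.

package example

import (
	"context"
	"time"

	"cloud.google.com/go/datastore"
)

// Post is a placeholder kind; Published exercises the new omitempty behavior.
type Post struct {
	Title     string
	Published time.Time `datastore:",omitempty"`
}

// SaveDraft stores p. If p.Published is the zero time.Time, the ",omitempty"
// tag now suppresses the Published property entirely; once it is set to a
// real timestamp it is saved as before.
func SaveDraft(ctx context.Context, client *datastore.Client, p *Post) (*datastore.Key, error) {
	return client.Put(ctx, datastore.IncompleteKey("Post", nil), p)
}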
diff --git a/datastore/transaction.go b/datastore/transaction.go
index b480d60..6eb5c76 100644
--- a/datastore/transaction.go
+++ b/datastore/transaction.go
@@ -167,7 +167,7 @@
 			return nil, err
 		}
 		if err := f(tx); err != nil {
-			tx.Rollback()
+			_ = tx.Rollback()
 			return nil, err
 		}
 		if cmt, err := tx.Commit(); err != ErrConcurrentTransaction {
diff --git a/dlp/apiv2/dlp_client.go b/dlp/apiv2/dlp_client.go
index 10899b0..b12ab62 100644
--- a/dlp/apiv2/dlp_client.go
+++ b/dlp/apiv2/dlp_client.go
@@ -113,6 +113,8 @@
 }
 
 // Client is a client for interacting with Cloud Data Loss Prevention (DLP) API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
 type Client struct {
 	// The connection to the service.
 	conn *grpc.ClientConn
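
The concurrency note added to the Client doc above amounts to: share one Client across goroutines and call Close once after all calls return. A hedged sketch of that usage; the example name and request fields are placeholders, and InspectContent stands in for any generated method on this client.

package dlp_test

import (
	"context"
	"sync"

	dlp "cloud.google.com/go/dlp/apiv2"
	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2"
)

func Example_concurrentUse() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// Close only after every in-flight call has returned.
	defer c.Close()

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Methods other than Close are safe to call concurrently.
			resp, err := c.InspectContent(ctx, &dlppb.InspectContentRequest{
				// TODO: Fill request struct fields.
			})
			if err != nil {
				// TODO: Handle error.
			}
			_ = resp
		}()
	}
	wg.Wait()
}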
diff --git a/dlp/apiv2beta1/dlp_client.go b/dlp/apiv2beta1/dlp_client.go
deleted file mode 100644
index f61f214..0000000
--- a/dlp/apiv2beta1/dlp_client.go
+++ /dev/null
@@ -1,429 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package dlp
-
-import (
-	"time"
-
-	"cloud.google.com/go/internal/version"
-	"cloud.google.com/go/longrunning"
-	lroauto "cloud.google.com/go/longrunning/autogen"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
-	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-// CallOptions contains the retry settings for each method of Client.
-type CallOptions struct {
-	InspectContent         []gax.CallOption
-	RedactContent          []gax.CallOption
-	DeidentifyContent      []gax.CallOption
-	AnalyzeDataSourceRisk  []gax.CallOption
-	CreateInspectOperation []gax.CallOption
-	ListInspectFindings    []gax.CallOption
-	ListInfoTypes          []gax.CallOption
-	ListRootCategories     []gax.CallOption
-}
-
-func defaultClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("dlp.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultCallOptions() *CallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-	}
-	return &CallOptions{
-		InspectContent:         retry[[2]string{"default", "non_idempotent"}],
-		RedactContent:          retry[[2]string{"default", "non_idempotent"}],
-		DeidentifyContent:      retry[[2]string{"default", "idempotent"}],
-		AnalyzeDataSourceRisk:  retry[[2]string{"default", "idempotent"}],
-		CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}],
-		ListInspectFindings:    retry[[2]string{"default", "idempotent"}],
-		ListInfoTypes:          retry[[2]string{"default", "idempotent"}],
-		ListRootCategories:     retry[[2]string{"default", "idempotent"}],
-	}
-}
-
-// Client is a client for interacting with DLP API.
-type Client struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	client dlppb.DlpServiceClient
-
-	// LROClient is used internally to handle longrunning operations.
-	// It is exposed so that its CallOptions can be modified if required.
-	// Users should not Close this client.
-	LROClient *lroauto.OperationsClient
-
-	// The call options for this service.
-	CallOptions *CallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewClient creates a new dlp service client.
-//
-// The DLP API is a service that allows clients
-// to detect the presence of Personally Identifiable Information (PII) and other
-// privacy-sensitive data in user-supplied, unstructured data streams, like text
-// blocks or images.
-// The service also includes methods for sensitive data redaction and
-// scheduling of data scans on Google Cloud Platform based data sets.
-func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &Client{
-		conn:        conn,
-		CallOptions: defaultCallOptions(),
-
-		client: dlppb.NewDlpServiceClient(conn),
-	}
-	c.setGoogleClientInfo()
-
-	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
-	if err != nil {
-		// This error "should not happen", since we are just reusing old connection
-		// and never actually need to dial.
-		// If this does happen, we could leak conn. However, we cannot close conn:
-		// If the user invoked the function with option.WithGRPCConn,
-		// we would close a connection that's still in use.
-		// TODO(pongad): investigate error conditions.
-		return nil, err
-	}
-	return c, nil
-}
-
-// Connection returns the client's connection to the API service.
-func (c *Client) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *Client) Close() error {
-	return c.conn.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *Client) setGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// InspectContent finds potentially sensitive info in a list of strings.
-// This method has limits on input size, processing time, and output size.
-func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
-	var resp *dlppb.InspectContentResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.InspectContent(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// RedactContent redacts potentially sensitive info from a list of strings.
-// This method has limits on input size, processing time, and output size.
-func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...)
-	var resp *dlppb.RedactContentResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.RedactContent(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DeidentifyContent de-identifies potentially sensitive info from a list of strings.
-// This method has limits on input size and output size.
-func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
-	var resp *dlppb.DeidentifyContentResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google
-// Cloud Platform repository.
-func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*AnalyzeDataSourceRiskOperation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...)
-	var resp *longrunningpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return &AnalyzeDataSourceRiskOperation{
-		lro: longrunning.InternalNewOperation(c.LROClient, resp),
-	}, nil
-}
-
-// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data
-// repository.
-func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...)
-	var resp *longrunningpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.CreateInspectOperation(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return &CreateInspectOperationHandle{
-		lro: longrunning.InternalNewOperation(c.LROClient, resp),
-	}, nil
-}
-
-// ListInspectFindings returns list of results for given inspect operation result set id.
-func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...)
-	var resp *dlppb.ListInspectFindingsResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.ListInspectFindings(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListInfoTypes returns sensitive information types for given category.
-func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
-	var resp *dlppb.ListInfoTypesResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListRootCategories returns the list of root categories of sensitive information.
-func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...)
-	var resp *dlppb.ListRootCategoriesResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.ListRootCategories(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// AnalyzeDataSourceRiskOperation manages a long-running operation from AnalyzeDataSourceRisk.
-type AnalyzeDataSourceRiskOperation struct {
-	lro *longrunning.Operation
-}
-
-// AnalyzeDataSourceRiskOperation returns a new AnalyzeDataSourceRiskOperation from a given name.
-// The name must be that of a previously created AnalyzeDataSourceRiskOperation, possibly from a different process.
-func (c *Client) AnalyzeDataSourceRiskOperation(name string) *AnalyzeDataSourceRiskOperation {
-	return &AnalyzeDataSourceRiskOperation{
-		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
-	}
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *AnalyzeDataSourceRiskOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
-	var resp dlppb.RiskAnalysisOperationResult
-	if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
-		return nil, err
-	}
-	return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *AnalyzeDataSourceRiskOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
-	var resp dlppb.RiskAnalysisOperationResult
-	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
-		return nil, err
-	}
-	if !op.Done() {
-		return nil, nil
-	}
-	return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *AnalyzeDataSourceRiskOperation) Metadata() (*dlppb.RiskAnalysisOperationMetadata, error) {
-	var meta dlppb.RiskAnalysisOperationMetadata
-	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
-		return nil, nil
-	} else if err != nil {
-		return nil, err
-	}
-	return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *AnalyzeDataSourceRiskOperation) Done() bool {
-	return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *AnalyzeDataSourceRiskOperation) Name() string {
-	return op.lro.Name()
-}
-
-// CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation.
-type CreateInspectOperationHandle struct {
-	lro *longrunning.Operation
-}
-
-// CreateInspectOperationHandle returns a new CreateInspectOperationHandle from a given name.
-// The name must be that of a previously created CreateInspectOperationHandle, possibly from a different process.
-func (c *Client) CreateInspectOperationHandle(name string) *CreateInspectOperationHandle {
-	return &CreateInspectOperationHandle{
-		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
-	}
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *CreateInspectOperationHandle) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
-	var resp dlppb.InspectOperationResult
-	if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
-		return nil, err
-	}
-	return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *CreateInspectOperationHandle) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
-	var resp dlppb.InspectOperationResult
-	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
-		return nil, err
-	}
-	if !op.Done() {
-		return nil, nil
-	}
-	return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *CreateInspectOperationHandle) Metadata() (*dlppb.InspectOperationMetadata, error) {
-	var meta dlppb.InspectOperationMetadata
-	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
-		return nil, nil
-	} else if err != nil {
-		return nil, err
-	}
-	return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *CreateInspectOperationHandle) Done() bool {
-	return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *CreateInspectOperationHandle) Name() string {
-	return op.lro.Name()
-}
diff --git a/dlp/apiv2beta1/dlp_client_example_test.go b/dlp/apiv2beta1/dlp_client_example_test.go
deleted file mode 100644
index d33fbd3..0000000
--- a/dlp/apiv2beta1/dlp_client_example_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package dlp_test
-
-import (
-	"cloud.google.com/go/dlp/apiv2beta1"
-	"golang.org/x/net/context"
-	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
-)
-
-func ExampleNewClient() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use client.
-	_ = c
-}
-
-func ExampleClient_InspectContent() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.InspectContentRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.InspectContent(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_RedactContent() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.RedactContentRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.RedactContent(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_DeidentifyContent() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.DeidentifyContentRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.DeidentifyContent(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_AnalyzeDataSourceRisk() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.AnalyzeDataSourceRiskRequest{
-		// TODO: Fill request struct fields.
-	}
-	op, err := c.AnalyzeDataSourceRisk(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	resp, err := op.Wait(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_CreateInspectOperation() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.CreateInspectOperationRequest{
-		// TODO: Fill request struct fields.
-	}
-	op, err := c.CreateInspectOperation(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	resp, err := op.Wait(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_ListInspectFindings() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.ListInspectFindingsRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.ListInspectFindings(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_ListInfoTypes() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.ListInfoTypesRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.ListInfoTypes(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_ListRootCategories() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.ListRootCategoriesRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.ListRootCategories(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
diff --git a/dlp/apiv2beta1/mock_test.go b/dlp/apiv2beta1/mock_test.go
deleted file mode 100644
index 1adb03b..0000000
--- a/dlp/apiv2beta1/mock_test.go
+++ /dev/null
@@ -1,844 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package dlp
-
-import (
-	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
-	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
-)
-
-import (
-	"flag"
-	"fmt"
-	"io"
-	"log"
-	"net"
-	"os"
-	"strings"
-	"testing"
-
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	status "google.golang.org/genproto/googleapis/rpc/status"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-	gstatus "google.golang.org/grpc/status"
-)
-
-var _ = io.EOF
-var _ = ptypes.MarshalAny
-var _ status.Status
-
-type mockDlpServer struct {
-	// Embed for forward compatibility.
-	// Tests will keep working if more methods are added
-	// in the future.
-	dlppb.DlpServiceServer
-
-	reqs []proto.Message
-
-	// If set, all calls return this error.
-	err error
-
-	// responses to return if err == nil
-	resps []proto.Message
-}
-
-func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.InspectContentResponse), nil
-}
-
-func (s *mockDlpServer) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest) (*dlppb.RedactContentResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.RedactContentResponse), nil
-}
-
-func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.DeidentifyContentResponse), nil
-}
-
-func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest) (*longrunningpb.Operation, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*longrunningpb.Operation), nil
-}
-
-func (s *mockDlpServer) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest) (*longrunningpb.Operation, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*longrunningpb.Operation), nil
-}
-
-func (s *mockDlpServer) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest) (*dlppb.ListInspectFindingsResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.ListInspectFindingsResponse), nil
-}
-
-func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.ListInfoTypesResponse), nil
-}
-
-func (s *mockDlpServer) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest) (*dlppb.ListRootCategoriesResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.ListRootCategoriesResponse), nil
-}
-
-// clientOpt is the option tests should use to connect to the test server.
-// It is initialized by TestMain.
-var clientOpt option.ClientOption
-
-var (
-	mockDlp mockDlpServer
-)
-
-func TestMain(m *testing.M) {
-	flag.Parse()
-
-	serv := grpc.NewServer()
-	dlppb.RegisterDlpServiceServer(serv, &mockDlp)
-
-	lis, err := net.Listen("tcp", "localhost:0")
-	if err != nil {
-		log.Fatal(err)
-	}
-	go serv.Serve(lis)
-
-	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
-	if err != nil {
-		log.Fatal(err)
-	}
-	clientOpt = option.WithGRPCConn(conn)
-
-	os.Exit(m.Run())
-}
-
-func TestDlpServiceInspectContent(t *testing.T) {
-	var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var name string = "EMAIL_ADDRESS"
-	var infoTypesElement = &dlppb.InfoType{
-		Name: name,
-	}
-	var infoTypes = []*dlppb.InfoType{infoTypesElement}
-	var inspectConfig = &dlppb.InspectConfig{
-		InfoTypes: infoTypes,
-	}
-	var type_ string = "text/plain"
-	var value string = "My email is example@example.com."
-	var itemsElement = &dlppb.ContentItem{
-		Type: type_,
-		DataItem: &dlppb.ContentItem_Value{
-			Value: value,
-		},
-	}
-	var items = []*dlppb.ContentItem{itemsElement}
-	var request = &dlppb.InspectContentRequest{
-		InspectConfig: inspectConfig,
-		Items:         items,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.InspectContent(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceInspectContentError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var name string = "EMAIL_ADDRESS"
-	var infoTypesElement = &dlppb.InfoType{
-		Name: name,
-	}
-	var infoTypes = []*dlppb.InfoType{infoTypesElement}
-	var inspectConfig = &dlppb.InspectConfig{
-		InfoTypes: infoTypes,
-	}
-	var type_ string = "text/plain"
-	var value string = "My email is example@example.com."
-	var itemsElement = &dlppb.ContentItem{
-		Type: type_,
-		DataItem: &dlppb.ContentItem_Value{
-			Value: value,
-		},
-	}
-	var items = []*dlppb.ContentItem{itemsElement}
-	var request = &dlppb.InspectContentRequest{
-		InspectConfig: inspectConfig,
-		Items:         items,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.InspectContent(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceRedactContent(t *testing.T) {
-	var expectedResponse *dlppb.RedactContentResponse = &dlppb.RedactContentResponse{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var name string = "EMAIL_ADDRESS"
-	var infoTypesElement = &dlppb.InfoType{
-		Name: name,
-	}
-	var infoTypes = []*dlppb.InfoType{infoTypesElement}
-	var inspectConfig = &dlppb.InspectConfig{
-		InfoTypes: infoTypes,
-	}
-	var type_ string = "text/plain"
-	var value string = "My email is example@example.com."
-	var itemsElement = &dlppb.ContentItem{
-		Type: type_,
-		DataItem: &dlppb.ContentItem_Value{
-			Value: value,
-		},
-	}
-	var items = []*dlppb.ContentItem{itemsElement}
-	var name2 string = "EMAIL_ADDRESS"
-	var infoType = &dlppb.InfoType{
-		Name: name2,
-	}
-	var replaceWith string = "REDACTED"
-	var replaceConfigsElement = &dlppb.RedactContentRequest_ReplaceConfig{
-		InfoType:    infoType,
-		ReplaceWith: replaceWith,
-	}
-	var replaceConfigs = []*dlppb.RedactContentRequest_ReplaceConfig{replaceConfigsElement}
-	var request = &dlppb.RedactContentRequest{
-		InspectConfig:  inspectConfig,
-		Items:          items,
-		ReplaceConfigs: replaceConfigs,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.RedactContent(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceRedactContentError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var name string = "EMAIL_ADDRESS"
-	var infoTypesElement = &dlppb.InfoType{
-		Name: name,
-	}
-	var infoTypes = []*dlppb.InfoType{infoTypesElement}
-	var inspectConfig = &dlppb.InspectConfig{
-		InfoTypes: infoTypes,
-	}
-	var type_ string = "text/plain"
-	var value string = "My email is example@example.com."
-	var itemsElement = &dlppb.ContentItem{
-		Type: type_,
-		DataItem: &dlppb.ContentItem_Value{
-			Value: value,
-		},
-	}
-	var items = []*dlppb.ContentItem{itemsElement}
-	var name2 string = "EMAIL_ADDRESS"
-	var infoType = &dlppb.InfoType{
-		Name: name2,
-	}
-	var replaceWith string = "REDACTED"
-	var replaceConfigsElement = &dlppb.RedactContentRequest_ReplaceConfig{
-		InfoType:    infoType,
-		ReplaceWith: replaceWith,
-	}
-	var replaceConfigs = []*dlppb.RedactContentRequest_ReplaceConfig{replaceConfigsElement}
-	var request = &dlppb.RedactContentRequest{
-		InspectConfig:  inspectConfig,
-		Items:          items,
-		ReplaceConfigs: replaceConfigs,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.RedactContent(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceDeidentifyContent(t *testing.T) {
-	var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
-	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
-	var items []*dlppb.ContentItem = nil
-	var request = &dlppb.DeidentifyContentRequest{
-		DeidentifyConfig: deidentifyConfig,
-		InspectConfig:    inspectConfig,
-		Items:            items,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.DeidentifyContent(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceDeidentifyContentError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
-	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
-	var items []*dlppb.ContentItem = nil
-	var request = &dlppb.DeidentifyContentRequest{
-		DeidentifyConfig: deidentifyConfig,
-		InspectConfig:    inspectConfig,
-		Items:            items,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.DeidentifyContent(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) {
-	var expectedResponse *dlppb.RiskAnalysisOperationResult = &dlppb.RiskAnalysisOperationResult{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	any, err := ptypes.MarshalAny(expectedResponse)
-	if err != nil {
-		t.Fatal(err)
-	}
-	mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
-		Name:   "longrunning-test",
-		Done:   true,
-		Result: &longrunningpb.Operation_Response{Response: any},
-	})
-
-	var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
-	var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
-	var request = &dlppb.AnalyzeDataSourceRiskRequest{
-		PrivacyMetric: privacyMetric,
-		SourceTable:   sourceTable,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
-	if err != nil {
-		t.Fatal(err)
-	}
-	resp, err := respLRO.Wait(context.Background())
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = nil
-	mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
-		Name: "longrunning-test",
-		Done: true,
-		Result: &longrunningpb.Operation_Error{
-			Error: &status.Status{
-				Code:    int32(errCode),
-				Message: "test error",
-			},
-		},
-	})
-
-	var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
-	var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
-	var request = &dlppb.AnalyzeDataSourceRiskRequest{
-		PrivacyMetric: privacyMetric,
-		SourceTable:   sourceTable,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
-	if err != nil {
-		t.Fatal(err)
-	}
-	resp, err := respLRO.Wait(context.Background())
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceCreateInspectOperation(t *testing.T) {
-	var name2 string = "name2-1052831874"
-	var expectedResponse = &dlppb.InspectOperationResult{
-		Name: name2,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	any, err := ptypes.MarshalAny(expectedResponse)
-	if err != nil {
-		t.Fatal(err)
-	}
-	mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
-		Name:   "longrunning-test",
-		Done:   true,
-		Result: &longrunningpb.Operation_Response{Response: any},
-	})
-
-	var name string = "EMAIL_ADDRESS"
-	var infoTypesElement = &dlppb.InfoType{
-		Name: name,
-	}
-	var infoTypes = []*dlppb.InfoType{infoTypesElement}
-	var inspectConfig = &dlppb.InspectConfig{
-		InfoTypes: infoTypes,
-	}
-	var url string = "gs://example_bucket/example_file.png"
-	var fileSet = &dlppb.CloudStorageOptions_FileSet{
-		Url: url,
-	}
-	var cloudStorageOptions = &dlppb.CloudStorageOptions{
-		FileSet: fileSet,
-	}
-	var storageConfig = &dlppb.StorageConfig{
-		Type: &dlppb.StorageConfig_CloudStorageOptions{
-			CloudStorageOptions: cloudStorageOptions,
-		},
-	}
-	var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
-	var request = &dlppb.CreateInspectOperationRequest{
-		InspectConfig: inspectConfig,
-		StorageConfig: storageConfig,
-		OutputConfig:  outputConfig,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	respLRO, err := c.CreateInspectOperation(context.Background(), request)
-	if err != nil {
-		t.Fatal(err)
-	}
-	resp, err := respLRO.Wait(context.Background())
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceCreateInspectOperationError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = nil
-	mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
-		Name: "longrunning-test",
-		Done: true,
-		Result: &longrunningpb.Operation_Error{
-			Error: &status.Status{
-				Code:    int32(errCode),
-				Message: "test error",
-			},
-		},
-	})
-
-	var name string = "EMAIL_ADDRESS"
-	var infoTypesElement = &dlppb.InfoType{
-		Name: name,
-	}
-	var infoTypes = []*dlppb.InfoType{infoTypesElement}
-	var inspectConfig = &dlppb.InspectConfig{
-		InfoTypes: infoTypes,
-	}
-	var url string = "gs://example_bucket/example_file.png"
-	var fileSet = &dlppb.CloudStorageOptions_FileSet{
-		Url: url,
-	}
-	var cloudStorageOptions = &dlppb.CloudStorageOptions{
-		FileSet: fileSet,
-	}
-	var storageConfig = &dlppb.StorageConfig{
-		Type: &dlppb.StorageConfig_CloudStorageOptions{
-			CloudStorageOptions: cloudStorageOptions,
-		},
-	}
-	var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
-	var request = &dlppb.CreateInspectOperationRequest{
-		InspectConfig: inspectConfig,
-		StorageConfig: storageConfig,
-		OutputConfig:  outputConfig,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	respLRO, err := c.CreateInspectOperation(context.Background(), request)
-	if err != nil {
-		t.Fatal(err)
-	}
-	resp, err := respLRO.Wait(context.Background())
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceListInspectFindings(t *testing.T) {
-	var nextPageToken string = "nextPageToken-1530815211"
-	var expectedResponse = &dlppb.ListInspectFindingsResponse{
-		NextPageToken: nextPageToken,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedName string = fmt.Sprintf("inspect/results/%s", "[RESULT]")
-	var request = &dlppb.ListInspectFindingsRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListInspectFindings(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceListInspectFindingsError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedName string = fmt.Sprintf("inspect/results/%s", "[RESULT]")
-	var request = &dlppb.ListInspectFindingsRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListInspectFindings(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceListInfoTypes(t *testing.T) {
-	var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var category string = "PII"
-	var languageCode string = "en"
-	var request = &dlppb.ListInfoTypesRequest{
-		Category:     category,
-		LanguageCode: languageCode,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListInfoTypes(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceListInfoTypesError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var category string = "PII"
-	var languageCode string = "en"
-	var request = &dlppb.ListInfoTypesRequest{
-		Category:     category,
-		LanguageCode: languageCode,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListInfoTypes(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceListRootCategories(t *testing.T) {
-	var expectedResponse *dlppb.ListRootCategoriesResponse = &dlppb.ListRootCategoriesResponse{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var languageCode string = "en"
-	var request = &dlppb.ListRootCategoriesRequest{
-		LanguageCode: languageCode,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListRootCategories(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceListRootCategoriesError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var languageCode string = "en"
-	var request = &dlppb.ListRootCategoriesRequest{
-		LanguageCode: languageCode,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListRootCategories(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
diff --git a/firestore/cross_language_test.go b/firestore/conformance_test.go
similarity index 96%
rename from firestore/cross_language_test.go
rename to firestore/conformance_test.go
index 228d61a..be5b8cc 100644
--- a/firestore/cross_language_test.go
+++ b/firestore/conformance_test.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// A runner for the cross-language tests.
+// A runner for the conformance tests.
 
 package firestore
 
@@ -38,14 +38,16 @@
 	fspb "google.golang.org/genproto/googleapis/firestore/v1beta1"
 )
 
-func TestCrossLanguageTests(t *testing.T) {
+const conformanceTestWatchTargetID = 1
+
+func TestConformanceTests(t *testing.T) {
 	const dir = "testdata"
 	fis, err := ioutil.ReadDir(dir)
 	if err != nil {
 		t.Fatal(err)
 	}
 	wtid := watchTargetID
-	watchTargetID = 1
+	watchTargetID = conformanceTestWatchTargetID
 	defer func() { watchTargetID = wtid }()
 	n := 0
 	for _, fi := range fis {
@@ -54,7 +56,7 @@
 			n++
 		}
 	}
-	t.Logf("ran %d cross-language tests", n)
+	t.Logf("ran %d conformance tests", n)
 }
 
 func runTestFromFile(t *testing.T, filename string) {
@@ -192,6 +194,12 @@
 		} else if diff := cmp.Diff(got, tt.Listen.Snapshots); diff != "" {
 			t.Errorf("%s:\n%s", msg, diff)
 		}
+		if tt.Listen.IsError {
+			_, err := iter.Next()
+			if err == nil {
+				t.Errorf("%s: got nil, want error", msg)
+			}
+		}
 
 	default:
 		t.Fatalf("unknown test type %T", tt)
diff --git a/firestore/docref.go b/firestore/docref.go
index 40fd96d..a95cd25 100644
--- a/firestore/docref.go
+++ b/firestore/docref.go
@@ -96,7 +96,8 @@
 //     is the underlying type of a Integer.
 //   - float32 and float64 convert to Double.
 //   - []byte converts to Bytes.
-//   - time.Time converts to Timestamp.
+//   - time.Time and *ts.Timestamp convert to Timestamp. ts is the package
+//     "github.com/golang/protobuf/ptypes/timestamp".
 //   - *latlng.LatLng converts to GeoPoint. latlng is the package
 //     "google.golang.org/genproto/googleapis/type/latlng". You should always use
 //     a pointer to a LatLng.
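
As a rough illustration of the conversion rules in the doc comment above (a sketch, assuming ctx is a context.Context and client is a *firestore.Client; the collection, document, and field names are made up), a write whose fields exercise the Timestamp, GeoPoint, and Bytes conversions might look like:

	_, err := client.Collection("C").Doc("d").Set(ctx, map[string]interface{}{
		"updated":  time.Now(),                                       // time.Time -> Timestamp
		"location": &latlng.LatLng{Latitude: 35.0, Longitude: 139.0}, // *latlng.LatLng -> GeoPoint
		"raw":      []byte{0x1, 0x2},                                 // []byte -> Bytes
	})
	if err != nil {
		// TODO: Handle error.
	}
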
diff --git a/firestore/docref_test.go b/firestore/docref_test.go
index 92d31fd..62ae034 100644
--- a/firestore/docref_test.go
+++ b/firestore/docref_test.go
@@ -89,7 +89,7 @@
 }
 
 func TestDocSet(t *testing.T) {
-	// Most tests for Set are in the cross-language tests.
+	// Most tests for Set are in the conformance tests.
 	ctx := context.Background()
 	c, srv := newMock(t)
 
@@ -134,7 +134,7 @@
 func TestDocCreate(t *testing.T) {
 	// Verify creation with structs. In particular, make sure zero values
 	// are handled well.
-	// Other tests for Create are handled by the cross-language tests.
+	// Other tests for Create are handled by the conformance tests.
 	ctx := context.Background()
 	c, srv := newMock(t)
 
@@ -199,7 +199,7 @@
 	testFields = map[string]*pb.Value{"a": intval(1)}
 )
 
-// Update is tested by the cross-language tests.
+// Update is tested by the conformance tests.
 
 func TestFPVsFromData(t *testing.T) {
 	type S struct{ X int }
diff --git a/firestore/document.go b/firestore/document.go
index 348327b..ec02637 100644
--- a/firestore/document.go
+++ b/firestore/document.go
@@ -40,7 +40,7 @@
 	CreateTime time.Time
 
 	// Read-only. The time at which the document was last changed. This value
-	// is initally set to CreateTime then increases monotonically with each
+	// is initially set to CreateTime then increases monotonically with each
 	// change to the document. It can also be compared to values from other
 	// documents and the read time of a query.
 	UpdateTime time.Time
@@ -110,7 +110,7 @@
 	if !d.Exists() {
 		return status.Errorf(codes.NotFound, "document %s does not exist", d.Ref.Path)
 	}
-	return setFromProtoValue(p, &pb.Value{&pb.Value_MapValue{&pb.MapValue{d.proto.Fields}}}, d.c)
+	return setFromProtoValue(p, &pb.Value{ValueType: &pb.Value_MapValue{&pb.MapValue{Fields: d.proto.Fields}}}, d.c)
 }
 
 // DataAt returns the data value denoted by path.
@@ -190,7 +190,7 @@
 	if pv != nil {
 		m := pv.GetMapValue()
 		if m == nil {
-			return nil, nil, fmt.Errorf("firestore: cannot covert value of type %T into a map", x)
+			return nil, nil, fmt.Errorf("firestore: cannot convert value of type %T into a map", x)
 		}
 		fields = m.Fields
 	}
diff --git a/firestore/fieldpath.go b/firestore/fieldpath.go
index 5e24f49..f34c0b1 100644
--- a/firestore/fieldpath.go
+++ b/firestore/fieldpath.go
@@ -161,7 +161,7 @@
 	} else {
 		v, ok := m[fp[0]]
 		if !ok {
-			v = &pb.Value{&pb.Value_MapValue{&pb.MapValue{map[string]*pb.Value{}}}}
+			v = &pb.Value{ValueType: &pb.Value_MapValue{&pb.MapValue{Fields: map[string]*pb.Value{}}}}
 			m[fp[0]] = v
 		}
 		// The type assertion below cannot fail, because setAtPath is only called
diff --git a/firestore/from_value_test.go b/firestore/from_value_test.go
index 1327fb4..d952bdd 100644
--- a/firestore/from_value_test.go
+++ b/firestore/from_value_test.go
@@ -33,7 +33,7 @@
 var (
 	tm  = time.Date(2016, 12, 25, 0, 0, 0, 123456789, time.UTC)
 	ll  = &latlng.LatLng{Latitude: 20, Longitude: 30}
-	ptm = &ts.Timestamp{12345, 67890}
+	ptm = &ts.Timestamp{Seconds: 12345, Nanos: 67890}
 )
 
 func TestCreateFromProtoValue(t *testing.T) {
@@ -52,7 +52,7 @@
 			want: []byte{1, 2},
 		},
 		{
-			in:   &pb.Value{&pb.Value_GeoPointValue{ll}},
+			in:   &pb.Value{ValueType: &pb.Value_GeoPointValue{ll}},
 			want: ll,
 		},
 		{
@@ -200,7 +200,7 @@
 	}{
 		{&bs, bytesval(bytes), bytes},
 		{&tmi, tsval(tm), tm},
-		{&tmp, &pb.Value{&pb.Value_TimestampValue{ptm}}, ptm},
+		{&tmp, &pb.Value{ValueType: &pb.Value_TimestampValue{ptm}}, ptm},
 		{&lli, geoval(ll), ll},
 	} {
 		if err := setFromProtoValue(test.in, test.val, &Client{}); err != nil {
@@ -247,7 +247,7 @@
 		{new(int16), floatval(math.MaxFloat32)},  // doesn't fit
 		{new(uint16), floatval(math.MaxFloat32)}, // doesn't fit
 		{new(float32),
-			&pb.Value{&pb.Value_IntegerValue{math.MaxInt64}}}, // overflow
+			&pb.Value{ValueType: &pb.Value_IntegerValue{math.MaxInt64}}}, // overflow
 	} {
 		err := setFromProtoValue(test.in, test.val, c)
 		if err == nil {
diff --git a/firestore/integration_test.go b/firestore/integration_test.go
index 0580c73..588b1b6 100644
--- a/firestore/integration_test.go
+++ b/firestore/integration_test.go
@@ -29,6 +29,7 @@
 
 	"cloud.google.com/go/internal/pretty"
 	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/internal/uid"
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 
@@ -55,7 +56,7 @@
 var (
 	iClient       *Client
 	iColl         *CollectionRef
-	collectionIDs = testutil.NewUIDSpace("go-integration-test")
+	collectionIDs = uid.NewSpace("go-integration-test", nil)
 )
 
 func initIntegrationTest() {
diff --git a/firestore/options_test.go b/firestore/options_test.go
index 211557b..8bb551e 100644
--- a/firestore/options_test.go
+++ b/firestore/options_test.go
@@ -36,11 +36,11 @@
 		},
 		{
 			in:   []Precondition{Exists},
-			want: &pb.Precondition{&pb.Precondition_Exists{true}},
+			want: &pb.Precondition{ConditionType: &pb.Precondition_Exists{true}},
 		},
 		{
 			in:   []Precondition{LastUpdateTime(aTime)},
-			want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}},
+			want: &pb.Precondition{ConditionType: &pb.Precondition_UpdateTime{aTimestamp}},
 		},
 		{
 			in:      []Precondition{Exists, LastUpdateTime(aTime)},
@@ -79,11 +79,11 @@
 		},
 		{
 			in:   []Precondition{Exists},
-			want: &pb.Precondition{&pb.Precondition_Exists{true}},
+			want: &pb.Precondition{ConditionType: &pb.Precondition_Exists{true}},
 		},
 		{
 			in:   []Precondition{LastUpdateTime(aTime)},
-			want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}},
+			want: &pb.Precondition{ConditionType: &pb.Precondition_UpdateTime{aTimestamp}},
 		},
 		{
 			in:      []Precondition{Exists, LastUpdateTime(aTime)},
@@ -114,11 +114,11 @@
 	}{
 		{
 			in:   nil,
-			want: &pb.Precondition{&pb.Precondition_Exists{true}},
+			want: &pb.Precondition{ConditionType: &pb.Precondition_Exists{true}},
 		},
 		{
 			in:   []Precondition{},
-			want: &pb.Precondition{&pb.Precondition_Exists{true}},
+			want: &pb.Precondition{ConditionType: &pb.Precondition_Exists{true}},
 		},
 
 		{
@@ -127,7 +127,7 @@
 		},
 		{
 			in:   []Precondition{LastUpdateTime(aTime)},
-			want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}},
+			want: &pb.Precondition{ConditionType: &pb.Precondition_UpdateTime{aTimestamp}},
 		},
 		{
 			in:      []Precondition{Exists, LastUpdateTime(aTime)},
diff --git a/firestore/query.go b/firestore/query.go
index 5ef6f4a..682bfa3 100644
--- a/firestore/query.go
+++ b/firestore/query.go
@@ -160,7 +160,7 @@
 // Limit returns a new Query that specifies the maximum number of results to return.
 // It must not be negative.
 func (q Query) Limit(n int) Query {
-	q.limit = &wrappers.Int32Value{trunc32(n)}
+	q.limit = &wrappers.Int32Value{Value: trunc32(n)}
 	return q
 }
 
@@ -363,7 +363,7 @@
 			if !ok {
 				return nil, fmt.Errorf("firestore: expected doc ID for DocumentID field, got %T", fval)
 			}
-			vals[i] = &pb.Value{&pb.Value_ReferenceValue{q.collectionPath() + "/" + docID}}
+			vals[i] = &pb.Value{ValueType: &pb.Value_ReferenceValue{q.collectionPath() + "/" + docID}}
 		} else {
 			var sawTransform bool
 			vals[i], sawTransform, err = toProtoValue(reflect.ValueOf(fval))
@@ -387,7 +387,7 @@
 			if dp != qp {
 				return nil, fmt.Errorf("firestore: document snapshot for %s passed to query on %s", dp, qp)
 			}
-			vals[i] = &pb.Value{&pb.Value_ReferenceValue{ds.Ref.Path}}
+			vals[i] = &pb.Value{ValueType: &pb.Value_ReferenceValue{ds.Ref.Path}}
 		} else {
 			val, err := valueAtPath(ord.fieldPath, ds.proto.Fields)
 			if err != nil {
@@ -534,7 +534,7 @@
 }
 
 func fref(fp FieldPath) *pb.StructuredQuery_FieldReference {
-	return &pb.StructuredQuery_FieldReference{fp.toServiceFieldPath()}
+	return &pb.StructuredQuery_FieldReference{FieldPath: fp.toServiceFieldPath()}
 }
 
 func trunc32(i int) int32 {
diff --git a/firestore/query_test.go b/firestore/query_test.go
index 7f373ed..4e85208 100644
--- a/firestore/query_test.go
+++ b/firestore/query_test.go
@@ -153,7 +153,7 @@
 			in:   q.Where("a", ">", 5).Where("b", "<", "foo"),
 			want: &pb.StructuredQuery{
 				Where: &pb.StructuredQuery_Filter{
-					&pb.StructuredQuery_Filter_CompositeFilter{
+					FilterType: &pb.StructuredQuery_Filter_CompositeFilter{
 						&pb.StructuredQuery_CompositeFilter{
 							Op: pb.StructuredQuery_CompositeFilter_AND,
 							Filters: []*pb.StructuredQuery_Filter{
@@ -174,9 +174,9 @@
 			in:   q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("b"), pb.StructuredQuery_ASCENDING},
-					{fref1("a"), pb.StructuredQuery_DESCENDING},
-					{fref1("~"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("b"), Direction: pb.StructuredQuery_ASCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_DESCENDING},
+					{Field: fref1("~"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 			},
 		},
@@ -185,7 +185,7 @@
 			in:   q.Offset(2).Limit(3),
 			want: &pb.StructuredQuery{
 				Offset: 2,
-				Limit:  &wrappers.Int32Value{3},
+				Limit:  &wrappers.Int32Value{Value: 3},
 			},
 		},
 		{
@@ -193,7 +193,7 @@
 			in:   q.Offset(2).Limit(3).Limit(4).Offset(5), // last wins
 			want: &pb.StructuredQuery{
 				Offset: 5,
-				Limit:  &wrappers.Int32Value{4},
+				Limit:  &wrappers.Int32Value{Value: 4},
 			},
 		},
 		{
@@ -201,7 +201,7 @@
 			in:   q.OrderBy("a", Asc).StartAt(7).EndBefore(9),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("a"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{intval(7)},
@@ -218,7 +218,7 @@
 			in:   q.OrderBy("a", Asc).StartAt(7).EndAt(9),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("a"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{intval(7)},
@@ -235,7 +235,7 @@
 			in:   q.OrderBy("a", Asc).StartAfter(7).EndAt(9),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("a"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{intval(7)},
@@ -252,7 +252,7 @@
 			in:   q.OrderBy(DocumentID, Asc).StartAfter("foo").EndBefore("bar"),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("__name__"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{refval(coll.parentPath + "/documents/C/foo")},
@@ -269,8 +269,8 @@
 			in:   q.OrderBy("a", Asc).OrderBy("b", Desc).StartAfter(7, 8).EndAt(9, 10),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("a"), pb.StructuredQuery_ASCENDING},
-					{fref1("b"), pb.StructuredQuery_DESCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_ASCENDING},
+					{Field: fref1("b"), Direction: pb.StructuredQuery_DESCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{intval(7), intval(8)},
@@ -290,7 +290,7 @@
 				EndAt(3).EndBefore(4),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("a"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{intval(2)},
@@ -309,7 +309,7 @@
 			in:   q.StartAt(docsnap),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("__name__"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{refval(coll.parentPath + "/documents/C/D")},
@@ -322,8 +322,8 @@
 			in:   q.OrderBy("a", Asc).StartAt(docsnap),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("a"), pb.StructuredQuery_ASCENDING},
-					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_ASCENDING},
+					{Field: fref1("__name__"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")},
@@ -337,8 +337,8 @@
 			in:   q.OrderBy("a", Desc).StartAt(docsnap),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("a"), pb.StructuredQuery_DESCENDING},
-					{fref1("__name__"), pb.StructuredQuery_DESCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_DESCENDING},
+					{Field: fref1("__name__"), Direction: pb.StructuredQuery_DESCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")},
@@ -351,9 +351,9 @@
 			in:   q.OrderBy("a", Desc).OrderBy("b", Asc).StartAt(docsnap),
 			want: &pb.StructuredQuery{
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("a"), pb.StructuredQuery_DESCENDING},
-					{fref1("b"), pb.StructuredQuery_ASCENDING},
-					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_DESCENDING},
+					{Field: fref1("b"), Direction: pb.StructuredQuery_ASCENDING},
+					{Field: fref1("__name__"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{intval(7), intval(8), refval(coll.parentPath + "/documents/C/D")},
@@ -367,7 +367,7 @@
 			want: &pb.StructuredQuery{
 				Where: filtr([]string{"a"}, "==", 3),
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("__name__"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{refval(coll.parentPath + "/documents/C/D")},
@@ -381,8 +381,8 @@
 			want: &pb.StructuredQuery{
 				Where: filtr([]string{"a"}, "<", 3),
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("a"), pb.StructuredQuery_ASCENDING},
-					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_ASCENDING},
+					{Field: fref1("__name__"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")},
@@ -395,7 +395,7 @@
 			in:   q.Where("b", "==", 1).Where("a", "<", 3).StartAt(docsnap),
 			want: &pb.StructuredQuery{
 				Where: &pb.StructuredQuery_Filter{
-					&pb.StructuredQuery_Filter_CompositeFilter{
+					FilterType: &pb.StructuredQuery_Filter_CompositeFilter{
 						&pb.StructuredQuery_CompositeFilter{
 							Op: pb.StructuredQuery_CompositeFilter_AND,
 							Filters: []*pb.StructuredQuery_Filter{
@@ -406,8 +406,8 @@
 					},
 				},
 				OrderBy: []*pb.StructuredQuery_Order{
-					{fref1("a"), pb.StructuredQuery_ASCENDING},
-					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+					{Field: fref1("a"), Direction: pb.StructuredQuery_ASCENDING},
+					{Field: fref1("__name__"), Direction: pb.StructuredQuery_ASCENDING},
 				},
 				StartAt: &pb.Cursor{
 					Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")},
diff --git a/firestore/testdata/VERSION b/firestore/testdata/VERSION
index df71bbe..672cd45 100644
--- a/firestore/testdata/VERSION
+++ b/firestore/testdata/VERSION
@@ -1 +1 @@
-SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/test-suite.binproto)= 8855e12b3f1397b6f3dbb3b0b404d4107c861146
+SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/test-suite.binproto)= 6c622d8affaf1ea2ebeb063f9898f5d942343cb7
diff --git a/firestore/testdata/listen-doc-remove.textproto b/firestore/testdata/listen-doc-remove.textproto
new file mode 100644
index 0000000..975200f
--- /dev/null
+++ b/firestore/testdata/listen-doc-remove.textproto
@@ -0,0 +1,115 @@
+# DO NOT MODIFY. This file was generated by
+# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
+
+# The DocumentRemove response behaves exactly like DocumentDelete.
+
+description: "listen: DocumentRemove behaves like DocumentDelete"
+listen: <
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: CURRENT
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 1
+      >
+    >
+  >
+  responses: <
+    document_remove: <
+      document: "projects/projectID/databases/(default)/documents/C/d1"
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 2
+      >
+    >
+  >
+  snapshots: <
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d1"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 3
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+    >
+    read_time: <
+      seconds: 1
+    >
+  >
+  snapshots: <
+    changes: <
+      kind: REMOVED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      new_index: -1
+    >
+    read_time: <
+      seconds: 2
+    >
+  >
+>
diff --git a/firestore/testdata/listen-filter-nop.textproto b/firestore/testdata/listen-filter-nop.textproto
new file mode 100644
index 0000000..48fd72d
--- /dev/null
+++ b/firestore/testdata/listen-filter-nop.textproto
@@ -0,0 +1,247 @@
+# DO NOT MODIFY. This file was generated by
+# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
+
+# A Filter response whose count matches the size of the current state (docs in
+# last snapshot + docs added - docs deleted) is a no-op.
+
+description: "listen: Filter response with same size is a no-op"
+listen: <
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: CURRENT
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 1
+      >
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d3"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    document_delete: <
+      document: "projects/projectID/databases/(default)/documents/C/d1"
+    >
+  >
+  responses: <
+    filter: <
+      count: 2
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 2
+      >
+    >
+  >
+  snapshots: <
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d2"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 1
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d1"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 3
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+      new_index: 1
+    >
+    read_time: <
+      seconds: 1
+    >
+  >
+  snapshots: <
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d2"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 1
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d3"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 1
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    changes: <
+      kind: REMOVED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: 1
+      new_index: -1
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d3"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+      new_index: 1
+    >
+    read_time: <
+      seconds: 2
+    >
+  >
+>
diff --git a/firestore/testdata/listen-multi-docs.textproto b/firestore/testdata/listen-multi-docs.textproto
new file mode 100644
index 0000000..8778acc
--- /dev/null
+++ b/firestore/testdata/listen-multi-docs.textproto
@@ -0,0 +1,524 @@
+# DO NOT MODIFY. This file was generated by
+# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
+
+# Changes should be ordered with deletes first, then additions, then mods, each in
+# query order. Old indices refer to the immediately previous state, not the
+# previous snapshot
+
+description: "listen: multiple documents, added, deleted and updated"
+listen: <
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d3"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d4"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 2
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: CURRENT
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 2
+      >
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d5"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 4
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    document_delete: <
+      document: "projects/projectID/databases/(default)/documents/C/d3"
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: -1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 3
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d6"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    document_delete: <
+      document: "projects/projectID/databases/(default)/documents/C/d2"
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d4"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: -2
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 3
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 4
+      >
+    >
+  >
+  snapshots: <
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d2"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 1
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d3"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 1
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d4"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 2
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d1"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 3
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d3"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+      new_index: 1
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d4"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 2
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+      new_index: 2
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+      new_index: 3
+    >
+    read_time: <
+      seconds: 2
+    >
+  >
+  snapshots: <
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d4"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: -2
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 3
+      >
+    >
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d1"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: -1
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 3
+      >
+    >
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d6"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 3
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d5"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 4
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    changes: <
+      kind: REMOVED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      new_index: -1
+    >
+    changes: <
+      kind: REMOVED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d3"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      new_index: -1
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d6"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+      new_index: 2
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d5"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 4
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+      new_index: 3
+    >
+    changes: <
+      kind: MODIFIED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d4"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: -2
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 3
+        >
+      >
+    >
+    changes: <
+      kind: MODIFIED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: -1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 3
+        >
+      >
+      old_index: 1
+      new_index: 1
+    >
+    read_time: <
+      seconds: 4
+    >
+  >
+>
diff --git a/firestore/testdata/listen-nocurrent.textproto b/firestore/testdata/listen-nocurrent.textproto
new file mode 100644
index 0000000..24239b6
--- /dev/null
+++ b/firestore/testdata/listen-nocurrent.textproto
@@ -0,0 +1,141 @@
+# DO NOT MODIFY. This file was generated by
+# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
+
+# If the watch state is not marked CURRENT, no snapshot is issued.
+
+description: "listen: no snapshot if we don't see CURRENT"
+listen: <
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 1
+      >
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 2
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 2
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: CURRENT
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 2
+      >
+    >
+  >
+  snapshots: <
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d1"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 1
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d2"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 2
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 2
+      >
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 2
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 2
+        >
+      >
+      old_index: -1
+      new_index: 1
+    >
+    read_time: <
+      seconds: 2
+    >
+  >
+>
diff --git a/firestore/testdata/listen-removed-target-ids.textproto b/firestore/testdata/listen-removed-target-ids.textproto
new file mode 100644
index 0000000..1e8ead2
--- /dev/null
+++ b/firestore/testdata/listen-removed-target-ids.textproto
@@ -0,0 +1,131 @@
+# DO NOT MODIFY. This file was generated by
+# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
+
+# A DocumentChange with the watch target ID in the removed_target_ids field is the
+# same as deleting a document.
+
+description: "listen: DocumentChange with removed_target_id is like a delete."
+listen: <
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: CURRENT
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 1
+      >
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      removed_target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 2
+      >
+    >
+  >
+  snapshots: <
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d1"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 3
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+    >
+    read_time: <
+      seconds: 1
+    >
+  >
+  snapshots: <
+    changes: <
+      kind: REMOVED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      new_index: -1
+    >
+    read_time: <
+      seconds: 2
+    >
+  >
+>
diff --git a/firestore/testdata/listen-reset.textproto b/firestore/testdata/listen-reset.textproto
new file mode 100644
index 0000000..0c6cf8a
--- /dev/null
+++ b/firestore/testdata/listen-reset.textproto
@@ -0,0 +1,258 @@
+# DO NOT MODIFY. This file was generated by
+# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
+
+# A RESET message turns off the CURRENT state, and marks all documents as deleted.
+# If a document appeared on the stream but was never part of a snapshot ("d3" in
+# this test), a reset will make it disappear completely.
+
+description: "listen: RESET turns off CURRENT"
+listen: <
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 2
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 2
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: CURRENT
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 1
+      >
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d3"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 2
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: RESET
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 2
+      >
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: CURRENT
+    >
+  >
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 3
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 3
+      >
+    >
+  >
+  snapshots: <
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d2"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 1
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 2
+      >
+    >
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d1"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 2
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 1
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 2
+        >
+      >
+      old_index: -1
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 2
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+      new_index: 1
+    >
+    read_time: <
+      seconds: 1
+    >
+  >
+  snapshots: <
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d2"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 3
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 3
+      >
+    >
+    changes: <
+      kind: REMOVED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 2
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: 1
+      new_index: -1
+    >
+    changes: <
+      kind: MODIFIED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d2"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 3
+        >
+      >
+    >
+    read_time: <
+      seconds: 3
+    >
+  >
+>
diff --git a/firestore/testdata/listen-target-add-nop.textproto b/firestore/testdata/listen-target-add-nop.textproto
new file mode 100644
index 0000000..3fa7cce
--- /dev/null
+++ b/firestore/testdata/listen-target-add-nop.textproto
@@ -0,0 +1,88 @@
+# DO NOT MODIFY. This file was generated by
+# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
+
+# A TargetChange_ADD response must have the same watch target ID.
+
+description: "listen: TargetChange_ADD is a no-op if it has the same target ID"
+listen: <
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: CURRENT
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: ADD
+      target_ids: 1
+      read_time: <
+        seconds: 2
+      >
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 1
+      >
+    >
+  >
+  snapshots: <
+    docs: <
+      name: "projects/projectID/databases/(default)/documents/C/d1"
+      fields: <
+        key: "a"
+        value: <
+          integer_value: 3
+        >
+      >
+      create_time: <
+        seconds: 1
+      >
+      update_time: <
+        seconds: 1
+      >
+    >
+    changes: <
+      kind: ADDED
+      doc: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      old_index: -1
+    >
+    read_time: <
+      seconds: 1
+    >
+  >
+>
diff --git a/firestore/testdata/listen-target-add-wrong-id.textproto b/firestore/testdata/listen-target-add-wrong-id.textproto
new file mode 100644
index 0000000..8754463
--- /dev/null
+++ b/firestore/testdata/listen-target-add-wrong-id.textproto
@@ -0,0 +1,50 @@
+# DO NOT MODIFY. This file was generated by
+# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
+
+# A TargetChange_ADD response must have the same watch target ID.
+
+description: "listen: TargetChange_ADD is an error if it has a different target ID"
+listen: <
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: CURRENT
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: ADD
+      target_ids: 2
+      read_time: <
+        seconds: 2
+      >
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 1
+      >
+    >
+  >
+  is_error: true
+>
diff --git a/firestore/testdata/listen-target-remove.textproto b/firestore/testdata/listen-target-remove.textproto
new file mode 100644
index 0000000..f34b089
--- /dev/null
+++ b/firestore/testdata/listen-target-remove.textproto
@@ -0,0 +1,46 @@
+# DO NOT MODIFY. This file was generated by
+# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
+
+# A TargetChange_REMOVE response should never be sent.
+
+description: "listen: TargetChange_REMOVE should not appear"
+listen: <
+  responses: <
+    document_change: <
+      document: <
+        name: "projects/projectID/databases/(default)/documents/C/d1"
+        fields: <
+          key: "a"
+          value: <
+            integer_value: 3
+          >
+        >
+        create_time: <
+          seconds: 1
+        >
+        update_time: <
+          seconds: 1
+        >
+      >
+      target_ids: 1
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: CURRENT
+    >
+  >
+  responses: <
+    target_change: <
+      target_change_type: REMOVE
+    >
+  >
+  responses: <
+    target_change: <
+      read_time: <
+        seconds: 1
+      >
+    >
+  >
+  is_error: true
+>
diff --git a/firestore/to_value.go b/firestore/to_value.go
index 1c97ecf..4d2df75 100644
--- a/firestore/to_value.go
+++ b/firestore/to_value.go
@@ -27,7 +27,7 @@
 	"google.golang.org/genproto/googleapis/type/latlng"
 )
 
-var nullValue = &pb.Value{&pb.Value_NullValue{}}
+var nullValue = &pb.Value{ValueType: &pb.Value_NullValue{}}
 
 var (
 	typeOfByteSlice      = reflect.TypeOf([]byte{})
@@ -59,31 +59,31 @@
 	}
 	switch x := vi.(type) {
 	case []byte:
-		return &pb.Value{&pb.Value_BytesValue{x}}, false, nil
+		return &pb.Value{ValueType: &pb.Value_BytesValue{x}}, false, nil
 	case time.Time:
 		ts, err := ptypes.TimestampProto(x)
 		if err != nil {
 			return nil, false, err
 		}
-		return &pb.Value{&pb.Value_TimestampValue{ts}}, false, nil
+		return &pb.Value{ValueType: &pb.Value_TimestampValue{ts}}, false, nil
 	case *ts.Timestamp:
 		if x == nil {
 			// gRPC doesn't like nil oneofs. Use NullValue.
 			return nullValue, false, nil
 		}
-		return &pb.Value{&pb.Value_TimestampValue{x}}, false, nil
+		return &pb.Value{ValueType: &pb.Value_TimestampValue{x}}, false, nil
 	case *latlng.LatLng:
 		if x == nil {
 			// gRPC doesn't like nil oneofs. Use NullValue.
 			return nullValue, false, nil
 		}
-		return &pb.Value{&pb.Value_GeoPointValue{x}}, false, nil
+		return &pb.Value{ValueType: &pb.Value_GeoPointValue{x}}, false, nil
 	case *DocumentRef:
 		if x == nil {
 			// gRPC doesn't like nil oneofs. Use NullValue.
 			return nullValue, false, nil
 		}
-		return &pb.Value{&pb.Value_ReferenceValue{x.Path}}, false, nil
+		return &pb.Value{ValueType: &pb.Value_ReferenceValue{x.Path}}, false, nil
 		// Do not add bool, string, int, etc. to this switch; leave them in the
 		// reflect-based switch below. Moving them here would drop support for
 		// types whose underlying types are those primitives.
@@ -92,15 +92,15 @@
 	}
 	switch v.Kind() {
 	case reflect.Bool:
-		return &pb.Value{&pb.Value_BooleanValue{v.Bool()}}, false, nil
+		return &pb.Value{ValueType: &pb.Value_BooleanValue{v.Bool()}}, false, nil
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return &pb.Value{&pb.Value_IntegerValue{v.Int()}}, false, nil
+		return &pb.Value{ValueType: &pb.Value_IntegerValue{v.Int()}}, false, nil
 	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
-		return &pb.Value{&pb.Value_IntegerValue{int64(v.Uint())}}, false, nil
+		return &pb.Value{ValueType: &pb.Value_IntegerValue{int64(v.Uint())}}, false, nil
 	case reflect.Float32, reflect.Float64:
-		return &pb.Value{&pb.Value_DoubleValue{v.Float()}}, false, nil
+		return &pb.Value{ValueType: &pb.Value_DoubleValue{v.Float()}}, false, nil
 	case reflect.String:
-		return &pb.Value{&pb.Value_StringValue{v.String()}}, false, nil
+		return &pb.Value{ValueType: &pb.Value_StringValue{v.String()}}, false, nil
 	case reflect.Slice:
 		return sliceToProtoValue(v)
 	case reflect.Map:
@@ -139,7 +139,7 @@
 		}
 		vals[i] = val
 	}
-	return &pb.Value{&pb.Value_ArrayValue{&pb.ArrayValue{vals}}}, false, nil
+	return &pb.Value{ValueType: &pb.Value_ArrayValue{&pb.ArrayValue{Values: vals}}}, false, nil
 }
 
 func mapToProtoValue(v reflect.Value) (*pb.Value, bool, error) {
@@ -175,7 +175,7 @@
 		// The entire map consisted of ServerTimestamp values.
 		pv = nil
 	} else {
-		pv = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}}
+		pv = &pb.Value{ValueType: &pb.Value_MapValue{&pb.MapValue{Fields: m}}}
 	}
 	return pv, sawServerTimestamp, nil
 }
@@ -215,7 +215,7 @@
 		// The entire struct consisted of ServerTimestamp or omitempty values.
 		pv = nil
 	} else {
-		pv = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}}
+		pv = &pb.Value{ValueType: &pb.Value_MapValue{&pb.MapValue{Fields: m}}}
 	}
 	return pv, sawServerTimestamp, nil
 }
diff --git a/firestore/to_value_test.go b/firestore/to_value_test.go
index eac26a3..3cec5a8 100644
--- a/firestore/to_value_test.go
+++ b/firestore/to_value_test.go
@@ -64,10 +64,10 @@
 		"I":  intval(1),
 		"U":  intval(2),
 		"F":  floatval(3),
-		"S":  &pb.Value{&pb.Value_StringValue{"four"}},
+		"S":  &pb.Value{ValueType: &pb.Value_StringValue{"four"}},
 		"Y":  bytesval([]byte{5}),
 		"T":  tsval(tm),
-		"Ts": &pb.Value{&pb.Value_TimestampValue{ptm}},
+		"Ts": &pb.Value{ValueType: &pb.Value_TimestampValue{ptm}},
 		"G":  geoval(ll),
 		"L":  arrayval(intval(6)),
 		"M":  mapval(map[string]*pb.Value{"a": intval(7)}),
@@ -95,7 +95,7 @@
 		{"str", strval("str")},
 		{[]byte{1, 2}, bytesval([]byte{1, 2})},
 		{tm, tsval(tm)},
-		{ptm, &pb.Value{&pb.Value_TimestampValue{ptm}}},
+		{ptm, &pb.Value{ValueType: &pb.Value_TimestampValue{ptm}}},
 		{ll, geoval(ll)},
 		{[]int{1, 2}, arrayval(intval(1), intval(2))},
 		{&[]int{1, 2}, arrayval(intval(1), intval(2))},
@@ -254,7 +254,7 @@
 	want := mapval(map[string]*pb.Value{
 		"Time":      tsval(tm),
 		"LatLng":    geoval(ll),
-		"Timestamp": &pb.Value{&pb.Value_TimestampValue{ptm}},
+		"Timestamp": &pb.Value{ValueType: &pb.Value_TimestampValue{ptm}},
 	})
 	if !testEqual(got, want) {
 		t.Errorf("got %+v, want %+v", got, want)
diff --git a/firestore/transaction_test.go b/firestore/transaction_test.go
index 014bc7d..f5a72dc 100644
--- a/firestore/transaction_test.go
+++ b/firestore/transaction_test.go
@@ -92,8 +92,7 @@
 		if err != nil {
 			return err
 		}
-		tx.Update(docref, []Update{{Path: "count", Value: count.(int64) + 1}})
-		return nil
+		return tx.Update(docref, []Update{{Path: "count", Value: count.(int64) + 1}})
 	})
 	if err != nil {
 		t.Fatal(err)
@@ -137,7 +136,7 @@
 			Database: db,
 			Options: &pb.TransactionOptions{
 				Mode: &pb.TransactionOptions_ReadWrite_{
-					&pb.TransactionOptions_ReadWrite{tid},
+					&pb.TransactionOptions_ReadWrite{RetryTransaction: tid},
 				},
 			},
 		},
@@ -226,7 +225,9 @@
 	srv.addRPC(beginReq, beginRes)
 	srv.addRPC(rollbackReq, &empty.Empty{})
 	err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
-		tx.Delete(c.Doc("C/a"))
+		if err := tx.Delete(c.Doc("C/a")); err != nil {
+			return err
+		}
 		if _, err := tx.Get(c.Doc("C/a")); err != nil {
 			return err
 		}
@@ -241,7 +242,9 @@
 	srv.addRPC(beginReq, beginRes)
 	srv.addRPC(rollbackReq, &empty.Empty{})
 	err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
-		tx.Delete(c.Doc("C/a"))
+		if err := tx.Delete(c.Doc("C/a")); err != nil {
+			return err
+		}
 		it := tx.Documents(c.Collection("C").Select("x"))
 		defer it.Stop()
 		if _, err := it.Next(); err != iterator.Done {
@@ -258,8 +261,12 @@
 	srv.addRPC(beginReq, beginRes)
 	srv.addRPC(rollbackReq, &empty.Empty{})
 	err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
-		tx.Delete(c.Doc("C/a"))
-		tx.Get(c.Doc("C/a"))
+		if err := tx.Delete(c.Doc("C/a")); err != nil {
+			return err
+		}
+		if _, err := tx.Get(c.Doc("C/a")); err != nil {
+			return err
+		}
 		return nil
 	})
 	if err != errReadAfterWrite {
@@ -294,7 +301,7 @@
 			Database: db,
 			Options: &pb.TransactionOptions{
 				Mode: &pb.TransactionOptions_ReadWrite_{
-					&pb.TransactionOptions_ReadWrite{tid},
+					&pb.TransactionOptions_ReadWrite{RetryTransaction: tid},
 				},
 			},
 		},
diff --git a/firestore/util_test.go b/firestore/util_test.go
index 0478d1b..119e4b7 100644
--- a/firestore/util_test.go
+++ b/firestore/util_test.go
@@ -103,23 +103,23 @@
 }
 
 func int64val(i int64) *pb.Value {
-	return &pb.Value{&pb.Value_IntegerValue{i}}
+	return &pb.Value{ValueType: &pb.Value_IntegerValue{i}}
 }
 
 func boolval(b bool) *pb.Value {
-	return &pb.Value{&pb.Value_BooleanValue{b}}
+	return &pb.Value{ValueType: &pb.Value_BooleanValue{b}}
 }
 
 func floatval(f float64) *pb.Value {
-	return &pb.Value{&pb.Value_DoubleValue{f}}
+	return &pb.Value{ValueType: &pb.Value_DoubleValue{f}}
 }
 
 func strval(s string) *pb.Value {
-	return &pb.Value{&pb.Value_StringValue{s}}
+	return &pb.Value{ValueType: &pb.Value_StringValue{s}}
 }
 
 func bytesval(b []byte) *pb.Value {
-	return &pb.Value{&pb.Value_BytesValue{b}}
+	return &pb.Value{ValueType: &pb.Value_BytesValue{b}}
 }
 
 func tsval(t time.Time) *pb.Value {
@@ -127,24 +127,24 @@
 	if err != nil {
 		panic(fmt.Sprintf("bad time %s in test: %v", t, err))
 	}
-	return &pb.Value{&pb.Value_TimestampValue{ts}}
+	return &pb.Value{ValueType: &pb.Value_TimestampValue{ts}}
 }
 
 func geoval(ll *latlng.LatLng) *pb.Value {
-	return &pb.Value{&pb.Value_GeoPointValue{ll}}
+	return &pb.Value{ValueType: &pb.Value_GeoPointValue{ll}}
 }
 
 func arrayval(s ...*pb.Value) *pb.Value {
 	if s == nil {
 		s = []*pb.Value{}
 	}
-	return &pb.Value{&pb.Value_ArrayValue{&pb.ArrayValue{s}}}
+	return &pb.Value{ValueType: &pb.Value_ArrayValue{&pb.ArrayValue{Values: s}}}
 }
 
 func mapval(m map[string]*pb.Value) *pb.Value {
-	return &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}}
+	return &pb.Value{ValueType: &pb.Value_MapValue{&pb.MapValue{Fields: m}}}
 }
 
 func refval(path string) *pb.Value {
-	return &pb.Value{&pb.Value_ReferenceValue{path}}
+	return &pb.Value{ValueType: &pb.Value_ReferenceValue{path}}
 }
diff --git a/firestore/watch.go b/firestore/watch.go
index 2f992e6..e7526e0 100644
--- a/firestore/watch.go
+++ b/firestore/watch.go
@@ -99,7 +99,7 @@
 	compare := func(_, _ *DocumentSnapshot) (int, error) { return 0, nil }
 	return newWatchStream(ctx, dr.Parent.c, compare, &pb.Target{
 		TargetType: &pb.Target_Documents{
-			Documents: &pb.Target_DocumentsTarget{[]string{dr.Path}},
+			Documents: &pb.Target_DocumentsTarget{Documents: []string{dr.Path}},
 		},
 		TargetId: watchTargetID,
 	})
diff --git a/firestore/writebatch_test.go b/firestore/writebatch_test.go
index db38d03..8245902 100644
--- a/firestore/writebatch_test.go
+++ b/firestore/writebatch_test.go
@@ -62,7 +62,7 @@
 							Fields: map[string]*pb.Value{"*": intval(3)},
 						},
 					},
-					UpdateMask: &pb.DocumentMask{[]string{"`*`"}},
+					UpdateMask: &pb.DocumentMask{FieldPaths: []string{"`*`"}},
 					CurrentDocument: &pb.Precondition{
 						ConditionType: &pb.Precondition_Exists{true},
 					},
diff --git a/httpreplay/cmd/httpr/httpr.go b/httpreplay/cmd/httpr/httpr.go
new file mode 100644
index 0000000..fdf08f1
--- /dev/null
+++ b/httpreplay/cmd/httpr/httpr.go
@@ -0,0 +1,112 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// httpr is a proxy that can record or replay HTTP requests.
+// Start httpr with exactly one of the -record or -replay flags, providing a filename.
+// When recording, terminate the process with an interrupt (kill -2) to write the log file.
+// To get the CA certificate of the proxy, issue a GET to http://localhost:CP/authority.cer, where
+// CP is the control port.
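+//
+// A typical invocation might look like this (the log filename is only an
+// illustration):
+//
+//    httpr -record /tmp/storage.replay -port 8080 -control-port 8181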
+
+// +build go1.8
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"os"
+	"os/signal"
+
+	"cloud.google.com/go/httpreplay/internal/proxy"
+	"github.com/google/martian/martianhttp"
+)
+
+var (
+	port         = flag.Int("port", 8080, "port of the proxy")
+	controlPort  = flag.Int("control-port", 8181, "port for controlling the proxy")
+	record       = flag.String("record", "", "record traffic and save to filename")
+	replay       = flag.String("replay", "", "read filename and replay traffic")
+	debugHeaders = flag.Bool("debug-headers", false, "log header mismatches")
+)
+
+func main() {
+	flag.Parse()
+	if *record == "" && *replay == "" {
+		log.Fatal("provide either -record or -replay")
+	}
+	if *record != "" && *replay != "" {
+		log.Fatal("provide only one of -record and -replay")
+	}
+	log.Printf("httpr: starting proxy on port %d and control on port %d", *port, *controlPort)
+
+	var pr *proxy.Proxy
+	var err error
+	if *record != "" {
+		pr, err = proxy.ForRecording(*record, *port)
+	} else {
+		pr, err = proxy.ForReplaying(*replay, *port)
+	}
+	if err != nil {
+		log.Fatal(err)
+	}
+	proxy.DebugHeaders = *debugHeaders
+
+	// Expose handlers on the control port.
+	mux := http.NewServeMux()
+	mux.Handle("/authority.cer", martianhttp.NewAuthorityHandler(pr.CACert))
+	mux.HandleFunc("/initial", handleInitial(pr))
+	lControl, err := net.Listen("tcp", fmt.Sprintf(":%d", *controlPort))
+	if err != nil {
+		log.Fatal(err)
+	}
+	go http.Serve(lControl, mux)
+
+	sigc := make(chan os.Signal, 1)
+	signal.Notify(sigc, os.Interrupt)
+
+	<-sigc
+
+	log.Println("httpr: shutting down")
+	if err := pr.Close(); err != nil {
+		log.Fatal(err)
+	}
+}
+
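+// handleInitial returns the handler for the control port's /initial endpoint:
+// GET retrieves the proxy's initial state, POST replaces it with the request body.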
+func handleInitial(pr *proxy.Proxy) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		switch req.Method {
+		case "GET":
+			if pr.Initial != nil {
+				w.Write(pr.Initial)
+			}
+
+		case "POST":
+			bytes, err := ioutil.ReadAll(req.Body)
+			req.Body.Close()
+			if err != nil {
+				w.WriteHeader(http.StatusInternalServerError)
+				fmt.Fprintf(w, "reading body: %v", err)
+				return
+			}
+			pr.Initial = bytes
+
+		default:
+			w.WriteHeader(http.StatusBadRequest)
+			fmt.Fprint(w, "use GET to retrieve initial or POST to set it")
+		}
+	}
+}
diff --git a/httpreplay/cmd/httpr/integration_test.go b/httpreplay/cmd/httpr/integration_test.go
new file mode 100644
index 0000000..1262618
--- /dev/null
+++ b/httpreplay/cmd/httpr/integration_test.go
@@ -0,0 +1,230 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package main_test
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"os/exec"
+	"strings"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/storage"
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"google.golang.org/api/option"
+)
+
+const initial = "initial state"
+
+func TestIntegration_HTTPR(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	if testutil.ProjID() == "" {
+		t.Fatal("set GCLOUD_TESTS_GOLANG_PROJECT_ID and GCLOUD_TESTS_GOLANG_KEY")
+	}
+	// Get a unique temporary filename.
+	f, err := ioutil.TempFile("", "httpreplay")
+	if err != nil {
+		t.Fatal(err)
+	}
+	replayFilename := f.Name()
+	if err := f.Close(); err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(replayFilename)
+
+	if err := exec.Command("go", "build").Run(); err != nil {
+		t.Fatalf("running 'go build': %v", err)
+	}
+	defer os.Remove("./httpr")
+	want := runRecord(t, replayFilename)
+	got := runReplay(t, replayFilename)
+	if got != want {
+		t.Fatalf("got %q, want %q", got, want)
+	}
+}
+
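+// runRecord starts the proxy in record mode, stores the initial state via the
+// control port, and returns the bucket summary observed through the proxy.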
+func runRecord(t *testing.T, filename string) string {
+	cmd, tr, cport, err := start("-record", filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer stop(t, cmd)
+
+	ctx := context.Background()
+	hc := &http.Client{
+		Transport: &oauth2.Transport{
+			Base:   tr,
+			Source: testutil.TokenSource(ctx, storage.ScopeFullControl),
+		},
+	}
+	res, err := http.Post(
+		fmt.Sprintf("http://localhost:%s/initial", cport),
+		"text/plain",
+		strings.NewReader(initial))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.StatusCode != 200 {
+		t.Fatalf("from POST: %s", res.Status)
+	}
+	info, err := getBucketInfo(ctx, hc)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return info
+}
+
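+// runReplay starts the proxy in replay mode, verifies that the recorded
+// initial state is returned, and returns the bucket summary observed through
+// the proxy.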
+func runReplay(t *testing.T, filename string) string {
+	cmd, tr, cport, err := start("-replay", filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer stop(t, cmd)
+
+	hc := &http.Client{Transport: tr}
+	res, err := http.Get(fmt.Sprintf("http://localhost:%s/initial", cport))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.StatusCode != 200 {
+		t.Fatalf("from GET: %s", res.Status)
+	}
+	bytes, err := ioutil.ReadAll(res.Body)
+	res.Body.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := string(bytes), initial; got != want {
+		t.Errorf("initial: got %q, want %q", got, want)
+	}
+	info, err := getBucketInfo(context.Background(), hc)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return info
+}
+
+// start runs the proxy binary and waits for it to come up. It returns the
+// command (so the caller can stop it), a transport that talks to the proxy,
+// and the control port. modeFlag must be either "-record" or "-replay".
+func start(modeFlag, filename string) (*exec.Cmd, *http.Transport, string, error) {
+	pport, err := pickPort()
+	if err != nil {
+		return nil, nil, "", err
+	}
+	cport, err := pickPort()
+	if err != nil {
+		return nil, nil, "", err
+	}
+	cmd := exec.Command("./httpr", "-port", pport, "-control-port", cport, modeFlag, filename, "-debug-headers")
+	if err := cmd.Start(); err != nil {
+		return nil, nil, "", err
+	}
+	// Wait for the server to come up.
+	serverUp := false
+	for i := 0; i < 10; i++ {
+		if conn, err := net.Dial("tcp", "localhost:"+cport); err == nil {
+			conn.Close()
+			serverUp = true
+			break
+		}
+		time.Sleep(time.Second)
+	}
+	if !serverUp {
+		return nil, nil, "", errors.New("server never came up")
+	}
+	tr, err := proxyTransport(pport, cport)
+	if err != nil {
+		return nil, nil, "", err
+	}
+	return cmd, tr, cport, nil
+}
+
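+// stop sends an interrupt to the proxy process so that it writes its log (when
+// recording) and exits.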
+func stop(t *testing.T, cmd *exec.Cmd) {
+	if err := cmd.Process.Signal(os.Interrupt); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// pickPort picks an unused port.
+func pickPort() (string, error) {
+	l, err := net.Listen("tcp", ":0")
+	if err != nil {
+		return "", err
+	}
+	addr := l.Addr().String()
+	_, port, err := net.SplitHostPort(addr)
+	if err != nil {
+		return "", err
+	}
+	l.Close()
+	return port, nil
+}
+
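+// proxyTransport returns an http.Transport that routes requests through the
+// proxy on port pport and trusts the proxy's CA certificate, which is fetched
+// from the control port cport.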
+func proxyTransport(pport, cport string) (*http.Transport, error) {
+	caCert, err := getBody(fmt.Sprintf("http://localhost:%s/authority.cer", cport))
+	if err != nil {
+		return nil, err
+	}
+	caCertPool := x509.NewCertPool()
+	if !caCertPool.AppendCertsFromPEM([]byte(caCert)) {
+		return nil, errors.New("bad CA Cert")
+	}
+	return &http.Transport{
+		Proxy:           http.ProxyURL(&url.URL{Host: "localhost:" + pport}),
+		TLSClientConfig: &tls.Config{RootCAs: caCertPool},
+	}, nil
+}
+
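+// getBucketInfo returns a short summary of the attributes of the bucket named
+// after the test project ID; the record and replay runs are compared using
+// this summary.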
+func getBucketInfo(ctx context.Context, hc *http.Client) (string, error) {
+	client, err := storage.NewClient(ctx, option.WithHTTPClient(hc))
+	if err != nil {
+		return "", err
+	}
+	defer client.Close()
+	b := client.Bucket(testutil.ProjID())
+	attrs, err := b.Attrs(ctx)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("name:%s reqpays:%v location:%s sclass:%s",
+		attrs.Name, attrs.RequesterPays, attrs.Location, attrs.StorageClass), nil
+}
+
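+// getBody issues a GET to url and returns the response body.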
+func getBody(url string) ([]byte, error) {
+	res, err := http.Get(url)
+	if err != nil {
+		return nil, err
+	}
+	if res.StatusCode != 200 {
+		return nil, fmt.Errorf("response: %s", res.Status)
+	}
+	defer res.Body.Close()
+	return ioutil.ReadAll(res.Body)
+}
diff --git a/httpreplay/httpreplay.go b/httpreplay/httpreplay.go
new file mode 100644
index 0000000..c131b4d
--- /dev/null
+++ b/httpreplay/httpreplay.go
@@ -0,0 +1,147 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+// Package httpreplay provides an API for recording and replaying traffic
+// from HTTP-based Google API clients.
+//
+// To record:
+// 1.  Call NewRecorder to get a Recorder.
+// 2.  Use its Client method to obtain an HTTP client to use when making API calls.
+// 3.  Close the Recorder when you're done. That will save the
+//     log of interactions to the file you provided to NewRecorder.
+//
+// To replay:
+// 1.  Call NewReplayer with the same filename you used to record to get a Replayer.
+// 2.  Call its Client method and use the client to make the same API calls.
+//     You will get back the recorded responses.
+// 3.  Close the Replayer when you're done.
+//
+// This package is EXPERIMENTAL and is subject to change or removal without notice.
+// It requires Go version 1.8 or higher.
+package httpreplay
+
+// TODO(jba): add examples.
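+//
+// A minimal sketch of the intended flow (error handling omitted; the filename,
+// ctx, and token source ts are placeholders for this illustration):
+//
+//    rec, _ := NewRecorder("testdata/run.replay", nil)
+//    hc, _ := rec.Client(ctx, option.WithTokenSource(ts))
+//    // ... make API calls with hc ...
+//    rec.Close()
+//
+//    rep, _ := NewReplayer("testdata/run.replay")
+//    hc, _ = rep.Client(ctx)
+//    // ... repeat the same API calls with hc ...
+//    rep.Close()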
+
+import (
+	"fmt"
+	"net/http"
+
+	"cloud.google.com/go/httpreplay/internal/proxy"
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"google.golang.org/api/option"
+	htransport "google.golang.org/api/transport/http"
+)
+
+// A Recorder records HTTP interactions.
+type Recorder struct {
+	filename string
+	proxy    *proxy.Proxy
+}
+
+// NewRecorder creates a recorder that writes to filename. The file will
+// also store initial state that can be retrieved to configure replay.
+//
+// You must call Close on the Recorder to ensure that all data is written.
+func NewRecorder(filename string, initial []byte) (*Recorder, error) {
+	p, err := proxy.ForRecording(filename, 0)
+	if err != nil {
+		return nil, err
+	}
+	p.Initial = initial
+	return &Recorder{proxy: p}, nil
+}
+
+// Client returns an http.Client to be used for recording. Provide authentication options
+// like option.WithTokenSource as you normally would, or omit them to use Application Default
+// Credentials.
+func (r *Recorder) Client(ctx context.Context, opts ...option.ClientOption) (*http.Client, error) {
+	hc, _, err := htransport.NewClient(ctx, opts...)
+	if err != nil {
+		return nil, err
+	}
+	// The http.Client returned by htransport.NewClient contains an
+	// http.RoundTripper. We want to somehow plug in a Transport that calls the proxy
+	// (returned by r.proxy.Transport).
+	//
+	// htransport.NewClient constructs its RoundTripper via the decorator pattern, by
+	// nesting several implementations of RoundTripper inside each other, ending with
+	// http.DefaultTransport. For example, one of the decorators is oauth2.Transport,
+	// which inserts an Authorization header and then calls the next RoundTripper in
+	// the sequence (stored in a field called Base).
+	//
+	// The problem is that we need to insert the proxy Transport at the end of this
+	// sequence, where http.DefaultTransport currently lives. But we can't traverse
+	// that sequence of RoundTrippers in general, because we don't know their types.
+	//
+	// For now, we only handle the special (but common) case where the first
+	// RoundTripper in the sequence is an oauth2.Transport. We can replace its Base
+	// field with the proxy transport. This causes us to lose the other RoundTrippers
+	// in the sequence, but those aren't essential for testing.
+	//
+	// A better solution would be to add option.WithBaseTransport, which would allow
+	// us to replace the http.DefaultTransport at the end of the sequence with the
+	// transport of our choice.
+	otrans, ok := hc.Transport.(*oauth2.Transport)
+	if !ok {
+		return nil, fmt.Errorf("can't handle Transport of type %T", hc.Transport)
+	}
+	otrans.Base = r.proxy.Transport()
+	return hc, nil
+}
+
+// Close closes the Recorder and saves the log file.
+func (r *Recorder) Close() error {
+	return r.proxy.Close()
+}
+
+// A Replayer replays previously recorded HTTP interactions.
+type Replayer struct {
+	proxy *proxy.Proxy
+}
+
+// NewReplayer creates a replayer that reads from filename.
+func NewReplayer(filename string) (*Replayer, error) {
+	p, err := proxy.ForReplaying(filename, 0)
+	if err != nil {
+		return nil, err
+	}
+	return &Replayer{proxy: p}, nil
+}
+
+// Client returns an HTTP client for replaying. The client does not need to be
+// configured with credentials for authenticating to a server, since it never
+// contacts a real backend.
+func (r *Replayer) Client(ctx context.Context) (*http.Client, error) {
+	return &http.Client{Transport: r.proxy.Transport()}, nil
+}
+
+// Initial returns the initial state saved by the Recorder.
+func (r *Replayer) Initial() []byte {
+	return r.proxy.Initial
+}
+
+// Close closes the replayer.
+func (r *Replayer) Close() error {
+	return r.proxy.Close()
+}
+
+// DebugHeaders turns on logging of header mismatches: if two requests have the
+// same method, URL and body but differ in a header, the first mismatched
+// header is logged. This helps determine whether a header should be ignored.
+func DebugHeaders() {
+	proxy.DebugHeaders = true
+}
diff --git a/httpreplay/httpreplay_test.go b/httpreplay/httpreplay_test.go
new file mode 100644
index 0000000..1dba53e
--- /dev/null
+++ b/httpreplay/httpreplay_test.go
@@ -0,0 +1,134 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package httpreplay_test
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/httpreplay"
+	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/storage"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+)
+
+func TestIntegration_RecordAndReplay(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	f, err := ioutil.TempFile("", "httpreplay")
+	if err != nil {
+		t.Fatal(err)
+	}
+	replayFilename := f.Name()
+	if err := f.Close(); err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(replayFilename)
+	projectID := testutil.ProjID()
+	if projectID == "" {
+		t.Skip("Need project ID. See CONTRIBUTING.md for details.")
+	}
+	ctx := context.Background()
+
+	// Record.
+	initial := time.Now()
+	ibytes, err := json.Marshal(initial)
+	if err != nil {
+		t.Fatal(err)
+	}
+	rec, err := httpreplay.NewRecorder(replayFilename, ibytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	hc, err := rec.Client(ctx, option.WithTokenSource(
+		testutil.TokenSource(ctx, storage.ScopeFullControl)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	wanta, wantc := run(t, hc)
+	if err := rec.Close(); err != nil {
+		t.Fatalf("rec.Close: %v", err)
+	}
+
+	// Replay.
+	rep, err := httpreplay.NewReplayer(replayFilename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer rep.Close()
+	hc, err = rep.Client(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	gota, gotc := run(t, hc)
+
+	if diff := testutil.Diff(gota, wanta); diff != "" {
+		t.Error(diff)
+	}
+	if !bytes.Equal(gotc, wantc) {
+		t.Errorf("got %q, want %q", gotc, wantc)
+	}
+	var gotInitial time.Time
+	if err := json.Unmarshal(rep.Initial(), &gotInitial); err != nil {
+		t.Fatal(err)
+	}
+	if !gotInitial.Equal(initial) {
+		t.Errorf("initial: got %v, want %v", gotInitial, initial)
+	}
+}
+
+// TODO(jba): test errors
+
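+// run exercises the Storage API through hc: it reads the test bucket's
+// attributes, writes a small object and reads it back, then returns both.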
+func run(t *testing.T, hc *http.Client) (*storage.BucketAttrs, []byte) {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx, option.WithHTTPClient(hc))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer client.Close()
+	b := client.Bucket(testutil.ProjID())
+	attrs, err := b.Attrs(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	obj := b.Object("replay-test")
+	w := obj.NewWriter(ctx)
+	if _, err := w.Write([]byte("hello")); err != nil {
+		t.Fatal(err)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	r, err := obj.NewReader(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Close()
+	contents, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return attrs, contents
+}
diff --git a/httpreplay/internal/proxy/record.go b/httpreplay/internal/proxy/record.go
new file mode 100644
index 0000000..bd26352
--- /dev/null
+++ b/httpreplay/internal/proxy/record.go
@@ -0,0 +1,215 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+// Package proxy provides a record/replay HTTP proxy. It is designed to support
+// both an in-memory API (cloud.google.com/go/httpreplay) and a standalone server
+// (cloud.google.com/go/httpreplay/cmd/httpr).
+package proxy
+
+// See github.com/google/martian/cmd/proxy/main.go for the origin of much of this.
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/google/martian"
+	"github.com/google/martian/fifo"
+	"github.com/google/martian/har"
+	"github.com/google/martian/httpspec"
+	"github.com/google/martian/martianlog"
+	"github.com/google/martian/mitm"
+)
+
+// A Proxy is an HTTP proxy that supports recording or replaying requests.
+type Proxy struct {
+	// The certificate that the proxy uses to participate in TLS.
+	CACert *x509.Certificate
+
+	// The URL of the proxy.
+	URL *url.URL
+
+	// Initial state of the client.
+	Initial []byte
+
+	mproxy   *martian.Proxy
+	filename string      // for log
+	logger   *har.Logger // for recording only
+}
+
+// ForRecording returns a Proxy configured to record.
+func ForRecording(filename string, port int) (*Proxy, error) {
+	p, err := newProxy(filename)
+	if err != nil {
+		return nil, err
+	}
+	// Configure the transport for the proxy's outgoing traffic.
+	p.mproxy.SetRoundTripper(&http.Transport{
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: time.Second,
+	})
+
+	// Construct a group that performs the standard proxy stack of request/response
+	// modifications.
+	stack, _ := httpspec.NewStack("httpr") // second arg is an internal group that we don't need
+	p.mproxy.SetRequestModifier(stack)
+	p.mproxy.SetResponseModifier(stack)
+
+	// Make a group for logging requests and responses.
+	logGroup := fifo.NewGroup()
+	skipAuth := skipLoggingByHost("accounts.google.com")
+	logGroup.AddRequestModifier(skipAuth)
+	logGroup.AddResponseModifier(skipAuth)
+	p.logger = har.NewLogger()
+	logGroup.AddRequestModifier(martian.RequestModifierFunc(
+		func(req *http.Request) error { return withRedactedHeaders(req, p.logger) }))
+	logGroup.AddResponseModifier(p.logger)
+
+	stack.AddRequestModifier(logGroup)
+	stack.AddResponseModifier(logGroup)
+
+	// Ordinary debug logging.
+	logger := martianlog.NewLogger()
+	logger.SetDecode(true)
+	stack.AddRequestModifier(logger)
+	stack.AddResponseModifier(logger)
+
+	if err := p.start(port); err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
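+// newProxy creates the underlying martian proxy, configured with a
+// man-in-the-middle CA certificate so that it can intercept TLS connections.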
+func newProxy(filename string) (*Proxy, error) {
+	mproxy := martian.NewProxy()
+	// Set up a man-in-the-middle configuration with a CA certificate so the proxy can
+	// participate in TLS.
+	x509c, priv, err := mitm.NewAuthority("cloud.google.com/go/httpreplay", "HTTPReplay Authority", time.Hour)
+	if err != nil {
+		return nil, err
+	}
+	mc, err := mitm.NewConfig(x509c, priv)
+	if err != nil {
+		return nil, err
+	}
+	mc.SetValidity(time.Hour)
+	mc.SetOrganization("cloud.google.com/go/httpreplay")
+	mc.SkipTLSVerify(false)
+	mproxy.SetMITM(mc)
+	return &Proxy{
+		mproxy:   mproxy,
+		CACert:   x509c,
+		filename: filename,
+	}, nil
+}
+
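+// start begins serving the proxy on the given port (0 selects a free port) and
+// records the proxy's URL.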
+func (p *Proxy) start(port int) error {
+	l, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
+	if err != nil {
+		return err
+	}
+	p.URL = &url.URL{Scheme: "http", Host: l.Addr().String()}
+	go p.mproxy.Serve(l)
+	return nil
+}
+
+// Transport returns an http.Transport for clients who want to talk to the proxy.
+func (p *Proxy) Transport() *http.Transport {
+	caCertPool := x509.NewCertPool()
+	caCertPool.AddCert(p.CACert)
+	return &http.Transport{
+		TLSClientConfig: &tls.Config{RootCAs: caCertPool},
+		Proxy:           func(*http.Request) (*url.URL, error) { return p.URL, nil },
+	}
+}
+
+// Close closes the proxy. If the proxy is recording, it also writes the log.
+func (p *Proxy) Close() error {
+	p.mproxy.Close()
+	if p.logger != nil {
+		return p.writeLog()
+	}
+	return nil
+}
+
+type httprFile struct {
+	Initial []byte
+	HAR     *har.HAR
+}
+
+func (p *Proxy) writeLog() error {
+	f := httprFile{
+		Initial: p.Initial,
+		HAR:     p.logger.ExportAndReset(),
+	}
+	bytes, err := json.Marshal(f)
+	if err != nil {
+		return err
+	}
+	return ioutil.WriteFile(p.filename, bytes, 0600) // only accessible by owner
+}
+
+// Headers that may contain sensitive data (auth tokens, keys).
+var sensitiveHeaders = []string{
+	"Authorization",
+	"X-Goog-Encryption-Key",             // used by Cloud Storage for customer-supplied encryption
+	"X-Goog-Copy-Source-Encryption-Key", // ditto
+}
+
+// withRedactedHeaders temporarily replaces sensitive header values with
+// "REDACTED" while calling mod, then restores the original values.
+func withRedactedHeaders(req *http.Request, mod martian.RequestModifier) error {
+	// We have to change the headers, then log, then restore them.
+	replaced := map[string]string{}
+	for _, h := range sensitiveHeaders {
+		if v := req.Header.Get(h); v != "" {
+			replaced[h] = v
+			req.Header.Set(h, "REDACTED")
+		}
+	}
+	err := mod.ModifyRequest(req)
+	for h, v := range replaced {
+		req.Header.Set(h, v)
+	}
+	return err
+}
+
+// skipLoggingByHost disables logging for traffic to a particular host.
+type skipLoggingByHost string
+
+func (s skipLoggingByHost) ModifyRequest(req *http.Request) error {
+	if strings.HasPrefix(req.Host, string(s)) {
+		martian.NewContext(req).SkipLogging()
+	}
+	return nil
+}
+
+func (s skipLoggingByHost) ModifyResponse(res *http.Response) error {
+	return s.ModifyRequest(res.Request)
+}
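A minimal sketch of how a caller might drive the recording proxy above. The log path and target URL are placeholders, and the import path assumes the sketch lives inside the httpreplay module, since the proxy package is internal.

```go
package proxy_test

import (
	"io/ioutil"
	"log"
	"net/http"

	"cloud.google.com/go/httpreplay/internal/proxy"
)

func Example_record() {
	// Start a recording proxy on an OS-chosen port; traffic is written to the
	// given file when the proxy is closed.
	p, err := proxy.ForRecording("testdata/example.replay", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close() // writes the HAR log

	// Route an HTTP client through the proxy so its requests are recorded.
	client := &http.Client{Transport: p.Transport()}
	res, err := client.Get("https://www.example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	_ = body
}
```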
diff --git a/httpreplay/internal/proxy/record_test.go b/httpreplay/internal/proxy/record_test.go
new file mode 100644
index 0000000..a3868e7
--- /dev/null
+++ b/httpreplay/internal/proxy/record_test.go
@@ -0,0 +1,65 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package proxy
+
+import (
+	"net/http"
+	"testing"
+
+	"cloud.google.com/go/internal/testutil"
+	"github.com/google/martian"
+)
+
+func TestWithRedactedHeaders(t *testing.T) {
+	clone := func(h http.Header) http.Header {
+		h2 := http.Header{}
+		for k, v := range h {
+			h2[k] = v
+		}
+		return h2
+	}
+
+	orig := http.Header{
+		"Content-Type":                      {"text/plain"},
+		"Authorization":                     {"oauth2-token"},
+		"X-Goog-Encryption-Key":             {"a-secret-key"},
+		"X-Goog-Copy-Source-Encryption-Key": {"another-secret-key"},
+	}
+	req := &http.Request{Header: clone(orig)}
+	var got http.Header
+	mod := martian.RequestModifierFunc(func(req *http.Request) error {
+		got = clone(req.Header)
+		return nil
+	})
+	if err := withRedactedHeaders(req, mod); err != nil {
+		t.Fatal(err)
+	}
+	// Logged headers should be redacted.
+	want := http.Header{
+		"Content-Type":                      {"text/plain"},
+		"Authorization":                     {"REDACTED"},
+		"X-Goog-Encryption-Key":             {"REDACTED"},
+		"X-Goog-Copy-Source-Encryption-Key": {"REDACTED"},
+	}
+	if !testutil.Equal(got, want) {
+		t.Errorf("got  %+v\nwant %+v", got, want)
+	}
+	// The request's headers should be the same.
+	if got, want := req.Header, orig; !testutil.Equal(got, want) {
+		t.Errorf("got  %+v\nwant %+v", got, want)
+	}
+}
diff --git a/httpreplay/internal/proxy/replay.go b/httpreplay/internal/proxy/replay.go
new file mode 100644
index 0000000..fe87386
--- /dev/null
+++ b/httpreplay/internal/proxy/replay.go
@@ -0,0 +1,336 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package proxy
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"mime"
+	"mime/multipart"
+	"net/http"
+	"reflect"
+	"strings"
+
+	"github.com/google/martian/har"
+	"github.com/google/martian/martianlog"
+)
+
+// ForReplaying returns a Proxy configured to replay.
+func ForReplaying(filename string, port int) (*Proxy, error) {
+	p, err := newProxy(filename)
+	if err != nil {
+		return nil, err
+	}
+	calls, initial, err := readLog(filename)
+	if err != nil {
+		return nil, err
+	}
+	p.mproxy.SetRoundTripper(replayRoundTripper{calls: calls})
+	p.Initial = initial
+
+	// Debug logging.
+	// TODO(jba): factor out from here and ForRecording.
+	logger := martianlog.NewLogger()
+	logger.SetDecode(true)
+	p.mproxy.SetRequestModifier(logger)
+	p.mproxy.SetResponseModifier(logger)
+
+	if err := p.start(port); err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// A call is an HTTP request and its matching response.
+type call struct {
+	req     *har.Request
+	reqBody *requestBody // parsed request body
+	res     *har.Response
+}
+
+func readLog(filename string) ([]*call, []byte, error) {
+	bytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, nil, err
+	}
+	var f httprFile
+	if err := json.Unmarshal(bytes, &f); err != nil {
+		return nil, nil, err
+	}
+	ignoreIDs := map[string]bool{} // IDs of requests to ignore
+	callsByID := map[string]*call{}
+	var calls []*call
+	for _, e := range f.HAR.Log.Entries {
+		if ignoreIDs[e.ID] {
+			continue
+		}
+		c, ok := callsByID[e.ID]
+		switch {
+		case !ok:
+			if e.Request == nil {
+				return nil, nil, fmt.Errorf("first entry for ID %s does not have a request", e.ID)
+			}
+			if e.Request.Method == "CONNECT" {
+				// Ignore CONNECT methods.
+				ignoreIDs[e.ID] = true
+			} else {
+				reqBody, err := newRequestBodyFromHAR(e.Request)
+				if err != nil {
+					return nil, nil, err
+				}
+				c := &call{e.Request, reqBody, e.Response}
+				calls = append(calls, c)
+				callsByID[e.ID] = c
+			}
+		case e.Request != nil:
+			if e.Response != nil {
+				return nil, nil, errors.New("HAR entry has both request and response")
+			}
+			c.req = e.Request
+		case e.Response != nil:
+			c.res = e.Response
+		default:
+			return nil, nil, errors.New("HAR entry has neither request nor response")
+		}
+	}
+	for _, c := range calls {
+		if c.req == nil || c.res == nil {
+			return nil, nil, fmt.Errorf("missing request or response: %+v", c)
+		}
+	}
+	return calls, f.Initial, nil
+}
+
+type replayRoundTripper struct {
+	calls []*call
+}
+
+func (r replayRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	reqBody, err := newRequestBodyFromHTTP(req)
+	if err != nil {
+		return nil, err
+	}
+	for i, call := range r.calls {
+		if call == nil {
+			continue
+		}
+		if requestsMatch(req, reqBody, call.req, call.reqBody) {
+			r.calls[i] = nil // nil out this call so we don't reuse it
+			res := harResponseToHTTPResponse(call.res)
+			res.Request = req
+			return res, nil
+		}
+	}
+	return nil, fmt.Errorf("no matching request for %+v", req)
+}
+
+// Headers that shouldn't be compared, because they may differ on different executions
+// of the same code, or may not be present during record or replay.
+var ignoreHeaders = map[string]bool{}
+
+func init() {
+	// Sensitive headers are redacted in the log, so they won't be equal to incoming values.
+	for _, h := range sensitiveHeaders {
+		ignoreHeaders[h] = true
+	}
+	for _, h := range []string{
+		"Content-Type", // handled by requestBody
+		"Date",
+		"Host",
+		"Transfer-Encoding",
+		"Via",
+		"X-Forwarded-For",
+		"X-Forwarded-Host",
+		"X-Forwarded-Proto",
+		"X-Forwarded-Url",
+	} {
+		ignoreHeaders[h] = true
+	}
+}
+
+// requestsMatch reports whether the incoming request in matches the candidate request cand.
+func requestsMatch(in *http.Request, inBody *requestBody, cand *har.Request, candBody *requestBody) bool {
+	// TODO(jba): compare headers?
+	if in.Method != cand.Method {
+		return false
+	}
+	if in.URL.String() != cand.URL {
+		return false
+	}
+	if !inBody.equal(candBody) {
+		return false
+	}
+	// Check headers last. See DebugHeaders.
+	return headersMatch(in.Header, harHeadersToHTTP(cand.Headers), ignoreHeaders)
+}
+
+func harHeadersToHTTP(hhs []har.Header) http.Header {
+	// Unfortunately, the har package joins multiple header values with ", ",
+	// which isn't reversible if any of the values contains a comma.
+	// We hope for the best.
+	res := http.Header{}
+	for _, hh := range hhs {
+		res[hh.Name] = strings.Split(hh.Value, ", ")
+	}
+	return res
+}
+
+// harResponseToHTTPResponse converts a HAR response to a Go http.Response.
+// HAR (Http ARchive) is a standard for storing HTTP interactions.
+// See http://www.softwareishard.com/blog/har-12-spec.
+func harResponseToHTTPResponse(hr *har.Response) *http.Response {
+	return &http.Response{
+		StatusCode: hr.Status,
+		Status:     hr.StatusText,
+		Proto:      hr.HTTPVersion,
+		// TODO(jba): headers?
+		Body:          ioutil.NopCloser(bytes.NewReader(hr.Content.Text)),
+		ContentLength: int64(len(hr.Content.Text)),
+	}
+}
+
+// A requestBody represents the body of a request. If the content type is multipart, the
+// body is split into parts.
+//
+// The replaying proxy needs to understand multipart bodies because the boundaries are
+// generated randomly, so we can't just compare the entire bodies for equality.
+type requestBody struct {
+	mediaType string   // the media type part of the Content-Type header
+	parts     [][]byte // the parts of the body, or just a single []byte if not multipart
+}
+
+func newRequestBodyFromHTTP(req *http.Request) (*requestBody, error) {
+	defer req.Body.Close()
+	return newRequestBody(req.Header.Get("Content-Type"), req.Body)
+}
+
+func newRequestBodyFromHAR(req *har.Request) (*requestBody, error) {
+	if req.PostData == nil {
+		return nil, nil
+	}
+	var cth string
+	for _, h := range req.Headers {
+		if h.Name == "Content-Type" {
+			cth = h.Value
+			break
+		}
+	}
+	return newRequestBody(cth, strings.NewReader(req.PostData.Text))
+}
+
+// newRequestBody parses the Content-Type header, reads the body, and splits it into
+// parts if necessary.
+func newRequestBody(contentType string, body io.Reader) (*requestBody, error) {
+	if contentType == "" {
+		// No content-type header. There should not be a body.
+		if _, err := body.Read(make([]byte, 1)); err != io.EOF {
+			return nil, errors.New("request has a body but no Content-Type header")
+		}
+		return nil, nil
+	}
+	mediaType, params, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		return nil, err
+	}
+	rb := &requestBody{mediaType: mediaType}
+	if strings.HasPrefix(mediaType, "multipart/") {
+		mr := multipart.NewReader(body, params["boundary"])
+		for {
+			p, err := mr.NextPart()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				return nil, err
+			}
+			part, err := ioutil.ReadAll(p)
+			if err != nil {
+				return nil, err
+			}
+			// TODO(jba): care about part headers?
+			rb.parts = append(rb.parts, part)
+		}
+	} else {
+		bytes, err := ioutil.ReadAll(body)
+		if err != nil {
+			return nil, err
+		}
+		rb.parts = [][]byte{bytes}
+	}
+	return rb, nil
+}
+
+func (r1 *requestBody) equal(r2 *requestBody) bool {
+	if r1 == nil || r2 == nil {
+		return r1 == r2
+	}
+	if r1.mediaType != r2.mediaType {
+		return false
+	}
+	if len(r1.parts) != len(r2.parts) {
+		return false
+	}
+	for i, p1 := range r1.parts {
+		if !bytes.Equal(p1, r2.parts[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// DebugHeaders helps to determine whether a header should be ignored.
+// When true, if requests have the same method, URL and body but differ
+// in a header, the first mismatched header is logged.
+var DebugHeaders = false
+
+func headersMatch(in, cand http.Header, ignores map[string]bool) bool {
+	for k1, v1 := range in {
+		if ignores[k1] {
+			continue
+		}
+		v2 := cand[k1]
+		if v2 == nil {
+			if DebugHeaders {
+				log.Printf("header %s: present in incoming request but not candidate", k1)
+			}
+			return false
+		}
+		if !reflect.DeepEqual(v1, v2) {
+			if DebugHeaders {
+				log.Printf("header %s: incoming %v, candidate %v", k1, v1, v2)
+			}
+			return false
+		}
+	}
+	for k2 := range cand {
+		if ignores[k2] {
+			continue
+		}
+		if in[k2] == nil {
+			if DebugHeaders {
+				log.Printf("header %s: not in incoming request but present in candidate", k2)
+			}
+			return false
+		}
+	}
+	return true
+}
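Replaying is symmetric. A short sketch under the same assumptions as the recording example, showing ForReplaying together with DebugHeaders; the recorded log file is hypothetical.

```go
package proxy_test

import (
	"log"
	"net/http"

	"cloud.google.com/go/httpreplay/internal/proxy"
)

func Example_replay() {
	// Log the first mismatched header of a request that otherwise matches a
	// recorded one, to help decide whether that header should be ignored.
	proxy.DebugHeaders = true

	// Replay against a previously recorded log; no traffic reaches the real service.
	p, err := proxy.ForReplaying("testdata/example.replay", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	client := &http.Client{Transport: p.Transport()}
	if _, err := client.Get("https://www.example.com"); err != nil {
		log.Println("no matching recorded request:", err)
	}
}
```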
diff --git a/httpreplay/internal/proxy/replay_test.go b/httpreplay/internal/proxy/replay_test.go
new file mode 100644
index 0000000..929a516
--- /dev/null
+++ b/httpreplay/internal/proxy/replay_test.go
@@ -0,0 +1,114 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package proxy
+
+import (
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"cloud.google.com/go/internal/testutil"
+	"github.com/google/go-cmp/cmp"
+)
+
+func TestRequestBody(t *testing.T) {
+	req1 := &http.Request{
+		Header: http.Header{"Content-Type": {"multipart/mixed; boundary=foo"}},
+		Body: ioutil.NopCloser(strings.NewReader(
+			"--foo\r\nFoo: one\r\n\r\nA section\r\n" +
+				"--foo\r\nFoo: two\r\n\r\nAnd another\r\n" +
+				"--foo--\r\n")),
+	}
+	rb1, err := newRequestBodyFromHTTP(req1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := &requestBody{
+		mediaType: "multipart/mixed",
+		parts: [][]byte{
+			[]byte("A section"),
+			[]byte("And another"),
+		},
+	}
+	if diff := testutil.Diff(rb1, want, cmp.AllowUnexported(requestBody{})); diff != "" {
+		t.Error(diff)
+	}
+
+	// Same contents, different boundary.
+	req2 := &http.Request{
+		Header: http.Header{"Content-Type": {"multipart/mixed; boundary=bar"}},
+		Body: ioutil.NopCloser(strings.NewReader(
+			"--bar\r\nFoo: one\r\n\r\nA section\r\n" +
+				"--bar\r\nFoo: two\r\n\r\nAnd another\r\n" +
+				"--bar--\r\n")),
+	}
+	rb2, err := newRequestBodyFromHTTP(req2)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := testutil.Diff(rb2, want, cmp.AllowUnexported(requestBody{})); diff != "" {
+		t.Error(diff)
+	}
+
+	if !rb1.equal(rb2) {
+		t.Error("equal returned false, want true")
+	}
+}
+
+func TestHeadersMatch(t *testing.T) {
+	for _, test := range []struct {
+		h1, h2 http.Header
+		want   bool
+	}{
+		{
+			http.Header{"A": {"x"}, "B": {"y", "z"}},
+			http.Header{"A": {"x"}, "B": {"y", "z"}},
+			true,
+		},
+		{
+			http.Header{"A": {"x"}, "B": {"y", "z"}},
+			http.Header{"A": {"x"}, "B": {"w"}},
+			false,
+		},
+		{
+			http.Header{"A": {"x"}, "B": {"y", "z"}, "I": {"foo"}},
+			http.Header{"A": {"x"}, "B": {"y", "z"}, "I": {"bar"}},
+			true,
+		},
+		{
+			http.Header{"A": {"x"}, "B": {"y", "z"}},
+			http.Header{"A": {"x"}, "B": {"y", "z"}, "I": {"bar"}},
+			true,
+		},
+		{
+			http.Header{"A": {"x"}, "B": {"y", "z"}, "I": {"foo"}},
+			http.Header{"A": {"x"}, "I": {"bar"}},
+			false,
+		},
+		{
+			http.Header{"A": {"x"}, "I": {"foo"}},
+			http.Header{"A": {"x"}, "B": {"y", "z"}, "I": {"bar"}},
+			false,
+		},
+	} {
+		got := headersMatch(test.h1, test.h2, map[string]bool{"I": true})
+		if got != test.want {
+			t.Errorf("%v, %v: got %t, want %t", test.h1, test.h2, got, test.want)
+		}
+	}
+}
diff --git a/internal/protostruct/protostruct_test.go b/internal/protostruct/protostruct_test.go
index 224cf20..369c0ce 100644
--- a/internal/protostruct/protostruct_test.go
+++ b/internal/protostruct/protostruct_test.go
@@ -26,19 +26,19 @@
 	if got := DecodeToMap(nil); !testutil.Equal(got, map[string]interface{}(nil)) {
 		t.Errorf("DecodeToMap(nil) = %v, want nil", got)
 	}
-	nullv := &pb.Value{&pb.Value_NullValue{}}
-	stringv := &pb.Value{&pb.Value_StringValue{"x"}}
-	boolv := &pb.Value{&pb.Value_BoolValue{true}}
-	numberv := &pb.Value{&pb.Value_NumberValue{2.7}}
+	nullv := &pb.Value{Kind: &pb.Value_NullValue{}}
+	stringv := &pb.Value{Kind: &pb.Value_StringValue{"x"}}
+	boolv := &pb.Value{Kind: &pb.Value_BoolValue{true}}
+	numberv := &pb.Value{Kind: &pb.Value_NumberValue{2.7}}
 	in := &pb.Struct{Fields: map[string]*pb.Value{
 		"n": nullv,
 		"s": stringv,
 		"b": boolv,
 		"f": numberv,
-		"l": &pb.Value{&pb.Value_ListValue{&pb.ListValue{
-			[]*pb.Value{nullv, stringv, boolv, numberv},
+		"l": &pb.Value{Kind: &pb.Value_ListValue{&pb.ListValue{
+			Values: []*pb.Value{nullv, stringv, boolv, numberv},
 		}}},
-		"S": &pb.Value{&pb.Value_StructValue{&pb.Struct{Fields: map[string]*pb.Value{
+		"S": &pb.Value{Kind: &pb.Value_StructValue{&pb.Struct{Fields: map[string]*pb.Value{
 			"n1": nullv,
 			"b1": boolv,
 		}}}},
diff --git a/internal/testutil/unique.go b/internal/uid/uid.go
similarity index 64%
rename from internal/testutil/unique.go
rename to internal/uid/uid.go
index ff0de4a..5c513ba 100644
--- a/internal/testutil/unique.go
+++ b/internal/uid/uid.go
@@ -12,11 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// This file supports generating unique IDs so that multiple test executions
-// don't interfere with each other, and cleaning up old entities that may
-// remain if tests exit early.
-
-package testutil
+// Package uid supports generating unique IDs. Its chief purpose is to prevent
+// multiple test executions from interfering with each other, and to facilitate
+// cleanup of old entities that may remain if tests exit early.
+package uid
 
 import (
 	"fmt"
@@ -26,48 +25,58 @@
 	"time"
 )
 
-var startTime = time.Now().UTC()
-
-// A UIDSpace manages a set of unique IDs distinguished by a prefix.
-type UIDSpace struct {
-	Prefix string
-	Sep    rune
+// A Space manages a set of unique IDs distinguished by a prefix.
+type Space struct {
+	Prefix string    // Prefix of UIDs. Read-only.
+	Sep    rune      // Separates UID parts. Read-only.
+	Time   time.Time // Timestamp for UIDs. Read-only.
 	re     *regexp.Regexp
 	mu     sync.Mutex
 	count  int
 }
 
-func NewUIDSpace(prefix string) *UIDSpace {
-	return NewUIDSpaceSep(prefix, '-')
+// Options are optional values for a Space.
+type Options struct {
+	Sep  rune      // Separates parts of the UID. Defaults to '-'.
+	Time time.Time // Timestamp for all UIDs made with this space. Defaults to current time.
 }
 
-func NewUIDSpaceSep(prefix string, sep rune) *UIDSpace {
+// NewSpace creates a new unique-ID space with the given prefix. If opts is
+// nil, default options are used.
+func NewSpace(prefix string, opts *Options) *Space {
+	sep := '-'
+	tm := time.Now().UTC()
+	if opts != nil {
+		if opts.Sep != 0 {
+			sep = opts.Sep
+		}
+		if !opts.Time.IsZero() {
+			tm = opts.Time
+		}
+	}
 	re := fmt.Sprintf(`^%s%[2]c(\d{4})(\d{2})(\d{2})%[2]c(\d+)%[2]c\d+$`,
 		regexp.QuoteMeta(prefix), sep)
-	return &UIDSpace{
+	return &Space{
 		Prefix: prefix,
 		Sep:    sep,
+		Time:   tm,
 		re:     regexp.MustCompile(re),
 	}
 }
 
-// New generates a new unique ID . The ID consists of the UIDSpace's prefix, a
+// New generates a new unique ID. The ID consists of the Space's prefix, a
 // timestamp, and a counter value. All unique IDs generated in the same test
 // execution will have the same timestamp.
 //
 // Aside from the characters in the prefix, IDs contain only letters, numbers
 // and sep.
-func (s *UIDSpace) New() string { return s.newID(startTime) }
-
-func (s *UIDSpace) newID(t time.Time) string {
+func (s *Space) New() string {
 	s.mu.Lock()
 	c := s.count
 	s.count++
 	s.mu.Unlock()
 	// Write the time as a date followed by nanoseconds from midnight of that date.
 	// That makes it easier to see the approximate time of the ID when it is displayed.
-	y, m, d := t.Date()
-	ns := t.Sub(time.Date(y, m, d, 0, 0, 0, 0, time.UTC))
+	y, m, d := s.Time.Date()
+	ns := s.Time.Sub(time.Date(y, m, d, 0, 0, 0, 0, time.UTC))
 	// Zero-pad the counter for lexical sort order for IDs with the same timestamp.
 	return fmt.Sprintf("%s%c%04d%02d%02d%c%d%c%04d",
 		s.Prefix, s.Sep, y, m, d, s.Sep, ns, s.Sep, c)
@@ -75,7 +84,7 @@
 
 // Timestamp extracts the timestamp of uid, which must have been generated by
 // s. The second return value is true on success, false if there was a problem.
-func (s *UIDSpace) Timestamp(uid string) (time.Time, bool) {
+func (s *Space) Timestamp(uid string) (time.Time, bool) {
 	subs := s.re.FindStringSubmatch(uid)
 	if subs == nil {
 		return time.Time{}, false
@@ -92,7 +101,7 @@
 
 // Older reports whether uid was created by m and has a timestamp older than
 // the current time by at least d.
-func (s *UIDSpace) Older(uid string, d time.Duration) bool {
+func (s *Space) Older(uid string, d time.Duration) bool {
 	ts, ok := s.Timestamp(uid)
 	if !ok {
 		return false
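A sketch of the intended use of the renamed uid package, combining New with Older for cleanup of leftover test resources; the prefix and the one-hour threshold are illustrative.

```go
package uid_test

import (
	"fmt"
	"time"

	"cloud.google.com/go/internal/uid"
)

func Example() {
	// Each test binary gets its own space; all IDs from it share a timestamp.
	space := uid.NewSpace("go-integration-test", nil)
	name := space.New() // prefix, date, nanoseconds since midnight, counter

	// On cleanup, delete only resources with this prefix that are old enough
	// that no concurrent test run can still be using them.
	for _, candidate := range []string{name} {
		if space.Older(candidate, time.Hour) {
			fmt.Println("would delete", candidate)
		}
	}
}
```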
diff --git a/internal/testutil/unique_test.go b/internal/uid/uid_test.go
similarity index 85%
rename from internal/testutil/unique_test.go
rename to internal/uid/uid_test.go
index 5a39833..fb7762f 100644
--- a/internal/testutil/unique_test.go
+++ b/internal/uid/uid_test.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package testutil
+package uid
 
 import (
 	"testing"
@@ -20,16 +20,16 @@
 )
 
 func TestNew(t *testing.T) {
-	s := NewUIDSpace("prefix")
 	tm := time.Date(2017, 1, 6, 0, 0, 0, 21, time.UTC)
-	got := s.newID(tm)
+	s := NewSpace("prefix", &Options{Time: tm})
+	got := s.New()
 	want := "prefix-20170106-21-0000"
 	if got != want {
 		t.Errorf("got %q, want %q", got, want)
 	}
 
-	s2 := NewUIDSpaceSep("prefix2", '_')
-	got = s2.newID(tm)
+	s2 := NewSpace("prefix2", &Options{Sep: '_', Time: tm})
+	got = s2.New()
 	want = "prefix2_20170106_21_0000"
 	if got != want {
 		t.Errorf("got %q, want %q", got, want)
@@ -37,7 +37,8 @@
 }
 
 func TestTimestamp(t *testing.T) {
-	s := NewUIDSpace("unique-ID")
+	s := NewSpace("unique-ID", nil)
+	startTime := s.Time
 	uid := s.New()
 	got, ok := s.Timestamp(uid)
 	if !ok {
@@ -60,9 +61,9 @@
 }
 
 func TestOlder(t *testing.T) {
-	s := NewUIDSpace("uid")
+	s := NewSpace("uid", nil)
 	// A non-matching ID returns false.
-	id2 := NewUIDSpace("different-prefix").New()
+	id2 := NewSpace("different-prefix", nil).New()
 	if got, want := s.Older(id2, time.Second), false; got != want {
 		t.Errorf("got %t, want %t", got, want)
 	}
diff --git a/logging/logadmin/metrics_test.go b/logging/logadmin/metrics_test.go
index 3a70358..45fca8d 100644
--- a/logging/logadmin/metrics_test.go
+++ b/logging/logadmin/metrics_test.go
@@ -20,11 +20,12 @@
 	"time"
 
 	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/internal/uid"
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
 )
 
-var metricIDs = testutil.NewUIDSpace("GO-CLIENT-TEST-METRIC")
+var metricIDs = uid.NewSpace("GO-CLIENT-TEST-METRIC", nil)
 
 // Initializes the tests before they run.
 func initMetrics(ctx context.Context) {
diff --git a/logging/logadmin/sinks_test.go b/logging/logadmin/sinks_test.go
index 20f4b7c..3d88f6a 100644
--- a/logging/logadmin/sinks_test.go
+++ b/logging/logadmin/sinks_test.go
@@ -24,13 +24,14 @@
 	"time"
 
 	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/internal/uid"
 	"cloud.google.com/go/storage"
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 )
 
-var sinkIDs = testutil.NewUIDSpace("GO-CLIENT-TEST-SINK")
+var sinkIDs = uid.NewSpace("GO-CLIENT-TEST-SINK", nil)
 
 const testFilter = ""
 
@@ -40,7 +41,7 @@
 // Returns a cleanup function to be called after the tests finish.
 func initSinks(ctx context.Context) func() {
 	// Create a unique GCS bucket so concurrent tests don't interfere with each other.
-	bucketIDs := testutil.NewUIDSpace(testProjectID + "-log-sink")
+	bucketIDs := uid.NewSpace(testProjectID+"-log-sink", nil)
 	testBucket := bucketIDs.New()
 	testSinkDestination = "storage.googleapis.com/" + testBucket
 	var storageClient *storage.Client
diff --git a/logging/logging_test.go b/logging/logging_test.go
index 66964a2..e17566d 100644
--- a/logging/logging_test.go
+++ b/logging/logging_test.go
@@ -31,6 +31,7 @@
 
 	cinternal "cloud.google.com/go/internal"
 	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/internal/uid"
 	"cloud.google.com/go/logging"
 	ltesting "cloud.google.com/go/logging/internal/testing"
 	"cloud.google.com/go/logging/logadmin"
@@ -46,7 +47,7 @@
 
 const testLogIDPrefix = "GO-LOGGING-CLIENT/TEST-LOG"
 
-var uids = testutil.NewUIDSpace(testLogIDPrefix)
+var uids = uid.NewSpace(testLogIDPrefix, nil)
 
 var (
 	client        *logging.Client
diff --git a/privacy/dlp/apiv2beta2/dlp_client.go b/privacy/dlp/apiv2beta2/dlp_client.go
deleted file mode 100644
index 4c3282d..0000000
--- a/privacy/dlp/apiv2beta2/dlp_client.go
+++ /dev/null
@@ -1,681 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package dlp
-
-import (
-	"math"
-	"time"
-
-	"cloud.google.com/go/internal/version"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/iterator"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-// CallOptions contains the retry settings for each method of Client.
-type CallOptions struct {
-	InspectContent           []gax.CallOption
-	RedactImage              []gax.CallOption
-	DeidentifyContent        []gax.CallOption
-	ReidentifyContent        []gax.CallOption
-	InspectDataSource        []gax.CallOption
-	AnalyzeDataSourceRisk    []gax.CallOption
-	ListInfoTypes            []gax.CallOption
-	CreateInspectTemplate    []gax.CallOption
-	UpdateInspectTemplate    []gax.CallOption
-	GetInspectTemplate       []gax.CallOption
-	ListInspectTemplates     []gax.CallOption
-	DeleteInspectTemplate    []gax.CallOption
-	CreateDeidentifyTemplate []gax.CallOption
-	UpdateDeidentifyTemplate []gax.CallOption
-	GetDeidentifyTemplate    []gax.CallOption
-	ListDeidentifyTemplates  []gax.CallOption
-	DeleteDeidentifyTemplate []gax.CallOption
-	ListDlpJobs              []gax.CallOption
-	GetDlpJob                []gax.CallOption
-	DeleteDlpJob             []gax.CallOption
-	CancelDlpJob             []gax.CallOption
-}
-
-func defaultClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("dlp.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultCallOptions() *CallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-	}
-	return &CallOptions{
-		InspectContent:           retry[[2]string{"default", "idempotent"}],
-		RedactImage:              retry[[2]string{"default", "idempotent"}],
-		DeidentifyContent:        retry[[2]string{"default", "idempotent"}],
-		ReidentifyContent:        retry[[2]string{"default", "idempotent"}],
-		InspectDataSource:        retry[[2]string{"default", "non_idempotent"}],
-		AnalyzeDataSourceRisk:    retry[[2]string{"default", "non_idempotent"}],
-		ListInfoTypes:            retry[[2]string{"default", "idempotent"}],
-		CreateInspectTemplate:    retry[[2]string{"default", "non_idempotent"}],
-		UpdateInspectTemplate:    retry[[2]string{"default", "non_idempotent"}],
-		GetInspectTemplate:       retry[[2]string{"default", "idempotent"}],
-		ListInspectTemplates:     retry[[2]string{"default", "idempotent"}],
-		DeleteInspectTemplate:    retry[[2]string{"default", "idempotent"}],
-		CreateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}],
-		UpdateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}],
-		GetDeidentifyTemplate:    retry[[2]string{"default", "idempotent"}],
-		ListDeidentifyTemplates:  retry[[2]string{"default", "idempotent"}],
-		DeleteDeidentifyTemplate: retry[[2]string{"default", "idempotent"}],
-		ListDlpJobs:              retry[[2]string{"default", "idempotent"}],
-		GetDlpJob:                retry[[2]string{"default", "idempotent"}],
-		DeleteDlpJob:             retry[[2]string{"default", "idempotent"}],
-		CancelDlpJob:             retry[[2]string{"default", "non_idempotent"}],
-	}
-}
-
-// Client is a client for interacting with DLP API.
-type Client struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	client dlppb.DlpServiceClient
-
-	// The call options for this service.
-	CallOptions *CallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewClient creates a new dlp service client.
-//
-// The DLP API is a service that allows clients
-// to detect the presence of Personally Identifiable Information (PII) and other
-// privacy-sensitive data in user-supplied, unstructured data streams, like text
-// blocks or images.
-// The service also includes methods for sensitive data redaction and
-// scheduling of data scans on Google Cloud Platform based data sets.
-func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &Client{
-		conn:        conn,
-		CallOptions: defaultCallOptions(),
-
-		client: dlppb.NewDlpServiceClient(conn),
-	}
-	c.setGoogleClientInfo()
-	return c, nil
-}
-
-// Connection returns the client's connection to the API service.
-func (c *Client) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *Client) Close() error {
-	return c.conn.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *Client) setGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// InspectContent finds potentially sensitive info in content.
-// This method has limits on input size, processing time, and output size.
-// How-to guide for text (at /dlp/docs/inspecting-text), How-to guide for
-// images (at /dlp/docs/inspecting-images)
-func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
-	var resp *dlppb.InspectContentResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.InspectContent(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// RedactImage redacts potentially sensitive info from an image.
-// This method has limits on input size, processing time, and output size.
-// How-to guide (at /dlp/docs/redacting-sensitive-data-images)
-func (c *Client) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest, opts ...gax.CallOption) (*dlppb.RedactImageResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.RedactImage[0:len(c.CallOptions.RedactImage):len(c.CallOptions.RedactImage)], opts...)
-	var resp *dlppb.RedactImageResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.RedactImage(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DeidentifyContent de-identifies potentially sensitive info from a ContentItem.
-// This method has limits on input size and output size.
-// How-to guide (at /dlp/docs/deidentify-sensitive-data)
-func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
-	var resp *dlppb.DeidentifyContentResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ReidentifyContent re-identify content that has been de-identified.
-func (c *Client) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest, opts ...gax.CallOption) (*dlppb.ReidentifyContentResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ReidentifyContent[0:len(c.CallOptions.ReidentifyContent):len(c.CallOptions.ReidentifyContent)], opts...)
-	var resp *dlppb.ReidentifyContentResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.ReidentifyContent(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// InspectDataSource schedules a job scanning content in a Google Cloud Platform data
-// repository. How-to guide (at /dlp/docs/inspecting-storage)
-func (c *Client) InspectDataSource(ctx context.Context, req *dlppb.InspectDataSourceRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.InspectDataSource[0:len(c.CallOptions.InspectDataSource):len(c.CallOptions.InspectDataSource)], opts...)
-	var resp *dlppb.DlpJob
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.InspectDataSource(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google
-// Cloud Platform repository. [How-to guide}(/dlp/docs/compute-risk-analysis)
-func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...)
-	var resp *dlppb.DlpJob
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListInfoTypes returns sensitive information types DLP supports.
-func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
-	var resp *dlppb.ListInfoTypesResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// CreateInspectTemplate creates an inspect template for re-using frequently used configuration
-// for inspecting content, images, and storage.
-func (c *Client) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CreateInspectTemplate[0:len(c.CallOptions.CreateInspectTemplate):len(c.CallOptions.CreateInspectTemplate)], opts...)
-	var resp *dlppb.InspectTemplate
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.CreateInspectTemplate(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// UpdateInspectTemplate updates the inspect template.
-func (c *Client) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.UpdateInspectTemplate[0:len(c.CallOptions.UpdateInspectTemplate):len(c.CallOptions.UpdateInspectTemplate)], opts...)
-	var resp *dlppb.InspectTemplate
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.UpdateInspectTemplate(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// GetInspectTemplate gets an inspect template.
-func (c *Client) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetInspectTemplate[0:len(c.CallOptions.GetInspectTemplate):len(c.CallOptions.GetInspectTemplate)], opts...)
-	var resp *dlppb.InspectTemplate
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.GetInspectTemplate(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListInspectTemplates lists inspect templates.
-func (c *Client) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest, opts ...gax.CallOption) *InspectTemplateIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListInspectTemplates[0:len(c.CallOptions.ListInspectTemplates):len(c.CallOptions.ListInspectTemplates)], opts...)
-	it := &InspectTemplateIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.InspectTemplate, string, error) {
-		var resp *dlppb.ListInspectTemplatesResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.client.ListInspectTemplates(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.InspectTemplates, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// DeleteInspectTemplate deletes inspect templates.
-func (c *Client) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest, opts ...gax.CallOption) error {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeleteInspectTemplate[0:len(c.CallOptions.DeleteInspectTemplate):len(c.CallOptions.DeleteInspectTemplate)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.client.DeleteInspectTemplate(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-// CreateDeidentifyTemplate creates an Deidentify template for re-using frequently used configuration
-// for Deidentifying content, images, and storage.
-func (c *Client) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CreateDeidentifyTemplate[0:len(c.CallOptions.CreateDeidentifyTemplate):len(c.CallOptions.CreateDeidentifyTemplate)], opts...)
-	var resp *dlppb.DeidentifyTemplate
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.CreateDeidentifyTemplate(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// UpdateDeidentifyTemplate updates the inspect template.
-func (c *Client) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.UpdateDeidentifyTemplate[0:len(c.CallOptions.UpdateDeidentifyTemplate):len(c.CallOptions.UpdateDeidentifyTemplate)], opts...)
-	var resp *dlppb.DeidentifyTemplate
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.UpdateDeidentifyTemplate(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// GetDeidentifyTemplate gets an inspect template.
-func (c *Client) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetDeidentifyTemplate[0:len(c.CallOptions.GetDeidentifyTemplate):len(c.CallOptions.GetDeidentifyTemplate)], opts...)
-	var resp *dlppb.DeidentifyTemplate
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.GetDeidentifyTemplate(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListDeidentifyTemplates lists inspect templates.
-func (c *Client) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest, opts ...gax.CallOption) *DeidentifyTemplateIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListDeidentifyTemplates[0:len(c.CallOptions.ListDeidentifyTemplates):len(c.CallOptions.ListDeidentifyTemplates)], opts...)
-	it := &DeidentifyTemplateIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DeidentifyTemplate, string, error) {
-		var resp *dlppb.ListDeidentifyTemplatesResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.client.ListDeidentifyTemplates(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.DeidentifyTemplates, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// DeleteDeidentifyTemplate deletes inspect templates.
-func (c *Client) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest, opts ...gax.CallOption) error {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeleteDeidentifyTemplate[0:len(c.CallOptions.DeleteDeidentifyTemplate):len(c.CallOptions.DeleteDeidentifyTemplate)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.client.DeleteDeidentifyTemplate(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-// ListDlpJobs lists DlpJobs that match the specified filter in the request.
-func (c *Client) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest, opts ...gax.CallOption) *DlpJobIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListDlpJobs[0:len(c.CallOptions.ListDlpJobs):len(c.CallOptions.ListDlpJobs)], opts...)
-	it := &DlpJobIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DlpJob, string, error) {
-		var resp *dlppb.ListDlpJobsResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.client.ListDlpJobs(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.Jobs, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// GetDlpJob gets the latest state of a long-running DlpJob.
-func (c *Client) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetDlpJob[0:len(c.CallOptions.GetDlpJob):len(c.CallOptions.GetDlpJob)], opts...)
-	var resp *dlppb.DlpJob
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.GetDlpJob(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DeleteDlpJob deletes a long-running DlpJob. This method indicates that the client is
-// no longer interested in the DlpJob result. The job will be cancelled if
-// possible.
-func (c *Client) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest, opts ...gax.CallOption) error {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeleteDlpJob[0:len(c.CallOptions.DeleteDlpJob):len(c.CallOptions.DeleteDlpJob)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.client.DeleteDlpJob(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-// CancelDlpJob starts asynchronous cancellation on a long-running DlpJob.  The server
-// makes a best effort to cancel the DlpJob, but success is not
-// guaranteed.
-func (c *Client) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest, opts ...gax.CallOption) error {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CancelDlpJob[0:len(c.CallOptions.CancelDlpJob):len(c.CallOptions.CancelDlpJob)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.client.CancelDlpJob(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-// DeidentifyTemplateIterator manages a stream of *dlppb.DeidentifyTemplate.
-type DeidentifyTemplateIterator struct {
-	items    []*dlppb.DeidentifyTemplate
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DeidentifyTemplate, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *DeidentifyTemplateIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *DeidentifyTemplateIterator) Next() (*dlppb.DeidentifyTemplate, error) {
-	var item *dlppb.DeidentifyTemplate
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *DeidentifyTemplateIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *DeidentifyTemplateIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}
-
-// DlpJobIterator manages a stream of *dlppb.DlpJob.
-type DlpJobIterator struct {
-	items    []*dlppb.DlpJob
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DlpJob, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *DlpJobIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *DlpJobIterator) Next() (*dlppb.DlpJob, error) {
-	var item *dlppb.DlpJob
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *DlpJobIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *DlpJobIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}
-
-// InspectTemplateIterator manages a stream of *dlppb.InspectTemplate.
-type InspectTemplateIterator struct {
-	items    []*dlppb.InspectTemplate
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.InspectTemplate, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *InspectTemplateIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *InspectTemplateIterator) Next() (*dlppb.InspectTemplate, error) {
-	var item *dlppb.InspectTemplate
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *InspectTemplateIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *InspectTemplateIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}
diff --git a/privacy/dlp/apiv2beta2/dlp_client_example_test.go b/privacy/dlp/apiv2beta2/dlp_client_example_test.go
deleted file mode 100644
index 17527d7..0000000
--- a/privacy/dlp/apiv2beta2/dlp_client_example_test.go
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package dlp_test
-
-import (
-	"cloud.google.com/go/privacy/dlp/apiv2beta2"
-	"golang.org/x/net/context"
-	"google.golang.org/api/iterator"
-	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2"
-)
-
-func ExampleNewClient() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use client.
-	_ = c
-}
-
-func ExampleClient_InspectContent() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.InspectContentRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.InspectContent(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_RedactImage() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.RedactImageRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.RedactImage(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_DeidentifyContent() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.DeidentifyContentRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.DeidentifyContent(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_ReidentifyContent() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.ReidentifyContentRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.ReidentifyContent(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_InspectDataSource() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.InspectDataSourceRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.InspectDataSource(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_AnalyzeDataSourceRisk() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.AnalyzeDataSourceRiskRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.AnalyzeDataSourceRisk(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_ListInfoTypes() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.ListInfoTypesRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.ListInfoTypes(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_CreateInspectTemplate() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.CreateInspectTemplateRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.CreateInspectTemplate(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_UpdateInspectTemplate() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.UpdateInspectTemplateRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.UpdateInspectTemplate(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_GetInspectTemplate() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.GetInspectTemplateRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.GetInspectTemplate(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_ListInspectTemplates() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.ListInspectTemplatesRequest{
-		// TODO: Fill request struct fields.
-	}
-	it := c.ListInspectTemplates(ctx, req)
-	for {
-		resp, err := it.Next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			// TODO: Handle error.
-		}
-		// TODO: Use resp.
-		_ = resp
-	}
-}
-
-func ExampleClient_DeleteInspectTemplate() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.DeleteInspectTemplateRequest{
-		// TODO: Fill request struct fields.
-	}
-	err = c.DeleteInspectTemplate(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-}
-
-func ExampleClient_CreateDeidentifyTemplate() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.CreateDeidentifyTemplateRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.CreateDeidentifyTemplate(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_UpdateDeidentifyTemplate() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.UpdateDeidentifyTemplateRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.UpdateDeidentifyTemplate(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_GetDeidentifyTemplate() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.GetDeidentifyTemplateRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.GetDeidentifyTemplate(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_ListDeidentifyTemplates() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.ListDeidentifyTemplatesRequest{
-		// TODO: Fill request struct fields.
-	}
-	it := c.ListDeidentifyTemplates(ctx, req)
-	for {
-		resp, err := it.Next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			// TODO: Handle error.
-		}
-		// TODO: Use resp.
-		_ = resp
-	}
-}
-
-func ExampleClient_DeleteDeidentifyTemplate() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.DeleteDeidentifyTemplateRequest{
-		// TODO: Fill request struct fields.
-	}
-	err = c.DeleteDeidentifyTemplate(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-}
-
-func ExampleClient_ListDlpJobs() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.ListDlpJobsRequest{
-		// TODO: Fill request struct fields.
-	}
-	it := c.ListDlpJobs(ctx, req)
-	for {
-		resp, err := it.Next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			// TODO: Handle error.
-		}
-		// TODO: Use resp.
-		_ = resp
-	}
-}
-
-func ExampleClient_GetDlpJob() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.GetDlpJobRequest{
-		// TODO: Fill request struct fields.
-	}
-	resp, err := c.GetDlpJob(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	// TODO: Use resp.
-	_ = resp
-}
-
-func ExampleClient_DeleteDlpJob() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.DeleteDlpJobRequest{
-		// TODO: Fill request struct fields.
-	}
-	err = c.DeleteDlpJob(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-}
-
-func ExampleClient_CancelDlpJob() {
-	ctx := context.Background()
-	c, err := dlp.NewClient(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-	req := &dlppb.CancelDlpJobRequest{
-		// TODO: Fill request struct fields.
-	}
-	err = c.CancelDlpJob(ctx, req)
-	if err != nil {
-		// TODO: Handle error.
-	}
-}
diff --git a/privacy/dlp/apiv2beta2/doc.go b/privacy/dlp/apiv2beta2/doc.go
deleted file mode 100644
index 43eb41e..0000000
--- a/privacy/dlp/apiv2beta2/doc.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-// Package dlp is an auto-generated package for the
-// DLP API.
-//
-//   NOTE: This package is in alpha. It is not stable, and is likely to change.
-//
-// The Google Data Loss Prevention API provides methods for detection of
-// privacy-sensitive fragments in text, images, and Google Cloud Platform
-// storage repositories.
-package dlp // import "cloud.google.com/go/privacy/dlp/apiv2beta2"
-
-import (
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/metadata"
-)
-
-func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
-	out, _ := metadata.FromOutgoingContext(ctx)
-	out = out.Copy()
-	for _, md := range mds {
-		for k, v := range md {
-			out[k] = append(out[k], v...)
-		}
-	}
-	return metadata.NewOutgoingContext(ctx, out)
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
-	return []string{
-		"https://www.googleapis.com/auth/cloud-platform",
-	}
-}
diff --git a/privacy/dlp/apiv2beta2/mock_test.go b/privacy/dlp/apiv2beta2/mock_test.go
deleted file mode 100644
index 6451737..0000000
--- a/privacy/dlp/apiv2beta2/mock_test.go
+++ /dev/null
@@ -1,1596 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package dlp
-
-import (
-	emptypb "github.com/golang/protobuf/ptypes/empty"
-	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2"
-)
-
-import (
-	"flag"
-	"fmt"
-	"io"
-	"log"
-	"net"
-	"os"
-	"strings"
-	"testing"
-
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	status "google.golang.org/genproto/googleapis/rpc/status"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-	gstatus "google.golang.org/grpc/status"
-)
-
-var _ = io.EOF
-var _ = ptypes.MarshalAny
-var _ status.Status
-
-type mockDlpServer struct {
-	// Embed for forward compatibility.
-	// Tests will keep working if more methods are added
-	// in the future.
-	dlppb.DlpServiceServer
-
-	reqs []proto.Message
-
-	// If set, all calls return this error.
-	err error
-
-	// responses to return if err == nil
-	resps []proto.Message
-}
-
-func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.InspectContentResponse), nil
-}
-
-func (s *mockDlpServer) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest) (*dlppb.RedactImageResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.RedactImageResponse), nil
-}
-
-func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.DeidentifyContentResponse), nil
-}
-
-func (s *mockDlpServer) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest) (*dlppb.ReidentifyContentResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.ReidentifyContentResponse), nil
-}
-
-func (s *mockDlpServer) InspectDataSource(ctx context.Context, req *dlppb.InspectDataSourceRequest) (*dlppb.DlpJob, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.DlpJob), nil
-}
-
-func (s *mockDlpServer) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest) (*dlppb.DlpJob, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.DlpJob), nil
-}
-
-func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.ListInfoTypesResponse), nil
-}
-
-func (s *mockDlpServer) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest) (*dlppb.InspectTemplate, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.InspectTemplate), nil
-}
-
-func (s *mockDlpServer) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest) (*dlppb.InspectTemplate, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.InspectTemplate), nil
-}
-
-func (s *mockDlpServer) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest) (*dlppb.InspectTemplate, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.InspectTemplate), nil
-}
-
-func (s *mockDlpServer) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest) (*dlppb.ListInspectTemplatesResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.ListInspectTemplatesResponse), nil
-}
-
-func (s *mockDlpServer) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest) (*emptypb.Empty, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*emptypb.Empty), nil
-}
-
-func (s *mockDlpServer) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.DeidentifyTemplate), nil
-}
-
-func (s *mockDlpServer) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.DeidentifyTemplate), nil
-}
-
-func (s *mockDlpServer) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.DeidentifyTemplate), nil
-}
-
-func (s *mockDlpServer) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest) (*dlppb.ListDeidentifyTemplatesResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.ListDeidentifyTemplatesResponse), nil
-}
-
-func (s *mockDlpServer) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest) (*emptypb.Empty, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*emptypb.Empty), nil
-}
-
-func (s *mockDlpServer) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest) (*dlppb.ListDlpJobsResponse, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.ListDlpJobsResponse), nil
-}
-
-func (s *mockDlpServer) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest) (*dlppb.DlpJob, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*dlppb.DlpJob), nil
-}
-
-func (s *mockDlpServer) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest) (*emptypb.Empty, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*emptypb.Empty), nil
-}
-
-func (s *mockDlpServer) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest) (*emptypb.Empty, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
-		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
-	}
-	s.reqs = append(s.reqs, req)
-	if s.err != nil {
-		return nil, s.err
-	}
-	return s.resps[0].(*emptypb.Empty), nil
-}
-
-// clientOpt is the option tests should use to connect to the test server.
-// It is initialized by TestMain.
-var clientOpt option.ClientOption
-
-var (
-	mockDlp mockDlpServer
-)
-
-func TestMain(m *testing.M) {
-	flag.Parse()
-
-	serv := grpc.NewServer()
-	dlppb.RegisterDlpServiceServer(serv, &mockDlp)
-
-	lis, err := net.Listen("tcp", "localhost:0")
-	if err != nil {
-		log.Fatal(err)
-	}
-	go serv.Serve(lis)
-
-	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
-	if err != nil {
-		log.Fatal(err)
-	}
-	clientOpt = option.WithGRPCConn(conn)
-
-	os.Exit(m.Run())
-}
-
-func TestDlpServiceInspectContent(t *testing.T) {
-	var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.InspectContentRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.InspectContent(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceInspectContentError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.InspectContentRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.InspectContent(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceRedactImage(t *testing.T) {
-	var redactedImage []byte = []byte("28")
-	var extractedText string = "extractedText998260012"
-	var expectedResponse = &dlppb.RedactImageResponse{
-		RedactedImage: redactedImage,
-		ExtractedText: extractedText,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.RedactImageRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.RedactImage(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceRedactImageError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.RedactImageRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.RedactImage(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceDeidentifyContent(t *testing.T) {
-	var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.DeidentifyContentRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.DeidentifyContent(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceDeidentifyContentError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.DeidentifyContentRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.DeidentifyContent(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceReidentifyContent(t *testing.T) {
-	var expectedResponse *dlppb.ReidentifyContentResponse = &dlppb.ReidentifyContentResponse{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.ReidentifyContentRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ReidentifyContent(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceReidentifyContentError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.ReidentifyContentRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ReidentifyContent(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceInspectDataSource(t *testing.T) {
-	var name string = "name3373707"
-	var expectedResponse = &dlppb.DlpJob{
-		Name: name,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.InspectDataSourceRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.InspectDataSource(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceInspectDataSourceError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.InspectDataSourceRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.InspectDataSource(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) {
-	var name string = "name3373707"
-	var expectedResponse = &dlppb.DlpJob{
-		Name: name,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.AnalyzeDataSourceRiskRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.AnalyzeDataSourceRisk(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.AnalyzeDataSourceRiskRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.AnalyzeDataSourceRisk(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceListInfoTypes(t *testing.T) {
-	var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var request *dlppb.ListInfoTypesRequest = &dlppb.ListInfoTypesRequest{}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListInfoTypes(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceListInfoTypesError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var request *dlppb.ListInfoTypesRequest = &dlppb.ListInfoTypesRequest{}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListInfoTypes(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceCreateInspectTemplate(t *testing.T) {
-	var name string = "name3373707"
-	var displayName string = "displayName1615086568"
-	var description string = "description-1724546052"
-	var expectedResponse = &dlppb.InspectTemplate{
-		Name:        name,
-		DisplayName: displayName,
-		Description: description,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]")
-	var request = &dlppb.CreateInspectTemplateRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.CreateInspectTemplate(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceCreateInspectTemplateError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]")
-	var request = &dlppb.CreateInspectTemplateRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.CreateInspectTemplate(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceUpdateInspectTemplate(t *testing.T) {
-	var name2 string = "name2-1052831874"
-	var displayName string = "displayName1615086568"
-	var description string = "description-1724546052"
-	var expectedResponse = &dlppb.InspectTemplate{
-		Name:        name2,
-		DisplayName: displayName,
-		Description: description,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]")
-	var request = &dlppb.UpdateInspectTemplateRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.UpdateInspectTemplate(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceUpdateInspectTemplateError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]")
-	var request = &dlppb.UpdateInspectTemplateRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.UpdateInspectTemplate(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceGetInspectTemplate(t *testing.T) {
-	var name string = "name3373707"
-	var displayName string = "displayName1615086568"
-	var description string = "description-1724546052"
-	var expectedResponse = &dlppb.InspectTemplate{
-		Name:        name,
-		DisplayName: displayName,
-		Description: description,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var request *dlppb.GetInspectTemplateRequest = &dlppb.GetInspectTemplateRequest{}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.GetInspectTemplate(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceGetInspectTemplateError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var request *dlppb.GetInspectTemplateRequest = &dlppb.GetInspectTemplateRequest{}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.GetInspectTemplate(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceListInspectTemplates(t *testing.T) {
-	var nextPageToken string = ""
-	var inspectTemplatesElement *dlppb.InspectTemplate = &dlppb.InspectTemplate{}
-	var inspectTemplates = []*dlppb.InspectTemplate{inspectTemplatesElement}
-	var expectedResponse = &dlppb.ListInspectTemplatesResponse{
-		NextPageToken:    nextPageToken,
-		InspectTemplates: inspectTemplates,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]")
-	var request = &dlppb.ListInspectTemplatesRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListInspectTemplates(context.Background(), request).Next()
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	want := (interface{})(expectedResponse.InspectTemplates[0])
-	got := (interface{})(resp)
-	var ok bool
-
-	switch want := (want).(type) {
-	case proto.Message:
-		ok = proto.Equal(want, got.(proto.Message))
-	default:
-		ok = want == got
-	}
-	if !ok {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceListInspectTemplatesError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]")
-	var request = &dlppb.ListInspectTemplatesRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListInspectTemplates(context.Background(), request).Next()
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceDeleteInspectTemplate(t *testing.T) {
-	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]")
-	var request = &dlppb.DeleteInspectTemplateRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = c.DeleteInspectTemplate(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-}
-
-func TestDlpServiceDeleteInspectTemplateError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]")
-	var request = &dlppb.DeleteInspectTemplateRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = c.DeleteInspectTemplate(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-}
-func TestDlpServiceCreateDeidentifyTemplate(t *testing.T) {
-	var name string = "name3373707"
-	var displayName string = "displayName1615086568"
-	var description string = "description-1724546052"
-	var expectedResponse = &dlppb.DeidentifyTemplate{
-		Name:        name,
-		DisplayName: displayName,
-		Description: description,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]")
-	var request = &dlppb.CreateDeidentifyTemplateRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.CreateDeidentifyTemplate(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceCreateDeidentifyTemplateError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]")
-	var request = &dlppb.CreateDeidentifyTemplateRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.CreateDeidentifyTemplate(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceUpdateDeidentifyTemplate(t *testing.T) {
-	var name2 string = "name2-1052831874"
-	var displayName string = "displayName1615086568"
-	var description string = "description-1724546052"
-	var expectedResponse = &dlppb.DeidentifyTemplate{
-		Name:        name2,
-		DisplayName: displayName,
-		Description: description,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]")
-	var request = &dlppb.UpdateDeidentifyTemplateRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.UpdateDeidentifyTemplate(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceUpdateDeidentifyTemplateError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]")
-	var request = &dlppb.UpdateDeidentifyTemplateRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.UpdateDeidentifyTemplate(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceGetDeidentifyTemplate(t *testing.T) {
-	var name2 string = "name2-1052831874"
-	var displayName string = "displayName1615086568"
-	var description string = "description-1724546052"
-	var expectedResponse = &dlppb.DeidentifyTemplate{
-		Name:        name2,
-		DisplayName: displayName,
-		Description: description,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]")
-	var request = &dlppb.GetDeidentifyTemplateRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.GetDeidentifyTemplate(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceGetDeidentifyTemplateError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]")
-	var request = &dlppb.GetDeidentifyTemplateRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.GetDeidentifyTemplate(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceListDeidentifyTemplates(t *testing.T) {
-	var nextPageToken string = ""
-	var deidentifyTemplatesElement *dlppb.DeidentifyTemplate = &dlppb.DeidentifyTemplate{}
-	var deidentifyTemplates = []*dlppb.DeidentifyTemplate{deidentifyTemplatesElement}
-	var expectedResponse = &dlppb.ListDeidentifyTemplatesResponse{
-		NextPageToken:       nextPageToken,
-		DeidentifyTemplates: deidentifyTemplates,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]")
-	var request = &dlppb.ListDeidentifyTemplatesRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListDeidentifyTemplates(context.Background(), request).Next()
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	want := (interface{})(expectedResponse.DeidentifyTemplates[0])
-	got := (interface{})(resp)
-	var ok bool
-
-	switch want := (want).(type) {
-	case proto.Message:
-		ok = proto.Equal(want, got.(proto.Message))
-	default:
-		ok = want == got
-	}
-	if !ok {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceListDeidentifyTemplatesError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]")
-	var request = &dlppb.ListDeidentifyTemplatesRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListDeidentifyTemplates(context.Background(), request).Next()
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceDeleteDeidentifyTemplate(t *testing.T) {
-	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]")
-	var request = &dlppb.DeleteDeidentifyTemplateRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = c.DeleteDeidentifyTemplate(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-}
-
-func TestDlpServiceDeleteDeidentifyTemplateError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]")
-	var request = &dlppb.DeleteDeidentifyTemplateRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = c.DeleteDeidentifyTemplate(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-}
-func TestDlpServiceListDlpJobs(t *testing.T) {
-	var nextPageToken string = ""
-	var jobsElement *dlppb.DlpJob = &dlppb.DlpJob{}
-	var jobs = []*dlppb.DlpJob{jobsElement}
-	var expectedResponse = &dlppb.ListDlpJobsResponse{
-		NextPageToken: nextPageToken,
-		Jobs:          jobs,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.ListDlpJobsRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListDlpJobs(context.Background(), request).Next()
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	want := (interface{})(expectedResponse.Jobs[0])
-	got := (interface{})(resp)
-	var ok bool
-
-	switch want := (want).(type) {
-	case proto.Message:
-		ok = proto.Equal(want, got.(proto.Message))
-	default:
-		ok = want == got
-	}
-	if !ok {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceListDlpJobsError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
-	var request = &dlppb.ListDlpJobsRequest{
-		Parent: formattedParent,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.ListDlpJobs(context.Background(), request).Next()
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceGetDlpJob(t *testing.T) {
-	var name2 string = "name2-1052831874"
-	var expectedResponse = &dlppb.DlpJob{
-		Name: name2,
-	}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]")
-	var request = &dlppb.GetDlpJobRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.GetDlpJob(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
-		t.Errorf("wrong response %q, want %q)", got, want)
-	}
-}
-
-func TestDlpServiceGetDlpJobError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]")
-	var request = &dlppb.GetDlpJobRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := c.GetDlpJob(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-	_ = resp
-}
-func TestDlpServiceDeleteDlpJob(t *testing.T) {
-	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]")
-	var request = &dlppb.DeleteDlpJobRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = c.DeleteDlpJob(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-}
-
-func TestDlpServiceDeleteDlpJobError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]")
-	var request = &dlppb.DeleteDlpJobRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = c.DeleteDlpJob(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-}
-func TestDlpServiceCancelDlpJob(t *testing.T) {
-	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
-
-	mockDlp.err = nil
-	mockDlp.reqs = nil
-
-	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
-
-	var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]")
-	var request = &dlppb.CancelDlpJobRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = c.CancelDlpJob(context.Background(), request)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
-		t.Errorf("wrong request %q, want %q", got, want)
-	}
-
-}
-
-func TestDlpServiceCancelDlpJobError(t *testing.T) {
-	errCode := codes.PermissionDenied
-	mockDlp.err = gstatus.Error(errCode, "test error")
-
-	var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]")
-	var request = &dlppb.CancelDlpJobRequest{
-		Name: formattedName,
-	}
-
-	c, err := NewClient(context.Background(), clientOpt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = c.CancelDlpJob(context.Background(), request)
-
-	if st, ok := gstatus.FromError(err); !ok {
-		t.Errorf("got error %v, expected grpc error", err)
-	} else if c := st.Code(); c != errCode {
-		t.Errorf("got error code %q, want %q", c, errCode)
-	}
-}
diff --git a/profiler/integration-test.sh b/profiler/integration-test.sh
index 02b1cfd..bc3e473 100644
--- a/profiler/integration-test.sh
+++ b/profiler/integration-test.sh
@@ -30,7 +30,8 @@
 
 export GOOGLE_APPLICATION_CREDENTIALS="$(pwd)/key.json"
 export GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762"
-export GCLOUD_TESTS_GOLANG_ZONE="us-west1-a"
+export GCLOUD_TESTS_GOLANG_ZONE="us-west1-b"
+export GCLOUD_TESTS_GOLANG_BUCKET="dulcet-port-762-go-cloud-profiler-test"
 
 cd $GOCLOUD_HOME/profiler
 go get -t -tags=integration .
diff --git a/profiler/integration_test.go b/profiler/integration_test.go
index 27498a7..c9f2fd6 100644
--- a/profiler/integration_test.go
+++ b/profiler/integration_test.go
@@ -49,6 +49,13 @@
 # to stop accounting the VM for billing and cores quota.
 trap "sleep 300 && poweroff" EXIT
 
+retry() {
+  for i in {1..3}; do
+    "${@}" && return 0
+  done
+  return 1
+}
+
 # Fail on any error.
 set -eo pipefail
 
@@ -56,16 +63,16 @@
 set -x
 
 # Install git
-apt-get update  >/dev/null
-apt-get -y -q install git >/dev/null
+retry apt-get update >/dev/null
+retry apt-get -y -q install git >/dev/null
 
 # Install desired Go version
 mkdir -p /tmp/bin
-curl -sL -o /tmp/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
+retry curl -sL -o /tmp/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
 chmod +x /tmp/bin/gimme
 export PATH=$PATH:/tmp/bin
 
-eval "$(gimme {{.GoVersion}})"
+retry eval "$(gimme {{.GoVersion}})"
 
 # Set $GOPATH
 export GOPATH="$HOME/go"
@@ -74,12 +81,12 @@
 mkdir -p $GOCLOUD_HOME
 
 # Install agent
-git clone https://code.googlesource.com/gocloud $GOCLOUD_HOME >/dev/null
+retry git clone https://code.googlesource.com/gocloud $GOCLOUD_HOME >/dev/null
 
 cd $GOCLOUD_HOME/profiler/busybench
 git checkout profiler-test
 git reset --hard {{.Commit}}
-go get >/dev/null
+retry go get >/dev/null
 
 # Run benchmark with agent
 go run busybench.go --service="{{.Service}}" --mutex_profiling="{{.MutexProfiling}}"
@@ -91,7 +98,7 @@
 const dockerfileFmt = `FROM golang
 RUN git clone https://code.googlesource.com/gocloud /go/src/cloud.google.com/go \
     && cd /go/src/cloud.google.com/go/profiler/busybench && git checkout profiler-test && git reset --hard %s \
-    && go get -v && go install -v
+    && go get && go install
 CMD ["busybench", "--service", "%s"]
  `
 
@@ -246,7 +253,7 @@
 			timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*25)
 			defer cancel()
 			if err := gceTr.PollForSerialOutput(timeoutCtx, &tc.InstanceConfig, benchFinishString); err != nil {
-				t.Fatal(err)
+				t.Fatalf("PollForSerialOutput() got error: %v", err)
 			}
 
 			timeNow := time.Now()
diff --git a/profiler/profiler.go b/profiler/profiler.go
index 3a43fe5..8d93685 100644
--- a/profiler/profiler.go
+++ b/profiler/profiler.go
@@ -89,7 +89,7 @@
 	instanceLabel    = "instance"
 	scope            = "https://www.googleapis.com/auth/monitoring.write"
 
-	initialBackoff = time.Second
+	initialBackoff = time.Minute
 	// Ensure the agent will recover within 1 hour.
 	maxBackoff        = time.Hour
 	backoffMultiplier = 1.3 // Backoff envelope increases by this factor on each retry.
@@ -98,12 +98,14 @@
 
 // Config is the profiler configuration.
 type Config struct {
-	// Service (or deprecated Target) must be provided to start the profiler.
-	// It specifies the name of the service under which the profiled data
-	// will be recorded and exposed at the Profiler UI for the project.
-	// You can specify an arbitrary string, but see Deployment.target at
+	// Service must be provided to start the profiler. It specifies the name of
+	// the service under which the profiled data will be recorded and exposed at
+	// the Profiler UI for the project. You can specify an arbitrary string, but
+	// see Deployment.target at
 	// https://github.com/googleapis/googleapis/blob/master/google/devtools/cloudprofiler/v2/profiler.proto
-	// for restrictions.
+	// for restrictions. If the parameter is not set, the agent will probe the
+	// GAE_SERVICE environment variable, which is present in the Google App
+	// Engine environment.
 	// NOTE: The string should be the same across different replicas of
 	// your service so that the globally constant profiling rate is
 	// maintained. Do not put things like PID or unique pod ID in the name.
@@ -112,7 +114,8 @@
 	// ServiceVersion is an optional field specifying the version of the
 	// service. It can be an arbitrary string. Profiler profiles
 	// once per minute for each version of each service in each zone.
-	// ServiceVersion defaults to an empty string.
+	// ServiceVersion defaults to the GAE_VERSION environment variable if that
+	// is set, or to the empty string otherwise.
 	ServiceVersion string
 
 	// DebugLogging enables detailed debug logging from profiler. It
@@ -130,8 +133,9 @@
 	// When true, collecting the goroutine profiles is disabled.
 	NoGoroutineProfiling bool
 
-	// ProjectID is the Cloud Console project ID to use instead of
-	// the one read from the VM metadata server.
+	// ProjectID is the Cloud Console project ID to use instead of the one set
+	// by the GOOGLE_CLOUD_PROJECT environment variable or read from the VM
+	// metadata server.
 	//
 	// Set this if you are running the agent in your local environment
 	// or anywhere else outside of Google Cloud Platform.
@@ -142,14 +146,11 @@
 	// for testing.
 	APIAddr string
 
-	// Target is deprecated, use Service instead.
-	Target string
-
 	instance string
 	zone     string
 }
 
-// startError represents the error occured during the
+// startError represents the error that occurred during the
 // initialization and starting of the agent.
 var startError error
 
@@ -411,7 +412,7 @@
 	}
 	d := &pb.Deployment{
 		ProjectId: config.ProjectID,
-		Target:    config.Target,
+		Target:    config.Service,
 		Labels:    labels,
 	}
 
@@ -443,15 +444,11 @@
 func initializeConfig(cfg Config) error {
 	config = cfg
 
-	switch {
-	case config.Service != "":
-		config.Target = config.Service
-	case config.Target == "":
-		config.Target = os.Getenv("GAE_SERVICE")
+	if config.Service == "" {
+		config.Service = os.Getenv("GAE_SERVICE")
 	}
-
-	if config.Target == "" {
-		return errors.New("service name must be specified in the configuration")
+	if config.Service == "" {
+		return errors.New("service name must be configured")
 	}
 
 	if config.ServiceVersion == "" {
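
For reference, a minimal sketch of starting the agent under the Service-only configuration introduced above; the service name, version, and project ID are placeholders, and the GAE_SERVICE/GAE_VERSION fallbacks plus the ProjectID requirement outside GCP follow the doc comments in this diff:

```go
package main

import (
	"log"

	"cloud.google.com/go/profiler"
)

func main() {
	// Service is required unless GAE_SERVICE is set in the environment;
	// ServiceVersion falls back to GAE_VERSION when unset.
	err := profiler.Start(profiler.Config{
		Service:        "my-service", // placeholder service name
		ServiceVersion: "1.0.0",      // placeholder version
		// ProjectID: "my-project",   // needed when running outside GCP
	})
	if err != nil {
		log.Fatalf("profiler.Start: %v", err)
	}
	// ... application code ...
}
```
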
diff --git a/profiler/profiler_test.go b/profiler/profiler_test.go
index 07f2ed5..1b64f9b 100644
--- a/profiler/profiler_test.go
+++ b/profiler/profiler_test.go
@@ -49,7 +49,6 @@
 	testProjectID       = "test-project-ID"
 	testInstance        = "test-instance"
 	testZone            = "test-zone"
-	testTarget          = "test-target"
 	testService         = "test-service"
 	testSvcVersion      = "test-service-version"
 	testProfileDuration = time.Second * 10
@@ -396,13 +395,13 @@
 
 		config = tt.config
 		config.ProjectID = testProjectID
-		config.Target = testTarget
+		config.Service = testService
 		mutexEnabled = tt.enableMutex
 		a := initializeAgent(nil)
 
 		wantDeployment := &pb.Deployment{
 			ProjectId: testProjectID,
-			Target:    testTarget,
+			Target:    testService,
 			Labels:    tt.wantDeploymentLabels,
 		}
 		if !testutil.Equal(a.deployment, wantDeployment) {
@@ -458,16 +457,7 @@
 		{
 			"accepts service name",
 			Config{Service: testService},
-			Config{Target: testService, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
-			"",
-			false,
-			true,
-			false,
-		},
-		{
-			"accepts target name",
-			Config{Target: testTarget},
-			Config{Target: testTarget, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
+			Config{Service: testService, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
 			"",
 			false,
 			true,
@@ -476,7 +466,7 @@
 		{
 			"env project overrides GCE project",
 			Config{Service: testService},
-			Config{Target: testService, ProjectID: testEnvProjectID, zone: testZone, instance: testInstance},
+			Config{Service: testService, ProjectID: testEnvProjectID, zone: testZone, instance: testInstance},
 			"",
 			false,
 			true,
@@ -486,7 +476,7 @@
 			"requires service name",
 			Config{},
 			Config{},
-			"service name must be specified in the configuration",
+			"service name must be configured",
 			false,
 			true,
 			false,
@@ -494,16 +484,7 @@
 		{
 			"accepts service name from config and service version from GAE",
 			Config{Service: testService},
-			Config{Target: testService, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
-			"",
-			true,
-			true,
-			false,
-		},
-		{
-			"accepts target name from config and service version from GAE",
-			Config{Target: testTarget},
-			Config{Target: testTarget, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
+			Config{Service: testService, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
 			"",
 			true,
 			true,
@@ -512,7 +493,7 @@
 		{
 			"reads both service name and version from GAE env vars",
 			Config{},
-			Config{Target: testGAEService, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
+			Config{Service: testGAEService, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
 			"",
 			true,
 			true,
@@ -521,7 +502,7 @@
 		{
 			"accepts service version from config",
 			Config{Service: testService, ServiceVersion: testSvcVersion},
-			Config{Target: testService, ServiceVersion: testSvcVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
+			Config{Service: testService, ServiceVersion: testSvcVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
 			"",
 			false,
 			true,
@@ -530,7 +511,7 @@
 		{
 			"configured version has priority over GAE-provided version",
 			Config{Service: testService, ServiceVersion: testSvcVersion},
-			Config{Target: testService, ServiceVersion: testSvcVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
+			Config{Service: testService, ServiceVersion: testSvcVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance},
 			"",
 			true,
 			true,
@@ -539,7 +520,7 @@
 		{
 			"configured project ID has priority over metadata-provided project ID",
 			Config{Service: testService, ProjectID: testProjectID},
-			Config{Target: testService, ProjectID: testProjectID, zone: testZone, instance: testInstance},
+			Config{Service: testService, ProjectID: testProjectID, zone: testZone, instance: testInstance},
 			"",
 			false,
 			true,
@@ -548,7 +529,7 @@
 		{
 			"configured project ID has priority over environment project ID",
 			Config{Service: testService, ProjectID: testProjectID},
-			Config{Target: testService, ProjectID: testProjectID},
+			Config{Service: testService, ProjectID: testProjectID},
 			"",
 			false,
 			false,
@@ -557,7 +538,7 @@
 		{
 			"requires project ID if not on GCE",
 			Config{Service: testService},
-			Config{Target: testService},
+			Config{Service: testService},
 			"project ID must be specified in the configuration if running outside of GCP",
 			false,
 			false,
@@ -605,7 +586,6 @@
 		if tt.wantErrorString == "" {
 			tt.wantConfig.APIAddr = apiAddress
 		}
-		tt.wantConfig.Service = tt.config.Service
 		if config != tt.wantConfig {
 			t.Errorf("initializeConfig(%v) got: %v, want %v", tt.config, config, tt.wantConfig)
 		}
@@ -856,7 +836,7 @@
 
 	dialGRPC = gtransport.DialInsecure
 	if err := Start(Config{
-		Target:    testTarget,
+		Service:   testService,
 		ProjectID: testProjectID,
 		APIAddr:   srv.Addr,
 		instance:  testInstance,
diff --git a/profiler/proftest/proftest.go b/profiler/proftest/proftest.go
index 7837a89..73601d1 100644
--- a/profiler/proftest/proftest.go
+++ b/profiler/proftest/proftest.go
@@ -222,6 +222,7 @@
 	for {
 		select {
 		case <-ctx.Done():
+			return ctx.Err()
 		case <-time.After(20 * time.Second):
 			resp, err := tr.ComputeService.Instances.GetSerialPortOutput(inst.ProjectID, inst.Zone, inst.Name).Port(2).Context(ctx).Do()
 			if err != nil {
@@ -230,7 +231,7 @@
 				continue
 			}
 			if resp.Contents == "" {
-				log.Printf("Serial port output from instance %s is empty string (will retry)", inst.Name)
+				log.Printf("Ignoring empty serial port output from instance %s (will retry)", inst.Name)
 				continue
 			}
 			if output = resp.Contents; strings.Contains(output, finishString) {
diff --git a/pubsub/debug.go b/pubsub/debug.go
new file mode 100644
index 0000000..c580910
--- /dev/null
+++ b/pubsub/debug.go
@@ -0,0 +1,72 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build psdebug
+
+package pubsub
+
+import (
+	"sync"
+	"time"
+)
+
+var (
+	dmu          sync.Mutex
+	msgTraces    = map[string][]Event{}
+	ackIDToMsgID = map[string]string{}
+)
+
+type Event struct {
+	Desc string
+	At   time.Time
+}
+
+func MessageEvents(msgID string) []Event {
+	dmu.Lock()
+	defer dmu.Unlock()
+	return msgTraces[msgID]
+}
+
+func addRecv(msgID, ackID string, t time.Time) {
+	dmu.Lock()
+	defer dmu.Unlock()
+	ackIDToMsgID[ackID] = msgID
+	addEvent(msgID, "recv", t)
+}
+
+func addAcks(ackIDs []string) {
+	dmu.Lock()
+	defer dmu.Unlock()
+	now := time.Now()
+	for _, id := range ackIDs {
+		addEvent(ackIDToMsgID[id], "ack", now)
+	}
+}
+
+func addModAcks(ackIDs []string, deadlineSecs int32) {
+	dmu.Lock()
+	defer dmu.Unlock()
+	desc := "modack"
+	if deadlineSecs == 0 {
+		desc = "nack"
+	}
+	now := time.Now()
+	for _, id := range ackIDs {
+		addEvent(ackIDToMsgID[id], desc, now)
+	}
+}
+
+func addEvent(msgID, desc string, t time.Time) {
+	msgTraces[msgID] = append(msgTraces[msgID], Event{desc, t})
+}
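
To make the purpose of these psdebug hooks concrete, here is a hedged sketch of inspecting a message's recorded events from application code; the project and subscription IDs are placeholders, and the snippet compiles only when the psdebug build tag from this file is set (otherwise nodebug.go supplies no-op hooks and MessageEvents does not exist):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	sub := client.Subscription("my-sub") // placeholder subscription ID
	err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
		m.Ack()
		// Only the "recv" event is guaranteed to be present here; ack and
		// modack events are recorded when the batched RPCs actually fire.
		for _, ev := range pubsub.MessageEvents(m.ID) {
			log.Printf("message %s: %s at %v", m.ID, ev.Desc, ev.At)
		}
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
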
diff --git a/pubsub/endtoend_test.go b/pubsub/endtoend_test.go
index 6fe0d21..463c285 100644
--- a/pubsub/endtoend_test.go
+++ b/pubsub/endtoend_test.go
@@ -203,7 +203,7 @@
 	for _, dur := range c.durations {
 		ctx2, cancel := context.WithTimeout(ctx, dur)
 		defer cancel()
-		id := sub.name[len(sub.name)-2:]
+		id := sub.name[len(sub.name)-1:]
 		log.Printf("%s: start receive", id)
 		prev := c.total
 		err := sub.Receive(ctx2, c.process)
diff --git a/pubsub/fake_test.go b/pubsub/fake_test.go
index 330d334..c733c3b 100644
--- a/pubsub/fake_test.go
+++ b/pubsub/fake_test.go
@@ -171,6 +171,20 @@
 	return nil
 }
 
+func (s *fakeServer) Acknowledge(ctx context.Context, req *pb.AcknowledgeRequest) (*emptypb.Empty, error) {
+	for _, id := range req.AckIds {
+		s.Acked[id] = true
+	}
+	return &emptypb.Empty{}, nil
+}
+
+func (s *fakeServer) ModifyAckDeadline(ctx context.Context, req *pb.ModifyAckDeadlineRequest) (*emptypb.Empty, error) {
+	for _, id := range req.AckIds {
+		s.Deadlines[id] = req.AckDeadlineSeconds
+	}
+	return &emptypb.Empty{}, nil
+}
+
 func (s *fakeServer) CreateSubscription(ctx context.Context, sub *pb.Subscription) (*pb.Subscription, error) {
 	if s.subs[sub.Name] != nil {
 		return nil, status.Errorf(codes.AlreadyExists, "subscription %q", sub.Name)
diff --git a/pubsub/integration_test.go b/pubsub/integration_test.go
index 7b1a924..05daac4 100644
--- a/pubsub/integration_test.go
+++ b/pubsub/integration_test.go
@@ -26,6 +26,7 @@
 	"cloud.google.com/go/iam"
 	"cloud.google.com/go/internal"
 	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/internal/uid"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 	"google.golang.org/grpc"
@@ -33,8 +34,8 @@
 )
 
 var (
-	topicIDs = testutil.NewUIDSpace("topic")
-	subIDs   = testutil.NewUIDSpace("sub")
+	topicIDs = uid.NewSpace("topic", nil)
+	subIDs   = uid.NewSpace("sub", nil)
 )
 
 // messageData is used to hold the contents of a message so that it can be compared against the contents
diff --git a/pubsub/iterator.go b/pubsub/iterator.go
index 78934e2..5acfb19 100644
--- a/pubsub/iterator.go
+++ b/pubsub/iterator.go
@@ -30,16 +30,19 @@
 // ctx is the context to use for acking messages and extending message deadlines.
 func newMessageIterator(ctx context.Context, subc *vkit.SubscriberClient, subName string, po *pullOptions) *streamingMessageIterator {
 	ps := newPullStream(ctx, subc, subName, int32(po.ackDeadline.Seconds()))
-	return newStreamingMessageIterator(ctx, ps, po)
+	return newStreamingMessageIterator(ctx, ps, po, subc, subName)
 }
 
 type streamingMessageIterator struct {
 	ctx        context.Context
 	po         *pullOptions
 	ps         *pullStream
+	subc       *vkit.SubscriberClient
+	subName    string
 	kaTicker   *time.Ticker  // keep-alive (deadline extensions)
 	ackTicker  *time.Ticker  // message acks
 	nackTicker *time.Ticker  // message nacks (more frequent than acks)
+	pingTicker *time.Ticker  // periodic pings to keep the stream open
 	failed     chan struct{} // closed on stream error
 	stopped    chan struct{} // closed when Stop is called
 	drained    chan struct{} // closed when stopped && no more pending messages
@@ -48,12 +51,13 @@
 	mu                 sync.Mutex
 	ackTimeDist        *distribution.D
 	keepAliveDeadlines map[string]time.Time
-	pendingReq         *pb.StreamingPullRequest
-	pendingModAcks     map[string]int32 // ack IDs whose ack deadline is to be modified
-	err                error            // error from stream failure
+	pendingAcks        map[string]bool
+	pendingNacks       map[string]bool
+	pendingModAcks     map[string]bool // ack IDs whose ack deadline is to be modified
+	err                error           // error from stream failure
 }
 
-func newStreamingMessageIterator(ctx context.Context, ps *pullStream, po *pullOptions) *streamingMessageIterator {
+func newStreamingMessageIterator(ctx context.Context, ps *pullStream, po *pullOptions, subc *vkit.SubscriberClient, subName string) *streamingMessageIterator {
 	// TODO: make kaTicker frequency more configurable. (ackDeadline - 5s) is a
 	// reasonable default for now, because the minimum ack period is 10s. This
 	// gives us 5s grace.
@@ -63,20 +67,25 @@
 	// Ack promptly so users don't lose work if client crashes.
 	ackTicker := time.NewTicker(100 * time.Millisecond)
 	nackTicker := time.NewTicker(100 * time.Millisecond)
+	pingTicker := time.NewTicker(30 * time.Second)
 	it := &streamingMessageIterator{
 		ctx:                ctx,
 		ps:                 ps,
 		po:                 po,
+		subc:               subc,
+		subName:            subName,
 		kaTicker:           kaTicker,
 		ackTicker:          ackTicker,
 		nackTicker:         nackTicker,
+		pingTicker:         pingTicker,
 		failed:             make(chan struct{}),
 		stopped:            make(chan struct{}),
 		drained:            make(chan struct{}),
 		ackTimeDist:        distribution.New(int(maxAckDeadline/time.Second) + 1),
 		keepAliveDeadlines: map[string]time.Time{},
-		pendingReq:         &pb.StreamingPullRequest{},
-		pendingModAcks:     map[string]int32{},
+		pendingAcks:        map[string]bool{},
+		pendingNacks:       map[string]bool{},
+		pendingModAcks:     map[string]bool{},
 	}
 	it.wg.Add(1)
 	go it.sender()
@@ -125,9 +134,9 @@
 	defer it.mu.Unlock()
 	delete(it.keepAliveDeadlines, ackID)
 	if ack {
-		it.pendingReq.AckIds = append(it.pendingReq.AckIds, ackID)
+		it.pendingAcks[ackID] = true
 	} else {
-		it.pendingModAcks[ackID] = 0 // Nack indicated by modifying the deadline to zero.
+		it.pendingNacks[ackID] = true
 	}
 	it.checkDrained()
 }
@@ -170,26 +179,28 @@
 		it.fail(err)
 		return nil, err
 	}
-
 	// We received some messages. Remember them so we can keep them alive. Also,
-	// arrange for a receipt mod-ack (which will occur at the next firing of
-	// nackTicker).
+	// do a receipt mod-ack.
 	maxExt := time.Now().Add(it.po.maxExtension)
-	deadline := trunc32(int64(it.po.ackDeadline.Seconds()))
+	ackIDs := map[string]bool{}
 	it.mu.Lock()
 	now := time.Now()
 	for _, m := range msgs {
 		m.receiveTime = now
+		addRecv(m.ID, m.ackID, now)
 		m.doneFunc = it.done
 		it.keepAliveDeadlines[m.ackID] = maxExt
 		// The receipt mod-ack uses the subscription's configured ack deadline. Don't
-		// change the mod-ack if one is already pending. This is possible if there
-		// are retries.
-		if _, ok := it.pendingModAcks[m.ackID]; !ok {
-			it.pendingModAcks[m.ackID] = deadline
+		// change the mod-ack if the message is going to be nacked. This is possible
+		// if there are retries.
+		if !it.pendingNacks[m.ackID] {
+			ackIDs[m.ackID] = true
 		}
 	}
 	it.mu.Unlock()
+	if !it.sendModAck(ackIDs, trunc32(int64(it.po.ackDeadline.Seconds()))) {
+		return nil, it.err
+	}
 	return msgs, nil
 }
 
@@ -199,11 +210,15 @@
 	defer it.kaTicker.Stop()
 	defer it.ackTicker.Stop()
 	defer it.nackTicker.Stop()
+	defer it.pingTicker.Stop()
 	defer it.ps.CloseSend()
 
 	done := false
 	for !done {
-		send := false
+		sendAcks := false
+		sendNacks := false
+		sendModAcks := false
+		sendPing := false
 		select {
 		case <-it.ctx.Done():
 			// Context canceled or timed out: stop immediately, without
@@ -216,68 +231,75 @@
 
 		case <-it.drained:
 			// All outstanding messages have been marked done:
-			// nothing left to do except send the final request.
+			// nothing left to do except make the final calls.
 			it.mu.Lock()
-			send = (len(it.pendingReq.AckIds) > 0 || len(it.pendingModAcks) > 0)
+			sendAcks = (len(it.pendingAcks) > 0)
+			sendNacks = (len(it.pendingNacks) > 0)
+			// No point in sending modacks.
 			done = true
 
 		case <-it.kaTicker.C:
 			it.mu.Lock()
 			it.handleKeepAlives()
-			send = (len(it.pendingModAcks) > 0)
+			sendModAcks = (len(it.pendingModAcks) > 0)
 
 		case <-it.nackTicker.C:
 			it.mu.Lock()
-			send = (len(it.pendingModAcks) > 0)
+			sendNacks = (len(it.pendingNacks) > 0)
 
 		case <-it.ackTicker.C:
 			it.mu.Lock()
-			send = (len(it.pendingReq.AckIds) > 0)
+			sendAcks = (len(it.pendingAcks) > 0)
+
+		case <-it.pingTicker.C:
+			it.mu.Lock()
+			// Ping only if we are processing messages.
+			sendPing = (len(it.keepAliveDeadlines) > 0)
 		}
 		// Lock is held here.
-		if send {
-			req := it.pendingReq
-			it.pendingReq = &pb.StreamingPullRequest{}
-			modAcks := it.pendingModAcks
-			it.pendingModAcks = map[string]int32{}
-			it.mu.Unlock()
-			for id, s := range modAcks {
-				req.ModifyDeadlineAckIds = append(req.ModifyDeadlineAckIds, id)
-				req.ModifyDeadlineSeconds = append(req.ModifyDeadlineSeconds, s)
-			}
-			err := it.send(req)
-			if err != nil {
-				// The streamingPuller handles retries, so any error here
-				// is fatal to the iterator.
-				it.fail(err)
+		var acks, nacks, modAcks map[string]bool
+		if sendAcks {
+			acks = it.pendingAcks
+			it.pendingAcks = map[string]bool{}
+		}
+		if sendNacks {
+			nacks = it.pendingNacks
+			it.pendingNacks = map[string]bool{}
+		}
+		if sendModAcks {
+			modAcks = it.pendingModAcks
+			it.pendingModAcks = map[string]bool{}
+		}
+		it.mu.Unlock()
+		// Make Ack and ModAck RPCs.
+		if sendAcks {
+			if !it.sendAck(acks) {
 				return
 			}
-		} else {
-			it.mu.Unlock()
+		}
+		if sendNacks {
+			// Nack indicated by modifying the deadline to zero.
+			if !it.sendModAck(nacks, 0) {
+				return
+			}
+		}
+		if sendModAcks {
+			if !it.sendModAck(modAcks, trunc32(int64(it.po.ackDeadline.Seconds()))) {
+				return
+			}
+		}
+		if sendPing {
+			it.pingStream()
 		}
 	}
 }
 
-func (it *streamingMessageIterator) send(req *pb.StreamingPullRequest) error {
-	// Note: len(modAckIDs) == len(modSecs)
-	var rest *pb.StreamingPullRequest
-	for len(req.AckIds) > 0 || len(req.ModifyDeadlineAckIds) > 0 {
-		req, rest = splitRequest(req, maxPayload)
-		if err := it.ps.Send(req); err != nil {
-			return err
-		}
-		req = rest
-	}
-	return nil
-}
-
 // handleKeepAlives modifies the pending request to include deadline extensions
 // for live messages. It also purges expired messages.
 //
 // Called with the lock held.
 func (it *streamingMessageIterator) handleKeepAlives() {
 	now := time.Now()
-	dl := trunc32(int64(it.po.ackDeadline.Seconds()))
 	for id, expiry := range it.keepAliveDeadlines {
 		if expiry.Before(now) {
 			// This delete will not result in skipping any map items, as implied by
@@ -286,9 +308,72 @@
 			// https://groups.google.com/forum/#!msg/golang-nuts/UciASUb03Js/pzSq5iVFAQAJ.
 			delete(it.keepAliveDeadlines, id)
 		} else {
-			// This will not overwrite a nack, because nacking removes the ID from keepAliveDeadlines.
-			it.pendingModAcks[id] = dl
+			// This will not conflict with a nack, because nacking removes the ID from keepAliveDeadlines.
+			it.pendingModAcks[id] = true
 		}
 	}
 	it.checkDrained()
 }
+
+func (it *streamingMessageIterator) sendAck(m map[string]bool) bool {
+	return it.sendAckIDRPC(m, func(ids []string) error {
+		addAcks(ids)
+		return it.subc.Acknowledge(it.ctx, &pb.AcknowledgeRequest{
+			Subscription: it.subName,
+			AckIds:       ids,
+		})
+	})
+}
+
+func (it *streamingMessageIterator) sendModAck(m map[string]bool, deadlineSecs int32) bool {
+	return it.sendAckIDRPC(m, func(ids []string) error {
+		addModAcks(ids, deadlineSecs)
+		return it.subc.ModifyAckDeadline(it.ctx, &pb.ModifyAckDeadlineRequest{
+			Subscription:       it.subName,
+			AckDeadlineSeconds: deadlineSecs,
+			AckIds:             ids,
+		})
+	})
+}
+
+func (it *streamingMessageIterator) sendAckIDRPC(ackIDSet map[string]bool, call func([]string) error) bool {
+	ackIDs := make([]string, 0, len(ackIDSet))
+	for k := range ackIDSet {
+		ackIDs = append(ackIDs, k)
+	}
+	var toSend []string
+	for len(ackIDs) > 0 {
+		toSend, ackIDs = splitRequestIDs(ackIDs, maxPayload)
+		if err := call(toSend); err != nil {
+			// The underlying client handles retries, so any error is fatal to the
+			// iterator.
+			it.fail(err)
+			return false
+		}
+	}
+	return true
+}
+
+// Send a message to the stream to keep it open. The stream will close if there's no
+// traffic on it for a while. By keeping it open, we delay the start of the
+// expiration timer on messages that are buffered by gRPC or elsewhere in the
+// network. This matters if it takes a long time to process messages relative to the
+// default ack deadline, and if the messages are small enough so that many can fit
+// into the buffer.
+func (it *streamingMessageIterator) pingStream() {
+	// Ignore error; if the stream is broken, this doesn't matter anyway.
+	_ = it.ps.Send(&pb.StreamingPullRequest{})
+}
+
+func splitRequestIDs(ids []string, maxSize int) (prefix, remainder []string) {
+	size := reqFixedOverhead
+	i := 0
+	for size < maxSize && i < len(ids) {
+		size += overheadPerID + len(ids[i])
+		i++
+	}
+	if size > maxSize {
+		i--
+	}
+	return ids[:i], ids[i:]
+}
diff --git a/pubsub/iterator_test.go b/pubsub/iterator_test.go
new file mode 100644
index 0000000..8779f77
--- /dev/null
+++ b/pubsub/iterator_test.go
@@ -0,0 +1,42 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"testing"
+
+	"cloud.google.com/go/internal/testutil"
+)
+
+func TestSplitRequestIDs(t *testing.T) {
+	ids := []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"}
+	for _, test := range []struct {
+		ids        []string
+		splitIndex int
+	}{
+		{[]string{}, 0},
+		{ids, 2},
+		{ids[:2], 2},
+	} {
+		got1, got2 := splitRequestIDs(test.ids, reqFixedOverhead+20)
+		want1, want2 := test.ids[:test.splitIndex], test.ids[test.splitIndex:]
+		if !testutil.Equal(got1, want1) {
+			t.Errorf("%v, 1: got %v, want %v", test, got1, want1)
+		}
+		if !testutil.Equal(got2, want2) {
+			t.Errorf("%v, 2: got %v, want %v", test, got2, want2)
+		}
+	}
+}
diff --git a/dlp/apiv2beta1/path_funcs.go b/pubsub/nodebug.go
similarity index 60%
rename from dlp/apiv2beta1/path_funcs.go
rename to pubsub/nodebug.go
index 2450034..18bd28a 100644
--- a/dlp/apiv2beta1/path_funcs.go
+++ b/pubsub/nodebug.go
@@ -1,10 +1,10 @@
-// Copyright 2018 Google LLC
+// Copyright 2018 Google Inc. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-//     https://www.apache.org/licenses/LICENSE-2.0
+//      http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,16 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package dlp
+// +build !psdebug
 
-// ResultPath returns the path for the result resource.
-//
-// Deprecated: Use
-//   fmt.Sprintf("inspect/results/%s", result)
-// instead.
-func ResultPath(result string) string {
-	return "" +
-		"inspect/results/" +
-		result +
-		""
-}
+package pubsub
+
+import "time"
+
+func addRecv(string, string, time.Time) {}
+
+func addAcks([]string) {}
+
+func addModAcks([]string, int32) {}
diff --git a/pubsub/pullstream.go b/pubsub/pullstream.go
index a5e47a5..fc323a3 100644
--- a/pubsub/pullstream.go
+++ b/pubsub/pullstream.go
@@ -105,7 +105,7 @@
 		err error
 		bo  gax.Backoff
 	)
-	for i := 0; ; i++ {
+	for {
 		spc, err = s.get(spc)
 		if err != nil {
 			// Preserve the existing behavior of not retrying on open. Is that a bug?
@@ -118,7 +118,9 @@
 			if isRetryable(err) {
 				recordStat(s.ctx, StreamRetryCount, 1)
 				if time.Since(start) < 30*time.Second { // don't sleep if we've been blocked for a while
-					gax.Sleep(s.ctx, bo.Pause())
+					if err := gax.Sleep(s.ctx, bo.Pause()); err != nil {
+						return err
+					}
 				}
 				continue
 			}
diff --git a/pubsub/service.go b/pubsub/service.go
index c63e4d9..f73d4b9 100644
--- a/pubsub/service.go
+++ b/pubsub/service.go
@@ -76,45 +76,3 @@
 		return false
 	}
 }
-
-// Split req into a prefix that is smaller than maxSize, and a remainder.
-func splitRequest(req *pb.StreamingPullRequest, maxSize int) (prefix, remainder *pb.StreamingPullRequest) {
-	const int32Bytes = 4
-
-	// Copy all fields before splitting the variable-sized ones.
-	remainder = &pb.StreamingPullRequest{}
-	*remainder = *req
-	// Split message so it isn't too big.
-	size := reqFixedOverhead
-	i := 0
-	for size < maxSize && (i < len(req.AckIds) || i < len(req.ModifyDeadlineAckIds)) {
-		if i < len(req.AckIds) {
-			size += overheadPerID + len(req.AckIds[i])
-		}
-		if i < len(req.ModifyDeadlineAckIds) {
-			size += overheadPerID + len(req.ModifyDeadlineAckIds[i]) + int32Bytes
-		}
-		i++
-	}
-
-	min := func(a, b int) int {
-		if a < b {
-			return a
-		}
-		return b
-	}
-
-	j := i
-	if size > maxSize {
-		j--
-	}
-	k := min(j, len(req.AckIds))
-	remainder.AckIds = req.AckIds[k:]
-	req.AckIds = req.AckIds[:k]
-	k = min(j, len(req.ModifyDeadlineAckIds))
-	remainder.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[k:]
-	remainder.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[k:]
-	req.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[:k]
-	req.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[:k]
-	return req, remainder
-}
diff --git a/pubsub/service_test.go b/pubsub/service_test.go
deleted file mode 100644
index 1139d0f..0000000
--- a/pubsub/service_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pubsub
-
-import (
-	"testing"
-
-	"cloud.google.com/go/internal/testutil"
-
-	pb "google.golang.org/genproto/googleapis/pubsub/v1"
-)
-
-func TestSplitRequest(t *testing.T) {
-	split := func(a []string, i int) ([]string, []string) {
-		if len(a) < i {
-			return a, nil
-		}
-		return a[:i], a[i:]
-	}
-	ackIDs := []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"}
-	modDeadlines := []int32{1, 2, 3, 4, 5}
-	for i, test := range []struct {
-		ackIDs     []string
-		modAckIDs  []string
-		splitIndex int
-	}{
-		{ackIDs, ackIDs, 2},
-		{nil, ackIDs, 3},
-		{ackIDs, nil, 5},
-		{nil, ackIDs[:1], 1},
-	} {
-		req := &pb.StreamingPullRequest{
-			AckIds:                test.ackIDs,
-			ModifyDeadlineAckIds:  test.modAckIDs,
-			ModifyDeadlineSeconds: modDeadlines[:len(test.modAckIDs)],
-		}
-		a1, a2 := split(test.ackIDs, test.splitIndex)
-		m1, m2 := split(test.modAckIDs, test.splitIndex)
-		want1 := &pb.StreamingPullRequest{
-			AckIds:                a1,
-			ModifyDeadlineAckIds:  m1,
-			ModifyDeadlineSeconds: modDeadlines[:len(m1)],
-		}
-		want2 := &pb.StreamingPullRequest{
-			AckIds:                a2,
-			ModifyDeadlineAckIds:  m2,
-			ModifyDeadlineSeconds: modDeadlines[len(m1) : len(m1)+len(m2)],
-		}
-		got1, got2 := splitRequest(req, reqFixedOverhead+40)
-		if !testutil.Equal(got1, want1) {
-			t.Errorf("#%d: first:\ngot  %+v\nwant %+v", i, got1, want1)
-		}
-		if !testutil.Equal(got2, want2) {
-			t.Errorf("#%d: second:\ngot  %+v\nwant %+v", i, got2, want2)
-		}
-	}
-}
diff --git a/pubsub/timeout_test.go b/pubsub/timeout_test.go
index 6191bbc..f49a30b 100644
--- a/pubsub/timeout_test.go
+++ b/pubsub/timeout_test.go
@@ -81,7 +81,6 @@
 		t.Fatal(err)
 	}
 	n := atomic.LoadInt64(&nSeen)
-	t.Logf("Receive returned %v after seeing %d messages\n", err, n)
 	if n < nPublish {
 		t.Errorf("got %d messages, want %d", n, nPublish)
 	}
diff --git a/redis/apiv1beta1/cloud_redis_client.go b/redis/apiv1beta1/cloud_redis_client.go
new file mode 100644
index 0000000..3693048
--- /dev/null
+++ b/redis/apiv1beta1/cloud_redis_client.go
@@ -0,0 +1,516 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package redis
+
+import (
+	"math"
+	"time"
+
+	"cloud.google.com/go/internal/version"
+	"cloud.google.com/go/longrunning"
+	lroauto "cloud.google.com/go/longrunning/autogen"
+	anypb "github.com/golang/protobuf/ptypes/any"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	redispb "google.golang.org/genproto/googleapis/cloud/redis/v1beta1"
+	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+// CloudRedisCallOptions contains the retry settings for each method of CloudRedisClient.
+type CloudRedisCallOptions struct {
+	ListInstances  []gax.CallOption
+	GetInstance    []gax.CallOption
+	CreateInstance []gax.CallOption
+	UpdateInstance []gax.CallOption
+	DeleteInstance []gax.CallOption
+}
+
+func defaultCloudRedisClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("redis.googleapis.com:443"),
+		option.WithScopes(DefaultAuthScopes()...),
+	}
+}
+
+func defaultCloudRedisCallOptions() *CloudRedisCallOptions {
+	retry := map[[2]string][]gax.CallOption{}
+	return &CloudRedisCallOptions{
+		ListInstances:  retry[[2]string{"default", "non_idempotent"}],
+		GetInstance:    retry[[2]string{"default", "non_idempotent"}],
+		CreateInstance: retry[[2]string{"default", "non_idempotent"}],
+		UpdateInstance: retry[[2]string{"default", "non_idempotent"}],
+		DeleteInstance: retry[[2]string{"default", "non_idempotent"}],
+	}
+}
+
+// CloudRedisClient is a client for interacting with Google Cloud Memorystore for Redis API.
+type CloudRedisClient struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	cloudRedisClient redispb.CloudRedisClient
+
+	// LROClient is used internally to handle longrunning operations.
+	// It is exposed so that its CallOptions can be modified if required.
+	// Users should not Close this client.
+	LROClient *lroauto.OperationsClient
+
+	// The call options for this service.
+	CallOptions *CloudRedisCallOptions
+
+	// The x-goog-* metadata to be sent with each request.
+	xGoogMetadata metadata.MD
+}
+
+// NewCloudRedisClient creates a new cloud redis client.
+//
+// Configures and manages Cloud Memorystore for Redis instances
+//
+// Google Cloud Memorystore for Redis v1beta1
+//
+// The redis.googleapis.com service implements the Google Cloud Memorystore
+// for Redis API and defines the following resource model for managing Redis
+// instances:
+//
+//   The service works with a collection of cloud projects, named: /projects/*
+//
+//   Each project has a collection of available locations, named: /locations/*
+//
+//   Each location has a collection of Redis instances, named: /instances/*
+//
+//   As such, Redis instances are resources of the form:
+//   /projects/{project_id}/locations/{location_id}/instances/{instance_id}
+//
+// Note that location_id must refer to a GCP region; for example:
+//
+//   projects/redpepper-1290/locations/us-central1/instances/my-redis
+func NewCloudRedisClient(ctx context.Context, opts ...option.ClientOption) (*CloudRedisClient, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultCloudRedisClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &CloudRedisClient{
+		conn:        conn,
+		CallOptions: defaultCloudRedisCallOptions(),
+
+		cloudRedisClient: redispb.NewCloudRedisClient(conn),
+	}
+	c.setGoogleClientInfo()
+
+	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
+	if err != nil {
+		// This error "should not happen", since we are just reusing old connection
+		// and never actually need to dial.
+		// If this does happen, we could leak conn. However, we cannot close conn:
+		// If the user invoked the function with option.WithGRPCConn,
+		// we would close a connection that's still in use.
+		// TODO(pongad): investigate error conditions.
+		return nil, err
+	}
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *CloudRedisClient) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *CloudRedisClient) Close() error {
+	return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *CloudRedisClient) setGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", version.Go()}, keyval...)
+	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListInstances lists all Redis instances owned by a project in either the specified
+// location (region) or all locations.
+//
+// The location should have the following format:
+//
+//   projects/{project_id}/locations/{location_id}
+//
+// If location_id is specified as - (wildcard), then all regions
+// available to the project are queried, and the results are aggregated.
+func (c *CloudRedisClient) ListInstances(ctx context.Context, req *redispb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListInstances[0:len(c.CallOptions.ListInstances):len(c.CallOptions.ListInstances)], opts...)
+	it := &InstanceIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*redispb.Instance, string, error) {
+		var resp *redispb.ListInstancesResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.cloudRedisClient.ListInstances(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Instances, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// GetInstance gets the details of a specific Redis instance.
+func (c *CloudRedisClient) GetInstance(ctx context.Context, req *redispb.GetInstanceRequest, opts ...gax.CallOption) (*redispb.Instance, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.GetInstance[0:len(c.CallOptions.GetInstance):len(c.CallOptions.GetInstance)], opts...)
+	var resp *redispb.Instance
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.cloudRedisClient.GetInstance(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// CreateInstance creates a Redis instance based on the specified tier and memory size.
+//
+// By default, the instance is peered to the project's
+// default network (at /compute/docs/networks-and-firewalls#networks).
+//
+// The creation is executed asynchronously and callers may check the returned
+// operation to track its progress. Once the operation is completed the Redis
+// instance will be fully functional. Completed longrunning.Operation will
+// contain the new instance object in the response field.
+//
+// The returned operation is automatically deleted after a few hours, so there
+// is no need to call DeleteOperation.
+func (c *CloudRedisClient) CreateInstance(ctx context.Context, req *redispb.CreateInstanceRequest, opts ...gax.CallOption) (*CreateInstanceOperation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.CreateInstance[0:len(c.CallOptions.CreateInstance):len(c.CallOptions.CreateInstance)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.cloudRedisClient.CreateInstance(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &CreateInstanceOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, resp),
+	}, nil
+}
+
+// UpdateInstance updates the metadata and configuration of a specific Redis instance.
+//
+// Completed longrunning.Operation will contain the new instance object
+// in the response field. The returned operation is automatically deleted
+// after a few hours, so there is no need to call DeleteOperation.
+func (c *CloudRedisClient) UpdateInstance(ctx context.Context, req *redispb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.UpdateInstance[0:len(c.CallOptions.UpdateInstance):len(c.CallOptions.UpdateInstance)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.cloudRedisClient.UpdateInstance(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &UpdateInstanceOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, resp),
+	}, nil
+}
+
+// DeleteInstance deletes a specific Redis instance.  Instance stops serving and data is
+// deleted.
+func (c *CloudRedisClient) DeleteInstance(ctx context.Context, req *redispb.DeleteInstanceRequest, opts ...gax.CallOption) (*DeleteInstanceOperation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.DeleteInstance[0:len(c.CallOptions.DeleteInstance):len(c.CallOptions.DeleteInstance)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.cloudRedisClient.DeleteInstance(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &DeleteInstanceOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, resp),
+	}, nil
+}
+
+// InstanceIterator manages a stream of *redispb.Instance.
+type InstanceIterator struct {
+	items    []*redispb.Instance
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*redispb.Instance, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *InstanceIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *InstanceIterator) Next() (*redispb.Instance, error) {
+	var item *redispb.Instance
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *InstanceIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *InstanceIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
+
+// CreateInstanceOperation manages a long-running operation from CreateInstance.
+type CreateInstanceOperation struct {
+	lro *longrunning.Operation
+}
+
+// CreateInstanceOperation returns a new CreateInstanceOperation from a given name.
+// The name must be that of a previously created CreateInstanceOperation, possibly from a different process.
+func (c *CloudRedisClient) CreateInstanceOperation(name string) *CreateInstanceOperation {
+	return &CreateInstanceOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
+	}
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *CreateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*redispb.Instance, error) {
+	var resp redispb.Instance
+	if err := op.lro.WaitWithInterval(ctx, &resp, 360000*time.Millisecond, opts...); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *CreateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*redispb.Instance, error) {
+	var resp redispb.Instance
+	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+		return nil, err
+	}
+	if !op.Done() {
+		return nil, nil
+	}
+	return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *CreateInstanceOperation) Metadata() (*anypb.Any, error) {
+	var meta anypb.Any
+	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *CreateInstanceOperation) Done() bool {
+	return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *CreateInstanceOperation) Name() string {
+	return op.lro.Name()
+}
+
+// DeleteInstanceOperation manages a long-running operation from DeleteInstance.
+type DeleteInstanceOperation struct {
+	lro *longrunning.Operation
+}
+
+// DeleteInstanceOperation returns a new DeleteInstanceOperation from a given name.
+// The name must be that of a previously created DeleteInstanceOperation, possibly from a different process.
+func (c *CloudRedisClient) DeleteInstanceOperation(name string) *DeleteInstanceOperation {
+	return &DeleteInstanceOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
+	}
+}
+
+// Wait blocks until the long-running operation is completed, returning any error encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *DeleteInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
+	return op.lro.WaitWithInterval(ctx, nil, 360000*time.Millisecond, opts...)
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully, op.Done will return true.
+func (op *DeleteInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
+	return op.lro.Poll(ctx, nil, opts...)
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *DeleteInstanceOperation) Metadata() (*anypb.Any, error) {
+	var meta anypb.Any
+	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *DeleteInstanceOperation) Done() bool {
+	return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *DeleteInstanceOperation) Name() string {
+	return op.lro.Name()
+}
+
+// UpdateInstanceOperation manages a long-running operation from UpdateInstance.
+type UpdateInstanceOperation struct {
+	lro *longrunning.Operation
+}
+
+// UpdateInstanceOperation returns a new UpdateInstanceOperation from a given name.
+// The name must be that of a previously created UpdateInstanceOperation, possibly from a different process.
+func (c *CloudRedisClient) UpdateInstanceOperation(name string) *UpdateInstanceOperation {
+	return &UpdateInstanceOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
+	}
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *UpdateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*redispb.Instance, error) {
+	var resp redispb.Instance
+	if err := op.lro.WaitWithInterval(ctx, &resp, 360000*time.Millisecond, opts...); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *UpdateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*redispb.Instance, error) {
+	var resp redispb.Instance
+	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+		return nil, err
+	}
+	if !op.Done() {
+		return nil, nil
+	}
+	return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *UpdateInstanceOperation) Metadata() (*anypb.Any, error) {
+	var meta anypb.Any
+	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *UpdateInstanceOperation) Done() bool {
+	return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *UpdateInstanceOperation) Name() string {
+	return op.lro.Name()
+}
diff --git a/redis/apiv1beta1/cloud_redis_client_example_test.go b/redis/apiv1beta1/cloud_redis_client_example_test.go
new file mode 100644
index 0000000..6d2eeb1
--- /dev/null
+++ b/redis/apiv1beta1/cloud_redis_client_example_test.go
@@ -0,0 +1,141 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package redis_test
+
+import (
+	"cloud.google.com/go/redis/apiv1beta1"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	redispb "google.golang.org/genproto/googleapis/cloud/redis/v1beta1"
+)
+
+func ExampleNewCloudRedisClient() {
+	ctx := context.Background()
+	c, err := redis.NewCloudRedisClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use client.
+	_ = c
+}
+
+func ExampleCloudRedisClient_ListInstances() {
+	ctx := context.Background()
+	c, err := redis.NewCloudRedisClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &redispb.ListInstancesRequest{
+		// TODO: Fill request struct fields.
+	}
+	it := c.ListInstances(ctx, req)
+	for {
+		resp, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		// TODO: Use resp.
+		_ = resp
+	}
+}
+
+func ExampleCloudRedisClient_GetInstance() {
+	ctx := context.Background()
+	c, err := redis.NewCloudRedisClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &redispb.GetInstanceRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.GetInstance(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleCloudRedisClient_CreateInstance() {
+	ctx := context.Background()
+	c, err := redis.NewCloudRedisClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &redispb.CreateInstanceRequest{
+		// TODO: Fill request struct fields.
+	}
+	op, err := c.CreateInstance(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	resp, err := op.Wait(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleCloudRedisClient_UpdateInstance() {
+	ctx := context.Background()
+	c, err := redis.NewCloudRedisClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &redispb.UpdateInstanceRequest{
+		// TODO: Fill request struct fields.
+	}
+	op, err := c.UpdateInstance(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	resp, err := op.Wait(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleCloudRedisClient_DeleteInstance() {
+	ctx := context.Background()
+	c, err := redis.NewCloudRedisClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &redispb.DeleteInstanceRequest{
+		// TODO: Fill request struct fields.
+	}
+	op, err := c.DeleteInstance(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	err = op.Wait(ctx)
+	// TODO: Handle error.
+}
diff --git a/dlp/apiv2beta1/doc.go b/redis/apiv1beta1/doc.go
similarity index 81%
rename from dlp/apiv2beta1/doc.go
rename to redis/apiv1beta1/doc.go
index 28a2d82..e392a6d 100644
--- a/dlp/apiv2beta1/doc.go
+++ b/redis/apiv1beta1/doc.go
@@ -14,15 +14,15 @@
 
 // AUTO-GENERATED CODE. DO NOT EDIT.
 
-// Package dlp is an auto-generated package for the
-// DLP API.
+// Package redis is an auto-generated package for the
+// Google Cloud Memorystore for Redis API.
 //
 //   NOTE: This package is in alpha. It is not stable, and is likely to change.
 //
-// The Google Data Loss Prevention API provides methods for detection of
-// privacy-sensitive fragments in text, images, and Google Cloud Platform
-// storage repositories.
-package dlp // import "cloud.google.com/go/dlp/apiv2beta1"
+// The Google Cloud Memorystore for Redis API is used for creating and
+// managing
+// Redis instances on the Google Cloud Platform.
+package redis // import "cloud.google.com/go/redis/apiv1beta1"
 
 import (
 	"golang.org/x/net/context"
diff --git a/redis/apiv1beta1/mock_test.go b/redis/apiv1beta1/mock_test.go
new file mode 100644
index 0000000..3acf440
--- /dev/null
+++ b/redis/apiv1beta1/mock_test.go
@@ -0,0 +1,641 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package redis
+
+import (
+	emptypb "github.com/golang/protobuf/ptypes/empty"
+	redispb "google.golang.org/genproto/googleapis/cloud/redis/v1beta1"
+	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+	field_maskpb "google.golang.org/genproto/protobuf/field_mask"
+)
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	status "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	gstatus "google.golang.org/grpc/status"
+)
+
+var _ = io.EOF
+var _ = ptypes.MarshalAny
+var _ status.Status
+
+type mockCloudRedisServer struct {
+	// Embed for forward compatibility.
+	// Tests will keep working if more methods are added
+	// in the future.
+	redispb.CloudRedisServer
+
+	reqs []proto.Message
+
+	// If set, all calls return this error.
+	err error
+
+	// responses to return if err == nil
+	resps []proto.Message
+}
+
+func (s *mockCloudRedisServer) ListInstances(ctx context.Context, req *redispb.ListInstancesRequest) (*redispb.ListInstancesResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*redispb.ListInstancesResponse), nil
+}
+
+func (s *mockCloudRedisServer) GetInstance(ctx context.Context, req *redispb.GetInstanceRequest) (*redispb.Instance, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*redispb.Instance), nil
+}
+
+func (s *mockCloudRedisServer) CreateInstance(ctx context.Context, req *redispb.CreateInstanceRequest) (*longrunningpb.Operation, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*longrunningpb.Operation), nil
+}
+
+func (s *mockCloudRedisServer) UpdateInstance(ctx context.Context, req *redispb.UpdateInstanceRequest) (*longrunningpb.Operation, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*longrunningpb.Operation), nil
+}
+
+func (s *mockCloudRedisServer) DeleteInstance(ctx context.Context, req *redispb.DeleteInstanceRequest) (*longrunningpb.Operation, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*longrunningpb.Operation), nil
+}
+
+// clientOpt is the option tests should use to connect to the test server.
+// It is initialized by TestMain.
+var clientOpt option.ClientOption
+
+var (
+	mockCloudRedis mockCloudRedisServer
+)
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	serv := grpc.NewServer()
+	redispb.RegisterCloudRedisServer(serv, &mockCloudRedis)
+
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		log.Fatal(err)
+	}
+	go serv.Serve(lis)
+
+	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
+	if err != nil {
+		log.Fatal(err)
+	}
+	clientOpt = option.WithGRPCConn(conn)
+
+	os.Exit(m.Run())
+}
+
+func TestCloudRedisListInstances(t *testing.T) {
+	var nextPageToken string = ""
+	var instancesElement *redispb.Instance = &redispb.Instance{}
+	var instances = []*redispb.Instance{instancesElement}
+	var expectedResponse = &redispb.ListInstancesResponse{
+		NextPageToken: nextPageToken,
+		Instances:     instances,
+	}
+
+	mockCloudRedis.err = nil
+	mockCloudRedis.reqs = nil
+
+	mockCloudRedis.resps = append(mockCloudRedis.resps[:0], expectedResponse)
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var request = &redispb.ListInstancesRequest{
+		Parent: formattedParent,
+	}
+
+	c, err := NewCloudRedisClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListInstances(context.Background(), request).Next()
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudRedis.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	want := (interface{})(expectedResponse.Instances[0])
+	got := (interface{})(resp)
+	var ok bool
+
+	switch want := (want).(type) {
+	case proto.Message:
+		ok = proto.Equal(want, got.(proto.Message))
+	default:
+		ok = want == got
+	}
+	if !ok {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudRedisListInstancesError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudRedis.err = gstatus.Error(errCode, "test error")
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var request = &redispb.ListInstancesRequest{
+		Parent: formattedParent,
+	}
+
+	c, err := NewCloudRedisClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListInstances(context.Background(), request).Next()
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudRedisGetInstance(t *testing.T) {
+	var name2 string = "name2-1052831874"
+	var displayName string = "displayName1615086568"
+	var locationId string = "locationId552319461"
+	var alternativeLocationId string = "alternativeLocationId-718920621"
+	var redisVersion string = "redisVersion-685310444"
+	var reservedIpRange string = "reservedIpRange-1082940580"
+	var host string = "host3208616"
+	var port int32 = 3446913
+	var currentLocationId string = "currentLocationId1312712735"
+	var statusMessage string = "statusMessage-239442758"
+	var memorySizeGb int32 = 34199707
+	var authorizedNetwork string = "authorizedNetwork-1733809270"
+	var expectedResponse = &redispb.Instance{
+		Name:                  name2,
+		DisplayName:           displayName,
+		LocationId:            locationId,
+		AlternativeLocationId: alternativeLocationId,
+		RedisVersion:          redisVersion,
+		ReservedIpRange:       reservedIpRange,
+		Host:                  host,
+		Port:                  port,
+		CurrentLocationId:     currentLocationId,
+		StatusMessage:         statusMessage,
+		MemorySizeGb:          memorySizeGb,
+		AuthorizedNetwork:     authorizedNetwork,
+	}
+
+	mockCloudRedis.err = nil
+	mockCloudRedis.reqs = nil
+
+	mockCloudRedis.resps = append(mockCloudRedis.resps[:0], expectedResponse)
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/instances/%s", "[PROJECT]", "[LOCATION]", "[INSTANCE]")
+	var request = &redispb.GetInstanceRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewCloudRedisClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetInstance(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudRedis.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudRedisGetInstanceError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudRedis.err = gstatus.Error(errCode, "test error")
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/instances/%s", "[PROJECT]", "[LOCATION]", "[INSTANCE]")
+	var request = &redispb.GetInstanceRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewCloudRedisClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetInstance(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudRedisCreateInstance(t *testing.T) {
+	var name string = "name3373707"
+	var displayName string = "displayName1615086568"
+	var locationId string = "locationId552319461"
+	var alternativeLocationId string = "alternativeLocationId-718920621"
+	var redisVersion string = "redisVersion-685310444"
+	var reservedIpRange string = "reservedIpRange-1082940580"
+	var host string = "host3208616"
+	var port int32 = 3446913
+	var currentLocationId string = "currentLocationId1312712735"
+	var statusMessage string = "statusMessage-239442758"
+	var memorySizeGb2 int32 = 1493816946
+	var authorizedNetwork string = "authorizedNetwork-1733809270"
+	var expectedResponse = &redispb.Instance{
+		Name:                  name,
+		DisplayName:           displayName,
+		LocationId:            locationId,
+		AlternativeLocationId: alternativeLocationId,
+		RedisVersion:          redisVersion,
+		ReservedIpRange:       reservedIpRange,
+		Host:                  host,
+		Port:                  port,
+		CurrentLocationId:     currentLocationId,
+		StatusMessage:         statusMessage,
+		MemorySizeGb:          memorySizeGb2,
+		AuthorizedNetwork:     authorizedNetwork,
+	}
+
+	mockCloudRedis.err = nil
+	mockCloudRedis.reqs = nil
+
+	any, err := ptypes.MarshalAny(expectedResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+	mockCloudRedis.resps = append(mockCloudRedis.resps[:0], &longrunningpb.Operation{
+		Name:   "longrunning-test",
+		Done:   true,
+		Result: &longrunningpb.Operation_Response{Response: any},
+	})
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var instanceId string = "test_instance"
+	var tier redispb.Instance_Tier = redispb.Instance_BASIC
+	var memorySizeGb int32 = 1
+	var instance = &redispb.Instance{
+		Tier:         tier,
+		MemorySizeGb: memorySizeGb,
+	}
+	var request = &redispb.CreateInstanceRequest{
+		Parent:     formattedParent,
+		InstanceId: instanceId,
+		Instance:   instance,
+	}
+
+	c, err := NewCloudRedisClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	respLRO, err := c.CreateInstance(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+	resp, err := respLRO.Wait(context.Background())
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudRedis.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudRedisCreateInstanceError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudRedis.err = nil
+	mockCloudRedis.resps = append(mockCloudRedis.resps[:0], &longrunningpb.Operation{
+		Name: "longrunning-test",
+		Done: true,
+		Result: &longrunningpb.Operation_Error{
+			Error: &status.Status{
+				Code:    int32(errCode),
+				Message: "test error",
+			},
+		},
+	})
+
+	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var instanceId string = "test_instance"
+	var tier redispb.Instance_Tier = redispb.Instance_BASIC
+	var memorySizeGb int32 = 1
+	var instance = &redispb.Instance{
+		Tier:         tier,
+		MemorySizeGb: memorySizeGb,
+	}
+	var request = &redispb.CreateInstanceRequest{
+		Parent:     formattedParent,
+		InstanceId: instanceId,
+		Instance:   instance,
+	}
+
+	c, err := NewCloudRedisClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	respLRO, err := c.CreateInstance(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+	resp, err := respLRO.Wait(context.Background())
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudRedisUpdateInstance(t *testing.T) {
+	var name string = "name3373707"
+	var displayName2 string = "displayName21615000987"
+	var locationId string = "locationId552319461"
+	var alternativeLocationId string = "alternativeLocationId-718920621"
+	var redisVersion string = "redisVersion-685310444"
+	var reservedIpRange string = "reservedIpRange-1082940580"
+	var host string = "host3208616"
+	var port int32 = 3446913
+	var currentLocationId string = "currentLocationId1312712735"
+	var statusMessage string = "statusMessage-239442758"
+	var memorySizeGb2 int32 = 1493816946
+	var authorizedNetwork string = "authorizedNetwork-1733809270"
+	var expectedResponse = &redispb.Instance{
+		Name:                  name,
+		DisplayName:           displayName2,
+		LocationId:            locationId,
+		AlternativeLocationId: alternativeLocationId,
+		RedisVersion:          redisVersion,
+		ReservedIpRange:       reservedIpRange,
+		Host:                  host,
+		Port:                  port,
+		CurrentLocationId:     currentLocationId,
+		StatusMessage:         statusMessage,
+		MemorySizeGb:          memorySizeGb2,
+		AuthorizedNetwork:     authorizedNetwork,
+	}
+
+	mockCloudRedis.err = nil
+	mockCloudRedis.reqs = nil
+
+	any, err := ptypes.MarshalAny(expectedResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+	mockCloudRedis.resps = append(mockCloudRedis.resps[:0], &longrunningpb.Operation{
+		Name:   "longrunning-test",
+		Done:   true,
+		Result: &longrunningpb.Operation_Response{Response: any},
+	})
+
+	var pathsElement string = "display_name"
+	var pathsElement2 string = "memory_size_gb"
+	var paths = []string{pathsElement, pathsElement2}
+	var updateMask = &field_maskpb.FieldMask{
+		Paths: paths,
+	}
+	var displayName string = "UpdatedDisplayName"
+	var memorySizeGb int32 = 4
+	var instance = &redispb.Instance{
+		DisplayName:  displayName,
+		MemorySizeGb: memorySizeGb,
+	}
+	var request = &redispb.UpdateInstanceRequest{
+		UpdateMask: updateMask,
+		Instance:   instance,
+	}
+
+	c, err := NewCloudRedisClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	respLRO, err := c.UpdateInstance(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+	resp, err := respLRO.Wait(context.Background())
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudRedis.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestCloudRedisUpdateInstanceError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudRedis.err = nil
+	mockCloudRedis.resps = append(mockCloudRedis.resps[:0], &longrunningpb.Operation{
+		Name: "longrunning-test",
+		Done: true,
+		Result: &longrunningpb.Operation_Error{
+			Error: &status.Status{
+				Code:    int32(errCode),
+				Message: "test error",
+			},
+		},
+	})
+
+	var pathsElement string = "display_name"
+	var pathsElement2 string = "memory_size_gb"
+	var paths = []string{pathsElement, pathsElement2}
+	var updateMask = &field_maskpb.FieldMask{
+		Paths: paths,
+	}
+	var displayName string = "UpdatedDisplayName"
+	var memorySizeGb int32 = 4
+	var instance = &redispb.Instance{
+		DisplayName:  displayName,
+		MemorySizeGb: memorySizeGb,
+	}
+	var request = &redispb.UpdateInstanceRequest{
+		UpdateMask: updateMask,
+		Instance:   instance,
+	}
+
+	c, err := NewCloudRedisClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	respLRO, err := c.UpdateInstance(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+	resp, err := respLRO.Wait(context.Background())
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestCloudRedisDeleteInstance(t *testing.T) {
+	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
+
+	mockCloudRedis.err = nil
+	mockCloudRedis.reqs = nil
+
+	any, err := ptypes.MarshalAny(expectedResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+	mockCloudRedis.resps = append(mockCloudRedis.resps[:0], &longrunningpb.Operation{
+		Name:   "longrunning-test",
+		Done:   true,
+		Result: &longrunningpb.Operation_Response{Response: any},
+	})
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/instances/%s", "[PROJECT]", "[LOCATION]", "[INSTANCE]")
+	var request = &redispb.DeleteInstanceRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewCloudRedisClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	respLRO, err := c.DeleteInstance(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = respLRO.Wait(context.Background())
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockCloudRedis.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+}
+
+func TestCloudRedisDeleteInstanceError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockCloudRedis.err = nil
+	mockCloudRedis.resps = append(mockCloudRedis.resps[:0], &longrunningpb.Operation{
+		Name: "longrunning-test",
+		Done: true,
+		Result: &longrunningpb.Operation_Error{
+			Error: &status.Status{
+				Code:    int32(errCode),
+				Message: "test error",
+			},
+		},
+	})
+
+	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/instances/%s", "[PROJECT]", "[LOCATION]", "[INSTANCE]")
+	var request = &redispb.DeleteInstanceRequest{
+		Name: formattedName,
+	}
+
+	c, err := NewCloudRedisClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	respLRO, err := c.DeleteInstance(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = respLRO.Wait(context.Background())
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+}
diff --git a/regen-gapic.sh b/regen-gapic.sh
index eb75ffb..eede862 100755
--- a/regen-gapic.sh
+++ b/regen-gapic.sh
@@ -30,8 +30,11 @@
 google/cloud/language/artman_language_v1.yaml
 google/cloud/language/artman_language_v1beta2.yaml
 google/cloud/oslogin/artman_oslogin_v1beta.yaml
+google/cloud/redis/artman_redis_v1beta1.yaml
 google/cloud/speech/artman_speech_v1.yaml
 google/cloud/speech/artman_speech_v1beta1.yaml
+google/cloud/speech/artman_speech_v1p1beta1.yaml
+google/cloud/tasks/artman_cloudtasks.yaml
 google/cloud/videointelligence/artman_videointelligence_v1beta1.yaml
 google/cloud/videointelligence/artman_videointelligence_v1beta2.yaml
 google/cloud/vision/artman_vision_v1.yaml
@@ -45,7 +48,6 @@
 google/logging/artman_logging.yaml
 google/longrunning/artman_longrunning.yaml
 google/monitoring/artman_monitoring.yaml
-google/privacy/dlp/artman_dlp_v2beta1.yaml
 google/privacy/dlp/artman_dlp_v2.yaml
 google/pubsub/artman_pubsub.yaml
 google/spanner/admin/database/artman_spanner_admin_database.yaml
@@ -59,8 +61,8 @@
   cp -r artman-genfiles/gapi-*/cloud.google.com/go/* $GOPATH/src/cloud.google.com/go/
 done
 
-go list cloud.google.com/go/... | grep apiv | xargs go test
+#go list cloud.google.com/go/... | grep apiv | xargs go test
 
-go test -short cloud.google.com/go/...
+#go test -short cloud.google.com/go/...
 
-echo "googleapis version: $(git rev-parse HEAD)"
+#echo "googleapis version: $(git rev-parse HEAD)"
diff --git a/run-tests.sh b/run-tests.sh
index f47ff50..a643e61 100755
--- a/run-tests.sh
+++ b/run-tests.sh
@@ -70,8 +70,8 @@
   return 1
 }
 
-# Collect the packages into two separate lists. (It is faster go test a list of
-# packages than to individually go test each one.)
+# Collect the packages into two separate lists. (It is faster to call "go test" on a
+# list of packages than to individually "go test" each one.)
 
 shorts=
 fulls=
diff --git a/spanner/client.go b/spanner/client.go
index 7597fee..ed3b1ed 100644
--- a/spanner/client.go
+++ b/spanner/client.go
@@ -442,8 +442,7 @@
 	}
 	if !ao.atLeastOnce {
 		return c.ReadWriteTransaction(ctx, func(ctx context.Context, t *ReadWriteTransaction) error {
-			t.BufferWrite(ms)
-			return nil
+			return t.BufferWrite(ms)
 		})
 	}
 
diff --git a/spanner/read.go b/spanner/read.go
index 645c84d..5008580 100644
--- a/spanner/read.go
+++ b/spanner/read.go
@@ -255,7 +255,7 @@
 	// ctx is the caller's context, used for cancel/timeout Next().
 	ctx context.Context
 	// rpc is a factory of streamingReceiver, which might resume
-	// a pervious stream from the point encoded in restartToken.
+	// a previous stream from the point encoded in restartToken.
 	// rpc is always a wrapper of a Cloud Spanner query which is
 	// resumable.
 	rpc func(ctx context.Context, restartToken []byte) (streamingReceiver, error)
diff --git a/spanner/row_test.go b/spanner/row_test.go
index 3e3dbc1..02bfe3e 100644
--- a/spanner/row_test.go
+++ b/spanner/row_test.go
@@ -37,45 +37,45 @@
 	row = Row{
 		[]*sppb.StructType_Field{
 			// STRING / STRING ARRAY
-			{"STRING", stringType()},
-			{"NULL_STRING", stringType()},
-			{"STRING_ARRAY", listType(stringType())},
-			{"NULL_STRING_ARRAY", listType(stringType())},
+			{Name: "STRING", Type: stringType()},
+			{Name: "NULL_STRING", Type: stringType()},
+			{Name: "STRING_ARRAY", Type: listType(stringType())},
+			{Name: "NULL_STRING_ARRAY", Type: listType(stringType())},
 			// BYTES / BYTES ARRAY
-			{"BYTES", bytesType()},
-			{"NULL_BYTES", bytesType()},
-			{"BYTES_ARRAY", listType(bytesType())},
-			{"NULL_BYTES_ARRAY", listType(bytesType())},
+			{Name: "BYTES", Type: bytesType()},
+			{Name: "NULL_BYTES", Type: bytesType()},
+			{Name: "BYTES_ARRAY", Type: listType(bytesType())},
+			{Name: "NULL_BYTES_ARRAY", Type: listType(bytesType())},
 			// INT64 / INT64 ARRAY
-			{"INT64", intType()},
-			{"NULL_INT64", intType()},
-			{"INT64_ARRAY", listType(intType())},
-			{"NULL_INT64_ARRAY", listType(intType())},
+			{Name: "INT64", Type: intType()},
+			{Name: "NULL_INT64", Type: intType()},
+			{Name: "INT64_ARRAY", Type: listType(intType())},
+			{Name: "NULL_INT64_ARRAY", Type: listType(intType())},
 			// BOOL / BOOL ARRAY
-			{"BOOL", boolType()},
-			{"NULL_BOOL", boolType()},
-			{"BOOL_ARRAY", listType(boolType())},
-			{"NULL_BOOL_ARRAY", listType(boolType())},
+			{Name: "BOOL", Type: boolType()},
+			{Name: "NULL_BOOL", Type: boolType()},
+			{Name: "BOOL_ARRAY", Type: listType(boolType())},
+			{Name: "NULL_BOOL_ARRAY", Type: listType(boolType())},
 			// FLOAT64 / FLOAT64 ARRAY
-			{"FLOAT64", floatType()},
-			{"NULL_FLOAT64", floatType()},
-			{"FLOAT64_ARRAY", listType(floatType())},
-			{"NULL_FLOAT64_ARRAY", listType(floatType())},
+			{Name: "FLOAT64", Type: floatType()},
+			{Name: "NULL_FLOAT64", Type: floatType()},
+			{Name: "FLOAT64_ARRAY", Type: listType(floatType())},
+			{Name: "NULL_FLOAT64_ARRAY", Type: listType(floatType())},
 			// TIMESTAMP / TIMESTAMP ARRAY
-			{"TIMESTAMP", timeType()},
-			{"NULL_TIMESTAMP", timeType()},
-			{"TIMESTAMP_ARRAY", listType(timeType())},
-			{"NULL_TIMESTAMP_ARRAY", listType(timeType())},
+			{Name: "TIMESTAMP", Type: timeType()},
+			{Name: "NULL_TIMESTAMP", Type: timeType()},
+			{Name: "TIMESTAMP_ARRAY", Type: listType(timeType())},
+			{Name: "NULL_TIMESTAMP_ARRAY", Type: listType(timeType())},
 			// DATE / DATE ARRAY
-			{"DATE", dateType()},
-			{"NULL_DATE", dateType()},
-			{"DATE_ARRAY", listType(dateType())},
-			{"NULL_DATE_ARRAY", listType(dateType())},
+			{Name: "DATE", Type: dateType()},
+			{Name: "NULL_DATE", Type: dateType()},
+			{Name: "DATE_ARRAY", Type: listType(dateType())},
+			{Name: "NULL_DATE_ARRAY", Type: listType(dateType())},
 
 			// STRUCT ARRAY
 			{
-				"STRUCT_ARRAY",
-				listType(
+				Name: "STRUCT_ARRAY",
+				Type: listType(
 					structType(
 						mkField("Col1", intType()),
 						mkField("Col2", floatType()),
@@ -84,8 +84,8 @@
 				),
 			},
 			{
-				"NULL_STRUCT_ARRAY",
-				listType(
+				Name: "NULL_STRUCT_ARRAY",
+				Type: listType(
 					structType(
 						mkField("Col1", intType()),
 						mkField("Col2", floatType()),
@@ -279,7 +279,7 @@
 		{
 			&Row{
 				[]*sppb.StructType_Field{
-					{"Col0", stringType()},
+					{Name: "Col0", Type: stringType()},
 				},
 				[]*proto3.Value{stringProto("value")},
 			},
@@ -291,7 +291,7 @@
 		{
 			&Row{
 				[]*sppb.StructType_Field{
-					{"Col0", stringType()},
+					{Name: "Col0", Type: stringType()},
 				},
 				[]*proto3.Value{stringProto("value")},
 			},
@@ -304,8 +304,8 @@
 			&Row{
 				[]*sppb.StructType_Field{
 					{
-						"Col0",
-						listType(
+						Name: "Col0",
+						Type: listType(
 							structType(
 								mkField("Col1", intType()),
 								mkField("Col2", floatType()),
@@ -474,8 +474,8 @@
 				var s string
 				r := &Row{
 					[]*sppb.StructType_Field{
-						{"Val", stringType()},
-						{"Val", stringType()},
+						{Name: "Val", Type: stringType()},
+						{Name: "Val", Type: stringType()},
 					},
 					[]*proto3.Value{stringProto("value1"), stringProto("value2")},
 				}
@@ -491,8 +491,8 @@
 				}{}
 				r := &Row{
 					[]*sppb.StructType_Field{
-						{"Val", stringType()},
-						{"Val", stringType()},
+						{Name: "Val", Type: stringType()},
+						{Name: "Val", Type: stringType()},
 					},
 					[]*proto3.Value{stringProto("value1"), stringProto("value2")},
 				}
@@ -500,8 +500,8 @@
 			},
 			errDupSpannerField("Val", &sppb.StructType{
 				Fields: []*sppb.StructType_Field{
-					{"Val", stringType()},
-					{"Val", stringType()},
+					{Name: "Val", Type: stringType()},
+					{Name: "Val", Type: stringType()},
 				},
 			}),
 		},
@@ -513,13 +513,15 @@
 				}{}
 				r := &Row{
 					[]*sppb.StructType_Field{
-						{"", stringType()},
+						{Name: "", Type: stringType()},
 					},
 					[]*proto3.Value{stringProto("value1")},
 				}
 				return r.ToStruct(s)
 			},
-			errUnnamedField(&sppb.StructType{Fields: []*sppb.StructType_Field{{"", stringType()}}}, 0),
+			errUnnamedField(&sppb.StructType{Fields: []*sppb.StructType_Field{
+				{Name: "", Type: stringType()},
+			}}, 0),
 		},
 	} {
 		if gotErr := test.f(); !testEqual(gotErr, test.wantErr) {
@@ -612,10 +614,7 @@
 			// Field is not nil, but its type is nil.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						nil,
-					},
+					{Name: "Col0", Type: nil},
 				},
 				[]*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))},
 			},
@@ -626,12 +625,7 @@
 			// Field is not nil, field type is not nil, but it is an array and its array element type is nil.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						&sppb.Type{
-							Code: sppb.TypeCode_ARRAY,
-						},
-					},
+					{Name: "Col0", Type: &sppb.Type{Code: sppb.TypeCode_ARRAY}},
 				},
 				[]*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))},
 			},
@@ -642,10 +636,7 @@
 			// Field specifies valid type, value is nil.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						intType(),
-					},
+					{Name: "Col0", Type: intType()},
 				},
 				[]*proto3.Value{nil},
 			},
@@ -656,10 +647,7 @@
 			// Field specifies INT64 type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						intType(),
-					},
+					{Name: "Col0", Type: intType()},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}},
 			},
@@ -670,10 +658,7 @@
 			// Field specifies INT64 type, but value is for Number type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						intType(),
-					},
+					{Name: "Col0", Type: intType()},
 				},
 				[]*proto3.Value{floatProto(1.0)},
 			},
@@ -684,10 +669,7 @@
 			// Field specifies INT64 type, but value is wrongly encoded.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						intType(),
-					},
+					{Name: "Col0", Type: intType()},
 				},
 				[]*proto3.Value{stringProto("&1")},
 			},
@@ -701,10 +683,7 @@
 			// Field specifies INT64 type, but value is wrongly encoded.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						intType(),
-					},
+					{Name: "Col0", Type: intType()},
 				},
 				[]*proto3.Value{stringProto("&1")},
 			},
@@ -718,10 +697,7 @@
 			// Field specifies STRING type, but value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						stringType(),
-					},
+					{Name: "Col0", Type: stringType()},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}},
 			},
@@ -732,10 +708,7 @@
 			// Field specifies STRING type, but value is for ARRAY type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						stringType(),
-					},
+					{Name: "Col0", Type: stringType()},
 				},
 				[]*proto3.Value{listProto(stringProto("value"))},
 			},
@@ -746,10 +719,7 @@
 			// Field specifies FLOAT64 type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						floatType(),
-					},
+					{Name: "Col0", Type: floatType()},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_NumberValue)(nil)}},
 			},
@@ -760,10 +730,7 @@
 			// Field specifies FLOAT64 type, but value is for BOOL type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						floatType(),
-					},
+					{Name: "Col0", Type: floatType()},
 				},
 				[]*proto3.Value{boolProto(true)},
 			},
@@ -774,10 +741,7 @@
 			// Field specifies FLOAT64 type, but value is wrongly encoded.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						floatType(),
-					},
+					{Name: "Col0", Type: floatType()},
 				},
 				[]*proto3.Value{stringProto("nan")},
 			},
@@ -788,10 +752,7 @@
 			// Field specifies FLOAT64 type, but value is wrongly encoded.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						floatType(),
-					},
+					{Name: "Col0", Type: floatType()},
 				},
 				[]*proto3.Value{stringProto("nan")},
 			},
@@ -802,10 +763,7 @@
 			// Field specifies BYTES type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						bytesType(),
-					},
+					{Name: "Col0", Type: bytesType()},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}},
 			},
@@ -816,10 +774,7 @@
 			// Field specifies BYTES type, but value is for BOOL type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						bytesType(),
-					},
+					{Name: "Col0", Type: bytesType()},
 				},
 				[]*proto3.Value{boolProto(false)},
 			},
@@ -830,10 +785,7 @@
 			// Field specifies BYTES type, but value is wrongly encoded.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						bytesType(),
-					},
+					{Name: "Col0", Type: bytesType()},
 				},
 				[]*proto3.Value{stringProto("&&")},
 			},
@@ -847,10 +799,7 @@
 			// Field specifies BOOL type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						boolType(),
-					},
+					{Name: "Col0", Type: boolType()},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_BoolValue)(nil)}},
 			},
@@ -861,10 +810,7 @@
 			// Field specifies BOOL type, but value is for STRING type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						boolType(),
-					},
+					{Name: "Col0", Type: boolType()},
 				},
 				[]*proto3.Value{stringProto("false")},
 			},
@@ -875,10 +821,7 @@
 			// Field specifies TIMESTAMP type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						timeType(),
-					},
+					{Name: "Col0", Type: timeType()},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}},
 			},
@@ -889,10 +832,7 @@
 			// Field specifies TIMESTAMP type, but value is for BOOL type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						timeType(),
-					},
+					{Name: "Col0", Type: timeType()},
 				},
 				[]*proto3.Value{boolProto(false)},
 			},
@@ -903,10 +843,7 @@
 			// Field specifies TIMESTAMP type, but value is invalid timestamp.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						timeType(),
-					},
+					{Name: "Col0", Type: timeType()},
 				},
 				[]*proto3.Value{stringProto("junk")},
 			},
@@ -920,10 +857,7 @@
 			// Field specifies DATE type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						dateType(),
-					},
+					{Name: "Col0", Type: dateType()},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}},
 			},
@@ -934,10 +868,7 @@
 			// Field specifies DATE type, but value is for BOOL type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						dateType(),
-					},
+					{Name: "Col0", Type: dateType()},
 				},
 				[]*proto3.Value{boolProto(false)},
 			},
@@ -948,10 +879,7 @@
 			// Field specifies DATE type, but value is invalid timestamp.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						dateType(),
-					},
+					{Name: "Col0", Type: dateType()},
 				},
 				[]*proto3.Value{stringProto("junk")},
 			},
@@ -966,10 +894,7 @@
 			// Field specifies ARRAY<INT64> type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(intType()),
-					},
+					{Name: "Col0", Type: listType(intType())},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}},
 			},
@@ -980,10 +905,7 @@
 			// Field specifies ARRAY<INT64> type, value is having a nil ListValue.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(intType()),
-					},
+					{Name: "Col0", Type: listType(intType())},
 				},
 				[]*proto3.Value{{Kind: &proto3.Value_ListValue{}}},
 			},
@@ -994,10 +916,7 @@
 			// Field specifies ARRAY<INT64> type, but value is for BYTES type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(intType()),
-					},
+					{Name: "Col0", Type: listType(intType())},
 				},
 				[]*proto3.Value{bytesProto([]byte("value"))},
 			},
@@ -1008,10 +927,7 @@
 			// Field specifies ARRAY<INT64> type, but value is for ARRAY<BOOL> type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(intType()),
-					},
+					{Name: "Col0", Type: listType(intType())},
 				},
 				[]*proto3.Value{listProto(boolProto(true))},
 			},
@@ -1023,10 +939,7 @@
 			// Field specifies ARRAY<STRING> type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(stringType()),
-					},
+					{Name: "Col0", Type: listType(stringType())},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}},
 			},
@@ -1037,10 +950,7 @@
 			// Field specifies ARRAY<STRING> type, value is having a nil ListValue.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(stringType()),
-					},
+					{Name: "Col0", Type: listType(stringType())},
 				},
 				[]*proto3.Value{{Kind: &proto3.Value_ListValue{}}},
 			},
@@ -1051,10 +961,7 @@
 			// Field specifies ARRAY<STRING> type, but value is for BOOL type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(stringType()),
-					},
+					{Name: "Col0", Type: listType(stringType())},
 				},
 				[]*proto3.Value{boolProto(true)},
 			},
@@ -1065,10 +972,7 @@
 			// Field specifies ARRAY<STRING> type, but value is for ARRAY<BOOL> type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(stringType()),
-					},
+					{Name: "Col0", Type: listType(stringType())},
 				},
 				[]*proto3.Value{listProto(boolProto(true))},
 			},
@@ -1080,10 +984,7 @@
 			// Field specifies ARRAY<FLOAT64> type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(floatType()),
-					},
+					{Name: "Col0", Type: listType(floatType())},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}},
 			},
@@ -1094,10 +995,7 @@
 			// Field specifies ARRAY<FLOAT64> type, value is having a nil ListValue.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(floatType()),
-					},
+					{Name: "Col0", Type: listType(floatType())},
 				},
 				[]*proto3.Value{{Kind: &proto3.Value_ListValue{}}},
 			},
@@ -1108,10 +1006,7 @@
 			// Field specifies ARRAY<FLOAT64> type, but value is for STRING type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(floatType()),
-					},
+					{Name: "Col0", Type: listType(floatType())},
 				},
 				[]*proto3.Value{stringProto("value")},
 			},
@@ -1122,10 +1017,7 @@
 			// Field specifies ARRAY<FLOAT64> type, but value is for ARRAY<BOOL> type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(floatType()),
-					},
+					{Name: "Col0", Type: listType(floatType())},
 				},
 				[]*proto3.Value{listProto(boolProto(true))},
 			},
@@ -1137,10 +1029,7 @@
 			// Field specifies ARRAY<BYTES> type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(bytesType()),
-					},
+					{Name: "Col0", Type: listType(bytesType())},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}},
 			},
@@ -1151,10 +1040,7 @@
 			// Field specifies ARRAY<BYTES> type, value is having a nil ListValue.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(bytesType()),
-					},
+					{Name: "Col0", Type: listType(bytesType())},
 				},
 				[]*proto3.Value{{Kind: &proto3.Value_ListValue{}}},
 			},
@@ -1165,10 +1051,7 @@
 			// Field specifies ARRAY<BYTES> type, but value is for FLOAT64 type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(bytesType()),
-					},
+					{Name: "Col0", Type: listType(bytesType())},
 				},
 				[]*proto3.Value{floatProto(1.0)},
 			},
@@ -1179,10 +1062,7 @@
 			// Field specifies ARRAY<BYTES> type, but value is for ARRAY<FLOAT64> type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(bytesType()),
-					},
+					{Name: "Col0", Type: listType(bytesType())},
 				},
 				[]*proto3.Value{listProto(floatProto(1.0))},
 			},
@@ -1194,10 +1074,7 @@
 			// Field specifies ARRAY<BOOL> type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(boolType()),
-					},
+					{Name: "Col0", Type: listType(boolType())},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}},
 			},
@@ -1208,10 +1085,7 @@
 			// Field specifies ARRAY<BOOL> type, value is having a nil ListValue.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(boolType()),
-					},
+					{Name: "Col0", Type: listType(boolType())},
 				},
 				[]*proto3.Value{{Kind: &proto3.Value_ListValue{}}},
 			},
@@ -1222,10 +1096,7 @@
 			// Field specifies ARRAY<BOOL> type, but value is for FLOAT64 type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(boolType()),
-					},
+					{Name: "Col0", Type: listType(boolType())},
 				},
 				[]*proto3.Value{floatProto(1.0)},
 			},
@@ -1236,10 +1107,7 @@
 			// Field specifies ARRAY<BOOL> type, but value is for ARRAY<FLOAT64> type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(boolType()),
-					},
+					{Name: "Col0", Type: listType(boolType())},
 				},
 				[]*proto3.Value{listProto(floatProto(1.0))},
 			},
@@ -1251,10 +1119,7 @@
 			// Field specifies ARRAY<TIMESTAMP> type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(timeType()),
-					},
+					{Name: "Col0", Type: listType(timeType())},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}},
 			},
@@ -1265,10 +1130,7 @@
 			// Field specifies ARRAY<TIMESTAMP> type, value is having a nil ListValue.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(timeType()),
-					},
+					{Name: "Col0", Type: listType(timeType())},
 				},
 				[]*proto3.Value{{Kind: &proto3.Value_ListValue{}}},
 			},
@@ -1279,10 +1141,7 @@
 			// Field specifies ARRAY<TIMESTAMP> type, but value is for FLOAT64 type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(timeType()),
-					},
+					{Name: "Col0", Type: listType(timeType())},
 				},
 				[]*proto3.Value{floatProto(1.0)},
 			},
@@ -1293,10 +1152,7 @@
 			// Field specifies ARRAY<TIMESTAMP> type, but value is for ARRAY<FLOAT64> type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(timeType()),
-					},
+					{Name: "Col0", Type: listType(timeType())},
 				},
 				[]*proto3.Value{listProto(floatProto(1.0))},
 			},
@@ -1308,10 +1164,7 @@
 			// Field specifies ARRAY<DATE> type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(dateType()),
-					},
+					{Name: "Col0", Type: listType(dateType())},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}},
 			},
@@ -1322,10 +1175,7 @@
 			// Field specifies ARRAY<DATE> type, value is having a nil ListValue.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(dateType()),
-					},
+					{Name: "Col0", Type: listType(dateType())},
 				},
 				[]*proto3.Value{{Kind: &proto3.Value_ListValue{}}},
 			},
@@ -1336,10 +1186,7 @@
 			// Field specifies ARRAY<DATE> type, but value is for FLOAT64 type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(dateType()),
-					},
+					{Name: "Col0", Type: listType(dateType())},
 				},
 				[]*proto3.Value{floatProto(1.0)},
 			},
@@ -1350,10 +1197,7 @@
 			// Field specifies ARRAY<DATE> type, but value is for ARRAY<FLOAT64> type.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(dateType()),
-					},
+					{Name: "Col0", Type: listType(dateType())},
 				},
 				[]*proto3.Value{listProto(floatProto(1.0))},
 			},
@@ -1365,16 +1209,11 @@
 			// Field specifies ARRAY<STRUCT> type, value is having a nil Kind.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(
-							structType(
-								mkField("Col1", intType()),
-								mkField("Col2", floatType()),
-								mkField("Col3", stringType()),
-							),
-						),
-					},
+					{Name: "Col0", Type: listType(structType(
+						mkField("Col1", intType()),
+						mkField("Col2", floatType()),
+						mkField("Col3", stringType()),
+					))},
 				},
 				[]*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}},
 			},
@@ -1389,16 +1228,11 @@
 			// Field specifies ARRAY<STRUCT> type, value is having a nil ListValue.
 			&Row{
 				[]*sppb.StructType_Field{
-					{
-						"Col0",
-						listType(
-							structType(
-								mkField("Col1", intType()),
-								mkField("Col2", floatType()),
-								mkField("Col3", stringType()),
-							),
-						),
-					},
+					{Name: "Col0", Type: listType(structType(
+						mkField("Col1", intType()),
+						mkField("Col2", floatType()),
+						mkField("Col3", stringType()),
+					))},
 				},
 				[]*proto3.Value{{Kind: &proto3.Value_ListValue{}}},
 			},
@@ -1414,8 +1248,8 @@
 			&Row{
 				[]*sppb.StructType_Field{
 					{
-						"Col0",
-						listType(
+						Name: "Col0",
+						Type: listType(
 							structType(
 								mkField("Col1", intType()),
 								mkField("Col2", floatType()),
@@ -1434,8 +1268,8 @@
 			&Row{
 				[]*sppb.StructType_Field{
 					{
-						"Col0",
-						listType(
+						Name: "Col0",
+						Type: listType(
 							structType(
 								mkField("Col1", intType()),
 								mkField("Col2", floatType()),
@@ -1458,8 +1292,8 @@
 			&Row{
 				[]*sppb.StructType_Field{
 					{
-						"Col0",
-						listType(
+						Name: "Col0",
+						Type: listType(
 							structType(
 								mkField("Col1", intType()),
 								mkField("Col2", floatType()),
@@ -1478,8 +1312,8 @@
 			&Row{
 				[]*sppb.StructType_Field{
 					{
-						"Col0",
-						listType(
+						Name: "Col0",
+						Type: listType(
 							structType(
 								mkField("Col1", intType()),
 								mkField("Col2", floatType()),
@@ -1503,10 +1337,7 @@
 			&Row{
 				[]*sppb.StructType_Field{
 					{
-						"Col0",
-						listType(
-							&sppb.Type{Code: sppb.TypeCode_STRUCT},
-						),
+						Name: "Col0", Type: listType(&sppb.Type{Code: sppb.TypeCode_STRUCT}),
 					},
 				},
 				[]*proto3.Value{listProto(listProto(intProto(1), floatProto(2.0), stringProto("3")))},
@@ -1524,8 +1355,8 @@
 			&Row{
 				[]*sppb.StructType_Field{
 					{
-						"Col0",
-						listType(
+						Name: "Col0",
+						Type: listType(
 							structType(
 								mkField("Col1", intType()),
 								mkField("Col2", floatType()),
@@ -1699,8 +1530,8 @@
 	)
 	r := Row{
 		[]*sppb.StructType_Field{
-			{"F1", stringType()},
-			{"F2", stringType()},
+			{Name: "F1", Type: stringType()},
+			{Name: "F2", Type: stringType()},
 		},
 		[]*proto3.Value{
 			stringProto("v1"),
@@ -1777,9 +1608,9 @@
 			values: []interface{}{5, "abc", GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}},
 			want: &Row{
 				[]*sppb.StructType_Field{
-					{"a", intType()},
-					{"b", stringType()},
-					{"c", listType(intType())},
+					{Name: "a", Type: intType()},
+					{Name: "b", Type: stringType()},
+					{Name: "c", Type: listType(intType())},
 				},
 				[]*proto3.Value{
 					intProto(5),
diff --git a/spanner/session.go b/spanner/session.go
index f4d203d..c667c83 100644
--- a/spanner/session.go
+++ b/spanner/session.go
@@ -1072,7 +1072,7 @@
 	}
 	// If a Cloud Spanner can no longer locate the session (for example, if session is garbage collected), then caller
 	// should not try to return the session back into the session pool.
-	// TODO: once gRPC can return auxilary error information, stop parsing the error message.
+	// TODO: once gRPC can return auxiliary error information, stop parsing the error message.
 	if ErrCode(err) == codes.NotFound && strings.Contains(ErrDesc(err), "Session not found:") {
 		return true
 	}
diff --git a/spanner/spanner_test.go b/spanner/spanner_test.go
index d8dbec1..d5a56f3 100644
--- a/spanner/spanner_test.go
+++ b/spanner/spanner_test.go
@@ -183,7 +183,7 @@
 	}
 	return client, dbPath, func() {
 		client.Close()
-		if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{dbPath}); err != nil {
+		if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{Database: dbPath}); err != nil {
 			t.Logf("failed to drop database %s (error %v), might need a manual removal",
 				dbPath, err)
 		}
@@ -672,11 +672,10 @@
 				}
 				bf--
 				bb++
-				tx.BufferWrite([]*Mutation{
+				return tx.BufferWrite([]*Mutation{
 					Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), bf}),
 					Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), bb}),
 				})
-				return nil
 			})
 			if err != nil {
 				t.Fatalf("%d: failed to execute transaction: %v", iter, err)
@@ -924,7 +923,7 @@
 	ctx := context.Background()
 	client, _, tearDown := prepare(ctx, t, singerDBStatements)
 	defer tearDown()
-	client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
+	_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
 		_, err := client.ReadWriteTransaction(ctx,
 			func(context.Context, *ReadWriteTransaction) error { return nil })
 		if ErrCode(err) != codes.FailedPrecondition {
@@ -942,6 +941,9 @@
 		}
 		return nil
 	})
+	if err != nil {
+		t.Fatal(err)
+	}
 }
 
 // Test client recovery on database recreation.
@@ -953,7 +955,7 @@
 	defer tearDown()
 
 	// Drop the testing database.
-	if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{dbPath}); err != nil {
+	if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{Database: dbPath}); err != nil {
 		t.Fatalf("failed to drop testing database %v: %v", dbPath, err)
 	}
 
@@ -1526,8 +1528,11 @@
 			}
 			// txn 1 can abort, in that case we skip closing the channel on retry.
 			once.Do(func() { close(cTxn1Start) })
-			tx.BufferWrite([]*Mutation{
+			e = tx.BufferWrite([]*Mutation{
 				Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), int64(b + 1)})})
+			if e != nil {
+				return e
+			}
 			// Wait for second transaction.
 			<-cTxn2Start
 			return nil
@@ -1563,9 +1568,8 @@
 			if b2, e = readBalance(tx, 2, true); e != nil {
 				return e
 			}
-			tx.BufferWrite([]*Mutation{
+			return tx.BufferWrite([]*Mutation{
 				Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), int64(b1 + b2)})})
-			return nil
 		})
 		if e != nil {
 			t.Errorf("Transaction 2 commit, got %v, want nil.", e)
diff --git a/spanner/transaction.go b/spanner/transaction.go
index c35ba4e..9ca62b3 100644
--- a/spanner/transaction.go
+++ b/spanner/transaction.go
@@ -329,7 +329,7 @@
 		}
 		t.mu.Unlock()
 		if err != nil && sh != nil {
-			// Got a valid session handle, but failed to initalize transaction on Cloud Spanner.
+			// Got a valid session handle, but failed to initialize transaction on Cloud Spanner.
 			if shouldDropSession(err) {
 				sh.destroy()
 			}
@@ -623,7 +623,7 @@
 	mu sync.Mutex
 	// state is the current transaction status of the read-write transaction.
 	state txState
-	// wb is the set of buffered mutations waiting to be commited.
+	// wb is the set of buffered mutations waiting to be committed.
 	wb []*Mutation
 }
 
@@ -720,7 +720,7 @@
 func (t *ReadWriteTransaction) commit(ctx context.Context) (time.Time, error) {
 	var ts time.Time
 	t.mu.Lock()
-	t.state = txClosed // No futher operations after commit.
+	t.state = txClosed // No further operations after commit.
 	mPb, err := mutationsProto(t.wb)
 	t.mu.Unlock()
 	if err != nil {
@@ -809,9 +809,9 @@
 	sp *sessionPool
 }
 
-// applyAtLeastOnce commits a list of mutations to Cloud Spanner for at least once, unless one of the following happends:
-//     1) Context is timeout.
-//     2) An unretryable error(e.g. database not found) occurs.
+// applyAtLeastOnce commits a list of mutations to Cloud Spanner at least once, unless one of the following happens:
+//     1) Context times out.
+//     2) An unretryable error (e.g. database not found) occurs.
 //     3) There is a malformed Mutation object.
 func (t *writeOnlyTransaction) applyAtLeastOnce(ctx context.Context, ms ...*Mutation) (time.Time, error) {
 	var (
diff --git a/spanner/value.go b/spanner/value.go
index 9497443..1e6b466 100644
--- a/spanner/value.go
+++ b/spanner/value.go
@@ -1054,7 +1054,7 @@
 }
 
 // decodeRowArray decodes proto3.ListValue pb into a NullRow slice according to
-// the structual information given in sppb.StructType ty.
+// the structural information given in sppb.StructType ty.
 func decodeRowArray(ty *sppb.StructType, pb *proto3.ListValue) ([]NullRow, error) {
 	if pb == nil {
 		return nil, errNilListValue("STRUCT")
@@ -1114,7 +1114,7 @@
 }
 
 // decodeStruct decodes proto3.ListValue pb into struct referenced by pointer ptr, according to
-// the structual information given in sppb.StructType ty.
+// the structural information given in sppb.StructType ty.
 func decodeStruct(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error {
 	if reflect.ValueOf(ptr).IsNil() {
 		return errNilDst(ptr)
@@ -1122,7 +1122,7 @@
 	if ty == nil {
 		return errNilSpannerStructType()
 	}
-	// t holds the structual information of ptr.
+	// t holds the structural information of ptr.
 	t := reflect.TypeOf(ptr).Elem()
 	// v is the actual value that ptr points to.
 	v := reflect.ValueOf(ptr).Elem()
@@ -1168,7 +1168,7 @@
 }
 
 // decodeStructArray decodes proto3.ListValue pb into struct slice referenced by pointer ptr, according to the
-// structual information given in a sppb.StructType.
+// structural information given in a sppb.StructType.
 func decodeStructArray(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error {
 	if pb == nil {
 		return errNilListValue("STRUCT")
diff --git a/dlp/apiv2beta1/InspectContent_smoke_test.go b/speech/apiv1p1beta1/Recognize_smoke_test.go
similarity index 64%
rename from dlp/apiv2beta1/InspectContent_smoke_test.go
rename to speech/apiv1p1beta1/Recognize_smoke_test.go
index 3c66669..9df22c3 100644
--- a/dlp/apiv2beta1/InspectContent_smoke_test.go
+++ b/speech/apiv1p1beta1/Recognize_smoke_test.go
@@ -14,10 +14,10 @@
 
 // AUTO-GENERATED CODE. DO NOT EDIT.
 
-package dlp
+package speech
 
 import (
-	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
+	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1"
 )
 
 import (
@@ -37,7 +37,7 @@
 var _ = strconv.FormatUint
 var _ = time.Now
 
-func TestDlpServiceSmoke(t *testing.T) {
+func TestSpeechSmoke(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping smoke test in short mode")
 	}
@@ -55,25 +55,26 @@
 		t.Fatal(err)
 	}
 
-	var minLikelihood dlppb.Likelihood = dlppb.Likelihood_POSSIBLE
-	var inspectConfig = &dlppb.InspectConfig{
-		MinLikelihood: minLikelihood,
+	var languageCode string = "en-US"
+	var sampleRateHertz int32 = 44100
+	var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC
+	var config = &speechpb.RecognitionConfig{
+		LanguageCode:    languageCode,
+		SampleRateHertz: sampleRateHertz,
+		Encoding:        encoding,
 	}
-	var type_ string = "text/plain"
-	var value string = "my phone number is 215-512-1212"
-	var itemsElement = &dlppb.ContentItem{
-		Type: type_,
-		DataItem: &dlppb.ContentItem_Value{
-			Value: value,
+	var uri string = "gs://gapic-toolkit/hello.flac"
+	var audio = &speechpb.RecognitionAudio{
+		AudioSource: &speechpb.RecognitionAudio_Uri{
+			Uri: uri,
 		},
 	}
-	var items = []*dlppb.ContentItem{itemsElement}
-	var request = &dlppb.InspectContentRequest{
-		InspectConfig: inspectConfig,
-		Items:         items,
+	var request = &speechpb.RecognizeRequest{
+		Config: config,
+		Audio:  audio,
 	}
 
-	if _, err := c.InspectContent(ctx, request); err != nil {
+	if _, err := c.Recognize(ctx, request); err != nil {
 		t.Error(err)
 	}
 }
diff --git a/dlp/apiv2beta1/doc.go b/speech/apiv1p1beta1/doc.go
similarity index 80%
copy from dlp/apiv2beta1/doc.go
copy to speech/apiv1p1beta1/doc.go
index 28a2d82..6781bd1 100644
--- a/dlp/apiv2beta1/doc.go
+++ b/speech/apiv1p1beta1/doc.go
@@ -14,15 +14,13 @@
 
 // AUTO-GENERATED CODE. DO NOT EDIT.
 
-// Package dlp is an auto-generated package for the
-// DLP API.
+// Package speech is an auto-generated package for the
+// Cloud Speech API.
 //
 //   NOTE: This package is in alpha. It is not stable, and is likely to change.
 //
-// The Google Data Loss Prevention API provides methods for detection of
-// privacy-sensitive fragments in text, images, and Google Cloud Platform
-// storage repositories.
-package dlp // import "cloud.google.com/go/dlp/apiv2beta1"
+// Converts audio to text by applying powerful neural network models.
+package speech // import "cloud.google.com/go/speech/apiv1p1beta1"
 
 import (
 	"golang.org/x/net/context"
diff --git a/speech/apiv1p1beta1/mock_test.go b/speech/apiv1p1beta1/mock_test.go
new file mode 100644
index 0000000..21e6705
--- /dev/null
+++ b/speech/apiv1p1beta1/mock_test.go
@@ -0,0 +1,405 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package speech
+
+import (
+	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1"
+	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+)
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	status "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	gstatus "google.golang.org/grpc/status"
+)
+
+var _ = io.EOF
+var _ = ptypes.MarshalAny
+var _ status.Status
+
+type mockSpeechServer struct {
+	// Embed for forward compatibility.
+	// Tests will keep working if more methods are added
+	// in the future.
+	speechpb.SpeechServer
+
+	reqs []proto.Message
+
+	// If set, all calls return this error.
+	err error
+
+	// responses to return if err == nil
+	resps []proto.Message
+}
+
+func (s *mockSpeechServer) Recognize(ctx context.Context, req *speechpb.RecognizeRequest) (*speechpb.RecognizeResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*speechpb.RecognizeResponse), nil
+}
+
+func (s *mockSpeechServer) LongRunningRecognize(ctx context.Context, req *speechpb.LongRunningRecognizeRequest) (*longrunningpb.Operation, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*longrunningpb.Operation), nil
+}
+
+func (s *mockSpeechServer) StreamingRecognize(stream speechpb.Speech_StreamingRecognizeServer) error {
+	md, _ := metadata.FromIncomingContext(stream.Context())
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	for {
+		if req, err := stream.Recv(); err == io.EOF {
+			break
+		} else if err != nil {
+			return err
+		} else {
+			s.reqs = append(s.reqs, req)
+		}
+	}
+	if s.err != nil {
+		return s.err
+	}
+	for _, v := range s.resps {
+		if err := stream.Send(v.(*speechpb.StreamingRecognizeResponse)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// clientOpt is the option tests should use to connect to the test server.
+// It is initialized by TestMain.
+var clientOpt option.ClientOption
+
+var (
+	mockSpeech mockSpeechServer
+)
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	serv := grpc.NewServer()
+	speechpb.RegisterSpeechServer(serv, &mockSpeech)
+
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		log.Fatal(err)
+	}
+	go serv.Serve(lis)
+
+	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
+	if err != nil {
+		log.Fatal(err)
+	}
+	clientOpt = option.WithGRPCConn(conn)
+
+	os.Exit(m.Run())
+}
+
+func TestSpeechRecognize(t *testing.T) {
+	var expectedResponse *speechpb.RecognizeResponse = &speechpb.RecognizeResponse{}
+
+	mockSpeech.err = nil
+	mockSpeech.reqs = nil
+
+	mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse)
+
+	var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC
+	var sampleRateHertz int32 = 44100
+	var languageCode string = "en-US"
+	var config = &speechpb.RecognitionConfig{
+		Encoding:        encoding,
+		SampleRateHertz: sampleRateHertz,
+		LanguageCode:    languageCode,
+	}
+	var uri string = "gs://bucket_name/file_name.flac"
+	var audio = &speechpb.RecognitionAudio{
+		AudioSource: &speechpb.RecognitionAudio_Uri{
+			Uri: uri,
+		},
+	}
+	var request = &speechpb.RecognizeRequest{
+		Config: config,
+		Audio:  audio,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.Recognize(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestSpeechRecognizeError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockSpeech.err = gstatus.Error(errCode, "test error")
+
+	var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC
+	var sampleRateHertz int32 = 44100
+	var languageCode string = "en-US"
+	var config = &speechpb.RecognitionConfig{
+		Encoding:        encoding,
+		SampleRateHertz: sampleRateHertz,
+		LanguageCode:    languageCode,
+	}
+	var uri string = "gs://bucket_name/file_name.flac"
+	var audio = &speechpb.RecognitionAudio{
+		AudioSource: &speechpb.RecognitionAudio_Uri{
+			Uri: uri,
+		},
+	}
+	var request = &speechpb.RecognizeRequest{
+		Config: config,
+		Audio:  audio,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.Recognize(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestSpeechLongRunningRecognize(t *testing.T) {
+	var expectedResponse *speechpb.LongRunningRecognizeResponse = &speechpb.LongRunningRecognizeResponse{}
+
+	mockSpeech.err = nil
+	mockSpeech.reqs = nil
+
+	any, err := ptypes.MarshalAny(expectedResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+	mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{
+		Name:   "longrunning-test",
+		Done:   true,
+		Result: &longrunningpb.Operation_Response{Response: any},
+	})
+
+	var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC
+	var sampleRateHertz int32 = 44100
+	var languageCode string = "en-US"
+	var config = &speechpb.RecognitionConfig{
+		Encoding:        encoding,
+		SampleRateHertz: sampleRateHertz,
+		LanguageCode:    languageCode,
+	}
+	var uri string = "gs://bucket_name/file_name.flac"
+	var audio = &speechpb.RecognitionAudio{
+		AudioSource: &speechpb.RecognitionAudio_Uri{
+			Uri: uri,
+		},
+	}
+	var request = &speechpb.LongRunningRecognizeRequest{
+		Config: config,
+		Audio:  audio,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	respLRO, err := c.LongRunningRecognize(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+	resp, err := respLRO.Wait(context.Background())
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestSpeechLongRunningRecognizeError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockSpeech.err = nil
+	mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{
+		Name: "longrunning-test",
+		Done: true,
+		Result: &longrunningpb.Operation_Error{
+			Error: &status.Status{
+				Code:    int32(errCode),
+				Message: "test error",
+			},
+		},
+	})
+
+	var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC
+	var sampleRateHertz int32 = 44100
+	var languageCode string = "en-US"
+	var config = &speechpb.RecognitionConfig{
+		Encoding:        encoding,
+		SampleRateHertz: sampleRateHertz,
+		LanguageCode:    languageCode,
+	}
+	var uri string = "gs://bucket_name/file_name.flac"
+	var audio = &speechpb.RecognitionAudio{
+		AudioSource: &speechpb.RecognitionAudio_Uri{
+			Uri: uri,
+		},
+	}
+	var request = &speechpb.LongRunningRecognizeRequest{
+		Config: config,
+		Audio:  audio,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	respLRO, err := c.LongRunningRecognize(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+	resp, err := respLRO.Wait(context.Background())
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestSpeechStreamingRecognize(t *testing.T) {
+	var expectedResponse *speechpb.StreamingRecognizeResponse = &speechpb.StreamingRecognizeResponse{}
+
+	mockSpeech.err = nil
+	mockSpeech.reqs = nil
+
+	mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse)
+
+	var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	stream, err := c.StreamingRecognize(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := stream.Send(request); err != nil {
+		t.Fatal(err)
+	}
+	if err := stream.CloseSend(); err != nil {
+		t.Fatal(err)
+	}
+	resp, err := stream.Recv()
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestSpeechStreamingRecognizeError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockSpeech.err = gstatus.Error(errCode, "test error")
+
+	var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	stream, err := c.StreamingRecognize(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := stream.Send(request); err != nil {
+		t.Fatal(err)
+	}
+	if err := stream.CloseSend(); err != nil {
+		t.Fatal(err)
+	}
+	resp, err := stream.Recv()
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
diff --git a/speech/apiv1p1beta1/speech_client.go b/speech/apiv1p1beta1/speech_client.go
new file mode 100644
index 0000000..3ee77b9
--- /dev/null
+++ b/speech/apiv1p1beta1/speech_client.go
@@ -0,0 +1,263 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package speech
+
+import (
+	"time"
+
+	"cloud.google.com/go/internal/version"
+	"cloud.google.com/go/longrunning"
+	lroauto "cloud.google.com/go/longrunning/autogen"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1"
+	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+// CallOptions contains the retry settings for each method of Client.
+type CallOptions struct {
+	Recognize            []gax.CallOption
+	LongRunningRecognize []gax.CallOption
+	StreamingRecognize   []gax.CallOption
+}
+
+func defaultClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("speech.googleapis.com:443"),
+		option.WithScopes(DefaultAuthScopes()...),
+	}
+}
+
+func defaultCallOptions() *CallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+	}
+	return &CallOptions{
+		Recognize:            retry[[2]string{"default", "idempotent"}],
+		LongRunningRecognize: retry[[2]string{"default", "non_idempotent"}],
+		StreamingRecognize:   retry[[2]string{"default", "idempotent"}],
+	}
+}
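As a hedged illustration, these defaults can be overridden for a single call by passing a gax.CallOption built from the same helpers used above; c, ctx, and req are assumed placeholders:

	// Sketch: tighten the retry/backoff policy for one Recognize call.
	customRetry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    250 * time.Millisecond,
			Max:        30 * time.Second,
			Multiplier: 2,
		})
	})
	resp, err := c.Recognize(ctx, req, customRetry) // per-call options are appended after the defaults
	_, _ = resp, err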
+
+// Client is a client for interacting with Cloud Speech API.
+type Client struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	client speechpb.SpeechClient
+
+	// LROClient is used internally to handle longrunning operations.
+	// It is exposed so that its CallOptions can be modified if required.
+	// Users should not Close this client.
+	LROClient *lroauto.OperationsClient
+
+	// The call options for this service.
+	CallOptions *CallOptions
+
+	// The x-goog-* metadata to be sent with each request.
+	xGoogMetadata metadata.MD
+}
+
+// NewClient creates a new speech client.
+//
+// Service that implements Google Cloud Speech API.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &Client{
+		conn:        conn,
+		CallOptions: defaultCallOptions(),
+
+		client: speechpb.NewSpeechClient(conn),
+	}
+	c.setGoogleClientInfo()
+
+	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
+	if err != nil {
+		// This error "should not happen", since we are just reusing old connection
+		// and never actually need to dial.
+		// If this does happen, we could leak conn. However, we cannot close conn:
+		// If the user invoked the function with option.WithGRPCConn,
+		// we would close a connection that's still in use.
+		// TODO(pongad): investigate error conditions.
+		return nil, err
+	}
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *Client) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *Client) Close() error {
+	return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *Client) setGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", version.Go()}, keyval...)
+	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// Recognize performs synchronous speech recognition: receive results after all audio
+// has been sent and processed.
+func (c *Client) Recognize(ctx context.Context, req *speechpb.RecognizeRequest, opts ...gax.CallOption) (*speechpb.RecognizeResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.Recognize[0:len(c.CallOptions.Recognize):len(c.CallOptions.Recognize)], opts...)
+	var resp *speechpb.RecognizeResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.Recognize(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// LongRunningRecognize performs asynchronous speech recognition: receive results via the
+// google.longrunning.Operations interface. Returns either an
+// Operation.error or an Operation.response which contains
+// a LongRunningRecognizeResponse message.
+func (c *Client) LongRunningRecognize(ctx context.Context, req *speechpb.LongRunningRecognizeRequest, opts ...gax.CallOption) (*LongRunningRecognizeOperation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.LongRunningRecognize[0:len(c.CallOptions.LongRunningRecognize):len(c.CallOptions.LongRunningRecognize)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.LongRunningRecognize(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &LongRunningRecognizeOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, resp),
+	}, nil
+}
+
+// StreamingRecognize performs bidirectional streaming speech recognition: receive results while
+// sending audio. This method is only available via the gRPC API (not REST).
+func (c *Client) StreamingRecognize(ctx context.Context, opts ...gax.CallOption) (speechpb.Speech_StreamingRecognizeClient, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.StreamingRecognize[0:len(c.CallOptions.StreamingRecognize):len(c.CallOptions.StreamingRecognize)], opts...)
+	var resp speechpb.Speech_StreamingRecognizeClient
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.StreamingRecognize(ctx, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// LongRunningRecognizeOperation manages a long-running operation from LongRunningRecognize.
+type LongRunningRecognizeOperation struct {
+	lro *longrunning.Operation
+}
+
+// LongRunningRecognizeOperation returns a new LongRunningRecognizeOperation from a given name.
+// The name must be that of a previously created LongRunningRecognizeOperation, possibly from a different process.
+func (c *Client) LongRunningRecognizeOperation(name string) *LongRunningRecognizeOperation {
+	return &LongRunningRecognizeOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
+	}
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *LongRunningRecognizeOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*speechpb.LongRunningRecognizeResponse, error) {
+	var resp speechpb.LongRunningRecognizeResponse
+	if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *LongRunningRecognizeOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*speechpb.LongRunningRecognizeResponse, error) {
+	var resp speechpb.LongRunningRecognizeResponse
+	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+		return nil, err
+	}
+	if !op.Done() {
+		return nil, nil
+	}
+	return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *LongRunningRecognizeOperation) Metadata() (*speechpb.LongRunningRecognizeMetadata, error) {
+	var meta speechpb.LongRunningRecognizeMetadata
+	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *LongRunningRecognizeOperation) Done() bool {
+	return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *LongRunningRecognizeOperation) Name() string {
+	return op.lro.Name()
+}
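A hedged sketch of the reattach-and-poll flow described by the comments above; the operation name is a placeholder, and the client and context are assumed to exist:

	// Sketch: resume a LongRunningRecognize started elsewhere and poll it to completion.
	op := c.LongRunningRecognizeOperation("projects/P/operations/OPERATION_ID") // hypothetical name
	for {
		resp, err := op.Poll(ctx)
		if err != nil {
			// either Poll itself failed, or the operation completed with an error
			break
		}
		if op.Done() {
			_ = resp // operation finished successfully; use the response
			break
		}
		time.Sleep(30 * time.Second) // still running; wait before polling again
	}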
diff --git a/speech/apiv1p1beta1/speech_client_example_test.go b/speech/apiv1p1beta1/speech_client_example_test.go
new file mode 100644
index 0000000..c3ccb07
--- /dev/null
+++ b/speech/apiv1p1beta1/speech_client_example_test.go
@@ -0,0 +1,110 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package speech_test
+
+import (
+	"io"
+
+	"cloud.google.com/go/speech/apiv1p1beta1"
+	"golang.org/x/net/context"
+	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1"
+)
+
+func ExampleNewClient() {
+	ctx := context.Background()
+	c, err := speech.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use client.
+	_ = c
+}
+
+func ExampleClient_Recognize() {
+	ctx := context.Background()
+	c, err := speech.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &speechpb.RecognizeRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.Recognize(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_LongRunningRecognize() {
+	ctx := context.Background()
+	c, err := speech.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &speechpb.LongRunningRecognizeRequest{
+		// TODO: Fill request struct fields.
+	}
+	op, err := c.LongRunningRecognize(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	resp, err := op.Wait(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_StreamingRecognize() {
+	ctx := context.Background()
+	c, err := speech.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	stream, err := c.StreamingRecognize(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	go func() {
+		reqs := []*speechpb.StreamingRecognizeRequest{
+			// TODO: Create requests.
+		}
+		for _, req := range reqs {
+			if err := stream.Send(req); err != nil {
+				// TODO: Handle error.
+			}
+		}
+		stream.CloseSend()
+	}()
+	for {
+		resp, err := stream.Recv()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			// TODO: handle error.
+		}
+		// TODO: Use resp.
+		_ = resp
+	}
+}
diff --git a/storage/bucket.go b/storage/bucket.go
index d484e42..83bf9ef 100644
--- a/storage/bucket.go
+++ b/storage/bucket.go
@@ -309,7 +309,7 @@
 	rfc3339Date = "2006-01-02"
 
 	// DeleteAction is a lifecycle action that deletes a live and/or archived
-	// objects. Takes precendence over SetStorageClass actions.
+	// objects. Takes precedence over SetStorageClass actions.
 	DeleteAction = "Delete"
 
 	// SetStorageClassAction changes the storage class of live and/or archived
@@ -533,6 +533,9 @@
 	// configuration.
 	Encryption *BucketEncryption
 
+	// If set, replaces the lifecycle configuration of the bucket.
+	Lifecycle *Lifecycle
+
 	setLabels    map[string]string
 	deleteLabels map[string]bool
 }
@@ -589,6 +592,9 @@
 			rb.Encryption = ua.Encryption.toRawBucketEncryption()
 		}
 	}
+	if ua.Lifecycle != nil {
+		rb.Lifecycle = toRawLifecycle(*ua.Lifecycle)
+	}
 	if ua.setLabels != nil || ua.deleteLabels != nil {
 		rb.Labels = map[string]string{}
 		for k, v := range ua.setLabels {
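A hedged example of how the new Lifecycle field could be used through BucketHandle.Update, written from inside the storage package as the tests below are; the bucket name is a placeholder:

	// Sketch: replace the bucket's lifecycle so live objects older than 30 days are deleted.
	bkt := client.Bucket("my-bucket") // hypothetical bucket
	attrs, err := bkt.Update(ctx, BucketAttrsToUpdate{
		Lifecycle: &Lifecycle{
			Rules: []LifecycleRule{{
				Action:    LifecycleAction{Type: "Delete"},
				Condition: LifecycleCondition{AgeInDays: 30},
			}},
		},
	})
	if err != nil {
		// handle error
	}
	_ = attrs.Lifecycle // reflects the configuration just applied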
diff --git a/storage/bucket_test.go b/storage/bucket_test.go
index 36fa482..841b14a 100644
--- a/storage/bucket_test.go
+++ b/storage/bucket_test.go
@@ -100,6 +100,14 @@
 		RequesterPays:     false,
 		RetentionPolicy:   &RetentionPolicy{RetentionPeriod: time.Hour},
 		Encryption:        &BucketEncryption{DefaultKMSKeyName: "key2"},
+		Lifecycle: &Lifecycle{
+			Rules: []LifecycleRule{
+				{
+					Action:    LifecycleAction{Type: "Delete"},
+					Condition: LifecycleCondition{AgeInDays: 30},
+				},
+			},
+		},
 	}
 	au.SetLabel("a", "foo")
 	au.DeleteLabel("b")
@@ -121,6 +129,14 @@
 		RetentionPolicy: &raw.BucketRetentionPolicy{RetentionPeriod: 3600},
 		Encryption:      &raw.BucketEncryption{DefaultKmsKeyName: "key2"},
 		NullFields:      []string{"Labels.b"},
+		Lifecycle: &raw.BucketLifecycle{
+			Rule: []*raw.BucketLifecycleRule{
+				{
+					Action:    &raw.BucketLifecycleRuleAction{Type: "Delete"},
+					Condition: &raw.BucketLifecycleRuleCondition{Age: 30},
+				},
+			},
+		},
 	}
 	if msg := testutil.Diff(got, want); msg != "" {
 		t.Error(msg)
diff --git a/storage/copy.go b/storage/copy.go
index b7589b0..50589e0 100644
--- a/storage/copy.go
+++ b/storage/copy.go
@@ -63,6 +63,10 @@
 	// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
 	// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
 	// any.
+	//
+	// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
+	// (via ObjectHandle.Key) on the destination object will result in an error when
+	// Run is called.
 	DestinationKMSKeyName string
 
 	dst, src *ObjectHandle
@@ -79,6 +83,9 @@
 	if err := c.dst.validate(); err != nil {
 		return nil, err
 	}
+	if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
+		return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
+	}
 	// Convert destination attributes to raw form, omitting the bucket.
 	// If the bucket is included but name or content-type aren't, the service
 	// returns a 400 with "Required" as the only message. Omitting the bucket
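A hedged sketch of the intended usage per the comment above: set DestinationKMSKeyName on the Copier and do not also supply a customer key on the destination handle. Bucket, object, and key names are placeholders:

	// Sketch: server-side copy whose destination is encrypted with a Cloud KMS key.
	src := client.Bucket("my-bucket").Object("plain")
	dst := client.Bucket("my-bucket").Object("kms-encrypted") // note: no .Key(...) on the destination
	copier := dst.CopierFrom(src)
	copier.DestinationKMSKeyName = "projects/P/locations/L/keyRings/R/cryptoKeys/K"
	if _, err := copier.Run(ctx); err != nil {
		// a customer-supplied key on dst would have triggered the error added above
	}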
diff --git a/storage/copy_test.go b/storage/copy_test.go
new file mode 100644
index 0000000..0f87c30
--- /dev/null
+++ b/storage/copy_test.go
@@ -0,0 +1,80 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+)
+
+func TestCopyMissingFields(t *testing.T) {
+	// Verify that copying checks for missing fields.
+	t.Parallel()
+	var tests = []struct {
+		srcBucket, srcName, destBucket, destName string
+		errMsg                                   string
+	}{
+		{
+			"mybucket", "", "mybucket", "destname",
+			"name is empty",
+		},
+		{
+			"mybucket", "srcname", "mybucket", "",
+			"name is empty",
+		},
+		{
+			"", "srcfile", "mybucket", "destname",
+			"name is empty",
+		},
+		{
+			"mybucket", "srcfile", "", "destname",
+			"name is empty",
+		},
+	}
+	ctx := context.Background()
+	client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}}))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, test := range tests {
+		src := client.Bucket(test.srcBucket).Object(test.srcName)
+		dst := client.Bucket(test.destBucket).Object(test.destName)
+		_, err := dst.CopierFrom(src).Run(ctx)
+		if !strings.Contains(err.Error(), test.errMsg) {
+			t.Errorf("CopyTo test #%v:\ngot err  %q\nwant err %q", i, err, test.errMsg)
+		}
+	}
+}
+
+func TestCopyBothEncryptionKeys(t *testing.T) {
+	// Test that using both a customer-supplied key and a KMS key is an error.
+	ctx := context.Background()
+	client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}}))
+	if err != nil {
+		t.Fatal(err)
+	}
+	dest := client.Bucket("b").Object("d").Key(testEncryptionKey)
+	c := dest.CopierFrom(client.Bucket("b").Object("s"))
+	c.DestinationKMSKeyName = "key"
+	if _, err := c.Run(ctx); err == nil {
+		t.Error("got nil, want error")
+	} else if !strings.Contains(err.Error(), "KMS") {
+		t.Errorf(`got %q, want it to contain "KMS"`, err)
+	}
+}
diff --git a/storage/doc.go b/storage/doc.go
index 1740708..96731d3 100644
--- a/storage/doc.go
+++ b/storage/doc.go
@@ -64,7 +64,7 @@
 
 Each bucket has associated metadata, represented in this package by
 BucketAttrs. The third argument to BucketHandle.Create allows you to set
-the intial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
+the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
 Attrs:
 
     attrs, err := bkt.Attrs(ctx)
diff --git a/storage/integration_test.go b/storage/integration_test.go
index 3918d63..d9ef188 100644
--- a/storage/integration_test.go
+++ b/storage/integration_test.go
@@ -43,6 +43,7 @@
 
 	"cloud.google.com/go/iam"
 	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/internal/uid"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/api/iterator"
 	itesting "google.golang.org/api/iterator/testing"
@@ -52,7 +53,7 @@
 const testPrefix = "go-integration-test"
 
 var (
-	uidSpace   = testutil.NewUIDSpace(testPrefix)
+	uidSpace   = uid.NewSpace(testPrefix, nil)
 	bucketName = uidSpace.New()
 )
 
@@ -251,6 +252,21 @@
 	if !testutil.Equal(attrs.Labels, wantLabels) {
 		t.Fatalf("got %v, want %v", attrs.Labels, wantLabels)
 	}
+
+	// Configure a lifecycle
+	wantLifecycle := Lifecycle{
+		Rules: []LifecycleRule{
+			{
+				Action:    LifecycleAction{Type: "Delete"},
+				Condition: LifecycleCondition{AgeInDays: 30},
+			},
+		},
+	}
+	ua = BucketAttrsToUpdate{Lifecycle: &wantLifecycle}
+	attrs = h.mustUpdateBucket(b, ua)
+	if !testutil.Equal(attrs.Lifecycle, wantLifecycle) {
+		t.Fatalf("got %v, want %v", attrs.Lifecycle, wantLifecycle)
+	}
 }
 
 func TestIntegration_ConditionalDelete(t *testing.T) {
@@ -1246,7 +1262,7 @@
 
 	write := func(w *Writer) error {
 		if _, err := w.Write(data); err != nil {
-			w.Close()
+			_ = w.Close()
 			return err
 		}
 		return w.Close()
@@ -1377,7 +1393,7 @@
 	h := testHelper{t}
 
 	bucketName2 := uidSpace.New()
-	b := client.Bucket(bucketName2)
+	b1 := client.Bucket(bucketName2)
 	projID := testutil.ProjID()
 	// Use Firestore project as a project that does not contain the bucket.
 	otherProjID := os.Getenv(envFirestoreProjID)
@@ -1393,7 +1409,7 @@
 		t.Fatal(err)
 	}
 	defer otherClient.Close()
-	ob := otherClient.Bucket(bucketName2)
+	b2 := otherClient.Bucket(bucketName2)
 	user, err := keyFileEmail(os.Getenv("GCLOUD_TESTS_GOLANG_KEY"))
 	if err != nil {
 		t.Fatal(err)
@@ -1404,8 +1420,8 @@
 	}
 
 	// Create a requester-pays bucket. The bucket is contained in the project projID.
-	h.mustCreate(b, projID, &BucketAttrs{RequesterPays: true})
-	if err := b.ACL().Set(ctx, ACLEntity("user-"+otherUser), RoleOwner); err != nil {
+	h.mustCreate(b1, projID, &BucketAttrs{RequesterPays: true})
+	if err := b1.ACL().Set(ctx, ACLEntity("user-"+otherUser), RoleOwner); err != nil {
 		t.Fatal(err)
 	}
 
@@ -1429,7 +1445,7 @@
 		// user: an Owner on the containing project
 		// userProject: absent
 		// result: success, by the rule permitting access by owners of the containing bucket.
-		if err := f(b); err != nil {
+		if err := f(b1); err != nil {
 			t.Errorf("%s: %v, want nil\n"+
 				"confirm that %s is an Owner on %s",
 				msg, err, user, projID)
@@ -1437,13 +1453,13 @@
 		// user: an Owner on the containing project
 		// userProject: containing project
 		// result: success, by the same rule as above; userProject is unnecessary but allowed.
-		if err := f(b.UserProject(projID)); err != nil {
+		if err := f(b1.UserProject(projID)); err != nil {
 			t.Errorf("%s: got %v, want nil", msg, err)
 		}
 		// user: not an Owner on the containing project
 		// userProject: absent
 		// result: failure, by the standard requester-pays rule
-		err := f(ob)
+		err := f(b2)
 		if got, want := errCode(err), wantErrorCode; got != want {
 			t.Errorf("%s: got error %v, want code %d\n"+
 				"confirm that %s is NOT an Owner on %s",
@@ -1452,7 +1468,7 @@
 		// user: not an Owner on the containing project
 		// userProject: not the containing one, but user has Editor role on it
 		// result: success, by the standard requester-pays rule
-		if err := f(ob.UserProject(otherProjID)); err != nil {
+		if err := f(b2.UserProject(otherProjID)); err != nil {
 			t.Errorf("%s: got %v, want nil\n"+
 				"confirm that %s is an Editor on %s and that that project has billing enabled",
 				msg, err, otherUser, otherProjID)
@@ -1460,7 +1476,7 @@
 		// user: not an Owner on the containing project
 		// userProject: the containing one, on which the user does NOT have Editor permission.
 		// result: failure
-		err = f(ob.UserProject("veener-jba"))
+		err = f(b2.UserProject("veener-jba"))
 		if got, want := errCode(err), 403; got != want {
 			t.Errorf("%s: got error %v, want code %d\n"+
 				"confirm that %s is NOT an Editor on %s",
@@ -1555,25 +1571,22 @@
 		_, err := b.Object("compose").ComposerFrom(b.Object("foo"), b.Object("copy")).Run(ctx)
 		return err
 	})
-
-	// Deletion.
-	// TODO(jba): uncomment when internal bug 78341001 is resolved.
-	// call("delete object", func(b *BucketHandle) error {
-	// 	err := b.Object("foo").Delete(ctx)
-	// 	fmt.Printf("#### deleting foo returns %v\n", err)
-	// 	if err == ErrObjectNotExist {
-	// 		return nil
-	// 	}
-	// 	return err
-	// })
-	b.Object("foo").Delete(ctx) // remove when above is uncommented
+	call("delete object", func(b *BucketHandle) error {
+		// Make sure the object exists, so we don't get confused by ErrObjectNotExist.
+		// The storage service may perform validation in any order (perhaps in parallel),
+		// so if we delete an object that doesn't exist and for which we lack permission,
+		// we could see either of those two errors. (See Google-internal bug 78341001.)
+		h.mustWrite(b1.Object("foo").NewWriter(ctx), []byte("hello")) // note: b1, not b.
+		return b.Object("foo").Delete(ctx)
+	})
+	b1.Object("foo").Delete(ctx) // Make sure object is deleted.
 	for _, obj := range []string{"copy", "compose"} {
-		if err := b.UserProject(projID).Object(obj).Delete(ctx); err != nil {
+		if err := b1.UserProject(projID).Object(obj).Delete(ctx); err != nil {
 			t.Fatalf("could not delete %q: %v", obj, err)
 		}
 	}
 
-	h.mustDeleteBucket(b)
+	h.mustDeleteBucket(b1)
 }
 
 // TODO(jba): move to testutil, factor out from firestore/integration_test.go.
@@ -2067,20 +2080,23 @@
 	if testing.Short() {
 		t.Skip("Integration tests skipped in short mode")
 	}
+	keyRingName := os.Getenv("GCLOUD_TESTS_GOLANG_KEYRING")
+	if keyRingName == "" {
+		t.Fatal("GCLOUD_TESTS_GOLANG_KEYRING must be set. See CONTRIBUTING.md for details")
+	}
 	ctx := context.Background()
 	client := testConfig(ctx, t)
 	defer client.Close()
 	h := testHelper{t}
 
-	// TODO(jba): make the key configurable? Or just require this name?
-	keyNameRoot := "projects/" + testutil.ProjID() + "/locations/global/keyRings/go-integration-test/cryptoKeys/key"
-	keyName := keyNameRoot + "1"
+	keyName1 := keyRingName + "/cryptoKeys/key1"
+	keyName2 := keyRingName + "/cryptoKeys/key2"
 	contents := []byte("my secret")
 
 	write := func(obj *ObjectHandle, setKey bool) {
 		w := obj.NewWriter(ctx)
 		if setKey {
-			w.KMSKeyName = keyName
+			w.KMSKeyName = keyName1
 		}
 		h.mustWrite(w, contents)
 	}
@@ -2091,8 +2107,8 @@
 			t.Errorf("got %v, want %v", got, contents)
 		}
 		attrs := h.mustObjectAttrs(obj)
-		if len(attrs.KMSKeyName) < len(keyName) || attrs.KMSKeyName[:len(keyName)] != keyName {
-			t.Errorf("got %q, want %q", attrs.KMSKeyName, keyName)
+		if len(attrs.KMSKeyName) < len(keyName1) || attrs.KMSKeyName[:len(keyName1)] != keyName1 {
+			t.Errorf("got %q, want %q", attrs.KMSKeyName, keyName1)
 		}
 	}
 
@@ -2104,13 +2120,13 @@
 	h.mustDeleteObject(obj)
 
 	// Encrypt an object with a CSEK, then copy it using a CMEK.
-	src := bkt.Object("csek").Key([]byte("my-secret-AES-256-encryption-key"))
+	src := bkt.Object("csek").Key(testEncryptionKey)
 	if err := writeObject(ctx, src, "text/plain", contents); err != nil {
 		t.Fatal(err)
 	}
 	dest := bkt.Object("cmek")
 	c := dest.CopierFrom(src)
-	c.DestinationKMSKeyName = keyName
+	c.DestinationKMSKeyName = keyName1
 	if _, err := c.Run(ctx); err != nil {
 		t.Fatal(err)
 	}
@@ -2121,12 +2137,13 @@
 	// Create a bucket with a default key, then write and read an object.
 	bkt = client.Bucket(uidSpace.New())
 	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{
-		Encryption: &BucketEncryption{DefaultKMSKeyName: keyName},
+		Location:   "US",
+		Encryption: &BucketEncryption{DefaultKMSKeyName: keyName1},
 	})
 	defer h.mustDeleteBucket(bkt)
 
 	attrs := h.mustBucketAttrs(bkt)
-	if got, want := attrs.Encryption.DefaultKMSKeyName, keyName; got != want {
+	if got, want := attrs.Encryption.DefaultKMSKeyName, keyName1; got != want {
 		t.Fatalf("got %q, want %q", got, want)
 	}
 	obj = bkt.Object("kms")
@@ -2136,7 +2153,6 @@
 
 	// Update the bucket's default key to a different name.
 	// (This key doesn't have to exist.)
-	keyName2 := keyNameRoot + "2"
 	attrs = h.mustUpdateBucket(bkt, BucketAttrsToUpdate{Encryption: &BucketEncryption{DefaultKMSKeyName: keyName2}})
 	if got, want := attrs.Encryption.DefaultKMSKeyName, keyName2; got != want {
 		t.Fatalf("got %q, want %q", got, want)
diff --git a/storage/storage.go b/storage/storage.go
index e3cfa78..df58bb7 100644
--- a/storage/storage.go
+++ b/storage/storage.go
@@ -725,6 +725,9 @@
 	// Cloud KMS key name, in the form
 	// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
 	// if the object is encrypted by such a key.
+	//
+	// Providing both a KMSKeyName and a customer-supplied encryption key (via
+	// ObjectHandle.Key) will result in an error when writing an object.
 	KMSKeyName string
 
 	// Prefix is set only for ObjectAttrs which represent synthetic "directory
diff --git a/storage/storage_test.go b/storage/storage_test.go
index 23622a0..a1ea765 100644
--- a/storage/storage_test.go
+++ b/storage/storage_test.go
@@ -264,44 +264,6 @@
 	return slurp
 }
 
-func TestCopyToMissingFields(t *testing.T) {
-	t.Parallel()
-	var tests = []struct {
-		srcBucket, srcName, destBucket, destName string
-		errMsg                                   string
-	}{
-		{
-			"mybucket", "", "mybucket", "destname",
-			"name is empty",
-		},
-		{
-			"mybucket", "srcname", "mybucket", "",
-			"name is empty",
-		},
-		{
-			"", "srcfile", "mybucket", "destname",
-			"name is empty",
-		},
-		{
-			"mybucket", "srcfile", "", "destname",
-			"name is empty",
-		},
-	}
-	ctx := context.Background()
-	client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}}))
-	if err != nil {
-		panic(err)
-	}
-	for i, test := range tests {
-		src := client.Bucket(test.srcBucket).Object(test.srcName)
-		dst := client.Bucket(test.destBucket).Object(test.destName)
-		_, err := dst.CopierFrom(src).Run(ctx)
-		if !strings.Contains(err.Error(), test.errMsg) {
-			t.Errorf("CopyTo test #%v:\ngot err  %q\nwant err %q", i, err, test.errMsg)
-		}
-	}
-}
-
 func TestObjectNames(t *testing.T) {
 	t.Parallel()
 	// Naming requirements: https://cloud.google.com/storage/docs/bucket-naming
diff --git a/storage/writer.go b/storage/writer.go
index 028fb86..3e9709b 100644
--- a/storage/writer.go
+++ b/storage/writer.go
@@ -88,6 +88,9 @@
 	if !utf8.ValidString(attrs.Name) {
 		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
 	}
+	if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
+		return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
+	}
 	pr, pw := io.Pipe()
 	w.pw = pw
 	w.opened = true
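A hedged sketch of writing an object under a KMS key, reflecting the constraint enforced above; bucket, object, and key names are placeholders:

	// Sketch: encrypt a new object with a Cloud KMS key instead of a customer-supplied key.
	w := client.Bucket("my-bucket").Object("kms-object").NewWriter(ctx)
	w.KMSKeyName = "projects/P/locations/L/keyRings/R/cryptoKeys/K"
	if _, err := w.Write([]byte("hello")); err != nil {
		// handle error; also calling ObjectHandle.Key on this object would fail here with a KMS error
	}
	if err := w.Close(); err != nil {
		// handle error
	}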
diff --git a/storage/writer_test.go b/storage/writer_test.go
index c1f7380..ca6f58c 100644
--- a/storage/writer_test.go
+++ b/storage/writer_test.go
@@ -15,10 +15,10 @@
 package storage
 
 import (
-	"bytes"
 	"crypto/sha256"
 	"encoding/base64"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"net/http"
 	"strings"
@@ -32,6 +32,8 @@
 	"google.golang.org/api/option"
 )
 
+var testEncryptionKey = []byte("secret-key-that-is-32-bytes-long")
+
 type fakeTransport struct {
 	gotReq  *http.Request
 	gotBody []byte
@@ -93,14 +95,9 @@
 
 	// Retry on 5xx
 	ft := &fakeTransport{}
-	ft.addResult(&http.Response{
-		StatusCode: 503,
-		Body:       ioutil.NopCloser(&bytes.Buffer{}),
-	}, nil)
-	ft.addResult(&http.Response{
-		StatusCode: 200,
-		Body:       ioutil.NopCloser(strings.NewReader("{}")),
-	}, nil)
+	ft.addResult(&http.Response{StatusCode: 503, Body: bodyReader("")}, nil)
+	ft.addResult(&http.Response{StatusCode: 200, Body: bodyReader("{}")}, nil)
+
 	wc = doWrite(&http.Client{Transport: ft})
 	if err := wc.Close(); err != nil {
 		t.Errorf("got %v, want nil", err)
@@ -115,17 +112,20 @@
 	t.Parallel()
 	ctx := context.Background()
 	ft := &fakeTransport{}
+	ft.addResult(&http.Response{StatusCode: 200, Body: bodyReader("{}")}, nil)
 	hc := &http.Client{Transport: ft}
 	client, err := NewClient(ctx, option.WithHTTPClient(hc))
 	if err != nil {
 		t.Fatalf("error when creating client: %v", err)
 	}
 	obj := client.Bucket("bucketname").Object("filename1")
-	key := []byte("secret-key-that-is-32-bytes-long")
-	wc := obj.Key(key).NewWriter(ctx)
-	// TODO(jba): use something other than fakeTransport, which always returns error.
-	wc.Write([]byte("hello world"))
-	wc.Close()
+	wc := obj.Key(testEncryptionKey).NewWriter(ctx)
+	if _, err := wc.Write([]byte("hello world")); err != nil {
+		t.Fatal(err)
+	}
+	if err := wc.Close(); err != nil {
+		t.Fatal(err)
+	}
 	if got, want := ft.gotReq.Header.Get("x-goog-encryption-algorithm"), "AES256"; got != want {
 		t.Errorf("algorithm: got %q, want %q", got, want)
 	}
@@ -133,10 +133,10 @@
 	if err != nil {
 		t.Fatalf("decoding key: %v", err)
 	}
-	if !testutil.Equal(gotKey, key) {
-		t.Errorf("key: got %v, want %v", gotKey, key)
+	if !testutil.Equal(gotKey, testEncryptionKey) {
+		t.Errorf("key: got %v, want %v", gotKey, testEncryptionKey)
 	}
-	wantHash := sha256.Sum256(key)
+	wantHash := sha256.Sum256(testEncryptionKey)
 	gotHash, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key-sha256"))
 	if err != nil {
 		t.Fatalf("decoding hash: %v", err)
@@ -144,6 +144,21 @@
 	if !testutil.Equal(gotHash, wantHash[:]) { // wantHash is an array
 		t.Errorf("hash: got\n%v, want\n%v", gotHash, wantHash)
 	}
+
+	// Using a customer-supplied encryption key and a KMS key together is an error.
+	checkKMSError := func(msg string, err error) {
+		if err == nil {
+			t.Errorf("%s: got nil, want error", msg)
+		} else if !strings.Contains(err.Error(), "KMS") {
+			t.Errorf(`%s: got %q, want it to contain "KMS"`, msg, err)
+		}
+	}
+
+	wc = obj.Key(testEncryptionKey).NewWriter(ctx)
+	wc.KMSKeyName = "key"
+	_, err = wc.Write([]byte{})
+	checkKMSError("Write", err)
+	checkKMSError("Close", wc.Close())
 }
 
 // This test demonstrates the data race on Writer.err that can happen when the
@@ -172,3 +187,7 @@
 	// This call to Write concurrently reads w.err (L169).
 	w.Write([]byte(nil))
 }
+
+func bodyReader(s string) io.ReadCloser {
+	return ioutil.NopCloser(strings.NewReader(s))
+}
diff --git a/vision/apiv1/doc.go b/vision/apiv1/doc.go
index 43d7e04..e94d383 100644
--- a/vision/apiv1/doc.go
+++ b/vision/apiv1/doc.go
@@ -15,7 +15,7 @@
 // AUTO-GENERATED CODE. DO NOT EDIT.
 
 // Package vision is an auto-generated package for the
-// Google Cloud Vision API.
+// Cloud Vision API.
 
 //
 // Integrates Google Vision features, including image labeling, face, logo,
diff --git a/vision/apiv1/image_annotator_client.go b/vision/apiv1/image_annotator_client.go
index e3524a8..901e53f 100644
--- a/vision/apiv1/image_annotator_client.go
+++ b/vision/apiv1/image_annotator_client.go
@@ -20,11 +20,14 @@
 	"time"
 
 	"cloud.google.com/go/internal/version"
+	"cloud.google.com/go/longrunning"
+	lroauto "cloud.google.com/go/longrunning/autogen"
 	gax "github.com/googleapis/gax-go"
 	"golang.org/x/net/context"
 	"google.golang.org/api/option"
 	"google.golang.org/api/transport"
 	visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1"
+	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
@@ -32,7 +35,8 @@
 
 // ImageAnnotatorCallOptions contains the retry settings for each method of ImageAnnotatorClient.
 type ImageAnnotatorCallOptions struct {
-	BatchAnnotateImages []gax.CallOption
+	BatchAnnotateImages     []gax.CallOption
+	AsyncBatchAnnotateFiles []gax.CallOption
 }
 
 func defaultImageAnnotatorClientOptions() []option.ClientOption {
@@ -58,11 +62,14 @@
 		},
 	}
 	return &ImageAnnotatorCallOptions{
-		BatchAnnotateImages: retry[[2]string{"default", "idempotent"}],
+		BatchAnnotateImages:     retry[[2]string{"default", "idempotent"}],
+		AsyncBatchAnnotateFiles: retry[[2]string{"default", "idempotent"}],
 	}
 }
 
-// ImageAnnotatorClient is a client for interacting with Google Cloud Vision API.
+// ImageAnnotatorClient is a client for interacting with Cloud Vision API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
 type ImageAnnotatorClient struct {
 	// The connection to the service.
 	conn *grpc.ClientConn
@@ -70,6 +77,11 @@
 	// The gRPC API client.
 	imageAnnotatorClient visionpb.ImageAnnotatorClient
 
+	// LROClient is used internally to handle longrunning operations.
+	// It is exposed so that its CallOptions can be modified if required.
+	// Users should not Close this client.
+	LROClient *lroauto.OperationsClient
+
 	// The call options for this service.
 	CallOptions *ImageAnnotatorCallOptions
 
@@ -94,6 +106,17 @@
 		imageAnnotatorClient: visionpb.NewImageAnnotatorClient(conn),
 	}
 	c.setGoogleClientInfo()
+
+	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
+	if err != nil {
+		// This error "should not happen", since we are just reusing old connection
+		// and never actually need to dial.
+		// If this does happen, we could leak conn. However, we cannot close conn:
+		// If the user invoked the function with option.WithGRPCConn,
+		// we would close a connection that's still in use.
+		// TODO(pongad): investigate error conditions.
+		return nil, err
+	}
 	return c, nil
 }
 
@@ -132,3 +155,95 @@
 	}
 	return resp, nil
 }
+
+// AsyncBatchAnnotateFiles runs asynchronous image detection and annotation for a list of generic
+// files, such as PDF files, which may contain multiple pages and multiple
+// images per page. Progress and results can be retrieved through the
+// google.longrunning.Operations interface.
+// Operation.metadata contains OperationMetadata (metadata).
+// Operation.response contains AsyncBatchAnnotateFilesResponse (results).
+func (c *ImageAnnotatorClient) AsyncBatchAnnotateFiles(ctx context.Context, req *visionpb.AsyncBatchAnnotateFilesRequest, opts ...gax.CallOption) (*AsyncBatchAnnotateFilesOperation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.AsyncBatchAnnotateFiles[0:len(c.CallOptions.AsyncBatchAnnotateFiles):len(c.CallOptions.AsyncBatchAnnotateFiles)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.imageAnnotatorClient.AsyncBatchAnnotateFiles(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &AsyncBatchAnnotateFilesOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, resp),
+	}, nil
+}
+
+// AsyncBatchAnnotateFilesOperation manages a long-running operation from AsyncBatchAnnotateFiles.
+type AsyncBatchAnnotateFilesOperation struct {
+	lro *longrunning.Operation
+}
+
+// AsyncBatchAnnotateFilesOperation returns a new AsyncBatchAnnotateFilesOperation from a given name.
+// The name must be that of a previously created AsyncBatchAnnotateFilesOperation, possibly from a different process.
+func (c *ImageAnnotatorClient) AsyncBatchAnnotateFilesOperation(name string) *AsyncBatchAnnotateFilesOperation {
+	return &AsyncBatchAnnotateFilesOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
+	}
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *AsyncBatchAnnotateFilesOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*visionpb.AsyncBatchAnnotateFilesResponse, error) {
+	var resp visionpb.AsyncBatchAnnotateFilesResponse
+	if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *AsyncBatchAnnotateFilesOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*visionpb.AsyncBatchAnnotateFilesResponse, error) {
+	var resp visionpb.AsyncBatchAnnotateFilesResponse
+	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+		return nil, err
+	}
+	if !op.Done() {
+		return nil, nil
+	}
+	return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *AsyncBatchAnnotateFilesOperation) Metadata() (*visionpb.OperationMetadata, error) {
+	var meta visionpb.OperationMetadata
+	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *AsyncBatchAnnotateFilesOperation) Done() bool {
+	return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *AsyncBatchAnnotateFilesOperation) Name() string {
+	return op.lro.Name()
+}
diff --git a/vision/apiv1/image_annotator_client_example_test.go b/vision/apiv1/image_annotator_client_example_test.go
index 974b385..83de6c9 100644
--- a/vision/apiv1/image_annotator_client_example_test.go
+++ b/vision/apiv1/image_annotator_client_example_test.go
@@ -49,3 +49,26 @@
 	// TODO: Use resp.
 	_ = resp
 }
+
+func ExampleImageAnnotatorClient_AsyncBatchAnnotateFiles() {
+	ctx := context.Background()
+	c, err := vision.NewImageAnnotatorClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &visionpb.AsyncBatchAnnotateFilesRequest{
+		// TODO: Fill request struct fields.
+	}
+	op, err := c.AsyncBatchAnnotateFiles(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	resp, err := op.Wait(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
diff --git a/vision/apiv1/mock_test.go b/vision/apiv1/mock_test.go
index 247c1be..b9fa1a0 100644
--- a/vision/apiv1/mock_test.go
+++ b/vision/apiv1/mock_test.go
@@ -18,6 +18,7 @@
 
 import (
 	visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1"
+	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
 )
 
 import (
@@ -72,6 +73,18 @@
 	return s.resps[0].(*visionpb.BatchAnnotateImagesResponse), nil
 }
 
+func (s *mockImageAnnotatorServer) AsyncBatchAnnotateFiles(ctx context.Context, req *visionpb.AsyncBatchAnnotateFilesRequest) (*longrunningpb.Operation, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*longrunningpb.Operation), nil
+}
+
 // clientOpt is the option tests should use to connect to the test server.
 // It is initialized by TestMain.
 var clientOpt option.ClientOption
@@ -157,3 +170,85 @@
 	}
 	_ = resp
 }
+func TestImageAnnotatorAsyncBatchAnnotateFiles(t *testing.T) {
+	var expectedResponse *visionpb.AsyncBatchAnnotateFilesResponse = &visionpb.AsyncBatchAnnotateFilesResponse{}
+
+	mockImageAnnotator.err = nil
+	mockImageAnnotator.reqs = nil
+
+	any, err := ptypes.MarshalAny(expectedResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+	mockImageAnnotator.resps = append(mockImageAnnotator.resps[:0], &longrunningpb.Operation{
+		Name:   "longrunning-test",
+		Done:   true,
+		Result: &longrunningpb.Operation_Response{Response: any},
+	})
+
+	var requests []*visionpb.AsyncAnnotateFileRequest = nil
+	var request = &visionpb.AsyncBatchAnnotateFilesRequest{
+		Requests: requests,
+	}
+
+	c, err := NewImageAnnotatorClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	respLRO, err := c.AsyncBatchAnnotateFiles(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+	resp, err := respLRO.Wait(context.Background())
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockImageAnnotator.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestImageAnnotatorAsyncBatchAnnotateFilesError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockImageAnnotator.err = nil
+	mockImageAnnotator.resps = append(mockImageAnnotator.resps[:0], &longrunningpb.Operation{
+		Name: "longrunning-test",
+		Done: true,
+		Result: &longrunningpb.Operation_Error{
+			Error: &status.Status{
+				Code:    int32(errCode),
+				Message: "test error",
+			},
+		},
+	})
+
+	var requests []*visionpb.AsyncAnnotateFileRequest = nil
+	var request = &visionpb.AsyncBatchAnnotateFilesRequest{
+		Requests: requests,
+	}
+
+	c, err := NewImageAnnotatorClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	respLRO, err := c.AsyncBatchAnnotateFiles(context.Background(), request)
+	if err != nil {
+		t.Fatal(err)
+	}
+	resp, err := respLRO.Wait(context.Background())
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
diff --git a/vision/apiv1p1beta1/image_annotator_client.go b/vision/apiv1p1beta1/image_annotator_client.go
index 25d8972..1a446e4 100644
--- a/vision/apiv1p1beta1/image_annotator_client.go
+++ b/vision/apiv1p1beta1/image_annotator_client.go
@@ -63,6 +63,8 @@
 }
 
 // ImageAnnotatorClient is a client for interacting with Google Cloud Vision API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
 type ImageAnnotatorClient struct {
 	// The connection to the service.
 	conn *grpc.ClientConn