bigquery: make Uploader an alias for Inserter

Rename Uploader to Inserter at the request of the BigQuery team.
The name Inserter matches the BigQuery service documentation. Keep
Uploader as a deprecated type alias for Inserter, so existing code
continues to compile.
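
For illustration (the project, dataset, and table names below are
placeholders taken from the package examples), the alias keeps both
names usable during migration:

    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }
    ins := client.Dataset("my_dataset").Table("my_table").Inserter()
    // Uploader is now a type alias for Inserter, so existing
    // declarations keep compiling.
    var u *bigquery.Uploader = ins
    _ = u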

Fixes #835.

Change-Id: Ib37191c7a8c80848fcf4d541938b36611334e8cd
Reviewed-on: https://code-review.googlesource.com/c/35070
Reviewed-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Eno Compton <enocom@google.com>
diff --git a/bigquery/examples_test.go b/bigquery/examples_test.go
index 031b0cd..6b6af28 100644
--- a/bigquery/examples_test.go
+++ b/bigquery/examples_test.go
@@ -521,26 +521,26 @@
 	fmt.Println(md)
 }
 
-func ExampleTable_Uploader() {
+func ExampleTable_Inserter() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")
 	if err != nil {
 		// TODO: Handle error.
 	}
-	u := client.Dataset("my_dataset").Table("my_table").Uploader()
-	_ = u // TODO: Use u.
+	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
+	_ = ins // TODO: Use ins.
 }
 
-func ExampleTable_Uploader_options() {
+func ExampleTable_Inserter_options() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")
 	if err != nil {
 		// TODO: Handle error.
 	}
-	u := client.Dataset("my_dataset").Table("my_table").Uploader()
-	u.SkipInvalidRows = true
-	u.IgnoreUnknownValues = true
-	_ = u // TODO: Use u.
+	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
+	ins.SkipInvalidRows = true
+	ins.IgnoreUnknownValues = true
+	_ = ins // TODO: Use ins.
 }
 
 func ExampleTable_CopierFrom() {
@@ -737,33 +737,33 @@
 	}, "", nil
 }
 
-func ExampleUploader_Put() {
+func ExampleInserter_Put() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")
 	if err != nil {
 		// TODO: Handle error.
 	}
-	u := client.Dataset("my_dataset").Table("my_table").Uploader()
+	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
 	// Item implements the ValueSaver interface.
 	items := []*Item{
 		{Name: "n1", Size: 32.6, Count: 7},
 		{Name: "n2", Size: 4, Count: 2},
 		{Name: "n3", Size: 101.5, Count: 1},
 	}
-	if err := u.Put(ctx, items); err != nil {
+	if err := ins.Put(ctx, items); err != nil {
 		// TODO: Handle error.
 	}
 }
 
 var schema bigquery.Schema
 
-func ExampleUploader_Put_structSaver() {
+func ExampleInserter_Put_structSaver() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")
 	if err != nil {
 		// TODO: Handle error.
 	}
-	u := client.Dataset("my_dataset").Table("my_table").Uploader()
+	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
 
 	type score struct {
 		Name string
@@ -776,18 +776,18 @@
 		{Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
 		{Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
 	}
-	if err := u.Put(ctx, savers); err != nil {
+	if err := ins.Put(ctx, savers); err != nil {
 		// TODO: Handle error.
 	}
 }
 
-func ExampleUploader_Put_struct() {
+func ExampleInserter_Put_struct() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")
 	if err != nil {
 		// TODO: Handle error.
 	}
-	u := client.Dataset("my_dataset").Table("my_table").Uploader()
+	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
 
 	type score struct {
 		Name string
@@ -799,19 +799,19 @@
 		{Name: "n3", Num: 7},
 	}
 	// Schema is inferred from the score type.
-	if err := u.Put(ctx, scores); err != nil {
+	if err := ins.Put(ctx, scores); err != nil {
 		// TODO: Handle error.
 	}
 }
 
-func ExampleUploader_Put_valuesSaver() {
+func ExampleInserter_Put_valuesSaver() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")
 	if err != nil {
 		// TODO: Handle error.
 	}
 
-	u := client.Dataset("my_dataset").Table("my_table").Uploader()
+	ins := client.Dataset("my_dataset").Table("my_table").Inserter()
 
 	var vss []*bigquery.ValuesSaver
 	for i, name := range []string{"n1", "n2", "n3"} {
@@ -823,7 +823,7 @@
 		})
 	}
 
-	if err := u.Put(ctx, vss); err != nil {
+	if err := ins.Put(ctx, vss); err != nil {
 		// TODO: Handle error.
 	}
 }
diff --git a/bigquery/integration_test.go b/bigquery/integration_test.go
index 661d683..9172452 100644
--- a/bigquery/integration_test.go
+++ b/bigquery/integration_test.go
@@ -683,7 +683,7 @@
 	}
 }
 
-func TestIntegration_UploadAndRead(t *testing.T) {
+func TestIntegration_InsertAndRead(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
 	}
@@ -692,7 +692,7 @@
 	defer table.Delete(ctx)
 
 	// Populate the table.
-	upl := table.Uploader()
+	ins := table.Inserter()
 	var (
 		wantRows  [][]Value
 		saverRows []*ValuesSaver
@@ -706,7 +706,7 @@
 			Row:      row,
 		})
 	}
-	if err := upl.Put(ctx, saverRows); err != nil {
+	if err := ins.Put(ctx, saverRows); err != nil {
 		t.Fatal(putError(err))
 	}
 
@@ -844,7 +844,7 @@
 var roundToMicros = cmp.Transformer("RoundToMicros",
 	func(t time.Time) time.Time { return t.Round(time.Microsecond) })
 
-func TestIntegration_UploadAndReadStructs(t *testing.T) {
+func TestIntegration_InsertAndReadStructs(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
 	}
@@ -867,7 +867,7 @@
 	dtm2 := civil.DateTime{Date: d2, Time: tm2}
 
 	// Populate the table.
-	upl := table.Uploader()
+	ins := table.Inserter()
 	want := []*TestStruct{
 		{
 			"a",
@@ -920,7 +920,7 @@
 	for _, s := range want {
 		savers = append(savers, &StructSaver{Schema: schema, Struct: s})
 	}
-	if err := upl.Put(ctx, savers); err != nil {
+	if err := ins.Put(ctx, savers); err != nil {
 		t.Fatal(putError(err))
 	}
 
@@ -962,15 +962,15 @@
 func (b byName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
 func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }
 
-func TestIntegration_UploadAndReadNullable(t *testing.T) {
+func TestIntegration_InsertAndReadNullable(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
 	}
 	ctm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000}
 	cdt := civil.DateTime{Date: testDate, Time: ctm}
 	rat := big.NewRat(33, 100)
-	testUploadAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema)))
-	testUploadAndReadNullable(t, testStructNullable{
+	testInsertAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema)))
+	testInsertAndReadNullable(t, testStructNullable{
 		String:    NullString{"x", true},
 		Bytes:     []byte{1, 2, 3},
 		Integer:   NullInt64{1, true},
@@ -986,14 +986,14 @@
 		[]Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, ctm, cdt, rat, []Value{int64(4)}})
 }
 
-func testUploadAndReadNullable(t *testing.T, ts testStructNullable, wantRow []Value) {
+func testInsertAndReadNullable(t *testing.T, ts testStructNullable, wantRow []Value) {
 	ctx := context.Background()
 	table := newTable(t, testStructNullableSchema)
 	defer table.Delete(ctx)
 
 	// Populate the table.
-	upl := table.Uploader()
-	if err := upl.Put(ctx, []*StructSaver{{Schema: testStructNullableSchema, Struct: ts}}); err != nil {
+	ins := table.Inserter()
+	if err := ins.Put(ctx, []*StructSaver{{Schema: testStructNullableSchema, Struct: ts}}); err != nil {
 		t.Fatal(putError(err))
 	}
 	// Wait until the data has been uploaded. This can take a few seconds, according
@@ -1267,8 +1267,8 @@
 	wantRows := [][]Value{
 		{d, tm, dtm, ts},
 	}
-	upl := table.Uploader()
-	if err := upl.Put(ctx, []*ValuesSaver{
+	ins := table.Inserter()
+	if err := ins.Put(ctx, []*ValuesSaver{
 		{Schema: dtSchema, Row: wantRows[0]},
 	}); err != nil {
 		t.Fatal(putError(err))
@@ -1661,12 +1661,12 @@
 	table := newTable(t, schema)
 	defer table.Delete(ctx)
 
-	upl := table.Uploader()
+	ins := table.Inserter()
 	row := &ValuesSaver{
 		Schema: schema,
 		Row:    []Value{nil, []Value{}, []Value{nil}},
 	}
-	if err := upl.Put(ctx, []*ValuesSaver{row}); err != nil {
+	if err := ins.Put(ctx, []*ValuesSaver{row}); err != nil {
 		t.Fatal(putError(err))
 	}
 	if err := waitForRow(ctx, table); err != nil {
@@ -1911,8 +1911,8 @@
 	if _, ok := tooBigRat.SetString("1e40"); !ok {
 		t.Fatal("big.Rat.SetString failed")
 	}
-	upl := table.Uploader()
-	err := upl.Put(ctx, []*ValuesSaver{{Schema: schema, Row: []Value{tooBigRat}}})
+	ins := table.Inserter()
+	err := ins.Put(ctx, []*ValuesSaver{{Schema: schema, Row: []Value{tooBigRat}}})
 	if err == nil {
 		t.Fatal("got nil, want error")
 	}
diff --git a/bigquery/uploader.go b/bigquery/uploader.go
index a432ce5..16081af 100644
--- a/bigquery/uploader.go
+++ b/bigquery/uploader.go
@@ -24,9 +24,9 @@
 	bq "google.golang.org/api/bigquery/v2"
 )
 
-// An Uploader does streaming inserts into a BigQuery table.
+// An Inserter does streaming inserts into a BigQuery table.
 // It is safe for concurrent use.
-type Uploader struct {
+type Inserter struct {
 	t *Table
 
 	// SkipInvalidRows causes rows containing invalid data to be silently
@@ -39,7 +39,7 @@
 	// to be treated as invalid records.
 	IgnoreUnknownValues bool
 
-	// A TableTemplateSuffix allows Uploaders to create tables automatically.
+	// A TableTemplateSuffix allows Inserters to create tables automatically.
 	//
 	// Experimental: this option is experimental and may be modified or removed in future versions,
 	// regardless of any other documented package stability guarantees.
@@ -53,15 +53,19 @@
 	TableTemplateSuffix string
 }
 
-// Uploader returns an Uploader that can be used to append rows to t.
-// The returned Uploader may optionally be further configured before its Put method is called.
+// Inserter returns an Inserter that can be used to append rows to t.
+// The returned Inserter may optionally be further configured before its Put method is called.
 //
 // To stream rows into a date-partitioned table at a particular date, add the
 // $yyyymmdd suffix to the table name when constructing the Table.
-func (t *Table) Uploader() *Uploader {
-	return &Uploader{t: t}
+func (t *Table) Inserter() *Inserter {
+	return &Inserter{t: t}
 }
 
+// Uploader calls Inserter.
+// Deprecated: use Table.Inserter instead.
+func (t *Table) Uploader() *Inserter { return t.Inserter() }
+
 // Put uploads one or more rows to the BigQuery service.
 //
 // If src is ValueSaver, then its Save method is called to produce a row for uploading.
@@ -81,8 +85,8 @@
 // in duplicate rows if you do not use insert IDs. Also, if the error persists,
 // the call will run indefinitely. Pass a context with a timeout to prevent
 // hanging calls.
-func (u *Uploader) Put(ctx context.Context, src interface{}) (err error) {
-	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Uploader.Put")
+func (u *Inserter) Put(ctx context.Context, src interface{}) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Inserter.Put")
 	defer func() { trace.EndSpan(ctx, err) }()
 
 	savers, err := valueSavers(src)
@@ -158,7 +162,7 @@
 	}, true, nil
 }
 
-func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
+func (u *Inserter) putMulti(ctx context.Context, src []ValueSaver) error {
 	req, err := u.newInsertRequest(src)
 	if err != nil {
 		return err
@@ -180,7 +184,7 @@
 	return handleInsertErrors(res.InsertErrors, req.Rows)
 }
 
-func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
+func (u *Inserter) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
 	if savers == nil { // If there are no rows, do nothing.
 		return nil, nil
 	}
@@ -229,3 +233,7 @@
 	}
 	return errs
 }
+
+// Uploader is an obsolete name for Inserter.
+// Deprecated: use Inserter instead.
+type Uploader = Inserter