feat(vertexai/genai): add SystemInstruction (#9736)

Expose GenerateContentRequest.SystemInstruction.

As usual, put it on the Model, and use the Model field to construct
each request.
diff --git a/vertexai/genai/client.go b/vertexai/genai/client.go
index de18690..9a2c297 100644
--- a/vertexai/genai/client.go
+++ b/vertexai/genai/client.go
@@ -93,9 +93,10 @@
 	fullName string
 
 	GenerationConfig
-	SafetySettings []*SafetySetting
-	Tools          []*Tool
-	ToolConfig     *ToolConfig // configuration for tools
+	SafetySettings    []*SafetySetting
+	Tools             []*Tool
+	ToolConfig        *ToolConfig // configuration for tools
+	SystemInstruction *Content
 }
 
 const defaultMaxOutputTokens = 2048
@@ -142,12 +143,13 @@
 
 func (m *GenerativeModel) newGenerateContentRequest(contents ...*Content) *pb.GenerateContentRequest {
 	return &pb.GenerateContentRequest{
-		Model:            m.fullName,
-		Contents:         support.TransformSlice(contents, (*Content).toProto),
-		SafetySettings:   support.TransformSlice(m.SafetySettings, (*SafetySetting).toProto),
-		Tools:            support.TransformSlice(m.Tools, (*Tool).toProto),
-		ToolConfig:       m.ToolConfig.toProto(),
-		GenerationConfig: m.GenerationConfig.toProto(),
+		Model:             m.fullName,
+		Contents:          support.TransformSlice(contents, (*Content).toProto),
+		SafetySettings:    support.TransformSlice(m.SafetySettings, (*SafetySetting).toProto),
+		Tools:             support.TransformSlice(m.Tools, (*Tool).toProto),
+		ToolConfig:        m.ToolConfig.toProto(),
+		GenerationConfig:  m.GenerationConfig.toProto(),
+		SystemInstruction: m.SystemInstruction.toProto(),
 	}
 }
 
diff --git a/vertexai/genai/client_test.go b/vertexai/genai/client_test.go
index d0e5f7d..7e2055a 100644
--- a/vertexai/genai/client_test.go
+++ b/vertexai/genai/client_test.go
@@ -59,6 +59,19 @@
 		got := responseString(resp)
 		checkMatch(t, got, `15.* cm|[1-9].* inches`)
 	})
+	t.Run("system-instructions", func(t *testing.T) {
+		model := client.GenerativeModel(*modelName)
+		model.Temperature = Ptr[float32](0)
+		model.SystemInstruction = &Content{
+			Parts: []Part{Text("You are Yoda from Star Wars.")},
+		}
+		resp, err := model.GenerateContent(ctx, Text("What is the average size of a swallow?"))
+		if err != nil {
+			t.Fatal(err)
+		}
+		got := responseString(resp)
+		checkMatch(t, got, `[1-9][0-9].* cm|[1-9].* inches`)
+	})
 
 	t.Run("streaming", func(t *testing.T) {
 		iter := model.GenerateContentStream(ctx, Text("Are you hungry?"))
diff --git a/vertexai/genai/example_test.go b/vertexai/genai/example_test.go
index 229c7e7..0790791 100644
--- a/vertexai/genai/example_test.go
+++ b/vertexai/genai/example_test.go
@@ -51,6 +51,33 @@
 	printResponse(resp)
 }
 
+// This example shows how to configure a model. See [GenerationConfig]
+// for the complete set of configuration options.
+func ExampleGenerativeModel_GenerateContent_config() {
+	ctx := context.Background()
+	const projectID = "YOUR PROJECT ID"
+	const location = "GCP LOCATION"
+	client, err := genai.NewClient(ctx, projectID, location)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer client.Close()
+
+	model := client.GenerativeModel("gemini-1.0-pro")
+	model.SetTemperature(0.9)
+	model.SetTopP(0.5)
+	model.SetTopK(20)
+	model.SetMaxOutputTokens(100)
+	model.SystemInstruction = &genai.Content{
+		Parts: []genai.Part{genai.Text("You are Yoda from Star Wars.")},
+	}
+	resp, err := model.GenerateContent(ctx, genai.Text("What is the average size of a swallow?"))
+	if err != nil {
+		log.Fatal(err)
+	}
+	printResponse(resp)
+}
+
 func ExampleGenerativeModel_GenerateContentStream() {
 	ctx := context.Background()
 	client, err := genai.NewClient(ctx, projectID, location)