Commit cff3082

feat(generative-ai): update samples to Gemini Flash (GoogleCloudPlatform#11879)
1 parent c3e79ca commit cff3082

9 files changed: +12 -12 lines

generative_ai/function_calling.py

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ def generate_function_call(project_id: str) -> GenerationResponse:
     vertexai.init(project=project_id, location="us-central1")
 
     # Initialize Gemini model
-    model = GenerativeModel(model_name="gemini-1.0-pro-001")
+    model = GenerativeModel(model_name="gemini-1.5-flash-001")
 
     # Define the user's prompt in a Content object that we can reuse in model calls
     user_prompt_content = Content(
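
For reference, a minimal sketch of the function-calling flow this sample wraps, now against gemini-1.5-flash-001. The get_current_weather declaration and the "your-project-id" placeholder are illustrative stand-ins; the real sample defines its own tool earlier in the file.

import vertexai
from vertexai.generative_models import (
    Content,
    FunctionDeclaration,
    GenerativeModel,
    Part,
    Tool,
)

vertexai.init(project="your-project-id", location="us-central1")

# Hypothetical function declaration; the sample ships its own tool definition.
get_current_weather_func = FunctionDeclaration(
    name="get_current_weather",
    description="Get the current weather in a given location",
    parameters={
        "type": "object",
        "properties": {"location": {"type": "string", "description": "City name"}},
    },
)
weather_tool = Tool(function_declarations=[get_current_weather_func])

model = GenerativeModel(model_name="gemini-1.5-flash-001")
user_prompt_content = Content(
    role="user",
    parts=[Part.from_text("What is the weather like in Boston?")],
)

response = model.generate_content(user_prompt_content, tools=[weather_tool])
# The model should respond with a structured function call rather than text.
print(response.candidates[0].content.parts[0].function_call)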

generative_ai/function_calling_chat.py

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ def generate_function_call_chat(project_id: str) -> ChatSession:
 
     # Initialize Gemini model
     model = GenerativeModel(
-        model_name="gemini-1.0-pro-001",
+        model_name="gemini-1.5-flash-001",
         generation_config=GenerationConfig(temperature=0),
         tools=[retail_tool],
    )

generative_ai/gemini_chat_example.py

Lines changed: 2 additions & 2 deletions
@@ -24,7 +24,7 @@ def chat_text_example(project_id: str) -> str:
 
     vertexai.init(project=project_id, location="us-central1")
 
-    model = GenerativeModel(model_name="gemini-1.0-pro-002")
+    model = GenerativeModel(model_name="gemini-1.5-flash-001")
 
     chat = model.start_chat()
 
@@ -55,7 +55,7 @@ def chat_stream_example(project_id: str) -> str:
 
     vertexai.init(project=project_id, location="us-central1")
 
-    model = GenerativeModel(model_name="gemini-1.0-pro-002")
+    model = GenerativeModel(model_name="gemini-1.5-flash-001")
 
     chat = model.start_chat()
 
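
For reference, a minimal chat round-trip against the updated model, covering both the blocking and streaming variants these two functions demonstrate (the project ID is a placeholder):

import vertexai
from vertexai.generative_models import GenerativeModel

vertexai.init(project="your-project-id", location="us-central1")

model = GenerativeModel(model_name="gemini-1.5-flash-001")
chat = model.start_chat()

# Each send_message call appends to the history kept by the chat session.
response = chat.send_message("Hello! Can you explain what a context window is?")
print(response.text)

# Streaming variant: iterate over partial responses as they arrive.
for chunk in chat.send_message("Summarize that in one sentence.", stream=True):
    print(chunk.text, end="")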

generative_ai/gemini_count_token_example.py

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ def count_tokens(project_id: str) -> GenerationResponse:
 
     vertexai.init(project=project_id, location="us-central1")
 
-    model = GenerativeModel(model_name="gemini-1.0-pro-002")
+    model = GenerativeModel(model_name="gemini-1.5-flash-001")
 
     prompt = "Why is the sky blue?"
 
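
A short sketch of the token-counting call this sample wraps; the response fields shown are those of the SDK's CountTokensResponse, and the project ID is a placeholder:

import vertexai
from vertexai.generative_models import GenerativeModel

vertexai.init(project="your-project-id", location="us-central1")

model = GenerativeModel(model_name="gemini-1.5-flash-001")

prompt = "Why is the sky blue?"

# count_tokens runs server-side and does not generate any text.
response = model.count_tokens(prompt)
print(f"Prompt token count: {response.total_tokens}")
print(f"Prompt billable characters: {response.total_billable_characters}")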

generative_ai/gemini_pro_config_example.py

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ def generate_text(project_id: str) -> None:
 
     vertexai.init(project=project_id, location="us-central1")
 
-    model = GenerativeModel(model_name="gemini-1.0-pro-vision-001")
+    model = GenerativeModel(model_name="gemini-1.5-flash-001")
 
     # Load example image from local storage
     encoded_image = base64.b64encode(open("scones.jpg", "rb").read()).decode("utf-8")
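
Because this sample previously targeted the vision model, it passes an image alongside text with a generation config. A hedged sketch of that multimodal call with the Flash model, assuming a local scones.jpg as in the sample (the sample itself round-trips the image through base64; raw bytes work equally well with Part.from_data):

import vertexai
from vertexai.generative_models import GenerationConfig, GenerativeModel, Part

vertexai.init(project="your-project-id", location="us-central1")

model = GenerativeModel(model_name="gemini-1.5-flash-001")

# Load example image from local storage
image_bytes = open("scones.jpg", "rb").read()
image_part = Part.from_data(data=image_bytes, mime_type="image/jpeg")

# Generation config controls sampling; these values are illustrative.
generation_config = GenerationConfig(
    temperature=0.9,
    top_p=1.0,
    candidate_count=1,
    max_output_tokens=2048,
)

response = model.generate_content(
    [image_part, "Describe this image."],
    generation_config=generation_config,
)
print(response.text)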

generative_ai/gemini_rapid_evaluation.py

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ def create_evaluation_task(project_id: str) -> EvalResult:
         ],
     )
 
-    model = GenerativeModel("gemini-1.0-pro")
+    model = GenerativeModel("gemini-1.5-flash-001")
 
     prompt_template = (
         "Instruction: {instruction}. Article: {context}. Summary: {response}"

generative_ai/gemini_safety_config_example.py

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ def generate_text(project_id: str) -> str:
 
     vertexai.init(project=project_id, location="us-central1")
 
-    model = generative_models.GenerativeModel(model_name="gemini-1.0-pro-vision-001")
+    model = generative_models.GenerativeModel(model_name="gemini-1.5-flash-001")
 
     # Generation config
     generation_config = generative_models.GenerationConfig(
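
A sketch of how safety settings are passed alongside the generation config, using the HarmCategory and HarmBlockThreshold enums in vertexai.generative_models; the specific categories and thresholds chosen here are illustrative, not the sample's own values:

import vertexai
from vertexai import generative_models

vertexai.init(project="your-project-id", location="us-central1")

model = generative_models.GenerativeModel(model_name="gemini-1.5-flash-001")

# Generation config
generation_config = generative_models.GenerationConfig(temperature=0.0)

# Per-category block thresholds; unlisted categories keep their defaults.
safety_settings = {
    generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    generative_models.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: generative_models.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
}

response = model.generate_content(
    "Write a short, friendly poem about the ocean.",
    generation_config=generation_config,
    safety_settings=safety_settings,
)
print(response.text)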

generative_ai/gemini_tuning.py

Lines changed: 2 additions & 2 deletions
@@ -31,7 +31,7 @@ def gemini_tuning_basic(project_id: str) -> sft.SupervisedTuningJob:
     vertexai.init(project=project_id, location="us-central1")
 
     sft_tuning_job = sft.train(
-        source_model="gemini-1.0-pro-002",
+        source_model="gemini-1.5-flash-001",
         train_dataset="gs://cloud-samples-data/ai-platform/generative_ai/sft_train_data.jsonl",
     )
 
@@ -62,7 +62,7 @@ def gemini_tuning_advanced(project_id: str) -> sft.SupervisedTuningJob:
     vertexai.init(project=project_id, location="us-central1")
 
     sft_tuning_job = sft.train(
-        source_model="gemini-1.0-pro-002",
+        source_model="gemini-1.5-flash-001",
         train_dataset="gs://cloud-samples-data/ai-platform/generative_ai/sft_train_data.jsonl",
         # The following parameters are optional
         validation_dataset="gs://cloud-samples-data/ai-platform/generative_ai/sft_validation_data.jsonl",
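
Supervised tuning runs as an asynchronous managed job. A hedged sketch of kicking off the basic job on the Flash source model and polling until it finishes; the polling attributes (has_ended, refresh, tuned_model_name, tuned_model_endpoint_name) follow the sample's own loop and may differ in other SDK versions:

import time

import vertexai
from vertexai.preview.tuning import sft

vertexai.init(project="your-project-id", location="us-central1")

sft_tuning_job = sft.train(
    source_model="gemini-1.5-flash-001",
    train_dataset="gs://cloud-samples-data/ai-platform/generative_ai/sft_train_data.jsonl",
)

# Poll until the managed tuning job reaches a terminal state.
while not sft_tuning_job.has_ended:
    time.sleep(60)
    sft_tuning_job.refresh()

print(sft_tuning_job.tuned_model_name)
print(sft_tuning_job.tuned_model_endpoint_name)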

generative_ai/rag.py

Lines changed: 2 additions & 2 deletions
@@ -318,7 +318,7 @@ def generate_content_with_rag(
     )
 
     rag_model = GenerativeModel(
-        model_name="gemini-1.0-pro-002", tools=[rag_retrieval_tool]
+        model_name="gemini-1.5-flash-001", tools=[rag_retrieval_tool]
     )
     response = rag_model.generate_content("Why is the sky blue?")
     print(response.text)
@@ -392,7 +392,7 @@ def quickstart(
     )
     # Create a gemini-pro model instance
     rag_model = GenerativeModel(
-        model_name="gemini-1.0-pro-002", tools=[rag_retrieval_tool]
+        model_name="gemini-1.5-flash-001", tools=[rag_retrieval_tool]
     )
 
     # Generate response
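
The rag_retrieval_tool in these hunks is built earlier in the file from an existing RAG corpus. A heavily hedged sketch of the grounded-generation flow, assuming a corpus resource name and the preview RAG API roughly as it stood at the time of this change; the retrieval-tool constructor names (Tool.from_retrieval, rag.Retrieval, rag.VertexRagStore) may differ across SDK versions:

import vertexai
from vertexai.preview import rag
from vertexai.preview.generative_models import GenerativeModel, Tool

vertexai.init(project="your-project-id", location="us-central1")

# Assumes a corpus was already created and populated, e.g. via rag.create_corpus(...).
corpus_name = "projects/your-project-id/locations/us-central1/ragCorpora/1234"

rag_retrieval_tool = Tool.from_retrieval(
    retrieval=rag.Retrieval(
        source=rag.VertexRagStore(
            rag_corpora=[corpus_name],
            similarity_top_k=3,
        ),
    )
)

rag_model = GenerativeModel(
    model_name="gemini-1.5-flash-001", tools=[rag_retrieval_tool]
)

# The response is grounded in documents retrieved from the corpus.
response = rag_model.generate_content("Why is the sky blue?")
print(response.text)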
