 
 """Google Cloud Vertex AI sample for generating an image using only
 descriptive text as an input.
-Example usage:
-    python generate_image.py --project_id <project-id> --location <location> \
-        --output_file <filepath> --prompt <text>
 """
 
-# [START generativeaionvertexai_imagen_generate_image]
+from vertexai.preview import vision_models
 
-import argparse
 
-import vertexai
-from vertexai.preview.vision_models import ImageGenerationModel
+def generate_image(
+    project_id: str, output_file: str, prompt: str
+) -> vision_models.ImageGenerationResponse:
 
+    # [START generativeaionvertexai_imagen_generate_image]
 
-def generate_image(
-    project_id: str, location: str, output_file: str, prompt: str
-) -> vertexai.preview.vision_models.ImageGenerationResponse:
-    """Generate an image using a text prompt.
-    Args:
-        project_id: Google Cloud project ID, used to initialize Vertex AI.
-        location: Google Cloud region, used to initialize Vertex AI.
-        output_file: Local path to the output image file.
-        prompt: The text prompt describing what you want to see."""
+    import vertexai
+    from vertexai.preview.vision_models import ImageGenerationModel
+
+    # TODO(developer): Update and un-comment below lines
+    # project_id = "PROJECT_ID"
+    # output_file = "my-output.png"
+    # prompt = "" # The text prompt describing what you want to see.
 
-    vertexai.init(project=project_id, location=location)
+    vertexai.init(project=project_id, location="us-central1")
 
     model = ImageGenerationModel.from_pretrained("imagegeneration@006")
 
     images = model.generate_images(
         prompt=prompt,
         # Optional parameters
         number_of_images=1,
-        language="en", # prompt language
-        # By default, a SynthID watermark is added to images, but you can
-        # disable it. You can't use a seed value and watermark at the same time.
+        language="en",
+        # You can't use a seed value and watermark at the same time.
         # add_watermark=False,
         # seed=100,
-        aspect_ratio="1:1", # "9:16" "16:9" "4:3" "3:4"
-        # Adds a filter level to Safety filtering: "block_most" (most strict blocking),
-        # "block_some" (default), "block_few", or "block_fewest" (available to
-        # allowlisted users only).
+        aspect_ratio="1:1",
         safety_filter_level="block_some",
-        # Allows generation of people by the model: "dont_allow" (block
-        # all people), "allow_adult" (default; allow adults but not children),
-        # "allow_all" (available to allowlisted users only; allow adults and children)
         person_generation="allow_adult",
     )
 
-    images[0].save(location=output_file, include_generation_parameters=True)
+    images[0].save(location=output_file, include_generation_parameters=False)
 
     # Optional. View the generated image in a notebook.
     # images[0].show()
 
     print(f"Created output image using {len(images[0]._image_bytes)} bytes")
 
-    return images
-
-
-# [END generativeaionvertexai_imagen_generate_image]
+    # [END generativeaionvertexai_imagen_generate_image]
 
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--project_id", help="Your Cloud project ID.", required=True)
-    parser.add_argument(
-        "--location",
-        help="The location in which to initialize Vertex AI.",
-        default="us-central1",
-    )
-    parser.add_argument(
-        "--output_file",
-        help="The local path to the output file (e.g., 'my-output.png').",
-        required=True,
-    )
-    parser.add_argument(
-        "--prompt",
-        help="The text prompt describing what you want to see (e.g., 'a dog reading a newspaper').",
-        required=True,
-    )
-    args = parser.parse_args()
-    generate_image(
-        args.project_id,
-        args.location,
-        args.output_file,
-        args.prompt,
-    )
+    return images
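With the argparse entry point and the --location flag removed, the sample is now invoked by calling generate_image() directly; the region is fixed to "us-central1" inside the function. A minimal usage sketch, assuming the module keeps the generate_image.py name from the old usage string; the project ID below is a placeholder, and the output path and prompt reuse the examples already present in the sample's comments and removed help text:

    from generate_image import generate_image

    # Placeholder values for illustration only.
    response = generate_image(
        project_id="your-project-id",        # your Google Cloud project ID
        output_file="my-output.png",         # local path for the generated image
        prompt="a dog reading a newspaper",  # text description of what you want to see
    )
    # response is the ImageGenerationResponse returned by model.generate_images().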