Commit a08d004

Merge pull request #55 from AkhmadRamadani/master
Refactor: ensure the latest Gemini model is correctly set, fix custom model name usage, and update model constants
2 parents 785b6e5 + f59a7e5 commit a08d004
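
In practice, the commit changes which endpoint the package targets by default: the API version moves from v1beta to v1 and the default model from gemini-1.5-flash to gemini-2.5-flash. The sketch below shows how those constant values compose into a public Gemini REST URL; it uses only the values from this diff plus plain Dart string interpolation, and it is not the package's actual request-building code.

// Sketch only: how the updated defaults map onto the public Gemini REST
// endpoint. Values are copied from lib/src/config/constants.dart below;
// the package's real request code may assemble the URL differently.
void main() {
  const baseUrl = 'https://generativelanguage.googleapis.com/'; // Constants.baseUrl
  const version = 'v1'; // Constants.defaultVersion (was 'v1beta')
  const model = 'models/gemini-2.5-flash'; // Constants.defaultModel (was gemini-1.5-flash)
  const method = 'generateContent'; // Constants.defaultGenerateType

  // -> https://generativelanguage.googleapis.com/v1/models/gemini-2.5-flash:generateContent
  final endpoint = '$baseUrl$version/$model:$method';
  print(endpoint);
}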

File tree

6 files changed: +658 −597 lines

lib/src/config/constants.dart

Lines changed: 170 additions & 70 deletions
@@ -2,120 +2,220 @@ import '../models/gemini_model/gemini_model.dart';
 
 class Constants {
   Constants._();
-  static const String defaultModel = 'models/gemini-1.5-flash';
-  static const String defaultVersion = 'v1beta';
+
+  static const String defaultModel = 'models/gemini-2.5-flash';
+  static const String defaultVersion = 'v1';
   static const String defaultGenerateType = 'generateContent';
   static const String baseUrl = 'https://generativelanguage.googleapis.com/';
 
   static List<GeminiModel> get geminiDefaultModels => [
         {
-          "name": "models/chat-bison-001",
+          "name": "models/gemini-2.5-pro",
           "version": "001",
-          "displayName": "Chat Bison",
-          "description": "Chat-optimized generative language model.",
-          "inputTokenLimit": 4096,
-          "outputTokenLimit": 1024,
-          "supportedGenerationMethods": [
-            "generateMessage",
-            "countMessageTokens"
-          ],
-          "temperature": 0.25,
-          "topP": 0.95,
-          "topK": 40
+          "displayName": "Gemini 2.5 Pro",
+          "description":
+              "Our most powerful thinking model with maximum response accuracy and state-of-the-art performance. Best for complex coding, reasoning, and multimodal understanding.",
+          "inputTokenLimit": 1048576,
+          "outputTokenLimit": 65536,
+          "supportedGenerationMethods": ["generateContent", "countTokens"],
+          "temperature": 0.9,
+          "topP": 1,
+          "topK": 32
         },
         {
-          "name": "models/text-bison-001",
+          "name": "models/gemini-2.5-flash",
           "version": "001",
-          "displayName": "Text Bison",
-          "description": "Model targeted for text generation.",
-          "inputTokenLimit": 8196,
-          "outputTokenLimit": 1024,
-          "supportedGenerationMethods": [
-            "generateText",
-            "countTextTokens",
-            "createTunedTextModel"
-          ],
-          "temperature": 0.7,
-          "topP": 0.95,
-          "topK": 40
+          "displayName": "Gemini 2.5 Flash",
+          "description":
+              "Our best model in terms of price-performance, offering well-rounded capabilities. Best for low latency, high volume tasks that require thinking.",
+          "inputTokenLimit": 1048576,
+          "outputTokenLimit": 65536,
+          "supportedGenerationMethods": ["generateContent", "countTokens"],
+          "temperature": 0.4,
+          "topP": 1,
+          "topK": 32
         },
         {
-          "name": "models/embedding-gecko-001",
+          "name": "models/gemini-2.5-flash-lite",
           "version": "001",
-          "displayName": "Embedding Gecko",
-          "description": "Obtain a distributed representation of a text.",
-          "inputTokenLimit": 1024,
-          "outputTokenLimit": 1,
-          "supportedGenerationMethods": ["embedText", "countTextTokens"]
+          "displayName": "Gemini 2.5 Flash-Lite",
+          "description":
+              "A Gemini 2.5 Flash model optimized for cost-efficiency and high throughput.",
+          "inputTokenLimit": 1048576,
+          "outputTokenLimit": 65536,
+          "supportedGenerationMethods": ["generateContent", "countTokens"],
+          "temperature": 0.4,
+          "topP": 1,
+          "topK": 32
         },
         {
-          "name": "models/embedding-gecko-002",
-          "version": "002",
-          "displayName": "Embedding Gecko 002",
-          "description": "Obtain a distributed representation of a text.",
-          "inputTokenLimit": 2048,
-          "outputTokenLimit": 1,
-          "supportedGenerationMethods": ["embedText", "countTextTokens"]
+          "name": "models/gemini-2.0-flash-001",
+          "version": "001",
+          "displayName": "Gemini 2.0 Flash",
+          "description":
+              "Second generation model with next-gen features including superior speed, native tool use, and 1M token context window.",
+          "inputTokenLimit": 1048576,
+          "outputTokenLimit": 8192,
+          "supportedGenerationMethods": ["generateContent", "countTokens"],
+          "temperature": 0.4,
+          "topP": 1,
+          "topK": 32
         },
         {
-          "name": "models/gemini-pro",
+          "name": "models/gemini-2.0-flash-lite-001",
           "version": "001",
-          "displayName": "Gemini Pro",
+          "displayName": "Gemini 2.0 Flash-Lite",
           "description":
-              "The best model for scaling across a wide range of tasks",
-          "inputTokenLimit": 30720,
-          "outputTokenLimit": 2048,
+              "A Gemini 2.0 Flash model optimized for cost efficiency and low latency.",
+          "inputTokenLimit": 1048576,
+          "outputTokenLimit": 8192,
           "supportedGenerationMethods": ["generateContent", "countTokens"],
-          "temperature": 0.9,
+          "temperature": 0.4,
           "topP": 1,
-          "topK": 1
+          "topK": 32
         },
         {
           "name": "models/gemini-1.5-flash",
-          "version": "001",
-          "displayName": "Gemini Pro Vision",
+          "version": "002",
+          "displayName": "Gemini 1.5 Flash",
           "description":
-              "The best image understanding model to handle a broad range of applications",
-          "inputTokenLimit": 12288,
-          "outputTokenLimit": 4096,
+              "Fast and versatile multimodal model for scaling across diverse tasks. (Previous generation)",
+          "inputTokenLimit": 1048576,
+          "outputTokenLimit": 8192,
           "supportedGenerationMethods": ["generateContent", "countTokens"],
           "temperature": 0.4,
           "topP": 1,
           "topK": 32
         },
         {
-          "name": "models/gemini-ultra",
-          "version": "001",
-          "displayName": "Gemini Ultra",
-          "description": "The most capable model for highly complex tasks",
-          "inputTokenLimit": 30720,
-          "outputTokenLimit": 2048,
+          "name": "models/gemini-1.5-pro",
+          "version": "002",
+          "displayName": "Gemini 1.5 Pro",
+          "description":
+              "Mid-size multimodal model optimized for wide-range reasoning tasks. Can process large amounts of data. (Previous generation)",
+          "inputTokenLimit": 2097152,
+          "outputTokenLimit": 8192,
           "supportedGenerationMethods": ["generateContent", "countTokens"],
           "temperature": 0.9,
           "topP": 1,
           "topK": 32
         },
         {
-          "name": "models/embedding-001",
+          "name": "models/gemini-embedding-001",
           "version": "001",
-          "displayName": "Embedding 001",
-          "description": "Obtain a distributed representation of a text.",
+          "displayName": "Gemini Embedding",
+          "description":
+              "Obtain a distributed representation of a text with latest embedding capabilities.",
           "inputTokenLimit": 2048,
           "outputTokenLimit": 1,
-          "supportedGenerationMethods": ["embedContent", "countTextTokens"]
+          "supportedGenerationMethods": ["embedContent", "countTokens"]
         },
         {
-          "name": "models/aqa",
+          "name": "models/text-embedding-005",
+          "version": "005",
+          "displayName": "Text Embedding 005",
+          "description":
+              "Latest text embedding model with improved performance.",
+          "inputTokenLimit": 2048,
+          "outputTokenLimit": 1,
+          "supportedGenerationMethods": ["embedContent", "countTokens"]
+        },
+        {
+          "name": "models/text-embedding-004",
+          "version": "004",
+          "displayName": "Text Embedding 004",
+          "description": "Text embedding model with robust performance.",
+          "inputTokenLimit": 2048,
+          "outputTokenLimit": 1,
+          "supportedGenerationMethods": ["embedContent", "countTokens"]
+        },
+        {
+          "name": "models/text-multilingual-embedding-002",
+          "version": "002",
+          "displayName": "Multilingual Text Embedding",
+          "description":
+              "Multilingual text embedding model supporting various languages.",
+          "inputTokenLimit": 2048,
+          "outputTokenLimit": 1,
+          "supportedGenerationMethods": ["embedContent", "countTokens"]
+        }
+      ].map((e) => GeminiModel.fromJson(e)).toList();
+
+  static List<GeminiModel> get geminiLiveModels => [
+        {
+          "name": "models/gemini-live-2.5-flash-preview",
+          "version": "preview",
+          "displayName": "Gemini 2.5 Flash Live",
+          "description":
+              "Low-latency bidirectional voice and video interactions with Gemini 2.5 Flash.",
+          "inputTokenLimit": 1048576,
+          "outputTokenLimit": 8192,
+          "supportedGenerationMethods": ["generateContent", "liveApi"],
+          "temperature": 0.4,
+          "topP": 1,
+          "topK": 32
+        },
+        {
+          "name": "models/gemini-2.0-flash-live-001",
           "version": "001",
-          "displayName": "Model that performs Attributed Question Answering.",
+          "displayName": "Gemini 2.0 Flash Live",
           "description":
-              "Model trained to return answers to questions that are grounded in provided sources, along with estimating answerable probability.",
-          "inputTokenLimit": 7168,
-          "outputTokenLimit": 1024,
-          "supportedGenerationMethods": ["generateAnswer"],
-          "temperature": 0.2,
+              "Low-latency bidirectional voice and video interactions with Gemini 2.0 Flash.",
+          "inputTokenLimit": 1048576,
+          "outputTokenLimit": 8192,
+          "supportedGenerationMethods": ["generateContent", "liveApi"],
+          "temperature": 0.4,
           "topP": 1,
-          "topK": 40
+          "topK": 32
+        }
+      ].map((e) => GeminiModel.fromJson(e)).toList();
+
+  static List<GeminiModel> get geminiSpecializedModels => [
+        {
+          "name": "models/gemini-2.5-flash-image-preview",
+          "version": "preview",
+          "displayName": "Gemini 2.5 Flash Image",
+          "description":
+              "Generate and edit images conversationally with Gemini 2.5 Flash.",
+          "inputTokenLimit": 32768,
+          "outputTokenLimit": 32768,
+          "supportedGenerationMethods": ["generateContent", "imageGeneration"]
+        },
+        {
+          "name": "models/gemini-2.0-flash-preview-image-generation",
+          "version": "preview",
+          "displayName": "Gemini 2.0 Flash Image Generation",
+          "description":
+              "Generate and edit images conversationally with Gemini 2.0 Flash.",
+          "inputTokenLimit": 32000,
+          "outputTokenLimit": 8192,
+          "supportedGenerationMethods": ["generateContent", "imageGeneration"]
+        },
+        {
+          "name": "models/gemini-2.5-flash-preview-tts",
+          "version": "preview",
+          "displayName": "Gemini 2.5 Flash TTS",
+          "description":
+              "Price-performant text-to-speech model with high control and transparency.",
+          "inputTokenLimit": 8000,
+          "outputTokenLimit": 16000,
+          "supportedGenerationMethods": ["textToSpeech"]
+        },
+        {
+          "name": "models/gemini-2.5-pro-preview-tts",
+          "version": "preview",
+          "displayName": "Gemini 2.5 Pro TTS",
+          "description":
+              "Most powerful text-to-speech model with high control and transparency.",
+          "inputTokenLimit": 8000,
+          "outputTokenLimit": 16000,
+          "supportedGenerationMethods": ["textToSpeech"]
         }
       ].map((e) => GeminiModel.fromJson(e)).toList();
+
+  static List<GeminiModel> get allGeminiModels => [
+        ...geminiDefaultModels,
+        ...geminiLiveModels,
+        ...geminiSpecializedModels,
+      ];
 }
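
The new allGeminiModels getter makes it straightforward to validate a custom model name against every model the package ships constants for, which is the behavior the "fix custom model name usage" part of this commit targets. The snippet below is a hypothetical usage sketch, not code from this repository: resolveModel is an illustrative helper, it assumes Constants and GeminiModel are imported from the package, and it assumes GeminiModel exposes the parsed name field.

// Hypothetical usage sketch (not part of this commit): resolve a
// user-supplied model name against the bundled model lists, falling back
// to the package default when the name is unknown.
// Assumes Constants and GeminiModel are imported from this package.
String resolveModel(String? custom) {
  if (custom == null || custom.isEmpty) return Constants.defaultModel;
  // Accept either 'gemini-2.5-pro' or 'models/gemini-2.5-pro'.
  final wanted = custom.startsWith('models/') ? custom : 'models/$custom';
  // GeminiModel.name is assumed to hold the "name" value parsed by fromJson.
  final known = Constants.allGeminiModels.any((m) => m.name == wanted);
  return known ? wanted : Constants.defaultModel;
}

void main() {
  print(resolveModel('gemini-2.5-pro'));        // models/gemini-2.5-pro
  print(resolveModel('models/does-not-exist')); // models/gemini-2.5-flash (default)
}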
