
Commit 33ed54d

fix: update grok3 name for reasoning
Parent: 9b6013c

File tree

3 files changed: +3 -3 lines

  bigcodebench/gen/util/openai_request.py
  bigcodebench/generate.py
  bigcodebench/provider/openai.py

bigcodebench/gen/util/openai_request.py (1 addition, 1 deletion)

@@ -17,7 +17,7 @@ def make_request(
         kwargs["top_p"] = 0.95
         kwargs["max_completion_tokens"] = max_tokens
         kwargs["temperature"] = temperature
-    if any(model.startswith(m) or model.endswith(m) for m in ["o1-", "o3-", "reasoner", "grok-3-mini"]): # pop top-p and max_completion_tokens
+    if any(model.startswith(m) or model.endswith(m) for m in ["o1-", "o3-", "reasoner", "grok-3-mini-beta"]): # pop top-p and max_completion_tokens
         kwargs.pop("top_p")
         kwargs.pop("max_completion_tokens")
         kwargs.pop("temperature")
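For context, here is a minimal standalone sketch (not part of the commit) of how this prefix/suffix check classifies model names; the sample names in the asserts are illustrative only:

# Sketch of the matching rule used above, with illustrative model names.
REASONING_MARKERS = ["o1-", "o3-", "reasoner", "grok-3-mini-beta"]

def is_reasoning_model(model: str) -> bool:
    # A name matches if it starts or ends with any marker.
    return any(model.startswith(m) or model.endswith(m) for m in REASONING_MARKERS)

assert is_reasoning_model("o1-mini")            # prefix match on "o1-"
assert is_reasoning_model("deepseek-reasoner")  # suffix match on "reasoner"
assert is_reasoning_model("grok-3-mini-beta")   # matches the renamed marker
assert not is_reasoning_model("gpt-4o")         # no marker matches

Under the old marker "grok-3-mini", names merely starting with that string would also match; pinning the marker to "grok-3-mini-beta" restricts the match to the beta-suffixed name.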

bigcodebench/generate.py (1 addition, 1 deletion)

@@ -197,7 +197,7 @@ def run_codegen(
     )

     extra = "-" + subset if subset != "full" else ""
-    if backend == "openai" and reasoning_effort and any(model.startswith(m) or model.endswith(m) for m in ["o1-", "o3-", "reasoner", "grok-3-mini"]):
+    if backend == "openai" and reasoning_effort and any(model.startswith(m) or model.endswith(m) for m in ["o1-", "o3-", "reasoner", "grok-3-mini-beta"]):
         model = model + f"--{reasoning_effort}"

     if lora_path:
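A quick illustration of the renaming this branch performs (the effort value below is hypothetical):

# With the updated marker list, a grok-3-mini-beta run with a reasoning
# effort set gets the effort appended to its model name.
model = "grok-3-mini-beta"
reasoning_effort = "high"  # illustrative value
model = model + f"--{reasoning_effort}"
print(model)  # grok-3-mini-beta--high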

bigcodebench/provider/openai.py (1 addition, 1 deletion)

@@ -28,7 +28,7 @@ def codegen(
             tokenizer=None,
         ) for prompt in prompts]
         # use concurrency based batching for o1 and deepseek models
-        if any(self.name.startswith(model) or self.name.endswith(model) for model in ["o1-", "o3-", "reasoner", "grok-3-mini"]):
+        if any(self.name.startswith(model) or self.name.endswith(model) for model in ["o1-", "o3-", "reasoner", "grok-3-mini-beta"]):
             return self._codegen_batch_via_concurrency(messages, num_samples)

         return self._codegen_api_batch(messages, num_samples)
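The body of _codegen_batch_via_concurrency is not shown in this commit. As a rough sketch, concurrency-based batching of this kind typically fans single-sample requests out to a thread pool instead of issuing one batched API call; request_fn below is a hypothetical stand-in for a single-request helper, not an actual function from this repository:

from concurrent.futures import ThreadPoolExecutor

def codegen_batch_via_concurrency(messages, num_samples, request_fn):
    # Submit one single-sample request per (message, sample) pair and
    # collect the results in submission order.
    with ThreadPoolExecutor(max_workers=8) as pool:
        futures = [
            pool.submit(request_fn, message)
            for message in messages
            for _ in range(num_samples)
        ]
        return [f.result() for f in futures]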
