Commit dfbdeab

update openai model version
1 parent 7e285fd commit dfbdeab

10 files changed: +21 -26 lines

README.md  (+1 -1)

@@ -86,7 +86,7 @@ You can configure `~/.metagpt/config2.yaml` according to the [example](https://g
 ```yaml
 llm:
   api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
-  model: "gpt-4-turbo" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
+  model: "gpt-4-turbo" # or gpt-3.5-turbo
   base_url: "https://api.openai.com/v1" # or forward url / other llm url
   api_key: "YOUR_API_KEY"
 ```
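
For orientation, the `llm` block above is what MetaGPT reads at startup. Below is a minimal, hedged sketch of consuming it from Python, assuming `Config` is importable from `metagpt.config2` (as the examples and tests later in this commit do) and that `Config.default()` picks up `~/.metagpt/config2.yaml` among its default locations:

    # Hedged sketch: read the configured model at runtime.
    from metagpt.config2 import Config

    cfg = Config.default()   # loads the YAML config, incl. ~/.metagpt/config2.yaml
    print(cfg.llm.model)     # e.g. gpt-4-turbo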

config/config2.example.yaml  (+3 -8)

@@ -2,16 +2,11 @@ llm:
   api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
   base_url: "YOUR_BASE_URL"
   api_key: "YOUR_API_KEY"
-  model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
+  model: "gpt-4-turbo" # or gpt-3.5-turbo
   proxy: "YOUR_PROXY" # for LLM API requests
   # timeout: 600 # Optional. If set to 0, default value is 300.
-  pricing_plan: "" # Optional. If invalid, it will be automatically filled in with the value of the `model`.
-  # Azure-exclusive pricing plan mappings:
-  # - gpt-3.5-turbo 4k: "gpt-3.5-turbo-1106"
-  # - gpt-4-turbo: "gpt-4-turbo-preview"
-  # - gpt-4-turbo-vision: "gpt-4-vision-preview"
-  # - gpt-4 8k: "gpt-4"
-  # See for more: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
+  # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
+  pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's

 # RAG Embedding.
 # For backward compatibility, if the embedding is not set and the llm's api_type is either openai or azure, the llm's config will be used.
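
The reworded `pricing_plan` comment targets Azure, where the deployment name stored in `model` may not match any OpenAI pricing entry. The sketch below is illustrative only (the deployment name is made up, and the fall-back-to-`model` behaviour is taken from the comment being removed above, not re-verified against the implementation):

    # Illustrative only: which name to use when looking up token prices.
    def resolve_pricing_plan(model: str, pricing_plan: str = "") -> str:
        # An empty pricing_plan falls back to the model name itself; an Azure
        # deployment can set pricing_plan to the matching OpenAI model name.
        return pricing_plan or model

    print(resolve_pricing_plan("my-azure-gpt4-deployment", "gpt-4-turbo"))  # gpt-4-turbo
    print(resolve_pricing_plan("gpt-4-turbo"))                              # gpt-4-turbo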

docs/FAQ-EN.md  (+2 -2)

@@ -38,9 +38,9 @@ MetaGPT Community - The position of Chief Evangelist rotates on a monthly basis.
 ### FAQ

 1. Code truncation/ Parsing failure:
-   1. Check if it's due to exceeding length. Consider using the gpt-4-turbo-preview or other long token versions.
+   1. Check if it's due to exceeding length. Consider using the gpt-4-turbo or other long token versions.
 2. Success rate:
-   1. There hasn't been a quantitative analysis yet, but the success rate of code generated by gpt-4-turbo-preview is significantly higher than that of gpt-3.5-turbo.
+   1. There hasn't been a quantitative analysis yet, but the success rate of code generated by gpt-4-turbo is significantly higher than that of gpt-3.5-turbo.
 3. Support for incremental, differential updates (if you wish to continue a half-done task):
    1. There is now an experimental version. Specify `--inc --project-path "<path>"` or `--inc --project-name "<name>"` on the command line and enter the corresponding requirements to try it.
 4. Can existing code be loaded?

examples/debate_simple.py  (+2 -2)

@@ -14,9 +14,9 @@
 from metagpt.team import Team

 gpt35 = Config.default()
-gpt35.llm.model = "gpt-3.5-turbo-1106"
+gpt35.llm.model = "gpt-3.5-turbo"
 gpt4 = Config.default()
-gpt4.llm.model = "gpt-4-1106-preview"
+gpt4.llm.model = "gpt-4-turbo"
 action1 = Action(config=gpt4, name="AlexSay", instruction="Express your opinion with emotion and don't repeat it")
 action2 = Action(config=gpt35, name="BobSay", instruction="Express your opinion with emotion and don't repeat it")
 alex = Role(name="Alex", profile="Democratic candidate", goal="Win the election", actions=[action1], watch=[action2])

metagpt/software_company.py  (+1 -1)

@@ -125,7 +125,7 @@ def startup(
 # Reflected Code: https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
 llm:
   api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
-  model: "gpt-4-turbo" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
+  model: "gpt-4-turbo" # or gpt-3.5-turbo
   base_url: "https://api.openai.com/v1" # or forward url / other llm url
   api_key: "YOUR_API_KEY"
 """

metagpt/utils/token_counter.py  (+4 -4)

@@ -32,9 +32,9 @@
     "gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
     "gpt-4-0613": {"prompt": 0.06, "completion": 0.12},
     "gpt-4-turbo-preview": {"prompt": 0.01, "completion": 0.03},
-    "gpt-4-turbo": {"prompt": 0.01, "completion": 0.03},
-    "gpt-4-0125-preview": {"prompt": 0.01, "completion": 0.03},
     "gpt-4-1106-preview": {"prompt": 0.01, "completion": 0.03},
+    "gpt-4-0125-preview": {"prompt": 0.01, "completion": 0.03},
+    "gpt-4-turbo": {"prompt": 0.01, "completion": 0.03},
     "gpt-4-vision-preview": {"prompt": 0.01, "completion": 0.03},  # TODO add extra image price calculator
     "gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
     "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
@@ -157,8 +157,8 @@
 TOKEN_MAX = {
     "gpt-4-0125-preview": 128000,
     "gpt-4-turbo-preview": 128000,
-    "gpt-4-turbo": 128000,
     "gpt-4-1106-preview": 128000,
+    "gpt-4-turbo": 128000,
     "gpt-4-vision-preview": 128000,
     "gpt-4-1106-vision-preview": 128000,
     "gpt-4": 8192,
@@ -221,7 +221,7 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0125"):
         "gpt-4-turbo",
         "gpt-4-turbo-preview",
         "gpt-4-0125-preview",
-        "gpt-4-1106-preview",
+        "gpt-4-turbo",
         "gpt-4-vision-preview",
         "gpt-4-1106-vision-preview",
     }:
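
With these edits, "gpt-4-turbo" keeps its row in the pricing table, its 128000-token entry in `TOKEN_MAX`, and its membership in the set used by `count_message_tokens`. A short, hedged usage sketch (module path taken from the file name above; tiktoken must be installed for the count to run):

    # Hedged sketch: token counting and context-window lookup for "gpt-4-turbo".
    from metagpt.utils.token_counter import TOKEN_MAX, count_message_tokens

    messages = [{"role": "user", "content": "hello"}]
    used = count_message_tokens(messages, model="gpt-4-turbo")
    print(used, "tokens of a", TOKEN_MAX["gpt-4-turbo"], "token window")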

tests/config2.yaml  (+1 -1)

@@ -1,7 +1,7 @@
 llm:
   base_url: "https://api.openai.com/v1"
   api_key: "sk-xxx"
-  model: "gpt-3.5-turbo-1106"
+  model: "gpt-3.5-turbo"

 search:
   api_type: "serpapi"

tests/metagpt/test_context_mixin.py  (+4 -4)

@@ -105,11 +105,11 @@ def test_config_mixin_4_multi_inheritance_override_config():
 async def test_config_priority():
     """If action's config is set, then its llm will be set, otherwise, it will use the role's llm"""
     home_dir = Path.home() / CONFIG_ROOT
-    gpt4t = Config.from_home("gpt-4-1106-preview.yaml")
+    gpt4t = Config.from_home("gpt-4-turbo.yaml")
     if not home_dir.exists():
         assert gpt4t is None
     gpt35 = Config.default()
-    gpt35.llm.model = "gpt-3.5-turbo-1106"
+    gpt35.llm.model = "gpt-4-turbo"
     gpt4 = Config.default()
     gpt4.llm.model = "gpt-4-0613"

@@ -127,8 +127,8 @@ async def test_config_priority():
     env = Environment(desc="US election live broadcast")
     Team(investment=10.0, env=env, roles=[A, B, C])

-    assert a1.llm.model == "gpt-4-1106-preview" if Path(home_dir / "gpt-4-1106-preview.yaml").exists() else "gpt-4-0613"
+    assert a1.llm.model == "gpt-4-turbo" if Path(home_dir / "gpt-4-turbo.yaml").exists() else "gpt-4-0613"
     assert a2.llm.model == "gpt-4-0613"
-    assert a3.llm.model == "gpt-3.5-turbo-1106"
+    assert a3.llm.model == "gpt-4-turbo"

     # history = await team.run(idea="Topic: climate change. Under 80 words per message.", send_to="a1", n_round=3)
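
For readers skimming the renamed fixtures: the test exercises the priority chain in which an Action's own config beats the Role's config, which beats the global default. A hedged sketch of that pattern, with constructor arguments mirroring the debate example above (the `metagpt.actions` / `metagpt.roles` import paths are assumptions):

    from metagpt.actions import Action
    from metagpt.config2 import Config
    from metagpt.roles import Role

    override = Config.default()
    override.llm.model = "gpt-4-turbo"

    a1 = Action(config=override, name="Say", instruction="State your view")  # uses gpt-4-turbo
    a2 = Action(name="Say", instruction="State your view")                   # no config: falls back to the Role's llm (the global default here)
    host = Role(name="A", profile="Host", goal="Host the debate", actions=[a1, a2])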

tests/metagpt/tools/test_ut_writer.py  (+1 -1)

@@ -56,7 +56,7 @@ async def mock_create(*args, **kwargs):
             )
         ],
         created=1706710532,
-        model="gpt-3.5-turbo-1106",
+        model="gpt-4-turbo",
         object="chat.completion",
         system_fingerprint="fp_04f9a1eebf",
         usage=CompletionUsage(completion_tokens=35, prompt_tokens=1982, total_tokens=2017),

tests/metagpt/utils/test_cost_manager.py  (+2 -2)

@@ -12,11 +12,11 @@

 def test_cost_manager():
     cm = CostManager(total_budget=20)
-    cm.update_cost(prompt_tokens=1000, completion_tokens=100, model="gpt-4-1106-preview")
+    cm.update_cost(prompt_tokens=1000, completion_tokens=100, model="gpt-4-turbo")
     assert cm.get_total_prompt_tokens() == 1000
     assert cm.get_total_completion_tokens() == 100
     assert cm.get_total_cost() == 0.013
-    cm.update_cost(prompt_tokens=100, completion_tokens=10, model="gpt-4-1106-preview")
+    cm.update_cost(prompt_tokens=100, completion_tokens=10, model="gpt-4-turbo")
     assert cm.get_total_prompt_tokens() == 1100
     assert cm.get_total_completion_tokens() == 110
     assert cm.get_total_cost() == 0.0143
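
The asserted totals follow directly from the per-1K-token prices listed for "gpt-4-turbo" in token_counter.py above (prompt 0.01, completion 0.03). A quick check of the arithmetic:

    # Reproduces the asserted costs from the per-1K-token prices above.
    prompt_price, completion_price = 0.01, 0.03  # USD per 1K tokens for "gpt-4-turbo"

    cost = (1000 * prompt_price + 100 * completion_price) / 1000
    print(round(cost, 4))  # 0.013

    cost += (100 * prompt_price + 10 * completion_price) / 1000
    print(round(cost, 4))  # 0.0143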
