Skip to content

Commit 8182b67

Browse files
authored
Merge pull request #57 from cequence-io/token_count_polishing
Use ModelId constants instead of string literals
2 parents b29b780 + e4922e3 commit 8182b67

File tree

1 file changed

+78
-79
lines changed

1 file changed

+78
-79
lines changed

openai-count-tokens/src/test/scala/io/cequence/openaiscala/service/OpenAICountTokensServiceSpec.scala

Lines changed: 78 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,8 @@ import akka.testkit.TestKit
44
import io.cequence.openaiscala.domain.{
55
AssistantMessage,
66
BaseMessage,
7-
ChatRole,
87
FunctionSpec,
9-
MessageSpec,
8+
ModelId,
109
SystemMessage,
1110
UserMessage
1211
}
@@ -102,7 +101,7 @@ class OpenAICountTokensServiceSpec
102101
messages: Seq[BaseMessage],
103102
expectedTokens: Int,
104103
responseFunctionName: Option[String] = None,
105-
model: String = "gpt-3.5-turbo"
104+
model: String = ModelId.gpt_3_5_turbo
106105
): Unit = {
107106
val countedTokens = countFunMessageTokens(
108107
model,
@@ -142,32 +141,32 @@ class OpenAICountTokensServiceSpec
142141
"You are a helpful consultant assisting with the translation of corporate jargon into plain English."
143142
)
144143

145-
// TODO: add gpt-4-turno-2024-04-09, gpt-4-0125-preview, gpt-4-1106-preview
144+
// TODO: add gpt-4-turbo-2024-04-09, gpt-4-0125-preview, gpt-4-1106-preview
146145
"count tokens for a system message" in new TestCase {
147146
checkTokensForMessageCall(
148147
chat(systemMessage),
149-
"gpt-4-turno-2024-04-09" -> 24,
150-
"gpt-4-1106-preview" -> 24,
151-
"gpt-4-0613" -> 24,
152-
"gpt-4-0125-preview" -> 24,
153-
"gpt-4" -> 24,
154-
"gpt-3.5-turbo" -> 24,
155-
"gpt-3.5-turbo-0301" -> 25,
156-
"gpt-3.5-turbo-0613" -> 24
148+
ModelId.gpt_4_turbo_2024_04_09 -> 24,
149+
ModelId.gpt_4_1106_preview -> 24,
150+
ModelId.gpt_4_0613 -> 24,
151+
ModelId.gpt_4_0125_preview -> 24,
152+
ModelId.gpt_4 -> 24,
153+
ModelId.gpt_3_5_turbo -> 24,
154+
ModelId.gpt_3_5_turbo_0301 -> 25,
155+
ModelId.gpt_3_5_turbo_0613 -> 24
157156
)
158157
}
159158

160159
"count tokens for a named system message" in new TestCase {
161160
checkTokensForMessageCall(
162161
chat(systemMessage.withName("James")),
163-
"gpt-4-turno-2024-04-09" -> 26,
164-
"gpt-4-1106-preview" -> 26,
165-
"gpt-4-0613" -> 26,
166-
"gpt-4-0125-preview" -> 26,
167-
"gpt-4" -> 26,
168-
"gpt-3.5-turbo" -> 26,
169-
"gpt-3.5-turbo-0301" -> 25,
170-
"gpt-3.5-turbo-0613" -> 26
162+
ModelId.gpt_4_turbo_2024_04_09 -> 26,
163+
ModelId.gpt_4_1106_preview -> 26,
164+
ModelId.gpt_4_0613 -> 26,
165+
ModelId.gpt_4_0125_preview -> 26,
166+
ModelId.gpt_4 -> 26,
167+
ModelId.gpt_3_5_turbo -> 26,
168+
ModelId.gpt_3_5_turbo_0301 -> 25,
169+
ModelId.gpt_3_5_turbo_0613 -> 26
171170
)
172171
}
173172

@@ -178,28 +177,28 @@ class OpenAICountTokensServiceSpec
178177
"count tokens for a user message" in new TestCase {
179178
checkTokensForMessageCall(
180179
chat(userMessage),
181-
"gpt-4-turno-2024-04-09" -> 33,
182-
"gpt-4-1106-preview" -> 33,
183-
"gpt-4-0613" -> 33,
184-
"gpt-4-0125-preview" -> 33,
185-
"gpt-4" -> 33,
186-
"gpt-3.5-turbo" -> 33,
187-
"gpt-3.5-turbo-0301" -> 34,
188-
"gpt-3.5-turbo-0613" -> 33
180+
ModelId.gpt_4_turbo_2024_04_09 -> 33,
181+
ModelId.gpt_4_1106_preview -> 33,
182+
ModelId.gpt_4_0613 -> 33,
183+
ModelId.gpt_4_0125_preview -> 33,
184+
ModelId.gpt_4 -> 33,
185+
ModelId.gpt_3_5_turbo -> 33,
186+
ModelId.gpt_3_5_turbo_0301 -> 34,
187+
ModelId.gpt_3_5_turbo_0613 -> 33
189188
)
190189
}
191190

192191
"count tokens for a user message with name" in new TestCase {
193192
checkTokensForMessageCall(
194193
chat(userMessage.withName("Alice")),
195-
"gpt-4-turno-2024-04-09" -> 35,
196-
"gpt-4-1106-preview" -> 35,
197-
"gpt-4-0613" -> 35,
198-
"gpt-4-0125-preview" -> 35,
199-
"gpt-4" -> 35,
200-
"gpt-3.5-turbo" -> 35,
201-
"gpt-3.5-turbo-0301" -> 34,
202-
"gpt-3.5-turbo-0613" -> 35
194+
ModelId.gpt_4_turbo_2024_04_09 -> 35,
195+
ModelId.gpt_4_1106_preview -> 35,
196+
ModelId.gpt_4_0613 -> 35,
197+
ModelId.gpt_4_0125_preview -> 35,
198+
ModelId.gpt_4 -> 35,
199+
ModelId.gpt_3_5_turbo -> 35,
200+
ModelId.gpt_3_5_turbo_0301 -> 34,
201+
ModelId.gpt_3_5_turbo_0613 -> 35
203202
)
204203
}
205204

@@ -210,56 +209,56 @@ class OpenAICountTokensServiceSpec
210209
"count tokens for an assistant message" in new TestCase {
211210
checkTokensForMessageCall(
212211
chat(assistantMessage),
213-
"gpt-4-turno-2024-04-09" -> 44,
214-
"gpt-4-1106-preview" -> 44,
215-
"gpt-4-0613" -> 44,
216-
"gpt-4-0125-preview" -> 44,
217-
"gpt-4" -> 44,
218-
"gpt-3.5-turbo" -> 44,
219-
"gpt-3.5-turbo-0301" -> 45,
220-
"gpt-3.5-turbo-0613" -> 44
212+
ModelId.gpt_4_turbo_2024_04_09 -> 44,
213+
ModelId.gpt_4_1106_preview -> 44,
214+
ModelId.gpt_4_0613 -> 44,
215+
ModelId.gpt_4_0125_preview -> 44,
216+
ModelId.gpt_4 -> 44,
217+
ModelId.gpt_3_5_turbo -> 44,
218+
ModelId.gpt_3_5_turbo_0301 -> 45,
219+
ModelId.gpt_3_5_turbo_0613 -> 44
221220
)
222221
}
223222

224223
"count tokens for an assistant message with name" in new TestCase {
225224
checkTokensForMessageCall(
226225
chat(assistantMessage.withName("Bob")),
227-
"gpt-4-turno-2024-04-09" -> 46,
228-
"gpt-4-1106-preview" -> 46,
229-
"gpt-4-0613" -> 46,
230-
"gpt-4-0125-preview" -> 46,
231-
"gpt-4" -> 46,
232-
"gpt-3.5-turbo" -> 46,
233-
"gpt-3.5-turbo-0301" -> 45,
234-
"gpt-3.5-turbo-0613" -> 46
226+
ModelId.gpt_4_turbo_2024_04_09 -> 46,
227+
ModelId.gpt_4_1106_preview -> 46,
228+
ModelId.gpt_4_0613 -> 46,
229+
ModelId.gpt_4_0125_preview -> 46,
230+
ModelId.gpt_4 -> 46,
231+
ModelId.gpt_3_5_turbo -> 46,
232+
ModelId.gpt_3_5_turbo_0301 -> 45,
233+
ModelId.gpt_3_5_turbo_0613 -> 46
235234
)
236235
}
237236

238237
"count tokens of a chat with two messages" in new TestCase {
239238
checkTokensForMessageCall(
240239
chat(systemMessage, userMessage),
241-
"gpt-4-turno-2024-04-09" -> 54,
242-
"gpt-4-1106-preview" -> 54,
243-
"gpt-4-0613" -> 54,
244-
"gpt-4-0125-preview" -> 54,
245-
"gpt-4" -> 54,
246-
"gpt-3.5-turbo" -> 54,
247-
"gpt-3.5-turbo-0301" -> 56,
248-
"gpt-3.5-turbo-0613" -> 54
240+
ModelId.gpt_4_turbo_2024_04_09 -> 54,
241+
ModelId.gpt_4_1106_preview -> 54,
242+
ModelId.gpt_4_0613 -> 54,
243+
ModelId.gpt_4_0125_preview -> 54,
244+
ModelId.gpt_4 -> 54,
245+
ModelId.gpt_3_5_turbo -> 54,
246+
ModelId.gpt_3_5_turbo_0301 -> 56,
247+
ModelId.gpt_3_5_turbo_0613 -> 54
249248
)
250249
}
251250

252251
"count tokens of a chat with two messages with names" in new TestCase {
253252
checkTokensForMessageCall(
254253
chat(systemMessage.withName("James"), userMessage.withName("Alice")),
255-
"gpt-4-turno-2024-04-09" -> 58,
256-
"gpt-4-1106-preview" -> 58,
257-
"gpt-4-0613" -> 58,
258-
"gpt-4-0125-preview" -> 58,
259-
"gpt-4" -> 58,
260-
"gpt-3.5-turbo" -> 58,
261-
"gpt-3.5-turbo-0301" -> 56,
262-
"gpt-3.5-turbo-0613" -> 58
254+
ModelId.gpt_4_turbo_2024_04_09 -> 58,
255+
ModelId.gpt_4_1106_preview -> 58,
256+
ModelId.gpt_4_0613 -> 58,
257+
ModelId.gpt_4_0125_preview -> 58,
258+
ModelId.gpt_4 -> 58,
259+
ModelId.gpt_3_5_turbo -> 58,
260+
ModelId.gpt_3_5_turbo_0301 -> 56,
261+
ModelId.gpt_3_5_turbo_0613 -> 58
263262
)
264263
}
265264

@@ -285,14 +284,14 @@ class OpenAICountTokensServiceSpec
285284
"count tokens of a chat with multiple messages" in new TestCase {
286285
checkTokensForMessageCall(
287286
openAICookbookTestCaseMessages,
288-
"gpt-4-turno-2024-04-09" -> 129,
289-
"gpt-4-1106-preview" -> 129,
290-
"gpt-4-0613" -> 129,
291-
"gpt-4-0125-preview" -> 129,
292-
"gpt-4" -> 129,
293-
"gpt-3.5-turbo" -> 129,
294-
"gpt-3.5-turbo-0301" -> 127,
295-
"gpt-3.5-turbo-0613" -> 129
287+
ModelId.gpt_4_turbo_2024_04_09 -> 129,
288+
ModelId.gpt_4_1106_preview -> 129,
289+
ModelId.gpt_4_0613 -> 129,
290+
ModelId.gpt_4_0125_preview -> 129,
291+
ModelId.gpt_4 -> 129,
292+
ModelId.gpt_3_5_turbo -> 129,
293+
ModelId.gpt_3_5_turbo_0301 -> 127,
294+
ModelId.gpt_3_5_turbo_0613 -> 129
296295
)
297296
}
298297

@@ -495,7 +494,7 @@ class OpenAICountTokensServiceSpec
495494
Seq(function1),
496495
messages,
497496
expectedTokens = 46,
498-
model = "gpt-4"
497+
model = ModelId.gpt_4
499498
)
500499
}
501500

@@ -522,7 +521,7 @@ class OpenAICountTokensServiceSpec
522521

523522
val messages: Seq[BaseMessage] = Seq(UserMessage("hello"))
524523

525-
private val model = "gpt-3.5-turbo"
524+
private val model = ModelId.gpt_3_5_turbo
526525
private val responseFunctionName = Some("function")
527526
checkTokensForFunctionCall(
528527
Seq(function1),
@@ -566,7 +565,7 @@ class OpenAICountTokensServiceSpec
566565

567566
val messages: Seq[BaseMessage] = Seq(UserMessage("hello"))
568567

569-
private val model = "gpt-3.5-turbo"
568+
private val model = ModelId.gpt_3_5_turbo
570569
private val responseFunctionName = Some("function")
571570
checkTokensForFunctionCall(
572571
Seq(function1),

0 commit comments

Comments (0)