Commit 96a169a

Reordered fields so they match the order in the official documentation, giving a better overview of what's there and what's missing. (TheoKanning#388)
1 parent fe0c62f

File tree

1 file changed (+41 -42)

api/src/main/java/com/theokanning/openai/completion/chat/ChatCompletionRequest.java

Lines changed: 41 additions & 42 deletions
@@ -15,11 +15,6 @@
 @NoArgsConstructor
 public class ChatCompletionRequest {
 
-    /**
-     * ID of the model to use.
-     */
-    String model;
-
     /**
      * The messages to generate chat completions for, in the <a
      * href="https://platform.openai.com/docs/guides/chat/introduction">chat format</a>.<br>
@@ -28,36 +23,25 @@ public class ChatCompletionRequest {
     List<ChatMessage> messages;
 
     /**
-     * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower
-     * values like 0.2 will make it more focused and deterministic.<br>
-     * We generally recommend altering this or top_p but not both.
-     */
-    Double temperature;
-
-    /**
-     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens
-     * with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.<br>
-     * We generally recommend altering this or temperature but not both.
-     */
-    @JsonProperty("top_p")
-    Double topP;
-
-    /**
-     * How many chat completion chatCompletionChoices to generate for each input message.
+     * ID of the model to use.
      */
-    Integer n;
+    String model;
 
     /**
-     * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only <a
-     * href="https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format">server-sent
-     * events</a> as they become available, with the stream terminated by a data: [DONE] message.
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
+     * decreasing the model's likelihood to repeat the same line verbatim.
      */
-    Boolean stream;
+    @JsonProperty("frequency_penalty")
+    Double frequencyPenalty;
 
     /**
-     * Up to 4 sequences where the API will stop generating further tokens.
+     * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100
+     * to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will
+     * vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100
+     * should result in a ban or exclusive selection of the relevant token.
      */
-    List<String> stop;
+    @JsonProperty("logit_bias")
+    Map<String, Integer> logitBias;
 
     /**
      * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will
@@ -66,6 +50,11 @@ public class ChatCompletionRequest {
     @JsonProperty("max_tokens")
     Integer maxTokens;
 
+    /**
+     * How many chat completion chatCompletionChoices to generate for each input message.
+     */
+    Integer n;
+
     /**
      * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
      * increasing the model's likelihood to talk about new topics.
@@ -74,38 +63,48 @@ public class ChatCompletionRequest {
     Double presencePenalty;
 
     /**
-     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
-     * decreasing the model's likelihood to repeat the same line verbatim.
+     * Up to 4 sequences where the API will stop generating further tokens.
      */
-    @JsonProperty("frequency_penalty")
-    Double frequencyPenalty;
+    List<String> stop;
 
     /**
-     * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100
-     * to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will
-     * vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100
-     * should result in a ban or exclusive selection of the relevant token.
+     * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only <a
+     * href="https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format">server-sent
+     * events</a> as they become available, with the stream terminated by a data: [DONE] message.
      */
-    @JsonProperty("logit_bias")
-    Map<String, Integer> logitBias;
+    Boolean stream;
 
+    /**
+     * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower
+     * values like 0.2 will make it more focused and deterministic.<br>
+     * We generally recommend altering this or top_p but not both.
+     */
+    Double temperature;
 
     /**
-     * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
+     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens
+     * with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.<br>
+     * We generally recommend altering this or temperature but not both.
      */
-    String user;
+    @JsonProperty("top_p")
+    Double topP;
 
     /**
-     * A list of the available functions.
+     * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
      */
-    List<?> functions;
+    String user;
 
     /**
      * Controls how the model responds to function calls, as specified in the <a href="https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call">OpenAI documentation</a>.
      */
     @JsonProperty("function_call")
     ChatCompletionRequestFunctionCall functionCall;
 
+    /**
+     * A list of the available functions.
+     */
+    List<?> functions;
+
     @Data
     @Builder
     @AllArgsConstructor
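
For context, a minimal usage sketch (not part of this commit) of how these fields are typically populated through the class's Lombok builder, with the setters listed in the same documentation order the commit establishes. The model ID, message contents, and parameter values are illustrative, and the two-argument ChatMessage constructor is assumed from this library's API:

import java.util.List;
import java.util.Map;

import com.theokanning.openai.completion.chat.ChatCompletionRequest;
import com.theokanning.openai.completion.chat.ChatMessage;

public class ChatRequestExample {
    public static void main(String[] args) {
        // Messages in the chat format referenced by the javadoc above;
        // the (role, content) constructor is an assumption for illustration.
        List<ChatMessage> messages = List.of(
                new ChatMessage("system", "You are a helpful assistant."),
                new ChatMessage("user", "Say hello."));

        // Setters appear in the same order as the reordered fields.
        ChatCompletionRequest request = ChatCompletionRequest.builder()
                .messages(messages)
                .model("gpt-3.5-turbo")           // illustrative model ID
                .frequencyPenalty(0.0)            // -2.0 to 2.0
                .logitBias(Map.of("50256", -100)) // token ID -> bias; -100 effectively bans the token
                .maxTokens(256)
                .n(1)                             // one completion per input message
                .presencePenalty(0.0)             // -2.0 to 2.0
                .stop(List.of("\n\n"))            // up to 4 stop sequences
                .stream(false)
                .temperature(0.2)                 // alter this or topP, but not both
                .user("example-user")
                .build();
        // request is now ready to hand to the library's chat completion call.
    }
}

Reading the setters in the order of the official API reference is the point of the reorder: a parameter the class does not yet expose stands out immediately when compared side by side with the documentation.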
