
Commit edc2656

server : add option to time limit the generation phase (#9865)
ggml-ci
1 parent 1bde94d commit edc2656

2 files changed: +46 −6 lines


examples/server/README.md (+2)

```diff
@@ -374,6 +374,8 @@ node index.js
 
 `min_keep`: If greater than 0, force samplers to return N possible tokens at minimum. Default: `0`
 
+`t_max_predict_ms`: Set a time limit in milliseconds for the prediction (a.k.a. text-generation) phase. The timeout triggers if generation takes longer than the specified time (measured since the first token was generated) and a new-line character has already been generated. Useful for FIM applications. Default: `0`, which disables the limit.
+
 `image_data`: An array of objects to hold base64-encoded image `data` and their `id`s to be referenced in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:`. In this case, `[img-12]` will be replaced by the embeddings of the image with id `12` in the following `image_data` array: `{..., "image_data": [{"data": "<BASE64_STRING>", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA.
 
 `id_slot`: Assign the completion task to a specific slot. If it is -1 the task will be assigned to an idle slot. Default: `-1`
```
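For illustration only (not part of the commit): a minimal sketch of a request body that enables the new limit. The field names come from the README entries above; the `/completion` endpoint path and the use of nlohmann::json are assumptions made for this example.

```cpp
// Hypothetical client-side sketch: build a completion request that stops
// generation roughly 500 ms after the first token, once a new line was produced.
// Assumes nlohmann::json is available; the endpoint path is an assumption.
#include <nlohmann/json.hpp>
#include <iostream>

int main() {
    nlohmann::json req = {
        {"prompt",           "def fib(n):"},
        {"n_predict",        256},  // hard cap on the number of generated tokens
        {"t_max_predict_ms", 500}   // time limit for the generation phase (0 = disabled)
    };

    // POST this body to the server's completion endpoint, e.g. /completion
    std::cout << req.dump(2) << std::endl;
    return 0;
}
```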

examples/server/server.cpp (+44 −6)

```diff
@@ -128,9 +128,12 @@ struct slot_params {
     bool stream       = true;
     bool cache_prompt = false; // remember the prompt to avoid reprocessing all prompt
 
-    int32_t n_keep    =  0; // number of tokens to keep from initial prompt
-    int32_t n_discard =  0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
-    int32_t n_predict = -1; // new tokens to predict
+    int32_t  n_keep    =  0; // number of tokens to keep from initial prompt
+    int32_t  n_discard =  0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
+    int32_t  n_predict = -1; // new tokens to predict
+
+    int64_t  t_max_prompt_ms  = -1; // TODO: implement
+    int64_t  t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit
 
     std::vector<std::string> antiprompt;
 
@@ -175,6 +178,7 @@ struct server_slot {
     server_task_cmpl_type cmpl_type = SERVER_TASK_CMPL_TYPE_NORMAL;
 
     bool has_next_token = true;
+    bool has_new_line   = false;
     bool truncated      = false;
     bool stopped_eos    = false;
     bool stopped_word   = false;
@@ -210,6 +214,7 @@ struct server_slot {
 
         n_prompt_tokens = 0;
         generated_text  = "";
+        has_new_line    = false;
        truncated        = false;
        stopped_eos      = false;
        stopped_word     = false;
@@ -874,6 +879,8 @@ struct server_context {
        slot.sparams.seed     = json_value(data, "seed",     default_sparams.seed);
        slot.sparams.n_probs  = json_value(data, "n_probs",  default_sparams.n_probs);
        slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
+      //slot.params.t_max_prompt_ms  = json_value(data, "t_max_prompt_ms",  default_params.t_max_prompt_ms); // TODO: implement
+        slot.params.t_max_predict_ms = json_value(data, "t_max_predict_ms", default_params.t_max_predict_ms);
 
        // process "json_schema" and "grammar"
        if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) {
```
```diff
@@ -1101,6 +1108,20 @@ struct server_context {
            SLT_DBG(slot, "stopped by limit, n_decoded = %d, n_predict = %d\n", slot.n_decoded, slot.params.n_predict);
        }
 
+        // if we have already seen a new line, we stop after a certain time limit
+        if (slot.has_new_line && slot.params.t_max_predict_ms > 0 &&
+                (ggml_time_us() - slot.t_start_generation > 1000.0f*slot.params.t_max_predict_ms)) {
+            slot.stopped_limit  = true;
+            slot.has_next_token = false;
+
+            SLT_DBG(slot, "stopped by time limit, n_decoded = %d, t_max_predict_ms = %d ms\n", slot.n_decoded, (int) slot.params.t_max_predict_ms);
+        }
+
+        // check if there is a new line in the generated text
+        if (result.text_to_send.find('\n') != std::string::npos) {
+            slot.has_new_line = true;
+        }
+
        // if context shift is disabled, we stop when it reaches the context limit
        if (slot.n_past >= slot.n_ctx) {
            slot.truncated      = true;
```
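A note on the check above: `ggml_time_us()` returns microseconds while `t_max_predict_ms` is in milliseconds, hence the `1000.0f *` factor, and the limit is only enforced once a new-line character has been seen; since the newline flag is updated after the check, a timeout can fire no earlier than the token following the first newline. A self-contained sketch of the same rule, using `std::chrono` in place of `ggml_time_us()` (the names here are illustrative, not the server's):

```cpp
// Standalone sketch of the generation time limit (illustrative names only).
#include <chrono>
#include <cstdint>
#include <string>

struct gen_state {
    bool    has_new_line     = false;  // set once '\n' appears in the generated text
    int64_t t_start_us       = 0;      // time the first token was generated
    int64_t t_max_predict_ms = -1;     // <= 0 means the limit is disabled
};

static int64_t now_us() {
    using namespace std::chrono;
    return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
}

// Returns true when generation should stop: the limit is enabled, a new line
// has already been produced, and more than t_max_predict_ms has elapsed.
bool hit_time_limit(const gen_state & st) {
    return st.has_new_line &&
           st.t_max_predict_ms > 0 &&
           (now_us() - st.t_start_us) > 1000 * st.t_max_predict_ms;
}

// After each decoded piece, the newline flag is updated the same way the
// server does it: by searching the text that is about to be sent.
void update_new_line(gen_state & st, const std::string & piece) {
    if (piece.find('\n') != std::string::npos) {
        st.has_new_line = true;
    }
}
```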
```diff
@@ -1250,6 +1271,7 @@ struct server_context {
            {"tokens_evaluated",    slot.n_prompt_tokens},
            {"generation_settings", get_formated_generation(slot)},
            {"prompt",              slot.prompt},
+            {"has_new_line",        slot.has_new_line},
            {"truncated",           slot.truncated},
            {"stopped_eos",         slot.stopped_eos},
            {"stopped_word",        slot.stopped_word},
@@ -1576,6 +1598,7 @@ struct server_context {
            slot_data["prompt"]     = slot.prompt;
            slot_data["next_token"] = {
                {"has_next_token", slot.has_next_token},
+                {"has_new_line",   slot.has_new_line},
                {"n_remain",       slot.n_remaining},
                {"n_decoded",      slot.n_decoded},
                {"stopped_eos",    slot.stopped_eos},
@@ -1914,6 +1937,13 @@ struct server_context {
            auto prefix_tokens = tokenize(slot.params.input_prefix, false, false);
            auto suffix_tokens = tokenize(slot.params.input_suffix, false, false);
 
+            // for now pick context to fit in a single batch (ratio prefix:suffix = 3:1, TODO: configurable?)
+            const int n_suffix_take = std::min<int>(suffix_tokens.size(), n_batch/4);
+            const int n_prefix_take = std::min<int>(prefix_tokens.size(), (n_batch - 3) - n_suffix_take);
+
+            prefix_tokens.erase(prefix_tokens.begin(), prefix_tokens.begin() + prefix_tokens.size() - n_prefix_take);
+            suffix_tokens.resize(n_suffix_take);
+
            prefix_tokens.insert(prefix_tokens.begin(), llama_token_fim_pre(model));
            suffix_tokens.insert(suffix_tokens.begin(), llama_token_fim_suf(model));
```
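The trimming above keeps the FIM prompt within a single batch: the suffix is capped at a quarter of the batch and the prefix gets the rest, minus 3 tokens (presumably reserved for the FIM special tokens inserted just below). The prefix is cut from the front so the tokens nearest the cursor survive, while the suffix is truncated at the back. A self-contained sketch with example numbers (`n_batch = 2048` is an assumption for this illustration, not taken from the commit):

```cpp
// Sketch of the prefix/suffix trimming with concrete numbers
// (token ids are placeholders; n_batch = 2048 is an example value).
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    const int n_batch = 2048;

    std::vector<int> prefix_tokens(3000, 1); // pretend: 3000 tokens before the cursor
    std::vector<int> suffix_tokens( 900, 2); // pretend:  900 tokens after the cursor

    // suffix gets at most a quarter of the batch, prefix gets the remainder
    // (minus 3 tokens, presumably reserved for the FIM special tokens)
    const int n_suffix_take = std::min<int>(suffix_tokens.size(), n_batch/4);
    const int n_prefix_take = std::min<int>(prefix_tokens.size(), (n_batch - 3) - n_suffix_take);

    // keep the tail of the prefix (closest to the cursor) and the head of the suffix
    prefix_tokens.erase(prefix_tokens.begin(), prefix_tokens.begin() + prefix_tokens.size() - n_prefix_take);
    suffix_tokens.resize(n_suffix_take);

    // with these inputs: n_suffix_take = 512, n_prefix_take = 1533
    printf("prefix kept: %zu, suffix kept: %zu\n", prefix_tokens.size(), suffix_tokens.size());
    return 0;
}
```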

```diff
@@ -1936,9 +1966,17 @@ struct server_context {
 
        SLT_INF(slot, "prompt tokenized, n_ctx_slot = %d, n_keep = %d, n_prompt_tokens = %d\n", slot.n_ctx, slot.params.n_keep, slot.n_prompt_tokens);
 
-        // print prompt tokens:
-        for (int i = 0; i < (int) prompt_tokens.size(); i++) {
-            SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
+        // print prompt tokens (for debugging)
+        if (1) {
+            // first 16 tokens (avoid flooding logs)
+            for (int i = 0; i < std::min<int>(16, prompt_tokens.size()); i++) {
+                SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
+            }
+        } else {
+            // all
+            for (int i = 0; i < (int) prompt_tokens.size(); i++) {
+                SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
+            }
        }
 
        // empty prompt passed -> release the slot and send empty response
```
