Commit 4b64181

Remove direct access to std streams from llama_main
The goal is to allow running llama_main while connected to other streams, such as TCP sockets.

Signed-off-by: Thiago Padilha <[email protected]>
1 parent 2294080 commit 4b64181

3 files changed: +38 −30 lines

llama.cpp

+31 −28
@@ -717,13 +717,16 @@ int llama_main(
         gpt_vocab vocab,
         llama_model model,
         int64_t t_load_us,
-        int64_t t_main_start_us) {
+        int64_t t_main_start_us,
+        std::istream & instream,
+        FILE *outstream,
+        FILE *errstream) {

     if (params.seed < 0) {
         params.seed = time(NULL);
     }

-    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);
+    fprintf(errstream, "%s: seed = %d\n", __func__, params.seed);

     std::mt19937 rng(params.seed);
     if (params.random_prompt) {
@@ -769,13 +772,13 @@ int llama_main(
         params.interactive = true;
     }

-    fprintf(stderr, "\n");
-    fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
-    fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
+    fprintf(errstream, "\n");
+    fprintf(errstream, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
+    fprintf(errstream, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
     for (int i = 0; i < (int) embd_inp.size(); i++) {
-        fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
+        fprintf(errstream, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
     }
-    fprintf(stderr, "\n");
+    fprintf(errstream, "\n");
     if (params.interactive) {
 #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
         struct sigaction sigint_action;
@@ -787,22 +790,22 @@ int llama_main(
         signal(SIGINT, sigint_handler);
 #endif

-        fprintf(stderr, "%s: interactive mode on.\n", __func__);
+        fprintf(errstream, "%s: interactive mode on.\n", __func__);

         if(antipromptv_inp.size()) {
             for (size_t apindex = 0; apindex < antipromptv_inp.size(); ++apindex) {
                 auto antiprompt_inp = antipromptv_inp.at(apindex);
-                fprintf(stderr, "%s: reverse prompt: '%s'\n", __func__, params.antiprompt.at(apindex).c_str());
-                fprintf(stderr, "%s: number of tokens in reverse prompt = %zu\n", __func__, antiprompt_inp.size());
+                fprintf(errstream, "%s: reverse prompt: '%s'\n", __func__, params.antiprompt.at(apindex).c_str());
+                fprintf(errstream, "%s: number of tokens in reverse prompt = %zu\n", __func__, antiprompt_inp.size());
                 for (int i = 0; i < (int) antiprompt_inp.size(); i++) {
-                    fprintf(stderr, "%6d -> '%s'\n", antiprompt_inp[i], vocab.id_to_token.at(antiprompt_inp[i]).c_str());
+                    fprintf(errstream, "%6d -> '%s'\n", antiprompt_inp[i], vocab.id_to_token.at(antiprompt_inp[i]).c_str());
                 }
-                fprintf(stderr, "\n");
+                fprintf(errstream, "\n");
             }
         }
     }
-    fprintf(stderr, "sampling parameters: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
-    fprintf(stderr, "\n\n");
+    fprintf(errstream, "sampling parameters: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
+    fprintf(errstream, "\n\n");

     std::vector<gpt_vocab::id> embd;

@@ -815,7 +818,7 @@ int llama_main(
     std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

     if (params.interactive) {
-        fprintf(stderr, "== Running in interactive mode. ==\n"
+        fprintf(errstream, "== Running in interactive mode. ==\n"
 #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
                " - Press Ctrl+C to interject at any time.\n"
 #endif
@@ -831,7 +834,7 @@ int llama_main(

     // set the color for the prompt which will be output initially
     if (params.use_color) {
-        printf(ANSI_COLOR_YELLOW);
+        fprintf(outstream, ANSI_COLOR_YELLOW);
     }

     while (remaining_tokens > 0 || params.interactive) {
@@ -840,7 +843,7 @@ int llama_main(
             const int64_t t_start_us = ggml_time_us();

             if (!llama_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) {
-                fprintf(stderr, "Failed to predict\n");
+                fprintf(errstream, "Failed to predict\n");
                 return 1;
             }

@@ -901,9 +904,9 @@ int llama_main(
         // display text
         if (!input_noecho) {
             for (auto id : embd) {
-                printf("%s", vocab.id_to_token[id].c_str());
+                fprintf(outstream, "%s", vocab.id_to_token[id].c_str());
             }
-            fflush(stdout);
+            fflush(outstream);
         }
         // reset color to default if we there is no pending user input
         if (!input_noecho && params.use_color && (int)embd_inp.size() == input_consumed) {
@@ -935,7 +938,7 @@ int llama_main(
                 std::string line;
                 bool another_line = true;
                 do {
-                    std::getline(std::cin, line);
+                    std::getline(instream, line);
                     if (line.empty() || line.back() != '\\') {
                         another_line = false;
                     } else {
@@ -964,7 +967,7 @@ int llama_main(
             if (params.interactive) {
                 is_interacting = true;
             } else {
-                fprintf(stderr, " [end of text]\n");
+                fprintf(errstream, " [end of text]\n");
                 break;
             }
         }
@@ -984,18 +987,18 @@ int llama_main(
     {
         const int64_t t_main_end_us = ggml_time_us();

-        fprintf(stderr, "\n\n");
-        fprintf(stderr, "%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
-        fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
-        fprintf(stderr, "%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
-        fprintf(stderr, "%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
-        fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
+        fprintf(errstream, "\n\n");
+        fprintf(errstream, "%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
+        fprintf(errstream, "%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
+        fprintf(errstream, "%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
+        fprintf(errstream, "%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
+        fprintf(errstream, "%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
     }

     ggml_free(model.ctx);

     if (params.use_color) {
-        printf(ANSI_COLOR_RESET);
+        fprintf(outstream, ANSI_COLOR_RESET);
     }

     return 0;

llama.h

+4 −1
@@ -64,5 +64,8 @@ int llama_main(
         gpt_vocab vocab,
         llama_model model,
         int64_t t_load_us,
-        int64_t t_main_start_us);
+        int64_t t_main_start_us,
+        std::istream & instream,
+        FILE *outstream,
+        FILE *errstream);
 bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab & vocab, int n_ctx, ggml_type memory_type);
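
The added parameters make llama_main's I/O injectable through its declaration. As a minimal sketch of what a caller could now do (not part of this commit), the helper below drives the function with file-backed streams; run_from_files and the file names are hypothetical:

#include <cstdint>
#include <cstdio>
#include <fstream>

#include "utils.h"  // gpt_params, gpt_vocab
#include "llama.h"  // llama_main, llama_model

// Hypothetical helper: run llama_main with file-backed streams instead of the
// process's stdin/stdout/stderr. Argument order follows the llama.h declaration.
int run_from_files(gpt_params params, gpt_vocab vocab, llama_model model,
                   int64_t t_load_us, int64_t t_main_start_us) {
    std::ifstream instream("input.txt");            // interactive input is read from here
    FILE *outstream = fopen("generated.txt", "w");  // generated tokens go here
    FILE *errstream = fopen("llama.log", "w");      // progress and stats go here

    int ret = llama_main(params, vocab, model, t_load_us, t_main_start_us,
                         instream, outstream, errstream);

    fclose(outstream);
    fclose(errstream);
    return ret;
}

Since instream is taken by reference and the output handles are plain C streams, any std::istream implementation and any writable FILE* (files, pipes, fdopen'd sockets) can be substituted without touching llama_main itself.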

main.cpp

+3 −1
@@ -2,6 +2,8 @@
 #include "utils.h"
 #include "llama.h"

+#include <iostream>
+
 const char * llama_print_system_info(void) {
     static std::string s;

@@ -63,5 +65,5 @@ int main(int argc, char ** argv) {
             params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
     }

-    return llama_main(params, vocab, model, t_main_start_us, t_load_us);
+    return llama_main(params, vocab, model, t_main_start_us, t_load_us, std::cin, stdout, stderr);
 }
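
With the default call site wired to std::cin, stdout, and stderr, behaviour is unchanged, but the entry point can now also be bound to a network connection, which is the use case the commit message mentions. A rough sketch under that assumption follows; fd_streambuf and serve_client are illustrative names, not part of this change, and the code assumes a POSIX socket that has already been accepted:

#include <cstdint>
#include <cstdio>
#include <istream>
#include <streambuf>
#include <unistd.h>

#include "utils.h"  // gpt_params, gpt_vocab
#include "llama.h"  // llama_main, llama_model

// Minimal read-only streambuf over a file descriptor, so a connected socket can
// back the std::istream& parameter of llama_main.
class fd_streambuf : public std::streambuf {
public:
    explicit fd_streambuf(int fd) : fd_(fd) {}
protected:
    int_type underflow() override {
        ssize_t n = read(fd_, buf_, sizeof(buf_));
        if (n <= 0) return traits_type::eof();
        setg(buf_, buf_, buf_ + n);
        return traits_type::to_int_type(*gptr());
    }
private:
    int  fd_;
    char buf_[4096];
};

// Run one session over an accepted TCP connection: prompts are read from the
// socket, generated text and log output are written back to it.
int serve_client(int client_fd, gpt_params params, gpt_vocab vocab, llama_model model,
                 int64_t t_load_us, int64_t t_main_start_us) {
    fd_streambuf inbuf(client_fd);
    std::istream instream(&inbuf);

    // fdopen() wraps duplicates of the socket fd as FILE* handles for the
    // generated-text and log streams.
    FILE *outstream = fdopen(dup(client_fd), "w");
    FILE *errstream = fdopen(dup(client_fd), "w");

    int ret = llama_main(params, vocab, model, t_load_us, t_main_start_us,
                         instream, outstream, errstream);

    fclose(outstream);
    fclose(errstream);
    return ret;
}

Whether output and log messages should share one socket or go to separate destinations is a policy choice left entirely to the caller, which is the point of the refactor.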
