@@ -67,6 +67,7 @@ struct whisper_params {
     bool use_gpu = true;
 
     std::string person      = "Georgi";
+    std::string bot_name    = "LLaMA";
     std::string language    = "en";
     std::string model_wsp   = "models/ggml-base.en.bin";
     std::string model_llama = "models/ggml-llama-7B.bin";
@@ -101,7 +102,8 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
        else if (arg == "-vp" || arg == "--verbose-prompt") { params.verbose_prompt = true; }
        else if (arg == "-ng" || arg == "--no-gpu")         { params.use_gpu        = false; }
        else if (arg == "-p"  || arg == "--person")         { params.person         = argv[++i]; }
-       else if (arg == "--session")                        { params.path_session   = argv[++i];}
+       else if (arg == "-bn" || arg == "--bot-name")       { params.bot_name       = argv[++i]; }
+       else if (arg == "--session")                        { params.path_session   = argv[++i]; }
        else if (arg == "-l"  || arg == "--language")       { params.language       = argv[++i]; }
        else if (arg == "-mw" || arg == "--model-whisper")  { params.model_wsp      = argv[++i]; }
        else if (arg == "-ml" || arg == "--model-llama")    { params.model_llama    = argv[++i]; }
@@ -146,6 +148,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
    fprintf(stderr, "  -vp,      --verbose-prompt [%-7s] print prompt at start\n",              params.verbose_prompt ? "true" : "false");
    fprintf(stderr, "  -ng,      --no-gpu         [%-7s] disable GPU\n",                        params.use_gpu ? "false" : "true");
    fprintf(stderr, "  -p NAME,  --person NAME    [%-7s] person name (for prompt selection)\n", params.person.c_str());
+   fprintf(stderr, "  -bn NAME, --bot-name NAME  [%-7s] bot name (to display)\n",              params.bot_name.c_str());
    fprintf(stderr, "  -l LANG,  --language LANG  [%-7s] spoken language\n",                    params.language.c_str());
    fprintf(stderr, "  -mw FILE, --model-whisper  [%-7s] whisper model file\n",                 params.model_wsp.c_str());
    fprintf(stderr, "  -ml FILE, --model-llama    [%-7s] llama model file\n",                   params.model_llama.c_str());
@@ -323,12 +326,11 @@ int main(int argc, char ** argv) {
    float prob0 = 0.0f;
 
    const std::string chat_symb = ":";
-   const std::string bot_name  = "LLaMA";
 
    std::vector<float> pcmf32_cur;
    std::vector<float> pcmf32_prompt;
 
-   const std::string prompt_whisper = ::replace(k_prompt_whisper, "{1}", bot_name);
+   const std::string prompt_whisper = ::replace(k_prompt_whisper, "{1}", params.bot_name);
 
    // construct the initial prompt for LLaMA inference
    std::string prompt_llama = params.prompt.empty() ? k_prompt_llama : params.prompt;
@@ -337,7 +339,7 @@ int main(int argc, char ** argv) {
    prompt_llama.insert(0, 1, ' ');
 
    prompt_llama = ::replace(prompt_llama, "{0}", params.person);
-   prompt_llama = ::replace(prompt_llama, "{1}", bot_name);
+   prompt_llama = ::replace(prompt_llama, "{1}", params.bot_name);
 
    {
        // get time string
@@ -524,7 +526,7 @@ int main(int argc, char ** argv) {
            force_speak = false;
 
            text_heard.insert(0, 1, ' ');
-           text_heard += "\n" + bot_name + chat_symb;
+           text_heard += "\n" + params.bot_name + chat_symb;
            fprintf(stdout, "%s%s%s", "\033[1m", text_heard.c_str(), "\033[0m");
            fflush(stdout);
 
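
Usage sketch (not part of the commit): assuming the example builds to the talk-llama binary as in upstream whisper.cpp, the new flag would be passed alongside the existing options, for instance

    ./talk-llama -mw models/ggml-base.en.bin -ml models/ggml-llama-7B.bin -p "Georgi" -bn "Assistant"

so both the displayed bot name and the "{1}" placeholder in the Whisper and LLaMA prompts pick up "Assistant" instead of the previously hard-coded "LLaMA" default.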