Skip to content

Commit 52c8bc3

Browse files
authored
sampling : custom samplers order (#4285)
* Samplers sequence: order by parameter
* Cleaned commented code
* Fixed formatting
* Rewrote with unordered_map
* Reverted and rewrote; too many problems and safeguards would have been needed
* Fixed code style
* Code style fixes according to review
* More readable samplers input string; fixed help text
* Style fix in sampler_queue
* Formatting fixes
* Whitespace fixes
1 parent e4b76bb commit 52c8bc3

File tree

5 files changed

+132
-27
lines changed

5 files changed

+132
-27
lines changed

common/common.cpp

+56
Original file line numberDiff line numberDiff line change
@@ -280,6 +280,18 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
280280
params.yarn_beta_slow = std::stof(argv[i]);
281281
} else if (arg == "--memory-f32") {
282282
params.memory_f16 = false;
283+
} else if (arg == "--samplers") {
284+
if (++i >= argc) {
285+
invalid_param = true;
286+
break;
287+
}
288+
sparams.samplers_sequence = parse_samplers_input(argv[i]);
289+
} else if (arg == "--sampling-seq") {
290+
if (++i >= argc) {
291+
invalid_param = true;
292+
break;
293+
}
294+
sparams.samplers_sequence = argv[i];
283295
} else if (arg == "--top-p") {
284296
if (++i >= argc) {
285297
invalid_param = true;
@@ -761,6 +773,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
761773
printf(" -n N, --n-predict N number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)\n", params.n_predict);
762774
printf(" -c N, --ctx-size N size of the prompt context (default: %d, 0 = loaded from model)\n", params.n_ctx);
763775
printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch);
776+
printf(" --samplers samplers that will be used for generation in the order, separated by \';\', for example: \"top_k;tfs;typical;top_p;min_p;temp\"\n");
777+
printf(" --sampling-seq simplified sequence for samplers that will be used (default: %s)\n", sparams.samplers_sequence.c_str());
764778
printf(" --top-k N top-k sampling (default: %d, 0 = disabled)\n", sparams.top_k);
765779
printf(" --top-p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)sparams.top_p);
766780
printf(" --min-p N min-p sampling (default: %.1f, 0.0 = disabled)\n", (double)sparams.min_p);
@@ -886,6 +900,48 @@ std::string gpt_random_prompt(std::mt19937 & rng) {
886900
GGML_UNREACHABLE();
887901
}
888902

903+
//
904+
// String parsing
905+
//
906+
907+
// Convert a ';'-separated list of sampler names (e.g. "top_k;tfs;typical_p")
// into the compact single-character sequence used internally (e.g. "kfy").
// Unrecognized names are silently skipped; an empty input yields an empty result.
std::string parse_samplers_input(std::string input) {
    // sampler names are written multiple ways in docs and user input,
    // so accept every common alias for each sampler
    static const std::unordered_map<std::string, char> samplers_symbols {
        {"top_k",       'k'},
        {"top-k",       'k'},
        {"top_p",       'p'},
        {"top-p",       'p'},
        {"nucleus",     'p'},
        {"typical_p",   'y'},
        {"typical-p",   'y'},
        {"typical",     'y'},
        {"min_p",       'm'},
        {"min-p",       'm'},
        {"tfs_z",       'f'},
        {"tfs-z",       'f'},
        {"tfs",         'f'},
        {"temp",        't'},
        {"temperature", 't'},
    };

    std::string output;
    output.reserve(8);

    // expected format example: "temp;top_k;tfs_z;typical_p;top_p;min_p"
    // single pass over the input: no repeated substr-shrinking, one map lookup per token
    size_t start = 0;
    for (;;) {
        const size_t separator = input.find(';', start);
        const size_t end  = (separator == std::string::npos) ? input.size() : separator;
        const std::string name = input.substr(start, end - start);

        const auto symbol = samplers_symbols.find(name);
        if (symbol != samplers_symbols.end()) {
            output += symbol->second;
        }

        if (separator == std::string::npos) {
            break;
        }
        start = separator + 1;
    }
    return output;
}
944+
889945
//
890946
// Model utils
891947
//

common/common.h

+6
Original file line numberDiff line numberDiff line change
@@ -141,6 +141,12 @@ std::string gpt_random_prompt(std::mt19937 & rng);
141141

142142
void process_escapes(std::string& input);
143143

144+
//
145+
// String parsing
146+
//
147+
148+
std::string parse_samplers_input(std::string input);
149+
144150
//
145151
// Model utils
146152
//

common/sampling.cpp

+49-11
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,54 @@ std::string llama_sampling_print(const llama_sampling_params & params) {
9999
return std::string(result);
100100
}
101101

102+
std::string llama_sampling_order_print(const llama_sampling_params & params) {
103+
std::string result = "CFG -> Penalties ";
104+
if (params.mirostat == 0) {
105+
for (auto s : params.samplers_sequence) {
106+
switch (s) {
107+
case 'k': result += "-> top_k "; break;
108+
case 'f': result += "-> tfs_z "; break;
109+
case 'y': result += "-> typical_p "; break;
110+
case 'p': result += "-> top_p "; break;
111+
case 'm': result += "-> min_p "; break;
112+
case 't': result += "-> temp "; break;
113+
default : break;
114+
}
115+
}
116+
} else result += "-> mirostat ";
117+
118+
return result;
119+
}
120+
121+
// no reasons to expose this function in header
122+
void sampler_queue(
123+
struct llama_context * ctx_main,
124+
const llama_sampling_params & params,
125+
llama_token_data_array & cur_p,
126+
size_t & min_keep) {
127+
const int n_vocab = llama_n_vocab(llama_get_model(ctx_main));
128+
129+
const float temp = params.temp;
130+
const int32_t top_k = params.top_k <= 0 ? n_vocab : params.top_k;
131+
const float top_p = params.top_p;
132+
const float min_p = params.min_p;
133+
const float tfs_z = params.tfs_z;
134+
const float typical_p = params.typical_p;
135+
const std::string & samplers_sequence = params.samplers_sequence;
136+
137+
for (auto s : samplers_sequence) {
138+
switch (s){
139+
case 'k': llama_sample_top_k (ctx_main, &cur_p, top_k, min_keep); break;
140+
case 'f': llama_sample_tail_free(ctx_main, &cur_p, tfs_z, min_keep); break;
141+
case 'y': llama_sample_typical (ctx_main, &cur_p, typical_p, min_keep); break;
142+
case 'p': llama_sample_top_p (ctx_main, &cur_p, top_p, min_keep); break;
143+
case 'm': llama_sample_min_p (ctx_main, &cur_p, min_p, min_keep); break;
144+
case 't': llama_sample_temp (ctx_main, &cur_p, temp); break;
145+
default : break;
146+
}
147+
}
148+
}
149+
102150
llama_token llama_sampling_sample(
103151
struct llama_sampling_context * ctx_sampling,
104152
struct llama_context * ctx_main,
@@ -109,11 +157,6 @@ llama_token llama_sampling_sample(
109157
const int n_vocab = llama_n_vocab(llama_get_model(ctx_main));
110158

111159
const float temp = params.temp;
112-
const int32_t top_k = params.top_k <= 0 ? n_vocab : params.top_k;
113-
const float top_p = params.top_p;
114-
const float min_p = params.min_p;
115-
const float tfs_z = params.tfs_z;
116-
const float typical_p = params.typical_p;
117160
const int32_t penalty_last_n = params.penalty_last_n < 0 ? params.n_prev : params.penalty_last_n;
118161
const float penalty_repeat = params.penalty_repeat;
119162
const float penalty_freq = params.penalty_freq;
@@ -188,12 +231,7 @@ llama_token llama_sampling_sample(
188231
// temperature sampling
189232
size_t min_keep = std::max(1, params.n_probs);
190233

191-
llama_sample_top_k (ctx_main, &cur_p, top_k, min_keep);
192-
llama_sample_tail_free(ctx_main, &cur_p, tfs_z, min_keep);
193-
llama_sample_typical (ctx_main, &cur_p, typical_p, min_keep);
194-
llama_sample_top_p (ctx_main, &cur_p, top_p, min_keep);
195-
llama_sample_min_p (ctx_main, &cur_p, min_p, min_keep);
196-
llama_sample_temp (ctx_main, &cur_p, temp);
234+
sampler_queue(ctx_main, params, cur_p, min_keep);
197235

198236
id = llama_sample_token(ctx_main, &cur_p);
199237

common/sampling.h

+20-16
Original file line numberDiff line numberDiff line change
@@ -10,22 +10,23 @@
1010

1111
// sampling parameters
1212
typedef struct llama_sampling_params {
13-
int32_t n_prev = 64; // number of previous tokens to remember
14-
int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
15-
int32_t top_k = 40; // <= 0 to use vocab size
16-
float top_p = 0.95f; // 1.0 = disabled
17-
float min_p = 0.05f; // 0.0 = disabled
18-
float tfs_z = 1.00f; // 1.0 = disabled
19-
float typical_p = 1.00f; // 1.0 = disabled
20-
float temp = 0.80f; // 1.0 = disabled
21-
int32_t penalty_last_n = 64; // last n tokens to penalize (0 = disable penalty, -1 = context size)
22-
float penalty_repeat = 1.10f; // 1.0 = disabled
23-
float penalty_freq = 0.00f; // 0.0 = disabled
24-
float penalty_present = 0.00f; // 0.0 = disabled
25-
int32_t mirostat = 0; // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
26-
float mirostat_tau = 5.00f; // target entropy
27-
float mirostat_eta = 0.10f; // learning rate
28-
bool penalize_nl = true; // consider newlines as a repeatable token
13+
int32_t n_prev = 64; // number of previous tokens to remember
14+
int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
15+
int32_t top_k = 40; // <= 0 to use vocab size
16+
float top_p = 0.95f; // 1.0 = disabled
17+
float min_p = 0.05f; // 0.0 = disabled
18+
float tfs_z = 1.00f; // 1.0 = disabled
19+
float typical_p = 1.00f; // 1.0 = disabled
20+
float temp = 0.80f; // 1.0 = disabled
21+
int32_t penalty_last_n = 64; // last n tokens to penalize (0 = disable penalty, -1 = context size)
22+
float penalty_repeat = 1.10f; // 1.0 = disabled
23+
float penalty_freq = 0.00f; // 0.0 = disabled
24+
float penalty_present = 0.00f; // 0.0 = disabled
25+
int32_t mirostat = 0; // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
26+
float mirostat_tau = 5.00f; // target entropy
27+
float mirostat_eta = 0.10f; // learning rate
28+
bool penalize_nl = true; // consider newlines as a repeatable token
29+
std::string samplers_sequence = "kfypmt"; // top_k, tail_free, typical_p, top_p, min_p, temp
2930

3031
std::string grammar; // optional BNF-like grammar to constrain sampling
3132

@@ -80,6 +81,9 @@ std::string llama_sampling_prev_str(llama_sampling_context * ctx_sampling, llama
8081
// Print sampling parameters into a string
8182
std::string llama_sampling_print(const llama_sampling_params & params);
8283

84+
// Print sampling order into a string
85+
std::string llama_sampling_order_print(const llama_sampling_params & params);
86+
8387
// this is a common sampling function used across the examples for convenience
8488
// it can serve as a starting point for implementing your own sampling function
8589
// Note: When using multiple sequences, it is the caller's responsibility to call

examples/main/main.cpp

+1
Original file line numberDiff line numberDiff line change
@@ -437,6 +437,7 @@ int main(int argc, char ** argv) {
437437
}
438438
}
439439
LOG_TEE("sampling: \n%s\n", llama_sampling_print(sparams).c_str());
440+
LOG_TEE("sampling order: \n%s\n", llama_sampling_order_print(sparams).c_str());
440441
LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
441442
LOG_TEE("\n\n");
442443

0 commit comments

Comments (0)