
Commit 03c5267

llama : use LLAMA_LOG_ macros for logging
1 parent a128c38 commit 03c5267
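
This commit replaces direct fprintf(stderr, ...) and printf(...) calls inside llama.cpp with the library's LLAMA_LOG_INFO / LLAMA_LOG_WARN / LLAMA_LOG_ERROR macros, so these messages flow through the same logging path as the rest of the library and can be redirected with the callback registered via llama_log_set. Below is a minimal sketch of how such printf-style logging macros are commonly defined; the names log_internal and log_level here are illustrative assumptions, not the exact definitions in llama.cpp.

// Illustrative sketch only: shows the variadic-macro pattern behind
// LLAMA_LOG_*; names and details differ in the real llama.cpp.
#include <cstdarg>
#include <cstdio>

enum log_level { LOG_LEVEL_INFO, LOG_LEVEL_WARN, LOG_LEVEL_ERROR };

// Hypothetical internal sink: formats the message and would normally
// forward it (together with the level) to a user-registered callback.
static void log_internal(log_level level, const char * fmt, ...) {
    (void) level; // this sketch just writes to stderr
    va_list args;
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
}

// Variadic macros keep the printf-style call sites unchanged:
// only the function name at each call site needs to be swapped.
#define LLAMA_LOG_INFO(...)  log_internal(LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...)  log_internal(LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) log_internal(LOG_LEVEL_ERROR, __VA_ARGS__)

With macros of this shape, each call site in the diff below keeps its existing format string and arguments; the change only makes the severity (info/warn/error) explicit and routes the output through the logger.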

1 file changed: +23 −23 lines

llama.cpp

+23 −23
@@ -1114,7 +1114,7 @@ struct llama_mlock {
             suggest = false;
         }
 
-        fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
+        LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
                 size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
         return false;
     }
@@ -1123,7 +1123,7 @@ struct llama_mlock {
 
     static void raw_unlock(void * addr, size_t size) {
         if (munlock(addr, size)) {
-            fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
+            LLAMA_LOG_WARN("warning: failed to munlock buffer: %s\n", std::strerror(errno));
         }
     }
 #elif defined(_WIN32)
@@ -1141,7 +1141,7 @@ struct llama_mlock {
                 return true;
             }
             if (tries == 2) {
-                fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
+                LLAMA_LOG_WARN("warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
                     len, size, llama_format_win_err(GetLastError()).c_str());
                 return false;
             }
@@ -1150,7 +1150,7 @@ struct llama_mlock {
             // set size and try again.
             SIZE_T min_ws_size, max_ws_size;
             if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
-                fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
+                LLAMA_LOG_WARN("warning: GetProcessWorkingSetSize failed: %s\n",
                         llama_format_win_err(GetLastError()).c_str());
                 return false;
             }
@@ -1163,7 +1163,7 @@ struct llama_mlock {
             min_ws_size += increment;
             max_ws_size += increment;
             if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
-                fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
+                LLAMA_LOG_WARN("warning: SetProcessWorkingSetSize failed: %s\n",
                         llama_format_win_err(GetLastError()).c_str());
                 return false;
             }
@@ -1172,7 +1172,7 @@ struct llama_mlock {
 
     static void raw_unlock(void * ptr, size_t len) {
         if (!VirtualUnlock(ptr, len)) {
-            fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
+            LLAMA_LOG_WARN("warning: failed to VirtualUnlock buffer: %s\n",
                     llama_format_win_err(GetLastError()).c_str());
         }
     }
@@ -1184,7 +1184,7 @@ struct llama_mlock {
     }
 
     bool raw_lock(const void * addr, size_t len) const {
-        fprintf(stderr, "warning: mlock not supported on this system\n");
+        LLAMA_LOG_WARN("warning: mlock not supported on this system\n");
         return false;
     }
 
@@ -2085,13 +2085,13 @@ namespace GGUFMeta {
                     __func__, override_type_to_str(override->tag), override->key);
                 switch (override->tag) {
                     case LLAMA_KV_OVERRIDE_BOOL: {
-                        printf("%s\n", override->bool_value ? "true" : "false");
+                        LLAMA_LOG_INFO("%s\n", override->bool_value ? "true" : "false");
                     } break;
                     case LLAMA_KV_OVERRIDE_INT: {
-                        printf("%" PRId64 "\n", override->int_value);
+                        LLAMA_LOG_INFO("%" PRId64 "\n", override->int_value);
                     } break;
                     case LLAMA_KV_OVERRIDE_FLOAT: {
-                        printf("%.6f\n", override->float_value);
+                        LLAMA_LOG_INFO("%.6f\n", override->float_value);
                     } break;
                     default:
                         // Shouldn't be possible to end up here, but just in case...
@@ -6993,7 +6993,7 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                 if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
 
 #ifdef PRETOKENIZERDEBUG
-                fprintf(stderr, "FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
+                LLAMA_LOG_WARN("FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
 #endif
                 auto source = std::distance(buffer.begin(), it);
 
@@ -7006,7 +7006,7 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                     buffer.emplace_after(it, (*raw_text), left_reminder_offset, left_reminder_length);
 
 #ifdef PRETOKENIZERDEBUG
-                    fprintf(stderr, "FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
+                    LLAMA_LOG_WARN("FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
 #endif
                     it++;
                 }
@@ -7022,7 +7022,7 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                     buffer.emplace_after(it, (*raw_text), right_reminder_offset, right_reminder_length);
 
 #ifdef PRETOKENIZERDEBUG
-                    fprintf(stderr, "FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
+                    LLAMA_LOG_WARN("FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
 #endif
 
                     it++;
@@ -7038,7 +7038,7 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                         raw_text_base_length = right_reminder_length;
 
 #ifdef PRETOKENIZERDEBUG
-                        fprintf(stderr, "RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
+                        LLAMA_LOG_WARN("RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
 #endif
                     } else {
                         if (source == 0) {
@@ -7095,7 +7095,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
                 }
 
 #ifdef PRETOKENIZERDEBUG
-                fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
+                LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
 #endif
                 llm_tokenizer_spm tokenizer(vocab);
                 llama_escape_whitespace(raw_text);
@@ -7116,7 +7116,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
                 auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
 
 #ifdef PRETOKENIZERDEBUG
-                fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
+                LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
 #endif
                 llm_tokenizer_bpe tokenizer(vocab);
                 tokenizer.tokenize(raw_text, output);
@@ -8641,7 +8641,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     if (params->imatrix) {
         imatrix_data = static_cast<const std::unordered_map<std::string, std::vector<float>>*>(params->imatrix);
         if (imatrix_data) {
-            printf("================================ Have weights data with %d entries\n",int(imatrix_data->size()));
+            LLAMA_LOG_INFO("================================ Have weights data with %d entries\n",int(imatrix_data->size()));
         }
     }
 
@@ -8764,23 +8764,23 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             if (imatrix_data) {
                 auto it = imatrix_data->find(tensor->name);
                 if (it == imatrix_data->end()) {
-                    printf("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
+                    LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
                 } else {
                     if (it->second.size() == (size_t)tensor->ne[0]) {
                         imatrix = it->second.data();
                     } else {
-                        printf("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
+                        LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
                                 int(it->second.size()), int(tensor->ne[0]), tensor->name);
                     }
                 }
             }
             if ((new_type == GGML_TYPE_IQ2_XXS ||
                  new_type == GGML_TYPE_IQ2_XS ||
                 (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
-                fprintf(stderr, "\n\n============================================================\n");
-                fprintf(stderr, "Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
-                fprintf(stderr, "The result will be garbage, so bailing out\n");
-                fprintf(stderr, "============================================================\n\n");
+                LLAMA_LOG_ERROR("\n\n============================================================\n");
+                LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
+                LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
+                LLAMA_LOG_ERROR("============================================================\n\n");
                 throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
             }
 