Skip to content

Commit f88b198

Browse files
committed
llama : fix Vulkan whitelist (ggml-org#11)
1 parent ffd0624 commit f88b198

File tree

1 file changed

+10
-4
lines changed

1 file changed

+10
-4
lines changed

llama.cpp

+10-4
Original file line number | Diff line number | Diff line change
@@ -6352,9 +6352,11 @@ int64_t llama_time_us(void) {
63526352
return ggml_time_us();
63536353
}
63546354

6355-
struct llama_model * llama_load_model_from_file(
6356-
const char * path_model,
6357-
struct llama_context_params params) {
6355+
static struct llama_model * llama_load_model_from_file_internal(
6356+
const char * path_model, struct llama_context_params * params_p
6357+
) {
6358+
auto & params = *params_p;
6359+
63586360
ggml_time_init();
63596361

63606362
llama_model * model = new llama_model;
@@ -6389,6 +6391,10 @@ struct llama_model * llama_load_model_from_file(
63896391
return model;
63906392
}
63916393

6394+
struct llama_model * llama_load_model_from_file(const char * path_model, struct llama_context_params params) {
6395+
return llama_load_model_from_file_internal(path_model, &params);
6396+
}
6397+
63926398
// Destroy a model created by llama_load_model_from_file and release its
// memory. Passing NULL is harmless: `delete` on a null pointer is a no-op
// per the C++ standard.
void llama_free_model(struct llama_model * model) {
    delete model;
}
@@ -6559,7 +6565,7 @@ struct llama_context * llama_new_context_with_model(
65596565
static struct llama_context * llama_init_from_file(
65606566
const char * path_model,
65616567
struct llama_context_params params) {
6562-
struct llama_model * model = llama_load_model_from_file(path_model, params);
6568+
struct llama_model * model = llama_load_model_from_file_internal(path_model, &params);
65636569
if (!model) {
65646570
return nullptr;
65656571
}

0 commit comments

Comments
 (0)