From 65648b341f786ca724beb6e288d748794a21d8ea Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 14 Jan 2024 16:48:16 +0200 Subject: [PATCH 1/9] backend : add eval callback ggml-ci --- examples/simple/simple.cpp | 36 ++++++++++++++++++++++++++++++++++-- ggml-backend.c | 38 ++++++++++++++++++++++++++++++++++++-- ggml-backend.h | 7 +++++++ llama.cpp | 9 +++++++++ llama.h | 4 ++++ 5 files changed, 90 insertions(+), 4 deletions(-) diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index 9cfde8308f18f..b3ae68492ecfd 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -6,11 +6,36 @@ #include #include +// a function that can be called for every computed node during graph evaluation +// the user can choose to whether to observe the data of the node depending on the tensor parameters +static bool observe_compute(int node_index, struct ggml_tensor * t, void * user_data) { + GGML_UNUSED(user_data); + + // check if name contains soft_max + if (strstr(t->name, "soft_max") != 0) { + printf("%s: node_index = %5d, t->name = %32s, t->op = %12s, [%5d, %5d, %5d, %5d]\n", + __func__, node_index, t->name, ggml_op_name(t->op), (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]); + + std::vector t_data(ggml_nelements(t)); + ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t)); + + // print first row + for (int i = 0; i < t->ne[0]; i++) { + printf("%8.4f ", t_data[i]); + } + printf("\n"); + } + + return true; +} + int main(int argc, char ** argv) { gpt_params params; + bool observe = false; + if (argc == 1 || argv[1][0] == '-') { - printf("usage: %s MODEL_PATH [PROMPT]\n" , argv[0]); + printf("usage: %s MODEL_PATH [PROMPT] [OBSERV]\n" , argv[0]); return 1 ; } @@ -22,6 +47,10 @@ int main(int argc, char ** argv) { params.prompt = argv[2]; } + if (argc >= 4) { + observe = atoi(argv[3]); + } + if (params.prompt.empty()) { params.prompt = "Hello my name is"; } @@ -37,7 +66,7 @@ int main(int argc, char ** argv) { llama_model_params model_params = llama_model_default_params(); - // model_params.n_gpu_layers = 99; // offload all layers to the GPU + model_params.n_gpu_layers = 99; // offload all layers to the GPU llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); @@ -55,6 +84,9 @@ int main(int argc, char ** argv) { ctx_params.n_threads = params.n_threads; ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch; + ctx_params.cb_eval = observe ? 
observe_compute : NULL; + ctx_params.cb_eval_user_data = NULL; + llama_context * ctx = llama_new_context_with_model(model, ctx_params); if (ctx == NULL) { diff --git a/ggml-backend.c b/ggml-backend.c index 505dbba476253..ee78f45fa92bd 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -802,6 +802,9 @@ struct ggml_backend_sched { __attribute__((aligned(GGML_MEM_ALIGN))) #endif char context_buffer[GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS*sizeof(struct ggml_tensor) + sizeof(struct ggml_cgraph)]; + + ggml_backend_sched_eval_callback callback_eval; + void * callback_eval_user_data; }; #define hash_id(node) ggml_hash_find_or_insert(sched->hash_set, node) @@ -1324,9 +1327,30 @@ static void sched_compute_splits(ggml_backend_sched_t sched) { ggml_graph_dump_dot(split->graph, NULL, split_filename); #endif + uint64_t compute_start_us = ggml_time_us(); - ggml_backend_graph_compute(split_backend, &split->graph); - //ggml_backend_synchronize(split_backend); // necessary to measure compute time + if (!sched->callback_eval) { + ggml_backend_graph_compute(split_backend, &split->graph); + //ggml_backend_synchronize(split_backend); // necessary to measure compute time + } else { + // similar to ggml_backend_compare_graph_backend + for (int j = 0; j < split->graph.n_nodes; j++) { + struct ggml_tensor * t = split->graph.nodes[j]; + + struct ggml_cgraph gv = ggml_graph_view(&split->graph, j, j + 1); + + ggml_backend_graph_compute(split_backend, &gv); + + if (ggml_is_view_op(t->op)) { + continue; + } + + // TODO: j is node index in the split, not in the original graph + if (!sched->callback_eval(j, t, sched->callback_eval_user_data)) { + break; + } + } + } uint64_t compute_end_us = ggml_time_us(); compute_us[split_backend_id] += compute_end_us - compute_start_us; } @@ -1352,6 +1376,10 @@ static void sched_reset(ggml_backend_sched_t sched) { memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size); memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size); + // TODO: should we clear the callbacks? 
+ //sched->callback_eval = NULL; + //sched->callback_eval_user_data = NULL; + sched->is_reset = true; } @@ -1431,6 +1459,12 @@ void ggml_backend_sched_reset(ggml_backend_sched_t sched) { sched_reset(sched); } + +void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) { + sched->callback_eval = callback; + sched->callback_eval_user_data = user_data; +} + int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) { return sched->n_splits; } diff --git a/ggml-backend.h b/ggml-backend.h index 4eb244af1d3e7..057ed120189c4 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -148,6 +148,9 @@ extern "C" { struct ggml_backend_sched; typedef struct ggml_backend_sched * ggml_backend_sched_t; + // TODO: propose to rename to ggml_backend_sched_callback_eval + typedef bool (*ggml_backend_sched_eval_callback)(int node_index, struct ggml_tensor * t, void * user_data); + // Initialize a backend scheduler GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size); GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched); @@ -168,6 +171,9 @@ extern "C" { // Reset all assignments and allocators - must be called before using the sched allocators to allocate inputs GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched); + // Set a callback to be called for each resulting node during graph compute + GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data); + // // Utils // @@ -183,6 +189,7 @@ extern "C" { GGML_API struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph); GGML_API void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy); + // TODO: propose to rename this to ggml_backend_callback_compare typedef bool (*ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data); // Compare the output of two backends diff --git a/llama.cpp b/llama.cpp index 46c4d11c88873..5c1b211702f37 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1393,6 +1393,9 @@ struct llama_cparams { bool mul_mat_q; bool offload_kqv; + + ggml_backend_sched_eval_callback cb_eval; + void * cb_eval_user_data; }; struct llama_layer { @@ -6254,6 +6257,7 @@ static int llama_decode_internal( //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head); ggml_backend_sched_reset(lctx.sched); + ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data); ggml_cgraph * gf = llama_build_graph(lctx, batch); @@ -9267,6 +9271,8 @@ struct llama_context_params llama_context_default_params() { /*.logits_all =*/ false, /*.embedding =*/ false, /*.offload_kqv =*/ true, + /*.cb_eval =*/ nullptr, + /*.cb_eval_user_data =*/ nullptr, }; return result; @@ -9401,6 +9407,9 @@ struct llama_context * llama_new_context_with_model( hparams.n_yarn_orig_ctx != 0 ? 
hparams.n_yarn_orig_ctx : hparams.n_ctx_train; + cparams.cb_eval = params.cb_eval; + cparams.cb_eval_user_data = params.cb_eval_user_data; + auto rope_scaling_type = params.rope_scaling_type; if (rope_scaling_type == LLAMA_ROPE_SCALING_UNSPECIFIED) { rope_scaling_type = hparams.rope_scaling_type_train; diff --git a/llama.h b/llama.h index a570b0d6968fb..9f7a51a0f3aeb 100644 --- a/llama.h +++ b/llama.h @@ -2,6 +2,7 @@ #define LLAMA_H #include "ggml.h" +#include "ggml-backend.h" #ifdef GGML_USE_CUBLAS #include "ggml-cuda.h" #define LLAMA_MAX_DEVICES GGML_CUDA_MAX_DEVICES @@ -239,6 +240,9 @@ extern "C" { bool logits_all; // the llama_eval() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) bool embedding; // embedding mode only bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU + + ggml_backend_sched_eval_callback cb_eval; + void * cb_eval_user_data; }; // model quantization parameters From 01b6f68a003e4de97098001ae9650ee1c3645b13 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 14 Jan 2024 17:30:22 +0200 Subject: [PATCH 2/9] backend : group nodes in a single compute when user don't need them --- examples/simple/simple.cpp | 28 ++++++++++++++++------------ ggml-backend.c | 21 ++++++++++++++------- ggml-backend.h | 8 +++++++- 3 files changed, 37 insertions(+), 20 deletions(-) diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index b3ae68492ecfd..dac7aa60afb1d 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -8,23 +8,27 @@ // a function that can be called for every computed node during graph evaluation // the user can choose to whether to observe the data of the node depending on the tensor parameters -static bool observe_compute(int node_index, struct ggml_tensor * t, void * user_data) { +static bool observe_compute(int node_index, struct ggml_tensor * t, bool ask, void * user_data) { GGML_UNUSED(user_data); - // check if name contains soft_max - if (strstr(t->name, "soft_max") != 0) { - printf("%s: node_index = %5d, t->name = %32s, t->op = %12s, [%5d, %5d, %5d, %5d]\n", - __func__, node_index, t->name, ggml_op_name(t->op), (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]); + // the scheduler is asking us if we want to observe this node + if (ask) { + // check if name contains soft_max + return strstr(t->name, "soft_max") != 0; + } - std::vector t_data(ggml_nelements(t)); - ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t)); + // print the node data + printf("%s: node_index = %5d, t->name = %32s, t->op = %12s, [%5d, %5d, %5d, %5d]\n", + __func__, node_index, t->name, ggml_op_name(t->op), (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]); - // print first row - for (int i = 0; i < t->ne[0]; i++) { - printf("%8.4f ", t_data[i]); - } - printf("\n"); + std::vector t_data(ggml_nelements(t)); + ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t)); + + // print first row + for (int i = 0; i < t->ne[0]; i++) { + printf("%8.4f ", t_data[i]); } + printf("\n"); return true; } diff --git a/ggml-backend.c b/ggml-backend.c index ee78f45fa92bd..0ec46ed32fe81 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1337,18 +1337,25 @@ static void sched_compute_splits(ggml_backend_sched_t sched) { for (int j = 0; j < split->graph.n_nodes; j++) { struct ggml_tensor * t = split->graph.nodes[j]; - struct ggml_cgraph gv = ggml_graph_view(&split->graph, j, j + 1); + int k = j; - ggml_backend_graph_compute(split_backend, &gv); - - if 
(ggml_is_view_op(t->op)) { - continue; + // check if the user needs data from this node + while (!sched->callback_eval(k, t, true, sched->callback_eval_user_data) && k < split->graph.n_nodes - 1) { + t = split->graph.nodes[++k]; } - // TODO: j is node index in the split, not in the original graph - if (!sched->callback_eval(j, t, sched->callback_eval_user_data)) { + struct ggml_cgraph gv = ggml_graph_view(&split->graph, j, k + 1); + + ggml_backend_graph_compute(split_backend, &gv); + + // TODO: k is node index in the split, not in the original graph + // TODO: avoid the ask == true call here + if (sched->callback_eval(k, t, true, sched->callback_eval_user_data) && + !sched->callback_eval(k, t, false, sched->callback_eval_user_data)) { break; } + + j = k; } } uint64_t compute_end_us = ggml_time_us(); diff --git a/ggml-backend.h b/ggml-backend.h index 057ed120189c4..0d4ff69ba17a2 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -148,8 +148,14 @@ extern "C" { struct ggml_backend_sched; typedef struct ggml_backend_sched * ggml_backend_sched_t; + // when ask == true, the scheduler wants to know if the user wants to observe this node + // this allows the scheduler to batch nodes together in order to evaluate them in a single call + // + // when ask == false, the scheduler is passing the node tensor to the user for observation + // if the user returns false, the scheduler will cancel the graph compute + // // TODO: propose to rename to ggml_backend_sched_callback_eval - typedef bool (*ggml_backend_sched_eval_callback)(int node_index, struct ggml_tensor * t, void * user_data); + typedef bool (*ggml_backend_sched_eval_callback)(int node_index, struct ggml_tensor * t, bool ask, void * user_data); // Initialize a backend scheduler GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size); From 83f3d7a83c6eb9691db3f55477cccb3c9fd1cbab Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 15 Jan 2024 15:52:41 +0200 Subject: [PATCH 3/9] backend : clean-up the implementation ggml-ci --- examples/simple/simple.cpp | 11 ++++++----- ggml-backend.c | 27 +++++++++++++++------------ ggml-backend.h | 4 +--- 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index dac7aa60afb1d..ce3497345d0d7 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -8,19 +8,20 @@ // a function that can be called for every computed node during graph evaluation // the user can choose to whether to observe the data of the node depending on the tensor parameters -static bool observe_compute(int node_index, struct ggml_tensor * t, bool ask, void * user_data) { +static bool observe_compute(struct ggml_tensor * t, bool ask, void * user_data) { GGML_UNUSED(user_data); // the scheduler is asking us if we want to observe this node if (ask) { - // check if name contains soft_max + // check if name contains soft_max (customize to your needs) return strstr(t->name, "soft_max") != 0; } - // print the node data - printf("%s: node_index = %5d, t->name = %32s, t->op = %12s, [%5d, %5d, %5d, %5d]\n", - __func__, node_index, t->name, ggml_op_name(t->op), (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]); + // print the node info + printf("%s: t->name = %32s, t->op = %12s, [%5d, %5d, %5d, %5d]\n", + __func__, t->name, ggml_op_name(t->op), (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]); + // this will copy the data to host memory (if 
needed) std::vector t_data(ggml_nelements(t)); ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t)); diff --git a/ggml-backend.c b/ggml-backend.c index 0ec46ed32fe81..07482bedf2ace 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1334,28 +1334,31 @@ static void sched_compute_splits(ggml_backend_sched_t sched) { //ggml_backend_synchronize(split_backend); // necessary to measure compute time } else { // similar to ggml_backend_compare_graph_backend - for (int j = 0; j < split->graph.n_nodes; j++) { - struct ggml_tensor * t = split->graph.nodes[j]; + for (int j0 = 0; j0 < split->graph.n_nodes; j0++) { + struct ggml_tensor * t = split->graph.nodes[j0]; - int k = j; + int j1 = j0; - // check if the user needs data from this node - while (!sched->callback_eval(k, t, true, sched->callback_eval_user_data) && k < split->graph.n_nodes - 1) { - t = split->graph.nodes[++k]; + // determine the range [j0, j1] of nodes that can be computed together + while (j1 < split->graph.n_nodes - 1) { + // check if the user needs data from this node + if (sched->callback_eval(t, true, sched->callback_eval_user_data)) { + break; + } + + t = split->graph.nodes[++j1]; } - struct ggml_cgraph gv = ggml_graph_view(&split->graph, j, k + 1); + struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1); ggml_backend_graph_compute(split_backend, &gv); - // TODO: k is node index in the split, not in the original graph - // TODO: avoid the ask == true call here - if (sched->callback_eval(k, t, true, sched->callback_eval_user_data) && - !sched->callback_eval(k, t, false, sched->callback_eval_user_data)) { + if (sched->callback_eval(t, true, sched->callback_eval_user_data) && // ask + !sched->callback_eval(t, false, sched->callback_eval_user_data)) { // eval break; } - j = k; + j0 = j1; } } uint64_t compute_end_us = ggml_time_us(); diff --git a/ggml-backend.h b/ggml-backend.h index 0d4ff69ba17a2..5cef4d8b475ea 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -154,8 +154,7 @@ extern "C" { // when ask == false, the scheduler is passing the node tensor to the user for observation // if the user returns false, the scheduler will cancel the graph compute // - // TODO: propose to rename to ggml_backend_sched_callback_eval - typedef bool (*ggml_backend_sched_eval_callback)(int node_index, struct ggml_tensor * t, bool ask, void * user_data); + typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data); // Initialize a backend scheduler GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size); @@ -195,7 +194,6 @@ extern "C" { GGML_API struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph); GGML_API void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy); - // TODO: propose to rename this to ggml_backend_callback_compare typedef bool (*ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data); // Compare the output of two backends From e1b1db9f09fe73ca8460890b0c93c349685c54d7 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 15 Jan 2024 16:42:16 +0200 Subject: [PATCH 4/9] simple : do not perform tensor data copy if not needed --- examples/simple/simple.cpp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index ce3497345d0d7..8db37eef76901 100644 --- 
a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -22,12 +22,20 @@ static bool observe_compute(struct ggml_tensor * t, bool ask, void * user_data) __func__, t->name, ggml_op_name(t->op), (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]); // this will copy the data to host memory (if needed) - std::vector t_data(ggml_nelements(t)); - ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t)); + static std::vector t_data; + + const bool is_host = ggml_backend_buffer_is_host(t->buffer); + + if (!is_host || ggml_is_contiguous(t)) { + t_data.resize(ggml_nelements(t)); + ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t)); + } + + const float * data = is_host ? (const float *) t->data : t_data.data(); // print first row for (int i = 0; i < t->ne[0]; i++) { - printf("%8.4f ", t_data[i]); + printf("%8.4f ", data[i]); } printf("\n"); From e0493800cecc2e18964f9c6d9db9db249bfca9c8 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 15 Jan 2024 16:43:46 +0200 Subject: [PATCH 5/9] simple : fix --- examples/simple/simple.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index 8db37eef76901..b83e7a812d1ca 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -26,7 +26,7 @@ static bool observe_compute(struct ggml_tensor * t, bool ask, void * user_data) const bool is_host = ggml_backend_buffer_is_host(t->buffer); - if (!is_host || ggml_is_contiguous(t)) { + if (!is_host || !ggml_is_contiguous(t)) { t_data.resize(ggml_nelements(t)); ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t)); } From aa16b5445f138554786b18d1d3f666041b59b683 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 16 Jan 2024 10:52:08 +0200 Subject: [PATCH 6/9] simple : no need for ggml_is_contiguous + fix bool parse --- examples/simple/simple.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index b83e7a812d1ca..6d4746ff44b03 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -26,7 +26,7 @@ static bool observe_compute(struct ggml_tensor * t, bool ask, void * user_data) const bool is_host = ggml_backend_buffer_is_host(t->buffer); - if (!is_host || !ggml_is_contiguous(t)) { + if (!is_host) { t_data.resize(ggml_nelements(t)); ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t)); } @@ -61,7 +61,7 @@ int main(int argc, char ** argv) { } if (argc >= 4) { - observe = atoi(argv[3]); + observe = !!atoi(argv[3]); } if (params.prompt.empty()) { From 0c96c721509f113f703862dd2ed86e61841b2c54 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 16 Jan 2024 10:52:38 +0200 Subject: [PATCH 7/9] llama : fix callback placement in llama_context_params --- llama.cpp | 4 ++-- llama.h | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/llama.cpp b/llama.cpp index 5c1b211702f37..3af050c25399d 100644 --- a/llama.cpp +++ b/llama.cpp @@ -9265,14 +9265,14 @@ struct llama_context_params llama_context_default_params() { /*.yarn_beta_fast =*/ 32.0f, /*.yarn_beta_slow =*/ 1.0f, /*.yarn_orig_ctx =*/ 0, + /*.cb_eval =*/ nullptr, + /*.cb_eval_user_data =*/ nullptr, /*.type_k =*/ GGML_TYPE_F16, /*.type_v =*/ GGML_TYPE_F16, /*.mul_mat_q =*/ true, /*.logits_all =*/ false, /*.embedding =*/ false, /*.offload_kqv =*/ true, - /*.cb_eval =*/ nullptr, - /*.cb_eval_user_data =*/ nullptr, }; return result; diff --git a/llama.h b/llama.h index 9f7a51a0f3aeb..e268d7a1d0cc9 100644 --- 
a/llama.h +++ b/llama.h @@ -232,6 +232,9 @@ extern "C" { float yarn_beta_slow; // YaRN high correction dim uint32_t yarn_orig_ctx; // YaRN original context size + ggml_backend_sched_eval_callback cb_eval; + void * cb_eval_user_data; + enum ggml_type type_k; // data type for K cache enum ggml_type type_v; // data type for V cache @@ -240,9 +243,6 @@ extern "C" { bool logits_all; // the llama_eval() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) bool embedding; // embedding mode only bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU - - ggml_backend_sched_eval_callback cb_eval; - void * cb_eval_user_data; }; // model quantization parameters From 012ecec5066936df9a6eebf0e1fc38a6751fe902 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 16 Jan 2024 11:02:51 +0200 Subject: [PATCH 8/9] backend : avoid double-ask callback calls --- ggml-backend.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 07482bedf2ace..970495a4c7d8d 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1337,24 +1337,22 @@ static void sched_compute_splits(ggml_backend_sched_t sched) { for (int j0 = 0; j0 < split->graph.n_nodes; j0++) { struct ggml_tensor * t = split->graph.nodes[j0]; + // check if the user needs data from this node + bool need = sched->callback_eval(t, true, sched->callback_eval_user_data); + int j1 = j0; // determine the range [j0, j1] of nodes that can be computed together - while (j1 < split->graph.n_nodes - 1) { - // check if the user needs data from this node - if (sched->callback_eval(t, true, sched->callback_eval_user_data)) { - break; - } - + while (!need && j1 < split->graph.n_nodes - 1) { t = split->graph.nodes[++j1]; + need = sched->callback_eval(t, true, sched->callback_eval_user_data); } struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1); ggml_backend_graph_compute(split_backend, &gv); - if (sched->callback_eval(t, true, sched->callback_eval_user_data) && // ask - !sched->callback_eval(t, false, sched->callback_eval_user_data)) { // eval + if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) { break; } From 200dcaf79985214647b8b582fdfc1bdc401f444a Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 17 Jan 2024 15:24:18 +0200 Subject: [PATCH 9/9] simple : restore examples, imatrix will serve as a demo --- examples/simple/simple.cpp | 49 ++------------------------------------ ggml-backend.c | 4 ---- 2 files changed, 2 insertions(+), 51 deletions(-) diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index 6d4746ff44b03..9cfde8308f18f 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -6,49 +6,11 @@ #include #include -// a function that can be called for every computed node during graph evaluation -// the user can choose to whether to observe the data of the node depending on the tensor parameters -static bool observe_compute(struct ggml_tensor * t, bool ask, void * user_data) { - GGML_UNUSED(user_data); - - // the scheduler is asking us if we want to observe this node - if (ask) { - // check if name contains soft_max (customize to your needs) - return strstr(t->name, "soft_max") != 0; - } - - // print the node info - printf("%s: t->name = %32s, t->op = %12s, [%5d, %5d, %5d, %5d]\n", - __func__, t->name, ggml_op_name(t->op), (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]); - - // this will copy the data to host memory (if needed) - static 
std::vector t_data; - - const bool is_host = ggml_backend_buffer_is_host(t->buffer); - - if (!is_host) { - t_data.resize(ggml_nelements(t)); - ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t)); - } - - const float * data = is_host ? (const float *) t->data : t_data.data(); - - // print first row - for (int i = 0; i < t->ne[0]; i++) { - printf("%8.4f ", data[i]); - } - printf("\n"); - - return true; -} - int main(int argc, char ** argv) { gpt_params params; - bool observe = false; - if (argc == 1 || argv[1][0] == '-') { - printf("usage: %s MODEL_PATH [PROMPT] [OBSERV]\n" , argv[0]); + printf("usage: %s MODEL_PATH [PROMPT]\n" , argv[0]); return 1 ; } @@ -60,10 +22,6 @@ int main(int argc, char ** argv) { params.prompt = argv[2]; } - if (argc >= 4) { - observe = !!atoi(argv[3]); - } - if (params.prompt.empty()) { params.prompt = "Hello my name is"; } @@ -79,7 +37,7 @@ int main(int argc, char ** argv) { llama_model_params model_params = llama_model_default_params(); - model_params.n_gpu_layers = 99; // offload all layers to the GPU + // model_params.n_gpu_layers = 99; // offload all layers to the GPU llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); @@ -97,9 +55,6 @@ int main(int argc, char ** argv) { ctx_params.n_threads = params.n_threads; ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch; - ctx_params.cb_eval = observe ? observe_compute : NULL; - ctx_params.cb_eval_user_data = NULL; - llama_context * ctx = llama_new_context_with_model(model, ctx_params); if (ctx == NULL) { diff --git a/ggml-backend.c b/ggml-backend.c index 970495a4c7d8d..8dfbb2af290e7 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1384,10 +1384,6 @@ static void sched_reset(ggml_backend_sched_t sched) { memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size); memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size); - // TODO: should we clear the callbacks? - //sched->callback_eval = NULL; - //sched->callback_eval_user_data = NULL; - sched->is_reset = true; }
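
Editor's note (not part of the patch series): with PATCH 9/9 the observer was removed from
examples/simple (the imatrix example is meant to serve as the demo), so the snippet below is a
minimal usage sketch reconstructed from the earlier patches in this series. It assumes the usual
examples/simple setup (a loaded llama_model named `model`) and that the observed tensors are
contiguous F32, as is the case for the soft_max outputs it watches.

#include "llama.h"

#include <cstdio>
#include <cstring>
#include <vector>

// called by the scheduler twice per observed node: first with ask == true
// ("does the user want this node?"), then with ask == false once the node
// has been computed and its data can be read
static bool observe_compute(struct ggml_tensor * t, bool ask, void * user_data) {
    GGML_UNUSED(user_data);

    if (ask) {
        // observe only nodes whose name contains "soft_max" (customize to your needs)
        return strstr(t->name, "soft_max") != 0;
    }

    printf("%s: t->name = %32s, t->op = %12s, [%5d, %5d, %5d, %5d]\n",
            __func__, t->name, ggml_op_name(t->op), (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]);

    // copy the data to host memory only if it is not already there
    static std::vector<float> t_data;

    const bool is_host = ggml_backend_buffer_is_host(t->buffer);

    if (!is_host) {
        t_data.resize(ggml_nelements(t));
        ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t));
    }

    const float * data = is_host ? (const float *) t->data : t_data.data();

    // print the first row of the tensor
    for (int i = 0; i < t->ne[0]; i++) {
        printf("%8.4f ", data[i]);
    }
    printf("\n");

    return true; // returning false here cancels the rest of the graph compute
}

// registration when creating the context:
//
//     llama_context_params ctx_params = llama_context_default_params();
//     ctx_params.cb_eval           = observe_compute;
//     ctx_params.cb_eval_user_data = NULL;
//
//     llama_context * ctx = llama_new_context_with_model(model, ctx_params);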