Commit c63bb1d

CUDA: use mul_mat_q kernels by default (ggml-org#2683)
Parent commit: 3b6cfe7

File tree: 4 files changed (+16, -17 lines)


common/common.cpp

+8, -8
@@ -387,11 +387,11 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
 #else
         fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n");
 #endif // GGML_USE_CUBLAS
-    } else if (arg == "--mul-mat-q" || arg == "-mmq") {
+    } else if (arg == "--no-mul-mat-q" || arg == "-nommq") {
 #ifdef GGML_USE_CUBLAS
-        params.mul_mat_q = true;
+        params.mul_mat_q = false;
 #else
-        fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to use mul_mat_q kernels.\n");
+        fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n");
 #endif // GGML_USE_CUBLAS
     } else if (arg == "--low-vram" || arg == "-lv") {
 #ifdef GGML_USE_CUBLAS
@@ -599,11 +599,11 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stdout, " number of layers to store in VRAM\n");
     fprintf(stdout, " -ts SPLIT --tensor-split SPLIT\n");
     fprintf(stdout, " how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
-    fprintf(stdout, " -mg i, --main-gpu i the GPU to use for scratch and small tensors\n" );
-    fprintf(stdout, " -lv, --low-vram don't allocate VRAM scratch buffer\n" );
-    fprintf(stdout, " -mmq, --mul-mat-q use experimental mul_mat_q CUDA kernels instead of cuBLAS. TEMP!!!\n" );
-    fprintf(stdout, " Reduces VRAM usage by 700/970/1430 MiB for 7b/13b/33b but prompt processing speed\n" );
-    fprintf(stdout, " is still suboptimal, especially q2_K, q3_K, q5_K, and q6_K.\n" );
+    fprintf(stdout, " -mg i, --main-gpu i the GPU to use for scratch and small tensors\n");
+    fprintf(stdout, " -lv, --low-vram don't allocate VRAM scratch buffer\n");
+    fprintf(stdout, " -nommq, --no-mul-mat-q\n");
+    fprintf(stdout, " use cuBLAS instead of custom mul_mat_q CUDA kernels.\n");
+    fprintf(stdout, " Not recommended since this is both slower and uses more VRAM.\n");
 #endif
     fprintf(stdout, " --mtest compute maximum memory usage\n");
     fprintf(stdout, " --export export the computation graph to 'llama.ggml'\n");

common/common.h

+1, -1
@@ -68,7 +68,7 @@ struct gpt_params {
     size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score
 
     bool low_vram = false; // if true, reduce VRAM usage at the cost of performance
-    bool mul_mat_q = false; // if true, use experimental mul_mat_q kernels
+    bool mul_mat_q = true; // if true, use mul_mat_q kernels instead of cuBLAS
     bool memory_f16 = true; // use f16 instead of f32 for memory kv
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color = false; // use color to distinguish generations and inputs
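
The new default in gpt_params only takes effect because the value is forwarded to the CUDA backend, which keeps its own flag (see the g_mul_mat_q change in ggml-cuda.cu below). A hedged sketch of that hand-off, assuming a setter along the lines of ggml_cuda_set_mul_mat_q; the setter and global are stubbed here so the sketch stands alone and should not be read as the project's actual plumbing.

#include <cstdio>

// stand-ins for the backend global and its setter; in the real tree these live in ggml-cuda
static bool g_mul_mat_q = true;

static void ggml_cuda_set_mul_mat_q(bool mul_mat_q) {
    g_mul_mat_q = mul_mat_q;
}

struct gpt_params_sketch {
    bool mul_mat_q = true; // same default as the new common.h
};

int main() {
    gpt_params_sketch params;                  // normally filled in by the argument parser
    params.mul_mat_q = false;                  // e.g. the user passed -nommq
    ggml_cuda_set_mul_mat_q(params.mul_mat_q); // forward the choice to the backend
    printf("g_mul_mat_q = %s\n", g_mul_mat_q ? "true" : "false");
    return 0;
}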

examples/server/server.cpp

+6, -7
@@ -671,12 +671,11 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     fprintf(stdout, " number of layers to store in VRAM\n");
     fprintf(stdout, " -ts SPLIT --tensor-split SPLIT\n");
     fprintf(stdout, " how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
-    fprintf(stdout, " how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
     fprintf(stdout, " -mg i, --main-gpu i the GPU to use for scratch and small tensors\n");
     fprintf(stdout, " -lv, --low-vram don't allocate VRAM scratch buffer\n");
-    fprintf(stdout, " -mmq, --mul-mat-q use experimental mul_mat_q CUDA kernels instead of cuBLAS. TEMP!!!\n" );
-    fprintf(stdout, " Reduces VRAM usage by 700/970/1430 MiB for 7b/13b/33b but prompt processing speed\n" );
-    fprintf(stdout, " is still suboptimal, especially q2_K, q3_K, q5_K, and q6_K.\n" );
+    fprintf(stdout, " -nommq, --no-mul-mat-q\n");
+    fprintf(stdout, " use cuBLAS instead of custom mul_mat_q CUDA kernels.\n");
+    fprintf(stdout, " Not recommended since this is both slower and uses more VRAM.\n");
 #endif
     fprintf(stdout, " -m FNAME, --model FNAME\n");
     fprintf(stdout, " model path (default: %s)\n", params.model.c_str());
@@ -867,12 +866,12 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
         LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n", {});
 #endif // GGML_USE_CUBLAS
     }
-    else if (arg == "--mul-mat-q" || arg == "-mmq")
+    else if (arg == "--no-mul-mat-q" || arg == "-nommq")
     {
 #ifdef GGML_USE_CUBLAS
-        params.mul_mat_q = true;
+        params.mul_mat_q = false;
 #else
-        LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. It is not possible to use mul_mat_q kernels.\n", {});
+        LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n", {});
 #endif // GGML_USE_CUBLAS
     }
     else if (arg == "--main-gpu" || arg == "-mg")

ggml-cuda.cu

+1, -1
@@ -287,7 +287,7 @@ static int g_device_count = -1;
 static int g_main_device = 0;
 static int g_compute_capabilities[GGML_CUDA_MAX_DEVICES];
 static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};
-static bool g_mul_mat_q = false;
+static bool g_mul_mat_q = true;
 
 static void * g_scratch_buffer = nullptr;
 static size_t g_scratch_size = 1024*1024*1024; // 1 GB by default
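
To illustrate what flipping g_mul_mat_q to true buys: the flag chooses between the custom quantized mul_mat_q kernels and the cuBLAS GEMM path when matrices are multiplied. The sketch below is a deliberately simplified, hypothetical dispatcher; the real selection logic in ggml-cuda.cu also depends on tensor types and device capabilities, among other conditions.

#include <cstdio>

static bool g_mul_mat_q = true; // new default, matching the hunk above

// hypothetical stand-ins for the two back ends
static void run_mul_mat_q_kernel() { printf("custom mul_mat_q kernel\n"); }
static void run_cublas_gemm()      { printf("cuBLAS GEMM\n"); }

// simplified dispatch: passing -nommq/--no-mul-mat-q is now the only way to reach the cuBLAS branch
static void mul_mat_dispatch() {
    if (g_mul_mat_q) {
        run_mul_mat_q_kernel();
    } else {
        run_cublas_gemm();
    }
}

int main() {
    mul_mat_dispatch();
    return 0;
}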
