Commit 3a5dfeb

Merge branch 'LostRuins:concedo' into koboldcpp-rocm
2 parents 665cc11 + b1f00fa

File tree

3 files changed: 31 additions & 7 deletions

Makefile

Lines changed: 8 additions & 0 deletions

@@ -149,6 +149,14 @@ ifdef LLAMA_CUDA_DMMV_Y
 else
 	NVCCFLAGS += -DGGML_CUDA_DMMV_Y=1
 endif # LLAMA_CUDA_DMMV_Y
+ifdef LLAMA_CUDA_DMMV_F16
+	NVCCFLAGS += -DGGML_CUDA_DMMV_F16
+endif # LLAMA_CUDA_DMMV_F16
+ifdef LLAMA_CUDA_KQUANTS_ITER
+	NVCCFLAGS += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
+else
+	NVCCFLAGS += -DK_QUANTS_PER_ITERATION=2
+endif
 ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
 	$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) $(CUBLAS_CXXFLAGS) -Wno-pedantic -c $< -o $@
 ggml_v2-cuda.o: otherarch/ggml_v2-cuda.cu otherarch/ggml_v2-cuda.h
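Both new knobs mirror upstream llama.cpp's CUDA build flags: LLAMA_CUDA_DMMV_F16 enables an FP16 code path in the dequantize-mul-mat-vec kernels, and LLAMA_CUDA_KQUANTS_ITER sets the K_QUANTS_PER_ITERATION constant the k-quant kernels are compiled with (2 if unset). As a minimal sketch of how a -D flag like this is typically consumed, the stand-alone C++ below reuses the same macro name; the loop body is illustrative only, not the real ggml-cuda kernel:

    // Illustrative only: how a compile-time knob like
    //   NVCCFLAGS += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
    // is usually consumed. The arithmetic is a stand-in for the real kernel.
    #include <cstdio>

    #ifndef K_QUANTS_PER_ITERATION
    #define K_QUANTS_PER_ITERATION 2   // default, matching the Makefile's else branch
    #endif

    // upstream ggml-cuda only accepts 1 or 2; guard the same way
    static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2,
                  "K_QUANTS_PER_ITERATION must be 1 or 2");

    int main() {
        float acc = 0.0f;
        // Each outer step processes K_QUANTS_PER_ITERATION elements: a bigger
        // value means fewer iterations but more work (registers) per step.
        for (int i = 0; i < 8; i += K_QUANTS_PER_ITERATION) {
            for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
                acc += float(i + j);
            }
        }
        printf("K_QUANTS_PER_ITERATION=%d acc=%.1f\n", K_QUANTS_PER_ITERATION, acc);
        return 0;
    }

Compiling with e.g. g++ -DK_QUANTS_PER_ITERATION=1 shows the flag taking effect; in the actual build the define rides along in NVCCFLAGS exactly as the hunk above adds it.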

koboldcpp.py

Lines changed: 1 addition & 1 deletion

@@ -726,7 +726,7 @@ def main(args):
         sys.exit(2)

     if args.hordeconfig and args.hordeconfig[0]!="":
-        global friendlymodelname, maxhordelen, showdebug
+        global friendlymodelname, maxhordelen, maxhordectx, showdebug
         friendlymodelname = "koboldcpp/"+args.hordeconfig[0]
         if len(args.hordeconfig) > 1:
             maxhordelen = int(args.hordeconfig[1])
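Why the one-line fix matters: inside main(), Python treats any name that is assigned as function-local unless it appears in a global statement. maxhordectx is presumably assigned further down in this block (from an extra hordeconfig field outside the visible hunk), so without listing it here that assignment would create a short-lived local and leave the module-level maxhordectx unchanged.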

otherarch/gptj_v3.cpp

Lines changed: 22 additions & 6 deletions

@@ -150,7 +150,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         params.mem_size = ctx_size;
         params.mem_buffer = NULL;
         params.no_alloc = false;
-
+

         model.ctx = ggml_init(params);
         if (!model.ctx) {

@@ -281,7 +281,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
                 fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
                 return ModelLoadResult::FAIL;
             }
-
+

             if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {


@@ -298,7 +298,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
                         __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
                 return ModelLoadResult::FAIL;
             }
-
+
         }

         // for debugging

@@ -367,8 +367,16 @@ bool gptj_eval(
     static size_t buf_size = 256u*1024*1024;
     static void * buf = malloc(buf_size);

-    if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) {
-        const size_t buf_size_new = 320u*1024*1024 + 1.6*(mem_per_token*N); // add 10% to account for ggml object overhead
+    // use 2 scratch buffers
+    // TODO: very hacky solution - reimplement in a more elegant way
+    static size_t scr0_size = (n_ctx>1024?512u:256u)*1024*1024;
+    static void * scr0 = malloc(scr0_size);
+
+    static size_t scr1_size = (n_ctx>1024?512u:256u)*1024*1024;
+    static void * scr1 = malloc(scr1_size);
+
+    if (mem_per_token > 0 && mem_per_token*N*1.05 > buf_size) {
+        const size_t buf_size_new = 64u*1024*1024 + 1.15*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);

         // reallocate

@@ -388,7 +396,7 @@ bool gptj_eval(
     params.mem_size = buf_size;
     params.mem_buffer = buf;
     params.no_alloc = false;
-
+

     struct ggml_context * ctx0 = ggml_init(params);
     struct ggml_cgraph gf = {};

@@ -403,6 +411,8 @@ bool gptj_eval(
     for (int il = 0; il < n_layer; ++il) {
         struct ggml_tensor * cur;

+        ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
+
         // norm
         {
             cur = ggml_norm(ctx0, inpL);

@@ -490,6 +500,8 @@ bool gptj_eval(
                         cur);
         }

+        ggml_set_scratch(ctx0, { 0, scr1_size, scr1, });
+
         struct ggml_tensor * inpFF = cur;

         // feed-forward network

@@ -525,6 +537,8 @@ bool gptj_eval(
         inpL = ggml_add(ctx0, cur, inpL);
     }

+    ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
+
     // norm
     {
         inpL = ggml_norm(ctx0, inpL);

@@ -537,6 +551,8 @@ bool gptj_eval(
             ggml_repeat(ctx0, model.ln_f_b, inpL));
     }

+    ggml_set_scratch(ctx0, { 0, 0, nullptr, });
+
     // lm_head
     {
         inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL);
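These hunks port ggml's two-scratch-buffer scheme into the GPT-J eval path: each layer's temporaries ping-pong between two fixed arenas (scr0 for the norm/attention phases, scr1 for the feed-forward block), and ggml_set_scratch(ctx0, { 0, 0, nullptr, }) drops back to the main context buffer before the lm_head so the logits outlive the arenas. Since per-layer intermediates no longer land in the main buffer, the reallocation trigger also shrinks, from roughly 2*(mem_per_token*N) + 64 MB down to 1.05*(mem_per_token*N). The stand-alone toy below models the pattern with a hand-rolled bump allocator; Scratch, set_scratch, and alloc_in_scratch are hypothetical stand-ins for ggml's internals, not its API:

    // Toy model of the scratch-buffer pattern in the diff above. It mimics what
    // ggml_set_scratch does inside ggml, but with a hand-rolled bump allocator
    // so the sketch compiles standalone. All names here are hypothetical.
    #include <cstdio>
    #include <cstdlib>
    #include <cstddef>

    struct Scratch {
        size_t offs;   // bump offset within the arena
        size_t size;
        void * data;
    };

    static Scratch g_scr = {0, 0, nullptr};  // currently armed scratch arena

    // Point subsequent allocations at an arena; passing {0, size, data} (as the
    // diff does) resets the bump offset, implicitly recycling old contents.
    void set_scratch(void * data, size_t size) { g_scr = {0, size, data}; }

    // Grab n bytes from the current arena; with no arena armed, fall back to a
    // plain heap allocation standing in for the main ggml context buffer.
    void * alloc_in_scratch(size_t n) {
        if (g_scr.data == nullptr) return malloc(n);  // "main buffer" fallback
        if (g_scr.offs + n > g_scr.size) { fprintf(stderr, "scratch overflow\n"); exit(1); }
        void * p = (char *)g_scr.data + g_scr.offs;
        g_scr.offs += n;
        return p;
    }

    int main() {
        const size_t scr_size = 1u << 20;
        void * scr0 = malloc(scr_size);
        void * scr1 = malloc(scr_size);

        for (int il = 0; il < 4; ++il) {      // per-layer loop, as in gptj_eval
            set_scratch(scr0, scr_size);       // attention temporaries -> scr0
            alloc_in_scratch(4096);
            set_scratch(scr1, scr_size);       // feed-forward temporaries -> scr1
            alloc_in_scratch(4096);
        }
        set_scratch(nullptr, 0);               // lm_head output -> main buffer
        void * logits = alloc_in_scratch(4096);

        printf("done, logits at %p\n", logits);
        free(logits); free(scr0); free(scr1);
        return 0;
    }

Two arenas rather than one because a phase's output tensor lives in whichever arena was armed when it was created: arming the other arena for the next phase keeps that output intact while fresh temporaries are bump-allocated, and each arena is implicitly reset (offset back to zero) the next time it is re-armed.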

0 commit comments