```diff
@@ -9539,7 +9539,7 @@ struct ggml_sycl_pool_leg : public ggml_sycl_pool {
         }
         void * ptr;
         size_t look_ahead_size = (size_t) (1.05 * size);
-
+
         SYCL_CHECK(
             CHECK_TRY_ERROR(ptr = (void *)sycl::malloc_device(
                 look_ahead_size, *qptr)));
```
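Each hunk in this commit pairs a removed line with a visually identical added one, which is characteristic of a trailing-whitespace cleanup. The surrounding pool code is still worth a gloss: the legacy pool over-allocates by 5% (`look_ahead_size`) so that a slightly larger follow-up request can reuse the same block. Below is a minimal standalone sketch of that look-ahead allocation, assuming a plain `sycl::queue` in place of ggml's `*qptr` and skipping the `SYCL_CHECK`/`CHECK_TRY_ERROR` wrappers:

```cpp
#include <sycl/sycl.hpp>
#include <cstdio>

int main() {
    sycl::queue q{sycl::default_selector_v}; // stand-in for *qptr in the hunk

    size_t size = 1 << 20;                          // requested bytes
    size_t look_ahead_size = (size_t)(1.05 * size); // 5% head-room, as above

    void * ptr = sycl::malloc_device(look_ahead_size, q);
    if (ptr == nullptr) {
        std::fprintf(stderr, "malloc_device failed\n");
        return 1;
    }
    // only `size` bytes are handed out; the extra 5% lets the pool
    // satisfy a slightly larger request later without reallocating
    sycl::free(ptr, q);
    return 0;
}
```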
```diff
@@ -10335,7 +10335,7 @@ inline void ggml_sycl_op_dequantize_mul_mat_vec(
 }

 inline void ggml_sycl_op_mul_mat_sycl(
-    ggml_backend_sycl_context & ctx,
+    ggml_backend_sycl_context & ctx,
     const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
     const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
     float *dst_dd_i, const int64_t row_low, const int64_t row_high,
```
```diff
@@ -11426,7 +11426,7 @@ static void ggml_sycl_mul_mat_vec_nc(ggml_backend_sycl_context & ctx, const ggml

     SYCL_CHECK(ggml_sycl_set_device(ctx.device));
     queue_ptr main_stream = ctx.stream();
-
+
     void * src0_ddq = src0->data;
     float * src1_ddf = (float *) src1->data;
     float * dst_ddf = (float *) dst->data;
```
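This hunk shows the usual ggml-sycl kernel-launch preamble: bind the context's device, take its queue, then reinterpret the tensors' untyped `data` pointers. A hedged sketch of the same flow in plain SYCL, with a toy elementwise kernel standing in for the real mat-vec operation (all names and sizes here are illustrative):

```cpp
#include <sycl/sycl.hpp>
#include <cstdio>

int main() {
    sycl::queue main_stream{sycl::default_selector_v}; // plays ctx.stream()

    const int n = 4;
    float * src1_ddf = sycl::malloc_device<float>(n, main_stream);
    float * dst_ddf  = sycl::malloc_device<float>(n, main_stream);

    const float host_src[n] = {1.f, 2.f, 3.f, 4.f};
    main_stream.memcpy(src1_ddf, host_src, n * sizeof(float)).wait();

    // trivial elementwise kernel in place of the real mul-mat-vec
    main_stream.parallel_for(sycl::range<1>(n), [=](sycl::id<1> i) {
        dst_ddf[i] = 2.0f * src1_ddf[i];
    }).wait();

    float host_dst[n];
    main_stream.memcpy(host_dst, dst_ddf, n * sizeof(float)).wait();
    std::printf("dst[0] = %f\n", host_dst[0]);

    sycl::free(src1_ddf, main_stream);
    sycl::free(dst_ddf, main_stream);
    return 0;
}
```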
```diff
@@ -12741,7 +12741,7 @@ ggml_backend_sycl_split_buffer_init_tensor(ggml_backend_buffer_t buffer,
         // FIXME: do not crash if cudaMalloc fails
         // currently, init_tensor cannot fail, it needs to be fixed in ggml-backend first
         ggml_sycl_set_device(i);
-        const queue_ptr stream = ctx->streams[i];
+        const queue_ptr stream = ctx->streams[i];
         char * buf;
         /*
         DPCT1009:208: SYCL uses exceptions to report errors and does not use the
```
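Both split-buffer hunks use the same per-device idiom: `ggml_sycl_set_device(i)` followed by grabbing that device's queue from `ctx->streams[i]`. A rough sketch of that loop shape, assuming a toy context holding one queue per device (the struct and function names are stand-ins, not ggml's):

```cpp
#include <sycl/sycl.hpp>
#include <vector>

// stand-in for the split-buffer context: one queue per device
struct toy_split_ctx {
    std::vector<sycl::queue> streams;
};

static void alloc_per_device(toy_split_ctx & ctx,
                             std::vector<char *> & bufs,
                             size_t bytes_per_device) {
    for (size_t i = 0; i < ctx.streams.size(); ++i) {
        sycl::queue & stream = ctx.streams[i]; // analogous to ctx->streams[i]
        char * buf = (char *) sycl::malloc_device(bytes_per_device, stream);
        // the real code wraps the allocation in SYCL_CHECK/CHECK_TRY_ERROR,
        // and the FIXME above notes it must not crash when this fails
        bufs.push_back(buf);
    }
}
```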
```diff
@@ -12878,7 +12878,7 @@ ggml_backend_sycl_split_buffer_get_tensor(ggml_backend_buffer_t buffer,
         was inserted. You need to rewrite this code.
         */
         ggml_sycl_set_device(i);
-        const queue_ptr stream = ctx->streams[i];
+        const queue_ptr stream = ctx->streams[i];
         SYCL_CHECK(CHECK_TRY_ERROR(
             (*stream)
                 .memcpy(buf_host, extra->data_device[i], original_size)
```
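The DPCT1009 comments flag that SYCL reports failures through exceptions rather than error codes, which is what `CHECK_TRY_ERROR` converts back into a checkable status. A minimal sketch of the same device-to-host copy with the exception handled directly (buffer names and sizes are placeholders):

```cpp
#include <sycl/sycl.hpp>
#include <cstdio>

int main() {
    sycl::queue stream{sycl::default_selector_v};

    const size_t original_size = 256;
    void * data_device = sycl::malloc_device(original_size, stream);
    char buf_host[256];

    try {
        // same shape as the hunk: (*stream).memcpy(dst, src, n), then wait
        stream.memcpy(buf_host, data_device, original_size).wait();
    } catch (const sycl::exception & e) {
        std::fprintf(stderr, "SYCL copy failed: %s\n", e.what());
        sycl::free(data_device, stream);
        return 1;
    }

    sycl::free(data_device, stream);
    return 0;
}
```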
```diff
@@ -12981,7 +12981,7 @@ static ggml_backend_buffer_type_i ggml_backend_sycl_split_buffer_type_interface
 GGML_CALL ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split) {
     static std::mutex mutex;
     std::lock_guard<std::mutex> lock(mutex);
-
+
     GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_split_buffer_type\n");
     ggml_check_sycl();
     // FIXME: this is not thread safe
```
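The final hunk sits inside `ggml_backend_sycl_split_buffer_type`, which serializes callers with a function-local `static std::mutex` (while the trailing FIXME admits part of the routine still is not thread safe). A generic sketch of that guard pattern, a `std::lock_guard` over a static mutex protecting lazily created shared state; the key and value types here are stand-ins:

```cpp
#include <array>
#include <map>
#include <mutex>

// stand-in for the per-device tensor-split configuration
using split_key = std::array<float, 4>;

struct toy_buffer_type {
    split_key split;
};

static toy_buffer_type & get_split_buffer_type(const split_key & key) {
    static std::mutex mutex;                        // shared by all callers
    static std::map<split_key, toy_buffer_type> types;

    // serialize lookup/creation so concurrent callers get one instance per key
    std::lock_guard<std::mutex> lock(mutex);
    auto it = types.find(key);
    if (it == types.end()) {
        it = types.emplace(key, toy_buffer_type{key}).first;
    }
    return it->second;
}
```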