Skip to content

Commit 3f4d2b2

Browse files
cebtenzzre authored and pkrmf committed
benchmark-matmult : do not use integer abs() on a float (ggml-org#3277)
1 parent 4080d69 commit 3f4d2b2

File tree

1 file changed

+5
-5
lines changed

1 file changed

+5
-5
lines changed

examples/benchmark/benchmark-matmult.cpp

+5-5
Original file line number | Diff line number | Diff line change
@@ -21,7 +21,7 @@
2121
#pragma warning(disable: 4244 4267) // possible loss of data
2222
#endif
2323

24-
void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
24+
static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
2525
struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
2626

2727
if (plan.work_size > 0) {
@@ -32,7 +32,7 @@ void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph,
3232
ggml_graph_compute(graph, &plan);
3333
}
3434

35-
float tensor_sum_elements(const ggml_tensor * tensor) {
35+
static float tensor_sum_elements(const ggml_tensor * tensor) {
3636
double sum = 0;
3737
if (tensor->type == GGML_TYPE_F32) {
3838
for (int j = 0; j < tensor->ne[1]; j++) {
@@ -44,7 +44,7 @@ float tensor_sum_elements(const ggml_tensor * tensor) {
4444
return sum;
4545
}
4646

47-
void tensor_dump(const ggml_tensor * tensor, const char * name) {
47+
static void tensor_dump(const ggml_tensor * tensor, const char * name) {
4848
printf("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi) - ", name,
4949
tensor->type, ggml_type_name(tensor->type),
5050
tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->nb[0], tensor->nb[1], tensor->nb[2]);
@@ -59,7 +59,7 @@ struct benchmark_params_struct {
5959
int32_t n_iterations = 10;
6060
};
6161

62-
void print_usage(int /*argc*/, char ** argv, struct benchmark_params_struct params) {
62+
static void print_usage(int /*argc*/, char ** argv, struct benchmark_params_struct params) {
6363
fprintf(stderr, "usage: %s [options]\n", argv[0]);
6464
fprintf(stderr, "\n");
6565
fprintf(stderr, "options:\n");
@@ -253,7 +253,7 @@ int main(int argc, char ** argv) {
253253
// Check that the matrix multiplication result is in the right ballpark
254254
// We cannot use the exact value from the F32 multiplication because the quantization will be slightly different
255255
float sum_of_Q4_result = tensor_sum_elements(gf31.nodes[0]);
256-
float delta = abs(sum_of_Q4_result - sum_of_F32_reference);
256+
float delta = std::abs(sum_of_Q4_result - sum_of_F32_reference);
257257
float allowed_delta = (sum_of_F32_reference) / 1000 / 1000; // Let's accept an epsilon of 10^-6
258258

259259
if (delta > allowed_delta) {

0 commit comments

Comments (0)