
Commit 8b67998

Fix whitespace, add .editorconfig, add GitHub workflow (#883)
1 parent 3e6e70d commit 8b67998

File tree: 15 files changed, +84 -46 lines

Because the edits to existing files are whitespace fixes, the removed and added lines in the hunks below often look identical; the actual differences are in indentation characters, trailing whitespace, or a missing final newline, which the diff text does not show.


.devops/main.Dockerfile (+1 -1)

@@ -15,4 +15,4 @@ FROM ubuntu:$UBUNTU_VERSION as runtime
 
 COPY --from=build /app/main /main
 
-ENTRYPOINT [ "/main" ]
+ENTRYPOINT [ "/main" ]

.dockerignore (+1 -1)

@@ -21,4 +21,4 @@ models/*
 
 arm_neon.h
 compile_commands.json
-Dockerfile
+Dockerfile

.ecrc (+5, new file)

@@ -0,0 +1,5 @@
+{
+    "Disable": {
+        "IndentSize": true
+    }
+}

.editorconfig (+16, new file)

@@ -0,0 +1,16 @@
+# https://EditorConfig.org
+
+# Top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file, utf-8 charset
+[*]
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+charset = utf-8
+indent_style = space
+indent_size = 4
+
+[Makefile]
+indent_style = tab

.github/ISSUE_TEMPLATE/custom.md (+8 -8)

@@ -22,9 +22,9 @@ Please provide a detailed written description of what you were trying to do, and
 
 # Current Behavior
 
-Please provide a detailed written description of what `llama.cpp` did, instead.
+Please provide a detailed written description of what `llama.cpp` did, instead.
 
-# Environment and Context
+# Environment and Context
 
 Please provide detailed information about your computer setup. This is important in case the issue is not reproducible except for under certain specific conditions.
 
@@ -133,7 +133,7 @@ llama_model_load: loading model part 8/8 from './models/65B/ggml-model-q4_0.bin.
 llama_model_load: .......................................................................................... done
 llama_model_load: model size = 4869.09 MB / num tensors = 723
 
-system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
+system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
 
 main: prompt: 'Please close your issue when it has been answered.'
 main: number of tokens in prompt = 11
@@ -166,14 +166,14 @@ main: total time = 246406.42 ms
 
 Performance counter stats for './main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p Please close your issue when it has been answered.':
 
-    3636882.89 msec task-clock # 14.677 CPUs utilized
-    13509 context-switches # 3.714 /sec
-    2436 cpu-migrations # 0.670 /sec
-    10476679 page-faults # 2.881 K/sec
+    3636882.89 msec task-clock # 14.677 CPUs utilized
+    13509 context-switches # 3.714 /sec
+    2436 cpu-migrations # 0.670 /sec
+    10476679 page-faults # 2.881 K/sec
     13133115082869 cycles # 3.611 GHz (16.77%)
     29314462753 stalled-cycles-frontend # 0.22% frontend cycles idle (16.76%)
     10294402631459 stalled-cycles-backend # 78.39% backend cycles idle (16.74%)
-    23479217109614 instructions # 1.79 insn per cycle
+    23479217109614 instructions # 1.79 insn per cycle
     # 0.44 stalled cycles per insn (16.76%)
     2353072268027 branches # 647.002 M/sec (16.77%)
     1998682780 branch-misses # 0.08% of all branches (16.76%)

.github/workflows/docker.yml (+1 -1)

@@ -60,4 +60,4 @@ jobs:
           push: ${{ github.event_name == 'push' }}
           platforms: linux/amd64,linux/arm64
           tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}"
-          file: ${{ matrix.config.dockerfile }}
+          file: ${{ matrix.config.dockerfile }}

.github/workflows/editorconfig.yml (+17, new file)

@@ -0,0 +1,17 @@
+name: EditorConfig Checker
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master
+
+jobs:
+  editorconfig:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: editorconfig-checker/action-editorconfig-checker@main
+      - run: editorconfig-checker
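The new workflow runs the checker with no arguments, so the same check can be reproduced locally before pushing. A minimal sketch, assuming the `editorconfig-checker` binary is already installed and on `PATH` (the install method itself is not part of this commit):

```bash
# Run from the repository root, as the CI job does. The checker reads the new
# .editorconfig for the formatting rules and .ecrc for its own options (the
# IndentSize check is disabled there), then reports any files that violate them.
editorconfig-checker
```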

README.md (+5 -5)

@@ -243,7 +243,7 @@ There 26 letters in the English Alphabet
 The majority (54%) are using public transit. This includes buses, trams and metros with over 100 lines throughout the city which make it very accessible for tourists to navigate around town as well as locals who commute by tram or metro on a daily basis
 > List 5 words that start with "ca".
 cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach.
->
+>
 ```
 
 ### Using [GPT4All](https://github.com/nomic-ai/gpt4all)
@@ -254,17 +254,17 @@ cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach.
 convert the model from the old format to the new format with [./migrate-ggml-2023-03-30-pr613.py](./migrate-ggml-2023-03-30-pr613.py):
 
 ```bash
-python3 convert-gpt4all-to-ggml.py models/gpt4all-7B/gpt4all-lora-quantized.bin ./models/tokenizer.model
+python3 convert-gpt4all-to-ggml.py models/gpt4all-7B/gpt4all-lora-quantized.bin ./models/tokenizer.model
 python3 migrate-ggml-2023-03-30-pr613.py models/gpt4all-7B/gpt4all-lora-quantized.bin models/gpt4all-7B/gpt4all-lora-quantized-new.bin
 ```
-
+
 - You can now use the newly generated `gpt4all-lora-quantized-new.bin` model in exactly the same way as all other models
 - The original model is saved in the same folder with a suffix `.orig`
 
 ### Obtaining and verifying the Facebook LLaMA original model and Stanford Alpaca model data
 
 - **Under no circumstances share IPFS, magnet links, or any other links to model downloads anywhere in this respository, including in issues, discussions or pull requests. They will be immediately deleted.**
-- The LLaMA models are officially distributed by Facebook and will **never** be provided through this repository.
+- The LLaMA models are officially distributed by Facebook and will **never** be provided through this repository.
 - Refer to [Facebook's LLaMA repository](https://github.com/facebookresearch/llama/pull/73/files) if you need to request access to the model data.
 - Please verify the sha256 checksums of all downloaded model files to confirm that you have the correct model data files before creating an issue relating to your model files.
 - The following command will verify if you have all possible latest files in your self-installed `./models` subdirectory:
@@ -284,7 +284,7 @@ convert the model from the old format to the new format with [./migrate-ggml-202
 - GPT-3.5 / InstructGPT / ChatGPT:
   - [Aligning language models to follow instructions](https://openai.com/research/instruction-following)
   - [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155)
-
+
 ### Perplexity (Measuring model quality)
 
 You can use the `perplexity` example to measure perplexity over the given prompt. For more background,

examples/Miku.sh (+6 -6)

@@ -19,15 +19,15 @@ GEN_OPTIONS=(--batch_size 1024
     --top_p 0.5)
 
 if [ -n "$N_THREAD" ]; then
-    GEN_OPTIONS+=(--threads "$N_THREAD")
+    GEN_OPTIONS+=(--threads "$N_THREAD")
 fi
 
 ./main "${GEN_OPTIONS[@]}" \
-    --model "$MODEL" \
-    --n_predict "$N_PREDICTS" \
-    --color --interactive \
-    --reverse-prompt "${USER_NAME}:" \
-    --prompt "
+    --model "$MODEL" \
+    --n_predict "$N_PREDICTS" \
+    --color --interactive \
+    --reverse-prompt "${USER_NAME}:" \
+    --prompt "
 This is a transcript of a 1000 page, never ending conversation between ${USER_NAME} and the cute and helpful AI assistant ${AI_NAME}. ${AI_NAME} is a girl who is an AI running on the users computer.
 ${AI_NAME} can think for herself without the user seeing her thoughts by adding a /think prefix to her output. She uses this to reason about the world and to think about what she should say next.
 ${AI_NAME} is always coherent and makes sense, but if she isn't sure if what she is saying is correct she will ask the user for help.

examples/common.cpp (+7 -7)

@@ -22,9 +22,9 @@ extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHand
 extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode);
 extern "C" __declspec(dllimport) int __stdcall SetConsoleCP(unsigned int wCodePageID);
 extern "C" __declspec(dllimport) int __stdcall SetConsoleOutputCP(unsigned int wCodePageID);
-extern "C" __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int CodePage, unsigned long dwFlags,
-    const wchar_t * lpWideCharStr, int cchWideChar,
-    char * lpMultiByteStr, int cbMultiByte,
+extern "C" __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int CodePage, unsigned long dwFlags,
+    const wchar_t * lpWideCharStr, int cchWideChar,
+    char * lpMultiByteStr, int cbMultiByte,
     const char * lpDefaultChar, bool * lpUsedDefaultChar);
 #define CP_UTF8 65001
 #endif
@@ -328,9 +328,9 @@ void win32_console_init(bool enable_color) {
 
 // Convert a wide Unicode string to an UTF8 string
 void win32_utf8_encode(const std::wstring & wstr, std::string & str) {
-    int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
-    std::string strTo(size_needed, 0);
-    WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL);
-    str = strTo;
+    int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
+    std::string strTo(size_needed, 0);
+    WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL);
+    str = strTo;
 }
 #endif

examples/embedding/README.md (+3 -3)

@@ -1,3 +1,3 @@
-# embedding
-
-TODO
+# embedding
+
+TODO

examples/main/README.md (+3 -3)

@@ -1,3 +1,3 @@
-# main
-
-TODO
+# main
+
+TODO

examples/main/main.cpp (+1 -1)

@@ -168,7 +168,7 @@ int main(int argc, char ** argv) {
     }
 
     // enable interactive mode if reverse prompt or interactive start is specified
-    if (params.antiprompt.size() != 0 || params.interactive_start) {
+    if (params.antiprompt.size() != 0 || params.interactive_start) {
         params.interactive = true;
     }
 

examples/perplexity/README.md (+3 -3)

@@ -1,3 +1,3 @@
-# perplexity
-
-TODO
+# perplexity
+
+TODO

ggml.c (+7 -7)

@@ -228,12 +228,12 @@ static inline float fp32_from_bits(uint32_t w) {
 }
 
 static inline uint32_t fp32_to_bits(float f) {
-    union {
-        float as_value;
-        uint32_t as_bits;
-    } fp32;
-    fp32.as_value = f;
-    return fp32.as_bits;
+    union {
+        float as_value;
+        uint32_t as_bits;
+    } fp32;
+    fp32.as_value = f;
+    return fp32.as_bits;
 }
 
 static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
@@ -1881,7 +1881,7 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest
         sum1 += x1->d * y1->d * (vgetq_lane_s32(p_1, 0) + vgetq_lane_s32(p_1, 1) + vgetq_lane_s32(p_1, 2) + vgetq_lane_s32(p_1, 3));
 #endif
 #else
-        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
+        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
         const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls));
 
         const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs));

0 commit comments
