
Commit 9382756

Fix whitespace, add .editorconfig, add GitHub workflow
1 parent 3e6e70d commit 9382756

12 files changed (+58 −29 lines)

Note: since this commit fixes whitespace, many of the changed line pairs below differ only in trailing whitespace or line endings, which are invisible in the rendered diff.

.devops/main.Dockerfile (+1 −1)

@@ -15,4 +15,4 @@ FROM ubuntu:$UBUNTU_VERSION as runtime
 
 COPY --from=build /app/main /main
 
-ENTRYPOINT [ "/main" ]
+ENTRYPOINT [ "/main" ]
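
As a usage note, a minimal sketch of building and running this image locally; the image tag llama-cpp-main, the mounted ./models directory, and the model filename are illustrative assumptions, not part of the commit:

```bash
# Build the runtime image from the repository root (image tag is illustrative).
docker build -f .devops/main.Dockerfile -t llama-cpp-main .

# ENTRYPOINT is /main, so docker run arguments go straight to the binary;
# mount a local models directory so the container can read the weights.
docker run -v "$PWD/models:/models" llama-cpp-main \
  -m /models/7B/ggml-model-q4_0.bin -p "Hello" -n 64
```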

.dockerignore (+1 −1)

@@ -21,4 +21,4 @@ models/*
 
 arm_neon.h
 compile_commands.json
-Dockerfile
+Dockerfile

.editorconfig (+12, new file)

@@ -0,0 +1,12 @@
+# https://EditorConfig.org
+
+# Top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file, utf-8 charset
+[*]
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+charset = utf-8
+indent_style = space
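
For a quick local check of one of these rules, a shell one-liner along these lines lists files with trailing whitespace (a sketch using GNU grep; not part of the commit):

```bash
# -r recurse, -I skip binary files, -l print only matching file names;
# the pattern matches any whitespace at the end of a line.
grep -rIl --exclude-dir=.git '[[:space:]]$' .
```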

.github/ISSUE_TEMPLATE/custom.md (+8 −8)

@@ -22,9 +22,9 @@ Please provide a detailed written description of what you were trying to do, and
 
 # Current Behavior
 
-Please provide a detailed written description of what `llama.cpp` did, instead.
+Please provide a detailed written description of what `llama.cpp` did, instead.
 
-# Environment and Context
+# Environment and Context
 
 Please provide detailed information about your computer setup. This is important in case the issue is not reproducible except for under certain specific conditions.
 
@@ -133,7 +133,7 @@ llama_model_load: loading model part 8/8 from './models/65B/ggml-model-q4_0.bin.
 llama_model_load: .......................................................................................... done
 llama_model_load: model size = 4869.09 MB / num tensors = 723
 
-system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
+system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
 
 main: prompt: 'Please close your issue when it has been answered.'
 main: number of tokens in prompt = 11
@@ -166,14 +166,14 @@ main: total time = 246406.42 ms
 
  Performance counter stats for './main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p Please close your issue when it has been answered.':
 
-      3636882.89 msec task-clock                #   14.677 CPUs utilized
-           13509      context-switches          #    3.714 /sec
-            2436      cpu-migrations            #    0.670 /sec
-        10476679      page-faults               #    2.881 K/sec
+      3636882.89 msec task-clock                #   14.677 CPUs utilized
+           13509      context-switches          #    3.714 /sec
+            2436      cpu-migrations            #    0.670 /sec
+        10476679      page-faults               #    2.881 K/sec
   13133115082869      cycles                    #    3.611 GHz                      (16.77%)
      29314462753      stalled-cycles-frontend   #    0.22% frontend cycles idle     (16.76%)
   10294402631459      stalled-cycles-backend    #   78.39% backend cycles idle      (16.74%)
-  23479217109614      instructions              #    1.79  insn per cycle
+  23479217109614      instructions              #    1.79  insn per cycle
                                                 #    0.44  stalled cycles per insn  (16.76%)
    2353072268027      branches                  #  647.002 M/sec                    (16.77%)
       1998682780      branch-misses             #    0.08% of all branches          (16.76%)

.github/workflows/docker.yml (+1 −1)

@@ -60,4 +60,4 @@ jobs:
           push: ${{ github.event_name == 'push' }}
           platforms: linux/amd64,linux/arm64
           tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}"
-          file: ${{ matrix.config.dockerfile }}
+          file: ${{ matrix.config.dockerfile }}

.github/workflows/editorconfig.yml (+17, new file)

@@ -0,0 +1,17 @@
+name: EditorConfig Checker
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master
+
+jobs:
+  editorconfig:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: editorconfig-checker/action-editorconfig-checker@main
+      - run: editorconfig-checker
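
The CI check above can be reproduced locally before pushing. A sketch, assuming the editorconfig-checker binary is installed on PATH (the workflow's last step runs the same command):

```bash
# From the repository root: the checker reads .editorconfig automatically
# and exits non-zero if any file violates it, just like the workflow step.
editorconfig-checker
```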

README.md (+5 −5)

@@ -243,7 +243,7 @@ There 26 letters in the English Alphabet
 The majority (54%) are using public transit. This includes buses, trams and metros with over 100 lines throughout the city which make it very accessible for tourists to navigate around town as well as locals who commute by tram or metro on a daily basis
 > List 5 words that start with "ca".
 cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach.
->
+>
 ```
 
 ### Using [GPT4All](https://github.com/nomic-ai/gpt4all)
@@ -254,17 +254,17 @@ cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach.
 convert the model from the old format to the new format with [./migrate-ggml-2023-03-30-pr613.py](./migrate-ggml-2023-03-30-pr613.py):
 
 ```bash
-python3 convert-gpt4all-to-ggml.py models/gpt4all-7B/gpt4all-lora-quantized.bin ./models/tokenizer.model
+python3 convert-gpt4all-to-ggml.py models/gpt4all-7B/gpt4all-lora-quantized.bin ./models/tokenizer.model
 python3 migrate-ggml-2023-03-30-pr613.py models/gpt4all-7B/gpt4all-lora-quantized.bin models/gpt4all-7B/gpt4all-lora-quantized-new.bin
 ```
-
+
 - You can now use the newly generated `gpt4all-lora-quantized-new.bin` model in exactly the same way as all other models
 - The original model is saved in the same folder with a suffix `.orig`
 
 ### Obtaining and verifying the Facebook LLaMA original model and Stanford Alpaca model data
 
 - **Under no circumstances share IPFS, magnet links, or any other links to model downloads anywhere in this respository, including in issues, discussions or pull requests. They will be immediately deleted.**
-- The LLaMA models are officially distributed by Facebook and will **never** be provided through this repository.
+- The LLaMA models are officially distributed by Facebook and will **never** be provided through this repository.
 - Refer to [Facebook's LLaMA repository](https://github.com/facebookresearch/llama/pull/73/files) if you need to request access to the model data.
 - Please verify the sha256 checksums of all downloaded model files to confirm that you have the correct model data files before creating an issue relating to your model files.
 - The following command will verify if you have all possible latest files in your self-installed `./models` subdirectory:
@@ -284,7 +284,7 @@ convert the model from the old format to the new format with [./migrate-ggml-202
 - GPT-3.5 / InstructGPT / ChatGPT:
   - [Aligning language models to follow instructions](https://openai.com/research/instruction-following)
   - [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155)
-
+
 ### Perplexity (Measuring model quality)
 
 You can use the `perplexity` example to measure perplexity over the given prompt. For more background,

examples/common.cpp (+3 −3)

@@ -22,9 +22,9 @@ extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHand
 extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode);
 extern "C" __declspec(dllimport) int __stdcall SetConsoleCP(unsigned int wCodePageID);
 extern "C" __declspec(dllimport) int __stdcall SetConsoleOutputCP(unsigned int wCodePageID);
-extern "C" __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int CodePage, unsigned long dwFlags,
-                                                                   const wchar_t * lpWideCharStr, int cchWideChar,
-                                                                   char * lpMultiByteStr, int cbMultiByte,
+extern "C" __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int CodePage, unsigned long dwFlags,
+                                                                   const wchar_t * lpWideCharStr, int cchWideChar,
+                                                                   char * lpMultiByteStr, int cbMultiByte,
                                                                    const char * lpDefaultChar, bool * lpUsedDefaultChar);
 #define CP_UTF8 65001
 #endif

examples/embedding/README.md (+3 −3)

@@ -1,3 +1,3 @@
-# embedding
-
-TODO
+# embedding
+
+TODO

examples/main/README.md (+3 −3)

@@ -1,3 +1,3 @@
-# main
-
-TODO
+# main
+
+TODO

examples/main/main.cpp (+1 −1)

@@ -168,7 +168,7 @@ int main(int argc, char ** argv) {
     }
 
     // enable interactive mode if reverse prompt or interactive start is specified
-    if (params.antiprompt.size() != 0 || params.interactive_start) {
+    if (params.antiprompt.size() != 0 || params.interactive_start) {
         params.interactive = true;
     }
 
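
For context, this guard means a reverse prompt alone is enough to enable interactive mode. A hedged sketch of invoking it; the -r/--reverse-prompt flag name and the paths are assumptions about this era of llama.cpp, not shown in the diff:

```bash
# -r fills params.antiprompt, so the check above sets params.interactive = true
# even without an explicit interactive flag (model path is a placeholder).
./main -m ./models/7B/ggml-model-q4_0.bin \
  -r "User:" -p "Transcript of a dialog with User:" -n 256
```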

examples/perplexity/README.md (+3 −3)

@@ -1,3 +1,3 @@
-# perplexity
-
-TODO
+# perplexity
+
+TODO
