
Commit 6171c9d

Authored by ochafik, with ngxson and ggerganov
Add Jinja template support (#11016)
* Copy minja from google/minja@58f0ca6
* Add --jinja and --chat-template-file flags
* Add missing <optional> include
* Avoid print in get_hf_chat_template.py
* No designated initializers yet
* Try and work around msvc++ non-macro max resolution quirk
* Update test_chat_completion.py
* Wire LLM_KV_TOKENIZER_CHAT_TEMPLATE_N in llama_model_chat_template
* Refactor test-chat-template
* Test templates w/ minja
* Fix deprecation
* Add --jinja to llama-run
* Update common_chat_format_example to use minja template wrapper
* Test chat_template in e2e test
* Update utils.py
* Update test_chat_completion.py
* Update run.cpp
* Update arg.cpp
* Refactor common_chat_* functions to accept minja template + use_jinja option
* Attempt to fix linkage of LLAMA_CHATML_TEMPLATE
* Revert LLAMA_CHATML_TEMPLATE refactor
* Normalize newlines in test-chat-templates for windows tests
* Forward decl minja::chat_template to avoid eager json dep
* Flush stdout in chat template before potential crash
* Fix copy elision warning
* Rm unused optional include
* Add missing optional include to server.cpp
* Disable jinja test that has a cryptic windows failure
* minja: fix vigogne (google/minja#22)
* Apply suggestions from code review

Co-authored-by: Xuan Son Nguyen <[email protected]>
Co-authored-by: Georgi Gerganov <[email protected]>

* Finish suggested renamings
* Move chat_templates inside server_context + remove mutex
* Update --chat-template-file w/ recent change to --chat-template
* Refactor chat template validation
* Guard against missing eos/bos tokens (null token otherwise throws in llama_vocab::impl::token_get_attr)
* Warn against missing eos/bos tokens when jinja template references them
* rename: common_chat_template[s]
* reinstate assert on chat_templates.template_default
* Update minja to google/minja@b8437df
* Update minja to google/minja#25
* Update minja from google/minja#27
* rm unused optional header

---------

Co-authored-by: Xuan Son Nguyen <[email protected]>
Co-authored-by: Georgi Gerganov <[email protected]>
1 parent e28245f commit 6171c9d

22 files changed: +3,563 −133 lines

Diff for: Makefile

+2
@@ -1361,7 +1361,9 @@ llama-server: \
 	examples/server/httplib.h \
 	examples/server/index.html.hpp \
 	examples/server/loading.html.hpp \
+	common/chat-template.hpp \
 	common/json.hpp \
+	common/minja.hpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h %.hpp $<,$^) -Iexamples/server $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) $(LWINSOCK2)

Diff for: common/CMakeLists.txt

+2
@@ -56,6 +56,7 @@ add_library(${TARGET} STATIC
     arg.cpp
     arg.h
     base64.hpp
+    chat-template.hpp
     common.cpp
     common.h
     console.cpp
@@ -64,6 +65,7 @@ add_library(${TARGET} STATIC
     json.hpp
     log.cpp
     log.h
+    minja.hpp
     ngram-cache.cpp
     ngram-cache.h
     sampling.cpp

Diff for: common/arg.cpp

+35-7
@@ -325,6 +325,14 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context
         throw std::invalid_argument("error: either --embedding or --reranking can be specified, but not both");
     }
 
+    if (!params.chat_template.empty() && !common_chat_verify_template(params.chat_template, params.use_jinja)) {
+        throw std::runtime_error(string_format(
+            "error: the supplied chat template is not supported: %s%s\n",
+            params.chat_template.c_str(),
+            params.use_jinja ? "" : "\nnote: llama.cpp was started without --jinja, we only support commonly used templates"
+        ));
+    }
+
     return true;
 }
 
@@ -1947,24 +1955,44 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             }
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER}));
+    add_opt(common_arg(
+        {"--jinja"},
+        "use jinja template for chat (default: disabled)",
+        [](common_params & params) {
+            params.use_jinja = true;
+        }
+    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_JINJA"));
     add_opt(common_arg(
         {"--chat-template"}, "JINJA_TEMPLATE",
         string_format(
             "set custom jinja chat template (default: template taken from model's metadata)\n"
             "if suffix/prefix are specified, template will be disabled\n"
+            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
             "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
         ),
         [](common_params & params, const std::string & value) {
-            if (!common_chat_verify_template(value)) {
-                throw std::runtime_error(string_format(
-                    "error: the supplied chat template is not supported: %s\n"
-                    "note: llama.cpp does not use jinja parser, we only support commonly used templates\n",
-                    value.c_str()
-                ));
-            }
             params.chat_template = value;
         }
     ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
+    add_opt(common_arg(
+        {"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
+        string_format(
+            "set custom jinja chat template file (default: template taken from model's metadata)\n"
+            "if suffix/prefix are specified, template will be disabled\n"
+            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
+            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
+        ),
+        [](common_params & params, const std::string & value) {
+            std::ifstream file(value);
+            if (!file) {
+                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
+            }
+            std::copy(
+                std::istreambuf_iterator<char>(file),
+                std::istreambuf_iterator<char>(),
+                std::back_inserter(params.chat_template));
+        }
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE_FILE"));
     add_opt(common_arg(
         {"-sps", "--slot-prompt-similarity"}, "SIMILARITY",
         string_format("how much the prompt of a request must match the prompt of a slot in order to use that slot (default: %.2f, 0.0 = disabled)\n", params.slot_prompt_similarity),

Diff for: common/chat-template.hpp

+249
@@ -0,0 +1,249 @@
/*
    Copyright 2024 Google LLC

    Use of this source code is governed by an MIT-style
    license that can be found in the LICENSE file or at
    https://opensource.org/licenses/MIT.
*/
// SPDX-License-Identifier: MIT
#pragma once

#include "minja.hpp"
#include <json.hpp>
#include <string>
#include <vector>

using json = nlohmann::ordered_json;

namespace minja {

class chat_template {
  public:

  private:
    bool supports_tools_ = true;
    // Meta-Llama-3.1-8B-Instruct's template expects arguments to be an object.
    // Most other templates (and OpenAI's API) expect the arguments object to be stringified.
    bool requires_object_arguments_ = false;
    bool supports_system_role_ = true;
    bool supports_parallel_tool_calls_ = false;
    std::string source_;
    std::string bos_token_;
    std::string eos_token_;
    std::shared_ptr<minja::TemplateNode> template_root_;

    std::string try_render(
        const nlohmann::ordered_json & messages,
        const nlohmann::ordered_json & tools,
        bool add_generation_prompt,
        const nlohmann::ordered_json & extra_context = nlohmann::ordered_json()) const
    {
        try {
            auto prompt = apply(messages, tools, add_generation_prompt, extra_context);
            // fprintf(stderr, "Prompt: %s\n", prompt.c_str());
            return prompt;
        } catch (const std::exception & e) {
            // fprintf(stderr, "Error: %s\n", e.what());
            return "";
        }
    }

  public:
    chat_template(const std::string & source, const std::string & bos_token, const std::string & eos_token)
        : source_(source), bos_token_(bos_token), eos_token_(eos_token)
    {
        template_root_ = minja::Parser::parse(source_, {
            /* .trim_blocks = */ true,
            /* .lstrip_blocks = */ true,
            /* .keep_trailing_newline = */ false,
        });
        supports_tools_ = source.find("tools") != std::string::npos;

        auto renders_string_arguments =
            try_render({
                {
                    {"role", "user"},
                    {"content", "Hey"}
                },
                {
                    {"role", "assistant"},
                    {"tool_calls", json::array({
                        {
                            {"id", "call_1___"},
                            {"type", "function"},
                            {"function", {
                                {"arguments", "{\"code\": \"print('Hello, World!')\"}"},
                                {"name", "ipython"},
                            }},
                        },
                    })},
                }
            }, {}, false).find("{\"code\": \"print") != std::string::npos;
        if (!renders_string_arguments) {
            auto renders_object_arguments =
                try_render({
                    {
                        {"role", "user"},
                        {"content", "Hey"}
                    },
                    {
                        {"role", "assistant"},
                        {"tool_calls", json::array({
                            {
                                {"id", "call_1___"},
                                {"type", "function"},
                                {"function", {
                                    {"arguments", {
                                        {"code", "print('Hello, World!')"},
                                    }},
                                    {"name", "ipython"},
                                }},
                            },
                        })},
                    }
                }, {}, false).find("{\"code\": \"print") != std::string::npos;
            requires_object_arguments_ = renders_object_arguments;
        }
        supports_parallel_tool_calls_ = source.find("tool_call_id") != std::string::npos;

        supports_system_role_ = try_render({
            {{"role", "system"}, {"content", "<System Needle>"}},
            {{"role", "user"},   {"content", "Hey"}}
        }, {}, false).find("<System Needle>") != std::string::npos;
    }

    const std::string & source() const { return source_; }
    const std::string & bos_token() const { return bos_token_; }
    const std::string & eos_token() const { return eos_token_; }
    bool supports_tools() const { return supports_tools_; }
    bool supports_parallel_tool_calls() const { return supports_parallel_tool_calls_; }

    std::string apply(
        const nlohmann::ordered_json & messages,
        const nlohmann::ordered_json & tools,
        bool add_generation_prompt,
        const nlohmann::ordered_json & extra_context = nlohmann::ordered_json()) const
    {
        json actual_messages;

        // First, "fix" messages so they have a chance to be rendered correctly by the template

        if (requires_object_arguments_ || !supports_system_role_ || !supports_tools_) {
            actual_messages = json::array();

            std::string pending_system;
            auto flush_sys = [&]() {
                if (!pending_system.empty()) {
                    actual_messages.push_back({
                        {"role", "user"},
                        {"content", pending_system},
                    });
                    pending_system.clear();
                }
            };
            for (const auto & message_ : messages) {
                auto message = message_;
                if (!message.contains("role") || !message.contains("content")) {
                    throw std::runtime_error("message must have 'role' and 'content' fields: " + message.dump());
                }
                std::string role = message.at("role");

                if (message.contains("tool_calls")) {
                    if (requires_object_arguments_ || !supports_tools_) {
                        for (auto & tool_call : message.at("tool_calls")) {
                            if (tool_call["type"] == "function") {
                                auto & function = tool_call.at("function");
                                std::string arguments = function.at("arguments");
                                function["arguments"] = json::parse(arguments);
                            }
                        }
                    }
                    if (!supports_tools_) {
                        auto content = message.at("content");
                        auto tool_calls = json::array();
                        for (const auto & tool_call : message.at("tool_calls")) {
                            if (tool_call.at("type") != "function") {
                                continue;
                            }
                            const auto & function = tool_call.at("function");
                            auto tc = json {
                                {"name", function.at("name")},
                                {"arguments", function.at("arguments")},
                            };
                            if (tool_call.contains("id")) {
                                tc["id"] = tool_call["id"];
                            }
                            tool_calls.push_back(tc);
                        }
                        auto obj = json {
                            {"tool_calls", tool_calls},
                        };
                        if (!content.is_null() && content != "") {
                            obj["content"] = content;
                        }
                        message["content"] = obj.dump(2);
                        message.erase("tool_calls");
                    }
                }
                if (!supports_tools_ && role == "tool") {
                    message["role"] = "user";
                    auto obj = json {
                        {"tool_response", {
                            {"tool", message.at("name")},
                            {"content", message.at("content")},
                        }},
                    };
                    if (message.contains("tool_call_id")) {
                        obj["tool_response"]["tool_call_id"] = message.at("tool_call_id");
                    }
                    message["content"] = obj.dump(2);
                    message.erase("name");
                }

                if (!message["content"].is_null() && !supports_system_role_) {
                    std::string content = message.at("content");
                    if (role == "system") {
                        if (!pending_system.empty()) pending_system += "\n";
                        pending_system += content;
                        continue;
                    } else {
                        if (role == "user") {
                            if (!pending_system.empty()) {
                                message["content"] = pending_system + (content.empty() ? "" : "\n" + content);
                                pending_system.clear();
                            }
                        } else {
                            flush_sys();
                        }
                    }
                }
                actual_messages.push_back(message);
            }
            flush_sys();
        } else {
            actual_messages = messages;
        }

        auto context = minja::Context::make(json({
            {"messages", actual_messages},
            {"add_generation_prompt", add_generation_prompt},
            {"bos_token", bos_token_},
            {"eos_token", eos_token_},
        }));

        if (!tools.is_null()) {
            auto tools_val = minja::Value(tools);
            context->set("tools", tools_val);
        }
        if (!extra_context.is_null()) {
            for (auto & kv : extra_context.items()) {
                minja::Value val(kv.value());
                context->set(kv.key(), val);
            }
        }

        return template_root_->render(context);
    }
};
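To make the new API concrete, here is a minimal usage sketch (not part of the commit): it instantiates minja::chat_template with a toy ChatML-style template and renders a two-message conversation. It assumes minja.hpp and json.hpp are on the include path, as set up by the Makefile/CMake changes above; the template string and tokens are made up for illustration.

#include "chat-template.hpp"

#include <cstdio>

int main() {
    // A deliberately tiny ChatML-like template; real templates come from model metadata.
    minja::chat_template tmpl(
        "{% for message in messages %}"
        "<|im_start|>{{ message.role }}\n{{ message.content }}<|im_end|>\n"
        "{% endfor %}"
        "{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
        /* bos_token= */ "<s>", /* eos_token= */ "</s>");

    // json is the nlohmann::ordered_json alias declared in chat-template.hpp.
    json messages = json::array({
        {{"role", "system"}, {"content", "You are helpful."}},
        {{"role", "user"},   {"content", "Hello!"}},
    });

    // No tools (null json), and request a trailing assistant generation prompt.
    std::string prompt = tmpl.apply(messages, /* tools= */ json(), /* add_generation_prompt= */ true);
    printf("%s", prompt.c_str());
    return 0;
}

Note the capability probing in the constructor: since this toy template contains no "tools" substring, supports_tools() comes back false, so any tool-call messages passed to apply() would be folded into plain user-visible JSON content by the fixup logic shown above.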
