Skip to content

Commit bd9990b

Browse files
committed
minor fixes
1 parent 2853346 commit bd9990b

File tree

3 files changed

+26
-14
lines changed

3 files changed

+26
-14
lines changed

prepare_llm_models.sh

Lines changed: 8 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -73,19 +73,21 @@ if [ -f "$1/$TEXT_GENERATION_MODEL/$TOKENIZER_FILE" ]; then
7373
echo "Models file $1/$TEXT_GENERATION_MODEL/$TOKENIZER_FILE exists. Skipping downloading models."
7474
else
7575
python3 demos/common/export_models/export_model.py text_generation --source_model "$TEXT_GENERATION_MODEL" --weight-format int8 --model_repository_path $1
76-
if [ ! -f "$1/$TEXT_GENERATION_MODEL/chat_template.jinja" ]; then
76+
fi
77+
78+
if [ ! -f "$1/$TEXT_GENERATION_MODEL/$TOKENIZER_FILE" ]; then
79+
echo "[ERROR] Models file $1/$TEXT_GENERATION_MODEL/$TOKENIZER_FILE does not exist."
80+
exit 1
81+
fi
82+
83+
if [ ! -f "$1/$TEXT_GENERATION_MODEL/chat_template.jinja" ]; then
7784
dummy_chat_template="{% for message in messages %}\
7885
{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] }}\
7986
{% elif message['role'] == 'system' %}{{ '<|system|>\n' + message['content'] + eos_token }}\
8087
{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token }}\
8188
{% endif %}\
8289
{% endfor %}"
8390
echo "$dummy_chat_template" > "$1/$TEXT_GENERATION_MODEL/chat_template.jinja"
84-
fi
85-
fi
86-
if [ ! -f "$1/$TEXT_GENERATION_MODEL/$TOKENIZER_FILE" ]; then
87-
echo "[ERROR] Models file $1/$TEXT_GENERATION_MODEL/$TOKENIZER_FILE does not exist."
88-
exit 1
8991
fi
9092

9193
if [ -f "$1/$VLM_MODEL/$TOKENIZER_FILE" ]; then

src/llm/servable_initializer.cpp

Lines changed: 10 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -149,6 +149,9 @@ void GenAiServableInitializer::loadPyTemplateProcessor(std::shared_ptr<GenAiServ
149149
properties->templateProcessor.bosToken = bosToken;
150150
properties->templateProcessor.eosToken = eosToken;
151151

152+
SPDLOG_LOGGER_DEBUG(modelmanager_logger, "Loading Python Jinja template processor with chat template from tokenizer. Bos token: {}, Eos token: {}, chat template: \n{}",
153+
bosToken, eosToken, chatTemplate);
154+
152155
py::gil_scoped_acquire acquire;
153156
try {
154157
auto locals = py::dict("chat_template"_a = chatTemplate,
@@ -250,6 +253,13 @@ void GenAiServableInitializer::loadPyTemplateProcessor(std::shared_ptr<GenAiServ
250253
if template_entry.get("name") == "tool_use":
251254
tool_chat_template = template_entry.get("template")
252255
256+
# Try read tool_use.jinja template file from additional_chat_templates directory if exists
257+
additional_templates_dir = Path(templates_directory + "/additional_chat_templates")
258+
tool_use_template_file = additional_templates_dir / "tool_use.jinja"
259+
if tool_use_template_file.is_file():
260+
with open(tool_use_template_file, "r", encoding="utf-8") as f:
261+
tool_chat_template = f.read()
262+
253263
# Load templates from strings
254264
template = jinja_env.from_string(chat_template)
255265
if tool_chat_template is not None:

windows_prepare_llm_models.bat

Lines changed: 8 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -79,23 +79,23 @@ if exist "%~1\%TEXT_GENERATION_MODEL%\%TOKENIZER_FILE%" (
7979
) else (
8080
echo Downloading text generation model to %~1\%TEXT_GENERATION_MODEL% directory.
8181
python demos\common\export_models\export_model.py text_generation --source_model "%TEXT_GENERATION_MODEL%" --weight-format int8 --model_repository_path %~1
82+
if !errorlevel! neq 0 exit /b !errorlevel!
83+
)
84+
if not exist "%~1\%TEXT_GENERATION_MODEL%\%TOKENIZER_FILE%" (
85+
echo Models file %~1\%TEXT_GENERATION_MODEL%\%TOKENIZER_FILE% does not exists.
86+
exit /b 1
87+
)
8288

83-
if not exist "%~1\%TEXT_GENERATION_MODEL%\chat_template.jinja" (
89+
if not exist "%~1\%TEXT_GENERATION_MODEL%\chat_template.jinja" (
8490
set "dummy_chat_template={%% for message in messages %%}^
8591
{%% if message['role'] == 'user' %%}{{ 'User: ' + message['content'] }}^
8692
{%% elif message['role'] == 'system' %%}{{ '<|system|>\n' + message['content'] + eos_token }}^
8793
{%% elif message['role'] == 'assistant' %%}{{ message['content'] + eos_token }}^
8894
{%% endif %%}^
8995
{%% endfor %%}"
9096
echo !dummy_chat_template! > "%~1\%TEXT_GENERATION_MODEL%\chat_template.jinja"
91-
)
92-
93-
if !errorlevel! neq 0 exit /b !errorlevel!
97+
if !errorlevel! neq 0 exit /b !errorlevel!
9498
)
95-
if not exist "%~1\%TEXT_GENERATION_MODEL%\%TOKENIZER_FILE%" (
96-
echo Models file %~1\%TEXT_GENERATION_MODEL%\%TOKENIZER_FILE% does not exists.
97-
exit /b 1
98-
)
9999

100100
if exist "%~1\%EMBEDDING_MODEL%\embeddings\%LEGACY_MODEL_FILE%" (
101101
echo Models file %~1\%EMBEDDING_MODEL%\embeddings\%LEGACY_MODEL_FILE% exists. Skipping downloading models.

0 commit comments

Comments (0)