Skip to content

Commit 282698b

Browse files
authored
server: pass seed param from command line to llama
1 parent 3e7eae4 commit 282698b

File tree

1 file changed

+4
-0
lines changed

1 file changed

+4
-0
lines changed

llama_cpp/server/app.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,9 @@ class Settings(BaseSettings):
         ge=0,
         description="The number of layers to put on the GPU. The rest will be on the CPU.",
     )
+    seed: int = Field(
+        default=1337, description="Random seed. -1 for random."
+    )
     n_batch: int = Field(
         default=512, ge=1, description="The batch size to use per eval."
     )
@@ -109,6 +112,7 @@ def create_app(settings: Optional[Settings] = None):
     llama = llama_cpp.Llama(
         model_path=settings.model,
         n_gpu_layers=settings.n_gpu_layers,
+        seed=settings.seed,
         f16_kv=settings.f16_kv,
         use_mlock=settings.use_mlock,
         use_mmap=settings.use_mmap,

0 commit comments

Comments
 (0)