 
 import llama_cpp.llama_cpp as llama_cpp
 
-from ._utils import suppress_stdout_stderr
-
 
 # Python wrappers over llama.h structs
 
@@ -30,7 +28,6 @@ class _LlamaModel:
 
     _llama_free_model = None
     # NOTE: this must be "saved" here to avoid exceptions when calling __del__
-    _suppress_stdout_stderr = suppress_stdout_stderr
 
     def __init__(
         self,
@@ -48,16 +45,14 @@ def __init__(
         if not os.path.exists(path_model):
             raise ValueError(f"Model path does not exist: {path_model}")
 
-        with self._suppress_stdout_stderr(disable=self.verbose):
-            self.model = llama_cpp.llama_load_model_from_file(
-                self.path_model.encode("utf-8"), self.params
-            )
+        self.model = llama_cpp.llama_load_model_from_file(
+            self.path_model.encode("utf-8"), self.params
+        )
 
     def __del__(self):
-        with self._suppress_stdout_stderr(disable=self.verbose):
-            if self.model is not None and self._llama_free_model is not None:
-                self._llama_free_model(self.model)
-                self.model = None
+        if self.model is not None and self._llama_free_model is not None:
+            self._llama_free_model(self.model)
+            self.model = None
 
     def vocab_type(self) -> int:
         assert self.model is not None
@@ -240,8 +235,6 @@ class _LlamaContext:
     NOTE: For stability it's recommended you use the Llama class instead."""
 
     _llama_free = None
-    # NOTE: this must be "saved" here to avoid exceptions when calling __del__
-    _suppress_stdout_stderr = suppress_stdout_stderr
 
     def __init__(
         self,
@@ -256,16 +249,16 @@ def __init__(
 
         self._llama_free = llama_cpp._lib.llama_free  # type: ignore
 
-        with self._suppress_stdout_stderr(disable=self.verbose):
-            self.ctx = llama_cpp.llama_new_context_with_model(
-                self.model.model, self.params
-            )
+        assert self.model.model is not None
+
+        self.ctx = llama_cpp.llama_new_context_with_model(
+            self.model.model, self.params
+        )
 
     def __del__(self):
-        with self._suppress_stdout_stderr(disable=self.verbose):
-            if self.ctx is not None and self._llama_free is not None:
-                self._llama_free(self.ctx)
-                self.ctx = None
+        if self.ctx is not None and self._llama_free is not None:
+            self._llama_free(self.ctx)
+            self.ctx = None
 
     def n_ctx(self) -> int:
         assert self.ctx is not None
@@ -493,8 +486,6 @@ def default_params():
 
 class _LlamaBatch:
     _llama_batch_free = None
-    # NOTE: this must be "saved" here to avoid exceptions when calling __del__
-    _suppress_stdout_stderr = suppress_stdout_stderr
 
     def __init__(
         self, *, n_tokens: int, embd: int, n_seq_max: int, verbose: bool = True
@@ -506,16 +497,14 @@ def __init__(
 
         self._llama_batch_free = llama_cpp._lib.llama_batch_free  # type: ignore
 
-        with self._suppress_stdout_stderr(disable=self.verbose):
-            self.batch = llama_cpp.llama_batch_init(
-                self.n_tokens, self.embd, self.n_seq_max
-            )
+        self.batch = llama_cpp.llama_batch_init(
+            self.n_tokens, self.embd, self.n_seq_max
+        )
 
     def __del__(self):
-        with self._suppress_stdout_stderr(disable=self.verbose):
-            if self.batch is not None and self._llama_batch_free is not None:
-                self._llama_batch_free(self.batch)
-                self.batch = None
+        if self.batch is not None and self._llama_batch_free is not None:
+            self._llama_batch_free(self.batch)
+            self.batch = None
 
     def set_batch(self, batch: Sequence[int], n_past: int, logits_all: bool):
         assert self.batch is not None
0 commit comments