|
from pydantic import BaseModel

+from guardrails.utils.exception_utils import UserFacingException
from guardrails.utils.llm_response import LLMResponse
from guardrails.utils.openai_utils import (
    AsyncOpenAIClient,
    ...
    get_static_openai_create_func,
)
from guardrails.utils.pydantic_utils import convert_pydantic_model_to_openai_fn
+from guardrails.utils.safe_get import safe_get


class PromptCallableException(Exception):
@@ -287,6 +289,124 @@ def _invoke_llm( |
        return LLMResponse(output=anthropic_response.completion)


+class HuggingFaceModelCallable(PromptCallableBase):
+    def _invoke_llm(
+        self, prompt: str, model_generate: Any, *args, **kwargs
+    ) -> LLMResponse:
+        try:
+            import transformers  # noqa: F401 # type: ignore
+        except ImportError:
+            raise PromptCallableException(
+                "The `transformers` package is not installed. "
+                "Install with `pip install transformers`"
+            )
+        try:
+            import torch
+        except ImportError:
+            raise PromptCallableException(
+                "The `torch` package is not installed. "
+                "Install with `pip install torch`"
+            )
+
+        tokenizer = kwargs.pop("tokenizer", None)
+        if not tokenizer:
+            raise UserFacingException(
+                ValueError(
+                    "'tokenizer' must be provided in order to use Hugging Face models!"
+                )
+            )
+
+        torch_device = "cuda" if torch.cuda.is_available() else "cpu"
+
+        return_tensors = kwargs.pop("return_tensors", "pt")
+        skip_special_tokens = kwargs.pop("skip_special_tokens", True)
+
+        input_ids = kwargs.pop("input_ids", None)
+        input_values = kwargs.pop("input_values", None)
+        input_features = kwargs.pop("input_features", None)
+        pixel_values = kwargs.pop("pixel_values", None)
+        model_inputs = kwargs.pop("model_inputs", {})
+        if (
+            input_ids is None
+            and input_values is None
+            and input_features is None
+            and pixel_values is None
+            and not model_inputs
+        ):
+            model_inputs = tokenizer(prompt, return_tensors=return_tensors).to(
+                torch_device
+            )
+        else:
+            model_inputs["input_ids"] = input_ids
+            model_inputs["input_values"] = input_values
+            model_inputs["input_features"] = input_features
+            model_inputs["pixel_values"] = pixel_values
+
+        do_sample = kwargs.pop("do_sample", None)
+        temperature = kwargs.pop("temperature", None)
+        if not do_sample and temperature == 0:
+            temperature = None
+
+        model_inputs["do_sample"] = do_sample
+        model_inputs["temperature"] = temperature
+
+        output = model_generate(
+            **model_inputs,
+            **kwargs,
+        )
+
+        # NOTE: This is currently restricted to a single output.
+        # Should we choose to support multiple return sequences,
+        # we would need to either validate all of them
+        # and choose the one with the fewest failures,
+        # or accept a selection function.
+        decoded_output = tokenizer.decode(
+            output[0], skip_special_tokens=skip_special_tokens
+        )
+
+        return LLMResponse(output=decoded_output)
+
+
+class HuggingFacePipelineCallable(PromptCallableBase):
+    def _invoke_llm(self, prompt: str, pipeline: Any, *args, **kwargs) -> LLMResponse:
+        try:
+            import transformers  # noqa: F401 # type: ignore
+        except ImportError:
+            raise PromptCallableException(
+                "The `transformers` package is not installed. "
+                "Install with `pip install transformers`"
+            )
+        try:
+            import torch  # noqa: F401 # type: ignore
+        except ImportError:
+            raise PromptCallableException(
+                "The `torch` package is not installed. "
+                "Install with `pip install torch`"
+            )
+
+        content_key = kwargs.pop("content_key", "generated_text")
+
+        temperature = kwargs.pop("temperature", None)
+        if temperature == 0:
+            temperature = None
+
+        output = pipeline(
+            prompt,
+            temperature=temperature,
+            *args,
+            **kwargs,
+        )
+
+        # NOTE: This is currently restricted to a single output.
+        # Should we choose to support multiple return sequences,
+        # we would need to either validate all of them
+        # and choose the one with the fewest failures,
+        # or accept a selection function.
+        content = safe_get(output[0], content_key)
+
+        return LLMResponse(output=content)
+
+
class ArbitraryCallable(PromptCallableBase):
    def __init__(self, llm_api: Callable, *args, **kwargs):
        self.llm_api = llm_api
@@ -364,6 +484,42 @@ def get_llm_ask(llm_api: Callable, *args, **kwargs) -> PromptCallableBase: |
    except ImportError:
        pass

+    try:
+        from transformers import (  # noqa: F401 # type: ignore
+            FlaxPreTrainedModel,
+            GenerationMixin,
+            PreTrainedModel,
+            TFPreTrainedModel,
+        )
+
+        api_self = getattr(llm_api, "__self__", None)
+
+        if (
+            isinstance(api_self, PreTrainedModel)
+            or isinstance(api_self, TFPreTrainedModel)
+            or isinstance(api_self, FlaxPreTrainedModel)
+        ):
+            if (
+                hasattr(llm_api, "__func__")
+                and llm_api.__func__ == GenerationMixin.generate
+            ):
+                return HuggingFaceModelCallable(*args, model_generate=llm_api, **kwargs)
+            raise ValueError("Only text generation models are supported at this time.")
+    except ImportError:
+        pass
+    try:
+        from transformers import Pipeline  # noqa: F401 # type: ignore
+
+        if isinstance(llm_api, Pipeline):
+            # Couldn't find a constant for this
+            if llm_api.task == "text-generation":
+                return HuggingFacePipelineCallable(*args, pipeline=llm_api, **kwargs)
+            raise ValueError(
+                "Only text generation pipelines are supported at this time."
+            )
+    except ImportError:
+        pass
+
    # Let the user pass in an arbitrary callable.
    return ArbitraryCallable(*args, llm_api=llm_api, **kwargs)

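
For reviewers, a minimal usage sketch of the model path added above. This is a sketch under assumptions, not part of the change: it presumes the file being modified is guardrails/llm_providers.py, that a small local checkpoint such as gpt2 is available, and that the returned callable is invoked the same way as the other PromptCallableBase subclasses in this module; max_new_tokens is an arbitrary illustrative value.

    # Sketch: a bound model.generate is dispatched to HuggingFaceModelCallable
    # by get_llm_ask (module path and checkpoint name are assumptions).
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    from guardrails.llm_providers import get_llm_ask

    # _invoke_llm moves the tokenized prompt to CUDA when available,
    # so keep the model on the same device.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2").to(device)

    # The tokenizer kwarg is required; without it _invoke_llm raises UserFacingException.
    hf_llm = get_llm_ask(model.generate, tokenizer=tokenizer, max_new_tokens=30)
    response = hf_llm("The sea is")
    print(response.output)  # decoded text from tokenizer.decode(output[0], ...)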
|
|
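A matching sketch for the pipeline path, under the same assumptions about the module path and call convention. Per the dispatch code above, only pipelines with task == "text-generation" are accepted, and the result text is read from output[0] under the default "generated_text" content key via safe_get.

    # Sketch: a transformers text-generation Pipeline is routed to
    # HuggingFacePipelineCallable (module path assumed).
    from transformers import pipeline

    from guardrails.llm_providers import get_llm_ask

    generator = pipeline("text-generation", model="gpt2")

    hf_llm = get_llm_ask(generator, max_new_tokens=30)
    response = hf_llm("The sea is")
    print(response.output)  # value of output[0]["generated_text"]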