Skip to content

Commit bfaec30

Browse files
authored
Use niquests (#7)
1 parent 9486f8d commit bfaec30

File tree

18 files changed

+349
-241
lines changed

18 files changed

+349
-241
lines changed

.github/workflows/publish.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ jobs:
1111
steps:
1212
- uses: actions/checkout@v4
1313
- uses: extractions/setup-just@v2
14-
- uses: astral-sh/setup-uv@v3
14+
- uses: astral-sh/setup-uv@v4
1515
with:
1616
enable-cache: true
1717
cache-dependency-glob: "**/pyproject.toml"

.github/workflows/test.yml

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ jobs:
1717
steps:
1818
- uses: actions/checkout@v4
1919
- uses: extractions/setup-just@v2
20-
- uses: astral-sh/setup-uv@v3
20+
- uses: astral-sh/setup-uv@v4
2121
with:
2222
enable-cache: true
2323
cache-dependency-glob: "**/pyproject.toml"
@@ -32,13 +32,12 @@ jobs:
3232
- "3.10"
3333
- "3.11"
3434
- "3.12"
35-
- "3.13"
3635
steps:
3736
- uses: actions/checkout@v4
3837
- uses: extractions/setup-just@v2
39-
- uses: astral-sh/setup-uv@v3
38+
- uses: astral-sh/setup-uv@v4
4039
with:
4140
enable-cache: true
4241
cache-dependency-glob: "**/pyproject.toml"
43-
- run: uv python install ${{ matrix.python-version }}
42+
- run: uv venv --python ${{ matrix.python-version }}
4443
- run: just test -vv

Justfile

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,18 @@ lint:
1010
uv run --group lint ruff format
1111
uv run --group lint mypy .
1212

13+
_test-no-http *args:
14+
uv run pytest --ignore tests/test_http.py {{ args }}
15+
1316
test *args:
17+
#!/bin/bash
18+
uv run litestar --app tests.testing_app:app run &
19+
APP_PID=$!
1420
uv run pytest {{ args }}
21+
TEST_RESULT=$?
22+
kill $APP_PID
23+
wait $APP_PID 2>/dev/null
24+
exit $TEST_RESULT
1525

1626
publish:
1727
rm -rf dist

README.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -162,23 +162,23 @@ async with any_llm_client.OpenAIClient(config, ...) as client:
162162
#### Timeouts, proxy & other HTTP settings
163163

164164

165-
Pass custom [HTTPX](https://www.python-httpx.org) kwargs to `any_llm_client.get_client()`:
165+
Pass custom [niquests](https://niquests.readthedocs.io) kwargs to `any_llm_client.get_client()`:
166166

167167
```python
168-
import httpx
168+
import urllib3
169169

170170
import any_llm_client
171171

172172

173173
async with any_llm_client.get_client(
174174
...,
175-
mounts={"https://api.openai.com": httpx.AsyncHTTPTransport(proxy="http://localhost:8030")},
176-
timeout=httpx.Timeout(None, connect=5.0),
175+
proxies={"https://api.openai.com": "http://localhost:8030"},
176+
timeout=urllib3.Timeout(total=10.0, connect=5.0),
177177
) as client:
178178
...
179179
```
180180

181-
Default timeout is `httpx.Timeout(None, connect=5.0)` (5 seconds on connect, unlimited on read, write or pool).
181+
Default timeout is `urllib3.Timeout(total=None, connect=5.0)` (5 seconds on connect, unlimited total).
182182

183183
#### Retries
184184

any_llm_client/clients/openai.py

Lines changed: 29 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,7 @@
66
from http import HTTPStatus
77

88
import annotated_types
9-
import httpx
10-
import httpx_sse
9+
import niquests
1110
import pydantic
1211
import typing_extensions
1312

@@ -20,8 +19,9 @@
2019
OutOfTokensOrSymbolsError,
2120
UserMessage,
2221
)
23-
from any_llm_client.http import get_http_client_from_kwargs, make_http_request, make_streaming_http_request
22+
from any_llm_client.http import HttpClient, HttpStatusError
2423
from any_llm_client.retry import RequestRetryConfig
24+
from any_llm_client.sse import parse_sse_events
2525

2626

2727
OPENAI_AUTH_TOKEN_ENV_NAME: typing.Final = "ANY_LLM_CLIENT_OPENAI_AUTH_TOKEN"
@@ -99,31 +99,34 @@ def _make_user_assistant_alternate_messages(
9999
yield ChatCompletionsMessage(role=current_message_role, content="\n\n".join(current_message_content_chunks))
100100

101101

102-
def _handle_status_error(*, status_code: int, content: bytes) -> typing.NoReturn:
103-
if status_code == HTTPStatus.BAD_REQUEST and b"Please reduce the length of the messages" in content: # vLLM
104-
raise OutOfTokensOrSymbolsError(response_content=content)
105-
raise LLMError(response_content=content)
102+
def _handle_status_error(error: HttpStatusError) -> typing.NoReturn:
103+
if (
104+
error.status_code == HTTPStatus.BAD_REQUEST and b"Please reduce the length of the messages" in error.content
105+
): # vLLM
106+
raise OutOfTokensOrSymbolsError(response_content=error.content)
107+
raise LLMError(response_content=error.content)
106108

107109

108110
@dataclasses.dataclass(slots=True, init=False)
109111
class OpenAIClient(LLMClient):
110112
config: OpenAIConfig
111-
httpx_client: httpx.AsyncClient
113+
http_client: HttpClient
112114
request_retry: RequestRetryConfig
113115

114116
def __init__(
115117
self,
116118
config: OpenAIConfig,
117119
*,
118120
request_retry: RequestRetryConfig | None = None,
119-
**httpx_kwargs: typing.Any, # noqa: ANN401
121+
**niquests_kwargs: typing.Any, # noqa: ANN401
120122
) -> None:
121123
self.config = config
122-
self.request_retry = request_retry or RequestRetryConfig()
123-
self.httpx_client = get_http_client_from_kwargs(httpx_kwargs)
124+
self.http_client = HttpClient(
125+
request_retry=request_retry or RequestRetryConfig(), niquests_kwargs=niquests_kwargs
126+
)
124127

125-
def _build_request(self, payload: dict[str, typing.Any]) -> httpx.Request:
126-
return self.httpx_client.build_request(
128+
def _build_request(self, payload: dict[str, typing.Any]) -> niquests.Request:
129+
return niquests.Request(
127130
method="POST",
128131
url=str(self.config.url),
129132
json=payload,
@@ -152,24 +155,17 @@ async def request_llm_message(
152155
**extra or {},
153156
).model_dump(mode="json")
154157
try:
155-
response: typing.Final = await make_http_request(
156-
httpx_client=self.httpx_client,
157-
request_retry=self.request_retry,
158-
build_request=lambda: self._build_request(payload),
159-
)
160-
except httpx.HTTPStatusError as exception:
161-
_handle_status_error(status_code=exception.response.status_code, content=exception.response.content)
162-
try:
163-
return ChatCompletionsNotStreamingResponse.model_validate_json(response.content).choices[0].message.content
164-
finally:
165-
await response.aclose()
158+
response: typing.Final = await self.http_client.request(self._build_request(payload))
159+
except HttpStatusError as exception:
160+
_handle_status_error(exception)
161+
return ChatCompletionsNotStreamingResponse.model_validate_json(response).choices[0].message.content
166162

167-
async def _iter_partial_responses(self, response: httpx.Response) -> typing.AsyncIterable[str]:
163+
async def _iter_partial_responses(self, response: typing.AsyncIterable[bytes]) -> typing.AsyncIterable[str]:
168164
text_chunks: typing.Final = []
169-
async for event in httpx_sse.EventSource(response).aiter_sse():
170-
if event.data == "[DONE]":
165+
async for one_event in parse_sse_events(response):
166+
if one_event.data == "[DONE]":
171167
break
172-
validated_response = ChatCompletionsStreamingEvent.model_validate_json(event.data)
168+
validated_response = ChatCompletionsStreamingEvent.model_validate_json(one_event.data)
173169
if not (one_chunk := validated_response.choices[0].delta.content):
174170
continue
175171
text_chunks.append(one_chunk)
@@ -187,19 +183,13 @@ async def stream_llm_partial_messages(
187183
**extra or {},
188184
).model_dump(mode="json")
189185
try:
190-
async with make_streaming_http_request(
191-
httpx_client=self.httpx_client,
192-
request_retry=self.request_retry,
193-
build_request=lambda: self._build_request(payload),
194-
) as response:
186+
async with self.http_client.stream(request=self._build_request(payload)) as response:
195187
yield self._iter_partial_responses(response)
196-
except httpx.HTTPStatusError as exception:
197-
content: typing.Final = await exception.response.aread()
198-
await exception.response.aclose()
199-
_handle_status_error(status_code=exception.response.status_code, content=content)
188+
except HttpStatusError as exception:
189+
_handle_status_error(exception)
200190

201191
async def __aenter__(self) -> typing_extensions.Self:
202-
await self.httpx_client.__aenter__()
192+
await self.http_client.__aenter__()
203193
return self
204194

205195
async def __aexit__(
@@ -208,4 +198,4 @@ async def __aexit__(
208198
exc_value: BaseException | None,
209199
traceback: types.TracebackType | None,
210200
) -> None:
211-
await self.httpx_client.__aexit__(exc_type=exc_type, exc_value=exc_value, traceback=traceback)
201+
await self.http_client.__aexit__(exc_type=exc_type, exc_value=exc_value, traceback=traceback)

any_llm_client/clients/yandexgpt.py

Lines changed: 28 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -6,12 +6,12 @@
66
from http import HTTPStatus
77

88
import annotated_types
9-
import httpx
9+
import niquests
1010
import pydantic
1111
import typing_extensions
1212

1313
from any_llm_client.core import LLMClient, LLMConfig, LLMError, Message, OutOfTokensOrSymbolsError, UserMessage
14-
from any_llm_client.http import get_http_client_from_kwargs, make_http_request, make_streaming_http_request
14+
from any_llm_client.http import HttpClient, HttpStatusError
1515
from any_llm_client.retry import RequestRetryConfig
1616

1717

@@ -61,34 +61,34 @@ class YandexGPTResponse(pydantic.BaseModel):
6161
result: YandexGPTResult
6262

6363

64-
def _handle_status_error(*, status_code: int, content: bytes) -> typing.NoReturn:
65-
if status_code == HTTPStatus.BAD_REQUEST and (
66-
b"number of input tokens must be no more than" in content
67-
or (b"text length is" in content and b"which is outside the range" in content)
64+
def _handle_status_error(error: HttpStatusError) -> typing.NoReturn:
65+
if error.status_code == HTTPStatus.BAD_REQUEST and (
66+
b"number of input tokens must be no more than" in error.content
67+
or (b"text length is" in error.content and b"which is outside the range" in error.content)
6868
):
69-
raise OutOfTokensOrSymbolsError(response_content=content)
70-
raise LLMError(response_content=content)
69+
raise OutOfTokensOrSymbolsError(response_content=error.content)
70+
raise LLMError(response_content=error.content)
7171

7272

7373
@dataclasses.dataclass(slots=True, init=False)
7474
class YandexGPTClient(LLMClient):
7575
config: YandexGPTConfig
76-
httpx_client: httpx.AsyncClient
77-
request_retry: RequestRetryConfig
76+
http_client: HttpClient
7877

7978
def __init__(
8079
self,
8180
config: YandexGPTConfig,
8281
*,
8382
request_retry: RequestRetryConfig | None = None,
84-
**httpx_kwargs: typing.Any, # noqa: ANN401
83+
**niquests_kwargs: typing.Any, # noqa: ANN401
8584
) -> None:
8685
self.config = config
87-
self.request_retry = request_retry or RequestRetryConfig()
88-
self.httpx_client = get_http_client_from_kwargs(httpx_kwargs)
86+
self.http_client = HttpClient(
87+
request_retry=request_retry or RequestRetryConfig(), niquests_kwargs=niquests_kwargs
88+
)
8989

90-
def _build_request(self, payload: dict[str, typing.Any]) -> httpx.Request:
91-
return self.httpx_client.build_request(
90+
def _build_request(self, payload: dict[str, typing.Any]) -> niquests.Request:
91+
return niquests.Request(
9292
method="POST",
9393
url=str(self.config.url),
9494
json=payload,
@@ -121,18 +121,14 @@ async def request_llm_message(
121121
)
122122

123123
try:
124-
response: typing.Final = await make_http_request(
125-
httpx_client=self.httpx_client,
126-
request_retry=self.request_retry,
127-
build_request=lambda: self._build_request(payload),
128-
)
129-
except httpx.HTTPStatusError as exception:
130-
_handle_status_error(status_code=exception.response.status_code, content=exception.response.content)
131-
132-
return YandexGPTResponse.model_validate_json(response.content).result.alternatives[0].message.text
133-
134-
async def _iter_completion_messages(self, response: httpx.Response) -> typing.AsyncIterable[str]:
135-
async for one_line in response.aiter_lines():
124+
response: typing.Final = await self.http_client.request(self._build_request(payload))
125+
except HttpStatusError as exception:
126+
_handle_status_error(exception)
127+
128+
return YandexGPTResponse.model_validate_json(response).result.alternatives[0].message.text
129+
130+
async def _iter_completion_messages(self, response: typing.AsyncIterable[bytes]) -> typing.AsyncIterable[str]:
131+
async for one_line in response:
136132
validated_response = YandexGPTResponse.model_validate_json(one_line)
137133
yield validated_response.result.alternatives[0].message.text
138134

@@ -145,19 +141,13 @@ async def stream_llm_partial_messages(
145141
)
146142

147143
try:
148-
async with make_streaming_http_request(
149-
httpx_client=self.httpx_client,
150-
request_retry=self.request_retry,
151-
build_request=lambda: self._build_request(payload),
152-
) as response:
144+
async with self.http_client.stream(request=self._build_request(payload)) as response:
153145
yield self._iter_completion_messages(response)
154-
except httpx.HTTPStatusError as exception:
155-
content: typing.Final = await exception.response.aread()
156-
await exception.response.aclose()
157-
_handle_status_error(status_code=exception.response.status_code, content=content)
146+
except HttpStatusError as exception:
147+
_handle_status_error(exception)
158148

159149
async def __aenter__(self) -> typing_extensions.Self:
160-
await self.httpx_client.__aenter__()
150+
await self.http_client.__aenter__()
161151
return self
162152

163153
async def __aexit__(
@@ -166,4 +156,4 @@ async def __aexit__(
166156
exc_value: BaseException | None,
167157
traceback: types.TracebackType | None,
168158
) -> None:
169-
await self.httpx_client.__aexit__(exc_type=exc_type, exc_value=exc_value, traceback=traceback)
159+
await self.http_client.__aexit__(exc_type=exc_type, exc_value=exc_value, traceback=traceback)

any_llm_client/core.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -48,18 +48,6 @@ def AssistantMessage(text: str) -> Message: # noqa: N802
4848
return Message(role=MessageRole.assistant, text=text)
4949

5050

51-
@dataclasses.dataclass
52-
class LLMError(Exception):
53-
response_content: bytes
54-
55-
def __str__(self) -> str:
56-
return self.__repr__().removeprefix(self.__class__.__name__)
57-
58-
59-
@dataclasses.dataclass
60-
class OutOfTokensOrSymbolsError(LLMError): ...
61-
62-
6351
class LLMConfig(pydantic.BaseModel):
6452
model_config = pydantic.ConfigDict(protected_namespaces=())
6553
api_type: str
@@ -83,3 +71,15 @@ async def __aexit__(
8371
exc_value: BaseException | None,
8472
traceback: types.TracebackType | None,
8573
) -> None: ...
74+
75+
76+
@dataclasses.dataclass
77+
class LLMError(Exception):
78+
response_content: bytes
79+
80+
def __str__(self) -> str:
81+
return self.__repr__().removeprefix(self.__class__.__name__)
82+
83+
84+
@dataclasses.dataclass
85+
class OutOfTokensOrSymbolsError(LLMError): ...

0 commit comments

Comments
 (0)