@@ -18,10 +18,15 @@ def test_endpoint_detection():
1818 ("https://apac.api.openai.com/v1" , "o3-mini" , True , "APAC endpoint with o3-mini" ),
1919 ("https://eu.api.openai.com/v1" , "gpt-4" , False , "EU endpoint with gpt-4" ),
2020 ("https://api.openai.com/v1" , "gpt-3.5-turbo" , False , "US endpoint with gpt-3.5" ),
21- ("https://azure.openai.com/" , "o1-mini" , False , "Azure endpoint (not OpenAI)" ),
22- ("https://fake.com/api.openai.com" , "o1-mini" , False , "Fake endpoint with o1" ),
23- (None , "o1-mini" , False , "None endpoint" ),
24- ("" , "o1-mini" , False , "Empty endpoint" ),
21+ ("https://azure.openai.com/" , "o1-mini" , True , "Azure endpoint with reasoning model" ),
22+ ("https://my-resource.openai.azure.com/" , "gpt-5" , True , "Azure with gpt-5" ),
23+ ("http://localhost:8000/v1" , "o1-mini" , True , "OptiLLM proxy with o1-mini" ),
24+ ("http://localhost:8000/v1" , "gpt-5-nano" , True , "OptiLLM proxy with gpt-5-nano" ),
25+ ("http://localhost:8000/v1" , "gpt-4" , False , "OptiLLM proxy with gpt-4" ),
26+ ("https://openrouter.ai/api/v1" , "o3-mini" , True , "OpenRouter with reasoning model" ),
27+ ("https://fake.com/api.openai.com" , "o1-mini" , True , "Any endpoint with reasoning model" ),
28+ (None , "o1-mini" , True , "None endpoint with reasoning model" ),
29+ ("" , "o1-mini" , True , "Empty endpoint with reasoning model" ),
2530 ("https://eu.api.openai.com/v1" , "O1-MINI" , True , "EU with uppercase model" ),
2631 ("HTTPS://EU.API.OPENAI.COM/v1" , "o1-mini" , True , "Uppercase URL" ),
2732 ]
@@ -33,23 +38,11 @@ def test_endpoint_detection():
     failed = 0

     for api_base, model, expected_result, description in test_cases:
-        # This is the exact logic from your fixed code
+        # This is the exact logic from the fixed code
         model_lower = str(model).lower()
-        api_base_lower = (api_base or "").lower()
-
-        is_openai_api = (
-            api_base_lower.startswith("https://api.openai.com")
-            or api_base_lower.startswith("https://eu.api.openai.com")
-            or api_base_lower.startswith("https://apac.api.openai.com")
-            or api_base_lower.startswith("http://api.openai.com")
-            or api_base_lower.startswith("http://eu.api.openai.com")
-            or api_base_lower.startswith("http://apac.api.openai.com")
-        )
-
-        is_openai_reasoning_model = (
-            is_openai_api
-            and model_lower.startswith(OPENAI_REASONING_MODEL_PREFIXES)
-        )
+
+        # Model-pattern based detection (works for all endpoints)
+        is_openai_reasoning_model = model_lower.startswith(OPENAI_REASONING_MODEL_PREFIXES)

         # Determine which parameter would be used
         param_used = "max_completion_tokens" if is_openai_reasoning_model else "max_tokens"
@@ -66,7 +59,6 @@ def test_endpoint_detection():
         print(f"\n{status} | {description}")
         print(f"  API Base: {api_base}")
         print(f"  Model: {model}")
-        print(f"  is_openai_api: {is_openai_api}")
         print(f"  is_reasoning_model: {is_openai_reasoning_model}")
         print(f"  Parameter used: {param_used}")
         print(f"  Expected: {expected_param}")
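
For reference, a minimal standalone sketch of the model-pattern check this diff switches to. The prefix tuple below is an assumption for illustration only; the real OPENAI_REASONING_MODEL_PREFIXES is defined in the module under test.

# Assumed prefixes ("o1", "o3", "gpt-5"); the actual constant lives in the code under test.
OPENAI_REASONING_MODEL_PREFIXES = ("o1", "o3", "gpt-5")

def pick_token_param(model: str) -> str:
    # Reasoning-style models take max_completion_tokens; everything else keeps max_tokens.
    # The endpoint URL is deliberately not consulted.
    is_reasoning = str(model).lower().startswith(OPENAI_REASONING_MODEL_PREFIXES)
    return "max_completion_tokens" if is_reasoning else "max_tokens"

assert pick_token_param("O1-MINI") == "max_completion_tokens"
assert pick_token_param("gpt-5-nano") == "max_completion_tokens"
assert pick_token_param("gpt-4") == "max_tokens"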