
Commit e2b5eda

refactor: clean up whitespace and improve code consistency in OpenAI tracing examples
1 parent 3b494fb commit e2b5eda

File tree: 3 files changed (+169, -225 lines)


examples/tracing/openai/responses_api_example.py

Lines changed: 56 additions & 49 deletions
@@ -14,23 +14,25 @@
 import openai
 from openlayer.lib import trace_openai, trace_async_openai
 
+
 def setup_environment():
     """Set up environment variables for the example."""
     # OpenAI API key
     os.environ["OPENAI_API_KEY"] = "your-openai-api-key-here"
-
+
     # Openlayer configuration
     os.environ["OPENLAYER_API_KEY"] = "your-openlayer-api-key-here"
     os.environ["OPENLAYER_INFERENCE_PIPELINE_ID"] = "your-pipeline-id-here"
 
+
 def chat_completions_example():
     """Example using the traditional Chat Completions API with tracing."""
     print("=== Chat Completions API Example ===")
-
+
     # Create and trace OpenAI client
     client = openai.OpenAI()
     traced_client = trace_openai(client)
-
+
     # Use Chat Completions API normally - tracing happens automatically
     response = traced_client.chat.completions.create(
         model="gpt-4o-mini",
@@ -41,23 +43,24 @@ def chat_completions_example():
         temperature=0.7,
         max_tokens=100,
     )
-
+
     print(f"Chat Completion Response: {response.choices[0].message.content}")
     print("✓ Chat Completions API call traced successfully")
 
+
 def responses_api_example():
     """Example using the new Responses API with tracing."""
     print("\n=== Responses API Example ===")
-
+
     # Create and trace OpenAI client
     client = openai.OpenAI()
     traced_client = trace_openai(client)
-
+
     # Check if Responses API is available
-    if not hasattr(traced_client, 'responses'):
+    if not hasattr(traced_client, "responses"):
         print("⚠️ Responses API not available in this OpenAI client version")
         return
-
+
     # Use Responses API with different parameter format
     response = traced_client.responses.create(
         model="gpt-4o-mini",
@@ -66,19 +69,20 @@ def responses_api_example():
         max_output_tokens=50,
         temperature=0.5,
     )
-
+
     # Note: The actual response structure depends on OpenAI's implementation
     print(f"Responses API Response: {response}")
     print("✓ Responses API call traced successfully")
 
+
 def streaming_chat_completions_example():
     """Example using streaming Chat Completions API with tracing."""
     print("\n=== Streaming Chat Completions Example ===")
-
+
     # Create and trace OpenAI client
     client = openai.OpenAI()
     traced_client = trace_openai(client)
-
+
     # Streaming chat completion
     stream = traced_client.chat.completions.create(
         model="gpt-4o-mini",
@@ -88,35 +92,36 @@ def streaming_chat_completions_example():
         stream=True,
         temperature=0.7,
     )
-
+
     print("Streaming response: ", end="", flush=True)
     for chunk in stream:
         if chunk.choices[0].delta.content is not None:
             print(chunk.choices[0].delta.content, end="", flush=True)
     print()
     print("✓ Streaming Chat Completions call traced successfully")
 
+
 def streaming_responses_api_example():
     """Example using streaming Responses API with tracing."""
     print("\n=== Streaming Responses API Example ===")
-
+
     # Create and trace OpenAI client
     client = openai.OpenAI()
     traced_client = trace_openai(client)
-
+
     # Check if Responses API is available
-    if not hasattr(traced_client, 'responses'):
+    if not hasattr(traced_client, "responses"):
         print("⚠️ Responses API not available in this OpenAI client version")
         return
-
+
     # Streaming responses
     stream = traced_client.responses.create(
         model="gpt-4o-mini",
         input="Tell me a short joke about programming.",
         stream=True,
         max_output_tokens=100,
     )
-
+
     print("Streaming response: ", end="", flush=True)
     for event in stream:
         # Handle different types of response stream events
@@ -125,33 +130,31 @@ def streaming_responses_api_example():
     print()
     print("✓ Streaming Responses API call traced successfully")
 
+
 def function_calling_example():
     """Example using function calling with both APIs."""
     print("\n=== Function Calling Example ===")
-
+
     # Create and trace OpenAI client
     client = openai.OpenAI()
     traced_client = trace_openai(client)
-
+
     # Define a simple function
-    tools = [{
-        "type": "function",
-        "function": {
-            "name": "get_weather",
-            "description": "Get the current weather for a location",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "location": {
-                        "type": "string",
-                        "description": "City name"
-                    }
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_weather",
+                "description": "Get the current weather for a location",
+                "parameters": {
+                    "type": "object",
+                    "properties": {"location": {"type": "string", "description": "City name"}},
+                    "required": ["location"],
                 },
-                "required": ["location"]
-            }
+            },
         }
-    }]
-
+    ]
+
     # Chat Completions with function calling
     response = traced_client.chat.completions.create(
         model="gpt-4o-mini",
@@ -161,12 +164,12 @@ def function_calling_example():
         tools=tools,
         tool_choice="auto",
     )
-
+
     print(f"Function call response: {response.choices[0].message}")
     print("✓ Function calling with Chat Completions traced successfully")
-
+
     # Responses API with function calling (if available)
-    if hasattr(traced_client, 'responses'):
+    if hasattr(traced_client, "responses"):
         try:
             response = traced_client.responses.create(
                 model="gpt-4o-mini",
@@ -179,14 +182,15 @@ def function_calling_example():
         except Exception as e:
             print(f"⚠️ Responses API function calling not yet supported: {e}")
 
+
 async def async_examples():
     """Examples using async clients."""
     print("\n=== Async Examples ===")
-
+
     # Create and trace async OpenAI client
     client = openai.AsyncOpenAI()
     traced_client = trace_async_openai(client)
-
+
     # Async chat completion
     response = await traced_client.chat.completions.create(
         model="gpt-4o-mini",
@@ -195,12 +199,12 @@ async def async_examples():
         ],
         temperature=0.1,
     )
-
+
     print(f"Async chat response: {response.choices[0].message.content}")
     print("✓ Async Chat Completions traced successfully")
-
+
     # Async responses (if available)
-    if hasattr(traced_client, 'responses'):
+    if hasattr(traced_client, "responses"):
         try:
             response = await traced_client.responses.create(
                 model="gpt-4o-mini",
@@ -212,26 +216,28 @@ async def async_examples():
         except Exception as e:
             print(f"⚠️ Async Responses API error: {e}")
 
+
 def main():
     """Run all examples."""
     print("OpenAI Chat Completions + Responses API Tracing Examples")
     print("=" * 60)
-
+
     # Setup (in real usage, set these in your environment)
     setup_environment()
-
+
     try:
         # Sync examples
         chat_completions_example()
         responses_api_example()
         streaming_chat_completions_example()
         streaming_responses_api_example()
         function_calling_example()
-
+
         # Async examples
         import asyncio
+
         asyncio.run(async_examples())
-
+
         print("\n🎉 All examples completed successfully!")
         print("\nKey Benefits of the New Implementation:")
         print("✓ Backward compatibility - existing Chat Completions code works unchanged")
@@ -240,10 +246,11 @@ def main():
         print("✓ Function calling - tool/function calls are properly captured in traces")
         print("✓ Enhanced metadata - Responses API provides richer traceability information")
         print("✓ Async support - both sync and async clients work seamlessly")
-
+
     except Exception as e:
         print(f"❌ Example failed: {e}")
         print("Note: This example requires valid OpenAI API keys and Openlayer configuration")
 
+
 if __name__ == "__main__":
-    main()
+    main()
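
For reference, the core pattern this example file exercises is small: set the OpenAI and Openlayer environment variables, wrap an openai.OpenAI() client with trace_openai from openlayer.lib, and then call the client exactly as usual so each completion is traced automatically. The sketch below condenses that flow from the diff above; the key values and the prompt are placeholders, and it assumes a valid Openlayer pipeline is configured.

import os

import openai

from openlayer.lib import trace_openai

# Placeholder credentials - replace with real values or set them in your shell.
os.environ["OPENAI_API_KEY"] = "your-openai-api-key-here"
os.environ["OPENLAYER_API_KEY"] = "your-openlayer-api-key-here"
os.environ["OPENLAYER_INFERENCE_PIPELINE_ID"] = "your-pipeline-id-here"

# Wrap the client once; subsequent calls are traced automatically.
client = trace_openai(openai.OpenAI())

# A normal Chat Completions call - tracing happens behind the scenes.
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    temperature=0.7,
)
print(response.choices[0].message.content)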
