Hello Schönwald, Alexander,
Welcome to the Microsoft Q&A and thank you for posting your questions here.
I understand that you would like to make Code Interpreter usable with Responses API.
Start by parsing the response to extract both IDs. The response structure looks like the example below (based on typical Azure Responses API output):
{
"id": "chatcmpl-123",
"object": "chat.completion",
"created": 1677652288,
"model": "gpt-5",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_abc123",
"type": "function",
"function": {
"name": "code_interpreter",
"arguments": "{}",
"output": {
"files": [
{
"file_id": "cfile-abc123",
"container_id": "container-xyz"
}
]
}
}
}
]
}
}
]
}
Secondly, after calling the Responses API in your code, extract the container_id and file_id from the response:
# Example: processing the Responses API output
response = client.chat.completions.create(
    model="gpt-5",
    messages=[...],
    tools=[...],  # include the code interpreter tool
)

# Collect the file and container IDs emitted by code_interpreter tool calls.
file_ids = []
container_ids = []
for choice in response.choices:
    tool_calls = choice.message.tool_calls
    if not tool_calls:
        continue
    for call in tool_calls:
        if call.function.name != "code_interpreter":
            continue
        payload = call.function.output  # may arrive as a JSON string or a dict
        if isinstance(payload, str):
            payload = json.loads(payload)
        for entry in payload.get("files", []):
            fid = entry.get("file_id")
            cid = entry.get("container_id")
            if fid and cid:
                file_ids.append(fid)
                container_ids.append(cid)
# Store these in your database or session for later download
# Store these in your database or session for later download
Store both IDs in your database or session so that when a user requests to download a file, you can retrieve the corresponding container_id for a given file_id. Then use the Azure OpenAI client's configuration to form the download URL and make the request. To avoid external dependencies, make the request with httpx — which the Azure OpenAI client (from the openai package) already uses internally — configured directly with the client's settings, like below:
from django.http import StreamingHttpResponse, Http404
import httpx
import json
from omnigw.factories.apps.chat.azure_openai_client_factory import AzureOpenAIClientFactory
def openai_file_proxy(request, file_id):
    """Proxy a file download from Azure OpenAI to the browser.

    Handles two cases:
      * container files (IDs prefixed with ``cfile-``) produced by the code
        interpreter tool, which are fetched from the container content
        endpoint and require the owning ``container_id``;
      * standard files, retrieved through the OpenAI files API.

    Raises:
        Http404: when the file, or the container_id for a container file,
            cannot be resolved, or the upstream request fails.
    """
    client = AzureOpenAIClientFactory.get_chat_client("gpt-4.1")  # Adjust model as needed
    file_id_str = str(file_id)

    # Container files (cfile-{id}) need the owning container's id as well.
    if file_id_str.startswith("cfile-"):
        # Prefer an explicit query parameter, then fall back to whatever was
        # persisted when the Responses API output was processed.
        container_id = request.GET.get("container_id")
        if not container_id:
            container_id = get_container_id_from_storage(file_id_str)
        if not container_id:
            raise Http404("Container ID not found for this file. It must be provided or stored.")

        # Reuse the Azure client's own configuration for the raw HTTP call.
        # NOTE(review): the openai SDK exposes base_url as an httpx.URL, which
        # has no rstrip(); convert to str first.
        base_url = str(client.base_url).rstrip('/')
        api_key = client.api_key
        api_version = client.api_version  # e.g., "2024-02-15-preview"

        # Construct the Azure container download URL.
        url = f"{base_url}/openai/containers/files/{container_id}/content"
        params = {
            "fileId": file_id_str,
            "api-version": api_version,
        }
        headers = {
            "api-key": api_key,
            "Content-Type": "application/octet-stream",
        }

        try:
            with httpx.Client() as http_client:
                azure_response = http_client.get(url, headers=headers, params=params, timeout=30.0)
                azure_response.raise_for_status()
        except httpx.RequestError as e:
            raise Http404(f"Failed to connect to Azure: {str(e)}")
        except httpx.HTTPStatusError as e:
            if e.response.status_code == 404:
                raise Http404("File not found in container.")
            else:
                raise Http404(f"Azure API error: {e.response.status_code}")

        # Derive a download filename from Content-Disposition, else fall back
        # to the file id.
        content_disposition = azure_response.headers.get("Content-Disposition", "")
        if "filename=" in content_disposition:
            filename = content_disposition.split("filename=")[1].strip('"')
        else:
            filename = file_id_str

        def file_iterator():
            # The non-streaming GET above already buffered the body, so
            # iterating after the httpx.Client context has closed is safe.
            for chunk in azure_response.iter_bytes(chunk_size=8192):
                yield chunk

        content_type = azure_response.headers.get("Content-Type", "application/octet-stream")
        resp = StreamingHttpResponse(file_iterator(), content_type=content_type)
        # BUG FIX: the original emitted a literal "(unknown)" placeholder here
        # instead of the filename computed above.
        resp["Content-Disposition"] = f'attachment; filename="{filename}"'
        return resp

    # Standard (non-container) files go through the OpenAI files API.
    try:
        meta = client.files.retrieve(file_id_str)
        file_content = client.files.content(file_id_str)
    except Exception as e:
        raise Http404(f"File not found: {str(e)}")

    content_type = (
        getattr(meta, 'mime_type', None)
        or getattr(meta, 'content_type', None)
        or getattr(meta, 'type', None)
        or "application/octet-stream"
    )
    filename = getattr(meta, 'filename', None) or getattr(meta, 'name', None) or file_id_str

    # Normalize the SDK return value into something StreamingHttpResponse
    # can iterate: streaming body, file-like object, or raw bytes.
    if hasattr(file_content, 'iter_bytes'):
        def file_iterator():
            for chunk in file_content.iter_bytes():
                yield chunk
        stream = file_iterator()
    elif hasattr(file_content, 'read'):
        stream = file_content
    else:
        stream = [file_content]

    resp = StreamingHttpResponse(stream, content_type=content_type)
    # BUG FIX: same placeholder defect as the container branch — use the
    # metadata-derived filename.
    resp["Content-Disposition"] = f'attachment; filename="{filename}"'
    return resp
# Hypothetical function to retrieve container_id from storage
def get_container_id_from_storage(file_id):
    """Look up the container_id previously stored for *file_id*.

    Placeholder stub — wire this up to your persistence layer, e.g.::

        return ContainerFile.objects.get(file_id=file_id).container_id

    Returns ``None`` while no mapping exists.
    """
    return None  # Replace with actual implementation
I hope this is helpful! Do not hesitate to let me know if you have any other questions or clarifications.
If this resolved your issue, please consider upvoting and accepting it as the answer to close the thread.