The assistant's message, containing role and content fields.
finish_reason
string
"stop", "length", "tool_calls", or "content_filter".
Usage Object
Field
Type
Description
prompt_tokens
integer
Number of tokens in the prompt.
completion_tokens
integer
Number of tokens in the generated completion.
total_tokens
integer
Total tokens used (prompt_tokens + completion_tokens).
prompt_tokens_details
object
Breakdown of prompt tokens. Contains cached_tokens (number of tokens served from cache).
completion_tokens_details
object
Breakdown of completion tokens. Contains reasoning_tokens (tokens used for internal reasoning).
Examples
Basic Request
# Basic request: one user message to claude-sonnet-4, response capped at 256 tokens.
# Requires ROUTERHUB_API_KEY to be set in the environment.
curl https://api.routerhub.ai/v1/chat/completions \
-H "Authorization: Bearer $ROUTERHUB_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"model": "anthropic/claude-sonnet-4",
"messages": [
{"role": "user", "content": "What is the capital of France?"}
],
"max_tokens": 256
}'
import requests

# Basic single-turn request against the RouterHub chat completions endpoint.
API_URL = "https://api.routerhub.ai/v1/chat/completions"

request_headers = {
    "Authorization": "Bearer YOUR_API_KEY",
    "Content-Type": "application/json",
}
payload = {
    "model": "anthropic/claude-sonnet-4",
    "messages": [
        {"role": "user", "content": "What is the capital of France?"},
    ],
    "max_tokens": 256,
}

resp = requests.post(API_URL, headers=request_headers, json=payload)
body = resp.json()
# The assistant's reply lives in the first choice's message content.
print(body["choices"][0]["message"]["content"])
from openai import OpenAI

# Point the official OpenAI client at the RouterHub-compatible endpoint.
client = OpenAI(base_url="https://api.routerhub.ai/v1", api_key="YOUR_API_KEY")

completion = client.chat.completions.create(
    model="anthropic/claude-sonnet-4",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    max_tokens=256,
)
# Print the assistant's reply from the first choice.
print(completion.choices[0].message.content)
from langchain_openai import ChatOpenAI

# Configure a LangChain chat model backed by RouterHub's OpenAI-compatible API.
chat_model = ChatOpenAI(
    base_url="https://api.routerhub.ai/v1",
    api_key="YOUR_API_KEY",
    model="anthropic/claude-sonnet-4",
    max_tokens=256,
)

# A plain string is treated as a single user message.
result = chat_model.invoke("What is the capital of France?")
print(result.content)
Multi-turn Conversation
# Multi-turn conversation: prior user/assistant turns are replayed in the
# messages array so the model has context for the follow-up question.
curl https://api.routerhub.ai/v1/chat/completions \
-H "Authorization: Bearer $ROUTERHUB_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"model": "google/gemini-2.5-pro",
"messages": [
{"role": "user", "content": "What is photosynthesis?"},
{"role": "assistant", "content": "Photosynthesis is the process by which plants convert sunlight, water, and carbon dioxide into glucose and oxygen."},
{"role": "user", "content": "What is the chemical equation for it?"}
],
"max_tokens": 512
}'
import requests

# Multi-turn conversation: earlier turns are replayed so the model has
# context for the follow-up question.
API_URL = "https://api.routerhub.ai/v1/chat/completions"

conversation = [
    {"role": "user", "content": "What is photosynthesis?"},
    {"role": "assistant", "content": "Photosynthesis is the process by which plants convert sunlight, water, and carbon dioxide into glucose and oxygen."},
    {"role": "user", "content": "What is the chemical equation for it?"},
]

resp = requests.post(
    API_URL,
    headers={
        "Authorization": "Bearer YOUR_API_KEY",
        "Content-Type": "application/json",
    },
    json={
        "model": "google/gemini-2.5-pro",
        "messages": conversation,
        "max_tokens": 512,
    },
)
print(resp.json()["choices"][0]["message"]["content"])
from openai import OpenAI

# Multi-turn conversation via the OpenAI SDK: replay earlier turns so the
# follow-up question is answered in context.
client = OpenAI(base_url="https://api.routerhub.ai/v1", api_key="YOUR_API_KEY")

history = [
    {"role": "user", "content": "What is photosynthesis?"},
    {"role": "assistant", "content": "Photosynthesis is the process by which plants convert sunlight, water, and carbon dioxide into glucose and oxygen."},
    {"role": "user", "content": "What is the chemical equation for it?"},
]

completion = client.chat.completions.create(
    model="google/gemini-2.5-pro",
    messages=history,
    max_tokens=512,
)
print(completion.choices[0].message.content)
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage

# Multi-turn conversation with LangChain message objects: HumanMessage for
# user turns, AIMessage for prior assistant replies.
chat_model = ChatOpenAI(
    base_url="https://api.routerhub.ai/v1",
    api_key="YOUR_API_KEY",
    model="google/gemini-2.5-pro",
    max_tokens=512,
)

history = [
    HumanMessage(content="What is photosynthesis?"),
    AIMessage(content="Photosynthesis is the process by which plants convert sunlight, water, and carbon dioxide into glucose and oxygen."),
    HumanMessage(content="What is the chemical equation for it?"),
]
result = chat_model.invoke(history)
print(result.content)
from openai import OpenAI

# System-prompt example: a system message steers the model's persona, and a
# low temperature keeps the math explanation deterministic.
client = OpenAI(base_url="https://api.routerhub.ai/v1", api_key="YOUR_API_KEY")

prompt_messages = [
    {"role": "system", "content": "You are a helpful math tutor. Explain concepts step by step."},
    {"role": "user", "content": "Solve: 2x + 5 = 13"},
]

completion = client.chat.completions.create(
    model="openai/gpt-4.1",
    messages=prompt_messages,
    temperature=0.2,
    max_tokens=1024,
)
print(completion.choices[0].message.content)
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage

# System-prompt example with LangChain: SystemMessage sets the persona;
# temperature=0.2 favors a consistent, step-by-step answer.
chat_model = ChatOpenAI(
    base_url="https://api.routerhub.ai/v1",
    api_key="YOUR_API_KEY",
    model="openai/gpt-4.1",
    temperature=0.2,
    max_tokens=1024,
)

result = chat_model.invoke([
    SystemMessage(content="You are a helpful math tutor. Explain concepts step by step."),
    HumanMessage(content="Solve: 2x + 5 = 13"),
])
print(result.content)
Sample Response
{
"id": "chatcmpl-abc123def456",
"object": "chat.completion",
"created": 1709251200,
"model": "anthropic/claude-sonnet-4",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "The capital of France is Paris. It is the largest city in France and serves as the country's political, economic, and cultural center."
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 14,
"completion_tokens": 32,
"total_tokens": 46,
"prompt_tokens_details": {
"cached_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0
}
},
"system_fingerprint": null,
"service_tier": "default"
}