Tool Calling
The Siraya AI platform supports Anthropic-compatible function calling, allowing models to invoke the tools and functions you define.
Example request
import anthropic

# Create a client pointed at the Siraya AI Anthropic-compatible endpoint.
client = anthropic.Anthropic(
    api_key="<API_KEY>",
    base_url="https://llm.siraya.pro",
)

# Send a request that declares one tool the model may call.
message = client.messages.create(
    model="claude-sonnet-4-5@20250929",
    max_tokens=1024,
    tools=[
        {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            # JSON Schema describing the tool's expected input.
            "input_schema": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The unit for temperature",
                    },
                },
                # Only "location" is mandatory; "unit" is optional.
                "required": ["location"],
            },
        }
    ],
    messages=[
        {
            "role": "user",
            "content": "What is the weather like in San Francisco?",
        }
    ],
)

# The content list will contain a tool_use block when the model calls the tool.
print("Response:", message.content)
import Anthropic from '@anthropic-ai/sdk';

// Create a client pointed at the Siraya AI Anthropic-compatible endpoint.
// NOTE: the original snippet was missing the `apiKey:` property key,
// which made the object literal a syntax error.
const anthropic = new Anthropic({
  apiKey: '<API_KEY>',
  baseURL: 'https://llm.siraya.pro',
});

// Send a request that declares one tool the model may call.
const message = await anthropic.messages.create({
  model: 'claude-sonnet-4-5@20250929',
  max_tokens: 1024,
  tools: [
    {
      name: 'get_weather',
      description: 'Get the current weather in a given location',
      // JSON Schema describing the tool's expected input.
      input_schema: {
        type: 'object',
        properties: {
          location: {
            type: 'string',
            description: 'The city and state, e.g. San Francisco, CA',
          },
          unit: {
            type: 'string',
            enum: ['celsius', 'fahrenheit'],
            description: 'The unit for temperature',
          },
        },
        // Only "location" is mandatory; "unit" is optional.
        required: ['location'],
      },
    },
  ],
  messages: [
    {
      role: 'user',
      content: 'What is the weather like in San Francisco?',
    },
  ],
});

// message.content will contain a tool_use block when the model calls the tool.
console.log('Response:', JSON.stringify(message.content, null, 2));
Tool call response format
When the model makes tool calls, the response includes tool use blocks:
{
"id": "msg_bdrk_01TfRsaKQknpW7KvJ6GmRe14",
"content": [
{
"id": "toolu_bdrk_018NGGhvJPRVPzme1rkv61JF",
"input": {
"location": "San Francisco, CA"
},
"name": "get_weather",
"type": "tool_use"
}
],
"model": "claude-sonnet-4-5@20250929",
"role": "assistant",
"stop_reason": "tool_use",
"stop_sequence": null,
"type": "message",
"usage": {
"cache_creation_input_tokens": 0,
"cache_read_input_tokens": 0,
"input_tokens": 615,
"output_tokens": 56,
"server_tool_use": null,
"service_tier": null,
"cache_creation": {
"ephemeral_1h_input_tokens": 0,
"ephemeral_5m_input_tokens": 0
}
}
}