import Telnyx from 'telnyx';
const client = new Telnyx({
apiKey: 'My API Key',
});
const inferenceEmbedding = await client.ai.assistants.versions.update('version_id', {
assistant_id: 'assistant_id',
});
console.log(inferenceEmbedding.id);

{
"id": "<string>",
"name": "<string>",
"created_at": "2023-11-07T05:31:56Z",
"model": "<string>",
"instructions": "<string>",
"description": "<string>",
"tools": [
{
"type": "webhook",
"webhook": {
"name": "<string>",
"description": "<string>",
"url": "https://example.com/api/v1/function",
"method": "POST",
"headers": [
{
"name": "<string>",
"value": "<string>"
}
],
"body_parameters": {
"properties": {
"age": {
"description": "The age of the customer.",
"type": "integer"
},
"location": {
"description": "The location of the customer.",
"type": "string"
}
},
"required": [
"age",
"location"
],
"type": "object"
},
"path_parameters": {
"properties": {
"id": {
"description": "The id of the customer.",
"type": "string"
}
},
"required": [
"id"
],
"type": "object"
},
"query_parameters": {
"properties": {
"page": {
"description": "The page number.",
"type": "integer"
}
},
"required": [
"page"
],
"type": "object"
}
}
}
],
"greeting": "<string>",
"llm_api_key_ref": "<string>",
"voice_settings": {
"voice": "<string>",
"voice_speed": 1,
"api_key_ref": "<string>",
"background_audio": {
"type": "predefined_media",
"value": "silence"
}
},
"transcription": {
"model": "deepgram/flux",
"language": "<string>",
"region": "<string>",
"settings": {
"smart_format": true,
"numerals": true,
"eot_threshold": 123,
"eot_timeout_ms": 123
}
},
"telephony_settings": {
"default_texml_app_id": "<string>",
"supports_unauthenticated_web_calls": true
},
"messaging_settings": {
"default_messaging_profile_id": "<string>",
"delivery_status_webhook_url": "<string>"
},
"enabled_features": [
"telephony"
],
"insight_settings": {
"insight_group_id": "<string>"
},
"privacy_settings": {
"data_retention": true
},
"dynamic_variables_webhook_url": "<string>",
"dynamic_variables": {},
"import_metadata": {
"import_provider": "elevenlabs",
"import_id": "<string>"
}
}

Updates the configuration of a specific assistant version. Cannot update the main version.
import Telnyx from 'telnyx';
const client = new Telnyx({
apiKey: 'My API Key',
});
const inferenceEmbedding = await client.ai.assistants.versions.update('version_id', {
assistant_id: 'assistant_id',
});
console.log(inferenceEmbedding.id);

{
"id": "<string>",
"name": "<string>",
"created_at": "2023-11-07T05:31:56Z",
"model": "<string>",
"instructions": "<string>",
"description": "<string>",
"tools": [
{
"type": "webhook",
"webhook": {
"name": "<string>",
"description": "<string>",
"url": "https://example.com/api/v1/function",
"method": "POST",
"headers": [
{
"name": "<string>",
"value": "<string>"
}
],
"body_parameters": {
"properties": {
"age": {
"description": "The age of the customer.",
"type": "integer"
},
"location": {
"description": "The location of the customer.",
"type": "string"
}
},
"required": [
"age",
"location"
],
"type": "object"
},
"path_parameters": {
"properties": {
"id": {
"description": "The id of the customer.",
"type": "string"
}
},
"required": [
"id"
],
"type": "object"
},
"query_parameters": {
"properties": {
"page": {
"description": "The page number.",
"type": "integer"
}
},
"required": [
"page"
],
"type": "object"
}
}
}
],
"greeting": "<string>",
"llm_api_key_ref": "<string>",
"voice_settings": {
"voice": "<string>",
"voice_speed": 1,
"api_key_ref": "<string>",
"background_audio": {
"type": "predefined_media",
"value": "silence"
}
},
"transcription": {
"model": "deepgram/flux",
"language": "<string>",
"region": "<string>",
"settings": {
"smart_format": true,
"numerals": true,
"eot_threshold": 123,
"eot_timeout_ms": 123
}
},
"telephony_settings": {
"default_texml_app_id": "<string>",
"supports_unauthenticated_web_calls": true
},
"messaging_settings": {
"default_messaging_profile_id": "<string>",
"delivery_status_webhook_url": "<string>"
},
"enabled_features": [
"telephony"
],
"insight_settings": {
"insight_group_id": "<string>"
},
"privacy_settings": {
"data_retention": true
},
"dynamic_variables_webhook_url": "<string>",
"dynamic_variables": {},
"import_metadata": {
"import_provider": "elevenlabs",
"import_id": "<string>"
}
}

Bearer authentication header of the form Bearer <token>, where <token> is your auth token.
ID of the model to use. You can use the Get models API to see all of your available models,
System instructions for the assistant. These may be templated with dynamic variables
The tools that the assistant can use. These may be templated with dynamic variables
Show child attributes
webhook Show child attributes
The name of the tool.
The description of the tool.
The URL of the external tool to be called. This URL is going to be used by the assistant. The URL can be templated like: https://example.com/api/v1/{id}, where {id} is a placeholder for a value that will be provided by the assistant if path_parameters are provided with the id attribute.
"https://example.com/api/v1/function"
The HTTP method to be used when calling the external tool.
GET, POST, PUT, DELETE, PATCH

The headers to be sent to the external tool.
Show child attributes
The value of the header. Note that we support mustache templating for the value. For example you can use Bearer {{#integration_secret}}test-secret{{/integration_secret}} to pass the value of the integration secret as the bearer token. Telnyx signature headers will be automatically added to the request.
The body parameters the webhook tool accepts, described as a JSON Schema object. These parameters will be passed to the webhook as the body of the request. See the JSON Schema reference for documentation about the format
{
"properties": {
"age": {
"description": "The age of the customer.",
"type": "integer"
},
"location": {
"description": "The location of the customer.",
"type": "string"
}
},
"required": ["age", "location"],
"type": "object"
}

The path parameters the webhook tool accepts, described as a JSON Schema object. These parameters will be passed to the webhook as the path of the request if the URL contains a placeholder for a value. See the JSON Schema reference for documentation about the format.
{
"properties": {
"id": {
"description": "The id of the customer.",
"type": "string"
}
},
"required": ["id"],
"type": "object"
}

The query parameters the webhook tool accepts, described as a JSON Schema object. These parameters will be passed to the webhook as the query of the request. See the JSON Schema reference for documentation about the format.
{
"properties": {
"page": {
"description": "The page number.",
"type": "integer"
}
},
"required": ["page"],
"type": "object"
}

Text that the assistant will use to start the conversation. This may be templated with dynamic variables.
This is only needed when using third-party inference providers. The identifier for an integration secret /v2/integration_secrets that refers to your LLM provider's API key. Warning: Free plans are unlikely to work with this integration.
Show child attributes
The voice to be used by the voice assistant. Check the full list of available voices via our voices API.
To use ElevenLabs, you must reference your ElevenLabs API key as an integration secret under the api_key_ref field. See integration secrets documentation for details. For Telnyx voices, use Telnyx.<model_id>.<voice_id> (e.g. Telnyx.KokoroTTS.af_heart)
The speed of the voice in the range [0.25, 2.0]. 1.0 is the default speed. Larger numbers make the voice faster, smaller numbers make it slower. This is only applicable for Telnyx Natural voices.
The identifier for an integration secret /v2/integration_secrets that refers to your ElevenLabs API key. Warning: Free plans are unlikely to work with this integration.
Optional background audio to play on the call. Use a predefined media bed, or supply a looped MP3 URL. If a media URL is chosen in the portal, customers can preview it before saving.
Show child attributes
Select from predefined media options.
predefined_media

The predefined media to use. silence disables background audio.
silence, office

Show child attributes
The speech to text model to be used by the voice assistant. All the deepgram models are run on-premise.
deepgram/flux is optimized for turn-taking but is English-only. deepgram/nova-3 is multi-lingual with automatic language detection but slightly higher latency.

deepgram/flux, deepgram/nova-3, deepgram/nova-2, azure/fast, distil-whisper/distil-large-v2, openai/whisper-large-v3-turbo

The language of the audio to be transcribed. If not set, or if set to auto, the model will automatically detect the language.
Region on third party cloud providers (currently Azure) if using one of their models
Show child attributes
Available only for deepgram/flux. Confidence required to trigger an end of turn. Higher values = more reliable turn detection but slightly increased latency.
Available only for deepgram/flux. Maximum milliseconds of silence before forcing an end of turn, regardless of confidence.
Show child attributes
Default Texml App used for voice calls with your assistant. This will be created automatically on assistant creation.
When enabled, allows users to interact with your AI assistant directly from your website without requiring authentication. This is required for FE widgets that work with assistants that have telephony enabled.
Show child attributes
Default Messaging Profile used for messaging exchanges with your assistant. This will be created automatically on assistant creation.
The URL where webhooks related to delivery statuses for assistant messages will be sent.
If telephony is enabled, the assistant will be able to make and receive calls. If messaging is enabled, the assistant will be able to send and receive messages.
telephony, messaging

Show child attributes
If true, conversation history and insights will be stored. If false, they will not be stored. This in‑tool toggle governs solely the retention of conversation history and insights via the AI assistant. It has no effect on any separate recording, transcription, or storage configuration that you have set at the account, number, or application level. All such external settings remain in force regardless of your selection here.
Map of dynamic variables and their default values
Returns the updated assistant version configuration
ID of the model to use. You can use the Get models API to see all of your available models,
System instructions for the assistant. These may be templated with dynamic variables
The tools that the assistant can use. These may be templated with dynamic variables
Show child attributes
webhook Show child attributes
The name of the tool.
The description of the tool.
The URL of the external tool to be called. This URL is going to be used by the assistant. The URL can be templated like: https://example.com/api/v1/{id}, where {id} is a placeholder for a value that will be provided by the assistant if path_parameters are provided with the id attribute.
"https://example.com/api/v1/function"
The HTTP method to be used when calling the external tool.
GET, POST, PUT, DELETE, PATCH

The headers to be sent to the external tool.
Show child attributes
The value of the header. Note that we support mustache templating for the value. For example you can use Bearer {{#integration_secret}}test-secret{{/integration_secret}} to pass the value of the integration secret as the bearer token. Telnyx signature headers will be automatically added to the request.
The body parameters the webhook tool accepts, described as a JSON Schema object. These parameters will be passed to the webhook as the body of the request. See the JSON Schema reference for documentation about the format
{
"properties": {
"age": {
"description": "The age of the customer.",
"type": "integer"
},
"location": {
"description": "The location of the customer.",
"type": "string"
}
},
"required": ["age", "location"],
"type": "object"
}

The path parameters the webhook tool accepts, described as a JSON Schema object. These parameters will be passed to the webhook as the path of the request if the URL contains a placeholder for a value. See the JSON Schema reference for documentation about the format.
{
"properties": {
"id": {
"description": "The id of the customer.",
"type": "string"
}
},
"required": ["id"],
"type": "object"
}

The query parameters the webhook tool accepts, described as a JSON Schema object. These parameters will be passed to the webhook as the query of the request. See the JSON Schema reference for documentation about the format.
{
"properties": {
"page": {
"description": "The page number.",
"type": "integer"
}
},
"required": ["page"],
"type": "object"
}

Text that the assistant will use to start the conversation. This may be templated with dynamic variables.
This is only needed when using third-party inference providers. The identifier for an integration secret /v2/integration_secrets that refers to your LLM provider's API key. Warning: Free plans are unlikely to work with this integration.
Show child attributes
The voice to be used by the voice assistant. Check the full list of available voices via our voices API.
To use ElevenLabs, you must reference your ElevenLabs API key as an integration secret under the api_key_ref field. See integration secrets documentation for details. For Telnyx voices, use Telnyx.<model_id>.<voice_id> (e.g. Telnyx.KokoroTTS.af_heart)
The speed of the voice in the range [0.25, 2.0]. 1.0 is the default speed. Larger numbers make the voice faster, smaller numbers make it slower. This is only applicable for Telnyx Natural voices.
The identifier for an integration secret /v2/integration_secrets that refers to your ElevenLabs API key. Warning: Free plans are unlikely to work with this integration.
Optional background audio to play on the call. Use a predefined media bed, or supply a looped MP3 URL. If a media URL is chosen in the portal, customers can preview it before saving.
Show child attributes
Select from predefined media options.
predefined_media

The predefined media to use. silence disables background audio.
silence, office

Show child attributes
The speech to text model to be used by the voice assistant. All the deepgram models are run on-premise.
deepgram/flux is optimized for turn-taking but is English-only. deepgram/nova-3 is multi-lingual with automatic language detection but slightly higher latency.

deepgram/flux, deepgram/nova-3, deepgram/nova-2, azure/fast, distil-whisper/distil-large-v2, openai/whisper-large-v3-turbo

The language of the audio to be transcribed. If not set, or if set to auto, the model will automatically detect the language.
Region on third party cloud providers (currently Azure) if using one of their models
Show child attributes
Available only for deepgram/flux. Confidence required to trigger an end of turn. Higher values = more reliable turn detection but slightly increased latency.
Available only for deepgram/flux. Maximum milliseconds of silence before forcing an end of turn, regardless of confidence.
Show child attributes
Default Texml App used for voice calls with your assistant. This will be created automatically on assistant creation.
When enabled, allows users to interact with your AI assistant directly from your website without requiring authentication. This is required for FE widgets that work with assistants that have telephony enabled.
Show child attributes
Default Messaging Profile used for messaging exchanges with your assistant. This will be created automatically on assistant creation.
The URL where webhooks related to delivery statuses for assistant messages will be sent.
If telephony is enabled, the assistant will be able to make and receive calls. If messaging is enabled, the assistant will be able to send and receive messages.
telephony, messaging

Show child attributes
If true, conversation history and insights will be stored. If false, they will not be stored. This in‑tool toggle governs solely the retention of conversation history and insights via the AI assistant. It has no effect on any separate recording, transcription, or storage configuration that you have set at the account, number, or application level. All such external settings remain in force regardless of your selection here.
Map of dynamic variables and their values
Was this page helpful?