Understanding how Edge Compute executes your functions helps you optimize performance and build reliable applications.
Overview
Edge Compute runs your functions in lightweight Linux containers. When a request arrives:
- Routing — Request is routed to the nearest edge location
- Container selection — An existing warm container handles the request, or a new one starts (cold start)
- Execution — Your function processes the request
- Response — Result is returned to the caller
- Keep-alive — Container stays warm for subsequent requests
Function Lifecycle
Cold Start Phase
When a function receives its first request or scales up, a new container initializes:
# Global initialization (runs once per container)
import json
from database import create_pool

# Expensive operations here — only run once
db_pool = create_pool()  # connection pool shared by every request in this container
cache = {}  # in-memory scratch cache; persists between requests, lost on recycle

# Function handler (runs per request)
async def handler(request):
    # Fast path — reuse initialized resources
    data = await request.json()
    result = await db_pool.query('SELECT * FROM users')
    # NOTE(review): Response is assumed to be provided by the runtime — confirm.
    return Response(json.dumps(result))
package function
import (
"database/sql"
"encoding/json"
"log"
"net/http"
"os"
)
// Global initialization (runs once per container)
var db *sql.DB

func init() {
	// Expensive operations here — only run once
	var err error
	db, err = sql.Open("postgres", os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	// sql.Open only validates its arguments — it does not connect.
	// Ping forces a real connection so a bad DATABASE_URL fails loudly
	// at cold start instead of on the first in-flight request.
	if err = db.Ping(); err != nil {
		log.Fatal(err)
	}
}
// Function handler (runs per request)
//
// Handle returns every row of the users table as JSON. All database
// errors — including per-row scan failures and iteration errors the
// original version silently dropped — are surfaced as HTTP 500s.
func Handle(w http.ResponseWriter, r *http.Request) {
	// Fast path — reuse initialized resources
	rows, err := db.Query("SELECT * FROM users")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer rows.Close()

	cols, err := rows.Columns()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	var users []map[string]interface{}
	for rows.Next() {
		vals := make([]interface{}, len(cols))
		ptrs := make([]interface{}, len(cols))
		for i := range vals {
			ptrs[i] = &vals[i]
		}
		// Propagate scan failures instead of emitting a zero-value row.
		if err := rows.Scan(ptrs...); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		row := make(map[string]interface{}, len(cols))
		for i, col := range cols {
			row[col] = vals[i]
		}
		users = append(users, row)
	}
	// rows.Err reports errors encountered during iteration (e.g. a
	// dropped connection mid-result-set).
	if err := rows.Err(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(users); err != nil {
		// Headers are already written; all we can do is log.
		log.Printf("encode response: %v", err)
	}
}
import io.quarkus.funqy.Funq;
import jakarta.inject.Inject;
import io.agroal.api.AgroalDataSource;
import java.sql.ResultSet;
import java.util.*;
/**
 * Cold-start example: the Agroal connection pool is injected once per
 * container instance and reused by every request it handles.
 */
public class Function {

    @Inject
    AgroalDataSource dataSource; // initialized during container startup (cold start)

    /**
     * Returns every row of the users table as a list of
     * column-name → value maps.
     *
     * @throws Exception if acquiring the connection or running the query fails
     */
    @Funq
    public List<Map<String, Object>> getUsers() throws Exception {
        // try-with-resources closes rs, stmt, and conn in reverse order,
        // returning the connection to the pool.
        try (var conn = dataSource.getConnection();
             var stmt = conn.createStatement();
             var rs = stmt.executeQuery("SELECT * FROM users")) {
            List<Map<String, Object>> users = new ArrayList<>();
            var meta = rs.getMetaData();
            int cols = meta.getColumnCount();
            while (rs.next()) {
                Map<String, Object> row = new HashMap<>();
                // JDBC column indexes are 1-based.
                for (int i = 1; i <= cols; i++) {
                    row.put(meta.getColumnName(i), rs.getObject(i));
                }
                users.add(row);
            }
            return users;
        }
    }
}
Cold start timeline:
- Container image pulled (cached at edge)
- Runtime initializes (Python/Go/Quarkus)
- Global code executes (imports, connections)
- First request handled
Warm Execution
Subsequent requests reuse the same container instance:
import httpx

# Global variables persist between requests.
# AsyncClient matches the async handler — the synchronous httpx.Client
# used previously would block the event loop for the whole request.
client = httpx.AsyncClient(timeout=10.0)

async def handler(request):
    # Fast execution — resources already initialized
    print(f"Request: {request.method} {request.path}")
    resp = await client.get("https://api.example.com/data")
    return resp.json()
package function
import (
"encoding/json"
"log"
"net/http"
"os"
"time"
)
// Global variables persist between requests
var (
client *http.Client
logger *log.Logger
)
func init() {
// One-time initialization
client = &http.Client{Timeout: 10 * time.Second}
logger = log.New(os.Stdout, "[EDGE] ", log.LstdFlags)
}
// Handle proxies a JSON payload from the upstream API to the caller.
// Decode failures — which the original ignored, returning an empty 200 —
// now surface as 502 Bad Gateway.
func Handle(w http.ResponseWriter, r *http.Request) {
	// Fast execution — resources already initialized
	logger.Printf("Request: %s %s", r.Method, r.URL.Path)
	resp, err := client.Get("https://api.example.com/data")
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	defer resp.Body.Close()

	var data map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
		// Upstream sent something that is not valid JSON.
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(data); err != nil {
		// Response already started; logging is the only option left.
		logger.Printf("encode response: %v", err)
	}
}
import io.quarkus.funqy.Funq;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.net.URI;
/**
 * Warm-execution example: the HttpClient is built once per container
 * (static initializer) and reused by every request, so connections and
 * TLS sessions are shared across invocations.
 */
public class Function {

    // Persists for the container's lifetime.
    private static final HttpClient client = HttpClient.newHttpClient();

    @Funq
    public String fetchData(String path) throws Exception {
        URI target = URI.create("https://api.example.com" + path);
        HttpRequest outbound = HttpRequest.newBuilder(target)
                // Per-request deadline so a slow upstream cannot hang us.
                .timeout(java.time.Duration.ofSeconds(10))
                .build();
        HttpResponse<String> reply =
                client.send(outbound, HttpResponse.BodyHandlers.ofString());
        return reply.body();
    }
}
Container Recycling
Containers are recycled when:
- Idle for extended periods (to free resources)
- Memory limits approached
- New deployment shipped
- Platform scaling decisions
Don’t rely on container persistence for critical state. Use KV or external storage for data that must survive restarts.
Cold Start Optimization
Minimize cold start latency with these patterns:
1. Lazy Initialization
Defer expensive operations until needed:
# Bad — always initializes
ml_model = load_model('large_model.pkl')  # ~2 seconds added to EVERY cold start

def handler(request):
    return ml_model.predict(request.data)

# Good — only loads when needed
_ml_model = None  # module-level cache; persists for the container's lifetime

def get_model():
    # Load the model on first use and memoize it in the module global,
    # so only the first /predict request pays the load cost.
    global _ml_model
    if _ml_model is None:
        _ml_model = load_model('large_model.pkl')
    return _ml_model

def handler(request):
    if request.path == '/predict':
        return get_model().predict(request.data)
    # Health/status checks never trigger the model load.
    return {"status": "ok"}
// Bad — always initializes
var mlModel = loadModel("large_model.pkl") // ~2 seconds added to every cold start

func Handle(w http.ResponseWriter, r *http.Request) {
	result := mlModel.Predict(r.Body)
	json.NewEncoder(w).Encode(result)
}

// Good — only loads when needed
var (
	mlModel   *Model    // cached model, populated on first /predict request
	modelOnce sync.Once // guards the one-time load across concurrent goroutines
)

// getModel lazily loads the model exactly once and returns the cached instance.
func getModel() *Model {
	modelOnce.Do(func() {
		mlModel = loadModel("large_model.pkl")
	})
	return mlModel
}

func Handle(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/predict" {
		result := getModel().Predict(r.Body)
		json.NewEncoder(w).Encode(result)
		return
	}
	// Cheap path: status checks never trigger the model load.
	json.NewEncoder(w).Encode(map[string]string{"status": "ok"})
}
// Bad — always initializes at class load
// Static-final fields run during class initialization, so EVERY cold
// start pays the ~2 s model load — even requests that never predict.
static final Model model = Model.load("large_model.pkl");

@Funq
public Result predict(Input input) {
    return model.predict(input);
}
// Good — only loads when needed
// Lazy initialization-on-demand holder: the JVM initializes ModelHolder
// (and loads the model) only on first access, with class-initialization
// thread safety guaranteed by the JLS. This is pure JDK — the previous
// version's Suppliers.memoize is a Guava API, which the shown
// java.util.function.Supplier import does not provide.
private static final class ModelHolder {
    static final Model INSTANCE = Model.load("large_model.pkl");
}

@Funq
public Result predict(Input input) {
    // First call triggers ModelHolder's class init (the model load).
    return ModelHolder.INSTANCE.predict(input);
}

@Funq
public String status() {
    return "ok"; // No model load if just checking status
}
2. Minimize Dependencies
# Bad — imports everything
import pandas as pd
import numpy as np
import tensorflow as tf
# Good — import only what you need
from json import loads, dumps
// Bad — imports everything
import (
"github.com/heavy/ml-library"
"github.com/huge/data-processing"
)
// Good — import only what you need
import (
"encoding/json"
"net/http"
)
// Bad — imports heavy dependencies
import com.fasterxml.jackson.databind.*;
// Good — use built-in JSON handling
import jakarta.json.Json;
import jakarta.json.JsonObject;
3. Connection Pooling
# Initialize pool globally
from sqlalchemy import create_engine
from sqlalchemy.pool import QueuePool

# The engine (and its pool) is created once per container and shared.
# NOTE(review): DATABASE_URL is assumed to come from config/env — confirm.
engine = create_engine(
    DATABASE_URL,
    poolclass=QueuePool,
    pool_size=5,      # connections kept open between requests
    max_overflow=10   # extra short-lived connections allowed under burst load
)

def handler(request):
    # Borrow a pooled connection; the context manager returns it on exit.
    with engine.connect() as conn:
        result = conn.execute("SELECT * FROM users")
        return list(result)
// Initialize pool globally
var db *sql.DB

func init() {
	// One-time setup: the pool lives for the container's lifetime.
	var err error
	db, err = sql.Open("postgres", os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	// Bound the pool so a traffic spike cannot exhaust database connections.
	db.SetMaxOpenConns(10)
	db.SetMaxIdleConns(5) // idle connections kept warm for reuse across requests
}

func Handle(w http.ResponseWriter, r *http.Request) {
	// db.Query transparently borrows (and returns) a pooled connection.
	rows, err := db.Query("SELECT * FROM users")
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	defer rows.Close()
	// ...
}
// Quarkus automatically configures connection pooling with Agroal
// Add to application.properties:
// quarkus.datasource.jdbc.url=jdbc:postgresql://...
// quarkus.datasource.jdbc.max-size=20

/** Service bean whose injected datasource is backed by the Agroal pool. */
@ApplicationScoped
public class UserService {

    @Inject
    AgroalDataSource ds; // pooled datasource, sized via application.properties

    /** Fetches all users on a pooled connection (returned to the pool on close). */
    public List<User> getUsers() throws SQLException {
        try (var conn = ds.getConnection();
             var stmt = conn.createStatement();
             var rs = stmt.executeQuery("SELECT * FROM users")) {
            // ...
        }
    }
}
Concurrency
Each container handles one request at a time by default. The platform automatically scales containers based on traffic:
Traffic: 100 requests/second
↓
Platform scales to ~100 containers
↓
Each container handles ~1 req/sec
Scaling Behavior
| Traffic Pattern | Platform Response |
|---|---|
| Traffic spike | New containers start (cold starts) |
| Sustained load | Containers stay warm |
| Traffic drops | Containers gradually recycle |
| Zero traffic | All containers recycle after idle timeout |
Request Timeouts
Functions have execution time limits:
| Tier | Timeout |
|---|---|
| Default | 30 seconds |
| Extended | 60 seconds (configurable) |
Handle timeouts gracefully:
import asyncio

async def handler(request):
    """Run a long operation with a self-imposed deadline below the platform's."""
    try:
        # Set timeout for 25 seconds (5 sec buffer before platform timeout)
        result = await asyncio.wait_for(
            long_running_operation(),
            timeout=25.0
        )
        return result
    except asyncio.TimeoutError:
        # Degrade gracefully: report the timeout plus any partial progress.
        return {"error": "Operation timed out", "partial": get_partial_result()}
// Handle runs a long operation under a 25 s deadline (5 s buffer before
// the platform's 30 s limit). On timeout the caller now receives a
// 504 Gateway Timeout — the original returned 200 OK with an error body.
func Handle(w http.ResponseWriter, r *http.Request) {
	// Create context with timeout (5 sec buffer before platform timeout)
	ctx, cancel := context.WithTimeout(r.Context(), 25*time.Second)
	defer cancel()

	// Buffer of 1 lets the worker send its result and exit even after
	// the timeout branch has already responded — no goroutine leak.
	resultCh := make(chan Result, 1)
	go func() {
		resultCh <- longRunningOperation(ctx)
	}()

	select {
	case result := <-resultCh:
		json.NewEncoder(w).Encode(result)
	case <-ctx.Done():
		// Signal the timeout in the status code, not just the body.
		w.WriteHeader(http.StatusGatewayTimeout)
		json.NewEncoder(w).Encode(map[string]string{
			"error": "Operation timed out",
		})
	}
}
import io.smallrye.mutiny.Uni;
import java.time.Duration;
/**
 * Runs the operation reactively and fails after 25 s — a 5 s buffer
 * before the platform's 30 s limit — so the caller gets a clean error.
 * TimeoutException is fully qualified: the snippet's imports do not
 * bring java.util.concurrent.TimeoutException into scope.
 */
@Funq
public Uni<Result> handleRequest(Input input) {
    return Uni.createFrom().item(() -> longRunningOperation(input))
        .onItem().ifNoItem().after(Duration.ofSeconds(25))
        .failWith(new java.util.concurrent.TimeoutException("Operation timed out"));
}
Triggers
Functions can be invoked by:
HTTP Requests
# func.toml
[edge_compute]
func_name = "my-api"
# Accessible at: https://my-api-{orgId}.telnyxcompute.com
Webhooks
Configure Telnyx services to call your function:
async def handler(request):
    """Route an incoming Telnyx webhook event to its dedicated handler."""
    event = await request.json()
    event_type = event.get('event_type')
    if event_type == 'message.received':
        handle_incoming_message(event['data'])
    elif event_type == 'call.initiated':
        handle_call(event['data'])
    # Always acknowledge so the platform does not retry delivery.
    return {"status": "ok"}
// Handle dispatches Telnyx webhook events to the matching handler and
// acknowledges with {"status":"ok"}. Malformed payloads are rejected
// with 400 — previously the decode error was ignored and the switch ran
// on a zero-value event.
func Handle(w http.ResponseWriter, r *http.Request) {
	var event WebhookEvent
	if err := json.NewDecoder(r.Body).Decode(&event); err != nil {
		http.Error(w, "invalid JSON payload", http.StatusBadRequest)
		return
	}
	switch event.EventType {
	case "message.received":
		handleIncomingMessage(event.Data)
	case "call.initiated":
		handleCall(event.Data)
	}
	json.NewEncoder(w).Encode(map[string]string{"status": "ok"})
}
import io.quarkus.funqy.Funq;
import java.util.Map;
/**
 * Dispatches Telnyx webhook events to the matching handler and always
 * acknowledges with {"status": "ok"} so the platform stops retrying.
 */
public class WebhookHandler {

    @Funq
    public Map<String, String> handle(WebhookEvent event) {
        // Literal-first equals() avoids an NPE if eventType is null.
        if ("message.received".equals(event.eventType)) {
            handleIncomingMessage(event.data);
        } else if ("call.initiated".equals(event.eventType)) {
            handleCall(event.data);
        }
        return Map.of("status", "ok");
    }
}
Cron Triggers (Coming Soon)
🔜 Scheduled execution via cron expressions is planned for a future release.
Graceful Shutdown
When containers recycle, your function receives a shutdown signal:
import signal
import sys

def shutdown_handler(signum, frame):
    """SIGTERM hook: flush state before the container is recycled."""
    # Clean up resources
    db_pool.close()   # release pooled database connections
    cache.flush()     # persist anything buffered in memory
    sys.exit(0)

signal.signal(signal.SIGTERM, shutdown_handler)
func init() {
	// Set up graceful shutdown: the platform sends SIGTERM before
	// recycling a container, giving the function a window to clean up.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGTERM)
	go func() {
		<-stop
		log.Println("Shutting down...")
		db.Close() // release pooled database connections
		os.Exit(0)
	}()
}
import io.quarkus.runtime.ShutdownEvent;
import jakarta.enterprise.event.Observes;
/** Observes the Quarkus shutdown event fired when the container recycles. */
public class ShutdownHandler {

    void onShutdown(@Observes ShutdownEvent event) {
        // Clean up resources before the process exits; anything not
        // flushed here is lost when the container is recycled.
        closeConnections();
        flushCache();
    }
}
Best Practices
- Initialize globally — Move expensive setup outside request handlers
- Keep handlers fast — Aim for < 100ms p99 latency
- Use connection pools — Reuse database/HTTP connections
- Handle errors gracefully — Return meaningful error responses
- Don’t store state in memory — Use KV or external storage for persistence
- Set appropriate timeouts — On outbound requests to prevent hanging
Next Steps
- Bindings — Connect to Telnyx platform services
- Limits — Understand resource constraints
- Configuration — Set environment variables and secrets