# API Reference
Complete API reference for Bastio's security endpoints.
Bastio provides OpenAI-compatible endpoints with built-in security layers. All requests are processed through our security engine before being forwarded to your chosen AI provider.
Bastio maintains full compatibility with OpenAI, Anthropic, and other major AI provider APIs. Simply change your base URL and use your Bastio API key.
## Base URL

```
https://api.bastio.com/v1
```

## Authentication
All API requests require authentication using API keys. Include your API key in the `Authorization` header:
**cURL**

```bash
curl -H "Authorization: Bearer bastio_sk_your_api_key_here" \
     https://api.bastio.com/v1/chat/completions
```

**Python**

```python
import openai

client = openai.OpenAI(
    api_key="bastio_sk_your_api_key_here",
    base_url="https://api.bastio.com/v1"
)
```

**TypeScript**

```typescript
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: 'bastio_sk_your_api_key_here',
  baseURL: 'https://api.bastio.com/v1'
});
```

**Go**

```go
config := openai.DefaultConfig("bastio_sk_your_api_key_here")
config.BaseURL = "https://api.bastio.com/v1"
client := openai.NewClientWithConfig(config)
```

## Chat Completions
`POST /chat/completions`
Create a chat completion with security processing. Supports both streaming and non-streaming responses.
### Request Body
Non-streaming request:

```json
{
  "model": "gpt-4",
  "messages": [
    {
      "role": "user",
      "content": "Hello, how are you?"
    }
  ],
  "stream": false,
  "temperature": 0.7
}
```

Streaming request:

```json
{
  "model": "gpt-4",
  "messages": [
    {
      "role": "user",
      "content": "Tell me a short story"
    }
  ],
  "stream": true,
  "temperature": 0.7
}
```

### Parameters
| Parameter | Type | Required | Description |
|---|---|---|---|
| `model` | string | Yes | The model to use (e.g., `"gpt-4"`, `"claude-3-sonnet"`) |
| `messages` | array | Yes | Array of message objects |
| `stream` | boolean | No | Whether to stream the response (default: `false`) |
| `temperature` | number | No | Sampling temperature (0-2) |
| `max_tokens` | number | No | Maximum number of tokens to generate |
| `top_p` | number | No | Nucleus sampling parameter |
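As an illustration of the optional parameters, here is a request that caps output length and tightens sampling, reusing the Python `client` configured above (the specific values are arbitrary examples, not recommendations):

```python
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Summarize SSE in one paragraph."}],
    temperature=0.2,  # lower temperature -> more deterministic output
    max_tokens=150,   # hard cap on the completion length
    top_p=0.9,        # nucleus sampling: sample from the top 90% probability mass
)
```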
### Response
Standard JSON response, returned when `stream` is `false` or omitted:
```json
{
  "id": "chatcmpl-123",
  "object": "chat.completion",
  "created": 1677652288,
  "choices": [{
    "index": 0,
    "message": {
      "role": "assistant",
      "content": "Hello! I'm doing well, thank you for asking."
    },
    "finish_reason": "stop"
  }],
  "usage": {
    "prompt_tokens": 9,
    "completion_tokens": 12,
    "total_tokens": 21
  },
  "bastio": {
    "threat_score": 0.1,
    "detected_threats": [],
    "processing_time_ms": 45
  }
}
```

Server-Sent Events (SSE) stream, returned when `stream` is `true`:

```text
data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gpt-4","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}
data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gpt-4","choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]}
data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gpt-4","choices":[{"index":0,"delta":{"content":"!"},"finish_reason":null}]}
data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gpt-4","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}
data: [DONE]
```

Each chunk contains:
- `delta`: Incremental content (the first chunk also includes `role`)
- `finish_reason`: `null` until the final chunk, then `"stop"`, `"length"`, or `"content_filter"`
- The stream ends with `data: [DONE]`
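If you consume the stream without an SDK, you can parse these SSE lines yourself. A minimal sketch using Python's `requests` library (this skips full SSE edge cases such as multi-line events and reconnection):

```python
import json
import requests

resp = requests.post(
    "https://api.bastio.com/v1/chat/completions",
    headers={"Authorization": "Bearer bastio_sk_your_key_here"},
    json={
        "model": "gpt-4",
        "messages": [{"role": "user", "content": "Tell me a story"}],
        "stream": True,
    },
    stream=True,  # keep the connection open and iterate over raw lines
)

for line in resp.iter_lines():
    if not line or not line.startswith(b"data: "):
        continue  # skip keep-alives and blank separator lines
    payload = line[len(b"data: "):]
    if payload == b"[DONE]":  # sentinel marking the end of the stream
        break
    chunk = json.loads(payload)
    delta = chunk["choices"][0]["delta"]
    print(delta.get("content", ""), end="")
```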
## Code Examples
**Python**

```python
import openai
client = openai.OpenAI(
    api_key="bastio_sk_your_key_here",
    base_url="https://api.bastio.com/v1"
)
# Non-streaming
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message.content)
# Streaming
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True
)
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
```

**TypeScript**

```typescript
import OpenAI from 'openai';
const client = new OpenAI({
  apiKey: 'bastio_sk_your_key_here',
  baseURL: 'https://api.bastio.com/v1'
});
// Non-streaming
const response = await client.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Hello!' }]
});
console.log(response.choices[0].message.content);
// Streaming
const stream = await client.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Tell me a story' }],
  stream: true
});
for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content || '');
}
```

**cURL**

```bash
# Non-streaming
curl -X POST https://api.bastio.com/v1/chat/completions \
  -H "Authorization: Bearer bastio_sk_your_key_here" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4",
    "messages": [{"role": "user", "content": "Hello!"}]
  }'
# Streaming
curl -X POST https://api.bastio.com/v1/chat/completions \
  -H "Authorization: Bearer bastio_sk_your_key_here" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4",
    "messages": [{"role": "user", "content": "Tell me a story"}],
    "stream": true
  }'
```

**Go**

```go
import (
    "context"
    "fmt"
    "io"
    "log"

    openai "github.com/sashabaranov/go-openai"
)
config := openai.DefaultConfig("bastio_sk_your_key_here")
config.BaseURL = "https://api.bastio.com/v1"
client := openai.NewClientWithConfig(config)
// Non-streaming
resp, err := client.CreateChatCompletion(
    context.Background(),
    openai.ChatCompletionRequest{
        Model: openai.GPT4,
        Messages: []openai.ChatCompletionMessage{
            {Role: openai.ChatMessageRoleUser, Content: "Hello!"},
        },
    },
)
if err != nil {
    log.Fatal(err)
}
fmt.Println(resp.Choices[0].Message.Content)
// Streaming
stream, err := client.CreateChatCompletionStream(
    context.Background(),
    openai.ChatCompletionRequest{
        Model: openai.GPT4,
        Messages: []openai.ChatCompletionMessage{
            {Role: openai.ChatMessageRoleUser, Content: "Tell me a story"},
        },
        Stream: true,
    },
)
if err != nil {
    log.Fatal(err)
}
defer stream.Close()
for {
    response, err := stream.Recv()
    if err == io.EOF {
        break
    }
    if err != nil {
        log.Fatal(err)
    }
    fmt.Print(response.Choices[0].Delta.Content)
}
```

## Security Headers
Bastio provides additional security information in response headers:
- `X-Bastio-Threat-Score`: Overall threat score (0-1)
- `X-Bastio-Processing-Time`: Processing time in milliseconds
- `X-Bastio-Request-Id`: Unique request identifier for tracking
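These headers, along with the `bastio` object shown in the response body above, can be inspected with any HTTP client. A minimal sketch using Python's `requests` (header names come from the list above):

```python
import requests

resp = requests.post(
    "https://api.bastio.com/v1/chat/completions",
    headers={"Authorization": "Bearer bastio_sk_your_key_here"},
    json={
        "model": "gpt-4",
        "messages": [{"role": "user", "content": "Hello!"}],
    },
)

# Security metadata from the response headers
print("Threat score:", resp.headers.get("X-Bastio-Threat-Score"))
print("Processing time:", resp.headers.get("X-Bastio-Processing-Time"))
print("Request ID:", resp.headers.get("X-Bastio-Request-Id"))

# The same information also appears in the "bastio" object of the body
body = resp.json()
print("Detected threats:", body.get("bastio", {}).get("detected_threats"))
```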
## Error Responses
Bastio uses standard HTTP status codes and provides detailed error information:
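Because the endpoints are OpenAI-compatible, you can generally expect error bodies to follow OpenAI's `error` object convention. A representative 401 body (the `message`, `type`, and `code` values shown here are illustrative, not an exhaustive specification):

```json
{
  "error": {
    "message": "Incorrect API key provided.",
    "type": "invalid_request_error",
    "code": "invalid_api_key"
  }
}
```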
For comprehensive error handling examples, see our Integration Guide.