Getting Started
Installation Guide
Detailed installation instructions for all supported platforms and frameworks.
This guide covers detailed installation and setup instructions for integrating Bastio with various platforms, frameworks, and deployment environments.
Overview
Bastio works as a security proxy between your application and AI providers. No special SDKs are required - simply change your API endpoint and use your Bastio API key.
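In practice, adopting Bastio is usually a one-line change to an existing client. A minimal before/after sketch with the OpenAI Node SDK (the environment variable names are illustrative):
import OpenAI from 'openai';

// Before: the client talks to the provider directly.
// const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// After: the same client, routed through Bastio's security proxy.
const client = new OpenAI({
  apiKey: process.env.BASTIO_API_KEY,     // Bastio key replaces the provider key
  baseURL: 'https://api.bastio.com/v1',   // only the endpoint changes
});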
Account Setup
1. Create Organization
- Sign up at www.bastio.com
- Create or join an organization
- Verify your email address
- Complete security onboarding
2. Generate API Keys
# Navigate to Dashboard > API Keys
# Click "Generate New Key"
# Configure permissions:
Recommended API Key Configuration:
- Environment: Production/Staging/Development (see the key-selection sketch after this list)
- Rate Limits: Set appropriate limits for your use case
- Permissions:
- ✅ Chat Completions
- ✅ Embeddings (if used)
- ✅ Image Generation (if used)
 
- IP Restrictions: Limit to your server IPs (recommended)
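If you issue separate keys per environment, a small startup check keeps the wrong key out of the wrong deployment. A minimal sketch (the per-environment variable names are an illustrative convention, not a Bastio requirement):
// Illustrative helper: select the Bastio key for the current environment.
const keyByEnv: Record<string, string | undefined> = {
  production: process.env.BASTIO_API_KEY_PROD,
  staging: process.env.BASTIO_API_KEY_STAGING,
  development: process.env.BASTIO_API_KEY_DEV,
};

const bastioApiKey = keyByEnv[process.env.NODE_ENV ?? 'development'];

// Bastio keys start with the bastio_sk_ prefix (see Troubleshooting).
if (!bastioApiKey || !bastioApiKey.startsWith('bastio_sk_')) {
  throw new Error('Missing or malformed Bastio API key for this environment');
}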
Platform Integration
Node.js / JavaScript
Using OpenAI SDK
npm install openai
import OpenAI from 'openai';
const client = new OpenAI({
  apiKey: process.env.BASTIO_API_KEY,
  baseURL: 'https://api.bastio.com/v1'
});
// Usage
const completion = await client.chat.completions.create({
  messages: [{ role: 'user', content: 'Hello!' }],
  model: 'gpt-4',
});
Using Anthropic SDK
npm install @anthropic-ai/sdk
import Anthropic from '@anthropic-ai/sdk';
const anthropic = new Anthropic({
  apiKey: process.env.BASTIO_API_KEY,
  baseURL: 'https://api.bastio.com/v1'
});
const message = await anthropic.messages.create({
  model: 'claude-3-sonnet-20240229',
  max_tokens: 1000,
  messages: [{ role: 'user', content: 'Hello!' }]
});
Python
Using OpenAI Python Client
pip install openai
import os
from openai import OpenAI
client = OpenAI(
    api_key=os.environ.get("BASTIO_API_KEY"),
    base_url="https://api.bastio.com/v1"
)
completion = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "user", "content": "Hello!"}
    ]
)
Using Anthropic Python Client
pip install anthropic
import os
import anthropic
client = anthropic.Anthropic(
    api_key=os.environ.get("BASTIO_API_KEY"),
    base_url="https://api.bastio.com/v1"
)
message = client.messages.create(
    model="claude-3-sonnet-20240229",
    max_tokens=1000,
    messages=[
        {"role": "user", "content": "Hello!"}
    ]
)
Go
package main
import (
    "context"
    "fmt"
    "os"
    
    "github.com/sashabaranov/go-openai"
)
func main() {
    config := openai.DefaultConfig(os.Getenv("BASTIO_API_KEY"))
    config.BaseURL = "https://api.bastio.com/v1"
    client := openai.NewClientWithConfig(config)
    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: openai.GPT4,
            Messages: []openai.ChatCompletionMessage{
                {
                    Role:    openai.ChatMessageRoleUser,
                    Content: "Hello!",
                },
            },
        },
    )
    if err != nil {
        fmt.Printf("Error: %v\n", err)
        return
    }
    fmt.Println(resp.Choices[0].Message.Content)
}
Java
// Using OkHttp for HTTP requests
import okhttp3.*;
import java.io.IOException;
public class BastioClient {
    private static final String BASE_URL = "https://api.bastio.com/v1";
    private final OkHttpClient client;
    private final String apiKey;
    public BastioClient(String apiKey) {
        this.client = new OkHttpClient();
        this.apiKey = apiKey;
    }
    public String createChatCompletion(String message) throws IOException {
        String json = """
            {
                "model": "gpt-4",
                "messages": [
                    {"role": "user", "content": "%s"}
                ]
            }
            """.formatted(message);
        RequestBody body = RequestBody.create(
            json, MediaType.get("application/json; charset=utf-8"));
        Request request = new Request.Builder()
            .url(BASE_URL + "/chat/completions")
            .header("Authorization", "Bearer " + apiKey)
            .post(body)
            .build();
        try (Response response = client.newCall(request).execute()) {
            return response.body().string();
        }
    }
}
PHP
<?php
require_once 'vendor/autoload.php';
use GuzzleHttp\Client;
class BastioClient {
    private $client;
    private $apiKey;
    private $baseUrl = 'https://api.bastio.com/v1';
    public function __construct($apiKey) {
        $this->apiKey = $apiKey;
        $this->client = new Client();
    }
    public function createChatCompletion($message) {
        $response = $this->client->post($this->baseUrl . '/chat/completions', [
            'headers' => [
                'Authorization' => 'Bearer ' . $this->apiKey,
                'Content-Type' => 'application/json',
            ],
            'json' => [
                'model' => 'gpt-4',
                'messages' => [
                    ['role' => 'user', 'content' => $message]
                ]
            ]
        ]);
        return json_decode($response->getBody(), true);
    }
}
?>
Framework Integration
Next.js (App Router)
Create an API route for secure server-side AI requests:
// app/api/chat/route.ts
import OpenAI from 'openai';
const client = new OpenAI({
  apiKey: process.env.BASTIO_API_KEY!,
  baseURL: 'https://api.bastio.com/v1'
});
export async function POST(req: Request) {
  const { message } = await req.json();
  const completion = await client.chat.completions.create({
    model: 'gpt-4',
    messages: [{ role: 'user', content: message }],
    stream: true,
  });
  return new Response(completion.toReadableStream(), {
    headers: { 'Content-Type': 'text/plain' },
  });
}
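On the client side, the streamed response can be read incrementally with standard web APIs. A minimal sketch (the /api/chat path matches the route above; with toReadableStream() each chunk arrives as newline-delimited JSON):
// Illustrative browser-side consumption of the streaming route above.
const res = await fetch('/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ message: 'Hello!' }),
});

const reader = res.body!.getReader();
const decoder = new TextDecoder();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log(decoder.decode(value)); // each line is a JSON-encoded completion chunk
}

Express.js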
import express from 'express';
import OpenAI from 'openai';
const app = express();
const client = new OpenAI({
  apiKey: process.env.BASTIO_API_KEY,
  baseURL: 'https://api.bastio.com/v1'
});
app.use(express.json());
app.post('/api/chat', async (req, res) => {
  try {
    const { message } = req.body;
    
    const completion = await client.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: message }]
    });
    res.json({ response: completion.choices[0].message.content });
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
});
app.listen(3000);
FastAPI (Python)
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import openai
import os
app = FastAPI()
client = openai.OpenAI(
    api_key=os.getenv("BASTIO_API_KEY"),
    base_url="https://api.bastio.com/v1"
)
class ChatRequest(BaseModel):
    message: str
@app.post("/chat")
async def chat(request: ChatRequest):
    try:
        completion = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": request.message}]
        )
        return {"response": completion.choices[0].message.content}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
Environment Configuration
Environment Variables
Create a .env file in your project root:
# Bastio Configuration
BASTIO_API_KEY=bastio_sk_your_key_here
BASTIO_BASE_URL=https://api.bastio.com/v1
# Optional: Custom timeout settings
BASTIO_TIMEOUT=30000
BASTIO_MAX_RETRIES=3
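The timeout and retry values are not picked up automatically; pass them into the client you construct. A minimal sketch using the OpenAI Node SDK (timeout and maxRetries are constructor options of that SDK; the env parsing and defaults are illustrative):
import OpenAI from 'openai';

// Read optional tuning values from the environment, with sensible fallbacks.
const client = new OpenAI({
  apiKey: process.env.BASTIO_API_KEY,
  baseURL: process.env.BASTIO_BASE_URL ?? 'https://api.bastio.com/v1',
  timeout: Number(process.env.BASTIO_TIMEOUT ?? 30000),    // milliseconds
  maxRetries: Number(process.env.BASTIO_MAX_RETRIES ?? 3),
});

Configuration Files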
Create a configuration file for your application:
{
  "bastio": {
    "apiKey": "${BASTIO_API_KEY}",
    "baseUrl": "https://api.bastio.com/v1",
    "timeout": 30000,
    "retries": 3,
    "security": {
      "threatLevel": "medium",
      "enablePiiDetection": true,
      "enableBotDetection": true,
      "enableJailbreakPrevention": true
    }
  }
}
Deployment
Docker
FROM node:18-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
COPY . .
# Set environment variables
# For production, prefer injecting the key at runtime (e.g. docker run -e BASTIO_API_KEY=...) rather than baking it into the image
ENV BASTIO_API_KEY=bastio_sk_your_key_here
ENV BASTIO_BASE_URL=https://api.bastio.com/v1
EXPOSE 3000
CMD ["npm", "start"]Kubernetes
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-ai-app
spec:
  replicas: 3
  selector:
    matchLabels:
      app: my-ai-app
  template:
    metadata:
      labels:
        app: my-ai-app
    spec:
      containers:
      - name: app
        image: my-ai-app:latest
        env:
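        # The bastio-secret referenced below must exist first, for example:
        #   kubectl create secret generic bastio-secret --from-literal=api-key=bastio_sk_your_key_here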
        - name: BASTIO_API_KEY
          valueFrom:
            secretKeyRef:
              name: bastio-secret
              key: api-key
        - name: BASTIO_BASE_URL
          value: "https://api.bastio.com/v1"Vercel
# Install Vercel CLI
npm i -g vercel
# Set environment variables
vercel env add BASTIO_API_KEY
vercel env add BASTIO_BASE_URL
# Deploy
vercel deploy
Railway
# Install Railway CLI
npm install -g @railway/cli
# Login and set environment variables
railway login
railway variables set BASTIO_API_KEY=bastio_sk_your_key_here
railway variables set BASTIO_BASE_URL=https://api.bastio.com/v1
# Deploy
railway deploy
Verification
Test Your Integration
Use this simple test script to verify your installation:
#!/bin/bash
curl -X POST https://api.bastio.com/v1/chat/completions \
  -H "Authorization: Bearer $BASTIO_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4",
    "messages": [
      {"role": "user", "content": "Say hello to confirm Bastio is working"}
    ]
  }'
Expected Response
{
  "id": "chatcmpl-123",
  "object": "chat.completion",
  "created": 1677652288,
  "model": "gpt-4",
  "choices": [{
    "index": 0,
    "message": {
      "role": "assistant",
      "content": "Hello! Bastio is working correctly and your AI requests are now secured."
    },
    "finish_reason": "stop"
  }],
  "usage": {
    "prompt_tokens": 15,
    "completion_tokens": 16,
    "total_tokens": 31
  },
  "bastio": {
    "threat_score": 0.1,
    "processing_time_ms": 45,
    "request_id": "req_abc123"
  }
}
Troubleshooting
Common Issues
Authentication Error (401)
# Check your API key format
echo $BASTIO_API_KEY
# Should start with: bastio_sk_
Connection Timeout
// Increase timeout for large requests
const client = new OpenAI({
  apiKey: process.env.BASTIO_API_KEY,
  baseURL: 'https://api.bastio.com/v1',
  timeout: 60000 // 60 seconds
});
SSL Certificate Issues
# For development only - never in production
export NODE_TLS_REJECT_UNAUTHORIZED=0
Getting Help
- 📧 Email Support: support@bastio.com
- 💬 Chat Support: Available in dashboard
- 📚 Documentation: Complete guides available
- 🎯 Status Page: status.bastio.com
Next Steps
After successful installation: