Build a Chatbot
Build a production-ready AI chatbot with Assisters Chat API
Build a Chatbot with Assisters API
TL;DR
Use assisters-chat-v1 for conversations (128K context, streaming support). Add assisters-moderation-v1 for content safety. Build a complete chatbot in under 50 lines of code. Fully OpenAI-compatible—use existing SDKs.
Build intelligent, safe chatbots using Assisters' proprietary chat models. This tutorial covers everything from basic conversations to production-ready features.
Why Assisters Chat?
128K Context
assisters-chat-v1 handles long conversations and documents without losing context
Real-time Streaming
Token-by-token streaming for instant, engaging user experiences
Built-in Safety
Pair with assisters-moderation-v1 for automatic content filtering
OpenAI Compatible
Drop-in replacement—use existing code, just change the base URL
Quick Start: Basic Chatbot
# OpenAI-compatible client pointed at the Assisters API endpoint.
from openai import OpenAI

client = OpenAI(
    api_key="ask_your_api_key",  # From assisters.dev/dashboard
    base_url="https://api.assisters.dev/v1"
)
def chat(user_message: str, history: list = None) -> str:
    """Simple chatbot using Assisters Chat.

    Sends ``user_message`` (preceded by any prior ``history`` turns) to
    assisters-chat-v1 and returns the assistant's reply text.
    """
    prior_turns = history if history is not None else []

    # Assemble the request: system prompt, prior turns, then the new message.
    conversation = [{"role": "system", "content": "You are a helpful assistant."}]
    conversation.extend(prior_turns)
    conversation.append({"role": "user", "content": user_message})

    completion = client.chat.completions.create(
        model="assisters-chat-v1",  # Assisters flagship chat model
        messages=conversation,
    )
    return completion.choices[0].message.content
# Usage
answer = chat("What is Assisters API?")
print(answer)

import OpenAI from 'openai';
const client = new OpenAI({
apiKey: 'ask_your_api_key',
baseURL: 'https://api.assisters.dev/v1'
});
/**
 * Simple chatbot using Assisters Chat.
 *
 * @param {string} userMessage - The user's new message.
 * @param {Array}  history     - Optional prior conversation turns.
 * @returns {Promise<string>} The assistant's reply text.
 */
async function chat(userMessage, history = []) {
  // System prompt first, then prior turns, then the new user message.
  const conversation = [{ role: 'system', content: 'You are a helpful assistant.' }];
  conversation.push(...history);
  conversation.push({ role: 'user', content: userMessage });

  const completion = await client.chat.completions.create({
    model: 'assisters-chat-v1',
    messages: conversation
  });
  return completion.choices[0].message.content;
}
// Usage
const answer = await chat("What is Assisters API?");
console.log(answer);

curl https://api.assisters.dev/v1/chat/completions \
-H "Authorization: Bearer ask_your_api_key" \
-H "Content-Type: application/json" \
-d '{
"model": "assisters-chat-v1",
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is Assisters API?"}
]
}'

Streaming for Better UX
Enable streaming for real-time responses:
def chat_stream(user_message: str, history: list = None):
    """Chatbot with streaming responses.

    Prints tokens to stdout as they arrive and returns the complete
    reply text once the stream is exhausted.
    """
    prior_turns = history if history is not None else []
    conversation = (
        [{"role": "system", "content": "You are a helpful assistant."}]
        + prior_turns
        + [{"role": "user", "content": user_message}]
    )

    token_stream = client.chat.completions.create(
        model="assisters-chat-v1",
        messages=conversation,
        stream=True,  # Enable streaming
    )

    # Collect pieces and join once at the end instead of string +=.
    pieces = []
    for chunk in token_stream:
        delta_text = chunk.choices[0].delta.content
        if delta_text:
            print(delta_text, end="", flush=True)
            pieces.append(delta_text)
    print()  # Newline after response
    return "".join(pieces)
# Real-time output as tokens arrive
chat_stream("Explain quantum computing in simple terms")

Conversation Memory
Maintain chat history for context-aware conversations:
class Chatbot:
    """Production chatbot with conversation memory.

    Keeps the full exchange in ``self.history`` so every request carries
    the prior turns as context.

    Args:
        system_prompt: Instruction that frames the assistant's behavior.
        api_key: Assisters API key. Defaults to the ASSISTERS_API_KEY
            environment variable so real credentials stay out of source
            code; falls back to the placeholder key for the tutorial.
        base_url: API endpoint; override for testing or proxies.
    """

    def __init__(
        self,
        system_prompt: str = "You are a helpful assistant.",
        api_key: str = None,
        base_url: str = "https://api.assisters.dev/v1",
    ):
        import os  # local import keeps the sample self-contained

        self.client = OpenAI(
            api_key=api_key or os.environ.get("ASSISTERS_API_KEY", "ask_your_api_key"),
            base_url=base_url,
        )
        self.system_prompt = system_prompt
        self.history = []

    def chat(self, user_message: str) -> str:
        """Send message and get response, recording both in history."""
        # Record the user turn first so it is part of the request context.
        self.history.append({"role": "user", "content": user_message})

        # System prompt is re-sent on every call; only turns are persisted.
        messages = [{"role": "system", "content": self.system_prompt}] + self.history

        # Call Assisters Chat API
        response = self.client.chat.completions.create(
            model="assisters-chat-v1",
            messages=messages,
            temperature=0.7,
            max_tokens=1000,
        )
        assistant_message = response.choices[0].message.content

        # Persist the reply so follow-up questions keep their context.
        self.history.append({"role": "assistant", "content": assistant_message})
        return assistant_message

    def clear_history(self):
        """Reset conversation."""
        self.history = []
# Usage
bot = Chatbot("You are a helpful coding assistant.")
print(bot.chat("How do I sort a list in Python?"))
print(bot.chat("Can you show me with a custom key?")) # Remembers context

Content Moderation
Always moderate user input and AI output in production. Assisters' assisters-moderation-v1 model detects harmful content across 14 categories.
class SafeChatbot(Chatbot):
    """Chatbot with built-in content moderation.

    Screens both the user's input and the model's output with
    assisters-moderation-v1 before anything reaches the user.
    """

    def moderate(self, text: str) -> dict:
        """Check text for harmful content.

        Returns:
            dict with ``flagged`` (bool) and the per-category results.
        """
        result = self.client.moderations.create(
            model="assisters-moderation-v1",  # Assisters moderation model
            input=text,
        )
        return {
            "flagged": result.results[0].flagged,
            "categories": result.results[0].categories,
        }

    def chat(self, user_message: str, _retries: int = 1) -> str:
        """Chat with input/output moderation.

        Args:
            user_message: The user's message.
            _retries: Remaining regeneration attempts when the model's
                output is flagged. Internal; bounds the retry recursion so
                a consistently flagged response cannot loop forever.
        """
        # Check user input before spending tokens on a completion.
        input_check = self.moderate(user_message)
        if input_check["flagged"]:
            return "I can't respond to that type of message. Please rephrase."

        # Get response (Chatbot.chat appends both turns to history).
        response = super().chat(user_message)

        # Check AI output
        output_check = self.moderate(response)
        if output_check["flagged"]:
            # Drop BOTH the flagged reply and the user turn that produced it,
            # so the retry does not leave duplicate user messages in history.
            self.history.pop()
            self.history.pop()
            if _retries <= 0:
                return "I wasn't able to generate an appropriate response. Please try rephrasing."
            # Regenerate with stricter prompt
            return self.chat(
                user_message + " (Please keep the response appropriate.)",
                _retries - 1,
            )
        return response
# Safe by default
bot = SafeChatbot()
print(bot.chat("Hello! How are you?")) # Works normally

Specialized Chatbots
Code Assistant
Use assisters-code-v1 for coding tasks:
# Specialize the general Chatbot with a code-focused system prompt.
code_bot = Chatbot(system_prompt="""You are an expert programming assistant.
- Write clean, efficient code
- Explain your reasoning
- Suggest best practices
- Support Python, JavaScript, TypeScript, and more""")
# Override model for code tasks
def chat_code(self, user_message: str) -> str:
    """Query the code-optimized assisters-code-v1 model using the bot's
    system prompt and accumulated history (history is not updated)."""
    request_messages = [{"role": "system", "content": self.system_prompt}]
    request_messages += self.history
    request_messages.append({"role": "user", "content": user_message})

    completion = self.client.chat.completions.create(
        model="assisters-code-v1",  # Optimized for code
        messages=request_messages,
        temperature=0.2,  # More deterministic for code
    )
    return completion.choices[0].message.content
code_bot.chat_code = lambda msg: chat_code(code_bot, msg)
print(code_bot.chat_code("Write a Python function to find prime numbers"))

Vision-Enabled Chatbot
Use assisters-vision-v1 for image understanding:
def chat_with_image(user_message: str, image_url: str) -> str:
    """Chatbot that can understand images via assisters-vision-v1."""
    # Multimodal content: a text part plus an image part in one user turn.
    user_content = [
        {"type": "text", "text": user_message},
        {"type": "image_url", "image_url": {"url": image_url}},
    ]

    reply = client.chat.completions.create(
        model="assisters-vision-v1",  # Assisters vision model
        messages=[{"role": "user", "content": user_content}],
    )
    return reply.choices[0].message.content
# Analyze an image
result = chat_with_image(
    "What's in this image? Describe it in detail.",
    "https://example.com/image.jpg"
)
print(result)

Function Calling
Enable your chatbot to take actions:
import json
def chatbot_with_functions(user_message: str) -> str:
    """Chatbot that can call functions (tool use).

    Declares the available tools, lets the model decide whether to call
    one, and logs the requested call. Returns the model's text reply, or
    the placeholder "Function called" when the model produced only a tool
    call with no text.
    """
    # JSON-schema declarations for the tools the model may request.
    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "City name"},
                },
                "required": ["location"],
            },
        },
    }
    product_search_tool = {
        "type": "function",
        "function": {
            "name": "search_products",
            "description": "Search for products in the catalog",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search query"},
                    "max_results": {"type": "integer", "default": 5},
                },
                "required": ["query"],
            },
        },
    }

    reply = client.chat.completions.create(
        model="assisters-chat-v1",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": user_message},
        ],
        tools=[weather_tool, product_search_tool],
        tool_choice="auto",  # the model decides whether a tool is needed
    )

    message = reply.choices[0].message
    # Check if model wants to call a function
    if message.tool_calls:
        requested_call = message.tool_calls[0]
        function_name = requested_call.function.name
        arguments = json.loads(requested_call.function.arguments)
        print(f"Calling function: {function_name}")
        print(f"Arguments: {arguments}")
        # Execute the function (implement your logic here)
        # result = execute_function(function_name, arguments)
    return message.content or "Function called"
# Test function calling
chatbot_with_functions("What's the weather in San Francisco?")

Production Deployment
FastAPI Backend
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
app = FastAPI()
class ChatRequest(BaseModel):
    # Incoming payload: the user's message plus a client-chosen session id.
    message: str
    session_id: str

class ChatResponse(BaseModel):
    # Outgoing payload: the bot's reply, echoing the session id back.
    response: str
    session_id: str

# Session storage (use Redis in production)
sessions = {}
@app.post("/chat", response_model=ChatResponse)
async def chat_endpoint(request: ChatRequest):
    # Get or create the per-session bot (moderated by default via SafeChatbot).
    if request.session_id not in sessions:
        sessions[request.session_id] = SafeChatbot()
    bot = sessions[request.session_id]
    try:
        response = bot.chat(request.message)
        return ChatResponse(
            response=response,
            session_id=request.session_id
        )
    # Surface upstream API failures as a 500 rather than an unhandled crash.
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

Environment Variables
# .env file
ASSISTERS_API_KEY=ask_your_api_key
ASSISTERS_BASE_URL=https://api.assisters.dev/v1

import os
from dotenv import load_dotenv
load_dotenv()
client = OpenAI(
api_key=os.getenv("ASSISTERS_API_KEY"),
base_url=os.getenv("ASSISTERS_BASE_URL")
)

Cost Optimization
| Feature | Model | Cost Impact |
|---|---|---|
| Basic Chat | assisters-chat-v1 | $0.10/M input, $0.20/M output |
| Code Tasks | assisters-code-v1 | Same as chat |
| Image Understanding | assisters-vision-v1 | $0.05/M input, $0.10/M output |
| Moderation | assisters-moderation-v1 | $0.05/M tokens |
Cost Tip: Use max_tokens to limit response length and temperature=0 for deterministic responses that can be cached.