You are viewing: API Specification — complete technical reference. See also: User Guide · Quick Reference.
- Component: AgentSDK — unified text chat with conversation history
- Module: gaia.chat.sdk
- Import: from gaia.chat.sdk import AgentSDK, AgentConfig, AgentResponse, SimpleChat, quick_chat
- Source: src/gaia/chat/sdk.py
Overview
AgentSDK provides a unified interface for text-based chat interactions with automatic conversation history management. It supports local LLMs (via Lemonade Server), the Claude API, and the ChatGPT/OpenAI API, with model-specific prompt formatting through the Prompts class.

Key Features:
- Automatic conversation history tracking
- Multi-model support (14+ models via Prompts)
- Streaming and non-streaming responses
- RAG (Retrieval-Augmented Generation) integration
- Session management
- Performance statistics
- Simple and advanced interfaces
- Interactive chat commands
- Agent communication
- Code generation
- Document Q&A (with RAG)
API Specification
AgentConfig
@dataclass
class AgentConfig:
    """Configuration for AgentSDK.

    Selects the chat backend (local LLM, Claude, or ChatGPT) and controls
    generation and conversation-history behavior. ``use_local_llm`` is
    computed automatically from the other backend flags.
    """

    # Model identifier used for the local LLM backend.
    model: str = DEFAULT_MODEL_NAME
    # Maximum number of tokens to generate per response.
    max_tokens: int = 512
    # Sampling temperature; None defers to the backend's default.
    temperature: Optional[float] = None
    # System prompt applied to conversations; None means no explicit prompt.
    system_prompt: Optional[str] = None
    max_history_length: int = 4  # Number of conversation pairs to keep
    # Whether to collect/report performance statistics with responses.
    show_stats: bool = False
    # Logging verbosity for the SDK.
    logging_level: str = "INFO"
    use_claude: bool = False  # Use Claude API
    use_chatgpt: bool = False  # Use ChatGPT/OpenAI API
    use_local_llm: bool = True  # Use local LLM (computed automatically)
    # Claude model identifier, used when use_claude is True.
    claude_model: str = "claude-sonnet-4-20250514"
    base_url: Optional[str] = None  # Lemonade server base URL (None = use LEMONADE_BASE_URL env var)
    assistant_name: str = "gaia"  # Name to use for assistant in conversations
AgentResponse
@dataclass
class AgentResponse:
    """Response from chat operations.

    Returned by both complete and streaming calls; streaming yields a
    sequence of these with ``is_complete`` False until the final chunk.
    """

    # The response (or streamed chunk) text.
    text: str
    # Conversation history entries, when provided by the call.
    history: Optional[List[str]] = None
    # Performance statistics, when stats collection is enabled.
    stats: Optional[Dict[str, Any]] = None
    # True for a complete response / final streaming chunk.
    is_complete: bool = True
AgentSDK
class AgentSDK:
    """
    Gaia Agent SDK - Unified text chat integration with conversation history.

    Provides a simple interface for integrating GAIA's text chat
    capabilities with conversation memory into applications.
    """

    def __init__(self, config: Optional[AgentConfig] = None):
        """Initialize the AgentSDK.

        Args:
            config: Optional configuration; defaults apply when None.
        """
        ...

    def send(self, message: str, **kwargs) -> AgentResponse:
        """
        Send a message and get a complete response with conversation history.

        Args:
            message: The message to send
            **kwargs: Additional arguments for LLM generation

        Returns:
            AgentResponse with the complete response and updated history
        """
        ...

    def send_stream(self, message: str, **kwargs):
        """
        Send a message and get a streaming response with conversation history.

        Args:
            message: The message to send
            **kwargs: Additional arguments for LLM generation

        Yields:
            AgentResponse chunks as they arrive
        """
        ...

    def send_messages(
        self,
        messages: List[Dict[str, Any]],
        system_prompt: Optional[str] = None,
        **kwargs,
    ) -> AgentResponse:
        """
        Send a full conversation history and get a response.

        Args:
            messages: List of message dicts with 'role' and 'content' keys
            system_prompt: Optional system prompt to use (overrides config)
            **kwargs: Additional arguments for LLM generation

        Returns:
            AgentResponse with the complete response
        """
        ...

    def send_messages_stream(
        self,
        messages: List[Dict[str, Any]],
        system_prompt: Optional[str] = None,
        **kwargs,
    ):
        """
        Send a full conversation history and get a streaming response.

        Args:
            messages: List of message dicts with 'role' and 'content' keys
            system_prompt: Optional system prompt to use (overrides config)
            **kwargs: Additional arguments for LLM generation

        Yields:
            AgentResponse chunks as they arrive
        """
        ...

    def get_history(self) -> List[str]:
        """
        Get the current conversation history.

        Returns:
            List of conversation entries in "role: message" format
        """
        ...

    def clear_history(self) -> None:
        """Clear the conversation history."""
        ...

    def get_formatted_history(self) -> List[Dict[str, str]]:
        """
        Get conversation history in structured format.

        Returns:
            List of dictionaries with 'role' and 'message' keys
        """
        ...

    def get_stats(self) -> Dict[str, Any]:
        """Get performance statistics."""
        ...

    def set_system_prompt(self, system_prompt: Optional[str]) -> None:
        """Set the system prompt for future conversations."""
        ...

    def enable_rag(self, documents: Optional[List[str]] = None, **rag_kwargs):
        """
        Enable RAG (Retrieval-Augmented Generation) for document-based chat.

        Args:
            documents: List of PDF file paths to index
            **rag_kwargs: Additional RAG configuration options
        """
        ...

    def disable_rag(self):
        """Disable RAG functionality."""
        ...

    def add_document(self, document_path: str) -> bool:
        """Add a document to the RAG index.

        Returns:
            True if the document was added successfully.
        """
        ...

    async def start_interactive_session(self) -> None:
        """
        Start an interactive chat session with conversation history.

        Provides a full CLI-style interactive experience with commands
        for managing conversation history and viewing statistics.
        """
        ...

    @property
    def history_length(self) -> int:
        """Get the current number of conversation entries."""
        ...

    @property
    def conversation_pairs(self) -> int:
        """Get the number of conversation pairs (user + assistant)."""
        ...
SimpleChat
class SimpleChat:
    """
    Ultra-simple interface for quick chat integration.

    Example:
        chat = SimpleChat()
        response = chat.ask("What's the weather like?")
        print(response)
    """

    def __init__(
        self,
        system_prompt: Optional[str] = None,
        model: Optional[str] = None,
        assistant_name: Optional[str] = None,
    ):
        """Initialize SimpleChat with minimal configuration.

        Args:
            system_prompt: Optional system prompt for the conversation.
            model: Optional model identifier; None uses the default.
            assistant_name: Optional assistant display name.
        """
        ...

    def ask(self, question: str) -> str:
        """Ask a question and get a text response with conversation memory."""
        ...

    def ask_stream(self, question: str):
        """Ask a question and get a streaming response with conversation memory."""
        ...

    def clear_memory(self) -> None:
        """Clear the conversation memory."""
        ...

    def get_conversation(self) -> List[Dict[str, str]]:
        """Get the conversation history in a readable format."""
        ...
Convenience Functions
def quick_chat(
    message: str,
    system_prompt: Optional[str] = None,
    model: Optional[str] = None,
    assistant_name: Optional[str] = None,
) -> str:
    """Quick one-off text chat without conversation memory.

    Args:
        message: The message to send.
        system_prompt: Optional system prompt for this single exchange.
        model: Optional model identifier; None uses the default.
        assistant_name: Optional assistant display name.

    Returns:
        The assistant's response text.
    """
    ...
def quick_chat_with_memory(
    messages: List[str],
    system_prompt: Optional[str] = None,
    model: Optional[str] = None,
    assistant_name: Optional[str] = None,
) -> List[str]:
    """Quick multi-turn chat with conversation memory.

    Args:
        messages: Ordered user messages to send as successive turns.
        system_prompt: Optional system prompt for the conversation.
        model: Optional model identifier; None uses the default.
        assistant_name: Optional assistant display name.

    Returns:
        List of assistant responses, one per input message.
    """
    ...
Usage Examples
Example 1: Basic Chat
from gaia.chat.sdk import AgentSDK, AgentConfig

# Build a configuration, then hand it to the SDK.
basic_config = AgentConfig(
    model="Qwen3-0.6B-GGUF",
    max_tokens=512,
    show_stats=True,
)
chat = AgentSDK(basic_config)

# Send one message and print the assistant's reply.
response = chat.send("Hello, how are you?")
print(response.text)

# Inspect the accumulated conversation history.
history = chat.get_history()
print(f"Conversation has {len(history)} entries")
Example 2: Streaming Chat
# Streaming response: chunks arrive incrementally, with a final
# completion marker carrying aggregate statistics.
for chunk in chat.send_stream("Tell me a story"):
    if not chunk.is_complete:
        # Intermediate chunk: print text as it arrives.
        print(chunk.text, end="", flush=True)
    else:
        # Final chunk with stats
        if chunk.stats:
            print(f"\nTokens: {chunk.stats['total_tokens']}")
Example 3: Chat with RAG
# Enable RAG with documents (PDF paths are indexed for retrieval)
chat.enable_rag(documents=["manual.pdf", "guide.pdf"])
# Query with document context: retrieval augments the prompt
response = chat.send("What are the safety guidelines?")
print(response.text)
# Add more documents to the existing RAG index at any time
chat.add_document("updates.pdf")
Testing Requirements
def test_agent_sdk_basic():
    """Test basic chat functionality: a send() call yields a complete response."""
    config = AgentConfig(model="test-model")
    chat = AgentSDK(config)
    response = chat.send("Hello")
    assert response.text
    assert response.is_complete
def test_conversation_history():
    """Test conversation history tracking across multiple sends."""
    chat = AgentSDK()
    chat.send("My name is Alice")
    chat.send("What's my name?")
    history = chat.get_history()
    # Two sends produce at least two history entries (user turns).
    assert len(history) >= 2
    assert "Alice" in history[0]
def test_rag_integration():
    """Test RAG functionality: enabling RAG and querying with document context."""
    chat = AgentSDK()
    # Enable RAG
    result = chat.enable_rag(documents=["test.pdf"])
    assert chat.rag_enabled
    # Query with RAG
    response = chat.send("Summarize the document")
    assert response.text
Dependencies
# Core runtime dependencies of the chat SDK.
[project]
dependencies = [
"gaia.llm.llm_client",
"gaia.chat.prompts",
]
# Optional extra: install to enable RAG (document Q&A) support.
[project.optional-dependencies]
rag = ["gaia.rag.sdk"]
AgentSDK Technical Specification