AgnoAGI Integration with Alchemyst AI
Overview
This guide demonstrates how to integrate Alchemyst AI’s memory capabilities with the AgnoAGI framework in Python. You’ll learn how to build lightning-fast, context-aware AI agents that can remember, learn, and maintain state across interactions using both frameworks together.
What is AgnoAGI?
AgnoAGI (formerly Phidata) is a lightweight Python framework designed for building multi-modal AI agents with exceptional performance. When combined with Alchemyst AI’s memory system, it creates powerful applications that can:
- Instantiate agents ~10,000x faster than LangGraph
- Use ~50x less memory than traditional frameworks
- Maintain persistent memory across sessions with Alchemyst AI
- Learn from user interactions and provide contextually relevant responses
- Scale from prototypes to production systems effortlessly
Prerequisites
Before starting, ensure you have:
- Python 3.8 or higher
- An Alchemyst AI account and API key (Get one here)
- Basic understanding of Python and AI concepts
Installation
1. Install Required Packages
pip install agno alchemystai openai anthropic python-dotenv
2. Set Up Environment Variables
Create a .env file in your project directory:
ALCHEMYST_AI_API_KEY=your_alchemyst_api_key_here
OPENAI_API_KEY=your_openai_api_key_here
ANTHROPIC_API_KEY=your_anthropic_api_key_here
Basic Integration
Step 1: Initialize Alchemyst Memory Client
import os
from alchemystai import AlchemystAI
# Initialize Alchemyst AI client for memory management
alchemyst_client = AlchemystAI(
api_key=os.getenv("ALCHEMYST_AI_API_KEY")
)
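To confirm the client is wired up, you can do a quick round trip through the context API. The add and search calls below mirror the ones used throughout this guide; the document content and group name are illustrative:

# Store a small test document, then search for it
alchemyst_client.v1.context.add(
    documents=[{
        "content": "Integration smoke test document",
        "metadata": {
            "filename": "smoke_test",
            "filetype": "txt",
            "groupName": ["integration_test"],
        },
    }],
    source="smoke_test",
    context_type="resource",
    scope="internal",
)

results = alchemyst_client.v1.context.search(
    query="smoke test",
    similarity_threshold=0.5,
    minimum_similarity_threshold=0.2,
    scope="internal",
)
print(getattr(results, "contexts", []))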
Step 2: Build a CLI Knowledge Agent with Alchemyst Memory
"""
CLI QnA Agent with Alchemyst AI Memory and Knowledge Base
Uses OpenAI for chat completion and Alchemyst AI for persistent memory
"""
import os
from datetime import datetime
from typing import Dict, List
from alchemystai import AlchemystAI
from dotenv import load_dotenv
from openai import OpenAI
load_dotenv()
class AlchemystKnowledgeAgent:
"""Agent that combines a knowledge base with Alchemyst's memory"""
def __init__(self):
# Initialize OpenAI client
self.openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# Initialize Alchemyst AI client for memory
self.alchemyst_client = AlchemystAI(api_key=os.getenv("ALCHEMYST_AI_API_KEY"))
# Session configuration
self.session_id = f"session_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
self.context_group = "knowledge_agent"
self.conversation_history: List[Dict[str, str]] = []
# Simple knowledge base (can be replaced with vector DB)
self.knowledge_base: List[str] = ["Sample knowledge base text"]
print(f"✓ Knowledge Agent initialized (Session: {self.session_id})")
def add_knowledge_to_alchemyst(self, documents: List[str], source: str = "user"):
"""Add documents to both local knowledge base and Alchemyst memory"""
doc_objects = [
{
"content": doc,
"metadata": {
"filename": f"knowledge_{i}",
"filetype": "txt",
"groupName": [self.context_group]
}
}
for i, doc in enumerate(documents)
]
self.alchemyst_client.v1.context.add(
documents=doc_objects,
source=source,
context_type="resource",
scope="internal"
)
self.knowledge_base.extend(documents)
def save_to_memory(self, role: str, content: str):
"""Save conversation turn to Alchemyst memory"""
try:
self.alchemyst_client.v1.context.add(
documents=[
{
"content": f"{role}: {content}",
"metadata": {
"filename": f"{self.session_id}_{len(self.conversation_history)}",
"filetype": "txt",
"groupName": [self.context_group, self.session_id],
},
}
],
source="conversation",
context_type="conversation",
scope="internal",
            metadata={
                "file_name": f"{self.session_id}_{len(self.conversation_history)}",
                "file_size": len(content),
                "file_type": "ai/conversation",
                "group_name": [self.context_group],
                "last_modified": datetime.now().isoformat(),
            },
)
except Exception as e:
print(f"Warning: Failed to save to memory: {e}")
def get_relevant_context(self, query: str, limit: int = 3) -> str:
"""Retrieve relevant context from Alchemyst memory"""
try:
results = self.alchemyst_client.v1.context.search(
query=query,
similarity_threshold=0.7,
scope="internal",
                body_metadata={"groupName": [self.context_group]},
)
if hasattr(results, "contexts") and results.contexts:
context_items = []
for i, ctx in enumerate(results.contexts[:limit]):
content = getattr(ctx, "content", "")
context_items.append(f"Context {i+1}: {content}")
return "\n".join(context_items)
return ""
except Exception as e:
print(f"Warning: Failed to retrieve context: {e}")
return ""
def ask(self, question: str) -> str:
"""Ask a question and get an answer with memory and knowledge context"""
# Get relevant context from memory
relevant_context = self.get_relevant_context(question)
# Build enhanced prompt with context and knowledge base
knowledge_context = "\n".join([f"KB {i+1}: {kb}" for i, kb in enumerate(self.knowledge_base[:3])])
system_message = "You are a helpful AI assistant with memory and a knowledge base."
if relevant_context:
system_message += f"\n\nRelevant context from memory:\n{relevant_context}"
if knowledge_context:
system_message += f"\n\nRelevant knowledge base entries:\n{knowledge_context}"
# Add current question to conversation history
self.conversation_history.append({"role": "user", "content": question})
# Prepare messages for OpenAI (last 10 messages to stay within token limits)
messages = [
{"role": "system", "content": system_message}
] + self.conversation_history[-10:]
# Get response from OpenAI
try:
response = self.openai_client.chat.completions.create(
model="gpt-4o-mini", messages=messages, temperature=0.7, max_tokens=500
)
answer = response.choices[0].message.content
# Add assistant response to conversation history
self.conversation_history.append({"role": "assistant", "content": answer})
# Save both question and answer to Alchemyst memory
self.save_to_memory("user", question)
self.save_to_memory("assistant", answer)
return answer
except Exception as e:
return f"Error: {e}"
def show_history(self):
"""Display conversation history"""
if not self.conversation_history:
print("\nNo conversation history yet.")
return
print("\n" + "=" * 60)
print("CONVERSATION HISTORY")
print("=" * 60)
for i, msg in enumerate(self.conversation_history, 1):
role = "You" if msg["role"] == "user" else "Agent"
print(f"\n[{i}] {role}: {msg['content']}")
print("\n" + "=" * 60)
def search_memory(self, query: str):
"""Search through all saved memories"""
try:
results = self.alchemyst_client.v1.context.search(
query=query,
similarity_threshold=0.5,
scope="internal",
minimum_similarity_threshold=0.2,
body_metadata=None,
)
print("🔍 Searching memory...")
if hasattr(results, "contexts") and results.contexts:
print(f"\n🔍 Found {len(results.contexts)} relevant memories:")
print("=" * 60)
for i, result in enumerate(results.contexts[:5], 1):
content = getattr(result, "content", "")
print(f"\n[{i}] {content}")
print("\n" + "=" * 60)
else:
print("\n🔍 No relevant memories found.")
except Exception as e:
print(f"Error searching memory: {e}")
def print_welcome():
"""Print welcome message"""
print("\n" + "=" * 60)
print(" CLI Knowledge Agent WITH MEMORY")
print(" Powered by OpenAI + Alchemyst AI")
print("=" * 60)
print("\nCommands:")
print(" - Type your question to get an answer")
print(" - 'history' - Show conversation history")
print(" - 'search <query>' - Search through saved memories")
print(" - 'addkb <text>' - Add text to knowledge base")
print(" - 'exit' or 'quit' - Exit the program")
print("\n" + "=" * 60 + "\n")
def main():
"""Main CLI loop"""
print_welcome()
try:
agent = AlchemystKnowledgeAgent()
except Exception as e:
print(f"\n❌ Failed to initialize agent: {e}")
print("\nPlease ensure you have set the following environment variables:")
print(" - OPENAI_API_KEY")
print(" - ALCHEMYST_AI_API_KEY")
return
while True:
try:
user_input = input("\n💬 You: ").strip()
if not user_input:
continue
if user_input.lower() in ["exit", "quit", "q"]:
print("\n👋 Goodbye! Your conversation has been saved to memory.")
break
if user_input.lower() == "history":
agent.show_history()
continue
if user_input.lower().startswith("search "):
query = user_input[7:].strip()
if query:
agent.search_memory(query)
else:
print("\n⚠️ Please provide a search query: search <query>")
continue
if user_input.lower().startswith("addkb "):
kb_text = user_input[6:].strip()
if kb_text:
agent.add_knowledge_to_alchemyst([kb_text])
print("✅ Added to knowledge base and memory.")
else:
print("\n⚠️ Please provide text to add: addkb <text>")
continue
print("\n🤖 Agent: ", end="", flush=True)
answer = agent.ask(user_input)
print(answer)
except KeyboardInterrupt:
print("\n\n👋 Goodbye! Your conversation has been saved to memory.")
break
except Exception as e:
print(f"\n❌ Error: {e}")
if __name__ == "__main__":
main()
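Shared Storage Adapter for Agno Agents
Patterns 2 and 3 below share agent state through an AlchemystMemoryStorage adapter. The Alchemyst SDK does not ship such a class, so here is a minimal, duck-typed sketch that matches the create/read usage shown later in this guide. The interface, field names, and JSON serialization are assumptions, not an official Agno storage backend; for production, adapt it to Agno's Storage interface:

import json
from typing import Any, Dict, Optional

from alchemystai import AlchemystAI


class AlchemystMemoryStorage:
    """Minimal session-storage adapter backed by Alchemyst contexts (sketch)."""

    def __init__(self, alchemyst_client: AlchemystAI, context_id: str):
        self.client = alchemyst_client
        self.context_id = context_id
        # Local cache so read() works without a network round trip
        self._sessions: Dict[str, Dict[str, Any]] = {}

    def create(self, session_id: str, data: Dict[str, Any]) -> None:
        """Persist a session snapshot to Alchemyst and cache it locally."""
        self._sessions[session_id] = data
        self.client.v1.context.add(
            documents=[{
                "content": json.dumps(data),
                "metadata": {
                    "filename": f"{self.context_id}_{session_id}",
                    "filetype": "json",
                    "groupName": [self.context_id, session_id],
                },
            }],
            source="agent_session",
            context_type="resource",
            scope="internal",
        )

    def upsert(self, session_id: str, data: Dict[str, Any]) -> None:
        """Alias for create(); Alchemyst appends context rather than overwriting."""
        self.create(session_id, data)

    def read(self, session_id: str) -> Optional[Dict[str, Any]]:
        """Return the cached session, falling back to an Alchemyst search."""
        if session_id in self._sessions:
            return self._sessions[session_id]
        results = self.client.v1.context.search(
            query=f"{self.context_id}_{session_id}",
            similarity_threshold=0.5,
            scope="internal",
        )
        for ctx in getattr(results, "contexts", []) or []:
            try:
                return json.loads(getattr(ctx, "content", ""))
            except (json.JSONDecodeError, TypeError):
                continue
        return None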
Pattern 2: Multi-Agent Team with Shared Memory
from datetime import datetime
from typing import List

from alchemystai import AlchemystAI
from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.tools.yfinance import YFinanceTools
from agno.tools.duckduckgo import DuckDuckGoTools
class AlchemystMultiAgentTeam:
"""Multi-agent team with shared Alchemyst memory"""
def __init__(self, alchemyst_client: AlchemystAI):
self.client = alchemyst_client
# Shared memory storage
self.shared_storage = AlchemystMemoryStorage(
alchemyst_client=self.client,
context_id="multi_agent_team"
)
# Research Agent
self.researcher = Agent(
name="Research Agent",
role="Search the web for information",
model=Claude(id="claude-sonnet-4-20250514"),
tools=[DuckDuckGoTools(search=True, news=True)],
storage=self.shared_storage,
instructions=[
"Search for accurate, up-to-date information.",
"Always include sources.",
"Share findings with the team."
],
markdown=True
)
# Finance Agent
self.finance_agent = Agent(
name="Finance Agent",
role="Analyze financial data",
model=Claude(id="claude-sonnet-4-20250514"),
tools=[
YFinanceTools(
stock_price=True,
analyst_recommendations=True,
company_info=True
)
],
storage=self.shared_storage,
instructions=[
"Provide detailed financial analysis.",
"Use tables to display data.",
"Consider market trends."
],
markdown=True
)
# Team Leader
self.team_leader = Agent(
name="Team Leader",
team=[self.researcher, self.finance_agent],
model=Claude(id="claude-sonnet-4-20250514"),
storage=self.shared_storage,
instructions=[
"Coordinate team members effectively.",
"Synthesize information from all agents.",
"Provide comprehensive answers.",
"Remember previous team decisions."
],
markdown=True
)
def share_insight(self, agent_name: str, insight: str):
"""Share an insight across all agents via Alchemyst"""
self.client.v1.context.add(
documents=[{
"content": f"{agent_name}: {insight}",
"metadata": {
"filename": f"insight_{agent_name}_{datetime.now().timestamp()}",
"filetype": "txt",
"groupName": ["multi_agent_team", "shared_insights"]
}
}],
source="agent_insight",
context_type="conversation",
scope="internal",
            metadata={
                "file_name": f"insight_{agent_name}_{datetime.now().timestamp()}",
                "file_size": len(insight),
                "file_type": "ai/conversation",
                "group_name": ["multi_agent_team", "shared_insights"],
                "last_modified": datetime.now().isoformat(),
            },
)
def get_team_insights(self) -> List[str]:
"""Retrieve all team insights from Alchemyst"""
results = self.client.v1.context.search(
query="insight",
similarity_threshold=0.5,
scope="internal",
body_metadata={"groupName": ["multi_agent_team", "shared_insights"]}
)
        contexts = getattr(results, "contexts", []) or []
        return [getattr(ctx, "content", "") for ctx in contexts]
def execute_task(self, task: str, session_id: str = "team_session") -> str:
"""Execute a task using the multi-agent team"""
response = self.team_leader.run(task, session_id=session_id)
return response.content
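A short usage sketch for the team (the shared insight and task are illustrative):

if __name__ == "__main__":
    import os

    client = AlchemystAI(api_key=os.getenv("ALCHEMYST_AI_API_KEY"))
    team = AlchemystMultiAgentTeam(client)

    # Seed the shared memory with a finding, then run a task through the leader
    team.share_insight("Research Agent", "NVDA announced new data-center GPUs this quarter.")
    print(team.execute_task("Summarize the outlook for NVDA, combining news and financials."))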
Pattern 3: Reasoning Agent with Persistent Memory
import ast
from datetime import datetime
from typing import Any, Dict, List

from alchemystai import AlchemystAI
from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.tools.reasoning import ReasoningTools
class AlchemystReasoningAgent:
"""Reasoning agent with Alchemyst-backed memory"""
def __init__(self, alchemyst_client: AlchemystAI):
self.client = alchemyst_client
self.storage = AlchemystMemoryStorage(
alchemyst_client=self.client,
context_id="reasoning_agent"
)
self.agent = Agent(
name="Reasoning Assistant",
model=Claude(id="claude-sonnet-4-20250514"),
tools=[ReasoningTools(add_instructions=True)],
storage=self.storage,
add_history_to_messages=True,
num_history_runs=10,
instructions=[
"Think step-by-step before answering.",
"Show your reasoning process.",
"Learn from previous interactions.",
"Adapt your approach based on user feedback."
],
markdown=True,
show_tool_calls=True
)
def learn_from_interaction(
self,
query: str,
response: str,
feedback: str,
session_id: str
):
"""Store interaction with feedback in Alchemyst for learning"""
learning_data = {
"timestamp": datetime.now().isoformat(),
"session_id": session_id,
"query": query,
"response": response,
"feedback": feedback,
"type": "learning_interaction"
}
self.client.v1.context.add(
documents=[{
"content": str(learning_data),
"metadata": {
"filename": f"learning_{session_id}_{datetime.now().timestamp()}",
"filetype": "json",
"groupName": ["reasoning_agent", "learning"]
}
}],
source="user_feedback",
context_type="conversation",
scope="internal"
)
def get_learning_history(self) -> List[Dict[str, Any]]:
"""Retrieve learning history from Alchemyst"""
results = self.client.v1.context.search(
query="learning_interaction",
similarity_threshold=0.6,
scope="internal",
body_metadata={"groupName": ["reasoning_agent", "learning"]}
)
        history = []
        for ctx in getattr(results, "contexts", []) or []:
            try:
                history.append(ast.literal_eval(getattr(ctx, "content", "{}")))
            except (ValueError, SyntaxError):
                continue
        return history
def think_and_respond(
self,
question: str,
session_id: str = "default",
show_reasoning: bool = True
) -> str:
"""Use reasoning with access to learning history"""
# Get relevant learning history
learning_context = self.get_learning_history()
# Enhance prompt with learning
if learning_context:
context_summary = "\n".join([
f"Previous learning: {item.get('feedback', '')}"
for item in learning_context[-5:] # Last 5 learnings
])
enhanced_question = f"""
Consider these previous learnings:
{context_summary}
Question: {question}
"""
else:
enhanced_question = question
# Get response with reasoning
response = self.agent.run(
enhanced_question,
session_id=session_id,
stream=False
)
return response.content
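And a brief usage sketch (the question and feedback text are illustrative):

if __name__ == "__main__":
    import os

    client = AlchemystAI(api_key=os.getenv("ALCHEMYST_AI_API_KEY"))
    reasoner = AlchemystReasoningAgent(client)

    question = "How should I structure a retry policy for flaky APIs?"
    answer = reasoner.think_and_respond(question)
    print(answer)

    # Store feedback so future answers can adapt to it
    reasoner.learn_from_interaction(
        query=question,
        response=answer,
        feedback="Good, but prefer concrete code over abstract advice.",
        session_id="default",
    )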
Complete Example 1: Simple CLI-based QnA Agent
"""
Simple CLI QnA Agent with Alchemyst AI Memory
Uses OpenAI for chat completion and Alchemyst AI for persistent memory
"""
import os
from datetime import datetime
from typing import Dict, List
from alchemystai import AlchemystAI
from dotenv import load_dotenv
from openai import OpenAI
load_dotenv()
class QnAAgent:
"""A simple CLI-based Question & Answer agent with memory"""
def __init__(self):
# Initialize OpenAI client
self.openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# Initialize Alchemyst AI client for memory
self.alchemyst_client = AlchemystAI(api_key=os.getenv("ALCHEMYST_AI_API_KEY"))
# Session configuration
self.session_id = f"session_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
self.context_group = "qna_agent"
self.conversation_history: List[Dict[str, str]] = []
print(f"✓ QnA Agent initialized (Session: {self.session_id})")
def save_to_memory(self, role: str, content: str):
"""Save conversation turn to Alchemyst memory"""
try:
self.alchemyst_client.v1.context.add(
documents=[
{
"content": f"{role}: {content}",
"metadata": {
"filename": f"{self.session_id}_{len(self.conversation_history)}",
"filetype": "txt",
"groupName": [self.context_group, self.session_id],
},
}
],
source="conversation",
context_type="conversation",
scope="internal",
            metadata={
                "file_name": f"{self.session_id}_{len(self.conversation_history)}",
                "file_size": len(content),
                "file_type": "ai/conversation",
                "group_name": [self.context_group],
                "last_modified": datetime.now().isoformat(),
            },
)
except Exception as e:
print(f"Warning: Failed to save to memory: {e}")
def get_relevant_context(self, query: str, limit: int = 3) -> str:
"""Retrieve relevant context from Alchemyst memory"""
try:
results = self.alchemyst_client.v1.context.search(
query=query,
similarity_threshold=0.6,
minimum_similarity_threshold=0.2,
scope="internal",
body_metadata=None, # {"groupName": [self.context_group]},
)
print("🔍 Retrieving relevant context from memory...")
print(results.contexts)
if results.contexts:
context_items = []
for i, result in enumerate(results.contexts[:limit]):
content = getattr(result, "content", "")
context_items.append(f"{i + 1}. {content}")
return "\n".join(context_items)
return ""
except Exception as e:
print(f"Warning: Failed to retrieve context: {e}")
return ""
def ask(self, question: str) -> str:
"""Ask a question and get an answer with memory context"""
# Get relevant context from memory
relevant_context = self.get_relevant_context(question)
# Build system message with context
system_message = "You are a helpful AI assistant with memory. "
if relevant_context:
system_message += (
f"\n\nRelevant context from previous conversations:\n{relevant_context}"
)
# Add current question to conversation history
self.conversation_history.append({"role": "user", "content": question})
# Prepare messages for OpenAI (last 10 messages to stay within token limits)
messages = [
{"role": "system", "content": system_message}
] + self.conversation_history[-10:]
# Get response from OpenAI
try:
response = self.openai_client.chat.completions.create(
model="gpt-4o-mini", messages=messages, temperature=0.7, max_tokens=500
)
answer = response.choices[0].message.content
# Add assistant response to conversation history
self.conversation_history.append({"role": "assistant", "content": answer})
# Save both question and answer to Alchemyst memory
self.save_to_memory("user", question)
self.save_to_memory("assistant", answer)
return answer
except Exception as e:
return f"Error: {e}"
def show_history(self):
"""Display conversation history"""
if not self.conversation_history:
print("\nNo conversation history yet.")
return
print("\n" + "=" * 60)
print("CONVERSATION HISTORY")
print("=" * 60)
for i, msg in enumerate(self.conversation_history, 1):
role = "You" if msg["role"] == "user" else "Agent"
print(f"\n[{i}] {role}: {msg['content']}")
print("\n" + "=" * 60)
def search_memory(self, query: str):
"""Search through all saved memories"""
try:
results = self.alchemyst_client.v1.context.search(
query=query,
similarity_threshold=0.5,
scope="internal",
minimum_similarity_threshold=0.2,
body_metadata=None, # {"groupName": [self.context_group]},
)
print("🔍 Searching memory...")
print(results.contexts)
if results.contexts:
print(f"\n🔍 Found {len(results.contexts)} relevant memories:")
print("=" * 60)
for i, result in enumerate(results.contexts[:5], 1):
content = getattr(result, "content", "")
print(f"\n[{i}] {content}")
print("\n" + "=" * 60)
else:
print("\n🔍 No relevant memories found.")
except Exception as e:
print(f"Error searching memory: {e}")
def print_welcome():
"""Print welcome message"""
print("\n" + "=" * 60)
print(" CLI QnA AGENT WITH MEMORY")
print(" Powered by OpenAI + Alchemyst AI")
print("=" * 60)
print("\nCommands:")
print(" - Type your question to get an answer")
print(" - 'history' - Show conversation history")
print(" - 'search <query>' - Search through saved memories")
print(" - 'exit' or 'quit' - Exit the program")
print("\n" + "=" * 60 + "\n")
def main():
"""Main CLI loop"""
# Print welcome message
print_welcome()
# Initialize agent
try:
agent = QnAAgent()
except Exception as e:
print(f"\n❌ Failed to initialize agent: {e}")
print("\nPlease ensure you have set the following environment variables:")
print(" - OPENAI_API_KEY")
print(" - ALCHEMYST_AI_API_KEY")
return
# Main interaction loop
while True:
try:
# Get user input
user_input = input("\n💬 You: ").strip()
# Handle empty input
if not user_input:
continue
# Handle exit commands
if user_input.lower() in ["exit", "quit", "q"]:
print("\n👋 Goodbye! Your conversation has been saved to memory.")
break
# Handle history command
if user_input.lower() == "history":
agent.show_history()
continue
# Handle search command
if user_input.lower().startswith("search "):
query = user_input[7:].strip()
if query:
agent.search_memory(query)
else:
print("\n⚠️ Please provide a search query: search <query>")
continue
# Process question
print("\n🤖 Agent: ", end="", flush=True)
answer = agent.ask(user_input)
print(answer)
except KeyboardInterrupt:
print("\n\n👋 Goodbye! Your conversation has been saved to memory.")
break
except Exception as e:
print(f"\n❌ Error: {e}")
if __name__ == "__main__":
main()
Complete Example 2: Personal Assistant with Memory
import os
from datetime import datetime
from typing import Any, Dict, List, Optional

from alchemystai import AlchemystAI
from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.tools.reasoning import ReasoningTools
class PersonalAssistantWithMemory:
"""A personal assistant powered by Agno and Alchemyst AI"""
def __init__(self):
# Initialize Alchemyst client
self.alchemyst = AlchemystAI(
api_key=os.getenv("ALCHEMYST_AI_API_KEY")
)
# Initialize custom storage
self.storage = AlchemystMemoryStorage(
alchemyst_client=self.alchemyst,
context_id="personal_assistant"
)
# Create the agent
self.agent = Agent(
name="Personal Assistant",
model=Claude(id="claude-sonnet-4-20250514"),
tools=[
ReasoningTools(add_instructions=True),
DuckDuckGoTools(search=True, news=True)
],
storage=self.storage,
add_history_to_messages=True,
num_history_runs=10,
instructions=[
"You are a helpful personal assistant with memory.",
"Remember user preferences and past interactions.",
"Search the web for current information when needed.",
"Think step-by-step for complex requests.",
"Be proactive and helpful."
],
markdown=True
)
self.session_id = "main_session"
def remember_preference(self, category: str, preference: str):
"""Store a user preference in Alchemyst memory"""
self.alchemyst.v1.context.add(
documents=[{
"content": f"User preference: {category} = {preference}",
"metadata": {
"filename": f"pref_{category}",
"filetype": "txt",
"groupName": ["personal_assistant", "preferences"]
}
}],
source="user_preference",
context_type="instruction",
scope="internal"
)
return f"✓ Remembered: {category} → {preference}"
def get_preferences(self) -> Dict[str, str]:
"""Retrieve all stored preferences"""
results = self.alchemyst.v1.context.search(
query="User preference",
similarity_threshold=0.7,
scope="internal",
body_metadata={"groupName": ["personal_assistant", "preferences"]}
)
        preferences = {}
        for ctx in getattr(results, "contexts", []) or []:
            content = getattr(ctx, "content", "")
if '=' in content:
parts = content.split('=')
if len(parts) == 2:
key = parts[0].replace('User preference:', '').strip()
value = parts[1].strip()
preferences[key] = value
return preferences
    def add_note(self, note: str, tags: Optional[List[str]] = None):
"""Add a note to memory"""
tags = tags or []
self.alchemyst.v1.context.add(
documents=[{
"content": note,
"metadata": {
"filename": f"note_{datetime.now().timestamp()}",
"filetype": "txt",
"groupName": ["personal_assistant", "notes"] + tags
}
}],
source="user_note",
context_type="resource",
scope="internal",
metadata={"tags": tags}
)
return "✓ Note saved"
def search_notes(self, query: str) -> List[str]:
"""Search through saved notes"""
results = self.alchemyst.v1.context.search(
query=query,
similarity_threshold=0.6,
scope="internal",
body_metadata={"groupName": ["personal_assistant", "notes"]}
)
        contexts = getattr(results, "contexts", []) or []
        return [getattr(ctx, "content", "") for ctx in contexts]
def chat(self, message: str) -> str:
"""Chat with the assistant"""
# Get relevant preferences and notes
preferences = self.get_preferences()
# Enhance message with context
if preferences:
pref_text = "\n".join([f"- {k}: {v}" for k, v in preferences.items()])
context = f"\nUser preferences:\n{pref_text}\n\n"
else:
context = ""
# Search for relevant notes
relevant_notes = self.search_notes(message)
if relevant_notes:
notes_text = "\n".join([f"- {note}" for note in relevant_notes[:3]])
context += f"\nRelevant notes:\n{notes_text}\n\n"
enhanced_message = context + message if context else message
# Get response from agent
response = self.agent.run(enhanced_message, session_id=self.session_id)
return response.content
def get_conversation_history(self) -> List[Dict[str, Any]]:
"""Get conversation history from storage"""
session_data = self.storage.read(self.session_id)
if session_data and 'messages' in session_data:
return session_data['messages']
return []
if __name__ == "__main__":
# Initialize assistant
assistant = PersonalAssistantWithMemory()
print("=== Personal Assistant Demo ===\n")
# Store some preferences
print(assistant.remember_preference("communication_style", "concise and friendly"))
print(assistant.remember_preference("timezone", "EST"))
print(assistant.remember_preference("work_focus", "Python development and AI"))
# Add some notes
print("\n" + assistant.add_note(
"Meeting with team on Friday at 2pm to discuss Q1 goals",
tags=["meeting", "team"]
))
print(assistant.add_note(
"Need to review the new Agno documentation for the integration project",
tags=["todo", "development"]
))
# Chat with assistant
print("\n=== Chat Session ===\n")
print("User: What do you know about my preferences?")
print(f"Assistant: {assistant.chat('What do you know about my preferences?')}\n")
print("User: What meetings do I have coming up?")
print(f"Assistant: {assistant.chat('What meetings do I have coming up?')}\n")
print("User: Tell me about AgnoAGI and its performance benefits")
print(f"Assistant: {assistant.chat('Tell me about AgnoAGI and its performance benefits')}\n")
# Show conversation history
print("\n=== Conversation History ===")
history = assistant.get_conversation_history()
print(f"Total messages in session: {len(history)}")
Best Practices
1. Memory Organization
# Use consistent naming conventions
storage = AlchemystMemoryStorage(
alchemyst_client=client,
context_id="project_name_agent_type" # Clear, hierarchical naming
)
# Group related data with groupName
documents = [{
"content": content,
"metadata": {
"groupName": ["project", "feature", "subtype"] # Hierarchical organization
}
}]
2. Context Management
def get_relevant_context(client, query, context_type="all"):
"""Intelligently fetch only relevant context"""
# Use appropriate thresholds
similarity_thresholds = {
"exact": 0.9,
"high": 0.7,
"medium": 0.5,
"low": 0.3
}
results = client.v1.context.search(
query=query,
similarity_threshold=similarity_thresholds.get("high", 0.7),
minimum_similarity_threshold=similarity_thresholds.get("low", 0.3),
scope="internal"
)
    # Limit context to prevent token overflow
    contexts = getattr(results, "contexts", []) or []
    return contexts[:5]
3. Performance Optimization
# Leverage Agno's speed for instantiation
agents = [
Agent(
model=Claude(id="claude-sonnet-4-20250514"),
storage=shared_storage # Reuse storage instance
)
for _ in range(100) # Create 100 agents in ~200μs total
]
# Batch Alchemyst operations when possible
def batch_add_memories(client, documents_list):
"""Add multiple documents in one API call"""
client.v1.context.add(
documents=documents_list,
source="batch_operation",
context_type="resource",
scope="internal"
)
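For example, assuming client is an initialized AlchemystAI instance, a batch built with the document shape used throughout this guide:

documents_list = [
    {
        "content": text,
        "metadata": {
            "filename": f"batch_{i}",
            "filetype": "txt",
            "groupName": ["batch_demo"],
        },
    }
    for i, text in enumerate(["First note", "Second note", "Third note"])
]
batch_add_memories(client, documents_list)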
4. Error Handling
import time


def safe_memory_operation(operation, *args, **kwargs):
"""Wrapper for safe memory operations"""
max_retries = 3
for attempt in range(max_retries):
try:
return operation(*args, **kwargs)
except Exception as e:
if attempt == max_retries - 1:
print(f"Memory operation failed after {max_retries} attempts: {e}")
return None
time.sleep(2 ** attempt) # Exponential backoff
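For example, wrapping a context write (again assuming an initialized client; the document content is illustrative):

safe_memory_operation(
    client.v1.context.add,
    documents=[{
        "content": "Important fact to persist",
        "metadata": {"filename": "fact_1", "filetype": "txt", "groupName": ["notes"]},
    }],
    source="notes",
    context_type="resource",
    scope="internal",
)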
Production Deployment
1. Environment Configuration
import os
from dotenv import load_dotenv
load_dotenv()
class Config:
ALCHEMYST_API_KEY = os.getenv("ALCHEMYST_AI_API_KEY")
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
AGENT_CONTEXT_ID = os.getenv("AGENT_CONTEXT_ID", "production_agent")
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
MAX_HISTORY_RUNS = int(os.getenv("MAX_HISTORY_RUNS", "10"))
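A small fail-fast check at startup catches missing keys before any agent runs (a sketch you can extend):

def validate_config():
    """Raise early if required API keys are missing."""
    required = {
        "ALCHEMYST_AI_API_KEY": Config.ALCHEMYST_API_KEY,
        "ANTHROPIC_API_KEY": Config.ANTHROPIC_API_KEY,
    }
    missing = [name for name, value in required.items() if not value]
    if missing:
        raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")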
2. Monitoring and Logging
import logging
from datetime import datetime

from alchemystai import AlchemystAI
from agno.agent import Agent
from agno.models.anthropic import Claude

# AlchemystMemoryStorage is the adapter sketched earlier in this guide
class MonitoredAgent:
def __init__(self, alchemyst_client, agent_name):
self.logger = logging.getLogger(agent_name)
self.logger.setLevel(logging.INFO)
self.storage = AlchemystMemoryStorage(
alchemyst_client=alchemyst_client,
context_id=agent_name
)
self.agent = Agent(
name=agent_name,
model=Claude(id="claude-sonnet-4-20250514"),
storage=self.storage,
markdown=True
)
def run_with_monitoring(self, query, session_id="default"):
start_time = datetime.now()
try:
response = self.agent.run(query, session_id=session_id)
duration = (datetime.now() - start_time).total_seconds()
self.logger.info(
f"Query processed in {duration:.2f}s | "
f"Session: {session_id} | "
f"Query length: {len(query)}"
)
return response.content
except Exception as e:
self.logger.error(f"Error processing query: {e}")
return "I encountered an error. Please try again."
3. Docker Deployment
FROM python:3.11-slim
WORKDIR /app
# Install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application
COPY . .
# Set environment variables
ENV PYTHONUNBUFFERED=1
# Run the application
CMD ["python", "main.py"]
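Build and run the image, passing your keys via an env file (the image name is illustrative):

docker build -t agno-alchemyst-agent .
docker run --env-file .env agno-alchemyst-agent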
Troubleshooting
Common Issues
Memory not persisting:
- Check API credentials and network connectivity
- Verify context_id is consistent across sessions
- Ensure proper error handling for memory operations
Slow performance:
- Agent instantiation is rarely the bottleneck (Agno agents instantiate in microseconds); profile API calls first
- Use batch operations for Alchemyst API calls
- Optimize search queries with appropriate similarity thresholds
Setup and configuration errors:
- Verify Agno and Alchemyst SDK versions are compatible
- Check environment variables are properly set
- Review API rate limits and quotas
Debug Mode
# Enable debug logging
import logging
logging.basicConfig(level=logging.DEBUG)
# Test storage connection
storage = AlchemystMemoryStorage(alchemyst_client, "test")
storage.create("test_session", {"test": "data"})
result = storage.read("test_session")
print(f"Storage test: {result}")
Performance Comparison
Agno + Alchemyst offers exceptional performance:

| Metric | Agno + Alchemyst | Traditional Frameworks |
|---|---|---|
| Agent instantiation | ~2μs | ~20-30ms |
| Memory footprint | ~3.75 KiB | ~200 KiB |
| Context retrieval | <100ms | 500-1000ms |
| Multi-agent scaling | Linear | Exponential overhead |
Next Steps
- Experiment with different agent configurations and memory patterns
- Scale your application using Agno’s multi-agent teams
- Monitor memory usage and performance in production
- Extend functionality by integrating additional Agno tools

