Code Examples

Practical examples demonstrating how to use Nemo for various AI memory applications. Each example includes complete code, explanations, and real-world use cases.

Simple Chatbot with Memory

🤖
Conversational AI with Memory
Build a chatbot that remembers previous conversations and learns from user interactions.
Use Case

A customer service chatbot that remembers user preferences, previous issues, and conversation context to provide personalized support.

Python Implementation
import nemo
import datetime
from typing import Dict, List


class MemoryChatbot:
    def __init__(self, api_key: str, user_id: str):
        self.client = nemo.Client(api_key=api_key)
        self.user_id = user_id
        self.memory_id = None
        self._initialize_memory()

    def _initialize_memory(self):
        """Initialize or retrieve existing memory space for the user"""
        try:
            # Create new memory space
            self.memory_id = self.client.memory.allocate(
                size="500MB",
                type="episodic",
                persistence="persistent",
                name=f"chatbot_memory_{self.user_id}",
                metadata={"user_id": self.user_id, "type": "conversational"}
            )
        except Exception as e:
            print(f"Error initializing memory: {e}")

    def chat(self, user_message: str) -> str:
        """Process user message and generate response with memory"""
        # Store the user message
        self._store_message(user_message, "user")

        # Retrieve relevant memories
        relevant_memories = self._get_relevant_memories(user_message)

        # Generate response (simplified - integrate with your LLM)
        response = self._generate_response(user_message, relevant_memories)

        # Store the bot response
        self._store_message(response, "assistant")

        return response

    def _store_message(self, message: str, role: str):
        """Store a message in memory"""
        self.client.memory.store(
            memory_id=self.memory_id,
            data={
                "message": message,
                "role": role,
                "timestamp": datetime.datetime.now().isoformat(),
                "user_id": self.user_id
            },
            associations=[role, "conversation", self.user_id],
            strength=0.8 if role == "user" else 0.6
        )

    def _get_relevant_memories(self, query: str, limit: int = 5) -> List[Dict]:
        """Retrieve relevant memories for context"""
        try:
            memories = self.client.memory.retrieve(
                memory_id=self.memory_id,
                query=query,
                similarity_threshold=0.6,
                limit=limit,
                include_context=True
            )
            return memories
        except Exception as e:
            print(f"Error retrieving memories: {e}")
            return []

    def _generate_response(self, user_message: str, memories: List[Dict]) -> str:
        """Generate response using memories as context"""
        # Simple response generation (replace with your LLM integration)
        if "hello" in user_message.lower():
            if memories:
                return "Hello again! I remember our previous conversations. How can I help you today?"
            return "Hello! I'm your memory-enabled assistant. How can I help you?"
        elif "remember" in user_message.lower():
            if memories:
                return f"I remember {len(memories)} related conversations. What specifically would you like to know?"
            return "I don't have any relevant memories about that topic yet."
        else:
            base_response = "I understand you're asking about: " + user_message
            if memories:
                base_response += (
                    f"\n\nBased on our previous conversations, I recall we discussed "
                    f"similar topics {len(memories)} times."
                )
            return base_response


# Usage example
if __name__ == "__main__":
    bot = MemoryChatbot(api_key="your_api_key", user_id="user_123")

    # Simulate conversation
    print(bot.chat("Hello, I'm looking for help with my account"))
    print(bot.chat("I had an issue with billing last week"))
    print(bot.chat("Do you remember what we discussed about my billing issue?"))
JavaScript Implementation
const { Client } = require('@nemo/sdk');

class MemoryChatbot {
  constructor(apiKey, userId) {
    this.client = new Client({ apiKey });
    this.userId = userId;
    this.memoryId = null;
  }

  async initialize() {
    try {
      // Create new memory space
      this.memoryId = await this.client.memory.allocate({
        size: '500MB',
        type: 'episodic',
        persistence: 'persistent',
        name: `chatbot_memory_${this.userId}`,
        metadata: { user_id: this.userId, type: 'conversational' }
      });
    } catch (error) {
      console.error('Error initializing memory:', error);
    }
  }

  async chat(userMessage) {
    // Store the user message
    await this.storeMessage(userMessage, 'user');

    // Retrieve relevant memories
    const relevantMemories = await this.getRelevantMemories(userMessage);

    // Generate response
    const response = await this.generateResponse(userMessage, relevantMemories);

    // Store the bot response
    await this.storeMessage(response, 'assistant');

    return response;
  }

  async storeMessage(message, role) {
    try {
      await this.client.memory.store({
        memory_id: this.memoryId,
        data: {
          message,
          role,
          timestamp: new Date().toISOString(),
          user_id: this.userId
        },
        associations: [role, 'conversation', this.userId],
        strength: role === 'user' ? 0.8 : 0.6
      });
    } catch (error) {
      console.error('Error storing message:', error);
    }
  }

  async getRelevantMemories(query, limit = 5) {
    try {
      const memories = await this.client.memory.retrieve({
        memory_id: this.memoryId,
        query,
        similarity_threshold: 0.6,
        limit,
        include_context: true
      });
      return memories.results || [];
    } catch (error) {
      console.error('Error retrieving memories:', error);
      return [];
    }
  }

  async generateResponse(userMessage, memories) {
    const lowerMessage = userMessage.toLowerCase();

    if (lowerMessage.includes('hello')) {
      return memories.length > 0
        ? "Hello again! I remember our previous conversations. How can I help you today?"
        : "Hello! I'm your memory-enabled assistant. How can I help you?";
    }

    if (lowerMessage.includes('remember')) {
      return memories.length > 0
        ? `I remember ${memories.length} related conversations. What specifically would you like to know?`
        : "I don't have any relevant memories about that topic yet.";
    }

    let response = `I understand you're asking about: ${userMessage}`;
    if (memories.length > 0) {
      response += `\n\nBased on our previous conversations, I recall we discussed similar topics ${memories.length} times.`;
    }
    return response;
  }
}

// Usage example
async function runChatbotExample() {
  const bot = new MemoryChatbot('your_api_key', 'user_123');
  await bot.initialize();

  console.log(await bot.chat("Hello, I'm looking for help with my account"));
  console.log(await bot.chat("I had an issue with billing last week"));
  console.log(await bot.chat("Do you remember what we discussed about my billing issue?"));
}

module.exports = MemoryChatbot;
Expected Output
Hello! I'm your memory-enabled assistant. How can I help you?
I understand you're asking about: I had an issue with billing last week
I remember 2 related conversations. What specifically would you like to know?
Integration Tip

Replace the simple response generation with your preferred LLM (OpenAI, Claude, Llama, etc.) and use the retrieved memories as context in your prompts for more sophisticated responses.
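As a rough sketch of that integration (not part of the Nemo SDK), the `_generate_response` method from the Python example above could be overridden to call the OpenAI chat completions API, flattening the retrieved memories into the system prompt. The memory dict layout and model name here are assumptions for illustration:

from openai import OpenAI as OpenAIClient


class LLMMemoryChatbot(MemoryChatbot):
    """MemoryChatbot variant that delegates response generation to an LLM."""

    def __init__(self, api_key: str, user_id: str, openai_api_key: str):
        super().__init__(api_key=api_key, user_id=user_id)
        self.llm = OpenAIClient(api_key=openai_api_key)

    def _generate_response(self, user_message, memories):
        # Assumes each retrieved memory is a dict with a "data" payload
        # holding the "role" and "message" fields stored by _store_message.
        context = "\n".join(
            f"[{m['data']['role']}] {m['data']['message']}" for m in memories
        )
        completion = self.llm.chat.completions.create(
            model="gpt-4o-mini",  # any chat-completion model works here
            messages=[
                {
                    "role": "system",
                    "content": "You are a customer support assistant. "
                               "Relevant snippets from past conversations:\n" + context,
                },
                {"role": "user", "content": user_message},
            ],
        )
        return completion.choices[0].message.content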

Basic Memory System

🧠
Simple Memory Operations
Basic example showing memory allocation, storage, and retrieval.
Basic Memory Operations
import nemo

# Initialize client
client = nemo.Client(api_key="your_api_key")

# Allocate memory
memory_id = client.memory.allocate(
    size="1GB",
    type="episodic",
    persistence="session"
)

# Store some memories
for i in range(5):
    client.memory.store(
        memory_id=memory_id,
        data={
            "content": f"This is memory item {i}",
            "category": "example",
            "number": i
        },
        associations=["example", f"item_{i}"],
        strength=0.7
    )

# Retrieve memories
memories = client.memory.retrieve(
    memory_id=memory_id,
    query="memory item",
    limit=3
)

print(f"Found {len(memories)} memories")
for memory in memories:
    print(f"- {memory.data['content']}")

# Clean up
client.memory.deallocate(memory_id)

LangChain Integration

🔗
LangChain Memory Integration
Integrate Nemo with LangChain for advanced conversational AI with persistent memory.
Use Case

A sophisticated chatbot that uses LangChain for conversation flow and Nemo for persistent, searchable memory across sessions.

LangChain + Nemo Implementation
import nemo
from datetime import datetime
from langchain.schema import BaseMemory
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from typing import Dict, List, Any, Optional


class NemoLangChainMemory(BaseMemory):
    """Custom LangChain memory implementation using Nemo"""

    # Declared as fields so BaseMemory (a pydantic model) accepts the attributes
    nemo_client: Any = None
    session_id: str = ""
    memory_key: str = "history"
    memory_id: Optional[str] = None

    def __init__(self, nemo_api_key: str, session_id: str, memory_key: str = "history"):
        super().__init__()
        self.nemo_client = nemo.Client(api_key=nemo_api_key)
        self.session_id = session_id
        self.memory_key = memory_key

        # Initialize Nemo memory space
        self.memory_id = self._get_or_create_memory_space()

    def _get_or_create_memory_space(self) -> str:
        """Get existing memory space or create new one for this session"""
        try:
            # Create new memory space
            return self.nemo_client.memory.allocate(
                size="1GB",
                type="episodic",
                persistence="persistent",
                name=f"langchain_session_{self.session_id}",
                metadata={"session_id": self.session_id, "framework": "langchain"}
            )
        except Exception as e:
            print(f"Error with memory space: {e}")
            return None

    @property
    def memory_variables(self) -> List[str]:
        """Return memory variables that this memory class provides"""
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Load relevant conversation history from Nemo"""
        current_input = inputs.get("input", "")

        # Retrieve relevant memories based on current input
        try:
            memories = self.nemo_client.memory.retrieve(
                memory_id=self.memory_id,
                query=current_input,
                similarity_threshold=0.5,
                limit=10,
                include_context=True
            )

            # Format memories for LangChain
            formatted_history = []
            for memory in memories:
                if memory["data"].get("type") == "conversation":
                    human_msg = memory["data"].get("human", "")
                    ai_msg = memory["data"].get("ai", "")
                    if human_msg and ai_msg:
                        formatted_history.append(f"Human: {human_msg}")
                        formatted_history.append(f"AI: {ai_msg}")

            return {self.memory_key: "\n".join(formatted_history[-10:])}  # Last 10 lines
        except Exception as e:
            print(f"Error loading memory: {e}")
            return {self.memory_key: ""}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save conversation turn to Nemo memory"""
        human_input = inputs.get("input", "")
        ai_output = outputs.get("response", "")

        try:
            # Store conversation turn
            conversation_data = {
                "type": "conversation",
                "human": human_input,
                "ai": ai_output,
                "session_id": self.session_id,
                "timestamp": datetime.now().isoformat()
            }

            # Generate associations for better retrieval
            associations = ["conversation", self.session_id]

            self.nemo_client.memory.store(
                memory_id=self.memory_id,
                data=conversation_data,
                associations=associations,
                strength=0.8
            )
        except Exception as e:
            print(f"Error saving context: {e}")

    def clear(self) -> None:
        """Clear memory (optional implementation)"""
        pass


class SmartConversationChain:
    """Enhanced conversation chain with Nemo memory"""

    def __init__(self, nemo_api_key: str, openai_api_key: str, session_id: str):
        # Initialize LLM
        self.llm = OpenAI(
            api_key=openai_api_key,
            temperature=0.7,
            max_tokens=500
        )

        # Initialize Nemo memory
        self.memory = NemoLangChainMemory(
            nemo_api_key=nemo_api_key,
            session_id=session_id
        )

        # Create conversation chain
        self.chain = ConversationChain(
            llm=self.llm,
            memory=self.memory,
            verbose=True
        )

    def chat(self, message: str) -> str:
        """Process message through the conversation chain"""
        try:
            response = self.chain.predict(input=message)
            return response
        except Exception as e:
            return f"Sorry, I encountered an error: {e}"


# Usage example
if __name__ == "__main__":
    # Initialize the smart conversation system
    chat_system = SmartConversationChain(
        nemo_api_key="your_nemo_api_key",
        openai_api_key="your_openai_api_key",
        session_id="user_session_123"
    )

    # Simulate conversation
    print("=== Smart Conversation with Memory ===")

    response1 = chat_system.chat("Hi, I'm working on a machine learning project")
    print(f"AI: {response1}")

    response2 = chat_system.chat("What are some good neural network architectures?")
    print(f"AI: {response2}")

    response3 = chat_system.chat("Can you remind me what we discussed about my project?")
    print(f"AI: {response3}")