
GitHub All-Stars #2: Mem0 - Creating memory for stateless AI minds


Artur Skowroński

Head of Java/Kotlin Space
Sep 3, 2025 | 17 min read

if new_fact.topic == old_fact.topic
    AND new_fact.timestamp > old_fact.timestamp
    AND is_not_contradictory(...) then update(...) else if ...
Given this context, which of these four actions (ADD, UPDATE, DELETE, NOOP) should I take?
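To make the contrast with the rule-based version concrete, here is a minimal sketch of the LLM-driven approach: the existing memories and the new fact are handed to the model, which picks one of the four actions. The `decide_memory_action` helper and the exact prompt wording are hypothetical illustrations, not Mem0's internal code.

from openai import OpenAI

client = OpenAI()

def decide_memory_action(existing_memories: list[str], new_fact: str) -> str:
    """Hypothetical sketch: ask the LLM which of the four actions to take."""
    prompt = (
        "Existing memories:\n" + "\n".join(existing_memories) +
        f"\n\nNew fact: {new_fact}\n"
        "Given this context, which of these four actions (ADD, UPDATE, DELETE, NOOP) "
        "should I take? Answer with one word."
    )
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content.strip()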

from mem0 import Memory

memory_config = {
    "vector_store": {
        "provider": "azure_ai_search",
        "config": {
            "service_name": "your-search-service",
            "api_key": "your-api-key",
            "collection_name": "memories",
        },
    },
    # ... embedder and llm configs also required
}
memory = Memory.from_config(memory_config)
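For orientation, the elided sections could look like the sketch below, assuming mem0's stock openai providers; the model names are illustrative, not taken from the article.

# Hypothetical "llm" and "embedder" entries (merge into memory_config
# above before the Memory.from_config call); models are illustrative
memory_config["llm"] = {
    "provider": "openai",
    "config": {"model": "gpt-4o-mini"},
}
memory_config["embedder"] = {
    "provider": "openai",
    "config": {"model": "text-embedding-3-small"},
}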
from mem0 import Memory

m = Memory()

messages = [
    {
        "role": "user",
        "content": "I like to drink coffee in the morning and go for a walk"
    }
]
result = m.add(messages, user_id="alice", metadata={"category": "preferences"})
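The return value is worth a look: each extracted memory comes back tagged with the action the LLM chose. A short sketch, assuming the v1.1-style response shape.

# Assumes add() returns {"results": [{"memory": ..., "event": ...}, ...]}
for entry in result.get("results", []):
    print(entry["event"], "->", entry["memory"])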
search_results = memory.search(
    "What are this user's travel plans?",
    user_id="demo_user",
    limit=3
)
for i, result in enumerate(search_results['results'], 1):
    print(f"{i}. {result['memory']} (Score: {result['score']:.4f})")
from mem0 import Memory

config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "test",
            "host": "localhost",
            "port": 6333,
            "embedding_model_dims": 768,  # Adjust to your model’s dimensions
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.1:latest"
        }
    },
}
memory = Memory.from_config(config)
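One caveat: as written, the config falls back to the default embedder, whose vector size is unlikely to match the 768 dimensions declared above. A hedged sketch of the missing entry, assuming mem0's ollama embedder provider and the 768-dimensional nomic-embed-text model.

# Hypothetical embedder entry to pair with the 768-dim Qdrant collection
# (merge into config above before the Memory.from_config call)
config["embedder"] = {
    "provider": "ollama",
    "config": {"model": "nomic-embed-text:latest"},
}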
# The agent-assistant fetches key client info from the past
from mem0 import MemoryClient

# Initialize the memory client
mem0 = MemoryClient(api_key="your-mem0-api-key")

def get_underwriting_context(policy_application: dict, client_id: str):
    """Fetch historical context for a given client from mem0."""
    # Query the memory for the client’s claim history
    query = f"What is the claim history for client {client_id}?"
    past_claims = mem0.search(query, user_id=client_id)

    # Format retrieved memories for use in the prompt
    serialized_history = ' '.join([mem["memory"] for mem in past_claims])
    # ... further logic to build the agent prompt
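    # Hypothetical continuation (not from the article): fold the retrieved
    # history into the agent prompt and return it
    prompt = (
        f"Policy application: {policy_application}\n"
        f"Known claim history: {serialized_history}\n"
        "Assess the underwriting risk for this application."
    )
    return prompt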
from langchain_core.tools import StructuredTool
from mem0 import MemoryClient

client = MemoryClient(api_key="your-api-key")

def search_underwriting_memory(query: str, filters: dict) -> list:
    """Search the underwriting knowledge base for past assessments, policies, and claim histories."""
    return client.search(query=query, filters=filters)

# from_function infers the tool's argument schema from the type hints above
underwriting_search_tool = StructuredTool.from_function(
    func=search_underwriting_memory,
    name="search_underwriting_memory",
    description="Useful for finding information on past risk assessments and policies.",
)
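Once wrapped, the tool can be exercised directly before handing it to an agent; a minimal sketch, with the query and filter values as placeholders.

# Direct invocation with a dict of arguments (placeholder values)
hits = underwriting_search_tool.invoke({
    "query": "flood damage claims in the last five years",
    "filters": {"user_id": "client_12345"},
})
print(hits)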
from mem0 import Memory
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings

# Initialize connection to the company’s existing vector DB of risk reports
embeddings = OpenAIEmbeddings()
risk_reports_db = Chroma(
    persist_directory="./corporate_risk_reports_db",
    embedding_function=embeddings,
    collection_name="mem0"  # Required collection name
)

# Configure mem0 to use the existing DB
config = {
    "vector_store": {
        "provider": "langchain",
        "config": {
            "instance": risk_reports_db
        }
    },
    # ... other configs, e.g., pointing to the corporate LLM
}
memory_component = Memory.from_config(config)
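From there, the shared store is queried like any other mem0 backend. A short sketch with illustrative values, assuming the v1.1-style response shape.

# Query the corporate risk reports through the mem0 interface
results = memory_component.search(
    "recent risk assessments for coastal properties",
    user_id="underwriting_team",
)
for hit in results["results"]:
    print(hit["memory"])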
