What is LangChain?

LangChain is a framework that helps developers build applications powered by large language models (LLMs) from providers such as OpenAI and Anthropic. It provides building blocks like prompts, chains, agents, and memory for managing conversations, connecting data sources, and reasoning over multiple steps. By default, LangChain’s built-in memory (such as ConversationBufferMemory) stores information only for a single runtime session: once the app stops, all past context is lost. This is where Alchemyst Memory becomes valuable. It provides persistent, long-term memory that lets your LLM applications remember and reuse context across sessions.
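
For contrast, here is a minimal sketch of that default in-process behavior: ConversationBufferMemory keeps everything in a Python object, so the stored context vanishes when the script exits.

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()
memory.save_context({"input": "Hi, I'm Alice."}, {"output": "Hello, Alice!"})
print(memory.load_memory_variables({}))
# -> {'history': "Human: Hi, I'm Alice.\nAI: Hello, Alice!"}
# Once this process exits, the buffer (and everything Alice said) is gone.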

Installation

pip install langchain langchain-openai alchemyst-ai python-dotenv
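
The usage example later in this guide loads credentials from a .env file via python-dotenv. Assuming you keep your keys there (ALCHEMYST_AI_API_KEY is read by the example below, and OPENAI_API_KEY is picked up automatically by langchain-openai), a minimal .env looks like:

ALCHEMYST_AI_API_KEY=your-alchemyst-api-key
OPENAI_API_KEY=your-openai-api-key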

AlchemystMemory Implementation

Here’s the complete implementation of AlchemystMemory for LangChain:
from langchain.memory.chat_memory import BaseChatMemory
from alchemyst_ai import AlchemystAI
from typing import Dict, Any, List
import time
import uuid


class AlchemystMemory(BaseChatMemory):
    """AlchemystMemory implementation for persistent chat memory using Alchemyst AI."""
    
    def __init__(
        self,
        api_key: str,
        session_id: str,
        **kwargs
    ):
        super().__init__(**kwargs)
        self._session_id = session_id
        self._client = AlchemystAI(
            api_key=api_key,
        )
    
    @property
    def memory_variables(self) -> List[str]:
        """Return the list of memory variables."""
        return ["history"]
    
    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Load memory variables from Alchemyst."""
        try:
            # Use the user input as the retrieval query; fall back to a
            # generic query when the input is missing or only whitespace
            query = (inputs.get("input") or "").strip() or "conversation"
            
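            # A similarity threshold of 0.0 asks Alchemyst to return all
            # stored context, so recall is not limited to near-exact matches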
            response = self._client.v1.context.search(
                query=query,
                similarity_threshold=0.0,
                minimum_similarity_threshold=0.0,
                scope="internal",
                metadata=None
            )
            
            contexts = response.contexts if hasattr(response, 'contexts') else []
            items = [c.content for c in contexts if hasattr(c, 'content') and c.content]
            
            return {"history": "\n".join(items)}
        except Exception as error:
            print(f"Error loading memory variables: {error}")
            return {"history": ""}
    
    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
        """Save context to Alchemyst memory."""
        user_input = str(inputs.get("input", ""))
        ai_output = str(outputs.get("output", ""))
        
        contents = []
        timestamp = int(time.time() * 1000)  # milliseconds
        
        if user_input:
            contents.append({
                "content": user_input,
                "metadata": {
                    "source": self._session_id,
                    "messageId": str(timestamp),
                    "type": "text"
                }
            })
        
        if ai_output:
            contents.append({
                "content": ai_output,
                "metadata": {
                    "source": self._session_id,
                    "messageId": str(timestamp + 1),
                    "type": "text"
                }
            })
        
        if not contents:
            return
        
        try:
            self._client.v1.context.memory.add(
                memory_id=self._session_id,
                contents=contents
            )
        except Exception as error:
            print(f"Error saving context: {error}")
    
    def clear(self) -> None:
        """Clear memory for this session."""
        try:
            self._client.v1.context.memory.delete(
                memory_id=self._session_id
            )
        except Exception as error:
            print(f"Error clearing memory: {error}")
    
    @property
    def memory_keys(self) -> List[str]:
        """Return the memory keys."""
        return ["history"]
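
Because memory_variables exposes "history", any prompt used with this memory needs a {history} placeholder alongside {input}. ConversationChain’s default prompt already provides both, so no extra setup is required; if you want to customize the wording, here is a minimal sketch (the template text is illustrative, not part of the integration):

from langchain.prompts import PromptTemplate

template = """You are a helpful assistant. Use the previous context when it is relevant.

Previous context:
{history}

Human: {input}
AI:"""

prompt = PromptTemplate(input_variables=["history", "input"], template=template)
# Then pass it to the chain: ConversationChain(llm=model, memory=memory, prompt=prompt)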

Usage Example

from dotenv import load_dotenv
import os
import uuid  # used to generate a per-run session id
from langchain_openai import ChatOpenAI
from langchain.chains import ConversationChain
# Assumes AlchemystMemory, defined above, is available in this module

load_dotenv()


def main():
    print("Alchemyst-LangChain Integration Test")
    print("=" * 50)
    
    session_id = str(uuid.uuid4())
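    # Note: a fresh session_id is generated on every run; reuse a fixed id
    # here to resume the same conversation memory across runs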
    print(f"Session ID: {session_id}")
    
    # Initialize memory
    memory = AlchemystMemory(
        api_key=os.getenv("ALCHEMYST_AI_API_KEY", "YOUR_ALCHEMYST_API_KEY"),
        session_id=session_id
    )
    
    # Initialize model
    model = ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0,
    )
    
    # Create conversation chain
    chain = ConversationChain(llm=model, memory=memory)
    
    # Test conversation
    test_messages = [
        "Hi, my name is Alice from New York.",
        "Who is Alice? Where is Alice from?",
        "What did I tell you about myself in our previous conversation?"
    ]
    
    for i, message in enumerate(test_messages, 1):
        print(f"\n[Test {i}] User: {message}")
        response = chain.invoke({"input": message})
        print(f"Bot: {response.get('response', response)}")
    
    print("\n" + "=" * 50)
    print("Test completed! The bot should have remembered your information.")

if __name__ == "__main__":
    main()
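
When a session’s stored context should be discarded, the clear() method defined above removes everything Alchemyst persisted under that session’s memory_id:

# Wipe all context persisted for this session
memory.clear()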

Summary

By combining LangChain’s workflow capabilities with Alchemyst’s persistent memory, developers can build intelligent agents that:
  • Retain user context and preferences
  • Continue conversations across sessions
  • Improve personalization over time
This simple integration makes your LLM applications more human-like and contextually aware — without losing information after every run.