Pular para conteúdo

Cookbook

Exemplos completos, prontos para copiar e colar, para casos de uso comuns.


Uso Básico

A integração mais simples: escrever fatos de mensagens do usuário e recuperar contexto para respostas.

import asyncio
from arandu import MemoryClient
from arandu.providers.openai import OpenAIProvider


async def main():
    """Walk through the simplest integration: write facts, then retrieve context."""
    provider = OpenAIProvider(api_key="sk-...")
    memory = MemoryClient(
        database_url="postgresql+psycopg://memory:memory@localhost:5432/memory",
        llm=provider,
        embeddings=provider,
    )
    await memory.initialize()

    try:
        # Feed a simulated conversation into memory, one message at a time.
        conversation = [
            "Hi, I'm Rafael. I'm a backend engineer at Acme Corp in São Paulo.",
            "My girlfriend Ana is a UX designer. We have a cat named Pixel.",
            "I've been learning Rust lately, mostly on weekends.",
            "Actually, I just moved to Rio de Janeiro. Still remote at Acme.",
        ]
        for line in conversation:
            write_result = await memory.write(agent_id="rafael", message=line, speaker_name="Rafael")
            added = len(write_result.facts_added)
            updated = len(write_result.facts_updated)
            print(f"Write: +{added} facts, ~{updated} updates ({write_result.duration_ms:.0f}ms)")

        # Pull context back out for a few different questions.
        for question in [
            "where does Rafael live?",
            "tell me about Rafael's relationships",
            "what are Rafael's hobbies?",
        ]:
            hits = await memory.retrieve(agent_id="rafael", query=question)
            print(f"\nQuery: {question}")
            print(f"Found {len(hits.facts)} facts ({hits.duration_ms:.0f}ms)")
            for fact in hits.facts[:5]:
                print(f"  [{fact.score:.2f}] {fact.entity_name}: {fact.fact_text}")
    finally:
        # Always release DB connections, even if a call above raised.
        await memory.close()


asyncio.run(main())

Usando Anthropic (Claude)

Use Claude como seu LLM mantendo OpenAI para embeddings (Anthropic não oferece API de embeddings):

import asyncio
from arandu import MemoryClient, MemoryConfig
from arandu.providers.anthropic import AnthropicProvider
from arandu.providers.openai import OpenAIProvider


async def main():
    """Use Claude for fact extraction while OpenAI handles embeddings."""
    # Anthropic does not expose an embeddings endpoint, hence two providers.
    memory = MemoryClient(
        database_url="postgresql+psycopg://memory:memory@localhost/memory",
        llm=AnthropicProvider(api_key="sk-ant-...", model="claude-sonnet-4-20250514"),
        embeddings=OpenAIProvider(api_key="sk-..."),
    )
    await memory.initialize()

    try:
        written = await memory.write(
            agent_id="demo",
            message="I love hiking in the mountains. Last weekend I went to Serra da Mantiqueira.",
            speaker_name="Rafael",
        )
        print(f"Extracted {len(written.facts_added)} facts using Claude")

        retrieved = await memory.retrieve(agent_id="demo", query="outdoor activities")
        print(retrieved.context)
    finally:
        await memory.close()


asyncio.run(main())

Usando Outros Providers de LLM

O OpenAIProvider funciona com qualquer provider que exponha uma API compatível com OpenAI. Basta mudar api_key, model e base_url:

from arandu import MemoryClient
from arandu.providers.openai import OpenAIProvider

# NOTE: each assignment below overwrites `llm` — they are alternatives; keep
# only the one provider you actually want to use.

# DeepSeek V3 (cheap, high quality)
llm = OpenAIProvider(
    api_key="sk-deepseek-...",
    model="deepseek-chat",
    base_url="https://api.deepseek.com/v1",
)

# Groq (fast inference)
llm = OpenAIProvider(
    api_key="gsk_...",
    model="llama-3.3-70b-versatile",
    base_url="https://api.groq.com/openai/v1",
)

# Together AI
llm = OpenAIProvider(
    api_key="tog_...",
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo",
    base_url="https://api.together.xyz/v1",
)

# Fireworks AI
llm = OpenAIProvider(
    api_key="fw_...",
    model="accounts/fireworks/models/llama-v3p3-70b-instruct",
    base_url="https://api.fireworks.ai/inference/v1",
)

# Local model via Ollama
llm = OpenAIProvider(
    api_key="ollama",  # any non-empty string works; Ollama ignores the key
    model="llama3.1",
    base_url="http://localhost:11434/v1",
)

# Use with Arandu (same as with OpenAI)
memory = MemoryClient(
    database_url="postgresql+psycopg://...",
    llm=llm,
    embeddings=OpenAIProvider(api_key="sk-..."),  # embeddings still need OpenAI
)

Embeddings ainda precisam de OpenAI (ou um provider customizado)

A maioria dos providers compatíveis com OpenAI não oferece API de embeddings. Use OpenAIProvider com sua chave real da OpenAI para embeddings, ou implemente um EmbeddingProvider customizado (veja Custom Providers).


Configuração Avançada (Tuning de Retrieval)

Ajuste fino do retrieval para diferentes casos de uso:

import asyncio
from arandu import MemoryClient, MemoryConfig
from arandu.providers.openai import OpenAIProvider


async def main():
    """Tune retrieval for a real-time chatbot that wants broad, recent context."""
    # One provider backs every LLM operation (extraction, reranker, etc.).
    llm = OpenAIProvider(api_key="sk-...", model="gpt-4o")

    config = MemoryConfig(
        extraction_timeout_sec=15.0,  # short timeout suits real-time chat
        topk_facts=40,  # return more candidate facts
        min_similarity=0.15,  # cast a wider net
        recency_half_life_days=7,  # favor recent facts aggressively
        # Score weights: recency boosted for a dynamic conversation.
        score_weights={
            "semantic": 0.50,
            "recency": 0.35,
            "importance": 0.15,
        },
        enable_reranker=True,
        context_max_tokens=3000,  # bigger budget for richer answers
        # Spreading activation: wider context expansion.
        spreading_activation_hops=3,
        spreading_max_related_entities=8,
        timezone="America/Sao_Paulo",  # used for recency calculations
    )

    memory = MemoryClient(
        database_url="postgresql+psycopg://memory:memory@localhost/memory",
        llm=llm,
        embeddings=llm,
        config=config,
    )
    await memory.initialize()

    try:
        # A short burst of messages to populate memory.
        for text in (
            "I started a new job at TechCorp today!",
            "My manager's name is Sarah. She seems great.",
            "The office is in downtown with a nice view.",
        ):
            await memory.write(agent_id="demo", message=text, speaker_name="Rafael")

        # Retrieve with the tuned settings applied.
        result = await memory.retrieve(agent_id="demo", query="what's new with the user?")
        print(f"Retrieved {len(result.facts)} facts")
        print(f"Context ({len(result.context)} chars):")
        print(result.context)

        # Inspect per-fact score breakdowns to validate the tuning above.
        for fact in result.facts:
            print(f"\n  [{fact.score:.3f}] {fact.fact_text}")
            print(f"    Scores: {fact.scores}")
    finally:
        await memory.close()


asyncio.run(main())

Integração de Background Jobs

Configure manutenção periódica para manter a memória organizada:

import asyncio
from arandu import (
    MemoryClient,
    MemoryConfig,
    cluster_user_facts,
    compute_entity_importance,
    detect_communities,
    refresh_entity_summaries,
    run_consolidation,
    run_memify,
)
from arandu.providers.openai import OpenAIProvider
from arandu.db import create_engine, create_session_factory


async def run_maintenance(
    database_url: str,
    agent_ids: list[str],
    provider: OpenAIProvider,
    config: MemoryConfig,
) -> None:
    """Run every background maintenance job for a list of users.

    Jobs are ordered roughly by cost: the pure-SQL importance scoring first,
    then the LLM-backed jobs (summaries, clustering, communities,
    consolidation, memify). One commit at the end covers all users.
    """
    engine = create_engine(database_url)
    session_factory = create_session_factory(engine)

    try:
        async with session_factory() as session:
            for agent_id in agent_ids:
                print(f"\n--- Manutenção para {agent_id} ---")

                # 1. Importance scoring (cheap, SQL only)
                scored = await compute_entity_importance(session, agent_id, config)
                print(f"  Importance: scored {scored.entities_scored} entities")

                # 2. Summary refresh (moderate, LLM)
                refreshed = await refresh_entity_summaries(
                    session, agent_id, provider, config
                )
                print(f"  Summaries: refreshed {refreshed.summaries_refreshed}")

                # 3. Clustering (moderate, LLM)
                clustered = await cluster_user_facts(
                    session, agent_id, provider, provider, config
                )
                print(f"  Clustering: {clustered.clusters_created} clusters")

                # 4. Community detection
                grouped = await detect_communities(
                    session, agent_id, provider, provider, config
                )
                print(f"  Communities: {grouped.communities_created} created")

                # 5. Consolidation (moderate, LLM)
                consolidated = await run_consolidation(session, agent_id, provider, config)
                print(f"  Consolidation: {consolidated.observations_created} observations")

                # 6. Memify (moderate, LLM)
                memified = await run_memify(session, agent_id, provider, provider, config)
                print(f"  Memify: {memified.facts_scored} facts scored")

            # Single commit for all users' maintenance work.
            await session.commit()
    finally:
        # Dispose the engine even if a job raised mid-run.
        await engine.dispose()


async def main():
    """Kick off one maintenance pass; see the commented loop for scheduling."""
    await run_maintenance(
        "postgresql+psycopg://memory:memory@localhost/memory",
        ["user_123", "user_456"],
        OpenAIProvider(api_key="sk-..."),
        MemoryConfig(),
    )

    # To run periodically with asyncio instead:
    # while True:
    #     await run_maintenance(database_url, agent_ids, provider, config)
    #     await asyncio.sleep(4 * 3600)  # every 4 hours


asyncio.run(main())

Setup Multi-Agente

Gerencie múltiplos agentes com espaços de memória isolados:

import asyncio
from arandu import MemoryClient
from arandu.providers.openai import OpenAIProvider


async def main():
    """Demonstrate that each agent_id owns a fully isolated memory space."""
    provider = OpenAIProvider(api_key="sk-...")
    memory = MemoryClient(
        database_url="postgresql+psycopg://memory:memory@localhost/memory",
        llm=provider,
        embeddings=provider,
    )
    await memory.initialize()

    try:
        # Two agents, two disjoint memories.
        await memory.write(
            agent_id="alice",
            message="I work at Google as a PM. I live in Mountain View.",
            speaker_name="Alice",
        )
        await memory.write(
            agent_id="bob",
            message="I'm a freelance designer based in Berlin.",
            speaker_name="Bob",
        )

        # Each retrieval only ever sees its own agent's facts.
        for agent, label in (("alice", "Alice"), ("bob", "Bob")):
            ctx = await memory.retrieve(agent_id=agent, query="where do they work?")
            print(f"{label}:", ctx.context)
    finally:
        await memory.close()


asyncio.run(main())

Multi-Speaker (Sessão de Terapia)

Dois speakers escrevendo no mesmo agente com a mesma sessão - pronomes como "Eu" resolvem para o speaker correto a cada vez.

import asyncio
from arandu import MemoryClient
from arandu.providers.openai import OpenAIProvider


async def main():
    """Two speakers share one agent and one session; "I" resolves per speaker."""
    provider = OpenAIProvider(api_key="sk-...")
    memory = MemoryClient(
        database_url="postgresql+psycopg://memory:memory@localhost/memory",
        llm=provider,
        embeddings=provider,
    )
    await memory.initialize()

    try:
        # Same agent, same session, different speakers.
        for speaker, text in (
            ("Ana Silva", "Eu me sinto ignorada pelo Carlos. Ele nunca me ouve."),
            ("Carlos Silva", "Eu trabalho 12 horas por dia pra sustentar a família."),
        ):
            await memory.write(
                agent_id="therapy_bot",
                message=text,
                speaker_name=speaker,
                session_id="sessao_001",
            )

        # Facts come back attributed to the correct speaker:
        # "Ana Silva se sente ignorada pelo Carlos" — not Carlos's facts.
        result = await memory.retrieve(agent_id="therapy_bot", query="Como a Ana se sente?")
        for fact in result.facts:
            print(f"  [{fact.score:.2f}] {fact.entity_name}: {fact.fact_text}")
    finally:
        await memory.close()


asyncio.run(main())

Multi-Session (Trabalho + Pessoal)

Mesmo speaker, mesmo agente, session_ids diferentes. Retrieve sem session_id busca em todas as sessões.

import asyncio
from arandu import MemoryClient
from arandu.providers.openai import OpenAIProvider


async def main():
    """Same speaker across two sessions; retrieve spans all sessions by default."""
    provider = OpenAIProvider(api_key="sk-...")
    memory = MemoryClient(
        database_url="postgresql+psycopg://memory:memory@localhost/memory",
        llm=provider,
        embeddings=provider,
    )
    await memory.initialize()

    try:
        # Work context.
        await memory.write(
            agent_id="assistant",
            message="Preciso entregar o relatório até sexta.",
            speaker_name="Marcos",
            session_id="work",
        )

        # Personal context.
        await memory.write(
            agent_id="assistant",
            message="Minha mãe tá doente, vou visitar ela no fim de semana.",
            speaker_name="Marcos",
            session_id="personal",
        )

        # No session_id on retrieve -> facts from BOTH sessions come back.
        answer = await memory.retrieve(agent_id="assistant", query="O que o Marcos precisa fazer?")
        for fact in answer.facts:
            print(f"  [{fact.score:.2f}] {fact.entity_name}: {fact.fact_text}")
    finally:
        await memory.close()


asyncio.run(main())

Bot de Atendimento ao Cliente (Múltiplos Clientes)

Mesmo agente atendendo diferentes clientes - os fatos de cada cliente são isolados pelo speaker_name.

import asyncio
from arandu import MemoryClient
from arandu.providers.openai import OpenAIProvider


async def main():
    """One support agent serving several customers, told apart by speaker_name."""
    provider = OpenAIProvider(api_key="sk-...")
    memory = MemoryClient(
        database_url="postgresql+psycopg://memory:memory@localhost/memory",
        llm=provider,
        embeddings=provider,
    )
    await memory.initialize()

    try:
        # Two different customers talking to the same bot.
        for customer, complaint in (
            ("Maria Oliveira", "Meu pedido #1234 não chegou."),
            ("João Santos", "Quero trocar o produto que comprei ontem."),
        ):
            await memory.write(
                agent_id="support_bot",
                message=complaint,
                speaker_name=customer,
            )

        # Ask about one specific customer — the agent remembers each separately.
        lookup = await memory.retrieve(agent_id="support_bot", query="Qual o problema da Maria?")
        for fact in lookup.facts:
            print(f"  [{fact.score:.2f}] {fact.entity_name}: {fact.fact_text}")
    finally:
        await memory.close()


asyncio.run(main())