Skip to content

Latest commit

 

History

History
593 lines (503 loc) · 15.3 KB

File metadata and controls

593 lines (503 loc) · 15.3 KB

NornicDB Complete Working Examples

Real-world scenarios with full code


Table of Contents

  1. AI Agent Memory System
  2. Code Knowledge Base
  3. Personal Knowledge Graph
  4. Project Documentation
  5. Learning Tracker

1. AI Agent Memory System

Scenario

An AI coding assistant needs to remember user preferences, project decisions, and context across sessions.

Complete Implementation

// === SETUP: Create memory structure ===
// Every Memory node carries the fields the decay engine reads:
// tier, created, lastAccessed, accessCount and importance (0.0-1.0).
// NOTE(review): the tier lifetimes quoted below ("~2 months", "~2 years",
// "days") are assumptions from the surrounding docs — confirm against the
// NornicDB decay configuration.

// User preferences (Semantic - lasts ~2 months)
CREATE (pref1:Memory {
  id: randomUUID(),
  content: "User prefers TypeScript over JavaScript",
  tier: "SEMANTIC",
  tags: ["preference", "language", "typescript"],
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 0.7
})

CREATE (pref2:Memory {
  id: randomUUID(),
  content: "User likes functional programming style",
  tier: "SEMANTIC",
  tags: ["preference", "style", "functional"],
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 0.6
})

// Project decision (Semantic - important)
CREATE (decision:Memory {
  id: randomUUID(),
  content: "Project uses React 18 with Vite for frontend",
  tier: "SEMANTIC",
  tags: ["decision", "architecture", "frontend"],
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 0.9
})

// Coding best practice (Procedural - lasts ~2 years)
CREATE (practice:Memory {
  id: randomUUID(),
  content: "Always use async/await instead of .then() chains",
  tier: "PROCEDURAL",
  tags: ["best-practice", "async", "javascript"],
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 1.0
})

// Recent chat context (Episodic - fades in days)
CREATE (chat:Memory {
  id: randomUUID(),
  content: "Currently debugging authentication middleware",
  tier: "EPISODIC",
  tags: ["context", "debugging", "auth"],
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 0.3
})

// === QUERY: Recall relevant memories ===
// NOTE(review): these queries filter on m.decayScore, which none of the
// CREATE statements above set — presumably NornicDB maintains it
// automatically; verify before relying on these thresholds.

// 1. Get all strong memories about preferences
MATCH (m:Memory)
WHERE "preference" IN m.tags
  AND m.decayScore > 0.5
RETURN m.content, m.decayScore, m.accessCount
ORDER BY m.decayScore DESC

// 2. Find architecture decisions
MATCH (m:Memory)
WHERE "architecture" IN m.tags
  OR "decision" IN m.tags
RETURN m.content, m.tier, m.importance
ORDER BY m.importance DESC

// 3. Get current context (episodic memories), newest first
MATCH (m:Memory {tier: "EPISODIC"})
WHERE m.decayScore > 0.3
RETURN m.content, m.created
ORDER BY m.created DESC
LIMIT 5

// === AUTO-LINK: Connect related memories ===

// Link preferences to decisions
// WARNING: a MATCH with two unconnected patterns is a Cartesian product —
// every preference is paired with every decision before the WHERE filter.
// Fine on a small graph; expensive once memory counts grow.
// Re-running this also creates duplicate relationships; use MERGE instead
// of CREATE if it may run more than once.
MATCH (pref:Memory), (decision:Memory)
WHERE "preference" IN pref.tags
  AND "decision" IN decision.tags
  AND (pref.content CONTAINS "TypeScript" AND decision.content CONTAINS "React")
CREATE (pref)-[:RELATES_TO {
  confidence: 0.85,
  autoGenerated: true,
  createdAt: timestamp()
}]->(decision)

// === ACCESS: Reinforce memory when used ===

// User asks about React - reinforce that memory.
// Touching lastAccessed and accessCount feeds the decay formula, so
// frequently used memories stay "strong".
MATCH (m:Memory)
WHERE m.content CONTAINS "React"
SET m.lastAccessed = timestamp(),
    m.accessCount = m.accessCount + 1
RETURN m.content, m.accessCount

// === CLEANUP: Archive old episodic memories ===

// Find forgotten episodic memories (read-only dry run before archiving)
MATCH (m:Memory {tier: "EPISODIC"})
WHERE m.decayScore < 0.05
RETURN m.content, m.created, m.decayScore

// Archive them: swap the :Memory label for :Archived so the nodes drop out
// of every MATCH (m:Memory) query while keeping all their data.
MATCH (m:Memory {tier: "EPISODIC"})
WHERE m.decayScore < 0.05
SET m:Archived, m.archivedAt = timestamp()
REMOVE m:Memory
RETURN count(m) AS archivedCount

// === SEARCH: Semantic search (with embeddings) ===

// Find memories similar to "What frontend framework should I use?"
// Assume $queryEmbedding is the embedding of that question
// (it must have the same dimensionality as m.embedding).
MATCH (m:Memory)
WHERE m.embedding IS NOT NULL
  AND m.decayScore > 0.4
WITH m, vector.similarity.cosine(m.embedding, $queryEmbedding) AS similarity
WHERE similarity > 0.75
RETURN m.content,
       m.tier,
       similarity,
       m.decayScore,
       // Relevance weighted by memory strength: similar AND still-strong
       // memories rank first.
       similarity * m.decayScore AS combinedScore
ORDER BY combinedScore DESC
LIMIT 5

// === STATISTICS: Monitor memory health ===

// One row per tier; round(x * 100) / 100 keeps two decimal places.
MATCH (m:Memory)
RETURN m.tier,
       count(m) AS total,
       round(avg(m.decayScore) * 100) / 100 AS avgScore,
       round(avg(m.accessCount) * 100) / 100 AS avgAccess,
       sum(CASE WHEN m.decayScore < 0.05 THEN 1 ELSE 0 END) AS needsArchive
ORDER BY m.tier

Expected Results

// User preferences
├─ "User prefers TypeScript over JavaScript" (Score: 0.68, Access: 5)
└─ "User likes functional programming style" (Score: 0.62, Access: 3)

// Architecture decisions
├─ "Project uses React 18 with Vite for frontend" (Importance: 0.9)

// Current context
├─ "Currently debugging authentication middleware" (Recent)

// Statistics by tier
EPISODIC: 12 memories, avg score 0.42, 3 need archive
SEMANTIC: 45 memories, avg score 0.71, 0 need archive  
PROCEDURAL: 18 memories, avg score 0.89, 0 need archive

2. Code Knowledge Base

Scenario

Track code patterns, bugs, and solutions across a codebase.

// === Store code pattern ===
CREATE (pattern:Memory {
  id: randomUUID(),
  content: "Use Zod for runtime type validation in API routes",
  codeExample: "const schema = z.object({ email: z.string().email() })",
  tier: "PROCEDURAL",
  tags: ["pattern", "validation", "zod", "api"],
  file: "src/api/users.ts",
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 0.8
})

// === Store bug and solution ===
CREATE (bug:Memory {
  id: randomUUID(),
  content: "Race condition in useEffect caused double API calls",
  solution: "Added cleanup function: return () => { cancelled = true }",
  tier: "EPISODIC",  // Will fade unless accessed often
  tags: ["bug", "react", "useEffect", "race-condition"],
  file: "src/hooks/useData.ts",
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 0.6
})

// === Link related patterns ===
// Cartesian product of all memories, filtered by tags. id(p1) < id(p2)
// prevents self-links and creating the relationship in both directions.
// NOTE(review): id() is deprecated in Neo4j 5 in favour of elementId() —
// confirm which form NornicDB supports.
MATCH (p1:Memory), (p2:Memory)
WHERE "validation" IN p1.tags
  AND "api" IN p2.tags
  AND id(p1) < id(p2)
CREATE (p1)-[:RELATES_TO {
  reason: "Both deal with API input handling",
  confidence: 0.80
}]->(p2)

// === Search by file ===
MATCH (m:Memory)
WHERE m.file = "src/api/users.ts"
RETURN m.content, m.codeExample, m.tags
ORDER BY m.importance DESC

// === Find similar bugs ===
// CONTAINS is case-sensitive; "useEffect" must match exactly.
MATCH (bug:Memory)
WHERE "bug" IN bug.tags
  AND bug.content CONTAINS "useEffect"
RETURN bug.content, bug.solution, bug.file

// === Get all patterns for a technology ===
MATCH (m:Memory)
WHERE "zod" IN m.tags
  AND m.tier = "PROCEDURAL"
RETURN m.content, m.codeExample

3. Personal Knowledge Graph

Scenario

Build a second brain for personal learning and note-taking.

// === Create topic nodes ===
// NOTE(review): the CONTAINS links further down reuse the variables
// ai/ml/nn, so these CREATEs must run in the same statement — executed
// one-by-one, the later CREATEs would need a MATCH first.
CREATE (ai:Topic {
  name: "Artificial Intelligence",
  category: "Technology"
})

CREATE (ml:Topic {
  name: "Machine Learning",
  category: "Technology"
})

CREATE (nn:Topic {
  name: "Neural Networks",
  category: "Technology"
})

// === Create learning memories ===
CREATE (concept:Memory {
  id: randomUUID(),
  content: "Backpropagation calculates gradients using chain rule",
  tier: "SEMANTIC",
  tags: ["ml", "neural-networks", "concept"],
  source: "Deep Learning book, Chapter 6",
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 0.8
})

CREATE (practice:Memory {
  id: randomUUID(),
  content: "Always normalize input features before training",
  tier: "PROCEDURAL",
  tags: ["ml", "best-practice", "preprocessing"],
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 0.9
})

// === Link topics hierarchically ===
// AI contains ML, ML contains Neural Networks.
CREATE (ai)-[:CONTAINS]->(ml)
CREATE (ml)-[:CONTAINS]->(nn)

// === Link memories to topics ===
// Cartesian product (small here); use MERGE if this can run repeatedly.
MATCH (m:Memory), (t:Topic)
WHERE "neural-networks" IN m.tags
  AND t.name = "Neural Networks"
CREATE (m)-[:ABOUT]->(t)

// === Query: What do I know about ML? ===
// CONTAINS*0..2 includes the topic itself (0 hops) plus up to two levels
// of subtopics.
MATCH (topic:Topic {name: "Machine Learning"})-[:CONTAINS*0..2]->(subtopic)
MATCH (m:Memory)-[:ABOUT]->(subtopic)
WHERE m.decayScore > 0.5
RETURN subtopic.name AS topic,
       collect(m.content) AS concepts,
       avg(m.decayScore) AS avgRetention
ORDER BY avgRetention DESC

// === Query: Spaced repetition - what should I review? ===
// Surfaces semantic concepts that are fading (score 0.3-0.6) but not yet
// forgotten, weakest first, so they can be reviewed before they decay away.
MATCH (m:Memory)
WHERE m.tier = "SEMANTIC"
  // Cypher has no BETWEEN operator; use a chained range comparison instead.
  AND 0.3 <= m.decayScore <= 0.6  // Fading but not forgotten
  AND "concept" IN m.tags
RETURN m.content,
       m.source,
       m.decayScore,
       // Milliseconds-since-epoch difference converted to whole days.
       (timestamp() - m.lastAccessed) / (24 * 60 * 60 * 1000) AS daysSinceReview
ORDER BY m.decayScore ASC
LIMIT 5

// === Reinforce learning ===
// Touch lastAccessed/accessCount so the decay engine treats this memory as
// fresh, and nudge importance upward while actively studying.
MATCH (m:Memory)
WHERE m.content CONTAINS "Backpropagation"
SET m.lastAccessed = timestamp(),
    m.accessCount = m.accessCount + 1,
    // Boost importance when actively studying, but clamp at 1.0 so repeated
    // reviews cannot push it outside the 0.0-1.0 scale used everywhere else.
    m.importance = CASE
      WHEN m.importance + 0.05 > 1.0 THEN 1.0
      ELSE m.importance + 0.05
    END
RETURN m.content, m.accessCount, m.decayScore

4. Project Documentation

Scenario

Auto-document project decisions and architecture using the graph.

// === Create project structure ===
CREATE (proj:Project {
  name: "Mimir",
  description: "AI Knowledge Management System"
})

CREATE (frontend:Component {
  name: "Frontend",
  tech: "React + TypeScript",
  path: "src/ui/"
})

CREATE (backend:Component {
  name: "Backend",
  tech: "Node.js + Express",
  path: "src/api/"
})

CREATE (db:Component {
  name: "Database",
  tech: "NornicDB (Graph DB)",
  path: "nornicdb/"
})

// === Link components ===
// Relies on the variables bound above, so run in the same statement.
CREATE (proj)-[:HAS_COMPONENT]->(frontend)
CREATE (proj)-[:HAS_COMPONENT]->(backend)
CREATE (proj)-[:HAS_COMPONENT]->(db)
CREATE (frontend)-[:DEPENDS_ON]->(backend)
CREATE (backend)-[:DEPENDS_ON]->(db)

// === Document decisions ===
CREATE (dec1:Decision {
  id: randomUUID(),
  title: "Use TypeScript for type safety",
  rationale: "Catches errors at compile time, better IDE support",
  date: timestamp(),
  status: "ACCEPTED",
  impact: "HIGH"
})

CREATE (dec2:Decision {
  id: randomUUID(),
  title: "Implement memory decay system",
  rationale: "Mimics human memory, auto-cleans old data",
  date: timestamp(),
  status: "IMPLEMENTED",
  impact: "MEDIUM"
})

// === Link decisions to components ===
// Cartesian products filtered down to one pair each; CONTAINS matching on
// the title is case-sensitive.
MATCH (dec:Decision), (comp:Component)
WHERE dec.title CONTAINS "TypeScript"
  AND comp.name = "Frontend"
CREATE (comp)-[:DECIDED_BY]->(dec)

MATCH (dec:Decision), (comp:Component)
WHERE dec.title CONTAINS "memory decay"
  AND comp.name = "Database"
CREATE (comp)-[:DECIDED_BY]->(dec)

// === Generate architecture document ===
// OPTIONAL MATCH keeps components that have no decisions/dependencies,
// yielding empty lists instead of dropping the row.
MATCH (proj:Project)-[:HAS_COMPONENT]->(comp:Component)
OPTIONAL MATCH (comp)-[:DECIDED_BY]->(dec:Decision)
OPTIONAL MATCH (comp)-[:DEPENDS_ON]->(dep:Component)
RETURN proj.name AS project,
       comp.name AS component,
       comp.tech AS technology,
       collect(DISTINCT dec.title) AS decisions,
       collect(DISTINCT dep.name) AS dependencies
ORDER BY comp.name

// === Find all high-impact decisions ===
MATCH (dec:Decision)
WHERE dec.impact = "HIGH"
RETURN dec.title,
       dec.rationale,
       dec.status,
       dec.date
ORDER BY dec.date DESC

// === Trace dependency chain ===
// Unbounded variable-length DEPENDS_ON* is fine for a shallow component
// graph; add an upper bound (e.g. *..5) on deep or cyclic graphs.
MATCH path = (start:Component)-[:DEPENDS_ON*]->(end:Component)
WHERE start.name = "Frontend"
RETURN [comp IN nodes(path) | comp.name] AS dependencyChain

5. Learning Tracker

Scenario

Track what you're learning with spaced repetition.

// === Create study session ===
CREATE (session:StudySession {
  id: randomUUID(),
  topic: "Graph Algorithms",
  date: timestamp()
})

// === Add what you learned ===
CREATE (fact1:Memory {
  id: randomUUID(),
  content: "Dijkstra's algorithm finds shortest path in weighted graph",
  tier: "SEMANTIC",
  tags: ["algorithms", "graphs", "dijkstra"],
  difficulty: "MEDIUM",
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 0.7,
  nextReview: timestamp() + (3 * 24 * 60 * 60 * 1000)  // Review in 3 days
})

CREATE (fact2:Memory {
  id: randomUUID(),
  content: "BFS uses queue, DFS uses stack (or recursion)",
  tier: "PROCEDURAL",
  tags: ["algorithms", "graphs", "bfs", "dfs"],
  difficulty: "EASY",
  created: timestamp(),
  lastAccessed: timestamp(),
  accessCount: 1,
  importance: 0.8,
  nextReview: timestamp() + (7 * 24 * 60 * 60 * 1000)  // Review in 1 week
})

// === Link to session ===
// WARNING: "fact1-id" / "fact2-id" are placeholders — the nodes above were
// created with randomUUID() ids, so substitute the real ids (or match on
// content/tags) before running this. Also a Cartesian product; see earlier
// examples.
MATCH (m:Memory), (s:StudySession)
WHERE m.id IN ["fact1-id", "fact2-id"]
  AND s.topic = "Graph Algorithms"
CREATE (s)-[:LEARNED]->(m)

// === What to review today? ===
// Everything whose scheduled nextReview has passed, weakest memories first.
MATCH (m:Memory)
WHERE m.nextReview <= timestamp()
  AND m.tier IN ["SEMANTIC", "PROCEDURAL"]
RETURN m.content,
       m.difficulty,
       (timestamp() - m.lastAccessed) / (24 * 60 * 60 * 1000) AS daysSince,
       m.decayScore
ORDER BY m.decayScore ASC  // Review weakest memories first
LIMIT 10

// === Mark as reviewed (spaced repetition) ===
// Reschedules the next review based on difficulty, and relaxes difficulty
// to EASY once the memory has been accessed often enough.
MATCH (m:Memory)
WHERE m.content CONTAINS "Dijkstra"
WITH m,
     CASE m.difficulty
       WHEN "EASY" THEN 7    // Review in 1 week
       WHEN "MEDIUM" THEN 3  // Review in 3 days
       WHEN "HARD" THEN 1    // Review tomorrow
       ELSE 3                // Unknown difficulty: default to 3 days instead of a null nextReview
     END AS daysUntilReview,
     // Compute the new count up front so the difficulty CASE below does not
     // depend on SET evaluation order within a single clause.
     m.accessCount + 1 AS newAccessCount
SET m.lastAccessed = timestamp(),
    m.accessCount = newAccessCount,
    m.nextReview = timestamp() + (daysUntilReview * 24 * 60 * 60 * 1000),
    m.difficulty = CASE
      WHEN newAccessCount > 5 THEN "EASY"  // Getting easier!
      ELSE m.difficulty
    END
RETURN m.content, m.difficulty, m.accessCount

// === Study statistics ===
// Per-session summary for the last 30 days: how much was learned and how
// well it is being retained.
MATCH (s:StudySession)-[:LEARNED]->(m:Memory)
WHERE s.date > timestamp() - (30 * 24 * 60 * 60 * 1000)  // Last 30 days
RETURN s.topic,
       s.date,
       count(m) AS memoriesLearned,  // fixed alias typo: was "memorizesLearned"
       avg(m.decayScore) AS avgRetention,
       sum(CASE WHEN m.decayScore > 0.7 THEN 1 ELSE 0 END) AS strongMemories
ORDER BY s.date DESC

// === Knowledge map ===
// Tag-level overview: how many concepts carry each tag and how well they
// are retained.
MATCH (m:Memory)
WHERE m.tier IN ["SEMANTIC", "PROCEDURAL"]
// Group by the full tag list first, then fan each group out per tag.
// Alias renamed from "count" to avoid shadowing Cypher's count() aggregate.
WITH m.tags AS tags, count(m) AS groupSize, avg(m.decayScore) AS avgScore
UNWIND tags AS tag
RETURN tag,
       sum(groupSize) AS totalConcepts,
       // NOTE(review): this averages the per-group averages, unweighted by
       // group size — an approximation of true per-tag retention.
       round(avg(avgScore) * 100) / 100 AS retention
ORDER BY totalConcepts DESC
LIMIT 20

Common Patterns Library

Pattern: Batch Update with Math

// Recalculate all decay scores:
//   decayScore = 0.4*recency + 0.3*frequency + 0.3*importance, where
//   - recency: exponential decay over hours since last access
//     (half-life = ln(2)/0.00412 ≈ 168 h, i.e. about one week)
//   - frequency: log-scaled access count, normalised so 100 accesses -> 1.0
//   - importance: stored value, defaulting to 0.5 when unset
// Unfiltered MATCH: touches every Memory node — intended as a batch job.
MATCH (m:Memory)
WITH m,
     exp(-0.00412 * ((timestamp() - m.lastAccessed) / 3600000.0)) AS recency,
     log(1 + m.accessCount) / log(101) AS frequency,
     coalesce(m.importance, 0.5) AS importance
SET m.decayScore = 0.4 * recency + 0.3 * frequency + 0.3 * importance
RETURN count(m) AS updated

Pattern: Find Clusters

// Find tightly connected memories.
// The undirected pattern counts RELATES_TO links in either direction; a
// memory qualifies as a cluster centre once it has 3+ high-confidence links.
MATCH (m:Memory)-[r:RELATES_TO]-(related:Memory)
WHERE r.confidence > 0.8
WITH m, count(related) AS connections, collect(related.content) AS cluster
WHERE connections >= 3
RETURN m.content AS centerMemory,
       connections,
       cluster
ORDER BY connections DESC
LIMIT 10

Pattern: Time-based Analysis

// Memories created per day (last 30 days).
// Buckets by whole days since the Unix epoch (timestamps are milliseconds),
// so "day" is an integer day number, not a calendar date.
MATCH (m:Memory)
WHERE m.created > timestamp() - (30 * 24 * 60 * 60 * 1000)
WITH toInteger(m.created / (24 * 60 * 60 * 1000)) AS day,
     count(m) AS memoriesCreated
RETURN day, memoriesCreated
ORDER BY day DESC

Pattern: Confidence-weighted Search

// Find memories with high confidence links.
// The ORDER BY before the aggregating RETURN controls the order rows enter
// collect(), so the [0..5] slice keeps the five most confident links.
MATCH (m:Memory)-[r:RELATES_TO]->(related:Memory)
WHERE m.content CONTAINS $query
WITH m, related, r.confidence AS conf
ORDER BY conf DESC
RETURN m.content AS source,
       collect({content: related.content, confidence: conf})[0..5] AS relatedMemories

Last Updated: November 25, 2025