API Reference
Complete API documentation for FACT (Fast Augmented Context Tools), covering the WASM bindings, the MCP server tools, and the Rust library API.
- WASM JavaScript API
- MCP Server Tools
- Rust Library API
- Cognitive Templates
- Error Handling
- Examples & Usage Patterns
Main interface for FACT processing with integrated caching.
import init, { Fact } from './pkg/fact_wasm_core.js';
// Initialize WASM module
await init();
// Create FACT instance
const fact = new Fact();

Constructor
new Fact() -> Fact

Methods
// Process query with optional caching
process(query: string, use_cache: boolean) -> string
// Get detailed processing results
process_detailed(query: string) -> ProcessingResult
// Get cache statistics
get_cache_stats() -> CacheStats
// Clear cache
clear_cache() -> void
// Optimize cache performance
optimize(mode: string) -> OptimizationResult
// Modes: "standard", "aggressive", "memory"

Advanced caching system with LRU eviction and TTL support.
import { FastCache } from './pkg/fact_wasm_core.js';
// Create cache with default capacity (10MB)
const cache = new FastCache();
// Create cache with custom capacity
const cache = FastCache.with_capacity(50 * 1024 * 1024); // 50MB

Methods
// Basic operations
get(key: string) -> string | null
put(key: string, value: string) -> boolean
set(key: string, value: string, ttl_ms: number) -> boolean
remove(key: string) -> boolean
clear() -> void
// Advanced operations
set_with_priority(key: string, value: string, ttl_ms: number, priority: number) -> boolean
batch_set(entries: BatchEntry[]) -> number
batch_get(keys: string[]) -> BatchResult
// Introspection
contains(key: string) -> boolean
size() -> number
memory_usage() -> number
get_stats() -> CacheStats
get_health_metrics() -> HealthMetrics
// Optimization
optimize() -> void
optimize_aggressive() -> void
optimize_memory() -> void

Pattern recognition and cognitive template processing engine.
import { QueryProcessor } from './pkg/fact_wasm_core.js';
// Create processor with default cache
const processor = new QueryProcessor();
// Create processor with custom cache size
const processor = QueryProcessor.with_cache(10 * 1024 * 1024);

Methods
// Processing
process(query: string) -> string
process_detailed(query: string) -> QueryResult
// Configuration
set_optimization_level(level: number) -> void // 0-3
// Cache management
clear_cache() -> void
// Statistics
get_stats() -> ProcessorStats
// Performance
warmup(sample_queries: string[]) -> number

interface CacheStats {
size: number; // Current memory usage
entries: number; // Number of cached entries
capacity: number; // Maximum capacity
hit_rate: number; // Cache hit rate (0-1)
miss_rate: number; // Cache miss rate (0-1)
evictions: number; // Total evictions
expired_entries: number; // Expired entry count
total_requests: number; // Total access requests
cache_hits: number; // Successful cache hits
cache_misses: number; // Cache misses
compression_savings: number; // Bytes saved by compression
memory_efficiency: number; // Memory utilization efficiency
avg_access_time_ms: number; // Average access time
hot_entries: number; // Hot key count
cold_entries: number; // Cold key count
fragmentation_ratio: number; // Memory fragmentation
gc_runs: number; // Garbage collection runs
gc_time_ms: number; // Total GC time
}

interface QueryResult {
success: boolean;
execution_time_ms: number;
cache_hit: boolean;
result_data: string;
metadata: Record<string, any>;
}

interface ProcessorStats {
total_queries: number;
successful_queries: number;
failed_queries: number;
average_execution_time_ms: number;
total_execution_time_ms: number;
cache_hit_rate: number;
pattern_matches: number;
}

interface HealthMetrics {
overall_health: number; // Health score 0-1
memory_pressure: number; // Memory usage ratio
hit_rate_health: string; // "excellent" | "good" | "poor"
fragmentation_health: string; // "excellent" | "good" | "poor"
recommendations: string[]; // Optimization suggestions
}

import init, { Fact } from './pkg/fact_wasm_core.js';
async function main() {
await init();
const fact = new Fact();
// Process with caching
const result = fact.process("analyze user engagement data", true);
console.log(result);
// Get cache statistics
const stats = fact.get_cache_stats();
console.log(`Hit rate: ${stats.hit_rate * 100}%`);
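// Sketch only: detailed results and cache optimization; the exact shapes of
// ProcessingResult and OptimizationResult are assumed from the signatures above
const detailed = fact.process_detailed("analyze user engagement data");
console.log(detailed);
fact.optimize("standard"); // modes: "standard", "aggressive", "memory"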
}

import init, { FastCache } from './pkg/fact_wasm_core.js';
async function cachingExample() {
await init();
const cache = FastCache.with_capacity(100 * 1024 * 1024); // 100MB
// Set with TTL and priority
cache.set_with_priority("important-data", "value", 3600000, 4); // 1hr, critical
cache.set("temporary-data", "value", 60000); // 1min
// Batch operations
const entries = [
["key1", "value1", 300000],
["key2", "value2", 600000],
["key3", "value3", null]
];
const inserted = cache.batch_set(entries);
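// Sketch only: batch retrieval; the BatchResult shape is assumed from the
// batch_get signature above
const fetched = cache.batch_get(["key1", "key2", "key3"]);
console.log(fetched);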
// Performance optimization
cache.optimize_aggressive();
// Health monitoring
const health = cache.get_health_metrics();
if (health.overall_health < 0.7) {
console.warn("Cache performance degraded:", health.recommendations);
}
}

Process a cognitive template with context data.
Parameters:
{
template_id: string; // Template identifier
context: object; // Processing context
options?: {
cache?: boolean; // Enable caching (default: true)
priority?: "low" | "medium" | "high" | "critical";
timeout?: number; // Timeout in milliseconds
};
}

Response:
{
template_id: string;
template_version: string;
result: object; // Processed result
step_results: object[]; // Individual step results
execution_metrics: {
total_execution_time_ms: number;
steps_executed: number;
};
processed_at: string; // ISO timestamp
success: boolean;
cache_key: string;
}

Example:
const result = await mcp__fact_mcp__process_template({
template_id: "data-analysis",
context: {
query: "Analyze Q3 sales performance",
data_source: "sales_db",
metrics: ["revenue", "conversion", "churn"]
},
options: {
cache: true,
priority: "high"
}
});

List available cognitive templates with optional filtering.
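A minimal call sketch, assuming this tool is exposed as mcp__fact_mcp__list_templates (the name is inferred from the mcp__fact_mcp__process_template example above and is not confirmed); the full parameter and response schemas follow below.

// Assumed tool name, following the mcp__fact_mcp__ prefix used above
const listing = await mcp__fact_mcp__list_templates({
  category: "Analysis",        // optional category filter
  tags: ["data", "insights"]   // optional tag filter
});
console.log(`Found ${listing.total_count} templates`);
for (const t of listing.templates) {
  console.log(`${t.id}: ${t.description} (success rate ${t.success_rate})`);
}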
Parameters:
{
category?: string; // Filter by category
tags?: string[]; // Filter by tags
}

Response:
{
templates: Array<{
id: string;
name: string;
description: string;
category: string;
tags: string[];
usage_count: number;
success_rate: number;
}>;
total_count: number;
}

Analyze context and suggest appropriate templates.
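A minimal call sketch, assuming the tool is exposed as mcp__fact_mcp__analyze_context (an assumed name, inferred from the prefix above); the parameter and response schemas follow below, and the context values here are illustrative.

// Assumed tool name; context values are illustrative
const report = await mcp__fact_mcp__analyze_context({
  context: {
    query: "Checkout latency spikes during peak traffic",
    system: "e-commerce backend"
  },
  suggest_templates: true
});
console.log("Intent:", report.analysis.intent);
for (const suggestion of report.suggested_templates ?? []) {
  console.log(`${suggestion.id} (confidence ${suggestion.confidence}): ${suggestion.reasoning}`);
}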
Parameters:
{
context: object; // Context to analyze
suggest_templates?: boolean; // Include template suggestions
}

Response:
{
analysis: {
complexity: number; // Context complexity score
entities: string[]; // Extracted entities
intent: string; // Detected intent
patterns: string[]; // Identified patterns
};
suggested_templates?: Array<{
id: string;
confidence: number;
reasoning: string;
}>;
}

Optimize FACT system performance.
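A minimal call sketch, assuming the tool is exposed as mcp__fact_mcp__optimize_performance (an assumed name); the parameter and response schemas follow below.

// Assumed tool name; "cache" is one of the documented operations below
const optimization = await mcp__fact_mcp__optimize_performance({
  operation: "cache",
  aggressive: false
});
if (optimization.optimized) {
  console.log("Improvements:", optimization.improvements);
  console.log("Recommendations:", optimization.recommendations);
}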
Parameters:
{
operation: "cache" | "memory" | "processing";
aggressive?: boolean; // Enable aggressive optimization
}

Response:
{
optimized: boolean;
operation: string;
improvements: {
cache_hit_rate?: number;
memory_usage_reduction?: number;
processing_speed_gain?: number;
};
recommendations: string[];
}

Create a new cognitive template.
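A minimal call sketch, assuming the tool is exposed as mcp__fact_mcp__create_template (an assumed name) and that steps and optimization hints accept the illustrative values shown; the parameter schema follows below.

// Assumed tool name; step and hint values are illustrative only
const created = await mcp__fact_mcp__create_template({
  name: "Log Triage",
  description: "Cluster and prioritize production log anomalies",
  pattern: {
    pattern_type: "sequential",
    steps: [
      { step_type: "transform", config: { mode: "expand" } },
      { step_type: "analyze", config: { focus: "anomalies" } }
    ],
    parallel_execution: false,
    optimization_hints: ["cache_aggressive"]
  },
  category: "Operations",
  tags: ["logs", "triage"]
});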
Parameters:
{
name: string;
description: string;
pattern: {
pattern_type: string;
steps: ProcessingStep[];
parallel_execution?: boolean;
optimization_hints?: string[];
};
category?: string;
tags?: string[];
}

Get comprehensive performance metrics.
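A minimal call sketch, assuming the tool is exposed as mcp__fact_mcp__get_performance_metrics (an assumed name); the response shape follows below.

// Assumed tool name; fields follow the response documented below
const metrics = await mcp__fact_mcp__get_performance_metrics({});
console.log(`Uptime: ${metrics.system_metrics.uptime_ms} ms`);
console.log(`Cache hit rate: ${metrics.cache_metrics.hit_rate}`);
console.log(`Template success rate: ${metrics.template_metrics.processing_success_rate}`);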
Response:
{
system_metrics: {
uptime_ms: number;
memory_usage: number;
cpu_usage: number;
};
cache_metrics: CacheStats;
processing_metrics: ProcessorStats;
template_metrics: {
total_templates: number;
active_templates: number;
processing_success_rate: number;
};
}

pub struct Fact {
cache: FastCache,
processor: QueryProcessor,
}
impl Fact {
pub fn new() -> Self;
pub fn process(&mut self, query: &str, use_cache: bool) -> String;
pub fn get_cache_stats(&self) -> serde_json::Value;
pub fn clear_cache(&mut self);
pub fn optimize(&mut self, mode: &str) -> String;
}

pub struct FastCache {
data: FxHashMap<String, CacheEntry>,
access_order: VecDeque<String>,
hot_keys: SmallVec<[String; 32]>,
stats: CacheStats,
max_size: usize,
max_entries: usize,
hot_threshold: u32,
}
impl FastCache {
pub fn new() -> Self;
pub fn with_capacity(max_size: usize) -> Self;
// Basic operations
pub fn get(&mut self, key: &str) -> Option<String>;
pub fn put(&mut self, key: String, value: String) -> bool;
pub fn remove(&mut self, key: &str) -> bool;
pub fn clear(&mut self);
// Advanced operations
pub fn set_with_priority(&mut self, key: &str, value: &str, ttl_ms: u64, priority: u8) -> bool;
pub fn batch_set(&mut self, entries: &[(String, String, Option<u64>)]) -> u32;
pub fn batch_get(&mut self, keys: &[String]) -> HashMap<String, String>;
// Optimization
pub fn optimize(&mut self);
pub fn optimize_aggressive(&mut self);
pub fn optimize_memory(&mut self);
// Introspection
pub fn contains(&self, key: &str) -> bool;
pub fn size(&self) -> usize;
pub fn memory_usage(&self) -> usize;
pub fn get_stats(&self) -> &CacheStats;
}

pub struct QueryProcessor {
pattern_engine: PatternEngine,
stats: ProcessorStats,
cache: Option<FastCache>,
optimization_level: u8,
}
impl QueryProcessor {
pub fn new() -> Self;
pub fn with_cache(cache_size: usize) -> Self;
pub fn process(&mut self, query: &str) -> String;
pub fn process_detailed(&mut self, query: &str) -> QueryResult;
pub fn set_optimization_level(&mut self, level: u8);
pub fn get_stats(&self) -> &ProcessorStats;
pub fn clear_cache(&mut self);
pub fn warmup(&mut self, sample_queries: &[String]) -> u32;
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CognitiveTemplate {
pub id: String,
pub name: String,
pub description: String,
pub version: String,
pub pattern: TemplatePattern,
pub cache_ttl: Option<u64>,
pub priority: TemplatePriority,
pub tags: Vec<String>,
pub created_at: String,
pub updated_at: String,
pub usage_count: u64,
pub success_rate: f64,
pub metadata: HashMap<String, serde_json::Value>,
}

pub fn process_template(template_json: &str, context_json: &str) -> String;

Process a cognitive template with context. Returns JSON string with results.
fn apply_template(template: &CognitiveTemplate, context: &serde_json::Value) -> serde_json::Value;

Internal function for template application with full processing pipeline.
use fact_wasm_core::{Fact, FastCache, QueryProcessor};
fn main() {
// Create FACT instance
let mut fact = Fact::new();
// Process query
let result = fact.process("analyze performance metrics", true);
println!("Result: {}", result);
// Get statistics
let stats = fact.get_cache_stats();
println!("Cache stats: {}", stats);
}

use fact_wasm_core::FastCache;
fn caching_example() {
let mut cache = FastCache::with_capacity(100 * 1024 * 1024);
// Add entries with different priorities
cache.set_with_priority("critical-data", "value", 3600000, 4);
cache.set_with_priority("normal-data", "value", 1800000, 2);
// Batch operations
let entries = vec![
("batch1".to_string(), "value1".to_string(), Some(300000)),
("batch2".to_string(), "value2".to_string(), None),
];
let inserted = cache.batch_set(&entries);
// Optimize performance
cache.optimize_aggressive();
println!("Inserted {} entries", inserted);
println!("Memory usage: {} bytes", cache.memory_usage());
}

| Template ID | Category | Description | Avg Processing Time |
|---|---|---|---|
| data-analysis | Analysis | Data pattern analysis and insights | 42.5ms |
| machine-learning | Intelligence | ML workflow and model recommendations | 75.3ms |
| system-architecture | Architecture | System design and scalability planning | 68.7ms |
| api-design | Development | RESTful API design and documentation | 52.1ms |
| performance-optimization | Operations | System performance tuning | 61.4ms |
| security-analysis | Analysis | Security assessment and compliance | 78.9ms |
| devops | Operations | CI/CD and infrastructure automation | 64.2ms |
| database-design | Architecture | Database schema and optimization | 57.6ms |
| problem-solving | Intelligence | Systematic problem resolution | 38.7ms |
| code-generation | Development | Code generation and documentation | 65.1ms |
| question-answer | Intelligence | Knowledge retrieval and Q&A | 28.3ms |
Each template returns structured results with:
interface TemplateResult {
type: string; // Template type
query: string; // Original query
[category]: { // Category-specific results
// Template-specific data structure
};
confidence: number; // Result confidence (0-1)
processing_time_ms: number; // Execution time
}

let template = CognitiveTemplate {
id: "custom-analysis".to_string(),
name: "Custom Analysis Template".to_string(),
description: "Custom data analysis workflow".to_string(),
version: "1.0.0".to_string(),
pattern: TemplatePattern {
pattern_type: "sequential".to_string(),
steps: vec![
ProcessingStep {
step_type: "transform".to_string(),
config: HashMap::from([
("mode".to_string(), json!("expand"))
]),
// ... additional configuration
}
],
parallel_execution: false,
optimization_hints: vec![
OptimizationHint::CacheAggressive,
OptimizationHint::MemoryOptimized
],
dependencies: vec![],
expected_execution_time_ms: Some(50.0),
memory_requirements: Some(1024 * 1024), // 1MB
},
cache_ttl: Some(300000), // 5 minutes
priority: TemplatePriority::Medium,
tags: vec!["analysis".to_string(), "custom".to_string()],
created_at: "2025-08-01T00:00:00Z".to_string(),
updated_at: "2025-08-01T00:00:00Z".to_string(),
usage_count: 0,
success_rate: 1.0,
metadata: HashMap::new(),
};

FACT provides comprehensive error handling:
try {
const result = fact.process(query, true);
} catch (error) {
if (error.name === 'WasmError') {
console.error('WASM processing error:', error.message);
} else if (error.name === 'ValidationError') {
console.error('Input validation failed:', error.details);
} else {
console.error('Unexpected error:', error);
}
}

interface MCPError {
code: number; // Error code
message: string; // Error message
data?: {
template_id?: string;
context_size?: number;
validation_errors?: string[];
};
}

use fact_wasm_core::{Fact, ProcessingError};
fn error_handling_example() -> Result<String, ProcessingError> {
let mut fact = Fact::new();
match fact.process("invalid query", true) {
Ok(result) => Ok(result),
Err(ProcessingError::ValidationError(msg)) => {
eprintln!("Validation error: {}", msg);
Err(ProcessingError::ValidationError(msg))
},
Err(ProcessingError::TemplateNotFound(id)) => {
eprintln!("Template not found: {}", id);
Err(ProcessingError::TemplateNotFound(id))
},
Err(e) => {
eprintln!("Processing error: {:?}", e);
Err(e)
}
}
}

// Optimize for speed
const fact = new Fact();
const processor = new QueryProcessor();
processor.set_optimization_level(3); // Maximum optimization
// Warm up with sample queries
await processor.warmup([
"analyze data patterns",
"optimize system performance",
"generate code documentation"
]);
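// Sketch only: inspect processor statistics after warmup
// (ProcessorStats fields are listed above)
const warmStats = processor.get_stats();
console.log(`Pattern matches so far: ${warmStats.pattern_matches}`);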
// Process with pre-warmed cache
const result = processor.process_detailed(query);

// Configure for memory efficiency
const cache = FastCache.with_capacity(10 * 1024 * 1024); // 10MB limit
cache.optimize_memory(); // Enable aggressive memory management
// Monitor memory usage
setInterval(() => {
const health = cache.get_health_metrics();
if (health.memory_pressure > 0.8) {
cache.optimize_memory();
}
}, 30000); // Check every 30 seconds

// Process multiple queries efficiently
const queries = [
"analyze user engagement",
"optimize database queries",
"design API endpoints"
];
const results = await Promise.all(
queries.map(query =>
mcp__fact_mcp__process_template({
template_id: "auto-select",
context: { query },
options: { cache: true, priority: "high" }
})
)
);

// Use specific templates for different tasks
const templates = {
"data-analysis": {
context: { data_source: "analytics", depth: "deep" }
},
"security-analysis": {
context: { scope: "full", compliance: ["SOC2", "GDPR"] }
},
"performance-optimization": {
context: { target: "database", metrics: ["latency", "throughput"] }
}
};
for (const [templateId, config] of Object.entries(templates)) {
const result = await mcp__fact_mcp__process_template({
template_id: templateId,
context: { query, ...config.context },
options: { cache: true }
});
console.log(`${templateId}:`, result);
}

Last updated: August 1, 2025
API version: 0.1.0