From a097b1426ca4a1c37c22aa4b9a0cd768445e7b32 Mon Sep 17 00:00:00 2001
From: Leon van Bokhorst
Date: Wed, 23 Oct 2024 16:50:50 +0200
Subject: [PATCH 01/14] PoC Bias from Narrative

---
 pocs/leon-marleen-bias-research.py | 179 +++++++++++++++++++++++++++++
 requirements.txt                   |   2 +
 2 files changed, 181 insertions(+)
 create mode 100644 pocs/leon-marleen-bias-research.py
 create mode 100644 requirements.txt

diff --git a/pocs/leon-marleen-bias-research.py b/pocs/leon-marleen-bias-research.py
new file mode 100644
index 0000000..12a791e
--- /dev/null
+++ b/pocs/leon-marleen-bias-research.py
@@ -0,0 +1,179 @@
+from dataclasses import dataclass
+from typing import List
+import ollama
+import time
+
+@dataclass
+class StoryState:
+    """Represents the current state of a narrative in the field"""
+    content: str
+    context: str
+    resonances: List[str]
+    field_effects: List[str]
+
+class NarrativeFieldSimulator:
+    """Pure narrative-driven simulator using LLM for field evolution"""
+
+    def __init__(self, llm_interface):
+        self.llm = llm_interface
+        self.field_state = "Empty narrative field awaiting stories"
+        self.active_stories: List[StoryState] = []
+        # Instance attribute, so each simulator keeps its own story line
+        self.story_line: List[str] = []
+
+    def simulate_story_evolution(self, initial_setup: str) -> str:
+        """Simulates natural story evolution without mechanical state tracking"""
+
+        # Initial field formation prompt
+        field_prompt = f"""
+        A new story enters the narrative field:
+        {initial_setup}
+
+        Considering narrative field dynamics, describe how this story naturally begins
+        to evolve. Focus on:
+        - Natural narrative flows
+        - Character perspective resonances
+        - Emerging story patterns
+        - Potential narrative tensions
+
+        Describe this purely through story, avoiding any mechanical state descriptions. Short sentences no line breaks. No markdown.
+        """
+
+        # Get initial field state
+        field_response = self.llm.generate(field_prompt)
+        self.story_line.append(field_response)
+        print(f"\n---\nInitial field state:\n{field_response}")
+
+        self.field_state = field_response
+
+        # Simulate evolution through multiple phases
+        for _ in range(5):  # Five evolution phases
+            evolution_prompt = f"""
+            Current story field:
+            {self.field_state}
+
+            Allow this narrative field to naturally evolve to its next state. Consider:
+            - How character perspectives influence each other
+            - Where stories naturally want to flow
+            - What patterns are emerging
+            - How tensions resolve or transform
+
+            Describe the next state of the story field, maintaining pure narrative focus. Short sentences no line breaks.
+            """
+
+            # Get next evolution state
+            next_state = self.llm.generate(evolution_prompt)
+            print(f"\n---\nNext field state:\n{next_state}")
+
+            # Look for emergent patterns
+            pattern_prompt = f"""
+            Previous field state:
+            {self.field_state}
+
+            New field state:
+            {next_state}
+
+            What narrative patterns and resonances are naturally emerging?
+            Describe any:
+            - Story convergence
+            - Character alignment
+            - Resolution patterns
+            - New tensions
+
+            Express this purely through story, not technical analysis. Short sentences no line breaks.
+            """
+
+            patterns = self.llm.generate(pattern_prompt)
+            print(f"\n---\nEmerging patterns:\n{patterns}")
+
+            # Update field state with new patterns
+            self.field_state = f"""
+            {next_state}
+
+            Emerging patterns:
+            {patterns}
+            """
+
+        return self.field_state
+
+    def introduce_narrative_force(self, new_element: str) -> str:
+        """Introduces a new narrative element and observes field effects"""
+
+        force_prompt = f"""
+        Current narrative field:
+        {self.field_state}
+
+        A new force enters the field:
+        {new_element}
+
+        How does this new element interact with the existing story?
+        Describe the natural narrative reactions and adjustments,
+        focusing on story flow rather than mechanics. Short sentences no line breaks.
+        """
+
+        field_response = self.llm.generate(force_prompt)
+        self.story_line.append(field_response)
+        print(f"\n---\nNew field state:\n{field_response}")
+        self.field_state = field_response
+        return field_response
+
+    def evaluate_story_state(self, initial_story_state: str) -> str:
+        """Evaluates the state of a story"""
+
+        evaluation_prompt = f"""
+        Initial story state:
+        {initial_story_state}
+
+        Story line:
+        {self.story_line}
+
+        Use the initial story state and the evolving story line to tell a new story about how their biases have evolved.
+        """
+        print(f"\n---\nStory evaluation prompt:\n{evaluation_prompt}")
+        evaluation = self.llm.generate(evaluation_prompt)
+        print(f"\n---\nStory evaluation:\n{evaluation}")
+        return evaluation
+
+class LLMInterface:
+    def __init__(self, model: str = "llama3"):  # "mistral-nemo" "nemotron-mini"
+        self.model = model
+
+    def generate(self, prompt: str) -> str:
+        response = ollama.generate(model=self.model, prompt=prompt)
+        return response['response']
+
+def simulate_road_trip_planning():
+    """Simulate the evolution of a bias through a narrative field"""
+
+    # Create an LLM interface
+    llm_interface = LLMInterface()
+
+    # Initialize simulator with the LLM interface
+    simulator = NarrativeFieldSimulator(llm_interface)
+
+    # Initial setup
+    initial_bias = """
+    Leon is a 55yo educator and researcher in the field of AI, especially conversational AI and human-machine interaction. Marleen is a 45yo former nurse and now a researcher in the field of transdisciplinary research and cooperation. They both work at Fontys University of Applied Sciences.
+    Leon and Marleen challenge each other to research their own biases and to understand each other better. They use Claude 3.5 Sonnet to write stories about each other and understand each other's language.
+    """
+
+    # Simulate natural evolution
+    simulator.simulate_story_evolution(initial_bias)
+
+    # Optionally introduce new force
+    narrative_force = """
+    Leon learns from Marleen that transdisciplinary research is about collaboration and cooperation. He recognizes that his peers are not aware of this. He sees that his field of AI is changing towards this. He tells everybody for years that it's not about the technology, but about the people and people's needs. Peopleproblems, he calls them.
+    Marleen learns from Leon that AI can be used to write stories. She is excited about this new development. She designed a Marleen assistant that acts like her, just by prompting the LLM. She has mixed feelings about the new assistant. What do I want to give away? The machine is not a human, but feels like one. Marleen thinks that AI experts are technical people who don't understand people.
+ """ + + simulator.introduce_narrative_force(narrative_force) + + return initial_bias, simulator + +# Example output would show natural story evolution through +# narrative field dynamics, without explicit state tracking + +if __name__ == "__main__": + initial_bias, simulator = simulate_road_trip_planning() + + simulator.evaluate_story_state(initial_bias) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..28c1398 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +ollama +chromadb \ No newline at end of file From 7510ef3268e73ae310ab5647c32f8f0c87c96a04 Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Wed, 23 Oct 2024 18:21:24 +0200 Subject: [PATCH 02/14] nfs pocs added --- ...-bias-research.py => nfs_bias_research.py} | 0 pocs/nfs_simple_lab_scenario.py | 362 ++++++++++++++++++ 2 files changed, 362 insertions(+) rename pocs/{leon-marleen-bias-research.py => nfs_bias_research.py} (100%) create mode 100644 pocs/nfs_simple_lab_scenario.py diff --git a/pocs/leon-marleen-bias-research.py b/pocs/nfs_bias_research.py similarity index 100% rename from pocs/leon-marleen-bias-research.py rename to pocs/nfs_bias_research.py diff --git a/pocs/nfs_simple_lab_scenario.py b/pocs/nfs_simple_lab_scenario.py new file mode 100644 index 0000000..d5fbe02 --- /dev/null +++ b/pocs/nfs_simple_lab_scenario.py @@ -0,0 +1,362 @@ +from dataclasses import dataclass, field +from datetime import datetime +from typing import List, Dict, Optional, Any +from uuid import uuid4 +import asyncio +import json +import chromadb +import ollama + +@dataclass +class Story: + """A narrative element in the field with rich context""" + content: str + context: str + id: str = field(default_factory=lambda: str(uuid4())) + timestamp: datetime = field(default_factory=datetime.now) + metadata: Dict[str, Any] = field(default_factory=dict) + resonances: List[str] = field(default_factory=list) + field_effects: List[Dict] = field(default_factory=list) + +@dataclass +class FieldState: + """Represents the current state of the narrative field""" + description: str + patterns: List[Dict] = field(default_factory=list) + active_resonances: List[Dict] = field(default_factory=list) + emergence_points: List[Dict] = field(default_factory=list) + timestamp: datetime = field(default_factory=datetime.now) + +class OllamaInterface: + """Interface to Ollama LLM""" + + def __init__(self, model_name: str = "mistral-nemo", embed_model_name: str = "mxbai-embed-large"): + self.model = model_name + self.embed_model = embed_model_name + + async def analyze(self, prompt: str) -> str: + """Get LLM analysis of narrative""" + response = await asyncio.to_thread( + ollama.chat, + model=self.model, + messages=[{ + 'role': 'user', + 'content': prompt + }] + ) + return response['message']['content'] + + async def generate_embedding(self, text: str) -> List[float]: + """Generate embedding using Ollama""" + response = await asyncio.to_thread( + ollama.embeddings, + model=self.embed_model, + prompt=text + ) + return response['embedding'] + +class ChromaStore: + """Local vector store using ChromaDB""" + + def __init__(self, collection_name: str = "narrative_field"): + self.client = chromadb.Client() + try: + self.collection = self.client.get_collection(collection_name) + except: + self.collection = self.client.create_collection( + name=collection_name, + metadata={"hnsw:space": "cosine"} + ) + + async def store(self, id: str, embedding: List[float], metadata: Dict) -> None: + """Store embedding and metadata""" + await 
asyncio.to_thread( + self.collection.add, + documents=[json.dumps(metadata)], + embeddings=[embedding], + ids=[id], + metadatas=[metadata] + ) + + async def find_similar(self, embedding: List[float], threshold: float = 0.8, limit: int = 5) -> List[Dict]: + """Find similar narratives""" + count = self.collection.count() + if count == 0: + return [] + + results = await asyncio.to_thread( + self.collection.query, + query_embeddings=[embedding], + n_results=min(limit, count) + ) + + similar = [] + for idx, id in enumerate(results['ids'][0]): + metadata = json.loads(results['documents'][0][idx]) + similar.append({ + 'id': id, + 'similarity': results['distances'][0][idx], + 'metadata': metadata + }) + + return [s for s in similar if s['similarity'] <= threshold] + +class FieldAnalyzer: + """Handles analysis of narrative field dynamics""" + + def __init__(self, llm_interface): + self.llm = llm_interface + + async def analyze_impact(self, story: Story, current_state: FieldState) -> Dict: + """Analyze how a story impacts the field""" + prompt = f""" + Current field state: {current_state.description} + Active patterns: {current_state.patterns} + + New narrative entering field: + Content: {story.content} + Context: {story.context} + + Analyze field impact: + 1. Immediate resonance effects + 2. Pattern interactions/disruptions + 3. Potential emergence points + 4. Field state transformations + + Do not make up anything. Just use the information provided. Use the context to determine the impact. Do not use markdown or code blocks. + """ + + analysis = await self.llm.analyze(prompt) + return { + 'analysis': analysis, + 'timestamp': datetime.now(), + 'story_id': story.id + } + + async def detect_patterns(self, stories: List[Story], current_state: FieldState) -> List[Dict]: + """Identify emergent patterns in the narrative field""" + story_contexts = [ + {'content': s.content, 'context': s.context, 'effects': s.field_effects} + for s in stories + ] + + prompt = f""" + Analyze narrative collection for emergent patterns: + Stories: {story_contexts} + Current Patterns: {current_state.patterns} + Active Resonances: {current_state.active_resonances} + + Identify: + 1. New pattern formation + 2. Pattern evolution/dissolution + 3. Resonance networks + 4. Critical transition points + 5. Emergence phenomena + + Do not make up anything. Just use the information provided. Use the context to determine the impact. Do not use markdown or code blocks. 
+ """ + + return await self.llm.analyze(prompt) + +class ResonanceDetector: + """Handles semantic detection and analysis of narrative resonances""" + + def __init__(self, vector_store, llm_interface): + self.vector_store = vector_store + self.llm = llm_interface + + async def find_resonances(self, story: Story, limit: int = 3) -> List[Dict]: + """Find and analyze resonating stories using semantic understanding""" + embedding = await self.llm.generate_embedding(story.content + " " + story.context) + similar_stories = await self.vector_store.find_similar(embedding, limit=limit) + + resonances = [] + for similar in similar_stories: + similar_metadata = similar['metadata'] + similar_story = Story( + id=similar['id'], + content=similar_metadata['content'], + context=similar_metadata['context'], + timestamp=datetime.fromisoformat(similar_metadata['timestamp']) + if isinstance(similar_metadata['timestamp'], str) + else similar_metadata['timestamp'] + ) + + resonance = await self.determine_resonance_type(story, similar_story) + resonances.append({ + 'story_id': similar['id'], + 'resonance': resonance, + 'timestamp': datetime.now() + }) + + return resonances + + async def determine_resonance_type(self, story1: Story, story2: Story) -> Dict: + """Analyze the semantic relationship between stories""" + prompt = f""" + Analyze the resonance between these two narratives in the context of a research lab environment: + + Story 1: {story1.content} + Context 1: {story1.context} + + Story 2: {story2.content} + Context 2: {story2.context} + + Provide a detailed analysis: + 1. Type of Resonance: + - How do these narratives interact? + - What kind of relationship exists between them? + - Are they reinforcing, conflicting, or transforming each other? + + 2. Meaning Evolution: + - How do they influence each other's interpretation? + - What new meanings emerge from their interaction? + - How might this change the overall narrative field? + + 3. Pattern Formation: + - What patterns might emerge from their interaction? + - How might these patterns influence future narratives? + - What potential developments could this resonance trigger? + + Do not make up anything. Just use the information provided. Use the context to determine the impact. Do not use markdown or code blocks. 
+        """
+
+        analysis = await self.llm.analyze(prompt)
+
+        return {
+            'type': 'semantic_resonance',
+            'analysis': analysis,
+            'stories': {
+                'source': {
+                    'id': story1.id,
+                    'content': story1.content,
+                    'context': story1.context
+                },
+                'resonant': {
+                    'id': story2.id,
+                    'content': story2.content,
+                    'context': story2.context
+                }
+            },
+            'timestamp': datetime.now()
+        }
+
+class NarrativeField:
+    """Core system for managing narrative field dynamics"""
+
+    def __init__(self, llm_interface, vector_store):
+        self.analyzer = FieldAnalyzer(llm_interface)
+        self.resonance_detector = ResonanceDetector(vector_store, llm_interface)
+        self.vector_store = vector_store
+        self.state = FieldState(description="Initial empty narrative field")
+        self.stories: Dict[str, Story] = {}
+
+    async def add_story(self, content: str, context: str) -> Story:
+        """Add a new story and analyze its field effects"""
+        story = Story(content=content, context=context)
+
+        # Analyze field impact
+        impact = await self.analyzer.analyze_impact(story, self.state)
+        story.field_effects.append(impact)
+
+        # Find resonances
+        resonances = await self.resonance_detector.find_resonances(story)
+        story.resonances.extend([r['story_id'] for r in resonances])
+
+        # Store story and update field
+        await self._store_story(story)
+        await self._update_field_state(story, impact, resonances)
+
+        return story
+
+    async def _store_story(self, story: Story) -> None:
+        """Store story and its embeddings"""
+        embedding = await self.resonance_detector.llm.generate_embedding(
+            story.content + " " + story.context
+        )
+
+        metadata = {
+            'content': story.content,
+            'context': story.context,
+            'field_effects': json.dumps([{
+                'analysis': effect['analysis'],
+                'timestamp': effect['timestamp'].isoformat(),
+                'story_id': effect['story_id']
+            } for effect in story.field_effects]),
+            'resonances': json.dumps(story.resonances),
+            'timestamp': story.timestamp.isoformat()
+        }
+
+        await self.vector_store.store(story.id, embedding, metadata)
+        self.stories[story.id] = story
+
+    async def _update_field_state(self, story: Story, impact: Dict, resonances: List[Dict]) -> None:
+        """Update field state with enhanced resonance understanding"""
+        patterns = await self.analyzer.detect_patterns(
+            list(self.stories.values()),
+            self.state
+        )
+
+        self.state = FieldState(
+            description=impact['analysis'],
+            patterns=patterns,
+            active_resonances=resonances,
+            emergence_points=[{
+                'story_id': story.id,
+                'timestamp': datetime.now(),
+                'type': 'new_narrative',
+                'resonance_context': [r['resonance']['analysis'] for r in resonances]
+            }]
+        )
+
+async def demo_scenario():
+    """Demonstrate the narrative field system with a simple scenario"""
+
+    # Initialize components
+    llm = OllamaInterface(model_name="llama3_q8", embed_model_name="mxbai-embed-large")
+    vector_store = ChromaStore(collection_name="research_lab")
+    field = NarrativeField(llm, vector_store)
+
+    # Example research lab scenario
+    stories = [
+        {
+            "content": "Leon really wants to go to lunch without having to wait for the others.",
+            "context": "After a long meeting with the others, Leon is frustrated. It's noisy and he can't hear himself think."
+        },
+        {
+            "content": "Leon discusses his concerns about the AI for Society minor with Coen. Coen is supportive but thinks Leon should talk to the manager of the minor, Danny.",
+            "context": "After lunch, Leon and Coen are walking back to the lab."
+        },
+        {
+            "content": "Danny fell off his bike and is hurt. He is going to the hospital. 
He is not sure if he will be able to work on the AI for Society minor in the near future.", + "context": "Leon is worried about Danny. He is also worried about the lab and the AI for Society minor. He is also worried about his own research. Leon talks to his manager, Robbert." + }, + { + "content": "Robbert is very worried about Danny. He is not interested in the AI for Society minor. He is also worried about his own research. Robbert talks to Leon. He thinks Leon should man up and stop whining.", + "context": "After work, Robbert and Leon are walking back to the lab." + } + ] + + # Process stories + print("Processing stories and analyzing field effects...") + for story in stories: + try: + result = await field.add_story(story['content'], story['context']) + print(f"\n---\nAdded story:\n{story['content']}") + print(f"\nField effects:\n{result.field_effects[-1]['analysis']}") + print("\nCurrent field state:\n", field.state.description) + + if result.resonances: + print("\nResonances detected:") + for r_id in result.resonances: + r_story = field.stories.get(r_id) + if r_story: + print(f"- Resonates with: {r_story.content}") + + except Exception as e: + print(f"Error processing story: {e}") + continue + +if __name__ == "__main__": + print("Starting narrative field demonstration...") + asyncio.run(demo_scenario()) \ No newline at end of file From 2405373bbad4ebcba5853d1730c00c42845068c7 Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Wed, 23 Oct 2024 18:56:44 +0200 Subject: [PATCH 03/14] NDMAS doc --- docs/Narrative-driven MAS Dynamics.md | 105 +++++++++++++++++++++++++ docs/Narrative-driven MAS Dynamics.pdf | Bin 63124 -> 0 bytes 2 files changed, 105 insertions(+) create mode 100644 docs/Narrative-driven MAS Dynamics.md delete mode 100644 docs/Narrative-driven MAS Dynamics.pdf diff --git a/docs/Narrative-driven MAS Dynamics.md b/docs/Narrative-driven MAS Dynamics.md new file mode 100644 index 0000000..a92014d --- /dev/null +++ b/docs/Narrative-driven MAS Dynamics.md @@ -0,0 +1,105 @@ +# Narrative-Driven MAS Dynamics Simulator + +## 1. Core Agent Structure +- Remove all numeric state tracking +- Replace technical state management with narrative descriptions +- Focus on story-driven personality expression +- Use semantic interpretation rather than state machines + +## 2. Essential Components +- Narrative Identity (who they are, their story) +- Experiential Memory (subjective experiences) +- Interaction Engine (how they express themselves) +- Worldview (how they interpret things) +- Personality Expression (how they naturally behave) + +## 3. Key Interactions +- Semantic message interpretation +- Narrative response generation +- Experience formation +- Memory integration +- Personality expression + +## 4. What to Remove/Simplify +- Remove all numeric state tracking +- Remove complex network effects +- Remove resource management systems +- Remove technical state machines +- Remove quantitative metrics +- Simplify emergence to basic patterns + +## Proposed Narrative-Driven Structure + +### 1. 
Agent Core

+#### Identity
+- Personal narrative (background, experiences, beliefs)
+- Core personality traits (as stories, not numbers)
+- Behavioral patterns (described narratively)
+- Values and motivations (as meaningful stories)
+
+#### Memory
+- Significant experiences
+- Relationship histories
+- Key emotional moments
+- Learning experiences
+
+#### Worldview
+- How they see others
+- What they believe about the world
+- Their understanding of their place
+- Their interpretation filters
+
+### 2. Interaction Model
+
+#### Input
+- Receive semantic information
+- Interpret through personal lens
+- Connect to personal experiences
+- Form subjective meaning
+
+#### Processing
+- Filter through personality
+- Compare with past experiences
+- Apply personal values
+- Form emotional response
+
+#### Output
+- Express through character lens
+- Share subjective experience
+- Communicate authentically
+- Reveal appropriate emotion
+
+### 3. Learning/Adaptation
+
+#### Experience Formation
+- Create meaningful narratives
+- Connect to existing stories
+- Form emotional associations
+- Integrate into worldview
+
+#### Pattern Recognition
+- Notice recurring themes
+- Identify relationship patterns
+- Understand emotional triggers
+- See behavior cycles
+
+## Key Implementation Principles
+
+### 1. Everything is a Story
+- No numeric states, only narratives
+- No quantitative measures, only qualitative descriptions
+- No technical states, only experiential states
+- No resource counting, only meaningful impact
+
+### 2. Interaction is Interpretation
+- Messages are interpreted through personal lens
+- Responses come from character and experience
+- Learning happens through story integration
+- Growth comes from narrative development
+
+### 3. Personality is Expression
+- Character emerges from consistent patterns
+- Behavior flows from personal narrative
+- Responses reflect core identity
+- Growth maintains character consistency
diff --git a/docs/Narrative-driven MAS Dynamics.pdf b/docs/Narrative-driven MAS Dynamics.pdf
deleted file mode 100644
index 57e0acec5b5fc823c61d48f2739004ea36683fec..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 63124
[binary delta omitted]
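
The agent structure laid out in docs/Narrative-driven MAS Dynamics.md above stays at the level of prose. A minimal sketch of how that narrative-first agent core might look in Python follows. The names NarrativeIdentity, ExperientialMemory, and NarrativeAgent, the LLM protocol, and the prompt wording are illustrative assumptions and are not part of the patches; the only constraint carried over from the document is that all state is kept as stories rather than numeric scores.

from dataclasses import dataclass, field
from typing import List, Protocol


class LLM(Protocol):
    def generate(self, prompt: str) -> str: ...


@dataclass
class NarrativeIdentity:
    """Who the agent is, expressed as stories rather than numeric traits."""
    personal_narrative: str                                # background, experiences, beliefs
    core_traits: List[str] = field(default_factory=list)   # described narratively
    values: List[str] = field(default_factory=list)        # meaningful stories, not scores


@dataclass
class ExperientialMemory:
    """Subjective experiences the agent has lived through."""
    experiences: List[str] = field(default_factory=list)

    def integrate(self, experience: str) -> None:
        # Learning happens through story integration, not state updates.
        self.experiences.append(experience)


class NarrativeAgent:
    """Interprets messages through a personal lens and answers in character."""

    def __init__(self, identity: NarrativeIdentity, memory: ExperientialMemory, llm: LLM):
        self.identity = identity
        self.memory = memory
        self.llm = llm

    def respond(self, message: str) -> str:
        prompt = f"""
        You are this person: {self.identity.personal_narrative}
        Your core traits: {', '.join(self.identity.core_traits)}
        Relevant past experiences: {self.memory.experiences[-3:]}

        Someone says to you: {message}

        Interpret this through your own worldview and reply in character.
        Express feelings as story, never as numbers or internal state.
        """
        reply = self.llm.generate(prompt)
        self.memory.integrate(f"I was told: {message!r} and I replied: {reply!r}")
        return reply

Keeping identity, memory, and expression as plain text mirrors the document's "no numeric state tracking" principle: the only mutable state is the growing list of experiences, and every interaction is mediated by a prompt.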
z$1Sp0#w`?T#&vzQ9=C0~wS1muX}aH!9KLpL`|CBOG(K*3jv}itjt2{gj}Q8X$8!?v zUZ=-HeYK-B8eK_*TlV_5P|fVBn9RC44C5I)d*4=b`IycE#T6vM=JZ$;bJR3$%L15( zX7w)fmtiNfIAg4seKf~`ud2n6#KX&Xpfw5h8fYe=Y<#ER^z=i!1dE z!ju9A+$1hof{ z`28c_wKvB*)+bVM`}sP@`V02B?pjyq4PP*K;DTqXickY0t8247uGPTOv}MUUO71O@ z-tPS3XyJ7mEjEL~l5UpPvSzh6ho?}$iBnFA-V6*Dh|bR7k7D42hO2KXovM@-8e)2Q z`iM2#+0xIN;$SAaoOVLYXSZ_fLu)vKktn1u`*x`r7<Vimg z`LS^c&>1Q<@sWu}Ew2?gO)c=tn_MMt%fafJ4ujG_{X~l?s*Tlj7Nd!|aFM3*WCFq* zsoGSZ-_I4N<{BeRn5;m@QHR6P0<9Nuqgt_ah8}Q|qJSx)-*&$KEwLVA@<%<*(h?KO z&UCkiuFWj2zb2@tlG+JlHRIBnC~N8==R(sO$Iywc^m8^NDaAeMQ0^ckF-fs2v5V{LmrKdrsu#Yr<&^RZ+(ng zb`k|w84|Xan_Qw7E=Dj&7zXS}LK_nl#|Aumwfg0y`<#@V;xI6U(4nR(8?obeJ}RChH^emjM@WDq?Fo0)k(+D*v(=-k&3h=Bh2bR;SpD&!@@OF!@?H>+=^+_%qiJx5b%VJWh6$G zlEM>y@CVPWD~PfHB5@3^kCL4*T=oe2A$yBb05*t?rIDRjV?Xo= zi93?yXHV!aV@=ezG41}Mkg6$SsHds8&?FAuVoVNK~s4) zp8L!nLWZJEm$cC1=;YIyi@1vv?kapc1}JR{J+7a#1q_B<1rB#)w}b;@#HL(7C2@z` z!jfv!KnX(xg8XmcgzeQORPge-3jXl9SF{eVvOpAfSo4UuOI6_PNWD)12WDq6rwVf% z(T_V%?U7t24lm{>U}!SVFUC9K5G-YyDz)r#2}uOd+2Q09r>M2O`wV-8?Hr^A;zwjn z%t0TO4+VOA&$2;RgdQ`W0W2mfxKDWin`UpjRy0r2*KLwWYo#K(gr_w#Y^3VGVV}`k z*7E7Q7~>;{JuL$ccKIZ9m>xgSs`8L&&AJJy?}AO4uUY2%me>woBH9iIi)xx20!1$| zwhq~{C9hs~+HxI)_d~=wPjL*XE;@AujF&@AsItVAw-Hv^ z5*%J-MjaOW8lTn_52OiDXGmEdIhMT)0jI;UZ)(8?YAX9dp*22d0j^(4b27HB6T=Bsn z3QX~LJ;Kw@?CH?Z*ih*~3kmih)C78ony0`4HQk|K{+tL%okfPaq}+rlQJwYg)-YVc zs;6-s!qRd9zJsX~Nl`VbRr{sk>EG4L#BpvZff~!&hj;y(1`Xx{wR+46&NK`eKbJKZ zFZIuZ3>>P>g0K%5n;(kq5H)?$*dIupNMszq5+~K9!*@sa8>9y#FQJ|gxF#UUw ztFFg`F9UNTS6%XMGM`GkudPf57su(ybD*azg@Ri|!9&TIlfkH<%rHWMRLAlZLxi1-K;#EIUxk3NYB z+|E{sJsDTgy>O+47z9d*$qz(J--wJ0*o%)Z27aLTT=&?Tsy94==gjLbXu{KR%OVTDdv_6y7~6gktE(EUHj1xf6z{ zpX$mVP+l4125oxFZiMk|kZi~Th$1yQm^KD0M#+;+x#{S^nTEcwYmPCJTNnaOLo~X) zj==M|x7|D`fNL|m*IaqOsxr~g`P9l^$`~qz1c(Sw2F{lC0A}jTN)17QY7OgBz?UN0 zD@$IGf;+{5p(J|HJEO~=1BwXGGwc4Up(K5Pi+|d1?$PEzwhie7-51(J+g{RDkgfY_ zS08gMIlJ3I4jN{DT?U1aa<1XQJ`gdjbLQ=+pW2fi<6GKX28X<}v4?v6@|d6`dzi6s zG2bwZk0MLk*HO5Zhf{5;s%T{jHQI2Ww7u81eLqX62SE+O+1vLj;dk7$qn*H);qe%y z8}3lDS6Lrk866JZRp36nh~Bx=LV3`qnGfi-7-jx&dx(=bT zPoqkjF^ovtCO#u$3~wFc0-az?5Q8E0C`CJs54fYESv!>>z?0WVD9l|>DvB|8vsRRz607t(7y46W;?VcP@PCt%t1eX9;y5o1K zo-RleDGr^FGtZ8=H?H~EF7uptdpHl|rOL-8w5|L#L*~_s4~Q|u)fo8WVj#u+Kx@cF z#K2eK;W`gs-HJLCq(iYk55U%O#nJhE=EvpAwoaq*T2@%EHwX0hy=ddFwK_qdFn3hD zK<71cVX*3ZJByyR8ct?Ev?yp%B@KS?dgXIbfRu20AE%S!i{XTQ4aEfODqi?w~rfZV1ceC1&GgZ8-> zz~uKO#YE~x>2t@v&-sFotD=jA!JTOp13M2V(=1R>R{C+?z@#+)u#EKm_HRW*=76pK zUjME)E-Xwj;Gg%#LdZ5eq&{YNhiWW2FCmT-U0N}sAbO)U2_~ckcBd}0TQ8}o` z+jd%hZSy9alLBwIS~#nHq@!;tyyPRfCg3h|x#JHEdeT(s0Rsg3#}PU3+uqA;s`Ji4 zxUk1~oLGF??Jp}(-gzMEukj3}Eh$zC7u-h~97x;EvUf2ZgW^|`cbAj1yRu5D(DzP3uG zHx!SWsMmpY>s@p_Dp!+b-b5()$L^i19`)ra$LG%2MNPm8(HYm8z|)#Y8hj# zdFzq_^5U^U#k;tw31vY>uw^7RE_(-?COhR0w3Wch4Ca$`cy_{;t*EGJDV{r~3*)y} z19i9U-{RjqY*h{ji+`yPBGSL3MOa2Je{$AVoE|ABCvm~If6778|H^;7lnf*ru|)i) zVxhp8!o%GpN6Wpy;$D%0nHh?VrmV$3iG9*TwKBNYq??V zZGu(#WN>TOj*T}NVG408MW%vil62}0moH4c{duRy-{RH@-k@uU+H~OvSrc{vurN=C ztLxJ)!PyjMb>lW2;|>)o!&6tE?AsAOAVk8=QT|4oov$th74Y^c5O^=T<;#*WOZ9b3 z`PQ%BJ%@=Iu#MQ|T2;oEwARtUTnNS0WAAt%;vrnK_sBXA$4x{wMehiA&>-$|DD;VZ z>-RcR&gFBn?G-goG32?ydDu~i@KU>)qxfkv9Md zPwD3F`iy2K2m5Ros19SXZqp&S+0f2BY6rf!B*E~ddo|YW><|2C9{Z<@iPOIDBPAIr zG-2ATuS&+H#~##e_lMwpa&&u#iTql2yU>Kny!DzT*!ILm4|uxUO3t=Fh-5&0^p2hdKKX zp|+Qy(_GP`gkkt0t4i$SZr=|VHj>ZRyUYG&m**E=PBd*!v^&REg-w{ZtVqK&AHD!| z?Gd}_-u*E{o44i;e=R z2YO{Y`{(h0pE$&(6ol#&P2=y&vmY>CfP`O**dJT?xV2<~soBK`GTY?An-DqS}I1$*j`J$^J7g^ow4eqta7@ z22jZ=DdmFVcf_xfIszZ1LJ6-J3%_U6KPWf*!j+)0-CG~tPxF_w*H21nedB^^0pR^d z2+4R9H~3amwWZO9)<6gq7C@;k_jaYQ&{C-weiYVFJrD!3U~)lLMdZCLZz1QyCE76h 
zboepk!obOf6KH}EqtGA|;HIF0=?+Q!SWldY#iYM-<|G37)7T#VT(gN2FbSF2CIWf< zUf@>PLvyn5q{Hyye0Cu#;MW~=$S_BTcKKA_8>rx-M9QNz$zCaqT|Lwf#zmz0+5823QXClO*!Jx7AA z)c7l}3?jo^*%=X~1f6;!yN=o^_=<(5Wd^#U?5-OofS#sBbf9fpXY+z*J%CqW%R>U; z(r5LA+}>|Hq?RLO=ko57Fv|1AyyfkPCb}k*=v)fgl^4p6Q1?63$>g{fVQKH!zuw~1 zhf-6P*|pQR@Mm%Z<1#K}y`8d(({+4eLbFuOrUivHuGM}rJUGd&ZCm3fdEhO*8%Qgr zm#Tqe>w$*ds~)@SQn(|qNEjDxn&JhrC6B5Cew~WLc%z>3_)8mP10}5bfH;0UX65oI zw3uuf3aiAjG9`k?UOZ;tMHxJJ=d79CMnx(LX3#R)ItNb+ibI^1mIQjo)R|#5i-+U6 za%OdXRS!oj8%Kl5s|%3tG;>M{axvLU1YR}`UD%=BfdWaD4VAS}Tqrh|vSZ;xk=y|N@7S)Z#?^@czXG^>KyMlDs3C*jG0 zzW)(>;2AkJ?WnaMxAO<4#GCjxZ)P@v!^49<8Cx-!R_-#d9Efx(xE0!!_3_GGQ%x<~ zw}>vNL&Y3YPTN2vDJ|E%=faK(;&hB2n{U!Io)*W0ZxoMs5f83i z<)b0I?-#mTU$-}Vp^nubd&VJn; zr0LN0^gFg_%HHYMt1oUWyNbo?$1Jj!Z1}w1a$qaj6k+SG)SV&=MzXZrqr1GqTAgR_ z+$T@ccvx7CQndC3PCdx=3h_qu_KmTD zZQ!se9GD;bw)yeMx}qRn3~P}eAe`eqHz!sk9?7M zoKg}jc`?UG9FmSSf-HM;ux`CeLs77 z=<=A=x@s#!a=CnET`?ebv@y=P>$=L9F_xrTGL73{rsH+FSwiR{{nY^4-+e*mov86+ z2wDx9ktLmJRp}1Z2mA(HlD-2|BO?PuDeMSq+HR#PGN~k=HDRGGlLR8W`a;_S=%|0- zs`y%dM^=jcs%1rJ@S;M;@#vG9Gwjz)Op-c z<9+W3B9oy%wdjWjGVR9Pj14pX`~oBFfVXOMJewVeBLg-PPH*4(Mws{|2`5pt|ec^lTNhPiR2c6U#sZvd-HQ99} zPJl-+J!HDwpl)C9+8cnBFbZ{N zvM;2o*XQ`S6nEM9_s5b-^r-g`=i1`Nk#6oI*ZA@Jvo{c;wr5SHeN#AWS3_DSkj&jj z~*M03;5+@i}&p&GotdY#qqc}B(JLGD2tFf(F z^m{gaX@o`Vlh2N>0ETF8LzBx6UPDu8POeeHTVd?16vg0ttDw z`3G)zK1tZH`xgD(gr z>~nAyIzBRTcMjsYhX;uE8n{|$b8!DC32QymcYiUc}mxh1nm*GMFLfB z*JUvgnAH}Pb_NTlL3Ffbo3s*J-2h~DlcP~286Xi>jX5(_Z^S|l5}zx*v~;;QkC@J_&6+JBg%A^_mgGC}ZGS)=}-Mc4O#WpPyJ>or;gJ*U%; z`BelCw|qlT`8_K_GM#_AKs7LdOn8rKIV=`{P}=GZ^EDR4ruW6%xU&11TLN-six>H0 z(+UA{l316YpP?yBIB?2iC0Xe;m8c-IqP-1tcCo4X@H_a*(kN z1|8UVs#CO6C*_D|6DoWEDM?3UW&1(jt?2tzNFZuh!JEKDVf{6i;i_d3I^P|m8|WHS zE$4QjI~Dr!_yr)F2Gjpv-x%os!>;fDv|Y)}K*#o30=GDqIEzTc_6 zY~`Rl(_gH`H)qc8Ep2Un9$ByTAw1_CVl7lG@7s zcyG7aKAiGh!AR_fvG!9#)qSs_+h^JrWj{RbRnD~I*~?b+mk`h9^w(J9WzT`mv~QS9 zW-WK-@H5p=Q_k1R%|Z93&&w?|S)S&or@Utk(}`stj~1=^K6|bz)r{oxPT%b< zEG0+>zd=Eo;9=VXvjd^GR}}{cn1c>12Z~!>PXH7bH1400>=(e1<83)=BMomrluI8P zmz(u#^||Ki8}5ag-h(c;n1*NZS{#=$4lPOTs_Yj9x#~6?Rr|w29CSV{o@RocVCtdF3{8pV1Z*@-keuPOS? 
zoYxA`fr9Bv`{{0d-d#-Fe)WC%#F{Lte74R&)kk$EN2spg2U}9m&hCQ{<9238#|(sO zc5Vjmy}pN0QqLMsDG&PWiqdAjGn7_yXD>g|NbC#jx9zo~KLpFFjuK7b(occ6(PlfW zrkZSNIY~m1BXLpnF=06liws()fWGazcD>ra68uOXOqiZe0h&BS;!2U_-0%C_tIlcfv43tq^aoH1>`d&uTvztKbT_%j1b zGQI4>eveC2vDBYKk)%CX@?=&^0l?nrL1%C|{`AmAHxh)aQ@ElL)|P2yD_igO|tf z5tib7+8xO8gcuO~Io{Ez>;CNeF4Y0G=v~(; z)%KQl`*JFuVyKnfBI~cZ@*JI}+xBt0x7!5MR`r0HlRxzV%_B_9{_GLo%x)+q7e{zuNJW7@|mGPLAwa`+!>;>j6zHMoGIH_-gxm| z>{NIfWR+m`S(AFrUe&S0&8h12=vISEtX=g~zr}c8EFnr|mQ%b5`;lFIguB~+K|_82 zT!~HE{UQn``RM<i(?m^_CCS zAn~aY@W!6a<}ksBm#^yxUJNG=5gU$|18gX8@DFY(8vav!YGeCdxD`!oUF3^XjOV@m zVvx=gG{^8^Tb7iyEvL4625sPxnXebAwFoGW3(eY4<)c$FM@_ zPR4BX=WZJN=%M#~Z`x!DmnLIs_nJJHHZ=N)R}`CE??ZkmDWAtWLrsZyJDVtAA}cCS zJ$~W+v1)C019c&D>gi!RBYH zv8)wsIxRZKg834*J|Y^W#$)hnoYKilC_`%PNpZu(i)ivKCRU7RTksOQBZ(c>Yk+J` z0vmUBF~9Y}i^!A?`*bpujRUGkjTwx=zTWHSeE)t`ObIU!1r8W6c|ETFbk!r7-^VUT3(5gRBFyUGip{*-P=Y&1{y-MbB zCEKiK?Fay`oz{2W=s_j@q*CdpByEcVJWK2N0u!=UDH*Xqgk&YoTuI|Tl9FOO`YDZi z+Mf!43>y`}#^TZ`2^G`9@F~Dy!0PWLqNG0mk9w_FgyOK}>~RI#ygI#xPGs+`2GoJu zcB2}Rq}<;t3`4|!h*lhwFdUSR9OWBOD}eocI?RDgAL+Bdis`t;jt{}4-p$;pr<_!T zvi=YtF$XleQ%gH9j$%6J=W-rqmj+kE$R8>UlRM@`Cbcr5jY(lJsTQozr|=aJ?1@565Qj~)7Sr$# z6B8U2s}n$&luFLTwEaXYm86r@4v1M8REs=Zl~-jCJgLDr?bD|lT#Rd5Fq>F*7SKKg ztMH$sf}d?vQ_XNl86vZQx^h6ZcOqE_)+oDVq*42!9dsnu{O3t&Yfi6ZtLd|g`^@L) zv8wzMf%i#Uv13|u%RNt2I@3Yqy$6!pJLc`juo7q!_%ijyvx2P)t-c;j(@^(oA1xSi zRuIiFVP0qwR34-Wxb{rkOh3loEK-Tz9)V_!eH?YFwh<%;orizoERd<^@Ue%Exe#ta zP#{ot$8Q&M_tFmuM>ct4A?I*~wl*1Z?5v8!svWtp#hO-(>6kge6`eIj54G_I){>iz@e$6lcR>fP%Ce5cdbG6A&tna~lhgQ)`R zSJ#7Ra3vT0MrXn1Z*5bsRJrWG#&o@`p2w?`yWcs~`5-xHa@{-Z0jPf!(t?0;Q_Pfz z0%z2uSS{i%kK1B5eN;{Xi;11(}$x$x(y+~wu#r^~(l!R^tWwC>T~*4wT^ zf!HoW`Cigi98;LsMK0q>M5ncHv$ckfMO`%? z1m_wK=er$)6-9Epw=T!2wxi#Ig!n^Qf|%p56yWRYd;D6NgqH)D)R&{&)+i}M2?(Q) z!02B)A3V%C4@N*TA`15Jr6})N+K31u8a#6@OwvGyOgvgcW}$u!Jw6 zNiU(<4O(`*B;aQK14*4L_`)vL{{wPhwtW>{&W7$r0xE%Wk_8KIpqy%T%%5Mt&cP* zP!8LA%ydgxDZv_t@qpwhRmEy&2DOzUpf3V3IEe&;va%l8K;r^}-r6NN)dYd^Z^F_p zAf*_cR52vcIV4dqB!L*6M1n!79{uM$V9x4M5Wcu{#DWH(%c2UX3y>5@w=jGxqNFZg zS=~OrE?-{#HhKM8St_~Opur2dn1C{dfKo;X#SpP8V)jgS6P zrmWi)G8+7r7vo1@1{3HYKP{jwyr2fptl}kaSXfrO&Uro2?l3K-%G}93t1@l%b*xcJ zs-gqlmH^qV`+nWty%~vhaQC3|9Jl28iuILMXnjHH`cqr?%D?1%voYks)jbO}*jT16 zc`vTQ&>^G(c#mM&qYlLhDVg4#CcrRUi&2-3=Mh^Sk}bYbk5C=bWK?6uzXI~N5w@`{ zX?$b0YRJqmS<1E_=|PG1n50EF1DcFdtBiKOQDv24od_wVx+Pm;6th*;P(7ig|InsG zVvIW}4KTeC^|shz+^7+eWv^AB$V^NHFxsdFo#bLbjM1c8w8;dvdJ5w&m~Xd=g$APV z?N-yi-D={uTXpz$tMC;e6}|Ops^t-q(cfNmllUxEfcVT5wPtU#>n3SyaJNNM&$nb| zAlt#S&=tY!Dg7E$XR9y?LwEFMMAaPH1fMEZ8w`@;7Mb+91rBAzOcEe8xQ1^}K#i^) z4#i2Y9FSbIkom!?@TgJfd)bxxUUnTVQcz47bt%z`OB*-e^Rxfxa7&U(=N~20Wpy21MLeSf%=%rEx=Wswaq)zVID{}xz z=*6u1UUhrlN0cp?4$E`wk~jb%^;jp3uBjFmsBQSc~wUSn^W*v%)%)OHMmww&K zA^Kfy!KcbLz<1FK=thaN68zhJU}R44wC>&e-o{bTC5d_ENFavssY$ zick}${!s{sIVXavNq7R{Y`l|0x}cZiq0_6ZGeJ!>sK<>!A%Qs9&;op4&;ot`DFfnT zvGe28T;Gv7o10f$(qNy|T-*s#m!FFFTV5$qx4!v|LV)yhxO}VROE`eL(~;f_jo$&O z2sCdiigb#HPyBy3Y<%!cq5Q2Srg>EwKL|v~EPmSFB~)!>ThS~usvnfKcCCI$oj<+_+c~TK2n?hoC$*ea+9;tSnZ8%ug5W0bbo(2+>>UZG`vM zXSIP9nE9vBCIZ~cXa+04r@s)QKT*b)w5Q@sZRe;O&AZ6DdqybK5fhU!jQp z-;XsDkI8=N{WU6&;+~3vaP%VbhHe@DeXj%M&2=7Yp{lRXj14?F8t^=|ZInwLBQogA zq!o$gT8#$^9D6w1 zT>6DxNPCOZ&O;I}WOf(2hk-v0uC~!B55Or8h9une3fJouvTGEw>*6tfo*YSkG03Wm zWBaGss-HMn%SvGQhv7$H^tJy^>~FCuCt{MwXc~wpw*eo@(%Rb$mo4}CbI(O;yZNT8 z`c81d$IBr58mEq-M|gr&S+|4i%9C!fL0OPF5mU`&DSBw+;&ou;>UChw%CKf*blD!~ zelBt-&dMO$7PsO;_=kq9;V*|dW8DkTNO3Fl2q+fR{=5Lden97lm1ms$T;zl(CDf?t zV&p`Ej!}$DziRd)!QJd4&?UQuFZa{HN=P-0mfJ5h3f<;u;TSCzC4CQayaUJOwGH4IQ^7#pmgd1hEZ_>RoUBmBS^Y+^ps65Ujk 
zVd}}qPI&kyZ!zC>p(9X`(TqdTQH?{GY(gSCxzQ4-L7FuCpRomw>ao~4_v-~jW482| zNiC0p(AfP{@muNn(~43q@iZYAl|OV%0(s+Y4XSFr*&liGsDPoP!)koLSwHLJYW~Q& z4lmD@|F%0`t50UDq|iGtSCJc%H_`PBs2*}_GOyALB{<14gxZiI4*C;`*(xgZP7ck* z(2G^ClF63E+W$kYr9SKC6n>kmow3W2-Y$`q13ci+g2^+cj&l!Pl2CNgWeC-28GSkx zUIIA7ZlQ*;>qkg|3FjUp2N-d$1=CyB%`mzc=yzdE5QT~JD9{3YV^hw(UU)em7gMhz zjS|Xbbl86$DzFMU_sRusuW!?H`C+?fiaQlNTtXKW+c$jq=;6so+V`?*!wu3e+-OU7 z203V7zwC)-4*P1sQS>92E10p$M<2L@n4&GL zp*7^IzU;O8#exH(%c~3i^Ks-02ywhY>i;A@`5!PB|5JRDiH_+%h9?^p*Cc@G;k>6+ zC+cUI5sGb4xf%5_$zIw$O+QY5$1ng6+7*aAPG82+kf8YA$yApFUY9xXy zMqGQq=>pD5%F534j3+XgT_B;A|6YQLvE4m@;UQlU`|YRD*27W`q+aO950b>dWep4S zo7L>(%pm9pJCifkvvw1G**DT2;j9wKiLMi}Vvx`+F(d{saI?<|?nxrA(BG-W-S0$@ zOGY5cs;?TduLBCjAl5KSAtv1}J!EHv(%5jQXl0T8&4KI9I>y$FD4Y8zh=wOVnmXJ5 zys00;i{=QGQ)SD+W!FP1lvhQHH76XoU5{M{bV|JYjqJnuBRRcArP37Vt7?V0)*yY* zfi=8w`&dn!D!5%_YL1Cj^L->b)(ZB|;u?q9iWnA~=@4v3iZBPUBRl~M{~gDfl0`2` zp98zc?vdBDCo9$dHLZY|)+dH`y?-}D+E1??1=XbMEZvJ7%Cb&;c5PZ?4eaMc5}j`6}SFo6!=#v_^(u48-iBY%}G?z=`Y^r zyQnB5-e22M_(tZ?irCsX{iTU8;W7QCeN&k6nE#by!DIP1zQ3H(#^0DE0b4gbO}cL) zGdmLvJ2N{w3mzL2Jq}@%L!|o#CH<4d zqyML_e@6zfeY5`yVDlm2hd{r^$VfA&KDi=qtwy`ukL zSn$6Fl;`{Rf74p&|CtW|ySXy{_v-yM-~Td>j(_dn`FlDsLC{K?8#&@>{_OMh536l z{wFs;&&bTm{2v{`MTe(X;@|_Xk4d)a(~2ic#<)&=P5a6^D?Yvzn7>#yA3{=CP&s%y z1`YqU2pn7^@^u6Vty}j(vKcb@YTXK1h-P?-g{5V$B=xGse8hvo9RRhkCGng0^tk}| z>U8smmv1+3*OuzhXvZ`cqvO;>CcC4FNlGvpJUalz?7jLyQcBxn6I`v&zRQGl>LSO| zOk*+`r$C~|Q=jCaYiSC45u82j7_PS4%UI7A>BqJ;!I=+%_2zCBB+W`5wR_A+OW26r zEbyJGlRQNl{xpe3>-H`s>a=^Pu8h(XoKaVc(JZm5mjkUaLSYg@nzSUx5iQ5|>)J|5 zH_XE@HGha@bnB0+qaAMBQDVw!wd*)=^8<+e$_}@cz86`ZrwOimQ`Rw8U52j+nuIa| zp79~i?YWl}ClhUY8%mHT@Y||mjV1%*jv${vr%JM`wu<_*`O>R`kGdRvuWEc*d_?;? zxX9r+ls5C?>5^m{Qo`FTB^gJ$Hrf|Qqn>sFh5^Xn30tu3B@*ZyX~oR@gA`qzgG6>h zf%Xad>$PT-3&^Y-g*VlNB_c>1_i7f=nWH~doZauY<%Tj0#<^85K|h7vfeKiDjb7*K zQ~fk0F1e+kKBP(T*JMtL%pH@@LNbxfK)}0sAGvlE#Bi;RA4n6V_v4ro?hFfL*%{F z_mwJ&_C39-i#+OWi|gWRwcqQu^5gE`Kk5Oovwq4!29W|s>_~o!{6h{ zS9(e-{gu~TSDW9IV>(1ygdlAad8y)!rwNY|o|U(xmj!*AdzQJ?(5 zS+delq}lSck@g?&l(X4xXFriU+p${0M9v=SAKJu=|Hx#@F?O|4Ff=(x3BMfI&Dn|D z%j@&|=#a6ZUB`}%PTjh5Urt2bGpUv_4(d0nQ=251FqJ@TAYTbzlNtzpqCV`*3I!5lEHZw;v*fK z-kCHIcv}jVP{U!zpO`ar$`@UTfd=Y zHZGKBG1fQZPZUTfAlFw9E*de(A0n5RW?{P@Xujt(=NDaWB5N@Fbgz^* z_Q^0KWR7yd%)cy$@%X%f*r_6~Jd>rFRA|HGmIT^uZ1?oW?2Sr2Fyy*0ClVa%F4>W3DNrflGql_wH6Z?BJM0V54VebQ9}q_}q$GdDpQ zNS`CS1&>RlYnbclVo8QpIS$b{X*+T$U3{6Q`sn`P=V~ObZeGpEoVTnaD#4&%pObma zB~PMQ*Cxh1&6TH4;(^402m%jH1(i!$i63R&;Wu@&&}qk2hTlMMCyc5uk}NW*$kCRC zDActbi*cKk$M>WQjsl1AYGgF^ZWKem^`eR(u1`uT1${6)KWoRPp+F!dlo7x#j7vKD zR8}`P{`S%$m%{i1@IfBZiL={UfPDcMDgu0Srg|Q*LuhHzOu7Tqj9(?{{?<-2(KRgI ze~%d5QE0SnkC=3sQ=n$Qc%LT;o*6mwS<>F5h?yo0&N%H$+TJw3!(FAseW+uR&(Kv= zp?R`iw1aqTIEPHKDBoOBBZ(?o>V`@-F%<-iCHG_AqG!ZO2bJq3zPwP#t*{-rdHMg zoAlZ8`V5mv%ME`-3tHqc>MoY;QJJy7Aa+;_uF zbVmewbc1`Mj}YYA9}l&(qtd!`w3va~>V?y_OV$O3POy(XD^9eI@;EXVj>l3(`7)_C z`diVuIA38h1)L{w&y44*&nk&qGPHB3$Gum|B7fM3r+VkbamlX~HQ)Kq>&|L9Wq+qp zA-0cYxP3W+LoYK)W*(kSBkvV0;Z%}}3;^aI?@JAqyph-AX27~oD7 z+Tg@=qx+^nltlaR$>z)Iv*c#;^S6cBuiOxxpBk6d5c$Z*+Fi)+kyBVt#gas==UOw} z5>31N_PxI;OKw{t^?`qno$qyQM$9d;c9L=rabXBDQmsdQF3=RUG2Ar_;-2W+$EzxR z>U_HO$1o&dr+24&csk|_*%S>oOIUSQLVf0k^l=5^f1QQyw;brw&H6!#2<8$`9{Kj;*y3Ymc zmoA_5%rSFK#IPA{C(McMEa$zbKJPEyxJkn;0ZPhc>2sS$!binNiZ`?ZqL=;K@D6}V zn!OZ$*`?d|=M6J&-0Y8c>ra?3#;Ip_Q!eJ1 zv{I%_n^fav1Gm2G%$=Ir#g>s}<9E4smG;g1aYN!^A6}_`$&iA6?;34HsuK#c-8VLH zn?M?u+3}g<>X|)$jsW%RT0BajZc8;pOOyF!_H(Yq(JJG!m8^50N9{+jH+m62dOK)u z*gFrN$myf3caN^@t+AZG`h>5{?orrl{QLFFpu`#m5q8D2;>>cb2>J@8MY1KqdRH>d zB=51aG0?HlF*uXS$`}RJQq@w}lGt+D(rSz;2CaZ2_zOrY? 
zKji(XsG(${TMR9#(g{bt+R*{N#B0LJ=fw?+Bgi-SeRzY*SI@bxT)fdf(b|*cJXKm2 zZfT$=;BE!cUaVV#B=d?L$-2oSA?yAxqCyFf4VT2n^>MeXO~VG zEGb&Q!&fmdawcGt-1}9@qs}&ezODT%&)b?6?cY+pICb65c|FmYI8L~ydBgV}g}Ra5 z-av9Ic)4b1?%A}(+j&^6omw>D_SoU4VYw_QPK8GevDhPepx}`5Er^BU)R#}#5$8qI zDf7|wrq#iw?T6^6D4 zrtvZMLjM5l(5+K=BtGknI0hA^{(zdkxJOy@b9Q^}wh!hYhoa2~cujRz<0&8TKx5Fd zSvFg?a2vZu?57-}pLu+^*H*j7)t^L((!=kRz@0yu2x`4iAQzt5x;gfw)Sg&NTJ&Nvwf9U7 z*8!#1GG!)*-UbwNS~Z^=JN`0w34<@kN7Of$MGu*zh=j5$IMJIGutsUGcz+YGsnz!%*@Qp%p5Z_#;|tY=6~+F=f1l2-uG>7 zwN@HQBem31yR-A_Ub0UIPinKJ2o^1ALOBxSC4mk?s|-sPRAwt2Yt@|olj*Yv^IBpu zBPp0P%Q4=%Nn08Lpm;TVP5Ywe%xo_5o@s;9RWTis++UW-I&T-X?G+_4$T2D4$kRHR ze1!H&v0jjDFE(Mp{G}ja(eNGfP`KzS#7FyY%VX4ge2gYrR$N-AxpKsJYLeJL`eUn8 zBO!n$mC;@zLSF|w`O=LfF4B$G2@K+~A6CN9GHQj>-|kr!vP&cIEIlKCko-x!HNgmzb1*Xaz`zRLO8fp&Ze_j`e6ZEY7uTo606hAsl=Bk$vT2 zPI-;J50yMFDwTdcvouP6?Y~r3F)FMLrv<9eFXKLrYW9bG!5={m?%lDvc9x-{ltXAa z6t0E)L|=+a)N$m*@=N8BlT`2!BJG}8=;2O$ey-Db(R^hLX;@#r7Z{EZlQASRXE|7Y zpaw8a3vJUl$Tw-kG&Qvxq|%hnELJ%!;6oF0!GIXQLdT#-k;xX~+C}xo3$^NRXaZ z9?5Ia5Yy5lQHivNPRy4p`6iy6HYN@`k0=;+oq*Or-RRJlcQ4YzCu1D3Vc-1 z0|9CL9_j^%uVGZQEe}6!3;)kh+V3{Xcg996paVzX_+ORB0=2u7lmg1X>L$a=kYY0C zEbRM_9)6iBg8!Mt97y0a+B+2D9l#%{c^LZ#Vfzu(nF@|Gsv*#C0u;&ujZwnSfF2S^ zE*L`X=NZ=@f;~_}(Ej8#$R8q^fW}Y;tqXxY5T}+AZ6i*TqAmQoYXJ*BS&pV5Xxhj0 zm8P864MF46*-n>XfT=WgrD7btf&Mw(*DPsW4 zcl_UZ@{W3ZbbeURj4+&{M&Qe!xRTK9ihQ>&k>+#BFT~tQIQxG@mCI4qWMumq$>a(b z1I-sR`p#I<*Mz3~u>0gKD00vV3p7~~IJ0J;W)e8lv;t1%;00p zUo5`+?Pz@lrYAWWs4fX$*M$NgR!0yHeb|sY#11vPcmnve#47_==g|AC&!l!;zt)yk z25`)^9{3mGo>}dB?Gm0+Xn}WStblb|;yZxrf_I7TSG(ZfvUl+FNAUbY=!+1sNbX}?)h%{&02Tksm#tud}(0#A_MzD?TAKt^@ zh`J4Vnco?RwAkGze#$@g8aO0==6A>|h6KW~7~D4+*gH7=l>cJyc=1ZGpndkdWZ^Z;JMV*pd~1y3V%%aI}fSZb^6dfkuJm74z>pKcd}w?q)rL*+PFN=YsY~eDtM} zea5&A`O6BF=JcgmV%}mlQGP<%rGn|_=YjZ(dI!E+fVAc1LiR|_4(pe$o_g+HJPHBW zyn_8;0nH*!AOM7J2;T1k&^@9I;5$$~QoY6cApYXL5Iw@RL%$bq$a|#U1A?0fU!c9v zU&OuWdg6a2dHm3W?vb|#dUmo06Zr8${6X>!B2d!0U%OiZV82CuqjQMxg9b2o zL3>2_A@R!GfC)f;Ch`{w?&_Z+-RaMvcp<&0M~a^j-8y>_zCeD!yl}h`eKPw>5`z7a zBLwX&hz}e+o$WK+opm(+gm@v$`;_JXhm;x}DNn9Be0&&RVHU^mzPK5u{eN^d@U*GhZufkXIHDKGHN_f7F^FknbO|hzzYAofAM(hcI$d3wh%ixfq_TxMZlg-B>84`E8Rf57E9wy!mD3L zAQ3ScR!uQx0*N^#;u#1}r%$IVgNO8Lv1*OI(u$oDsS)ZkRdJ(Qopl}tF zDKDbfi*JRrVa`V&@(JFMjG&;KM&CD~Igky=T%)O5?rjDK} zFpG=U`pucQL>~JSwKpUa2GBsBis*#5Avh8i1g3&=5Cc0YA`o0B9ZC^T4mXzTZTwZ> zlea?RbR;Nc2%j|*;N+jDe106*liVcL{i}RSm@Xo_V1gCr`ysM7^x8F1-1&5U1Cvtd zv@eEuG3pb?&qdt11O%Ylp|QixII45|Q|U>x{o3f_BB>tx(QhP?B6*9->tJ*`f-+|+ z^ur@3>MH?pV7gy2!8^WdhRA9@D&I&us?`X4yO!ydniwEG(mY8~xUuyv{CugKjk2mV ziWA|fG!i9G!MZ~mCao-@aA3-Q*qGjoMwZh9^{kRe;)1E9vbrZX(dDkr}2~@rYff&$i(% z))(@Cro87nbpfmhB7Uu?RW<9IT~P;<^>%u>-INLk_cC1%jIy2B_;i_ajgF}9gw;kw z8;qr)8a5M^OSsG-y6Uw;R}^%V<8{zCY$sn|v+&al%44adWnN!)ano%AQ^-pnNR#u4 z3I5hnr*-;b5yaxA*(7qT4kN}@z9=3hHiIcN}>r)0%?c>ktf@$QCWentS( z9(}6F-Wh+o5^Vq+rlfyDQ7ze7?N?U*zTcD>kp8lB{ckp({2@8B_0>efuk@e1ar}Ot z>^GR*yY_eZoOLg+pj`}L+o0SSF6Oi^EdByFH{gOhy||6$QT_KP#(VcB19^EeZHSm_C{QhwQ(aJe_f1p^A9yR#`P_y%HAfEydZ&Plc{XSW12e2n*^Q0CqU+fI`0oXnG zrE~%}(Oe>!1x&I(Y?%s)1mYC;tBbdKv;L$D1P&JE{h?9=KFFHd6U6I3K|f{;VCrE` zJ#b&vei^Jv!51th;3lGKe<7{6A~rF$aqsI5wm-ymm2b|iO*_jH2QX>q!JzEi^ANpcDbVH;^=d4CjBcy|1zW^CW@8A+xDD4CkaCo}A zr-^vF4Ns~OL51LW_`9bd;^But0nN$XH&4tZ;?MIp(dwHAi^FhelK3kE3L)|6690-_ ziv&DwP8WyM-8u4bL`3|*neqH!Vd!vru5W#cpmDGcDiVK1Kp_|&dg5QPTlNDF)B&Oo z1w{P2(?1Ca9_TnQ_cz&t2uH<}bKoTZSJJt>P>BpVO2i4(&-5iuaQv@ufDbZO zZ~_WsykH;K+J@fGQiFA*)DMGo2F>SSQvnU2=r#FQNgm#tR5?6iqh@d$h9iE#E3r9t=&C8^+cBwYf8W* ziRBkk4ms-6d%27GQRxl1_g^C>kvWL%gtwy0#+Ueq+))m?q-Ir>o6a^UY_nR_S!@=e z*m8}s?Y2y|%(kpqZCDU-Jb&^>Zx$1VTIX+)*FX{o4N-tqC6#j%2zH{ptbgN0A(&SV 
z2Md>4>-pN!8e&Uh*qT=7#UhYXzOV>cmRWdErP_)vb5Hoaik>PPV%BBzS$^wNqo31X zU^9nk*bD0sbx3b$I%1VXjZ}>!mH3$S%~TKOy!E_YGNBU%4kb7ObRNPqs#agOoWd8n zwQ<^4pgBpiNm&D;N4I_e_Q$8jp34t&qfj`&lUYj+>~MJmUu=dqn2Yw5Kk~4E)DKGI zPhBEyqt7oA-c=5)W9{_=>VPLV_HaOjK~7FlBcq+5`3S&S(N=6qAO33`I!44sTYIBV zGDf3XcGCw5UHko4x=^FfNTPNo|JX9y0wJcmDGAdCZzvJL6n>Y}ImDF;=IwLB`<Ijt-c#SVJH&53)?4(LHcg=a{V#;<4zoXWdB=W zm+-2$x@srnQhWX;%M_LAPjjH(XGrd`{z>g^EiKQf{$AdB-c8<_{j9j(lz*;&>nTNZ zz1ruHn?D>xZlvPjD4_J@_ar_0YwhF3r;xv_LF*htto&Ft{U#mj_ms#L>HM8UfS=)k zbH%%6gg-cfj->jh_p~$2;Gp74Qf<xu~|QSB_1kV|m?s>WUt!tX-CqUeL2+B_#V+0=i9B_St17 z8NAo%&`)#O6}gl(jE1nyf|j|%GJHj}l;yPLXp$-x#Y5!Y2`LQE6&ZN5D--QRQ8|;+ zFp5RT<;1cjyz0z(JcOCd)o(K76|;YixwMIcXrC1h7N7uozQr*fay61PJxo=Ak~*v9 z3Un~FP<3QBacMI)@zLq;@~jB5uB=VO_K$bGq6wbeX3oa`J|LlnER=%_=+z6PaO z5(EAm2r*LQq1E6 zs)^LZWOr_0pD4Ft0~|>&ztf=ZZfsdI0chrfV%2%8XO<=k;pjkbj*O(W;}%%Q6LxCf z%`i*}?n83Kt>c$-uXvSst~BVDiLU^#j3p%LI5N}F29!T*!6y3DI|4uDKwZDvKt9>< z=|^cN38i?|1)aq-S~Xp&fJucGd_#bugmg$#8LQ-XtEcp7c`$fqJ;~cB_bKz05yc@} zeIRMc^$Zt~0Jvqtdn547+rw4v2x}qNvv7$jT)5VVtIj?qISy`6Dm2zE9 zUL}AFw_e2+3t!d$7F;upubr z!)c~O$8#_^x1`=J=Gyhe*2dF&fFMW^5(%z^I?C|oR(=J4)9>gu+J~f)+VfSE-L`GE z?xE6-fRmky)t`e^fH2d2pR8VuP#eND_xf6e$2RP%d+dA6RLc~3Mi|cTwyGyhB&_RZ z96d2%{T_R4_%GSbCW?a56faoLJjsXDs012eado@hicK!pLIko9J8J! z2brI74d=s)HQKkQ^{blmJUiw52Y`)@XmeSwjXIXj-S#~szo$o6KDaDtz`3)&DwmG4 ze~0VE^DupQ-5O6sj9$#r2w%r?1zI|66{AYYTlctr-+dPWsYl?S`ENb1#?ijbudtJ@0E{Q@2fEZ)9qVSdd$?t|DQViY z2O8DtK#od;W3x%F66dLEjA>(PcaaweHltjNa%T(J)^f4N^~8SPip?r=8PVU)=w^0x zmRiO0hLy?E30Yb)&k?Etc*TQO@#!w&hC|;#G?C8a66!M1svJky;4(r&SC&gUw%A~_ zt;~;vXE?${#MJzK3iQI%z$(UVYE+h7L>24rG{2$8k%eOompIuq8gyGy^agXiWpv8} zzIjs;%0sPTjB?12lw4>;K}%n2CdF9X0TBuhX3xU~TdCuz1PY`yJ*VZ+f7}4``6=e!j65 z36b57Uq%g*%~-E>3D;!G?26hf>X5l5`EWdU*1Smm9IAs{mS!X=R$?i(&XiKf%XI0Cf}MfYN-427-WF`JG`r^&c& z0l#fEYS#>@Nt{HWhk`>4=FRj8PCU252FN22*AUV{At8`oN^qtLZo^kHLVjguzKzk; zh;$efN^qGAn%8**2m6^IH=WOmVkP|jjkTODZNfZ66p&PrQS@W3l z_^>53bq%AF{tG=_$4?j6nOLshP_{KcZn+BqD1uLNwG7Op?qa9wTjs*&_ghE#)=srB zAH=_rh>h2jjAkdN;g;7(^Ub6iyNTVUPKCV9Po>^f+c`EnojJ4#TJ6VWS+u9zjBA(< zn?0^F+O_P)jMOu{Ys7y;kxcU1WSA#~d%NAsO$1Tc_TczqOn#GAKZU1I@h)LNMtS#y zyD-4jM4I^mJ~i|yz3j^-mr`y@ZZ@0Wmeu~m>OQnxg^uwe_73Nrt?2P)=0>HOPE2XP zZY>!e->J@UcCcB3fNx32z3GX@>VwVur~YV<^lC2{#!K^sNXx^g_!>-+8VL%MX=)vB@%+jBtW2#Imj*t>%J+7I*EdJhqqN^D0dYw3>37?@O; z=ZdXwr*|^%$y+6JW|K(HbW6Y7d}t0_D3UOfPR4{IBwfx`lOLNfI@AUC6RtesSrOoYZU%qcFtLr)H^P+V`gm;r=c zo46x|+71|2(=7OvTZkzaIoK{@E?FM@FMK$bd1p2c`AQAye+LEO!d*Z9I@-M4On6GX zG1p`-oX<_Ld^E$riDud8s(g+0X}q&Mh!)%YHKr|>o21S~(Y%yf8ZIt=|F}gGz(LtS zdsE4qyP8Z|^>ZNMD;Hh3VjK%)=0htU8)a#7EIEEVH7AurChAh7@Hn){bVELMkiT%SS`#NWrHy)y%yNQ@h&lw8FSzkv- zbr2JK3#UgGx7H z4T-sDh_Ez_IKu4YV#4(|TRVF-$0rCs*Up!cSqNYNfKxn(g5HcGPpV9@LCA&Qbduok#k$?e@Sj_|dxMpyt-Ry0nDWY5A*q zdi^!<5<3<(T;EyM3)Y%)rkb{F<|0JxxDV&t07369n7@#%csjIDb3$o#VXBf(-RtJy z_qv`-&nfWHAK6*)k$4%xV&CMuP$pZSjhC46n(_?UT=q?+EcSlY9L+05HC(mQDvqsm zr$+bai@PcOnyU=6@a*EMh@bME3#4)NZV3nY=n1UkY(_idDh&pRy5(vMiYYEjh4r{h z>O6+E-=e_mVk31hikvzn)`AxuVQ)<>`(0{l*_eD6LvStf*wOfz(ZUEh7yOVL-GCQV z_)HDo$;}KMbC~J4O7<_q$`wgK-Z*_vfCocP-|J~3S_{Lx?(sH3oge3uWfITp+V@lP zsA}EaX8U2TeR9J89*@T0o7)XwbXck){2DgD(`CX;U`=r51=B03`55#e_}M#08$Mp! 
zEyI2N-Iqd{nJt-=U4i)wu1__H1Voj-byh+wcG>+tJkszaXQ>)%Vh3v z%Tu|xw~UT2rW79nJbPb%tnYHTr>|yu|1eaD!Ks;(f@R7X>itkC!%wp<>@O?FsOg_M z1f@vFRO~M4*cgH*V_4khTIuOhW8xg1X4uoAhl_@*zha5K*7Hwgu_Gi<`34b%mu@Pa|d7Wsl#I7-@w#t>t9GV zT-8UqA@KCt2x(h8&~y;e6#kIi5**&O6W5;Wp&LxMjHW_S9zum-yA@L|EG0G>gsJgJ zZ7=lDy(`%lkBzs^vX#fpYY~n9jfsT8Q`Xki1_|jMy+g;2anW-rf&XiDTqqbrg-u_s zHGrz$Ah=8$gE41b*# z*HixJfPU;v(=nz@fu7$v=jR*eV<)mI+>_Uk)=<}gL_YsslZ5A9&wx8zO0tmuUWZU9 zEO;n-=$%mWoc5#M)g1!4CvTsk=a^>|hWj&PO|iIRevRxmc|BT(W~>U_ujb}LA^Z$* z(bYkri$T~%wGNZ`gE<+X=;E-U&@2-s{db2rB{on6_2?ylgHR-7IT)ES2Zfm6RP=Jh;BG z6qOu4RoY2#I%#O%9`=_?k;m?1TW#J#d$L%~=wOH4;j(xA`rLI9Abc`8c}cx69`tM8 z%`YvXZU5Xfs-6ki@k1tUHeT(N7dQ}p4m^8knq^r^dkKfSzQ^e@`*ob1#m><%|-q_cjrsp-w2NVpmK&M-s;riTc(Ai8wHr#<_2caS*98`8m0;py47*yar9|cOr>i8cE3(Ky93vj zGj_X+lgcL}*+D#-EemRr#zh^&YvxL`9j#RgOo5%|mFDT@o#Kvcl|HmP+x(OVSI!OR zW7+D>{swBMa!T2=dAFOU;KPJZ>n|)i%GA6p&t_I}9p;hoc_+=!THi*6K?S*L=K{2o zP;(}{hN|S+%km4avKQsEI2(27+HMNMDthB;cDMo5BN~BRu0}EB050FYbSwA_*zNM` zxsRIn#T%Ng(xIwij$-Fk`gg=p>Hgos`fKsWIMG+mmpRYgRlnN_X15I&&xO78g!>`e zqhVJ%S4wx%y>)ZjJATfundDgI>wzIR7 zn4DaeR$r@;=w&{*-&ZB)FxAn&HyGg~mp|f1Z>TkH?X^FoNfn7ALDFH{2_!aevd;GnuL91S9WGDk@97 zo!%$7LeQUQQ29jXjpH4sP)~S~k*TV_MJBJMRuvnGp-n5*{bvo)0&RYk{tY%R5T9@l&z?_=W3+rlvWHLXJb5j|zMEca*o{Xz!i zFX(rGXQX?re9wiZgpP^%fm4>#P3un1yU;am`-nl!bWH_6Q(UQIL4N(d&Eg4trSbT* ze$UCV_#q!N^k<&NK1I1(o$Do|as<~=w^9n*BC}!=lP^yBrO#8{#4s|mYs3uR#R~Ci zwQ!ksZADz$u+0HqEgK1TW8~)1X!i<^l;744RNjv^ z%KcL$cl~tAn@A&Q&eu$;&om)N$1(%{)nYh?@)`MAPUn|#=hd!{&rN$Y1pizwzl(;b z?{1*pE!9tB`K@vF_qS7AqU%S2F^@~flFo#uzkD~tn35>oS%eFj8!+W2qgzqcj^W*y9!4VcWg8 z6k8WSnFG941(c0BC8C2VOe5^!)%Kxm+0wKpA}7{gZ@-r1V|pfowOY%G>Asab+MY>? zzjZvup1m(3mEuxuXAnr#N~k*y^%k7Ggl*tehvlVH3DnBPBgNQP=1}ygaxUmy@b076 zm`eC922?=TrJ0i=_3z$ZS`V&k+NZ_j%yS+SwAkN1iHF;V`Pqf0WfWr=#QR9N=OuXwo_9kH+dSe} zd)ivD*vowcSW_F7l~=GO(PE#SVKg$)brX>WhXlKrOyn>p4c8z4$;w%<;jS0WTr)&v ziQOWR|3z4eu*}`tBHRL9d06~47@i`G&n{cyo4Nd2<6}X!Ia95_**11s3gb-)7hF&C zlin|uOYF;RPTyrg_JBf?5$ex}WKXQp6w@>8FcN)F5mW;-gRegyN0A-_Cxor~IEe3m zPQ*kln>4IdVbcb8qUEidj~6pvNUPzlJR9<%x}(8iSDhU;A1g<)387Y9jAsMn1_mzK zsVA-6#8t%1%*f!DBWGi*2kwoBX}3+uqph0Lr?Xi|7mjfpqnG&BMNulZ?Q82U+?KE1 z-m_D=CtaeA&Q8!fRFBxybR0@6qdJn*cYid<$=9o*!1aYvO<8+`wj804pg+stT8+vr zU#IMpkGXq_p+d8&BM4(2E+Do>XNm0NqF~54M_YSq{irt&GjANolXbwwpGnhnz}nz> z9xcUXnVfhj3z5SgWLe6Xv&4z!J^IF6$GV5Y$G%m^>^Qvufdz@hoG>~(2AHEkoUYhD zXXNH8#$INv`374vJko?{1N~uxo1AFG;Etw9R7g@tD$E6cB<-(>jH2Rh8Xslyozg0{ zz`G0OF2h5BW@%cd$8lshD|-1yE+2g&AertX^w0}Vp63`6cAKon0yaENpMBc%kVuZ< z>Ii1~J45;X7VJmSiUnfn%LaLQ-t%!8>ZB`JsvWy=3|yQOf?sBK}1rMNFNIoh%)I zk>CD>Ov)MB07AWB#X5 z63B#E8VlN)+n5poIm*9d68ZfEJg(XG)R{N_%pe}$p6?<_>5m`eAAeL$ZRDz|8=Rd{Dp3XoONZOg%|K(7X zOwBEUp4&Z%s04w5i-?1n85mcOgNuWRm6J%D^WTU6 zQ?5(I#!95k&dyHH#l_6TrAx%l2|P%|#>P(1#Kz3RO2olV#KHBS^AK?Y&(6uo1?-g( zct&=h-Z@#gfa~y|Pk+b7^4~T7)6;(s?tj$%&(XiF{yV1&Tt*S~eoc8PQz{1%0HQ;Yj#w*+<@b)0)r(!HoXPM5UNL2bI`NXEutktyUS^WJ4M zzp4SZMI{6T8jt5_oVIp=Tg}$M&EXAutbPxA4OK@-VF$14OKA?Ucm+2bl``vIg5cxY z1LNjC)Wh_JG0A=#9+(;y4uj$^rTa&0*-?Rtf6nWPGzG?LXAHe@HbNytniXjy=p`h> zl7~eo3*f2ge%zx`7tRE6vs9paUnNjcRz%K7aLu2^82(l{vmbdEC`O}+r3NjYtVt#! z9?XPye)FK%G5JkiYD=*B>v}LX+SzcXO$YCmUCVa2$De0iwt1V7*fGcPe_ta07ybD! zd-?C3|98s+ZS?|^?#cKoy5Pd5GIB|V+9%ykRkoYI+cJcuVSxiXZbf! 
zA`#O+*ztc1nECH6`7ea@-$W=28y7S1g7g1JgfcTTbNs(NmtJ9_>vxiOqacxio=95Ip_I0|2wd){i!aw@!K}naAm(FKg`^XG zWe4nqKlix{Vg`4`7H(8rDIr`x33pC>f1UXH-8^)1JXI^ZUaV9fEtNZ0RDgZ~vG_vL zw-zZnjOqUtt^=|VRga(?7F&bw{yy*rLsa z9>|Cx$bC_}^ay!^R#)5pOh>^UC{DgneaMHM)tXSB#XO2LYA>O+|Lq6(96NrVqD0* zdt6?#i|0ASZ=i2&AsGRn2M`K@LFf|SLRqb|yk4Nb?X~_i=zHxQzbpvFfgmQ(krA6d z0MY^5CiIGyLuMZJ?G`PVNfe3fLtq;Ki7iH>SvzfVUd2?YF zzcmh4+H99*Z>n^k!|~L)OWWQ$;yQNjdG}&HNPEb8P}h~7%>&bd>6PqB>m7oF=#_EPVFOKBlXS4d>qj`gB-5IQLbAfUz1DgSCU!p-P|O;tma}w zQX}C5H-7)K+9fmx?yLpUczgEMrw8KLDDG9j^N-Dp{4VbBi<$jt_!3!q{F#sdjr5l= zBMYttcyj}vx6ba{af2yx-2up#(ASqLCgopZdLyb4jZEb1w1;dnI6wNs;}J6+2wQ7llS z>QYByRD?_n^YXWh?D=sFx!Kw!bh2endo>?tNJ*6vr*K&Srk`?KmNeY7!hjF@pDL%Vg zn@t)e%6;~GQQe8$bf(=}j+t~AV`c}cb4RXY6s5V!bVyAqd6h`Q!w=%bd`)6nNn3~u zl^Fv$E}LxCxbfQx(ib%+_UEynB0CGWsI1H6{2n2t};HNYO@ z#V)QkDkCcRc*$M!nZp;ew@hM1v|8x}sOO^Uhe%^)?}eq;qKl`nFHTNyY3XrAI2Uh? z7Bf}nuOCT6ul^pqlpkGYMnz_Tr45CrkuaIf6&Y^IWIw%MPqf11jbSUW{F(V>^1Jd0 z)TOTEW`SD9N-_I+QyE@vl?5c~_gG(Xa+h$#O6uhZ5&ul2X(>~#Yj5$qYn7yrf8IF{ zg3rL@Df1qPfx^*_SWFj{<8qwOdWKb{e(ri=BXJQ>EQbjTCoy7+o1y*DQno#sT7FH0 zv1!Ps1-;kj{)Wm+na?1+x;Na`Sa;%p=7P(C7#|X5Z?nc($hOSN@^yg(_xIe%*#mBx z8ixzBoVC?WY|r6s)_Gm^andIz6&3tCHZktVjm!@O)Y6q}L0z^cLnT`lauB3ID%70hBFxmR?0UoT&etlYDl zunxko4Q-OB;Mggm33!))ADW-s&Z(FY&DoPPMFzi5#jlC@c(A)J?U9_+LL)O3Cx;tC z4lrUh^+aG1w(Ze6aI>dtlq^?5Ael4N-bHCOWnIVjDYOw1_iW8wu{IQ6os5M3VgdsI z)e9me6Z(`$lMpM{kdjC%{q-Hc8)gp&B)dcrDH=D&Y9t!;T`pzI9KO^#zYL?*WmX# zf;UnoLs{XX1Rd#LWQ6niF5KPmepFk;{KP#aQ+- zTVmJ5;bSQG*!VJ(^P-Q|hCOvMFWsCFz?)8m9yv{N9g3e-g)Il)NA-DnWYJ4k@+thu zb%Kj~QQ3ZC93~CkBK{KJ6DC0cXF9+qF=Wor3H-f$Xm*dhE|yC~_mV<-D16WL8U_dl zNGRr)Fh9*RPWST&r7cRI;XnF$Vcn2EO22jQesv0In^P}CI}ah(sCXKn=*mOkNABJ? zWD32~p|wqIk=QIMEjlSqYEW!fgM3fHgw$K_^sXLWBX;{Sdz7|eSVPNWro zpy-_w$o*;6gi0vN@1KKJf7MkK08esaJSjs7H_qIEqkZq?#Ne5n_F)P9dvihi(3-X} z^35~Dd%nlz3x4!(Q5QsAF^>67OE}-8Pa>)y!MZPq`QNdKm*AX3&<9Kh%!sMGzI9Rh zP-SDx?B;xZGE$gAF*ee{BbLKdk3x#bi^`K)EK)CuH|_4y(4%af_@nej9h~eq;ZdyD zuDV{`mCsJ(-UBk**SP&8;Uv>0R)&!qsBIB>n(v?QFX)$x*cdvG#+716ihHVB6nZRG zc?9YLf(=fo$oNZ8zexMklKVTBtksx^hQxc3bJEp*(y3|`Lm$i?SJkFISEf{tu& zYV4d$+qK*$*(1im0L^WULo(HV@U6R|yv`wnuMSqY&aJoO4;47WJwFdE4Y8Uaa|^bB zPw^*;!viLWCyjKGsBw2Nt!Ec_C&i3g6dma3(@h7k(L1QN=qZeoemVuAnh^OtCOq`l z1#+jrOCp9v$bGR)QLTU|c}D!4!>@u3gHJ_zZR($yJ;CfVhKfixaJ_>IgeCHDg~Le; z5X^+)eX4bcsl_#clSNv0Y|Kay%#a29Ov!fYu){`JA8b^rCHXR&v4!3Y6M_8$7BP;b zC)x2nOxWZ@-5xu!PC_DxS^>rYuw*0uKn%~XwxOPh89$cjU>Mn+u(pyiq0@@& z!iAEdFO=1d;x>1U+t6DQn(@_tvZ41SJn(brDydUISyia z2*d^oF7U5W^+2NLe{)qnw;$W)QyBOK&SHp$H>%|qs*gq8Bq%wG)xpB1qY?GQ2Ya9# zUm7HW^q`D6sNsP~ph}jWKU5!|bjVh9gU%Q(CsB=j>_Xe;)`qT499Z4_^=KC1MYd&R z$zZPXq(=Ws0Z~AVI`>dJw2G*rNe10ERYQdV1Qa@JA`e5V9)@W~kjOGC-PqVYGZsRJ z{up7S^2~8p}s}9@8J7&yyvf zCRd8;`YjdyVKlAtzEw-sMvuo^lDS1o{d1xHRr=)6{wh^q5>=r8-n^+QYAtasOjGk% ze{3gUbsd1PCT+$lKsUxo5j-4%5KoM1;@VxrVE{e>#kZof^n-QGOR{#5A#gCC35wTy z9QNQ_*8(w=7-I&V#g3It;D`m=fs73k?lflvHv_4eWikR3%wvIB-;W@D7L7+JhfbRq z!5KZ+d+yZLWHZFiAqb4=C=3^jr;IDpOFE z{a^TdbC|}~rI(2>G;XEBu{8S38aHj#prLFr)G^q*wo6O{doReVa)t{uXf*`Z$Mse+ zRHLckF4FNGKFR#~j%)W^+5Hi`$M?n>+UvMAGbaNPZu+p$RC3ZvG+AYrSs|yQ!n>*t6O7J3hwn3^_@d7v76~h>1$Un zpU(S>V%m&1h={rhNdBgadn9TMmp!!G`{R^yf!d6QTPx zX#l_Y9sB3^*j=V)0|sqAvtG*m=kL9KuXC~r#I}QaFuN&;Okcj++3D%owSfe;2M+L# zLBbyi_SU3n^S z8GU-@7l4fg2X$?Mo$fp-Q6=NUTIT zEfH*JXty`!ahg#bqt*&88yoyn7_lycXd&0Ug&M0>Af7a&_j*PWIs#_sfP%6z;x+{O z0)fdi*xwUFu1SBqq5nRQ@81s!{HWV|N5^~)&a6p;g*AeNj0*yz8LCpfG?kxHM83+x z4UI&F7W}JbLho)&;MNJ&Q&3aW)Kp`}3O4PopqY=Wq@}4AF)eyHU#|SA&fs3F#~!gM zR+shxS)=y~4tsB~5xVR!^R1dAqDm7Oqjl&f1Qa43vx&j01Uw>bIW`!$;aFc3v4i2N zG&y;1Cnp;xCvp@nvqNrDL!&>Ae8nEmf=gz|@<(gZc{~oehzaBJ 
z3x)igd3oBz$50!xpdnDJg-p_>6>YF}s6qn1ql_Z&e|6!RyTV-an|2`%xWn1UagjZB z`?(o^{e?Tb6nT9Sxr6+)JPD7o6WcTMg-Xjh;o%D{koV-#UJwr zi=TB53*&F4Nl(vyG5VE8~VgcQEmqT~G# z5r^bLO+vQmbTS^+eug7K4upl6-ZQ9>C7PzNzBAyHov&5gfId@y6nTT;tDm%?`^D^m z_JRC&Mshz1_dh^CKvw3AtEN#Z=WGRA3AYVh^b3>Dn7ZQkKlptKX&?Ehgw#WUlc597 z;2kGNXh(SIRA6lbS6oazzwnVT0_THA?cyc^D-zq7ZxF zXcC#-+>yq&`t{xCung;&)oF>J7#d12^2Z|H6{-#T)^rM7Q1xS>ZoY@Z#qO_11c`z| zjYN*yN&D|#wmDr8PE!k#61-5$Ir3q)DO|MY$>)jFh^{!;Yjm&x0S4KUjY27c)5hL4TvX?v3#S`3Qa-KK_mjg?r1j%Y*ZV z^CtSDQ{caqVhvXUrG4gS(@A86((%p3cwScAorXK(x|D0WF_?YIgb+_;nM@*U&-q#a z3aR{u2A0fF7zub7*3cW02&|#Bg>(#sQPxdAQScgFE9o$i+lQlH z3oK9m7-mC(eM1*&0x5$61v&0|%0580E~M(2L+$6VG*o;M!)`Q#0Y?{z_Mo10pwu_7PoNuh$@$f4Sekgec$@MNI z%>d1`m?4gU?n zmy23ejCn5BO3>RI;z{@T%Y)kw@}$?`gL|8US2J&S@GAd^;37YNFem-sqDBlQEWrt} zslkz$-!T8oV4@N8+#Rg)UlgoNr|Qez*&XbSMTn=#0xWF4pMNwG;2BOXYbEm&J(bN$ ztuj{WrxYtam9P?2vK60VS8b}MqS?XJe-*7Wm8ziHTT{mpads-gF7v|?YzvWq$Z3r- zWQP!#&?72^QIO0MYG}>GA*3eRHVbNIhLWczMp}t}{M4ir8A6h_8mO5xBv~G>X;o4a zl4bFlq%!Wp+MR^lQirigx}lZ8q}o=JVo_^-Y|m>C5Mo_>vyb^b*4|uKhsyh^DtDD_ zkhNm?uwQ_N493r1?sMquypKcNM+a6B!9y;#rBYI$jRzqd&s|-S6917 z7(~9Qp1|UTLBs0mYFfz@o(@9fUziR#=3hLWiW4(S2O%|(PIqrQT@>HPbh*sKbZDDI zdAg`h$J2>~rQ6wCGXC^kf@=b&py-neSMJJ~cd6xunfF0<1ZYnVA}KJ#bHN!CT?3`@?63=8cXFzpu} zPh*b$P8EjK4V{+ucTs~L`L)3pt{dXimJZ^~J0R@7 z+P4?Am2F^%*CmaSAxRTD_6YUU!R!%^DfY`}UG;nMWE%TO>zG7wL zN^n;%7?%EP#0#TVtYi(I#(3j@yYN|cGBIOV;|hS9WWmImWYzeowL29BV;fkNlKng5 z42J5~R9iZ-7e@AHk%G{bki}K9IGrw&@aO%%GUE?rqo9fI+eH!qvH}|Ggk+#*5=Buk zDYHGd7j-E%Jv7!~1sh46GgAvbzA&8?m z*1%B2^I;g`Xc(UQ8pd!O3nLK6!AQjMa9-*wn80x&j6$3QHK`LY8Ac;c;dlXzL9B(b zsV|`p#vx8cJb@a+pov(AI2)!S&VdUN z=fbqq=P-}se5glU02ie`gN4w5^dgQI!wkg5Fcaf1fmw)4I4*_RsZU`U%t2fZ^AH<3 zu7LTePhcf1KwJe25idpj7*@j~#LHkY;^lA&;uRdPge8bq!BWJlVOi=ptl`)MjfmI4 z3dCz+Ws2>utU|n=<65{h^&zZ-)rjliGQP;aS8((2DpM*n@bO z;}O`4_*Zxi@iq8;>UDS>@ilk@{($&5*pK)ow59$EZ*hDZo=1EKUO;>o4j}%W<3Hd} zi0{FRi0{KosUvWd<1u(Sbr?Q?gNPr(D~KO)JPxlSehh~YKY_m>ehNoYe}T_9ehz;{ z`~qG}9fE(t>xf@+JOOVYeg$u)UWKpWEyR-?{{?R&egp3y{u|y!JjL-p@Q>6ha2no2 z{1)Cv{0{LTd=Ezve}H3%Kf(uyXE?UQN2!;g1CAr6;A6!9mFfTQ%k+Q!50~lx{M9o3 z^Zz2#KmVCb|LniW^iO{#(?9tYGW~d$OkaMEO#kS=$n=l6O#kR-GW|m?(?9Gc(?8@g z{X;I(KkO#cKlqtUKgMPHF)q`O{STDs@BDux)8F`Cnf|{r{ePTH|DUd>|Id}_|4mQ- zUzz^DGX0lj`k()QAk!)T(H%SdBd`-RVQbhLMIa!;nUK(SCLv)4A+e1uJwMs;KDiFt zSPx^K(PQ$bj| zEMd=tXDMAO(9vWxelh~oD=ozY9&zc-?)Vt(c)j)s6pcR7r#CDk^;vzxj=?mav_A6; zD?R%y<$5f=0MUe-vcguTi?{>Ct@wPa$V+*;zWxLjFkPs~&^?o8`ueeJfRM;Zy5U8*Uh#ga7lM0|0J_Gt0X(dnK$HiTjW6qV@sS_IJ zU0GGRUg{OEze+pMr#GpO#~q}EkS8{E)Ow^Ze_%NvZvp*LYQvV^kq8M1leKh_xQ5HyES#JMbqeHB)&*kEf%EwMkAuxWa6F*o;@Ci$6+R49j&8@! 
zta4QlYXz$c)W^dSt1K&hu_sIDk1c~=pLExUMJvQB23;9^dc?u$tZtRq9tzeiKwjD@ zvxD>romDfrv$EvLrqw`v{jq(pBbQUY^{LZn2!IniQ z2kb14<=u=~KH!*Rr*MCjS8^as{UX6@4A{mN%f+R9Qag z_uBn_uj=ve4H1okD_N3TIWNh9S6 zqxG38I-M?|Yp6)~(p(6VTX){bxgL4*beyp$GH82c)rt1{<5nB%S%mfGUU4(BYkIWu zBOQdF>!>I7%j@c*j&Q6Dd%R!2zNN894j;%R#ZDa3>;#}9Drd^5EBf%gUvInTifivD zd$PX!>+7dSKK}ee(*oPK53ZcmcJ+ae=Pvrq{Ts6mzw^cR+9#iTWW$U;$SG4&pNLN6 zlsKu&bezHCPO$cO`++c4jTJwWORE7*g_g({*V%5 zy~vWsisFaZAG^U;QB{S4=mhrPi5ImOZ57&qcrkOV|9VI!r>WYs&Lmb_FR-ri2@{+b zYYXkOohwb3+1Hsi+HdeZYSK#~f%A>QXflfm!S52*EeWi^KEi&aU?P26j7Ep(-b)_= z51pUL!%|9EOp~o~dT2?AhTM#kLrqF!jP+OV7=f4;qgdio&$1`6O@;1OQr_%&o$Mv$ zD2&&))oFid^TuXMOV8D<8B!Kv!Qi>)PwD4?XkJyyn@{@9!1({Vi=B|M>(9?L`@& zNqbSgnMf?r&sJ-kZ@ky|gz;sgbiQ!D=?+n_A&-Mm78FTu5EL-tU_U5`c0mvY6Huc` zRD^wWANz?q*_zOUC}I*gs25x5+-D_8pU4iD>N_PN5}&j%wn5^n7S^KrR#KKQDT$m& zsnQhgt8B7R#!d#4y%eZMLsX#bW%dpd$9J=L=riV4auaXqFH!n&L3oOZmrCsujmtsp zRORVPYXxiMii-8U;vx7C=F zi;OeObSjED+Rr@9Onn|z5kbqdTEUcWCjn@sI}(v_PPj6t4_4;nget?~09+KfM1PTM zVXk&j2!+|g$OTh5J0mYMhOT5vLpxU*PNQH#Za!}1Ok9r_)Yp?(-_o*vWrJvU&J`Ks znyJDnK;9++XP?}C<&Rw2c&~ep=i67_A~5y3+J0WzdWbB@wJjXoe?a`vnf(`R*>sQd z(7Rte*6`4ZF{2tT?zoH980}Y(>X2I%Fq3(^iP@rYw)mzcLwv`1in6(+j?Jk$BSVQ} zk=9$Ar_R?kXd8r0+RM_5a+`KiGpJG>nL@{D^9@Pu-^PEN{%zKYM$sgi1%qBEi6Y7p zRaO)u(yDA!Py@lwBrJR$4=F}FKB9ua;v6ha2#H2JzNHIDk{XZ&xs@(W=s-1oo}h%% zy~F^77!o#P2<9llgmL0w@u(A(%a&w=v5sroR+JOV+0|(YiX&-y7 zCTW;hlMIZXTH7L81XbCK(;TF}WAjNJS-z}3tzAS=CyEGRAuBAz@?=GzC4Z&0?>*Ii z--GXvf88@Y$6q4t{b4wHu45RTN;dDg^yV9x2D=$~^Kvi%1?@DQ@uF-Ivbe%!YbLs?lUO~W_tu8GHZERWx30|uVp*q19?(3X7sZ+jCz*N$oRs+QW?EHvjZWn zq=hmX+UXXqpfMt&o&C_lJ2_3rX?;!4b+r2UfNAb7{i_~l=`lPo`oyV>N|u?s(cvJ$ zF;iJlWUXLQKP~i`epX@)ca!R78X@80^u!&Dpol3>4E)7Z9G8t2Yid+P;Ff4r&V zH)P;7<)dnbU;9AE-^t>OVne6)pLFL<9owb7b$jMq^jJyWb4~MhHuMoDSe5K|Nv1u0C`zx}W;QH)M zaIf@~@Th5z&|-SYbOerP|2x}iwq;whvxNe=hqb^T3XU*Mv0vbr;+Zck%D%#OqwQYd z9`n8aZR8QU&H6WU7TCe7*)^}oRQ2W_6`|dvfQY`VfKl)TM4cA1jDlE*5aJEG zVj-2NMphct6PPuPDZMz6jy{16jd&_Oqo#!q_CWnIY|uDaPF%7W$;rirv*n`XaVaq- zhp5ADWAmxl^8CP#7d}4G@z#Ai$k6BiL3$3@U-JBKp7?0m;!oB+{2`@%zW(u#)U;!12tRq3L z6zMoq>GX`FbZ#EQS&xe(Z!lZ?)vWhzC+mIJ$$EjGvR;O+Kg)7`dJnxU(NFLxs?2{g zBg!7P*G*-E9>;-RkR49DGs`K+KEV|xHZy`-^@oX5Zw&*sVm!_cLtjJc8Hc)@E~m|5 zr^uhtaB)Vn%tNkxfPDYd)T`@OG>*CawnJ+>c9M$Q9_>>-`mT$|Z0~qg+Uv+Ze`d$w z1CMufJTaqqd%r%_pFj4=w*~C9??WgV*spI5aA(3HO97Src0>?aL+W(_1E>n)j%>|V zs!S3_h4dj4)q72%?*GAXH4eBi%|Mw(8_fr7{peH2<3AmzXl+`bD|*(lnEby$ma(80}#IIqL43hAl_R^fisB@L8%4Zot?2?H;v+|QBg=2z5b_V-zL0w=3|;{AIHkwe|!5}EW=_P`+IQgM@d$~>$CeD zv>}gNq-GJDkedr(n~O#vK>6Sdu`&=My8>n*9FTQ{#PXuKAwj@;^QFppM5jh z*}J?4`0VRr@6+W?c_c487Sax~>^dRCcpX*nzESHr(!=RedQlV~XgZ+)5mA7YS%vfI;#)h8Y<;_9 zOUo`Y?(bU&xjnWcJahMwwa;G~E?-aRZC9ThM5}&B+K(-7+(RyU`%Ti=GOzWH-b{+V7E{_9M}> zpB+g1Zf|4hee+)s$OdDy|CAY_IQZNTzx-CM>b;f1W7?SVz+b;4VMvP%Ds@D7S4#y4D8_K6U#OB^ zBz?sXx9zeyS^WEnfgb#3G&)e#c{~HL zGj)2Ms+R;&3`u%B8Yntd4Jk6}ak5?y5)}zm3@WI4ff@*)71KXq=`E41Qc`M@j!B|4 zN@a0|-U3#QsJ z>mPkLw`mkrt5hpdk#nOW=R-w~=~-`=*QJ+pSm5L?qm{MS$%G3DSuKLWh zkY_T-%vBpN-QUisJqDvk{R1xwzUwy4iSEl|f?`2Ls`Vwn^ zT2wprsp2Z*I^&(KV%Uq7G!p|pp{;1j=`6^ImZcieTIrN$LsEm( zB%z3<+jdz8pv?5J)%N;IH}V;`){ypg1&-IF?S{>ub-TT2(XoNP{wi+5q1UH3oHf;c z560Aw$b)a9Is7KK)M}DOeAa`HepBkZon}3Y%`9}hu{&I0?iuEb9lOga%*AD#e5Md% zGYcB=I<)n0jG{EJL-r?5SHCh6wnnTZLacX_TykM=r>8HOPNaPuQ+9OJN_&6&=C+aJ z?i0@ZFkF1)$G+mRAK75MA0>T|=|tqJoi+ns3eR$ss&1nbtzu?dgc+rxu^duV6t^l> z1VPn_lzNv&p{bo-T4D=J$g(!Js)W|G$a|<$&yRd|y7{k81|0RL$ed$aGcGxxI8tkm5ADc3kjM$DnWdb)e zCwk7YF0#`at;T+#cA;G~7y~$3z~xSBvbLDYZD&=>$dQb0r+PzPf`4AO>Hn^R`*Z!= z(_JKI?CE9oX^Xl`#pQ+#Q`ERw5PG2_NzW;GBq-!C(}pRb{Gc9qO@ 
ztQ@|SBJ<^w`cUrD+@@S1H^=>J`&j<3V=KaxoSzsm83Vv`+|7(vCj2se9mi*t6-9j4 zBfaS%b?wo0yC9>BCiH5;Qen~hTW$T>5FpFA^fTi~@_H()IM!_F%n5&mhANAe6GYcF zc#GtjC6%l$-WTyycExraHevocF@ap9UUmx9L^7*M#&riPOg2Hgk2js!Eo5$Ss za7S-x@7O0?*WXlbP})}tC1sb5n7@wMi_=n{ivL8*t2cEd@@5IM z#71F-C`R-83KjmL!gC42xD5O$|+nOpab`SZG>go@<}uzRYmB z>2k|e+REI<=sIDe;Re%2%gx%_-0PyZn>JfEI|3Q4qc9w^`C?vOET6;x^1U{(xK9k` z;NUV9Ugo>OM}1MJsW6ZiB~i&KG1-w`j|~*+0s*JM&Am8Uc=c&xk9qZcU!>?n`t~IX zqq%02K??h`13p!eMS;pBnwx`hXmG>@KJ1;!G-|g&LdotZDk6uUhit#BtLGbX+`s!Mv-#x#QvQ*GYRV z+n-22SW!;iscpJ^-H*S0spH@Gkhio;ZoXhh|{BibzSFbVOc*`{x zjx8x!)MLOit5zOvT=6;M)ZREZ?By%$HzZ6F4Pb)<|K~4tt+a7hD7{YotQ;b=NFV~z z&ydVs_h$(M7Y}MiHhj~mf`8a4B+hgSg^si`$Z}M7-_u_~ zE^JMMW_4tX8#{cGY5Vpc{>@7JVCqvT2TN;*cM|%TrBzDC{7|j`o&vtRB zCnZ)pO|MJfv~K5{gjTWP`Pm&mzVVliAC^8pV*6EZ?w0nR+4)|_nTKy7rq6}3XPWmv zGxK?V27>(ytja1j&yl|;V)+oW=G$WK3g~C8u=R7F2P3TK*+#f);R0)|?E<%Uw|cjQ zW`=i(M!cT5qf{z24wHr%YaElLNyZBuv!&U_MUEBH3gZFr)myL(La zoME4I%}0^^s-7&d1u=-ggnr3#Y;zX zz2$o1`r@^*5r5$)FLYe8uj7?G#bxh!8TqE_t~>vkE4<(C?fCNFH?{~*qoP=UQ)teJ zA0I|RJ}UK@=obe;4=AIBiJm%>uE1mR7UY`>3ef0sl==D>oL5k9sxMe*T2Ro?d!uPx z{=Lroyib@MJvuiQ@|bMkddOp*CwuJn?CWvBbGXNAj`w<~!<;0*WUrMC51Z}mW?f%4 z3MVrv=nA^yJqt=J#EPEhi6eVXQS0J!)dlfY#`VURjo+KTk6X)1%|z6Sa!XysVY_>J z{*ru}?=Lb}nQt|3F{jMZ7V{4C*Ji4gb%UV@!82*rBGcEqF#rfag<2Pf}0cXOgo1vhd6k<#83 zZ54ED1*vecvJGZAx}xr!qTK!RVVMTyDw)b=uGks5R_Ep#osktr<|?>s=Gq-!k(K+D zcRNOeQ&$`fo;aJwy0)^=3W|UHF_TEgm6=gN7`@0xjF$+Y6d3+6u*%f4>%@Tb;}xn_*rZ1Uzt^_LV5tXt+@c4JLq z#;9JaPyV=eU^#iOhhOV4x@cs>g<}U^iX6QTIhxt28f25E#C=3ET5_enQne&i1(QJ< z4CeSt{6qXpgPVeK|Ex-9rT2X2`QCc9-c)O;cV6UOs9tQEZ@I*IiMK8Ij`3aByPglT zzI1)*`6&BXFctKKq#{d^y|+|lNl52g#z}LfceDQ^{-7B(hgp;<`2091^$x$;;Lbf_ zAete8wrrC@Oz#{SIFB0KnI)pro%Y2^&O_YLV9aA+XChetv6w^=>rcZ9wE93yTey6B zEfJ!Wwh`2Twvr?{NyH$jB4dd_*aXa&7Kt;7Y{p)MvkKu8Ah9u4A)HkRTex8y#*;eP zr-_?y4e$+C;t7l>J7*eWT(O+3=VJ^iKW8KPl!kwN7nE@dYCX$gSvZ1o8frRd&S?b&e*fGeS~~f*Rlj?5<*MIFd)xnW>)2ZlHgf`uu)_kZgA%-VM)E%K@;*7w@5nu@ zkf2hfP{qs=DSDPj;o?>4<10Oo(YV+eb9x76dFG$c-2;TpGAEXwil12Cxf^4xDAMYW z;~&>svES;5i;ZotmfDz-+NGnio)ao}W`F(kyB*&y|NMsS{|N5zteLvu$w#hVcnewU zdiF5MCi>qIdd-dpeTy!B;q^D4zm{3z!?Dkfrq2qH$%#kwR5V3RrKVvfsjt1S{{lKm zKfylHKab9q=ICbG8~km-H>AI1z32Hj>tp-Zu77$y<|EG;492}|xYl^t*j0MbTvIP+ zf7;hnL#s{0?dSO~&`&YVGkq+7>imJ6GHb*km<^f*N2Ecq0uD*R;4UE$wOXQ@cEn0F zYr@)KZL;FfWIT}`pjI0jIaWStv7u^}83$SUfVFZ}lC`+i%v#*qxh`sDir)~{JJuDp z-2KX7<*1TUMAk)P6+sDbmg90-38Yz(cN?FU6h8YXo=hrK+mp;ijwo{t5;b_2i*Kl;)nT4ar zl`!F~%aw9d=Yca5oBvspnpk>WUO{QnL`>cw+cJvAN|~P+%%<5O2|7!(TqTiPP}93p=RK7kV1C|5PTpVGQ{81^eSCEL@i7|Od#9ImDfev4>^L7>^_A_*Dl;d&G6zHW zu1V(*)1agntac^L^?nkL@kvy;Xm8K2_I%#)HL?HWZ^TT_e5P++JL{(QcjJub+mrAt z_1tH?-}Hn@^_qH^lAbn?=wa>A!y7EkR!xG@;@1-gjoY(CK?eO6JF%y-5~3?Afp ztW#taH6%@E^MO^i_(+_@3$D3_#BrP~FR@1YmasEyI80EiF{$QY=lq(tY{~Lox9a?9 zzVhM;!ww%7?!9T*qSE0P*nX=Y-Z1l~Gjnl74Cxpze1Rh(00m@8qQPL0>^%)p`}u}y zyR6IhWcM`0>^&nDhJN-@hT--pO08kO;RpSH9Ohn;o_T{JgYwSL+thPwPo-bDUw&23 z;fCSi>ikLJN%;$uS>ajv4LzHBzMJ=1_^Zg*c~+NGcC^x+Ej|2M3ZF@|5E|}m9%+I$ zID*D|E4?aFEcyKweRYoCsCPO_q9yvM+kM1EG*`ma;A(P-J+aB@ z7i%sjf5wilbFs!!$t;`}Yc8e~jABghT45nk$O-1|w;Z+{wWKU!&{Aa?ixV9mF%~cD zTT2eh&BASK3zuycF54`gc+VAKCf?#>x{0?_C$yhPxc1|x*)JB3^CQvB=O*$poRnPb zmmRms1fyyUMpKX>_KbN;^d4`1B# z*i~0<+kW}#ZMEL<(c;-t%aS*e%J=RjG@;HCpY0M8M*g6uUuhjOr34{Yq0h z(xxL$KF2r{{NPXzj&{}sI%Zf{`4J+n+v&V4osO;L@h;KXS(oV81Y=;|WrvP<`1EeR zxi^L{hPX=ml_s4hoph;lt260LIYlS6N7EZ88kXTC`(;H4*pFyLzCxc_==~w#;-fUJ zR;ujNUE3!=r1e3dd{|Pha*lC~7}sqjpZ_v}@1exIX^1?U|N4TfgUpQPqth&#m8e-Z z`iM!zaS3c&;~Kz0P59<<`Vh1=V&&Z^JFM$lu5MfPbWO|3MdNO+M3wNH+v^{>zkND= zaQzh%Z@H>{ACAcl?0?nbCoB{=l(h$tHG6HnDRROk{&d&)QHNqyDt@mRIuQ%-B+-bD@E;{PJaSek2%MWkX9# 
zi}=sOhkta4u^-=Me2H}yzGEjY=!}FX)roa-tx~7EO1wf17R{maf*z|fy z5Jbw(YUor9=u|bDKA|CqVl;hfLysLJssx)~VoOT~1L(z8VoB&Ek*^};s4Nr?aV?me zx{7xbPV|N-&?wqOK&oIYatL-qq7UbBzAX>wLod9?z{WY9tIfbwaPSySAB0ED?Ix+1 zU>dFQ)!${O`RZ@{>|O?{VXg|cmn(K2+F++Q6iuyED+P|3l{Qn2js$hr3smPeu?!scW;+L=HDHXQ*F&*zR}q1WZwJD z8b7ju9uFD0x6{zM`^$C@*{5vpiA1$K@oRQ_B?q=O)a+(2f4S3Fky)&+OI!JD**blw zqlAzMQIHeKlb?4iB>Ug*cyNuh_snx7*|Dm9HVt0haUtW&>k!KLSo~8X$wWGp8x&>T2} zZvoPV<`nHN)xJ9`)v~)uwRB&n^$sx|~i6njWiSQ7+ zSj$}3!Vf{E=L$K7x(U^w@h9-{m;JNx$5*SsnVD(Mbn1eq5_Uf2 zq`ShL9+J7yRrLg{dl*fiEc|;7-;9qy33ZQ&sSBY2eH-F{luZ!XwQn- zjaDDAnH-%HQbt8*zyIJ{G%o%FD?Tl8eX;ul^D0y!zk+Pwt9L8?^L}mfG3p zU(;WV-8p9Z%-X#>cDLuz-(Ebu|DBJt-$k2OuO4^rZSC)L>K30Mzc|TNi7ZKwv*Ye(G2+$pyhQtf8D(}ubQk)0+z``6WT-CUn=bKSzg zwF(2*Dhyp(g@F$OLk>^DT9|7U2Ch|L`1^E+8uXb}r_%{84h>v!F%bMSjB&F;=Vcm( z`=pyLb#HYi-ED5sEzlB&laI~QEmmvhSk^Bp7W$tk7S?Wxg_s$MZ3){?6_qhA?R58z zG#rJexUz9hEMDoKx?(S%IIC?qWvfoF>J_~pYcaIqeZ-=-WjdK12wldcFz@BeYGt=B zUjNX__Zl7?r|DY?7L9CtT#Vhdqk8G+;;Y&l>AFi655E1?c79@f7~05r*o!9ckVU&4 z{I_LUY*n1=zHCS~GRngr*c83TI6@w&PLb=>dGZ2PE!Fzl`aAo&tF;&4h`q})Ail9 z{nMMPC~Dcd2Aj5Y6Qzl|nbJ(1$mS-NBugv9PJ{GjDRfu?;$0X@WVRVUNf`Lfqq}q4u~VVzI+vGTF4i+%i|{ST6(p;Kme`^I zzq~Tsxge}{TM*XHSr9(`(*2qZ2sXK6`N zO8(9kXuBtA$boR0EkCxwkkWA4gyyH-cz1zp=(1r?Xi7L`H`_zA+fd6pGf#4%wCAjC z%^S&jlHSand+&GWyH{56@@TMAQL5pp5MsbBYYCUl-lmtQKle9Jj4pfl!^ajsJ;mC& zX6+Amt$t`d#=Y6&c=azbfGjtA3kbu*? zEgH(BhUg45AFb1+_NsPwzWYqgi#36QQ;4_M%ywqSJDtw>-OkVI z`KUP=rSyDN5w+^`&|m3`Q7`&H|7G-}@F(ua@joB}MI#hTXmU^rB}6)Lkzfm8aYO=5 z;*lk!pNKUaaM$qJBOGv(3BXM_;3gb!6K+U3WDc{jCOlzQm;9%RM%JJT5WmtVh^supE>+MHM|Fz;@*1x&( zrLjq`u6=0do;45eDD@&`#sWA6$}g7wcITGQZxH@rWaMW*KYsG(%)+tnYd%J2#R6}8 zGnyP|im*;Bh&PCx;!<(7D5;jBDyrdFssRKAYJ5omtmd9kppr^D&_OBdFC*9qU+;fC z>oP+laYb~6N8y85=`tsOfj#Gqt3d2ov*+R}mT$p^rdUZW1?JnEf}Eh)vx?=*_~PqF zj$~>mzVXs^z3q42b=|C4Gwur2i1j~MId}T5?40%`tHzGAQD`4OC+wr6&@4o}$HY`9 zHC??)ot<8kx;NFQZc#U;cRD|8{Hb85kytd+JiqZ|BoIfpBV(;xC zWeWRp;APq}eVH*>KbR$qMv|U9Exj0bY4_CMlUrT4I^Cas9)DkdF1NMu@0wr6yY(H} zmvaO4Z`Ox%6;Cu(>C{y^=}IT(qr>uZSf{SiNmn}6jJs&p6c;PmjE==vvc5{xrc}pR zh$`ha@+i97?sm9eare0Ixe{?}-4*VKu2}0n?IQOGotr8u7d)oxh1k$5cU0CE^Yk^0JJ@yuag< zXJr#s@-1G5Wf*~p9VW7IBEI+JWxr5U7YH@hTc&L~rXZz^WE`kDISvC~piNDP-klVz zi-T01p(~Sd9Okkrmhxg8)SA`I&+>C{c*FUt4fzclHh|025sP#5Ok~Ap>$6i(Yhha3 zRm3Rz%h;ei($~J9Jo#AP+SbhT$F_G|e@(-)onJq)*xIW<)Z4c#9Bztle0}Sp-eX^X z?*n*k;{H|l&c3!TnwdIp!-6^Ma<%!nkKGr&wfok#xYu&L$=rlwoah?xQ@XxJGYj?2o8>Wxd} zugU%LGqMP%=k%hyS3W9_%96~CHPcnHZ>)Lew|w)zOvW#FxK{T)Xx}Gewx5M|%C4F3 zRmz9aG7yE+_AUJrkje|bjLkI9G7W!$@v4}Hw^~}vw|!b=CgN`qu?V=;#sNr>s>U!JFVF^iAPO`3dDI_3yzp^;!HXcnKc?2jzYEZSW>O1y11KfRFGc za1l4s%fV3)#yL=r+i(Z)up02}a3MfNrBJDJL$8NjI$$ocM>r)9a2?9Vf<4CFSvD&C zQ4|R18Y8}(&eO4>w~_ov9yBqc8yg?54aQE^CBF%S^S_`J0M#sK>yG)cy)0AQd= zhk8o!JWuUckva_HLtdaifC9AVsY&F)ly?4~nKWI9xnn(JJ+bJ8vpwZP5EF4lt9WIC z2xF9%c{hLj0XscPZk1YWLoq9u1B6Kti zohR$-7DOTzEnJRH`HIeBrF%xqlOyJFp72y=67s8KKWs6?=?VUXgSF{`DCw#r#Z}i1h(MGyRSPP%2^=9LCzQBW zO%ZHHZcy^U0%(<|D>H(#g*lQZ->l5nZXk24o9sKut@i!$J<5IdI_Xh)wQ@*0Ob*z; zlP;+_&B}qCkqzcZ)@}-21KR9Ily55Agsu86_zHSO+o=zM1JdE(TjELS1NEGEj{Mrb zD1ELbG|sr!d232O9gUlG-r8lcjpHB@ZD1*~l95R!$Y5Mh77VCojN$Q@B zj@1kpawM!-^*C?cCft>spq^^}DzEK*ry)8S9dpOQA4@)w(Y+Om0;{rc9Q{>8nJ zz{#9)K*d<07zLY_MfLmqfdH_n&oa+jib*i}GfP&IvSr)(fE)@0XwZnodtj8@z^1a>D%m7@%w z;(l-qI_$sG#EVay7vbkw<7WnNQcSYSQC(e?dN{%H;|J^_Xrwo`t>Cawe^&*(=lwhj zo*%{(3fCu2gi}m~tN`biUOT+IU2NHX=!Mp659}!ozP5Yv`&7Yv@2vF>dT?ypyCZ1n zrBkSH=nwC)ufw5s=)b9^G2y?J?NAkgnj|7sLXtrxl5mflH08Na@(K^}gT#g;<@$-I zx46Z{TB2j}n7A!Z$3$h-O05Sl^NaD#~OeClp z@`=q$B}ri4!3rfw4Tu7D`3SY=1|-BVHR@C#LlblpO9T;Y9s|czWM+UG0zegzam;{5 
zMi)Z5E?`v^1SC-;pz8p4*wCJ5tk+YRd{t7{dze~=gPwGY)X$5_+~5V1!g`eIpkq1D z>U*PHF^n4vYAeiNnHMkceAYk8GcR#hmR45k4zQ0;$R>qLso&BA6Wy80%(74n2BXzQ zjpw3O7xh%6D9{_bzkgEE9>mqP5B-XH8?F1W_sG#?vRj$1a2&C z|BoG05{;RG_e;;hZ=F6hz4Qsn!P4*NG|y_eRMN-(5#H2Q>Y>*)skBh|PbwoZ_&;SC zsm389XhOmzwxmgpXOpDn>B+LJxJ~)k=~#3m=9=ulK{_`Z@d1*6gk8h~iDE9ai0r|F zXLxi%lDX!B$r@Q#?XVHGvszZq8q@S?Mr&}prRD6LGdJ92cR5{Ey>_qDTeVJFW300t z4Lw@*4dW?mv%T4QGPDi9qW#!>%{m-9kAEHdoiS$qJ~W=FsYq1V(Gqcy%qAO&K-`Is z?*|dZopn^yUEA;J?vR!a$ti{zLQ1+j1QC$#ZY87?q&o#EkuH%&I;0y(DFNvgIOF|3 z?`_=A`<%1RALqJX{cH*e|Qs;Y%=zEyr)P2$;U#!u;Ge*iI1C56`MK{8o$n`pQTR$>2Ch5Wbc8m zW?IfIz2uto_J)HW9y`%(9aEtyJ%5F7O&`I2RcP`iNTBO2jvAv$N|D;pieAzLRU^^M zR0>bR2UL!QvhmIriEN}rN(N60Cvny7Nb_db`Lm<4P-oBKp7o{Jk+)X?wM@!2}NZk|UlH*ntl?{!Wizp0* z{xTJTGakfC7g?c=`Deq-z8U90cJN_yH9EZxlEJ zLXPkT!+t}7|3wb|1qBWQ{kysT0|ow9NPa_s|2gr$S;+r?DBuXtHv^FW7e4oI81P^0 zzmUQau>gU8!nPSeaE6D!Tt+~@AO20^MnvgvIQTE6pIq^urC;j(O$8VDZ$3Gqoxi*I zt<9f3{Tn*@Z#4lwspY?nQaXzu6u{~8_u z@J|XlV%Y%y&A_{ANPT5y# z_E?V6ZpOztGqn^_$kD!CQ;g@tcWXb0F6YWFG)X(`P~B}lbL%+itT);j#*mfL3+#fr zd)v)F9Aji*d8jGaoCe^{S$}lEFNh`wTZO!xg1wi1jqs9+>C%vJiYOqnJZU-OFIFzO zv=Y|hGj&Vtt4>#SXQ-YhiH#kJ8Oc<=QMBy+K)#*hKO7>w@hz=*N@O;s5A=4Y46T2r0mfT*13n^hpB7uejRXqT9xw$E6p)^2Om|6y;U5` zJ;;^S*RGY_DaWQX&9Q4#B-LDse-hF8e=fk!E%FDY|6gYT{DX-8=YH{j2t)sCfBGGU z{@3FFx`q8hXa8>)IuOn;fH-R5Kjj7tvElxj9J{{Qo}^PX`%P7{iH+7LOtvgt&|20x zN-tCzN`+eNX$(_nI8q{@>z%O+mI4a40*bkf#FcFOol3~3^$11X%J}GNY(`u=iP)+2 zCT_bU0i~q)^%^T(;YJs8mWM=R?N^6Qdsnwv^|CJO3ob(qo{C&2Jug>Sx=7;Tn%%8A%lq{FplW{}ZXq|9bDfuXh9xJA z@qpW|Z7nPbElaeGJ>PEmE#misv(HMhYH&uVlf8{&ZOvvPrg1Z2m%@ewW>suGVdl$a zxXZVk(*z3&SS7aHEjlCO+!TsBG``@Bp<2wIPP4m%!m zC0DQerVmpaig#f9P?s ziA0ocRe0&H9Lbiry0Di;zowOpgm!yKJ`~qhWh%>aI;`vgpYc=$ZX{p4`9|rL+e?w- z8fug?)E!MZTFLUDI+OXiCOOgoT>=3EObJh_b+tu)P!Vf7Vl6; z81i+@%W<&V+ozCGwJJo|Jh3n%Gr7G9N8>YG!GUAH zi<^ovqD+WNcoPkYmBT?Q4)($CT47w!=wZlSXIT?dL3@$g`v4TI$Se*D$G z@4oLGEi9GE##p|FeW-CL=#|zexxk(kvlzV?%h#jxr6o*}SM61fk(Kd%*^Vuwr>U-? 
z#yG;lWFb_#7tMfySapyHo=fZCK41W~?i3L5fgwBCt5uEYxFUIgtt2~*?cAQZE;e|$ z0q@*CcyG~Nsru|wjS;YpunxtMEyj8UdDE!G^J2_Hr|pWQ632T3 zdBI?mz*N#Wfk_>&I)rnZTzrv*QZn;$wTjsLPTU1j9mi4e<0ki3qjx~s{nr2 zO2gGR)ST<1jj~RdoRjj_EpglNKyT{|bwD|O2{JlRXC6`W9z7M5(oZ471x0eI0dv?Zvt({mY*UuPn_ zBcZF*g{MANjF!zUn^?5Vi>PGSh}A84a&YM=();x!gxL0U#(nESZiku5_NUhe3ka~Y zoW`n)tZib;mN`AcoS?6JQ3dgb2d#;H#GGeWyNu*Cu)cuO{WwGFz*R}2qX)S|1hbo1 zv#fr(d`Kehv}4SgFI25s@A&7Y;a9VEdOc(r{Sqfj$0W2gqs?x{5U)ldocKVWXq8gM z{NlPQ$dTLa^4b{t`{@ir)Y8F*LSsJxt)rfJwA{Acv7CMzb0^l_awJ9UFACIBpR=*A zcB0)AyI%!ylq#9gx3Jz<9b-E1BA`5;722UI*S5sULZ?n$!iCHRDm~3w@;Kj(g^ggE zLdO-76MW^iV_hFv2+-D%m2|h}E)lmHqN%8wm=DHS;sThgjt*Hf`d`hn`RmfAn91P8 zQtmU$XX`I(?|q$QC31*l9~#+)Rh0JA-BZq!$Y>3uctAL9Blw(wT1yRD@midv0Y5py z5j&qeaN;Y;fldmqSv$AMHYty{dY}UFrm8QkvCU)hw59!x08;i35_lTSg9O?e3Ua(+ zHFJ3n_H(#dBRd-CBg%e!@5_+m68L9DREOw z(?_)n?1(l8aoY9MFVri!9voz%yy*6M%hj-)xjLP=Bq#`Mi}oFqzV><%M4G<9dIpn=F%Q)yvn!4?{^H z{F~nDYLCJS5~=B*$i$D*ad&jp)On0pVf%{f{2gN7f^23gzdTRs*PG z!wKERWLwJl6Ev)WK_Q`%Bvtag{$1ja2GxfpoI030y)#Pa&;vg|nn8we@yb+D6dtqS zKaixH<3$J5hmrb*!h{^^=vFE(AT5|0rFSdqvImH)QDeJ&%;qrwyG3)P8~6hl1(4!Z2>ff8$ zwyADgw=O%Tcpjn(?y#FM%SFJU`=v#$KV4gZ8+onXA7%0A?#6Pa)3wrEQewGjX#y+* zAZX^}989^-%!^YACGytQq+~G38GLaV4mB@pN~7;WGkW5;UAIOSlPGc(le;<5Wwq39 zR`x?LXz{T<--B}!(w2(aw}{%ku@%OO1A*BnFR4#e6UCmV;k;1~JFL*zDnoT1w~g&X zD)o^2xHRdn{|?!0&c+z?m8DHOE}gzLl4E0WTC_eAHF7qVqNAHeUI8{zo(0oN1jb{M z6$Sv66H#&?3`^0Cu03lC=?iKYXY`_5*jv9bB=swMY2g>ct{ocLRAB{TU}BYkRHs2| zB$W4TDB=dB0eXmco-h|uq$*;}0PPO(#{?@3!}S*Fnub)A;4dK z=ikfhU9^xdd=ISsmf@3bUSL==_2zTp$wCIE`O?5JN6e*HJ9%~5Uh2bBMGQHMMS)>Y zDF*PE-|2`yn9dFiD<^i8IT@XMBYbbuqZn~L7vWE39ztA;@OMT0*t)rObF!Acsc>__ zdx%=VE*tbTO5=r9!t_8_Ld>8b{WD)IAV${w_63lBsRbp=Z7R%Xd3L{Wjq`{;Dp19U zCNydA8K0u)0p}`n&eVGo+b*}#6DhLqljNovx4n&BPcOeUy>ffGl-r>F{k9kVxXyt6 z=H5ANQ{VJDSLT@^^I+0a)$5j;oTZjW>{)y=ZF@3pTxEfa)J=~>R~t%Qt~Fx35GQ7p zR6pKnov<`q{ZPEbd_2}ddEcfq;Zx-Hv$GG6ZkwGxzqdCKD5aT--0pH){qWbv#i8I9 zxvT5ZT*B6k{WFD=F1IF%>60&KvfH#xRLG;ATTgjgTaJV zF6t)fnPblSxs3Oe_m@$yr&+In@GWbG7ciR8aUViFi0tFW2lP zaW#i!q`_!#)lM$O;+qWjT&tz_ieNWX+%Xc<5-a9>yi5|>9S6gdxR{tA`kND%y}>n$ z%xB5V@2spQm6Zn1UmR3aP^a~>v1knzr%z>aJPRsB%~1YMcWR*8CgMxp{0sub}-xn;lk$b8N5bCp4Zp z9O;}b9DHIg7Km4R-*6DsQnE0qSjYWY#12638qv})(PD0E;;V!yX>S)J^3t1k(C}+) zjouL!>o%`nFVDII>Z6yPOhjqP5w!iP`FC|$#9xtsDv@=~gPxnoRIGBy)v~a5Sjq}2 zZMbQ((^)G`n&$%f?^I3TG$ydxwECtf%2fd4ysk2tmD=9=bn}TgZLh4>@h%z)+Ks0* z2(Fc#(Z6x}Za0ycXBLcm%!+Cg4M@uP*&sQf~58xR1i>mE&i8sk8zVh+K)@Ldzb=y z?Lba?t9LyPvkw!l0D?yF$OS%CD*4gChh&sR7OV2qx`4zGoL4FeD+8U8&WJ56I5skMq=n26+~nj){;cC z*;4O4W4Wc8Eh|EWouhn7&-aW=z{(LxznFdO*O@?;8;~WT4#JW@4SIkUjyeC7qaadV zVyrg(Gnd;)s{rIWSgyf5U6}a1u6G9))99o(o-p$!>cXbqJCkd{AJl@*RT@CPYAjWd zjJRWb@CgMstZ26I>&uG~^y_buO-K|a?Jy`;j^U`&F> zgDygPGttjShuN8a1Lv7U^xwDoY39r-aZH^#EgP{Q-+H?D zou|s~E$7sAXWx;N#SbhV%&#JK8drCBl~b-iCJO<)BiZEu?M9x$NGu?f6PBT{&}WE0 zu-2nm&Mt0Hjhbg`wkrrvMM9PbSs$Hqb<8Wis%LN^%ssP1R-%LnP+g@-m*oe!&LCTIw|*tw;1@g7{qSyotczV0~!0o=#*k~w#tDQGeI=Q3!aGIqtEe7}Gh2PN#*$ug&$r7KW&QZ8wG<1BHAfyR z`{(2zQfGl~pBwIO9rD;HD;XuFIN3U=bf4>|zOOuCk$1v*)3YoZh!(FrIWCuocu=W4 z4>{LBINn6Di7}kf)XvXO@y$>f6MI}aLq_r7D?fWZd89qg@rM^$A0t_V4GX>R9Yty>GgXyDo4S zu&cMIVkvRoSWs8<8(O5Fdv>!hp(CP43>}{LwDEPwRQVXBq<&hoxEuq4iRo%BKu-*=V_xk3lBn}1P8BzhEUxVb$ znBE)_>h*rNp^7lN_A_kfR+eEmkQMQXuAf96T1~-WT7Qm`B-JqM&Xt{fNXYDSJQ+KJ zRk6XM^Dv*|Mh=MC0H1z&1V0NaTSs^2*EY#mD`!R53hN43wXQ2Rt36p5Pue`Na=fYW z&~(;1K5MJd@paA1TaJ(Jp%a%op)`E=hh8n5FTA58qInXuv&!Z&CkH=>Z8G6@%;SkL zEhNyZMebiauj|(*abaIxqU~&h%VP?E-4qsy3?GqDp5kQ8w_>9n1PCK>q?n`O9>zbi zKJZ9!s(ANgNX6xo-7*1mB$>*-F!^ckJ_5z6r`SRf@aYR2FLXiC=}sZeaWM zca?yzMs3^G)FR`ecR(s9HR4To*>c?Z2Z5ToMV77YBF+Ql;qIMlkR@jw@q?E|PEPS1 
zdMn)P<*3$PBnqZ(Z_kQOk+jmhxm>z*zMUi{Y3j7dy71iuO!kyn9ZyJoOQrTutFiq6 z^B7~Btj6|ys&`sZOrEJBH^!(sLYZ*zWx*x2Ff$x*r3q*)k&;W~cBYviyvZp~rcO$Y zeFrMIAaF6qFQ5}9d7;0ww|e;X>!Eto#sKog^%>@lh)&S-bQAPag{Pd9L( zlq|Z4_^(4yau+GOE;uBVE@QGPS@3{Xm3U?xPC(N9>0LTT`Ku8iO7i)sa;?%Qb3FR9 z^n$$u`i!APHAdzHB)~ceua?V%@w{C){Pd3mI zJik`z{%R?=t4>%=rhpmtG7|^q#J%=9G#Z=&!J~|A>N>Yx(Pg6sfVDxwOnKMJbXBm6 zgT#;>y45zr6b=i+S6Zlzuz9(+TU2?DgTIcIiLTj{*~C$nk|HzlP{0T!oA)pun+6CV zg8$AG%Es356(Ssf0^1SuyD5#tnD4bV#3{1$@QB8l{Vyay+!Uq{bS#? z@0uGDpLX>dzOLx4@9{L_>~)UC2qiUpcXSvl>0%T+VU6m_ek2Ct_C*yzr2oU|K_HwUc!0%yv)Fic-{hY)1`s6}h`b%}e7r-m=j;IT z$jv%eKaV5W$Jxld-#p*%@?u`V!|$@8-r?HuXp$#Qz+q;fevNR5{=2)7o8Q%beaXWG z;iGRw0*gL-@#$_F%&yLRlq5Xpu5r8)N$ zTdH<@d3OSAb=a6$t?ucPvvh1%cZe+`(bv}Ln^9H2K#7~6#x;{QxjTD$u6yh9Ff(PU z9&CIrxu#Lk^LDO0#-&(m(pt}9*=UZx&4cic96x2jf{=e*~SADRX66DG1uzF zYX*=VL{KY|I>{wZ_H+8YP{#Sk@wCu12v7LPK87c`4GXp$PN{i*$#eW-*tI$rmz|mb zITy_pQ=YO8wd$VH^d=xDe8sFIMV2uFTMj~{_u_IvuU+I@xkZXojE$5cqQVGLPWtAW z^!_!G({ssWa(N8wAl;xj=k*a8tMyi5Lm@-{lzVivw z)Z^A59kksLbEk$n>^traP}z3aKBV`m%1#{gbJ(v$D~1Czzl=0G&dhs!xc;;}IqE@R zj-%s1dWTofb@*29>_e7km-f8S#%cYx3EbJc>Ao8e6+ANE5=013-!FMx?mmq)w`nHe z;9c(BQ#l_SLLAH-@`kX`2yD|ksYc3mr$L5s2$Lay)l0|{<Gj)*m~&LZ!w zsodp{Nsj?kib4CqV%jhP(=~1LR!-%?83W&~Ak!@2)03$T*VlUA(kseMYO7el1wJ%m z%{P4J^%j0Z1dGSW+If?$mzfHoV-6oy za|0z6TtC<$B7hk`bKGkUpZ3cFfa~jljwIx(57_s5J!D*iZSh3I%T$0T=fdDJc+q)_ z!oflSg$LQv4pZOhyi~Hxj8{m@W#p=i_|sTlzn#pu)9m2W{fTw@o12a?506Y?$Fp4S zX9pk?3j3S+kP|n!lNg(h%0G&9?Y00OQM$_%)ZHXQ55&= zsvG;l>cc_xcdFH#X9+r^gOxQKbeG`lM(I-*RxxVxr z0J{0{Mb!6VJf84~H1gYx%mPvS*64+0a4qi^u8UH*E~YxxIqe*>5xoiiOi^@ut`mRh z@LqTF;e@!jjHmR+x|2y)sQbJYj^J($h3j{&rnT6c72Gr^u*|;WY>2Rj5U{aDIXxxz zh>j9{B5Wd^-ssKJ5Ed4l3U)g7Vt?7n2B_$;&9whXY9TdWm zd^c0gP{>r;?j)ioBV#!OKDLdI=AEd8j-oX&$k8gtmUJEKMQq)MHxeSj)#9a?Mn&c! zEKO>R|BiRYiqlC~xiTnu{%XHvoN43BVpSZ!sK24S3rA*wWX8B{$3VwYnyRUuGm1i% z<^ATdvwaGiN#BEVhvl{l)&YgRzP(H%AwTAt(R*3XlnYe1yS4&07H=?;ny5DE@W&K} z8Jbp5Fk(}QanM5AJfPCr4?(v}?O2@|-DxRFr+!DLM*&+jYoa|WPjBkKzxf^-IX0D9 z5PXYY3U=-Nr@jF6Zv_Sq%v?;JtsGq)oc~Lqq-<Sh*h zw#GlI*cw~7Fd$S_;x49U_O1+Y5SSMr07WROel+yM0tN{1LJ?*VLb~8;W~a>n1Pky& z!2B>c>^ChI0LlyG2g3yZlNJjI`&aV}Jp4cqLc1jZg)snsh-3Jn5C9xe1Os>xy@TMt zE3$sb7k-s9K>xk8;>YRmKj+WzSAc#~Wc`m}_*<0m|6ZB}1VMldP#`}q7zTkt5c7VZ z!vOf@9D9MEbNx?R4G2H@SE+_!T)%w~fJo}hH#ljRshzD0?Vbb^#vATS)v>2Y$m~+q zc+Ndc&y2uRc6m&}n*hRZCxD6t9Uq|q?x$|19ssZqeyzv4;DUk0^hi1;P5AQ@Nde3p z9}lYnj?>Q2D>rCTlSt%vtH6Z}G1W%qdg5#Gxp+nPN;-{d_mqobsQr&)LkVd&$orVgbVb5v8aRpGVsM(A_U z8%`$Et7ivp9735Nh9=cM>~EzjsByDEv*S7B#Bj(=Xod;mNlPScF*(IkWzDlgI1EYi zXy1roW4YZoV?;MALJL;pm}AfocY72Nt(T+B=t6<@)`J9XA~i^!DE^-3uxD0aDCc;a zWaJ0DyVmFZ*&j;9hexw5r1hUWbO9JuM(KMbuw@y+BMglWN_}sXiHj}Y7`q4FMjSdE z!!QZ3NkTaS|q96JA%f0C7Y-Wyw5V*l`0Dr$2p#1z` zeg<=fzikNJ;a^u7?Ehs0f+1k|A2t904EVDi2o3@w?*1RIzkT_^00`g@8x#Zv!~U=# z+Jhjpo4@%2!BE(rz5ob6^q=+MK!nEiH{Tz-)t_SlLV%Ee*g$aTKWq>%6cM@K+Cz9k z5GuVtZSX(G0)heg{}C4$82)E_AQ*JM|xZ@B;-ve~ynI2#5X|7k&_w{~z_h zK!HEk4GIK<{_F#>p&=Bvzt0T{g#EgfuFl3*wr0)<{R^LlmABcC6yj5NaB%%0QbnW~ k;(Ns2+=1bT(D+BXxwslTyZ)7IP!I$L#bITYQjx~_Uu&;9TL1t6 From af9af417d1d1d72f9860ee01563719d32d9f0e6a Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Thu, 24 Oct 2024 09:17:08 +0200 Subject: [PATCH 04/14] lab scenario extended --- pocs/nfs_simple_lab_scenario.py | 567 +++++++++++++++++++++++--------- pocs/poc_async.py | 21 ++ 2 files changed, 433 insertions(+), 155 deletions(-) create mode 100644 pocs/poc_async.py diff --git a/pocs/nfs_simple_lab_scenario.py b/pocs/nfs_simple_lab_scenario.py index d5fbe02..cd6071c 100644 --- a/pocs/nfs_simple_lab_scenario.py +++ b/pocs/nfs_simple_lab_scenario.py @@ -6,10 +6,50 @@ 
import json import chromadb import ollama +import logging +from logging.handlers import RotatingFileHandler +import os +from chromadb.config import Settings + +# Create a logs directory if it doesn't exist +log_dir = os.path.join(os.path.dirname(__file__), "..", "logs") +os.makedirs(log_dir, exist_ok=True) + +# Generate a unique log file name based on the current timestamp +current_time = datetime.now().strftime("%Y%m%d_%H%M%S") +log_file = os.path.join(log_dir, f"nfs_lab_scenario_{current_time}.log") + +# Set up the root logger +logger = logging.getLogger() +logger.setLevel(logging.DEBUG) + +# Create a rotating file handler +file_handler = RotatingFileHandler( + log_file, + maxBytes=1024 * 1024 * 10, # 10 MB per file + backupCount=19, # Keep 19 backup files, plus the current one (20 total) +) +file_handler.setLevel(logging.DEBUG) +file_formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" +) +file_handler.setFormatter(file_formatter) + +# Create a console handler +console_handler = logging.StreamHandler() +console_handler.setLevel(logging.INFO) +console_formatter = logging.Formatter("%(asctime)s - %(message)s") +console_handler.setFormatter(console_formatter) + +# Add both handlers to the logger +logger.addHandler(file_handler) +logger.addHandler(console_handler) + @dataclass class Story: """A narrative element in the field with rich context""" + content: str context: str id: str = field(default_factory=lambda: str(uuid4())) @@ -17,96 +57,149 @@ class Story: metadata: Dict[str, Any] = field(default_factory=dict) resonances: List[str] = field(default_factory=list) field_effects: List[Dict] = field(default_factory=list) + personal_narrative: str = field(default="") + emotional_impact: str = field(default="") + @dataclass class FieldState: """Represents the current state of the narrative field""" + description: str patterns: List[Dict] = field(default_factory=list) active_resonances: List[Dict] = field(default_factory=list) emergence_points: List[Dict] = field(default_factory=list) timestamp: datetime = field(default_factory=datetime.now) + class OllamaInterface: """Interface to Ollama LLM""" - - def __init__(self, model_name: str = "mistral-nemo", embed_model_name: str = "mxbai-embed-large"): + + def __init__( + self, + model_name: str = "mistral-nemo", + embed_model_name: str = "mxbai-embed-large", + ): self.model = model_name self.embed_model = embed_model_name - + self.embedding_cache = {} + self.logger = logging.getLogger(__name__) + self.logger.info( + f"Initializing OllamaInterface with models: main={self.model}, embedding={self.embed_model}" + ) + async def analyze(self, prompt: str) -> str: """Get LLM analysis of narrative""" + self.logger.debug(f"Sending prompt to LLM: {prompt}") + self.logger.info(f"Prompt length sent to LLM: {len(prompt)} characters") response = await asyncio.to_thread( ollama.chat, model=self.model, - messages=[{ - 'role': 'user', - 'content': prompt - }] + messages=[{"role": "user", "content": prompt}], ) - return response['message']['content'] - + content = response["message"]["content"] + self.logger.debug(f"Received response from LLM: {content}") + self.logger.info(f"Response length from LLM: {len(content)} characters") + return content + async def generate_embedding(self, text: str) -> List[float]: - """Generate embedding using Ollama""" + # Check cache first + self.logger.info("Checking embedding cache before generating") + cache_key = hash(text) # or another suitable hashing method + if cache_key in self.embedding_cache: 
+ self.logger.info(f"Embedding retrieved from cache for key: {cache_key}") + return self.embedding_cache[cache_key] + + # Generate if not cached response = await asyncio.to_thread( - ollama.embeddings, - model=self.embed_model, - prompt=text + ollama.embeddings, model=self.embed_model, prompt=text + ) + embedding = response["embedding"] + + # Cache the result + self.embedding_cache[cache_key] = embedding + self.logger.info( + f"Embedding generated and cached successfully with key: {cache_key}" ) - return response['embedding'] + return embedding + class ChromaStore: """Local vector store using ChromaDB""" - + def __init__(self, collection_name: str = "narrative_field"): - self.client = chromadb.Client() + self.client = chromadb.Client(Settings(anonymized_telemetry=False)) + self.logger = logging.getLogger(__name__) try: self.collection = self.client.get_collection(collection_name) + self.logger.info(f"Collection {collection_name} found") + self.logger.info(f"Collection metadata: {self.collection.metadata}") except: self.collection = self.client.create_collection( - name=collection_name, - metadata={"hnsw:space": "cosine"} + name=collection_name, metadata={"hnsw:space": "cosine"} ) - + self.logger.info(f"Collection {collection_name} created") + self.logger.info(f"Collection metadata: {self.collection.metadata}") + async def store(self, id: str, embedding: List[float], metadata: Dict) -> None: """Store embedding and metadata""" + self.logger.info(f"Storing embedding and metadata for story: {id}") + self.logger.debug(f"Embedding length: {len(embedding)}") await asyncio.to_thread( self.collection.add, documents=[json.dumps(metadata)], embeddings=[embedding], ids=[id], - metadatas=[metadata] + metadatas=[metadata], ) - - async def find_similar(self, embedding: List[float], threshold: float = 0.8, limit: int = 5) -> List[Dict]: + + async def find_similar( + self, embedding: List[float], threshold: float = 0.8, limit: int = 5 + ) -> List[Dict]: """Find similar narratives""" + self.logger.info( + f"Finding similar narratives with threshold: {threshold} and limit: {limit}" + ) count = self.collection.count() if count == 0: return [] - + results = await asyncio.to_thread( self.collection.query, query_embeddings=[embedding], - n_results=min(limit, count) + n_results=min(limit, count), ) - + + self.logger.info(f"Found {len(results['ids'][0])} similar narratives") + self.logger.debug(f"Similar narratives results: {results}") + similar = [] - for idx, id in enumerate(results['ids'][0]): - metadata = json.loads(results['documents'][0][idx]) - similar.append({ - 'id': id, - 'similarity': results['distances'][0][idx], - 'metadata': metadata - }) - - return [s for s in similar if s['similarity'] <= threshold] + for idx, id in enumerate(results["ids"][0]): + metadata = json.loads(results["documents"][0][idx]) + similar.append( + { + "id": id, + "similarity": results["distances"][0][idx], + "metadata": metadata, + } + ) + + thresholded = [s for s in similar if s["similarity"] <= threshold] + self.logger.info( + f"Thresholded results length similarity narrative: {len(thresholded)}" + ) + self.logger.debug(f"Thresholded results similarity narrative: {thresholded}") + + return thresholded + class FieldAnalyzer: """Handles analysis of narrative field dynamics""" - + def __init__(self, llm_interface): self.llm = llm_interface - + self.logger = logging.getLogger(__name__) + async def analyze_impact(self, story: Story, current_state: FieldState) -> Dict: """Analyze how a story impacts the field""" prompt = f""" @@ 
-116,30 +209,43 @@ async def analyze_impact(self, story: Story, current_state: FieldState) -> Dict: New narrative entering field: Content: {story.content} Context: {story.context} + Personal Narrative: {story.personal_narrative} Analyze field impact: 1. Immediate resonance effects 2. Pattern interactions/disruptions 3. Potential emergence points 4. Field state transformations + 5. Emotional impact on the field + 6. Narrative evolution - Do not make up anything. Just use the information provided. Use the context to determine the impact. Do not use markdown or code blocks. + Provide a qualitative, story-driven analysis without using numeric measures. """ - + analysis = await self.llm.analyze(prompt) - return { - 'analysis': analysis, - 'timestamp': datetime.now(), - 'story_id': story.id + + result = { + "analysis": analysis, + "timestamp": datetime.now(), + "story_id": story.id, } + return result - async def detect_patterns(self, stories: List[Story], current_state: FieldState) -> List[Dict]: + async def detect_patterns( + self, stories: List[Story], current_state: FieldState + ) -> List[Dict]: """Identify emergent patterns in the narrative field""" + self.logger.info(f"Detecting patterns for {len(stories)} stories") + self.logger.debug(f"Current field state: {current_state.description}") + self.logger.info( + f"Current field state length: {len(current_state.description)}" + ) + story_contexts = [ - {'content': s.content, 'context': s.context, 'effects': s.field_effects} + {"content": s.content, "context": s.context, "effects": s.field_effects} for s in stories ] - + prompt = f""" Analyze narrative collection for emergent patterns: Stories: {story_contexts} @@ -153,210 +259,361 @@ async def detect_patterns(self, stories: List[Story], current_state: FieldState) 4. Critical transition points 5. Emergence phenomena - Do not make up anything. Just use the information provided. Use the context to determine the impact. Do not use markdown or code blocks. + Use the Stories, Current Patterns and Active Resonances to determine the impact. NO markdown or code blocks. 
""" - - return await self.llm.analyze(prompt) + + self.logger.debug( + f"Sending prompt to LLM for emergent pattern detection: {prompt}" + ) + self.logger.info( + f"Emergent pattern detection prompt length: {len(prompt)} characters" + ) + + patterns = await self.llm.analyze(prompt) + self.logger.debug(f"Received emergent patterns response: {patterns}") + self.logger.info( + f"Emergent pattern detection response length: {len(patterns)} characters" + ) + return patterns + class ResonanceDetector: """Handles semantic detection and analysis of narrative resonances""" - + def __init__(self, vector_store, llm_interface): self.vector_store = vector_store self.llm = llm_interface - + self.logger = logging.getLogger(__name__) + async def find_resonances(self, story: Story, limit: int = 3) -> List[Dict]: """Find and analyze resonating stories using semantic understanding""" - embedding = await self.llm.generate_embedding(story.content + " " + story.context) + self.logger.debug(f"Finding resonances for story: {story.id}") + + embedding = await self.llm.generate_embedding( + story.content + " " + story.context + ) + self.logger.debug(f"Generated embedding for story: {story.id}") + similar_stories = await self.vector_store.find_similar(embedding, limit=limit) - + self.logger.debug(f"Found {len(similar_stories)} similar stories") + resonances = [] for similar in similar_stories: - similar_metadata = similar['metadata'] + self.logger.debug(f"Analyzing resonance with story: {similar['id']}") + similar_metadata = similar["metadata"] similar_story = Story( - id=similar['id'], - content=similar_metadata['content'], - context=similar_metadata['context'], - timestamp=datetime.fromisoformat(similar_metadata['timestamp']) - if isinstance(similar_metadata['timestamp'], str) - else similar_metadata['timestamp'] + id=similar["id"], + content=similar_metadata["content"], + context=similar_metadata["context"], + timestamp=( + datetime.fromisoformat(similar_metadata["timestamp"]) + if isinstance(similar_metadata["timestamp"], str) + else similar_metadata["timestamp"] + ), ) - + resonance = await self.determine_resonance_type(story, similar_story) - resonances.append({ - 'story_id': similar['id'], - 'resonance': resonance, - 'timestamp': datetime.now() - }) - + resonances.append( + { + "story_id": similar["id"], + "resonance": resonance, + "timestamp": datetime.now(), + } + ) + self.logger.info(f"Resonance analysis completed for story: {similar['id']}") + + self.logger.info(f"Found {len(resonances)} resonances for story: {story.id}") return resonances async def determine_resonance_type(self, story1: Story, story2: Story) -> Dict: - """Analyze the semantic relationship between stories""" prompt = f""" - Analyze the resonance between these two narratives in the context of a research lab environment: + Analyze the narrative resonance between these two stories: Story 1: {story1.content} Context 1: {story1.context} + Personal Narrative 1: {story1.personal_narrative} Story 2: {story2.content} Context 2: {story2.context} + Personal Narrative 2: {story2.personal_narrative} Provide a detailed analysis: - 1. Type of Resonance: - - How do these narratives interact? - - What kind of relationship exists between them? - - Are they reinforcing, conflicting, or transforming each other? + 1. Narrative Relationship: + - How do these stories interact on a narrative level? + - What kind of thematic or emotional connection exists? + - How do they reinforce, conflict with, or transform each other's meanings? - 2. 
Meaning Evolution: - - How do they influence each other's interpretation? - - What new meanings emerge from their interaction? - - How might this change the overall narrative field? + 2. Character Development: + - How might these stories influence the characters' growth or change? + - What new aspects of personality or motivation might emerge? - 3. Pattern Formation: - - What patterns might emerge from their interaction? - - How might these patterns influence future narratives? - - What potential developments could this resonance trigger? - - Do not make up anything. Just use the information provided. Use the context to determine the impact. Do not use markdown or code blocks. - """ + 3. Worldview Impact: + - How do these stories shape the characters' understanding of their world? + - What beliefs or values are being challenged or reinforced? + Provide a qualitative, story-driven analysis without using numeric measures. + """ + analysis = await self.llm.analyze(prompt) - - return { - 'type': 'semantic_resonance', - 'analysis': analysis, - 'stories': { - 'source': { - 'id': story1.id, - 'content': story1.content, - 'context': story1.context + + result = { + "type": "narrative_resonance", + "analysis": analysis, + "stories": { + "source": { + "id": story1.id, + "content": story1.content, + "context": story1.context, + "personal_narrative": story1.personal_narrative, + }, + "resonant": { + "id": story2.id, + "content": story2.content, + "context": story2.context, + "personal_narrative": story2.personal_narrative, }, - 'resonant': { - 'id': story2.id, - 'content': story2.content, - 'context': story2.context - } }, - 'timestamp': datetime.now() + "timestamp": datetime.now(), } + return result + class NarrativeField: """Core system for managing narrative field dynamics""" - + def __init__(self, llm_interface, vector_store): self.analyzer = FieldAnalyzer(llm_interface) self.resonance_detector = ResonanceDetector(vector_store, llm_interface) self.vector_store = vector_store self.state = FieldState(description="Initial empty narrative field") self.stories: Dict[str, Story] = {} - + self.logger = logging.getLogger(__name__) + async def add_story(self, content: str, context: str) -> Story: """Add a new story and analyze its field effects""" story = Story(content=content, context=context) - + self.logger.info(f"Adding new story: {story.id}") + # Analyze field impact impact = await self.analyzer.analyze_impact(story, self.state) story.field_effects.append(impact) - + self.logger.debug(f"Field impact analysis completed for story: {story.id}") + # Find resonances resonances = await self.resonance_detector.find_resonances(story) - story.resonances.extend([r['story_id'] for r in resonances]) - + story.resonances.extend([r["story_id"] for r in resonances]) + self.logger.debug(f"Found {len(resonances)} resonances for story: {story.id}") + # Store story and update field await self._store_story(story) await self._update_field_state(story, impact, resonances) - + self.logger.info(f"Story {story.id} added and field state updated") + return story - + async def _store_story(self, story: Story) -> None: """Store story and its embeddings""" + self.logger.info(f"Storing story: {story.id}") + self.logger.debug( + f"Story length sent to LLM for embedding: {len(story.content + story.context)}" + ) embedding = await self.resonance_detector.llm.generate_embedding( story.content + " " + story.context ) - + metadata = { - 'content': story.content, - 'context': story.context, - 'field_effects': json.dumps([{ - 'analysis': 
effect['analysis'], - 'timestamp': effect['timestamp'].isoformat(), - 'story_id': effect['story_id'] - } for effect in story.field_effects]), - 'resonances': json.dumps(story.resonances), - 'timestamp': story.timestamp.isoformat() + "content": story.content, + "context": story.context, + "field_effects": json.dumps( + [ + { + "analysis": effect["analysis"], + "timestamp": effect["timestamp"].isoformat(), + "story_id": effect["story_id"], + } + for effect in story.field_effects + ] + ), + "resonances": json.dumps(story.resonances), + "timestamp": story.timestamp.isoformat(), } - + await self.vector_store.store(story.id, embedding, metadata) + self.logger.info(f"Story {story.id} stored successfully in vector store") self.stories[story.id] = story - - async def _update_field_state(self, story: Story, impact: Dict, resonances: List[Dict]) -> None: + + async def _update_field_state( + self, story: Story, impact: Dict, resonances: List[Dict] + ) -> None: """Update field state with enhanced resonance understanding""" + patterns = await self.analyzer.detect_patterns( - list(self.stories.values()), - self.state + list(self.stories.values()), self.state ) - + self.state = FieldState( - description=impact['analysis'], + description=impact["analysis"], patterns=patterns, active_resonances=resonances, - emergence_points=[{ - 'story_id': story.id, - 'timestamp': datetime.now(), - 'type': 'new_narrative', - 'resonance_context': [r['resonance']['analysis'] for r in resonances] - }] + emergence_points=[ + { + "story_id": story.id, + "timestamp": datetime.now(), + "type": "new_narrative", + "resonance_context": [ + r["resonance"]["analysis"] for r in resonances + ], + } + ], ) + async def demo_scenario(): """Demonstrate the narrative field system with a simple scenario""" - + logger.info("Starting narrative field demonstration...") + # Initialize components - llm = OllamaInterface(model_name="llama3_q8", embed_model_name="mxbai-embed-large") + llm = OllamaInterface( + model_name="mistral-nemo", + embed_model_name="nomic-embed-text:latest", # "mxbai-embed-large" + ) + logger.info(f"Initialized Ollama interface") + vector_store = ChromaStore(collection_name="research_lab") + logger.info(f"Initialized Chroma vector store") field = NarrativeField(llm, vector_store) - - # Example research lab scenario + logger.info(f"Initialized narrative field") + + # Research Lab Scenario with Multiple Characters and 20 Events + stories = [ + # Event 1: Leon's frustration { - "content": "Leon really want to go to lunch without having to wait for the others.", - "context": "After a long meeting with the others, Leon is frustrated. It's noisy and he can't hear himself think." + "content": "After enduring a long and tumultuous meeting with his colleagues, where the cacophony of voices made it impossible to think clearly, Leon felt his frustration mounting. The office was so noisy that he couldn't hear himself think, and all he wanted was a moment of peace. Craving solitude, he really wanted to go to lunch without having to wait for the others, hoping that a quiet break would help him regain his composure.", + "context": "Leon is overwhelmed by the noisy meeting and desires time alone during lunch to clear his mind.", }, + # Event 2: Leon discussing the AI minor { - "content": "Leon discusses his concerns about the AI for Society minor with Coen. Coen is supportive but thinks Leon should talk to manager of the minor, Danny.", - "context": "After lunch, Leon and Coen are walking back to the lab." 
+ "content": "After lunch, as Leon and Coen walked back to the lab, Leon decided to share his growing concerns about the AI for Society minor. He voiced his doubts and the challenges he foresaw in the program's current direction. Coen listened attentively and was supportive of Leon's worries. \"I think you have some valid points,\" Coen acknowledged, \"but perhaps it would be best to discuss these issues with Danny, the manager of the minor.\" Coen believed that Danny's insights could be crucial in addressing Leon's concerns.", + "context": "Leon confides in Coen about issues with the AI minor; Coen advises consulting Danny.", }, + # Event 3: Danny's accident { - "content": "Danny fell of his bike and is hurt. He is going to the hospital. He is not sure if he will be able to work on the AI for Society minor in the near future.", - "context": "Leon is worried about Danny. He is also worried about the lab and the AI for Society minor. He is also worried about his own research. Leon talks to his manager, Robbert." + "content": "News spread that Danny had fallen off his bike and was injured. He was on his way to the hospital and unsure if he could continue working on the AI for Society minor in the near future. Leon was deeply worried about Danny's well-being, the impact on the lab, and the future of the AI minor program. Feeling the weight of these concerns, he decided to talk to his manager, Robbert, hoping to find a solution.", + "context": "Danny's injury raises concerns about the AI minor's future, prompting Leon to seek Robbert's guidance.", }, + # Event 4: Robbert's tough advice { - "content": "Robbert is very worried about Danny. He is not interested in the AI for Society minor. He is also worried about his own research. Robbert talks to Leon. He thinks Leon should man up and stop whining.", - "context": "After work, Robbert and Leon are walking back to the lab." - } + "content": "After work, Robbert and Leon walked back to the lab together. Leon expressed his worries about Danny's accident and the AI minor. However, Robbert seemed more preoccupied with his own research and was not interested in discussing the minor. \"I know you're concerned, but you need to man up and stop whining,\" Robbert said bluntly. His tough advice left Leon feeling isolated and unsupported.", + "context": "Robbert dismisses Leon's concerns, focusing instead on his own research priorities.", + }, + # Event 5: Coen's input + { + "content": 'Feeling conflicted after his conversation with Robbert, Leon found solace when Coen offered to help with the AI minor. "Maybe we can work on it together while Danny recovers," Coen suggested. Leon appreciated Coen\'s offer, recognizing the value of teamwork, but he also felt uncertain about taking on more responsibility without proper guidance.', + "context": "Coen volunteers to assist Leon with the AI minor during Danny's absence.", + }, + # Event 6: Sarah’s contribution + { + "content": "Sarah, a new member of the lab eager to make her mark, approached Leon with a fresh idea. Enthusiastic about the ethical challenges in AI, she suggested a new direction for the AI minor—focusing on ethics in AI development. Her excitement was contagious, and Leon began to see the potential impact of integrating ethics into the program.", + "context": "Sarah proposes refocusing the AI minor on AI ethics, sparking interest from Leon.", + }, + # Event 7: Tom's exhaustion + { + "content": "Tom, another member of the lab, was visibly exhausted after a long day. 
He had been struggling to keep up with the heavy workload and confided in his colleagues that he wanted to leave early. Considering taking a break from the lab altogether, Tom felt mentally drained and knew he needed time to recover.", + "context": "Tom is overwhelmed by work stress and thinks about temporarily leaving the lab.", + }, + # Event 8: Leon reassessing + { + "content": "Observing Tom's exhaustion, Leon became concerned that the lab might be overworking its members. Balancing his worries about the AI minor and the well-being of his colleagues, he suggested organizing a team meeting to discuss workload management. Leon hoped that addressing these issues openly would help prevent burnout and improve overall productivity.", + "context": "Leon considers holding a meeting to tackle workload issues affecting team morale.", + }, + # Event 9: Robbert's counter + { + "content": "Robbert disagreed with Leon's assessment, arguing that the lab members needed to toughen up and handle the workload. He felt that reducing their responsibilities would slow down progress on important research projects. \"We can't afford to ease up now,\" Robbert insisted, dismissing the idea of altering the current work demands.", + "context": "Robbert rejects the notion of reducing workloads, emphasizing the need for ongoing productivity.", + }, + # Event 10: Coen's personal struggle + { + "content": "In a candid conversation, Coen revealed to Leon that he had been dealing with personal issues and was struggling to focus on work. Leon was surprised by Coen's admission, as he had always appeared to have everything under control. This revelation highlighted the underlying stress affecting the team.", + "context": "Coen admits personal struggles are hindering his work, surprising Leon.", + }, + # Event 11: Sarah's proposal + { + "content": "Concerned about her colleagues' mental health, Sarah proposed implementing a flexible working schedule to accommodate those feeling burned out. She believed that a healthier work-life balance would benefit both the individuals and the lab's productivity. \"We need to take care of ourselves to do our best work,\" she advocated.", + "context": "Sarah suggests flexible hours to improve well-being and efficiency in the lab.", + }, + # Event 12: Tom’s decision + { + "content": "Feeling overwhelmed, Tom decided to take a temporary leave from the lab to focus on his mental health. He believed that stepping back was the best decision for now and hoped that his absence would prompt the team to consider the pressures they were all facing.", + "context": "Tom takes a break to address his mental health, hoping to highlight team stress.", + }, + # Event 13: Leon's talk with Robbert + { + "content": "Using Tom's situation as an example, Leon tried once more to convince Robbert that the team needed more flexibility. \"If we don't address this, we might lose more valuable team members,\" Leon cautioned. However, Robbert remained unconvinced, believing that the team was coddling itself too much and that personal issues should not interfere with work.", + "context": "Leon urges Robbert to consider flexibility; Robbert remains steadfast against it.", + }, + # Event 14: Robbert doubling down + { + "content": "Robbert held a team meeting to reiterate the importance of maintaining productivity despite personal challenges. He emphasized that their work was critical and that everyone needed to stay focused. 
Robbert believed that personal problems should not interfere with lab performance and stood firm on his stance.", + "context": "Robbert emphasizes productivity over personal issues in a team meeting.", + }, + # Event 15: Sarah's pushback + { + "content": "Sarah pushed back against Robbert's position during the meeting, arguing that a more flexible approach would ultimately lead to better results. She highlighted the risks of burnout and the benefits of supporting team members through their personal struggles. The team found itself divided between Robbert's hardline approach and Sarah's call for change.", + "context": "Sarah challenges Robbert's views, leading to a team split over work policies.", + }, + # Event 16: Coen's suggestion + { + "content": 'Seeking a compromise, Coen suggested organizing a workshop on mental health and productivity. "Maybe we can find strategies to balance personal well-being with our work goals," he proposed. Coen hoped this initiative would bring both sides together and foster a more supportive environment.', + "context": "Coen proposes a mental health workshop to reconcile differing team perspectives.", + }, + # Event 17: Leon's reflection + { + "content": "Leon reflected on the growing tension within the lab and wondered if they needed an external mediator to help resolve the conflicts. Feeling caught between Robbert's expectations and his colleagues' concerns, he contemplated seeking outside assistance to find a constructive path forward.", + "context": "Leon considers involving a mediator to address internal lab conflicts.", + }, + # Event 18: A breakthrough idea + { + "content": "During a late-night discussion, Leon and Sarah brainstormed a novel approach to restructure the AI minor. They envisioned incorporating elements of ethics and mental health awareness into the curriculum, aligning the program with current societal needs. Energized by this new direction, Leon believed it could address both the challenges facing the AI minor and the lab's workload issues.", + "context": "Leon and Sarah create a plan integrating ethics and mental health into the AI minor.", + }, + # Event 19: Robbert's hesitation + { + "content": 'When presented with the proposed changes, Robbert was hesitant to implement them. He feared that altering their focus would slow down the lab\'s progress and detract from their primary research objectives. "This plan seems too idealistic," he cautioned, remaining committed to a results-driven approach.', + "context": "Robbert doubts the practicality of the new AI minor plan, fearing it may impede progress.", + }, + # Event 20: Tom’s return + { + "content": "After his break, Tom returned to the lab feeling refreshed and ready to contribute again. He appreciated the support from his colleagues and felt more optimistic about balancing his mental health with work. 
Tom's return brought a renewed sense of hope to the team, signaling the potential for positive change.", + "context": "Tom's rejuvenated return inspires hope for better balance in the lab.", + }, ] - + # Process stories - print("Processing stories and analyzing field effects...") + logger.info(f"Processing {len(stories)} stories and analyzing field effects...") for story in stories: try: - result = await field.add_story(story['content'], story['context']) - print(f"\n---\nAdded story:\n{story['content']}") - print(f"\nField effects:\n{result.field_effects[-1]['analysis']}") - print("\nCurrent field state:\n", field.state.description) - + logger.debug(f"Adding story: {story['content']}") + result = await field.add_story(story["content"], story["context"]) + logger.info(f"Added story: {result.id}") + logger.debug(f"Field effects: {result.field_effects[-1]['analysis']}") + logger.debug(f"Current field state: {field.state.description}") + if result.resonances: - print("\nResonances detected:") + logger.info( + f"Detected {len(result.resonances)} resonances for story {result.id}" + ) for r_id in result.resonances: r_story = field.stories.get(r_id) if r_story: - print(f"- Resonates with: {r_story.content}") - + logger.debug(f"Resonates with: {r_story.content}") + except Exception as e: - print(f"Error processing story: {e}") + logger.error(f"Error processing story: {e}", exc_info=True) continue + logger.info("Narrative field demonstration completed") + + if __name__ == "__main__": - print("Starting narrative field demonstration...") - asyncio.run(demo_scenario()) \ No newline at end of file + asyncio.run(demo_scenario()) diff --git a/pocs/poc_async.py b/pocs/poc_async.py new file mode 100644 index 0000000..dd095bf --- /dev/null +++ b/pocs/poc_async.py @@ -0,0 +1,21 @@ +import asyncio + + +async def task1(): + print("Task 1 starting") + await asyncio.sleep(2) # Simulate a delay + print("Task 1 done") + + +async def task2(): + print("Task 2 starting") + await asyncio.sleep(1) # Simulate a shorter delay + print("Task 2 done") + + +async def main(): + await asyncio.gather(task1(), task2()) # Run tasks concurrently + + +# Run the event loop +asyncio.run(main()) From b78ef6e9a5bd43ea9757cbe84726e344fca72aba Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Thu, 24 Oct 2024 11:24:16 +0200 Subject: [PATCH 05/14] Refactor base, abstract, impl --- pocs/nfs_simple_lab_scenario.py | 369 ++++++++++++++++++++------------ 1 file changed, 234 insertions(+), 135 deletions(-) diff --git a/pocs/nfs_simple_lab_scenario.py b/pocs/nfs_simple_lab_scenario.py index cd6071c..4b145b2 100644 --- a/pocs/nfs_simple_lab_scenario.py +++ b/pocs/nfs_simple_lab_scenario.py @@ -1,16 +1,32 @@ +from __future__ import annotations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing import List, Dict + +import asyncio +import json +import logging +import os +from abc import ABC, abstractmethod from dataclasses import dataclass, field from datetime import datetime -from typing import List, Dict, Optional, Any +from logging.handlers import RotatingFileHandler +from typing import Any, Dict, List, TypedDict, Union, Literal, NewType, Optional, Final from uuid import uuid4 -import asyncio -import json +from enum import Enum, auto +from typing_extensions import TypeGuard +import typing + import chromadb import ollama -import logging -from logging.handlers import RotatingFileHandler -import os from chromadb.config import Settings +DEFAULT_SIMILARITY_THRESHOLD: Final[float] = typing.cast(float, 0.8) 
+DEFAULT_RESONANCE_LIMIT: Final[int] = typing.cast(int, 3) +MAX_LOG_FILE_SIZE: Final[int] = typing.cast(int, 10 * 1024 * 1024) # 10 MB +MAX_LOG_BACKUP_COUNT: Final[int] = typing.cast(int, 19) # 20 files total + # Create a logs directory if it doesn't exist log_dir = os.path.join(os.path.dirname(__file__), "..", "logs") os.makedirs(log_dir, exist_ok=True) @@ -26,8 +42,8 @@ # Create a rotating file handler file_handler = RotatingFileHandler( log_file, - maxBytes=1024 * 1024 * 10, # 10 MB per file - backupCount=19, # Keep 19 backup files, plus the current one (20 total) + maxBytes=MAX_LOG_FILE_SIZE, + backupCount=MAX_LOG_BACKUP_COUNT, ) file_handler.setLevel(logging.DEBUG) file_formatter = logging.Formatter( @@ -45,51 +61,137 @@ logger.addHandler(file_handler) logger.addHandler(console_handler) +StoryID = NewType("StoryID", str) + +# 1. Define base classes and abstract classes +class VectorStore(ABC): + @abstractmethod + async def store(self, story: "Story", embedding: List[float]) -> None: + pass + + @abstractmethod + async def find_similar( + self, embedding: List[float], threshold: float, limit: int + ) -> List[Dict]: + pass + + +class LanguageModel(ABC): + @abstractmethod + async def generate(self, prompt: str) -> str: + pass + + @abstractmethod + async def generate_embedding(self, text: str) -> List[float]: + pass + + +# 2. Define data classes and enums @dataclass class Story: - """A narrative element in the field with rich context""" - content: str context: str - id: str = field(default_factory=lambda: str(uuid4())) + id: StoryID = field(default_factory=lambda: StoryID(str(uuid4()))) timestamp: datetime = field(default_factory=datetime.now) - metadata: Dict[str, Any] = field(default_factory=dict) + metadata: Optional[Dict[str, Any]] = field(default=None) resonances: List[str] = field(default_factory=list) field_effects: List[Dict] = field(default_factory=list) - personal_narrative: str = field(default="") - emotional_impact: str = field(default="") @dataclass -class FieldState: - """Represents the current state of the narrative field""" +class Pattern: + name: str + description: str + strength: float = 0.0 + related_stories: List[StoryID] = field(default_factory=list) + def update_strength(self, new_strength: float): + self.strength = new_strength + + def add_related_story(self, story_id: StoryID): + if story_id not in self.related_stories: + self.related_stories.append(story_id) + + +@dataclass +class Resonance: + type: str + strength: float + source_story: StoryID + target_story: StoryID + description: str = "" + + def update_strength(self, new_strength: float): + self.strength = new_strength + + +@dataclass +class EmergencePoint: + story_id: StoryID + timestamp: datetime + type: str = "new_narrative" + resonance_context: List[Resonance] = field(default_factory=list) + related_patterns: List[Pattern] = field(default_factory=list) + + def add_related_pattern(self, pattern: Pattern): + if pattern not in self.related_patterns: + self.related_patterns.append(pattern) + + +@dataclass +class FieldState: description: str - patterns: List[Dict] = field(default_factory=list) - active_resonances: List[Dict] = field(default_factory=list) - emergence_points: List[Dict] = field(default_factory=list) + patterns: List[Pattern] = field(default_factory=list) + active_resonances: List[Resonance] = field(default_factory=list) + emergence_points: List[EmergencePoint] = field(default_factory=list) timestamp: datetime = field(default_factory=datetime.now) + def add_pattern(self, pattern: Pattern): + 
self.patterns.append(pattern) + + def add_resonance(self, resonance: Resonance): + self.active_resonances.append(resonance) -class OllamaInterface: - """Interface to Ollama LLM""" + def add_emergence_point(self, point: EmergencePoint): + self.emergence_points.append(point) + def update_description(self, new_description: str): + self.description = new_description + self.timestamp = datetime.now() + + +class ImpactAnalysis(TypedDict): + analysis: str + timestamp: datetime + story_id: str + + +class ResonanceAnalysis(TypedDict): + type: Literal["narrative_resonance"] + analysis: str + stories: Dict[ + Literal["source", "resonant"], Dict[Literal["id", "content", "context"], str] + ] + timestamp: datetime + + +# 3. Implement concrete classes +class OllamaInterface(LanguageModel): def __init__( self, model_name: str = "mistral-nemo", embed_model_name: str = "mxbai-embed-large", ): - self.model = model_name - self.embed_model = embed_model_name - self.embedding_cache = {} - self.logger = logging.getLogger(__name__) + self.model: str = model_name + self.embed_model: str = embed_model_name + self.embedding_cache: Dict[str, List[float]] = {} + self.logger: logging.Logger = logging.getLogger(__name__) self.logger.info( f"Initializing OllamaInterface with models: main={self.model}, embedding={self.embed_model}" ) - async def analyze(self, prompt: str) -> str: - """Get LLM analysis of narrative""" + async def generate(self, prompt: str) -> str: self.logger.debug(f"Sending prompt to LLM: {prompt}") self.logger.info(f"Prompt length sent to LLM: {len(prompt)} characters") response = await asyncio.to_thread( @@ -102,31 +204,34 @@ async def analyze(self, prompt: str) -> str: self.logger.info(f"Response length from LLM: {len(content)} characters") return content - async def generate_embedding(self, text: str) -> List[float]: + async def generate_embedding(self, story: Story) -> List[float]: + # Use the Story's GUID as the cache key + cache_key = story.id + text_to_embed = f"{story.content} {story.context}" + # Check cache first self.logger.info("Checking embedding cache before generating") - cache_key = hash(text) # or another suitable hashing method if cache_key in self.embedding_cache: - self.logger.info(f"Embedding retrieved from cache for key: {cache_key}") + self.logger.info( + f"Embedding retrieved from cache for story ID: {cache_key}" + ) return self.embedding_cache[cache_key] # Generate if not cached response = await asyncio.to_thread( - ollama.embeddings, model=self.embed_model, prompt=text + ollama.embeddings, model=self.embed_model, prompt=text_to_embed ) embedding = response["embedding"] # Cache the result self.embedding_cache[cache_key] = embedding self.logger.info( - f"Embedding generated and cached successfully with key: {cache_key}" + f"Embedding generated and cached successfully for story ID: {cache_key} with length {len(embedding)}" ) return embedding -class ChromaStore: - """Local vector store using ChromaDB""" - +class ChromaStore(VectorStore): def __init__(self, collection_name: str = "narrative_field"): self.client = chromadb.Client(Settings(anonymized_telemetry=False)) self.logger = logging.getLogger(__name__) @@ -141,15 +246,33 @@ def __init__(self, collection_name: str = "narrative_field"): self.logger.info(f"Collection {collection_name} created") self.logger.info(f"Collection metadata: {self.collection.metadata}") - async def store(self, id: str, embedding: List[float], metadata: Dict) -> None: - """Store embedding and metadata""" - self.logger.info(f"Storing embedding and metadata 
for story: {id}") + async def store(self, story: Story, embedding: List[float]) -> None: + """Store story embedding and metadata""" + self.logger.info(f"Storing embedding and metadata for story: {story.id}") self.logger.debug(f"Embedding length: {len(embedding)}") + + metadata = { + "content": story.content, + "context": story.context, + "field_effects": json.dumps( + [ + { + "analysis": effect["analysis"], + "timestamp": effect["timestamp"].isoformat(), + "story_id": effect["story_id"], + } + for effect in story.field_effects + ] + ), + "resonances": json.dumps(story.resonances), + "timestamp": story.timestamp.isoformat(), + } + await asyncio.to_thread( self.collection.add, documents=[json.dumps(metadata)], embeddings=[embedding], - ids=[id], + ids=[story.id], metadatas=[metadata], ) @@ -193,14 +316,15 @@ async def find_similar( return thresholded +# 4. Define main logic classes class FieldAnalyzer: - """Handles analysis of narrative field dynamics""" + def __init__(self, llm_interface: LanguageModel): + self.llm: LanguageModel = llm_interface + self.logger: logging.Logger = logging.getLogger(__name__) - def __init__(self, llm_interface): - self.llm = llm_interface - self.logger = logging.getLogger(__name__) - - async def analyze_impact(self, story: Story, current_state: FieldState) -> Dict: + async def analyze_impact( + self, story: Story, current_state: FieldState + ) -> Dict[str, Any]: """Analyze how a story impacts the field""" prompt = f""" Current field state: {current_state.description} @@ -209,7 +333,6 @@ async def analyze_impact(self, story: Story, current_state: FieldState) -> Dict: New narrative entering field: Content: {story.content} Context: {story.context} - Personal Narrative: {story.personal_narrative} Analyze field impact: 1. Immediate resonance effects @@ -222,7 +345,7 @@ async def analyze_impact(self, story: Story, current_state: FieldState) -> Dict: Provide a qualitative, story-driven analysis without using numeric measures. 
""" - analysis = await self.llm.analyze(prompt) + analysis = await self.llm.generate(prompt) result = { "analysis": analysis, @@ -233,7 +356,7 @@ async def analyze_impact(self, story: Story, current_state: FieldState) -> Dict: async def detect_patterns( self, stories: List[Story], current_state: FieldState - ) -> List[Dict]: + ) -> List[Dict[str, Any]]: """Identify emergent patterns in the narrative field""" self.logger.info(f"Detecting patterns for {len(stories)} stories") self.logger.debug(f"Current field state: {current_state.description}") @@ -269,7 +392,7 @@ async def detect_patterns( f"Emergent pattern detection prompt length: {len(prompt)} characters" ) - patterns = await self.llm.analyze(prompt) + patterns = await self.llm.generate(prompt) self.logger.debug(f"Received emergent patterns response: {patterns}") self.logger.info( f"Emergent pattern detection response length: {len(patterns)} characters" @@ -278,23 +401,26 @@ async def detect_patterns( class ResonanceDetector: - """Handles semantic detection and analysis of narrative resonances""" + def __init__(self, vector_store: VectorStore, llm_interface: LanguageModel): + self.vector_store: VectorStore = vector_store + self.llm: LanguageModel = llm_interface + self.logger: logging.Logger = logging.getLogger(__name__) - def __init__(self, vector_store, llm_interface): - self.vector_store = vector_store - self.llm = llm_interface - self.logger = logging.getLogger(__name__) - - async def find_resonances(self, story: Story, limit: int = 3) -> List[Dict]: + async def find_resonances( + self, + story: Story, + threshold: float = DEFAULT_SIMILARITY_THRESHOLD, + limit: int = DEFAULT_RESONANCE_LIMIT, + ) -> List[Dict[str, Any]]: """Find and analyze resonating stories using semantic understanding""" self.logger.debug(f"Finding resonances for story: {story.id}") - embedding = await self.llm.generate_embedding( - story.content + " " + story.context - ) + embedding = await self.llm.generate_embedding(story) self.logger.debug(f"Generated embedding for story: {story.id}") - similar_stories = await self.vector_store.find_similar(embedding, limit=limit) + similar_stories = await self.vector_store.find_similar( + embedding, threshold=threshold, limit=limit + ) self.logger.debug(f"Found {len(similar_stories)} similar stories") resonances = [] @@ -325,17 +451,17 @@ async def find_resonances(self, story: Story, limit: int = 3) -> List[Dict]: self.logger.info(f"Found {len(resonances)} resonances for story: {story.id}") return resonances - async def determine_resonance_type(self, story1: Story, story2: Story) -> Dict: + async def determine_resonance_type( + self, story1: Story, story2: Story + ) -> ResonanceAnalysis: prompt = f""" Analyze the narrative resonance between these two stories: Story 1: {story1.content} Context 1: {story1.context} - Personal Narrative 1: {story1.personal_narrative} Story 2: {story2.content} Context 2: {story2.context} - Personal Narrative 2: {story2.personal_narrative} Provide a detailed analysis: 1. Narrative Relationship: @@ -354,9 +480,9 @@ async def determine_resonance_type(self, story1: Story, story2: Story) -> Dict: Provide a qualitative, story-driven analysis without using numeric measures. 
""" - analysis = await self.llm.analyze(prompt) + analysis = await self.llm.generate(prompt) - result = { + result: ResonanceAnalysis = { "type": "narrative_resonance", "analysis": analysis, "stories": { @@ -364,13 +490,11 @@ async def determine_resonance_type(self, story1: Story, story2: Story) -> Dict: "id": story1.id, "content": story1.content, "context": story1.context, - "personal_narrative": story1.personal_narrative, }, "resonant": { "id": story2.id, "content": story2.content, "context": story2.context, - "personal_narrative": story2.personal_narrative, }, }, "timestamp": datetime.now(), @@ -379,79 +503,70 @@ async def determine_resonance_type(self, story1: Story, story2: Story) -> Dict: class NarrativeField: - """Core system for managing narrative field dynamics""" - - def __init__(self, llm_interface, vector_store): - self.analyzer = FieldAnalyzer(llm_interface) - self.resonance_detector = ResonanceDetector(vector_store, llm_interface) - self.vector_store = vector_store - self.state = FieldState(description="Initial empty narrative field") - self.stories: Dict[str, Story] = {} - self.logger = logging.getLogger(__name__) + def __init__(self, llm_interface: LanguageModel, vector_store: VectorStore): + self._analyzer: FieldAnalyzer = FieldAnalyzer(llm_interface) + self._resonance_detector: ResonanceDetector = ResonanceDetector( + vector_store, llm_interface + ) + self._vector_store: VectorStore = vector_store + self._state: FieldState = FieldState( + description="Initial empty narrative field" + ) + self._stories: Dict[StoryID, Story] = {} + self._logger: logging.Logger = logging.getLogger(__name__) + + @property + def state(self) -> FieldState: + return self._state + + @property + def stories(self) -> Dict[StoryID, Story]: + return self._stories.copy() # Return a copy to prevent direct modification async def add_story(self, content: str, context: str) -> Story: - """Add a new story and analyze its field effects""" - story = Story(content=content, context=context) - self.logger.info(f"Adding new story: {story.id}") + story: Story = Story(content=content, context=context) + self._logger.info(f"Adding new story: {story.id}") + self._logger.debug(f"Story content: {story.content}") + self._logger.debug(f"Story context: {story.context}") # Analyze field impact - impact = await self.analyzer.analyze_impact(story, self.state) + impact: ImpactAnalysis = await self._analyzer.analyze_impact(story, self.state) story.field_effects.append(impact) - self.logger.debug(f"Field impact analysis completed for story: {story.id}") + self._logger.debug(f"Field impact analysis completed for story: {story.id}") # Find resonances - resonances = await self.resonance_detector.find_resonances(story) + resonances: List[Dict[str, Any]] = ( + await self._resonance_detector.find_resonances(story) + ) story.resonances.extend([r["story_id"] for r in resonances]) - self.logger.debug(f"Found {len(resonances)} resonances for story: {story.id}") + self._logger.debug(f"Found {len(resonances)} resonances for story: {story.id}") # Store story and update field await self._store_story(story) await self._update_field_state(story, impact, resonances) - self.logger.info(f"Story {story.id} added and field state updated") + self._logger.info(f"Story {story.id} added and field state updated") return story async def _store_story(self, story: Story) -> None: """Store story and its embeddings""" - self.logger.info(f"Storing story: {story.id}") - self.logger.debug( - f"Story length sent to LLM for embedding: {len(story.content + 
story.context)}" - ) - embedding = await self.resonance_detector.llm.generate_embedding( - story.content + " " + story.context - ) + self._logger.info(f"Storing story: {story.id}") + embedding = await self._resonance_detector.llm.generate_embedding(story) - metadata = { - "content": story.content, - "context": story.context, - "field_effects": json.dumps( - [ - { - "analysis": effect["analysis"], - "timestamp": effect["timestamp"].isoformat(), - "story_id": effect["story_id"], - } - for effect in story.field_effects - ] - ), - "resonances": json.dumps(story.resonances), - "timestamp": story.timestamp.isoformat(), - } - - await self.vector_store.store(story.id, embedding, metadata) - self.logger.info(f"Story {story.id} stored successfully in vector store") - self.stories[story.id] = story + await self._vector_store.store(story, embedding) + self._logger.info(f"Story {story.id} stored successfully in vector store") + self._stories[story.id] = story async def _update_field_state( self, story: Story, impact: Dict, resonances: List[Dict] ) -> None: """Update field state with enhanced resonance understanding""" - patterns = await self.analyzer.detect_patterns( - list(self.stories.values()), self.state + patterns = await self._analyzer.detect_patterns( + list(self._stories.values()), self.state ) - self.state = FieldState( + self._state = FieldState( description=impact["analysis"], patterns=patterns, active_resonances=resonances, @@ -468,24 +583,21 @@ async def _update_field_state( ) +# 5. Keep the demo function at the end async def demo_scenario(): - """Demonstrate the narrative field system with a simple scenario""" logger.info("Starting narrative field demonstration...") # Initialize components - llm = OllamaInterface( - model_name="mistral-nemo", - embed_model_name="nomic-embed-text:latest", # "mxbai-embed-large" - ) + llm: LanguageModel = OllamaInterface() logger.info(f"Initialized Ollama interface") - vector_store = ChromaStore(collection_name="research_lab") + vector_store: VectorStore = ChromaStore(collection_name="research_lab") logger.info(f"Initialized Chroma vector store") + field = NarrativeField(llm, vector_store) logger.info(f"Initialized narrative field") # Research Lab Scenario with Multiple Characters and 20 Events - stories = [ # Event 1: Leon's frustration { @@ -593,20 +705,7 @@ async def demo_scenario(): logger.info(f"Processing {len(stories)} stories and analyzing field effects...") for story in stories: try: - logger.debug(f"Adding story: {story['content']}") - result = await field.add_story(story["content"], story["context"]) - logger.info(f"Added story: {result.id}") - logger.debug(f"Field effects: {result.field_effects[-1]['analysis']}") - logger.debug(f"Current field state: {field.state.description}") - - if result.resonances: - logger.info( - f"Detected {len(result.resonances)} resonances for story {result.id}" - ) - for r_id in result.resonances: - r_story = field.stories.get(r_id) - if r_story: - logger.debug(f"Resonates with: {r_story.content}") + await field.add_story(story["content"], story["context"]) except Exception as e: logger.error(f"Error processing story: {e}", exc_info=True) From e354a13fea8b3339f1239cc1150052bef027e612 Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Thu, 24 Oct 2024 18:14:18 +0200 Subject: [PATCH 06/14] simple lab scenario extended with lama_cpp --- pocs/nfs_simple_lab_scenario.py | 1084 +++++++++++++++++-------------- requirements.txt | 4 +- 2 files changed, 612 insertions(+), 476 deletions(-) diff --git 
a/pocs/nfs_simple_lab_scenario.py b/pocs/nfs_simple_lab_scenario.py index 4b145b2..eedbc6d 100644 --- a/pocs/nfs_simple_lab_scenario.py +++ b/pocs/nfs_simple_lab_scenario.py @@ -1,259 +1,492 @@ -from __future__ import annotations -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from typing import List, Dict +""" +Narrative Field System +A framework for analyzing and tracking narrative dynamics in complex social systems. +""" -import asyncio -import json -import logging -import os -from abc import ABC, abstractmethod -from dataclasses import dataclass, field +from __future__ import annotations +from typing import List, Dict, Any, Optional, Final, NewType from datetime import datetime -from logging.handlers import RotatingFileHandler -from typing import Any, Dict, List, TypedDict, Union, Literal, NewType, Optional, Final from uuid import uuid4 -from enum import Enum, auto -from typing_extensions import TypeGuard -import typing - +from dataclasses import dataclass, field +from abc import ABC, abstractmethod +import logging +import logging.handlers +import json +import os +import asyncio import chromadb -import ollama from chromadb.config import Settings +import ollama +from llama_cpp import Llama +import numpy as np +import psutil +import multiprocessing as mp +import time +from typing import Tuple +import gc # Add this import at the top of your file +import atexit +import torch +import platform +import subprocess +from llama_cpp import llama +import ctypes +import sys + + +# Type Definitions +StoryID = NewType("StoryID", str) -DEFAULT_SIMILARITY_THRESHOLD: Final[float] = typing.cast(float, 0.8) -DEFAULT_RESONANCE_LIMIT: Final[int] = typing.cast(int, 3) -MAX_LOG_FILE_SIZE: Final[int] = typing.cast(int, 10 * 1024 * 1024) # 10 MB -MAX_LOG_BACKUP_COUNT: Final[int] = typing.cast(int, 19) # 20 files total - -# Create a logs directory if it doesn't exist -log_dir = os.path.join(os.path.dirname(__file__), "..", "logs") -os.makedirs(log_dir, exist_ok=True) - -# Generate a unique log file name based on the current timestamp -current_time = datetime.now().strftime("%Y%m%d_%H%M%S") -log_file = os.path.join(log_dir, f"nfs_lab_scenario_{current_time}.log") - -# Set up the root logger -logger = logging.getLogger() -logger.setLevel(logging.DEBUG) - -# Create a rotating file handler -file_handler = RotatingFileHandler( - log_file, - maxBytes=MAX_LOG_FILE_SIZE, - backupCount=MAX_LOG_BACKUP_COUNT, -) -file_handler.setLevel(logging.DEBUG) -file_formatter = logging.Formatter( - "%(asctime)s - %(name)s - %(levelname)s - %(message)s" -) -file_handler.setFormatter(file_formatter) - -# Create a console handler -console_handler = logging.StreamHandler() -console_handler.setLevel(logging.INFO) -console_formatter = logging.Formatter("%(asctime)s - %(message)s") -console_handler.setFormatter(console_formatter) - -# Add both handlers to the logger -logger.addHandler(file_handler) -logger.addHandler(console_handler) +# Constants +DEFAULT_SIMILARITY_THRESHOLD: Final[float] = 0.8 +DEFAULT_RESONANCE_LIMIT: Final[int] = 3 +MAX_LOG_FILE_SIZE: Final[int] = 10 * 1024 * 1024 # 10 MB +MAX_LOG_BACKUP_COUNT: Final[int] = 9 # 10 files total + +# Different quantization options from best quality to smallest size +MODEL_CONFIGS = { + "balanced": { + "chat": { + "path": "/Users/leonvanbokhorst/.cache/lm-studio/models/lmstudio-community/Llama-3.2-3B-Instruct-GGUF/Llama-3.2-3B-Instruct-Q8_0.gguf", + "size_gb": 3.42, + "relative_speed": 0.8, + }, + "embedding": { + "path": 
"/Users/leonvanbokhorst/.cache/lm-studio/models/nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q8_0.gguf", + "size_gb": 0.146, + }, + }, +} -StoryID = NewType("StoryID", str) +# Logging Setup +def setup_logging() -> None: + log_dir = os.path.join(os.path.dirname(__file__), "logs") + os.makedirs(log_dir, exist_ok=True) -# 1. Define base classes and abstract classes -class VectorStore(ABC): + current_time = datetime.now().strftime("%Y%m%d_%H%M%S") + log_file = os.path.join(log_dir, f"nfs_{current_time}.log") + + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + + # File Handler + file_handler = logging.handlers.RotatingFileHandler( + log_file, maxBytes=MAX_LOG_FILE_SIZE, backupCount=MAX_LOG_BACKUP_COUNT + ) + file_handler.setLevel(logging.DEBUG) + file_formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + ) + file_handler.setFormatter(file_formatter) + + # Console Handler + console_handler = logging.StreamHandler() + console_handler.setLevel(logging.INFO) + console_formatter = logging.Formatter("%(asctime)s - %(message)s") + console_handler.setFormatter(console_formatter) + + logger.addHandler(file_handler) + logger.addHandler(console_handler) + + +# Base Classes +class LanguageModel(ABC): @abstractmethod - async def store(self, story: "Story", embedding: List[float]) -> None: + async def generate(self, prompt: str) -> str: pass @abstractmethod - async def find_similar( - self, embedding: List[float], threshold: float, limit: int - ) -> List[Dict]: + async def generate_embedding(self, text: str) -> List[float]: pass -class LanguageModel(ABC): +class VectorStore(ABC): @abstractmethod - async def generate(self, prompt: str) -> str: + async def store(self, story: Story, embedding: List[float]) -> None: pass @abstractmethod - async def generate_embedding(self, text: str) -> List[float]: + async def find_similar( + self, embedding: List[float], threshold: float, limit: int + ) -> List[Dict]: pass -# 2. Define data classes and enums +# Data Classes @dataclass class Story: content: str context: str id: StoryID = field(default_factory=lambda: StoryID(str(uuid4()))) timestamp: datetime = field(default_factory=datetime.now) - metadata: Optional[Dict[str, Any]] = field(default=None) + metadata: Optional[Dict[str, Any]] = None resonances: List[str] = field(default_factory=list) field_effects: List[Dict] = field(default_factory=list) @dataclass -class Pattern: - name: str +class FieldState: description: str - strength: float = 0.0 - related_stories: List[StoryID] = field(default_factory=list) + patterns: List[Dict[str, Any]] = field(default_factory=list) + active_resonances: List[Dict[str, Any]] = field(default_factory=list) + emergence_points: List[Dict[str, Any]] = field(default_factory=list) + timestamp: datetime = field(default_factory=datetime.now) - def update_strength(self, new_strength: float): - self.strength = new_strength - def add_related_story(self, story_id: StoryID): - if story_id not in self.related_stories: - self.related_stories.append(story_id) +# Prompt Management +class FieldAnalysisPrompts: + @staticmethod + def get_impact_analysis_prompt(story: Story, current_state: FieldState) -> str: + return f"""Analyze how this new narrative affects the existing field state. 
+Current Field State: +{current_state.description} -@dataclass -class Resonance: - type: str - strength: float - source_story: StoryID - target_story: StoryID - description: str = "" +New Narrative: +"{story.content}" +Context: {story.context} - def update_strength(self, new_strength: float): - self.strength = new_strength +Consider and describe: +1. Immediate Effects +- How does this narrative change existing dynamics? +- What emotional responses might emerge? +- Who is most affected and how? +2. Relationship Changes +- How might work relationships shift? +- What new collaborations could form? +- What tensions might develop? -@dataclass -class EmergencePoint: - story_id: StoryID - timestamp: datetime - type: str = "new_narrative" - resonance_context: List[Resonance] = field(default_factory=list) - related_patterns: List[Pattern] = field(default_factory=list) +3. Future Implications +- How might this change future interactions? +- What new possibilities emerge? +- What challenges might arise? - def add_related_pattern(self, pattern: Pattern): - if pattern not in self.related_patterns: - self.related_patterns.append(pattern) +Provide a natural, story-focused analysis that emphasizes human impact.""" + @staticmethod + def get_pattern_detection_prompt( + stories: List[Story], current_state: FieldState + ) -> str: + story_summaries = "\n".join(f"- {s.content}" for s in stories[-5:]) + return f"""Analyze patterns and themes across these recent narratives. -@dataclass -class FieldState: - description: str - patterns: List[Pattern] = field(default_factory=list) - active_resonances: List[Resonance] = field(default_factory=list) - emergence_points: List[EmergencePoint] = field(default_factory=list) - timestamp: datetime = field(default_factory=datetime.now) +Current Field State: +{current_state.description} + +Recent Stories: +{story_summaries} + +Identify and describe: +1. Emerging Themes +- What recurring topics or concerns appear? +- How are people responding to changes? +- What underlying needs surface? + +2. Relationship Patterns +- How are work dynamics evolving? +- What collaboration patterns emerge? +- How is communication changing? - def add_pattern(self, pattern: Pattern): - self.patterns.append(pattern) +3. Organizational Shifts +- What cultural changes are happening? +- How is the work environment evolving? +- What new needs are emerging? - def add_resonance(self, resonance: Resonance): - self.active_resonances.append(resonance) +Describe patterns naturally, focusing on people and relationships.""" - def add_emergence_point(self, point: EmergencePoint): - self.emergence_points.append(point) + @staticmethod + def get_resonance_analysis_prompt(story1: Story, story2: Story) -> str: + return f"""Analyze how these two narratives connect and influence each other. - def update_description(self, new_description: str): - self.description = new_description - self.timestamp = datetime.now() +First Narrative: +"{story1.content}" +Context: {story1.context} +Second Narrative: +"{story2.content}" +Context: {story2.context} -class ImpactAnalysis(TypedDict): - analysis: str - timestamp: datetime - story_id: str +Examine: +1. Story Connections +- How do these narratives relate? +- What themes connect them? +- How do they influence each other? +2. People Impact +- How might this affect relationships? +- What emotional responses might emerge? +- How might behaviors change? 
-class ResonanceAnalysis(TypedDict): - type: Literal["narrative_resonance"] - analysis: str - stories: Dict[ - Literal["source", "resonant"], Dict[Literal["id", "content", "context"], str] - ] - timestamp: datetime +3. Environment Effects +- How might these stories change the workspace? +- What opportunities might develop? +- What challenges might arise? +Describe connections naturally, focusing on meaning and impact.""" -# 3. Implement concrete classes + +# Core Components class OllamaInterface(LanguageModel): def __init__( self, - model_name: str = "mistral-nemo", - embed_model_name: str = "mxbai-embed-large", + quality_preset: str = "balanced", + model_path: str = None, + embedding_model_path: str = None, ): - self.model: str = model_name - self.embed_model: str = embed_model_name + config = MODEL_CONFIGS[quality_preset] + self.chat_model_path = model_path or config["chat"]["path"] + self.embedding_model_path = embedding_model_path or config["embedding"]["path"] self.embedding_cache: Dict[str, List[float]] = {} - self.logger: logging.Logger = logging.getLogger(__name__) - self.logger.info( - f"Initializing OllamaInterface with models: main={self.model}, embedding={self.embed_model}" - ) + self.logger = logging.getLogger(__name__) async def generate(self, prompt: str) -> str: - self.logger.debug(f"Sending prompt to LLM: {prompt}") - self.logger.info(f"Prompt length sent to LLM: {len(prompt)} characters") + self.logger.debug(f"Generating response for prompt: {prompt}") response = await asyncio.to_thread( ollama.chat, model=self.model, messages=[{"role": "user", "content": prompt}], ) - content = response["message"]["content"] - self.logger.debug(f"Received response from LLM: {content}") - self.logger.info(f"Response length from LLM: {len(content)} characters") - return content - - async def generate_embedding(self, story: Story) -> List[float]: - # Use the Story's GUID as the cache key - cache_key = story.id - text_to_embed = f"{story.content} {story.context}" - - # Check cache first - self.logger.info("Checking embedding cache before generating") - if cache_key in self.embedding_cache: - self.logger.info( - f"Embedding retrieved from cache for story ID: {cache_key}" - ) - return self.embedding_cache[cache_key] + self.logger.debug(f"Response from LLM: {response['message']['content']}") + return response["message"]["content"] + + async def generate_embedding(self, text: str) -> List[float]: + if text in self.embedding_cache: + return self.embedding_cache[text] - # Generate if not cached response = await asyncio.to_thread( - ollama.embeddings, model=self.embed_model, prompt=text_to_embed + ollama.embeddings, model=self.embed_model, prompt=text ) embedding = response["embedding"] - - # Cache the result - self.embedding_cache[cache_key] = embedding - self.logger.info( - f"Embedding generated and cached successfully for story ID: {cache_key} with length {len(embedding)}" - ) + self.embedding_cache[text] = embedding return embedding +class PerformanceMetrics: + def __init__(self): + self.metrics: Dict[str, Dict[str, Any]] = {} + self.logger = logging.getLogger(__name__) + + def start_timer(self, operation: str): + if operation not in self.metrics: + self.metrics[operation] = { + "start_time": time.perf_counter(), + "durations": [], + } + else: + self.metrics[operation]["start_time"] = time.perf_counter() + + def stop_timer(self, operation: str) -> float: + if operation in self.metrics: + duration = time.perf_counter() - self.metrics[operation]["start_time"] + 
self.metrics[operation]["durations"].append(duration) + return duration + return 0.0 + + def get_average_duration(self, operation: str) -> float: + if operation in self.metrics and self.metrics[operation]["durations"]: + return sum(self.metrics[operation]["durations"]) / len( + self.metrics[operation]["durations"] + ) + return 0.0 + + def print_summary(self): + print("\nPerformance Metrics Summary:") + for operation, data in self.metrics.items(): + durations = data["durations"] + if durations: + avg_duration = sum(durations) / len(durations) + min_duration = min(durations) + max_duration = max(durations) + print(f"{operation}:") + print(f" Average duration: {avg_duration:.4f} seconds") + print(f" Min duration: {min_duration:.4f} seconds") + print(f" Max duration: {max_duration:.4f} seconds") + print(f" Total calls: {len(durations)}") + else: + print(f"{operation}: No data") + + def log_system_resources(self): + cpu_percent = psutil.cpu_percent() + memory_info = psutil.virtual_memory() + self.logger.info(f"CPU Usage: {cpu_percent}%") + self.logger.info(f"Memory Usage: {memory_info.percent}%") + + +import warnings + + +class LlamaInterface(LanguageModel): + def __init__( + self, + quality_preset: str = "balanced", + model_path: str = None, + embedding_model_path: str = None, + **kwargs, + ): + config = MODEL_CONFIGS[quality_preset] + chat_model_path = model_path or config["chat"]["path"] + embedding_model_path = embedding_model_path or config["embedding"]["path"] + + # Modified initialization parameters + optimal_config = { + "n_gpu_layers": -1, + "n_batch": 512, + "n_ctx": 16384, + # "n_kv": 256, + # "flash_attn": True, # Use Flash Attention for faster attention + # "rope_scaling_type": 1, # Use dynamic rope scaling for better performance + # "use_mmap": True, # Use memory-mapped files for faster loading + # "use_mlock": False, # Lock the model in memory + "metal_device": "mps", # Use Metal for GPU acceleration + "main_gpu": 0, # Use the first GPU + "use_metal": True, # Explicitly enable Metal + "n_threads": 4, # Use 4 threads for parallelism + # "offload_kqv": True, # Offload KV cache to CPU + } + + self.logger = logging.getLogger(__name__) + + try: + # Suppress warnings about the callback function + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.llm = Llama( + model_path=chat_model_path, + verbose=False, # Set to False to reduce logging + **optimal_config, + ) + + self.embedding_model = Llama( + model_path=embedding_model_path, + embedding=True, + verbose=False, # Set to False to reduce logging + **optimal_config, + ) + except Exception as e: + self.logger.error(f"Failed to load models: {e}", exc_info=True) + raise + + async def generate(self, prompt: str) -> str: + """Generate response using the LLM""" + self.logger.debug(f"Generating response for prompt: {prompt}") + try: + response = await asyncio.to_thread( + self.llm.create_chat_completion, + messages=[{"role": "user", "content": prompt}], + ) + self.logger.debug( + f"Response from LLM: {response['choices'][0]['message']['content']}" + ) + return response["choices"][0]["message"]["content"] + except Exception as e: + self.logger.error(f"Error generating response: {e}", exc_info=True) + return "Error generating response" + + async def generate_embedding(self, text: str) -> List[float]: + """Generate embedding for the given text""" + try: + embedding = await asyncio.to_thread(self.embedding_model.embed, text) + return embedding + except Exception as e: + self.logger.error(f"Error generating embedding: {e}", 
exc_info=True) + return [] + + async def cleanup(self): + """Clean up resources""" + if hasattr(self, "llm"): + del self.llm + if hasattr(self, "embedding_model"): + del self.embedding_model + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + + +class PerformanceMonitor: + def __init__(self): + self.metrics = [] + + async def monitor_generation( + self, llm: LlamaInterface, prompt: str + ) -> Tuple[str, Dict[str, float]]: + start_time = time.perf_counter() + memory_before = psutil.virtual_memory().used + + response = await llm.generate(prompt) + + end_time = time.perf_counter() + memory_after = psutil.virtual_memory().used + + metrics = { + "generation_time": end_time - start_time, + "memory_usage_change": (memory_after - memory_before) / (1024 * 1024), # MB + } + + self.metrics.append(metrics) + return response, metrics + + def get_performance_report(self) -> Dict[str, float]: + if not self.metrics: + return {"avg_generation_time": 0, "avg_memory_usage_change": 0} + + return { + "avg_generation_time": sum(m["generation_time"] for m in self.metrics) + / len(self.metrics), + "avg_memory_usage_change": sum( + m["memory_usage_change"] for m in self.metrics + ) + / len(self.metrics), + } + + +@dataclass +class BatchMetrics: + batch_sizes: List[int] = field(default_factory=list) + batch_times: List[float] = field(default_factory=list) + memory_usage: List[float] = field(default_factory=list) + + +class BatchProcessor: + def __init__(self, llm: LlamaInterface): + self.llm = llm + self.optimal_batch_size = 4 # Will be adjusted dynamically + + async def process_batch(self, prompts: List[str]) -> List[str]: + # Dynamic batch size adjustment based on memory usage + memory_usage = psutil.Process().memory_info().rss / 1024 / 1024 + if memory_usage > 0.8 * psutil.virtual_memory().total / 1024 / 1024: + self.optimal_batch_size = max(1, self.optimal_batch_size - 1) + + results = [] + for i in range(0, len(prompts), self.optimal_batch_size): + batch = prompts[i : i + self.optimal_batch_size] + batch_results = await asyncio.gather( + *[self.llm.generate(prompt) for prompt in batch] + ) + results.extend(batch_results) + + return results + + class ChromaStore(VectorStore): def __init__(self, collection_name: str = "narrative_field"): self.client = chromadb.Client(Settings(anonymized_telemetry=False)) self.logger = logging.getLogger(__name__) + try: self.collection = self.client.get_collection(collection_name) - self.logger.info(f"Collection {collection_name} found") - self.logger.info(f"Collection metadata: {self.collection.metadata}") except: self.collection = self.client.create_collection( name=collection_name, metadata={"hnsw:space": "cosine"} ) - self.logger.info(f"Collection {collection_name} created") - self.logger.info(f"Collection metadata: {self.collection.metadata}") async def store(self, story: Story, embedding: List[float]) -> None: - """Store story embedding and metadata""" - self.logger.info(f"Storing embedding and metadata for story: {story.id}") - self.logger.debug(f"Embedding length: {len(embedding)}") - metadata = { "content": story.content, "context": story.context, + "timestamp": story.timestamp.isoformat(), + "resonances": json.dumps(story.resonances), "field_effects": json.dumps( [ { @@ -264,8 +497,6 @@ async def store(self, story: Story, embedding: List[float]) -> None: for effect in story.field_effects ] ), - "resonances": json.dumps(story.resonances), - "timestamp": story.timestamp.isoformat(), } await asyncio.to_thread( @@ -277,12 +508,11 @@ async def 
store(self, story: Story, embedding: List[float]) -> None: ) async def find_similar( - self, embedding: List[float], threshold: float = 0.8, limit: int = 5 + self, + embedding: List[float], + threshold: float = DEFAULT_SIMILARITY_THRESHOLD, + limit: int = DEFAULT_RESONANCE_LIMIT, ) -> List[Dict]: - """Find similar narratives""" - self.logger.info( - f"Finding similar narratives with threshold: {threshold} and limit: {limit}" - ) count = self.collection.count() if count == 0: return [] @@ -293,9 +523,6 @@ async def find_similar( n_results=min(limit, count), ) - self.logger.info(f"Found {len(results['ids'][0])} similar narratives") - self.logger.debug(f"Similar narratives results: {results}") - similar = [] for idx, id in enumerate(results["ids"][0]): metadata = json.loads(results["documents"][0][idx]) @@ -307,104 +534,36 @@ async def find_similar( } ) - thresholded = [s for s in similar if s["similarity"] <= threshold] - self.logger.info( - f"Thresholded results length similarity narrative: {len(thresholded)}" - ) - self.logger.debug(f"Thresholded results similarity narrative: {thresholded}") - - return thresholded + return [s for s in similar if s["similarity"] <= threshold] -# 4. Define main logic classes class FieldAnalyzer: def __init__(self, llm_interface: LanguageModel): - self.llm: LanguageModel = llm_interface - self.logger: logging.Logger = logging.getLogger(__name__) + self.llm = llm_interface + self.logger = logging.getLogger(__name__) + self.prompts = FieldAnalysisPrompts() async def analyze_impact( self, story: Story, current_state: FieldState ) -> Dict[str, Any]: - """Analyze how a story impacts the field""" - prompt = f""" - Current field state: {current_state.description} - Active patterns: {current_state.patterns} - - New narrative entering field: - Content: {story.content} - Context: {story.context} - - Analyze field impact: - 1. Immediate resonance effects - 2. Pattern interactions/disruptions - 3. Potential emergence points - 4. Field state transformations - 5. Emotional impact on the field - 6. Narrative evolution - - Provide a qualitative, story-driven analysis without using numeric measures. - """ - + prompt = self.prompts.get_impact_analysis_prompt(story, current_state) analysis = await self.llm.generate(prompt) - result = { - "analysis": analysis, - "timestamp": datetime.now(), - "story_id": story.id, - } - return result + return {"analysis": analysis, "timestamp": datetime.now(), "story_id": story.id} async def detect_patterns( self, stories: List[Story], current_state: FieldState - ) -> List[Dict[str, Any]]: - """Identify emergent patterns in the narrative field""" - self.logger.info(f"Detecting patterns for {len(stories)} stories") - self.logger.debug(f"Current field state: {current_state.description}") - self.logger.info( - f"Current field state length: {len(current_state.description)}" - ) - - story_contexts = [ - {"content": s.content, "context": s.context, "effects": s.field_effects} - for s in stories - ] - - prompt = f""" - Analyze narrative collection for emergent patterns: - Stories: {story_contexts} - Current Patterns: {current_state.patterns} - Active Resonances: {current_state.active_resonances} - - Identify: - 1. New pattern formation - 2. Pattern evolution/dissolution - 3. Resonance networks - 4. Critical transition points - 5. Emergence phenomena - - Use the Stories, Current Patterns and Active Resonances to determine the impact. NO markdown or code blocks. 
- """ - - self.logger.debug( - f"Sending prompt to LLM for emergent pattern detection: {prompt}" - ) - self.logger.info( - f"Emergent pattern detection prompt length: {len(prompt)} characters" - ) - - patterns = await self.llm.generate(prompt) - self.logger.debug(f"Received emergent patterns response: {patterns}") - self.logger.info( - f"Emergent pattern detection response length: {len(patterns)} characters" - ) - return patterns + ) -> str: + prompt = self.prompts.get_pattern_detection_prompt(stories, current_state) + return await self.llm.generate(prompt) class ResonanceDetector: def __init__(self, vector_store: VectorStore, llm_interface: LanguageModel): - self.vector_store: VectorStore = vector_store - self.llm: LanguageModel = llm_interface - self.logger: logging.Logger = logging.getLogger(__name__) + self.vector_store = vector_store + self.llm = llm_interface + self.logger = logging.getLogger(__name__) + self.prompts = FieldAnalysisPrompts() async def find_resonances( self, @@ -412,77 +571,49 @@ async def find_resonances( threshold: float = DEFAULT_SIMILARITY_THRESHOLD, limit: int = DEFAULT_RESONANCE_LIMIT, ) -> List[Dict[str, Any]]: - """Find and analyze resonating stories using semantic understanding""" - self.logger.debug(f"Finding resonances for story: {story.id}") - - embedding = await self.llm.generate_embedding(story) - self.logger.debug(f"Generated embedding for story: {story.id}") - - similar_stories = await self.vector_store.find_similar( - embedding, threshold=threshold, limit=limit - ) - self.logger.debug(f"Found {len(similar_stories)} similar stories") - - resonances = [] - for similar in similar_stories: - self.logger.debug(f"Analyzing resonance with story: {similar['id']}") - similar_metadata = similar["metadata"] - similar_story = Story( - id=similar["id"], - content=similar_metadata["content"], - context=similar_metadata["context"], - timestamp=( - datetime.fromisoformat(similar_metadata["timestamp"]) - if isinstance(similar_metadata["timestamp"], str) - else similar_metadata["timestamp"] - ), + try: + self.logger.debug(f"Generating embedding for story: {story.id}") + # Ensure embedding is generated before using it + embedding = await self.llm.generate_embedding( + f"{story.content} {story.context}" ) - - resonance = await self.determine_resonance_type(story, similar_story) - resonances.append( - { - "story_id": similar["id"], - "resonance": resonance, - "timestamp": datetime.now(), - } + similar_stories = await self.vector_store.find_similar( + embedding, threshold, limit ) - self.logger.info(f"Resonance analysis completed for story: {similar['id']}") + self.logger.debug(f"Found {len(similar_stories)} similar stories") + + resonances = [] + for similar in similar_stories: + metadata = similar["metadata"] + similar_story = Story( + id=similar["id"], + content=metadata["content"], + context=metadata["context"], + timestamp=datetime.fromisoformat(metadata["timestamp"]), + ) + + resonance = await self.determine_resonance_type(story, similar_story) + resonances.append( + { + "story_id": similar["id"], + "resonance": resonance, + "timestamp": datetime.now(), + } + ) - self.logger.info(f"Found {len(resonances)} resonances for story: {story.id}") - return resonances + self.logger.debug(f"Generated {len(resonances)} resonances") + return resonances + except Exception as e: + self.logger.error(f"Error in find_resonances: {e}", exc_info=True) + raise async def determine_resonance_type( self, story1: Story, story2: Story - ) -> ResonanceAnalysis: - prompt = f""" - Analyze the 
narrative resonance between these two stories: - - Story 1: {story1.content} - Context 1: {story1.context} - - Story 2: {story2.content} - Context 2: {story2.context} - - Provide a detailed analysis: - 1. Narrative Relationship: - - How do these stories interact on a narrative level? - - What kind of thematic or emotional connection exists? - - How do they reinforce, conflict with, or transform each other's meanings? - - 2. Character Development: - - How might these stories influence the characters' growth or change? - - What new aspects of personality or motivation might emerge? - - 3. Worldview Impact: - - How do these stories shape the characters' understanding of their world? - - What beliefs or values are being challenged or reinforced? - - Provide a qualitative, story-driven analysis without using numeric measures. - """ - + ) -> Dict[str, Any]: + prompt = self.prompts.get_resonance_analysis_prompt(story1, story2) analysis = await self.llm.generate(prompt) - result: ResonanceAnalysis = { + return { "type": "narrative_resonance", "analysis": analysis, "stories": { @@ -499,21 +630,17 @@ async def determine_resonance_type( }, "timestamp": datetime.now(), } - return result class NarrativeField: def __init__(self, llm_interface: LanguageModel, vector_store: VectorStore): - self._analyzer: FieldAnalyzer = FieldAnalyzer(llm_interface) - self._resonance_detector: ResonanceDetector = ResonanceDetector( - vector_store, llm_interface - ) - self._vector_store: VectorStore = vector_store - self._state: FieldState = FieldState( - description="Initial empty narrative field" - ) + self._analyzer = FieldAnalyzer(llm_interface) + self._resonance_detector = ResonanceDetector(vector_store, llm_interface) + self._vector_store = vector_store + self._state = FieldState(description="Initial empty narrative field") self._stories: Dict[StoryID, Story] = {} - self._logger: logging.Logger = logging.getLogger(__name__) + self._logger = logging.getLogger(__name__) + self._performance_metrics = PerformanceMetrics() @property def state(self) -> FieldState: @@ -521,54 +648,62 @@ def state(self) -> FieldState: @property def stories(self) -> Dict[StoryID, Story]: - return self._stories.copy() # Return a copy to prevent direct modification + return self._stories.copy() async def add_story(self, content: str, context: str) -> Story: - story: Story = Story(content=content, context=context) - self._logger.info(f"Adding new story: {story.id}") - self._logger.debug(f"Story content: {story.content}") - self._logger.debug(f"Story context: {story.context}") + self._performance_metrics.start_timer("add_story") + + self._performance_metrics.start_timer("create_story") + story = Story(content=content, context=context) + create_time = self._performance_metrics.stop_timer("create_story") + self._logger.info(f"Story creation time: {create_time:.4f} seconds") - # Analyze field impact - impact: ImpactAnalysis = await self._analyzer.analyze_impact(story, self.state) + self._performance_metrics.start_timer("analyze_impact") + impact = await self._analyzer.analyze_impact(story, self.state) + analyze_time = self._performance_metrics.stop_timer("analyze_impact") + self._logger.info(f"Impact analysis time: {analyze_time:.4f} seconds") story.field_effects.append(impact) - self._logger.debug(f"Field impact analysis completed for story: {story.id}") - # Find resonances - resonances: List[Dict[str, Any]] = ( - await self._resonance_detector.find_resonances(story) - ) + self._performance_metrics.start_timer("find_resonances") + resonances = 
await self._resonance_detector.find_resonances(story) + resonance_time = self._performance_metrics.stop_timer("find_resonances") + self._logger.info(f"Find resonances time: {resonance_time:.4f} seconds") story.resonances.extend([r["story_id"] for r in resonances]) - self._logger.debug(f"Found {len(resonances)} resonances for story: {story.id}") - # Store story and update field + self._performance_metrics.start_timer("store_story") await self._store_story(story) + store_time = self._performance_metrics.stop_timer("store_story") + self._logger.info(f"Store story time: {store_time:.4f} seconds") + + self._performance_metrics.start_timer("update_field_state") await self._update_field_state(story, impact, resonances) - self._logger.info(f"Story {story.id} added and field state updated") + update_time = self._performance_metrics.stop_timer("update_field_state") + self._logger.info(f"Update field state time: {update_time:.4f} seconds") + + total_time = self._performance_metrics.stop_timer("add_story") + self._logger.info(f"Total add_story time: {total_time:.4f} seconds") + + self._performance_metrics.log_system_resources() return story async def _store_story(self, story: Story) -> None: - """Store story and its embeddings""" - self._logger.info(f"Storing story: {story.id}") - embedding = await self._resonance_detector.llm.generate_embedding(story) - + embedding = await self._resonance_detector.llm.generate_embedding( + f"{story.content} {story.context}" + ) await self._vector_store.store(story, embedding) - self._logger.info(f"Story {story.id} stored successfully in vector store") self._stories[story.id] = story async def _update_field_state( self, story: Story, impact: Dict, resonances: List[Dict] ) -> None: - """Update field state with enhanced resonance understanding""" - patterns = await self._analyzer.detect_patterns( list(self._stories.values()), self.state ) self._state = FieldState( description=impact["analysis"], - patterns=patterns, + patterns=[{"analysis": patterns}], active_resonances=resonances, emergence_points=[ { @@ -583,136 +718,135 @@ async def _update_field_state( ) -# 5. Keep the demo function at the end +# Global cleanup function +def global_cleanup(): + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + + +# Register the global cleanup function to run at exit +atexit.register(global_cleanup) + + async def demo_scenario(): + setup_logging() + logger = logging.getLogger(__name__) logger.info("Starting narrative field demonstration...") - # Initialize components - llm: LanguageModel = OllamaInterface() - logger.info(f"Initialized Ollama interface") + # Initialize performance monitor + monitor = PerformanceMonitor() - vector_store: VectorStore = ChromaStore(collection_name="research_lab") - logger.info(f"Initialized Chroma vector store") + llm = None # Initialize llm to None - field = NarrativeField(llm, vector_store) - logger.info(f"Initialized narrative field") + try: + # Perform global cleanup before initializing new LLM + global_cleanup() - # Research Lab Scenario with Multiple Characters and 20 Events - stories = [ - # Event 1: Leon's frustration - { - "content": "After enduring a long and tumultuous meeting with his colleagues, where the cacophony of voices made it impossible to think clearly, Leon felt his frustration mounting. The office was so noisy that he couldn't hear himself think, and all he wanted was a moment of peace. 
Craving solitude, he really wanted to go to lunch without having to wait for the others, hoping that a quiet break would help him regain his composure.", - "context": "Leon is overwhelmed by the noisy meeting and desires time alone during lunch to clear his mind.", - }, - # Event 2: Leon discussing the AI minor - { - "content": "After lunch, as Leon and Coen walked back to the lab, Leon decided to share his growing concerns about the AI for Society minor. He voiced his doubts and the challenges he foresaw in the program's current direction. Coen listened attentively and was supportive of Leon's worries. \"I think you have some valid points,\" Coen acknowledged, \"but perhaps it would be best to discuss these issues with Danny, the manager of the minor.\" Coen believed that Danny's insights could be crucial in addressing Leon's concerns.", - "context": "Leon confides in Coen about issues with the AI minor; Coen advises consulting Danny.", - }, - # Event 3: Danny's accident - { - "content": "News spread that Danny had fallen off his bike and was injured. He was on his way to the hospital and unsure if he could continue working on the AI for Society minor in the near future. Leon was deeply worried about Danny's well-being, the impact on the lab, and the future of the AI minor program. Feeling the weight of these concerns, he decided to talk to his manager, Robbert, hoping to find a solution.", - "context": "Danny's injury raises concerns about the AI minor's future, prompting Leon to seek Robbert's guidance.", - }, - # Event 4: Robbert's tough advice - { - "content": "After work, Robbert and Leon walked back to the lab together. Leon expressed his worries about Danny's accident and the AI minor. However, Robbert seemed more preoccupied with his own research and was not interested in discussing the minor. \"I know you're concerned, but you need to man up and stop whining,\" Robbert said bluntly. His tough advice left Leon feeling isolated and unsupported.", - "context": "Robbert dismisses Leon's concerns, focusing instead on his own research priorities.", - }, - # Event 5: Coen's input - { - "content": 'Feeling conflicted after his conversation with Robbert, Leon found solace when Coen offered to help with the AI minor. "Maybe we can work on it together while Danny recovers," Coen suggested. Leon appreciated Coen\'s offer, recognizing the value of teamwork, but he also felt uncertain about taking on more responsibility without proper guidance.', - "context": "Coen volunteers to assist Leon with the AI minor during Danny's absence.", - }, - # Event 6: Sarah’s contribution - { - "content": "Sarah, a new member of the lab eager to make her mark, approached Leon with a fresh idea. Enthusiastic about the ethical challenges in AI, she suggested a new direction for the AI minor—focusing on ethics in AI development. Her excitement was contagious, and Leon began to see the potential impact of integrating ethics into the program.", - "context": "Sarah proposes refocusing the AI minor on AI ethics, sparking interest from Leon.", - }, - # Event 7: Tom's exhaustion - { - "content": "Tom, another member of the lab, was visibly exhausted after a long day. He had been struggling to keep up with the heavy workload and confided in his colleagues that he wanted to leave early. 
Considering taking a break from the lab altogether, Tom felt mentally drained and knew he needed time to recover.", - "context": "Tom is overwhelmed by work stress and thinks about temporarily leaving the lab.", - }, - # Event 8: Leon reassessing - { - "content": "Observing Tom's exhaustion, Leon became concerned that the lab might be overworking its members. Balancing his worries about the AI minor and the well-being of his colleagues, he suggested organizing a team meeting to discuss workload management. Leon hoped that addressing these issues openly would help prevent burnout and improve overall productivity.", - "context": "Leon considers holding a meeting to tackle workload issues affecting team morale.", - }, - # Event 9: Robbert's counter - { - "content": "Robbert disagreed with Leon's assessment, arguing that the lab members needed to toughen up and handle the workload. He felt that reducing their responsibilities would slow down progress on important research projects. \"We can't afford to ease up now,\" Robbert insisted, dismissing the idea of altering the current work demands.", - "context": "Robbert rejects the notion of reducing workloads, emphasizing the need for ongoing productivity.", - }, - # Event 10: Coen's personal struggle - { - "content": "In a candid conversation, Coen revealed to Leon that he had been dealing with personal issues and was struggling to focus on work. Leon was surprised by Coen's admission, as he had always appeared to have everything under control. This revelation highlighted the underlying stress affecting the team.", - "context": "Coen admits personal struggles are hindering his work, surprising Leon.", - }, - # Event 11: Sarah's proposal - { - "content": "Concerned about her colleagues' mental health, Sarah proposed implementing a flexible working schedule to accommodate those feeling burned out. She believed that a healthier work-life balance would benefit both the individuals and the lab's productivity. \"We need to take care of ourselves to do our best work,\" she advocated.", - "context": "Sarah suggests flexible hours to improve well-being and efficiency in the lab.", - }, - # Event 12: Tom’s decision - { - "content": "Feeling overwhelmed, Tom decided to take a temporary leave from the lab to focus on his mental health. He believed that stepping back was the best decision for now and hoped that his absence would prompt the team to consider the pressures they were all facing.", - "context": "Tom takes a break to address his mental health, hoping to highlight team stress.", - }, - # Event 13: Leon's talk with Robbert - { - "content": "Using Tom's situation as an example, Leon tried once more to convince Robbert that the team needed more flexibility. \"If we don't address this, we might lose more valuable team members,\" Leon cautioned. However, Robbert remained unconvinced, believing that the team was coddling itself too much and that personal issues should not interfere with work.", - "context": "Leon urges Robbert to consider flexibility; Robbert remains steadfast against it.", - }, - # Event 14: Robbert doubling down - { - "content": "Robbert held a team meeting to reiterate the importance of maintaining productivity despite personal challenges. He emphasized that their work was critical and that everyone needed to stay focused. 
Robbert believed that personal problems should not interfere with lab performance and stood firm on his stance.", - "context": "Robbert emphasizes productivity over personal issues in a team meeting.", - }, - # Event 15: Sarah's pushback - { - "content": "Sarah pushed back against Robbert's position during the meeting, arguing that a more flexible approach would ultimately lead to better results. She highlighted the risks of burnout and the benefits of supporting team members through their personal struggles. The team found itself divided between Robbert's hardline approach and Sarah's call for change.", - "context": "Sarah challenges Robbert's views, leading to a team split over work policies.", - }, - # Event 16: Coen's suggestion - { - "content": 'Seeking a compromise, Coen suggested organizing a workshop on mental health and productivity. "Maybe we can find strategies to balance personal well-being with our work goals," he proposed. Coen hoped this initiative would bring both sides together and foster a more supportive environment.', - "context": "Coen proposes a mental health workshop to reconcile differing team perspectives.", - }, - # Event 17: Leon's reflection - { - "content": "Leon reflected on the growing tension within the lab and wondered if they needed an external mediator to help resolve the conflicts. Feeling caught between Robbert's expectations and his colleagues' concerns, he contemplated seeking outside assistance to find a constructive path forward.", - "context": "Leon considers involving a mediator to address internal lab conflicts.", - }, - # Event 18: A breakthrough idea - { - "content": "During a late-night discussion, Leon and Sarah brainstormed a novel approach to restructure the AI minor. They envisioned incorporating elements of ethics and mental health awareness into the curriculum, aligning the program with current societal needs. Energized by this new direction, Leon believed it could address both the challenges facing the AI minor and the lab's workload issues.", - "context": "Leon and Sarah create a plan integrating ethics and mental health into the AI minor.", - }, - # Event 19: Robbert's hesitation - { - "content": 'When presented with the proposed changes, Robbert was hesitant to implement them. He feared that altering their focus would slow down the lab\'s progress and detract from their primary research objectives. "This plan seems too idealistic," he cautioned, remaining committed to a results-driven approach.', - "context": "Robbert doubts the practicality of the new AI minor plan, fearing it may impede progress.", - }, - # Event 20: Tom’s return - { - "content": "After his break, Tom returned to the lab feeling refreshed and ready to contribute again. He appreciated the support from his colleagues and felt more optimistic about balancing his mental health with work. 
Tom's return brought a renewed sense of hope to the team, signaling the potential for positive change.", - "context": "Tom's rejuvenated return inspires hope for better balance in the lab.", - }, - ] + # Initialize components with LlamaInterface + # Only pass the parameters that LlamaInterface expects + llm = LlamaInterface() - # Process stories - logger.info(f"Processing {len(stories)} stories and analyzing field effects...") - for story in stories: - try: - await field.add_story(story["content"], story["context"]) + vector_store: VectorStore = ChromaStore(collection_name="research_lab") + logger.info(f"Initialized Chroma vector store") - except Exception as e: - logger.error(f"Error processing story: {e}", exc_info=True) - continue + field = NarrativeField(llm, vector_store) + logger.info(f"Initialized narrative field") + + # Research Lab Scenario with Multiple Characters and events + stories = [ + # Event 1: Leon discussing the AI minor + { + "content": "After lunch, as Leon and Coen walked back to the lab, Leon decided to share his growing concerns about the AI for Society minor. He voiced his doubts and the challenges he foresaw in the program's current direction. Coen listened attentively and was supportive of Leon's worries. \"I think you have some valid points,\" Coen acknowledged, \"but perhaps it would be best to discuss these issues with Danny, the manager of the minor.\" Coen believed that Danny's insights could be crucial in addressing Leon's concerns.", + "context": "Leon confides in Coen about issues with the AI minor; Coen advises consulting Danny.", + }, + # Event 2: Robbert's tough advice + { + "content": "After work, Robbert and Leon walked back to the lab together. Leon expressed his worries about Danny's accident and the AI minor. However, Robbert seemed more preoccupied with his own research and was not interested in discussing the minor. \"I know you're concerned, but you need to man up and stop whining,\" Robbert said bluntly. His tough advice left Leon feeling isolated and unsupported.", + "context": "Robbert dismisses Leon's concerns, focusing instead on his own research priorities.", + }, + # Event 4: Sarah's contribution + { + "content": "Sarah, a new member of the lab eager to make her mark, approached Leon with a fresh idea. Enthusiastic about the ethical challenges in AI, she suggested a new direction for the AI minor—focusing on ethics in AI development. Her excitement was contagious, and Leon began to see the potential impact of integrating ethics into the program.", + "context": "Sarah proposes refocusing the AI minor on AI ethics, sparking interest from Leon.", + }, + # Event 5: Tom's exhaustion + { + "content": "Tom, another member of the lab, was visibly exhausted after a long day. He had been struggling to keep up with the heavy workload and confided in his colleagues that he wanted to leave early. Considering taking a break from the lab altogether, Tom felt mentally drained and knew he needed time to recover.", + "context": "Tom is overwhelmed by work stress and thinks about temporarily leaving the lab.", + }, + # Event 6: Leon reassessing + { + "content": "Observing Tom's exhaustion, Leon became concerned that the lab might be overworking its members. Balancing his worries about the AI minor and the well-being of his colleagues, he suggested organizing a team meeting to discuss workload management. 
Leon hoped that addressing these issues openly would help prevent burnout and improve overall productivity.", + "context": "Leon considers holding a meeting to tackle workload issues affecting team morale.", + }, + # Event 7: Coen's personal struggle + { + "content": "In a candid conversation, Coen revealed to Leon that he had been dealing with personal issues and was struggling to focus on work. Leon was surprised by Coen's admission, as he had always appeared to have everything under control. This revelation highlighted the underlying stress affecting the team.", + "context": "Coen admits personal struggles are hindering his work, surprising Leon.", + }, + # Event 8: Sarah's proposal + { + "content": "Concerned about her colleagues' mental health, Sarah proposed implementing a flexible working schedule to accommodate those feeling burned out. She believed that a healthier work-life balance would benefit both the individuals and the lab's productivity. \"We need to take care of ourselves to do our best work,\" she advocated.", + "context": "Sarah suggests flexible hours to improve well-being and efficiency in the lab.", + }, + # Event 9: Tom's decision + { + "content": "Feeling overwhelmed, Tom decided to take a temporary leave from the lab to focus on his mental health. He believed that stepping back was the best decision for now and hoped that his absence would prompt the team to consider the pressures they were all facing.", + "context": "Tom takes a break to address his mental health, hoping to highlight team stress.", + }, + # Event 10: Sarah's pushback + { + "content": "Sarah pushed back against Robbert's position during the meeting, arguing that a more flexible approach would ultimately lead to better results. She highlighted the risks of burnout and the benefits of supporting team members through their personal struggles. The team found itself divided between Robbert's hardline approach and Sarah's call for change.", + "context": "Sarah challenges Robbert's views, leading to a team split over work policies.", + }, + # Event 11: A breakthrough idea + { + "content": "During a late-night discussion, Leon and Sarah brainstormed a novel approach to restructure the AI minor. They envisioned incorporating elements of ethics and mental health awareness into the curriculum, aligning the program with current societal needs. Energized by this new direction, Leon believed it could address both the challenges facing the AI minor and the lab's workload issues.", + "context": "Leon and Sarah create a plan integrating ethics and mental health into the AI minor.", + }, + # Event 12: Tom's return + { + "content": "After his break, Tom returned to the lab feeling refreshed and ready to contribute again. He appreciated the support from his colleagues and felt more optimistic about balancing his mental health with work. 
Tom's return brought a renewed sense of hope to the team, signaling the potential for positive change.", + "context": "Tom's rejuvenated return inspires hope for better balance in the lab.", + }, + ] + + # Process stories with performance monitoring + logger.info(f"Processing {len(stories)} stories and analyzing field effects...") + for story in stories: + try: + response, metrics = await monitor.monitor_generation( + llm, story["content"] + ) + logger.debug(f"Story processing metrics: {metrics}") + + await field.add_story(story["content"], story["context"]) + + except Exception as e: + logger.error(f"Error processing story: {e}", exc_info=True) + continue + + # Log performance report at the end + performance_report = monitor.get_performance_report() + logger.info(f"Performance Report: {performance_report}") + + # Print the detailed performance metrics summary + field._performance_metrics.print_summary() - logger.info("Narrative field demonstration completed") + except Exception as e: + logger.error(f"Error in demo scenario: {e}", exc_info=True) + raise + finally: + # Clean up resources + if llm is not None: + await llm.cleanup() + global_cleanup() + logger.info("Narrative field demonstration completed") if __name__ == "__main__": - asyncio.run(demo_scenario()) + try: + asyncio.run(demo_scenario()) + finally: + global_cleanup() diff --git a/requirements.txt b/requirements.txt index 28c1398..4fda0f7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,4 @@ ollama -chromadb \ No newline at end of file +chromadb +llama-cpp-python +numpy \ No newline at end of file From 1fbd10fcc3daabc0fd160e871e0ff4348d2b6255 Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Fri, 25 Oct 2024 07:52:44 +0200 Subject: [PATCH 07/14] Moves pos to src for further refactoring into main app. 
Refactors out the logging mechanism --- src/logging_config.py | 37 ++++++++++++++++++++++++ {pocs => src}/nfs_simple_lab_scenario.py | 37 +++--------------------- 2 files changed, 41 insertions(+), 33 deletions(-) create mode 100644 src/logging_config.py rename {pocs => src}/nfs_simple_lab_scenario.py (96%) diff --git a/src/logging_config.py b/src/logging_config.py new file mode 100644 index 0000000..33994f0 --- /dev/null +++ b/src/logging_config.py @@ -0,0 +1,37 @@ +import logging +import logging.handlers +import os +from datetime import datetime + +# Constants +MAX_LOG_FILE_SIZE: int = 10 * 1024 * 1024 # 10 MB +MAX_LOG_BACKUP_COUNT: int = 9 # 10 files total + +def setup_logging() -> None: + log_dir = os.path.join(os.path.dirname(__file__), "logs") + os.makedirs(log_dir, exist_ok=True) + + current_time = datetime.now().strftime("%Y%m%d_%H%M%S") + log_file = os.path.join(log_dir, f"nfs_{current_time}.log") + + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + + # File Handler + file_handler = logging.handlers.RotatingFileHandler( + log_file, maxBytes=MAX_LOG_FILE_SIZE, backupCount=MAX_LOG_BACKUP_COUNT + ) + file_handler.setLevel(logging.DEBUG) + file_formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + ) + file_handler.setFormatter(file_formatter) + + # Console Handler + console_handler = logging.StreamHandler() + console_handler.setLevel(logging.INFO) + console_formatter = logging.Formatter("%(asctime)s - %(message)s") + console_handler.setFormatter(console_formatter) + + logger.addHandler(file_handler) + logger.addHandler(console_handler) diff --git a/pocs/nfs_simple_lab_scenario.py b/src/nfs_simple_lab_scenario.py similarity index 96% rename from pocs/nfs_simple_lab_scenario.py rename to src/nfs_simple_lab_scenario.py index eedbc6d..969ce84 100644 --- a/pocs/nfs_simple_lab_scenario.py +++ b/src/nfs_simple_lab_scenario.py @@ -23,7 +23,7 @@ import multiprocessing as mp import time from typing import Tuple -import gc # Add this import at the top of your file +import gc import atexit import torch import platform @@ -32,6 +32,8 @@ import ctypes import sys +# Local imports +from logging_config import setup_logging # Type Definitions StoryID = NewType("StoryID", str) @@ -58,37 +60,6 @@ } -# Logging Setup -def setup_logging() -> None: - log_dir = os.path.join(os.path.dirname(__file__), "logs") - os.makedirs(log_dir, exist_ok=True) - - current_time = datetime.now().strftime("%Y%m%d_%H%M%S") - log_file = os.path.join(log_dir, f"nfs_{current_time}.log") - - logger = logging.getLogger() - logger.setLevel(logging.DEBUG) - - # File Handler - file_handler = logging.handlers.RotatingFileHandler( - log_file, maxBytes=MAX_LOG_FILE_SIZE, backupCount=MAX_LOG_BACKUP_COUNT - ) - file_handler.setLevel(logging.DEBUG) - file_formatter = logging.Formatter( - "%(asctime)s - %(name)s - %(levelname)s - %(message)s" - ) - file_handler.setFormatter(file_formatter) - - # Console Handler - console_handler = logging.StreamHandler() - console_handler.setLevel(logging.INFO) - console_formatter = logging.Formatter("%(asctime)s - %(message)s") - console_handler.setFormatter(console_formatter) - - logger.addHandler(file_handler) - logger.addHandler(console_handler) - - # Base Classes class LanguageModel(ABC): @abstractmethod @@ -730,7 +701,6 @@ def global_cleanup(): async def demo_scenario(): - setup_logging() logger = logging.getLogger(__name__) logger.info("Starting narrative field demonstration...") @@ -847,6 +817,7 @@ async def demo_scenario(): if __name__ == "__main__": 
try: + setup_logging() # Call the setup_logging function from the imported module asyncio.run(demo_scenario()) finally: global_cleanup() From de0e93c91533b73020e546abc709fb3e297c6914 Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Fri, 25 Oct 2024 10:13:38 +0200 Subject: [PATCH 08/14] Refactor LanguageModel Fixes #4 --- .gitignore | 4 + .vscode/launch.json | 15 +++ requirements.txt | 3 +- src/config.py | 34 ++++++ src/language_models.py | 186 +++++++++++++++++++++++++++++++++ src/logging_config.py | 48 +++++++-- src/nfs_simple_lab_scenario.py | 185 ++------------------------------ 7 files changed, 290 insertions(+), 185 deletions(-) create mode 100644 .vscode/launch.json create mode 100644 src/config.py create mode 100644 src/language_models.py diff --git a/.gitignore b/.gitignore index f27f895..a2f7e24 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,8 @@ .DS_Store +models/ +logs/ +.log +*.log* # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000..6b76b4f --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,15 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Python Debugger: Current File", + "type": "debugpy", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal" + } + ] +} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 4fda0f7..4623f54 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ ollama chromadb llama-cpp-python -numpy \ No newline at end of file +numpy +appdirs \ No newline at end of file diff --git a/src/config.py b/src/config.py new file mode 100644 index 0000000..f0736aa --- /dev/null +++ b/src/config.py @@ -0,0 +1,34 @@ +from pathlib import Path + +APP_NAME: str = "lab-politik" +IS_DEVELOPMENT: bool = True + +MODEL_CONFIGS = { + "balanced": { + "chat": { + "path": Path( + "~/.cache/lm-studio/models/lmstudio-community/" + "Mistral-Nemo-Instruct-2407-GGUF/" + "Mistral-Nemo-Instruct-2407-Q4_K_M.gguf" + ).expanduser(), + "model_name": "mistral-nemo:latest", + }, + "embedding": { + "path": Path( + "~/.cache/lm-studio/models/elliotsayes/" + "mxbai-embed-large-v1-Q4_K_M-GGUF/" + "mxbai-embed-large-v1-q4_k_m.gguf" + ).expanduser(), + "model_name": "mxbai-embed-large:latest", + }, + "optimal_config": { + "n_gpu_layers": -1, + "n_batch": 512, + "n_ctx": 4096, + "metal_device": "mps", + "main_gpu": 0, + "use_metal": True, + "n_threads": 4, + }, + }, +} diff --git a/src/language_models.py b/src/language_models.py new file mode 100644 index 0000000..130bcc7 --- /dev/null +++ b/src/language_models.py @@ -0,0 +1,186 @@ +from abc import ABC, abstractmethod +from typing import Dict, List +import asyncio +import gc +import logging +from pathlib import Path + +import ollama +import torch +from llama_cpp import Llama +from config import MODEL_CONFIGS + +class LanguageModel(ABC): + """Abstract base class for language models.""" + + def __init__(self): + self.logger = logging.getLogger(self.__class__.__name__) + + @abstractmethod + async def generate(self, prompt: str) -> str: + """Generate a response for the given prompt.""" + pass + + @abstractmethod + async def generate_embedding(self, text: str) -> List[float]: + """Generate an embedding for the given text.""" + pass + + @abstractmethod + async def cleanup(self) 
-> None: + """Clean up resources used by the model.""" + pass + + +class OllamaInterface(LanguageModel): + """Interface for the Ollama language model.""" + + def __init__(self, quality_preset: str = "balanced"): + """Initialize the OllamaInterface.""" + super().__init__() + try: + self.chat_model_path = MODEL_CONFIGS[quality_preset]["chat"]["model_name"] + self.embedding_model_path = MODEL_CONFIGS[quality_preset]["embedding"][ + "model_name" + ] + self.embedding_cache: Dict[int, List[float]] = {} + self.logger.info( + f"Initialized OllamaInterface with {quality_preset} preset" + ) + except KeyError as e: + self.logger.error(f"Invalid quality preset: {quality_preset}") + raise ValueError(f"Invalid quality preset: {quality_preset}") from e + + async def generate(self, prompt: str) -> str: + """Generate a response for the given prompt.""" + self.logger.debug(f"Generating response for prompt: {prompt}") + try: + response = await asyncio.to_thread( + ollama.chat, + model=self.chat_model_path, + messages=[{"role": "user", "content": prompt}], + ) + self.logger.debug(f"Response from LLM: {response['message']['content']}") + return response["message"]["content"] + except Exception as e: + self.logger.error(f"Error generating response: {e}", exc_info=True) + raise e + + async def generate_embedding(self, text: str) -> List[float]: + """Generate an embedding for the given text.""" + cache_key = hash(text) + if cache_key in self.embedding_cache: + self.logger.debug(f"Embedding found in cache for hash: {cache_key}") + return self.embedding_cache[cache_key] + + self.logger.debug(f"Generating embedding for text: {text[:50]}...") + try: + response = await asyncio.to_thread( + ollama.embeddings, + model=self.embedding_model_path, + prompt=text, + ) + embedding = response["embedding"] + self.embedding_cache[cache_key] = embedding + self.logger.debug(f"Embedding generated and cached for hash: {cache_key}") + return embedding + except Exception as e: + self.logger.error(f"Error generating embedding: {e}", exc_info=True) + raise e + + async def cleanup(self) -> None: + """Clean up resources used by the model.""" + self.logger.info("Cleaning up OllamaInterface resources") + self.embedding_cache.clear() + try: + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + self.logger.debug("OllamaInterface cleanup completed") + except Exception as e: + self.logger.error(f"Error cleaning up resources: {e}", exc_info=True) + + +class LlamaInterface(LanguageModel): + """Interface for the Llama language model.""" + + def __init__(self, quality_preset: str = "balanced"): + """Initialize the LlamaInterface.""" + super().__init__() + try: + self.quality_preset = quality_preset + self.chat_model_path = MODEL_CONFIGS[quality_preset]["chat"]["path"] + self.embedding_model_path = MODEL_CONFIGS[quality_preset]["embedding"][ + "path" + ] + self.optimal_config = MODEL_CONFIGS[quality_preset]["optimal_config"] + self.embedding_cache: Dict[int, List[float]] = {} + self.llm: Llama | None = None + self.embedding_model: Llama | None = None + self.setup_models() + self.logger.info(f"Initialized LlamaInterface with {quality_preset} preset") + except KeyError as e: + self.logger.error(f"Invalid quality preset: {quality_preset}") + raise ValueError(f"Invalid quality preset: {quality_preset}") from e + + def setup_models(self) -> None: + """Set up the language models.""" + try: + self.logger.info("Setting up Llama models") + self.llm = Llama( + model_path=str(self.chat_model_path), + verbose=False, + 
**self.optimal_config, + ) + self.embedding_model = Llama( + model_path=str(self.embedding_model_path), + embedding=True, + verbose=False, + **self.optimal_config, + ) + self.logger.info("Llama models set up successfully") + except Exception as e: + self.logger.error(f"Failed to load models: {e}", exc_info=True) + raise e + + async def generate(self, prompt: str) -> str: + """Generate a response for the given prompt.""" + self.logger.debug(f"Generating response for prompt: {prompt}") + try: + response = await asyncio.to_thread( + self.llm.create_chat_completion, + messages=[{"role": "user", "content": prompt}], + ) + self.logger.debug( + f"Response from LLM: {response['choices'][0]['message']['content']}" + ) + return response["choices"][0]["message"]["content"] + except Exception as e: + self.logger.error(f"Error generating response: {e}", exc_info=True) + raise e + + async def generate_embedding(self, text: str) -> List[float]: + """Generate an embedding for the given text.""" + self.logger.debug(f"Generating embedding for text: {text[:50]}...") + try: + embedding = await asyncio.to_thread(self.embedding_model.embed, text) + self.logger.debug(f"Embedding generated successfully") + return embedding + except Exception as e: + self.logger.error(f"Error generating embedding: {e}", exc_info=True) + raise e + + async def cleanup(self) -> None: + """Clean up resources used by the model.""" + self.logger.info("Cleaning up LlamaInterface resources") + try: + if self.llm: + del self.llm + if self.embedding_model: + del self.embedding_model + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + self.logger.debug("LlamaInterface cleanup completed") + except Exception as e: + self.logger.error(f"Error cleaning up resources: {e}", exc_info=True) diff --git a/src/logging_config.py b/src/logging_config.py index 33994f0..010ccb9 100644 --- a/src/logging_config.py +++ b/src/logging_config.py @@ -2,36 +2,66 @@ import logging.handlers import os from datetime import datetime +import glob +import appdirs +from config import APP_NAME, IS_DEVELOPMENT # Constants MAX_LOG_FILE_SIZE: int = 10 * 1024 * 1024 # 10 MB -MAX_LOG_BACKUP_COUNT: int = 9 # 10 files total +MAX_LOG_FILES: int = 10 # Total number of log files to keep + def setup_logging() -> None: - log_dir = os.path.join(os.path.dirname(__file__), "logs") + """Set up logging configuration for the application.""" + if IS_DEVELOPMENT: + log_dir = os.path.join(os.path.dirname(__file__), "..", "logs") + else: + log_dir = appdirs.user_log_dir(APP_NAME) + os.makedirs(log_dir, exist_ok=True) current_time = datetime.now().strftime("%Y%m%d_%H%M%S") - log_file = os.path.join(log_dir, f"nfs_{current_time}.log") + log_file = os.path.join(log_dir, f"{APP_NAME}_{current_time}.log") logger = logging.getLogger() logger.setLevel(logging.DEBUG) - # File Handler + logger.addHandler(_create_file_handler(log_file)) + logger.addHandler(_create_console_handler()) + + _cleanup_old_logs(log_dir) + + logging.info(f"Logging initialized. 
Log file: {log_file}") + + +def _create_file_handler(log_file: str) -> logging.Handler: + """Create and configure a file handler for logging.""" file_handler = logging.handlers.RotatingFileHandler( - log_file, maxBytes=MAX_LOG_FILE_SIZE, backupCount=MAX_LOG_BACKUP_COUNT + log_file, maxBytes=MAX_LOG_FILE_SIZE, backupCount=MAX_LOG_FILES - 1 ) file_handler.setLevel(logging.DEBUG) file_formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(message)s" ) file_handler.setFormatter(file_formatter) + return file_handler - # Console Handler + +def _create_console_handler() -> logging.Handler: + """Create and configure a console handler for logging.""" console_handler = logging.StreamHandler() console_handler.setLevel(logging.INFO) - console_formatter = logging.Formatter("%(asctime)s - %(message)s") + console_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") console_handler.setFormatter(console_formatter) + return console_handler + - logger.addHandler(file_handler) - logger.addHandler(console_handler) +def _cleanup_old_logs(log_dir: str) -> None: + """Remove old log files if the total number exceeds MAX_LOG_FILES.""" + log_files = glob.glob(os.path.join(log_dir, f"{APP_NAME}_*.log*")) + log_files.sort(key=os.path.getmtime, reverse=True) + for old_file in log_files[MAX_LOG_FILES:]: + try: + os.remove(old_file) + except OSError as e: + logging.error(f"Error deleting old log file {old_file}: {e}") diff --git a/src/nfs_simple_lab_scenario.py b/src/nfs_simple_lab_scenario.py index 969ce84..828ad93 100644 --- a/src/nfs_simple_lab_scenario.py +++ b/src/nfs_simple_lab_scenario.py @@ -4,36 +4,27 @@ """ from __future__ import annotations -from typing import List, Dict, Any, Optional, Final, NewType +from typing import List, Dict, Any, Optional, Final, NewType, Tuple from datetime import datetime from uuid import uuid4 from dataclasses import dataclass, field from abc import ABC, abstractmethod import logging import logging.handlers -import json -import os -import asyncio -import chromadb -from chromadb.config import Settings -import ollama -from llama_cpp import Llama -import numpy as np import psutil -import multiprocessing as mp import time -from typing import Tuple import gc import atexit +import json +import asyncio import torch -import platform -import subprocess -from llama_cpp import llama -import ctypes -import sys +import chromadb +from chromadb.config import Settings + # Local imports from logging_config import setup_logging +from language_models import LanguageModel, OllamaInterface, LlamaInterface # Type Definitions StoryID = NewType("StoryID", str) @@ -41,34 +32,6 @@ # Constants DEFAULT_SIMILARITY_THRESHOLD: Final[float] = 0.8 DEFAULT_RESONANCE_LIMIT: Final[int] = 3 -MAX_LOG_FILE_SIZE: Final[int] = 10 * 1024 * 1024 # 10 MB -MAX_LOG_BACKUP_COUNT: Final[int] = 9 # 10 files total - -# Different quantization options from best quality to smallest size -MODEL_CONFIGS = { - "balanced": { - "chat": { - "path": "/Users/leonvanbokhorst/.cache/lm-studio/models/lmstudio-community/Llama-3.2-3B-Instruct-GGUF/Llama-3.2-3B-Instruct-Q8_0.gguf", - "size_gb": 3.42, - "relative_speed": 0.8, - }, - "embedding": { - "path": "/Users/leonvanbokhorst/.cache/lm-studio/models/nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q8_0.gguf", - "size_gb": 0.146, - }, - }, -} - - -# Base Classes -class LanguageModel(ABC): - @abstractmethod - async def generate(self, prompt: str) -> str: - pass - - @abstractmethod - async def generate_embedding(self, text: str) -> List[float]: - 
pass class VectorStore(ABC): @@ -197,42 +160,6 @@ def get_resonance_analysis_prompt(story1: Story, story2: Story) -> str: Describe connections naturally, focusing on meaning and impact.""" -# Core Components -class OllamaInterface(LanguageModel): - def __init__( - self, - quality_preset: str = "balanced", - model_path: str = None, - embedding_model_path: str = None, - ): - config = MODEL_CONFIGS[quality_preset] - self.chat_model_path = model_path or config["chat"]["path"] - self.embedding_model_path = embedding_model_path or config["embedding"]["path"] - self.embedding_cache: Dict[str, List[float]] = {} - self.logger = logging.getLogger(__name__) - - async def generate(self, prompt: str) -> str: - self.logger.debug(f"Generating response for prompt: {prompt}") - response = await asyncio.to_thread( - ollama.chat, - model=self.model, - messages=[{"role": "user", "content": prompt}], - ) - self.logger.debug(f"Response from LLM: {response['message']['content']}") - return response["message"]["content"] - - async def generate_embedding(self, text: str) -> List[float]: - if text in self.embedding_cache: - return self.embedding_cache[text] - - response = await asyncio.to_thread( - ollama.embeddings, model=self.embed_model, prompt=text - ) - embedding = response["embedding"] - self.embedding_cache[text] = embedding - return embedding - - class PerformanceMetrics: def __init__(self): self.metrics: Dict[str, Dict[str, Any]] = {} @@ -284,102 +211,12 @@ def log_system_resources(self): self.logger.info(f"Memory Usage: {memory_info.percent}%") -import warnings - - -class LlamaInterface(LanguageModel): - def __init__( - self, - quality_preset: str = "balanced", - model_path: str = None, - embedding_model_path: str = None, - **kwargs, - ): - config = MODEL_CONFIGS[quality_preset] - chat_model_path = model_path or config["chat"]["path"] - embedding_model_path = embedding_model_path or config["embedding"]["path"] - - # Modified initialization parameters - optimal_config = { - "n_gpu_layers": -1, - "n_batch": 512, - "n_ctx": 16384, - # "n_kv": 256, - # "flash_attn": True, # Use Flash Attention for faster attention - # "rope_scaling_type": 1, # Use dynamic rope scaling for better performance - # "use_mmap": True, # Use memory-mapped files for faster loading - # "use_mlock": False, # Lock the model in memory - "metal_device": "mps", # Use Metal for GPU acceleration - "main_gpu": 0, # Use the first GPU - "use_metal": True, # Explicitly enable Metal - "n_threads": 4, # Use 4 threads for parallelism - # "offload_kqv": True, # Offload KV cache to CPU - } - - self.logger = logging.getLogger(__name__) - - try: - # Suppress warnings about the callback function - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - self.llm = Llama( - model_path=chat_model_path, - verbose=False, # Set to False to reduce logging - **optimal_config, - ) - - self.embedding_model = Llama( - model_path=embedding_model_path, - embedding=True, - verbose=False, # Set to False to reduce logging - **optimal_config, - ) - except Exception as e: - self.logger.error(f"Failed to load models: {e}", exc_info=True) - raise - - async def generate(self, prompt: str) -> str: - """Generate response using the LLM""" - self.logger.debug(f"Generating response for prompt: {prompt}") - try: - response = await asyncio.to_thread( - self.llm.create_chat_completion, - messages=[{"role": "user", "content": prompt}], - ) - self.logger.debug( - f"Response from LLM: {response['choices'][0]['message']['content']}" - ) - return 
response["choices"][0]["message"]["content"] - except Exception as e: - self.logger.error(f"Error generating response: {e}", exc_info=True) - return "Error generating response" - - async def generate_embedding(self, text: str) -> List[float]: - """Generate embedding for the given text""" - try: - embedding = await asyncio.to_thread(self.embedding_model.embed, text) - return embedding - except Exception as e: - self.logger.error(f"Error generating embedding: {e}", exc_info=True) - return [] - - async def cleanup(self): - """Clean up resources""" - if hasattr(self, "llm"): - del self.llm - if hasattr(self, "embedding_model"): - del self.embedding_model - gc.collect() - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - class PerformanceMonitor: def __init__(self): self.metrics = [] async def monitor_generation( - self, llm: LlamaInterface, prompt: str + self, llm: LanguageModel, prompt: str ) -> Tuple[str, Dict[str, float]]: start_time = time.perf_counter() memory_before = psutil.virtual_memory().used @@ -419,7 +256,7 @@ class BatchMetrics: class BatchProcessor: - def __init__(self, llm: LlamaInterface): + def __init__(self, llm: LanguageModel): self.llm = llm self.optimal_batch_size = 4 # Will be adjusted dynamically @@ -713,9 +550,7 @@ async def demo_scenario(): # Perform global cleanup before initializing new LLM global_cleanup() - # Initialize components with LlamaInterface - # Only pass the parameters that LlamaInterface expects - llm = LlamaInterface() + llm = OllamaInterface() vector_store: VectorStore = ChromaStore(collection_name="research_lab") logger.info(f"Initialized Chroma vector store") From 9ed4732b4498756a441b72ba519a89ac9f5fb05f Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Fri, 25 Oct 2024 10:54:40 +0200 Subject: [PATCH 09/14] Refactor language model enhancements after review --- src/embedding_cache.py | 51 +++++++++ src/language_models.py | 188 ++++++++++++++++++--------------- src/nfs_simple_lab_scenario.py | 2 +- 3 files changed, 157 insertions(+), 84 deletions(-) create mode 100644 src/embedding_cache.py diff --git a/src/embedding_cache.py b/src/embedding_cache.py new file mode 100644 index 0000000..6bc60b1 --- /dev/null +++ b/src/embedding_cache.py @@ -0,0 +1,51 @@ +"""Module for caching embeddings with different storage strategies.""" + +from abc import ABC, abstractmethod +import hashlib +from typing import Dict, List, Optional + + +class EmbeddingCache(ABC): + """Abstract base class for embedding caches.""" + + @abstractmethod + def get(self, key: str) -> Optional[List[float]]: + """Retrieve an embedding from the cache.""" + + @abstractmethod + def set(self, key: str, value: List[float]) -> None: + """Store an embedding in the cache.""" + + @abstractmethod + def clear(self) -> None: + """Clear all entries from the cache.""" + + +class InMemoryEmbeddingCache(EmbeddingCache): + """In-memory implementation of the EmbeddingCache.""" + + def __init__(self): + self._cache: Dict[str, List[float]] = {} + + @staticmethod + def get_stable_hash(text: str) -> str: + """Generate a stable hash for the given text.""" + return hashlib.sha256(text.encode()).hexdigest() + + def get(self, key: str) -> Optional[List[float]]: + """Retrieve an embedding from the cache using a hashed key.""" + hashed_key = self.get_stable_hash(key) + return self._cache.get(hashed_key) + + def set(self, key: str, value: List[float]) -> None: + """Store an embedding in the cache using a hashed key.""" + hashed_key = self.get_stable_hash(key) + self._cache[hashed_key] = value + + def 
clear(self) -> None: + """Clear all entries from the cache.""" + self._cache.clear() + + + + diff --git a/src/language_models.py b/src/language_models.py index 130bcc7..2325c66 100644 --- a/src/language_models.py +++ b/src/language_models.py @@ -1,3 +1,4 @@ +from __future__ import annotations from abc import ABC, abstractmethod from typing import Dict, List import asyncio @@ -9,12 +10,32 @@ import torch from llama_cpp import Llama from config import MODEL_CONFIGS +from embedding_cache import EmbeddingCache, InMemoryEmbeddingCache + + +class ModelInitializationError(Exception): + """Custom exception for model initialization errors.""" + + pass + + +def async_error_handler(func): + async def wrapper(*args, **kwargs): + try: + return await func(*args, **kwargs) + except Exception as e: + logging.error(f"Error in {func.__name__}: {e}", exc_info=True) + raise + + return wrapper + class LanguageModel(ABC): """Abstract base class for language models.""" def __init__(self): self.logger = logging.getLogger(self.__class__.__name__) + self.embedding_cache: EmbeddingCache = InMemoryEmbeddingCache() @abstractmethod async def generate(self, prompt: str) -> str: @@ -22,106 +43,110 @@ async def generate(self, prompt: str) -> str: pass @abstractmethod + async def _generate_embedding(self, text: str) -> List[float]: + """Internal method to generate an embedding.""" + pass + + @async_error_handler async def generate_embedding(self, text: str) -> List[float]: """Generate an embedding for the given text.""" - pass + if not text: + self.logger.warning("Attempted to generate embedding for empty text") + return [] + + cached_embedding = self.embedding_cache.get(text) + if cached_embedding: + self.logger.info(f"Embedding found in cache for text: {text[:50]}...") + return cached_embedding + + self.logger.info(f"Generating embedding for text: {text[:50]}...") + try: + embedding = await self._generate_embedding(text) + self.embedding_cache.set(text, embedding) + self.logger.info(f"Embedding generated and cached for text: {text[:50]}...") + return embedding + except Exception as e: + self.logger.error(f"Failed to generate embedding: {e}", exc_info=True) + raise - @abstractmethod async def cleanup(self) -> None: """Clean up resources used by the model.""" - pass + self.logger.info(f"Cleaning up {self.__class__.__name__} resources") + self.embedding_cache.clear() + try: + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + self.logger.info(f"{self.__class__.__name__} cleanup completed") + except Exception as e: + self.logger.error(f"Error cleaning up resources: {e}", exc_info=True) + # We don't re-raise here as cleanup errors shouldn't stop the program class OllamaInterface(LanguageModel): """Interface for the Ollama language model.""" def __init__(self, quality_preset: str = "balanced"): - """Initialize the OllamaInterface.""" super().__init__() try: self.chat_model_path = MODEL_CONFIGS[quality_preset]["chat"]["model_name"] self.embedding_model_path = MODEL_CONFIGS[quality_preset]["embedding"][ "model_name" ] - self.embedding_cache: Dict[int, List[float]] = {} - self.logger.info( - f"Initialized OllamaInterface with {quality_preset} preset" - ) except KeyError as e: - self.logger.error(f"Invalid quality preset: {quality_preset}") - raise ValueError(f"Invalid quality preset: {quality_preset}") from e + raise ModelInitializationError( + f"Invalid quality preset or missing configuration: {e}" + ) + @async_error_handler async def generate(self, prompt: str) -> str: """Generate a response for the given 
prompt.""" - self.logger.debug(f"Generating response for prompt: {prompt}") + if not prompt: + self.logger.warning("Attempted to generate response for empty prompt") + return "" + + self.logger.info(f"Generating response for prompt: {prompt[:50]}...") try: response = await asyncio.to_thread( ollama.chat, model=self.chat_model_path, messages=[{"role": "user", "content": prompt}], ) - self.logger.debug(f"Response from LLM: {response['message']['content']}") + self.logger.info("Response received from LLM") + self.logger.debug(f"Full response: {response['message']['content']}") return response["message"]["content"] except Exception as e: - self.logger.error(f"Error generating response: {e}", exc_info=True) - raise e + self.logger.error(f"Failed to generate response: {e}", exc_info=True) + raise - async def generate_embedding(self, text: str) -> List[float]: - """Generate an embedding for the given text.""" - cache_key = hash(text) - if cache_key in self.embedding_cache: - self.logger.debug(f"Embedding found in cache for hash: {cache_key}") - return self.embedding_cache[cache_key] - - self.logger.debug(f"Generating embedding for text: {text[:50]}...") - try: - response = await asyncio.to_thread( - ollama.embeddings, - model=self.embedding_model_path, - prompt=text, - ) - embedding = response["embedding"] - self.embedding_cache[cache_key] = embedding - self.logger.debug(f"Embedding generated and cached for hash: {cache_key}") - return embedding - except Exception as e: - self.logger.error(f"Error generating embedding: {e}", exc_info=True) - raise e - - async def cleanup(self) -> None: - """Clean up resources used by the model.""" - self.logger.info("Cleaning up OllamaInterface resources") - self.embedding_cache.clear() - try: - gc.collect() - if torch.cuda.is_available(): - torch.cuda.empty_cache() - self.logger.debug("OllamaInterface cleanup completed") - except Exception as e: - self.logger.error(f"Error cleaning up resources: {e}", exc_info=True) + async def _generate_embedding(self, text: str) -> List[float]: + """Internal method to generate an embedding using Ollama.""" + response = await asyncio.to_thread( + ollama.embeddings, + model=self.embedding_model_path, + prompt=text, + ) + return response["embedding"] class LlamaInterface(LanguageModel): """Interface for the Llama language model.""" def __init__(self, quality_preset: str = "balanced"): - """Initialize the LlamaInterface.""" super().__init__() try: - self.quality_preset = quality_preset self.chat_model_path = MODEL_CONFIGS[quality_preset]["chat"]["path"] self.embedding_model_path = MODEL_CONFIGS[quality_preset]["embedding"][ "path" ] self.optimal_config = MODEL_CONFIGS[quality_preset]["optimal_config"] - self.embedding_cache: Dict[int, List[float]] = {} - self.llm: Llama | None = None - self.embedding_model: Llama | None = None - self.setup_models() - self.logger.info(f"Initialized LlamaInterface with {quality_preset} preset") except KeyError as e: - self.logger.error(f"Invalid quality preset: {quality_preset}") - raise ValueError(f"Invalid quality preset: {quality_preset}") from e + raise ModelInitializationError( + f"Invalid quality preset or missing configuration: {e}" + ) + self.llm: Llama | None = None + self.embedding_model: Llama | None = None + self.setup_models() def setup_models(self) -> None: """Set up the language models.""" @@ -141,46 +166,43 @@ def setup_models(self) -> None: self.logger.info("Llama models set up successfully") except Exception as e: self.logger.error(f"Failed to load models: {e}", exc_info=True) - 
raise e + raise ModelInitializationError(f"Failed to initialize Llama models: {e}") + @async_error_handler async def generate(self, prompt: str) -> str: """Generate a response for the given prompt.""" - self.logger.debug(f"Generating response for prompt: {prompt}") + if not prompt: + self.logger.warning("Attempted to generate response for empty prompt") + return "" + + if not self.llm: + raise ModelInitializationError("Llama model not initialized") + + self.logger.info(f"Generating response for prompt: {prompt[:50]}...") try: response = await asyncio.to_thread( self.llm.create_chat_completion, messages=[{"role": "user", "content": prompt}], ) + self.logger.info("Response received from LLM") self.logger.debug( - f"Response from LLM: {response['choices'][0]['message']['content']}" + f"Full response: {response['choices'][0]['message']['content']}" ) return response["choices"][0]["message"]["content"] except Exception as e: - self.logger.error(f"Error generating response: {e}", exc_info=True) - raise e + self.logger.error(f"Failed to generate response: {e}", exc_info=True) + raise - async def generate_embedding(self, text: str) -> List[float]: - """Generate an embedding for the given text.""" - self.logger.debug(f"Generating embedding for text: {text[:50]}...") - try: - embedding = await asyncio.to_thread(self.embedding_model.embed, text) - self.logger.debug(f"Embedding generated successfully") - return embedding - except Exception as e: - self.logger.error(f"Error generating embedding: {e}", exc_info=True) - raise e + async def _generate_embedding(self, text: str) -> List[float]: + """Internal method to generate an embedding using Llama.""" + if not self.embedding_model: + raise ModelInitializationError("Embedding model not initialized") + return await asyncio.to_thread(self.embedding_model.embed, text) async def cleanup(self) -> None: """Clean up resources used by the model.""" - self.logger.info("Cleaning up LlamaInterface resources") - try: - if self.llm: - del self.llm - if self.embedding_model: - del self.embedding_model - gc.collect() - if torch.cuda.is_available(): - torch.cuda.empty_cache() - self.logger.debug("LlamaInterface cleanup completed") - except Exception as e: - self.logger.error(f"Error cleaning up resources: {e}", exc_info=True) + await super().cleanup() + if self.llm: + del self.llm + if self.embedding_model: + del self.embedding_model diff --git a/src/nfs_simple_lab_scenario.py b/src/nfs_simple_lab_scenario.py index 828ad93..6c8c2e4 100644 --- a/src/nfs_simple_lab_scenario.py +++ b/src/nfs_simple_lab_scenario.py @@ -550,7 +550,7 @@ async def demo_scenario(): # Perform global cleanup before initializing new LLM global_cleanup() - llm = OllamaInterface() + llm = LlamaInterface() vector_store: VectorStore = ChromaStore(collection_name="research_lab") logger.info(f"Initialized Chroma vector store") From c0799f172df99593fdb1eb8f4bda0102f2f2dcb0 Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Fri, 25 Oct 2024 11:41:48 +0200 Subject: [PATCH 10/14] Refactoring after second review --- src/language_models.py | 81 +++++++++++++++++++++++++--------- src/nfs_simple_lab_scenario.py | 6 +-- 2 files changed, 62 insertions(+), 25 deletions(-) diff --git a/src/language_models.py b/src/language_models.py index 2325c66..0766301 100644 --- a/src/language_models.py +++ b/src/language_models.py @@ -1,9 +1,10 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Dict, List +from typing import List, Callable import asyncio import gc import logging 
+from functools import wraps from pathlib import Path import ollama @@ -13,19 +14,37 @@ from embedding_cache import EmbeddingCache, InMemoryEmbeddingCache +class NetworkError(Exception): + """Custom exception for network errors.""" + + pass + + +class ModelError(Exception): + """Custom exception for model errors.""" + + pass + + +class APIError(Exception): + """Custom exception for API errors.""" + + pass + + class ModelInitializationError(Exception): """Custom exception for model initialization errors.""" pass -def async_error_handler(func): +def async_error_handler(func: Callable) -> Callable: + @wraps(func) async def wrapper(*args, **kwargs): try: return await func(*args, **kwargs) - except Exception as e: - logging.error(f"Error in {func.__name__}: {e}", exc_info=True) - raise + except (NetworkError, APIError) as e: + raise ModelError(str(e)) from e return wrapper @@ -35,6 +54,7 @@ class LanguageModel(ABC): def __init__(self): self.logger = logging.getLogger(self.__class__.__name__) + self.logger.info(f"Initializing {self.__class__.__name__}") self.embedding_cache: EmbeddingCache = InMemoryEmbeddingCache() @abstractmethod @@ -80,7 +100,6 @@ async def cleanup(self) -> None: self.logger.info(f"{self.__class__.__name__} cleanup completed") except Exception as e: self.logger.error(f"Error cleaning up resources: {e}", exc_info=True) - # We don't re-raise here as cleanup errors shouldn't stop the program class OllamaInterface(LanguageModel): @@ -89,14 +108,28 @@ class OllamaInterface(LanguageModel): def __init__(self, quality_preset: str = "balanced"): super().__init__() try: - self.chat_model_path = MODEL_CONFIGS[quality_preset]["chat"]["model_name"] - self.embedding_model_path = MODEL_CONFIGS[quality_preset]["embedding"][ + self.chat_model_name = MODEL_CONFIGS[quality_preset]["chat"]["model_name"] + self.embedding_model_name = MODEL_CONFIGS[quality_preset]["embedding"][ "model_name" ] + self._setup_models() except KeyError as e: raise ModelInitializationError( f"Invalid quality preset or missing configuration: {e}" - ) + ) from e + + def _setup_models(self) -> None: + """Set up the language models.""" + self.logger.info(f"Setting up Ollama models for {self.chat_model_name}") + self.logger.info( + f"Setting up Ollama embedding model for {self.embedding_model_name}" + ) + try: + self.logger.info("Starting Ollama") + ollama.ps() + self.logger.info("Ollama started successfully") + except Exception as e: + raise ModelInitializationError(f"Failed to start Ollama: {e}") from e @async_error_handler async def generate(self, prompt: str) -> str: @@ -109,7 +142,7 @@ async def generate(self, prompt: str) -> str: try: response = await asyncio.to_thread( ollama.chat, - model=self.chat_model_path, + model=self.chat_model_name, messages=[{"role": "user", "content": prompt}], ) self.logger.info("Response received from LLM") @@ -117,13 +150,13 @@ async def generate(self, prompt: str) -> str: return response["message"]["content"] except Exception as e: self.logger.error(f"Failed to generate response: {e}", exc_info=True) - raise + raise ModelError(f"Failed to generate response: {e}") from e async def _generate_embedding(self, text: str) -> List[float]: """Internal method to generate an embedding using Ollama.""" response = await asyncio.to_thread( ollama.embeddings, - model=self.embedding_model_path, + model=self.embedding_model_name, prompt=text, ) return response["embedding"] @@ -140,18 +173,23 @@ def __init__(self, quality_preset: str = "balanced"): "path" ] self.optimal_config = 
MODEL_CONFIGS[quality_preset]["optimal_config"] + self.llm: Llama | None = None + self.embedding_model: Llama | None = None + self._setup_models() except KeyError as e: raise ModelInitializationError( f"Invalid quality preset or missing configuration: {e}" - ) - self.llm: Llama | None = None - self.embedding_model: Llama | None = None - self.setup_models() + ) from e - def setup_models(self) -> None: + def _setup_models(self) -> None: """Set up the language models.""" + chat_model_filename = Path(self.chat_model_path).name + embedding_model_filename = Path(self.embedding_model_path).name + + self.logger.info(f"Setting up Llama chat model: {chat_model_filename}") + self.logger.info(f"Setting up Llama embedding model: {embedding_model_filename}") + try: - self.logger.info("Setting up Llama models") self.llm = Llama( model_path=str(self.chat_model_path), verbose=False, @@ -163,10 +201,11 @@ def setup_models(self) -> None: verbose=False, **self.optimal_config, ) - self.logger.info("Llama models set up successfully") except Exception as e: self.logger.error(f"Failed to load models: {e}", exc_info=True) - raise ModelInitializationError(f"Failed to initialize Llama models: {e}") + raise ModelInitializationError( + f"Failed to initialize models: {e} with llama_cpp config: {self.optimal_config} " + ) from e @async_error_handler async def generate(self, prompt: str) -> str: @@ -191,7 +230,7 @@ async def generate(self, prompt: str) -> str: return response["choices"][0]["message"]["content"] except Exception as e: self.logger.error(f"Failed to generate response: {e}", exc_info=True) - raise + raise ModelError(f"Failed to generate response: {e}") from e async def _generate_embedding(self, text: str) -> List[float]: """Internal method to generate an embedding using Llama.""" diff --git a/src/nfs_simple_lab_scenario.py b/src/nfs_simple_lab_scenario.py index 6c8c2e4..f5d9f8b 100644 --- a/src/nfs_simple_lab_scenario.py +++ b/src/nfs_simple_lab_scenario.py @@ -550,7 +550,7 @@ async def demo_scenario(): # Perform global cleanup before initializing new LLM global_cleanup() - llm = LlamaInterface() + llm = OllamaInterface() vector_store: VectorStore = ChromaStore(collection_name="research_lab") logger.info(f"Initialized Chroma vector store") @@ -621,9 +621,7 @@ async def demo_scenario(): logger.info(f"Processing {len(stories)} stories and analyzing field effects...") for story in stories: try: - response, metrics = await monitor.monitor_generation( - llm, story["content"] - ) + metrics = await monitor.monitor_generation(llm, story["content"]) logger.debug(f"Story processing metrics: {metrics}") await field.add_story(story["content"], story["context"]) From 3c6dcc8f73225aa2b67999500c36d34a71010012 Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Fri, 25 Oct 2024 11:46:46 +0200 Subject: [PATCH 11/14] =?UTF-8?q?Simplifies=20EmbeddingCache=20after=20I?= =?UTF-8?q?=20got=20carried=20away=20=F0=9F=98=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/embedding_cache.py | 33 +++++---------------------------- src/language_models.py | 12 +++++++----- 2 files changed, 12 insertions(+), 33 deletions(-) diff --git a/src/embedding_cache.py b/src/embedding_cache.py index 6bc60b1..5190d47 100644 --- a/src/embedding_cache.py +++ b/src/embedding_cache.py @@ -1,28 +1,11 @@ -"""Module for caching embeddings with different storage strategies.""" +"""Module for caching embeddings.""" -from abc import ABC, abstractmethod import hashlib from typing import Dict, List, Optional 
-class EmbeddingCache(ABC): - """Abstract base class for embedding caches.""" - - @abstractmethod - def get(self, key: str) -> Optional[List[float]]: - """Retrieve an embedding from the cache.""" - - @abstractmethod - def set(self, key: str, value: List[float]) -> None: - """Store an embedding in the cache.""" - - @abstractmethod - def clear(self) -> None: - """Clear all entries from the cache.""" - - -class InMemoryEmbeddingCache(EmbeddingCache): - """In-memory implementation of the EmbeddingCache.""" +class EmbeddingCache: + """Hash-based cache for storing embeddings.""" def __init__(self): self._cache: Dict[str, List[float]] = {} @@ -34,18 +17,12 @@ def get_stable_hash(text: str) -> str: def get(self, key: str) -> Optional[List[float]]: """Retrieve an embedding from the cache using a hashed key.""" - hashed_key = self.get_stable_hash(key) - return self._cache.get(hashed_key) + return self._cache.get(self.get_stable_hash(key)) def set(self, key: str, value: List[float]) -> None: """Store an embedding in the cache using a hashed key.""" - hashed_key = self.get_stable_hash(key) - self._cache[hashed_key] = value + self._cache[self.get_stable_hash(key)] = value def clear(self) -> None: """Clear all entries from the cache.""" self._cache.clear() - - - - diff --git a/src/language_models.py b/src/language_models.py index 0766301..c4c4519 100644 --- a/src/language_models.py +++ b/src/language_models.py @@ -11,7 +11,7 @@ import torch from llama_cpp import Llama from config import MODEL_CONFIGS -from embedding_cache import EmbeddingCache, InMemoryEmbeddingCache +from embedding_cache import EmbeddingCache class NetworkError(Exception): @@ -55,7 +55,7 @@ class LanguageModel(ABC): def __init__(self): self.logger = logging.getLogger(self.__class__.__name__) self.logger.info(f"Initializing {self.__class__.__name__}") - self.embedding_cache: EmbeddingCache = InMemoryEmbeddingCache() + self.embedding_cache: EmbeddingCache = EmbeddingCache() @abstractmethod async def generate(self, prompt: str) -> str: @@ -185,10 +185,12 @@ def _setup_models(self) -> None: """Set up the language models.""" chat_model_filename = Path(self.chat_model_path).name embedding_model_filename = Path(self.embedding_model_path).name - + self.logger.info(f"Setting up Llama chat model: {chat_model_filename}") - self.logger.info(f"Setting up Llama embedding model: {embedding_model_filename}") - + self.logger.info( + f"Setting up Llama embedding model: {embedding_model_filename}" + ) + try: self.llm = Llama( model_path=str(self.chat_model_path), From 11ae6d44dbaf9afc8fda8f124b169cf7c5f3ab61 Mon Sep 17 00:00:00 2001 From: Leon van Bokhorst Date: Fri, 25 Oct 2024 12:13:23 +0200 Subject: [PATCH 12/14] clean up --- .cursor_rules | 117 ++++++++++++++++++++++++++ docs/Narrative-driven MAS Dynamics.md | 105 ----------------------- requirements.txt | 5 +- src/nfs_simple_lab_scenario.py | 2 +- 4 files changed, 122 insertions(+), 107 deletions(-) create mode 100644 .cursor_rules delete mode 100644 docs/Narrative-driven MAS Dynamics.md diff --git a/.cursor_rules b/.cursor_rules new file mode 100644 index 0000000..ede9113 --- /dev/null +++ b/.cursor_rules @@ -0,0 +1,117 @@ +You are an AI expert specialized in developing simulations that model complex human behavior and group dynamics based on Narrative Field Theory. Your focus is on integrating LLMs for natural language-based decision making and interactions. 
+ +Core Competencies: +- Multi-agent systems and emergent behavior +- Psychological modeling and group dynamics +- LLM integration and prompt engineering +- Distributed systems and event-driven architectures +- Machine learning and neural networks + +Key Scientific Foundations: +- Cognitive Science & Psychology +- Complex Systems Theory +- Social Network Analysis +- Game Theory +- Organizational Behavior + +Technical Stack: +- Python (core language) +- PyTorch (ML components) +- Transformers (LLM integration) +- Ray (distributed computing) +- FastAPI (services) +- Redis (state management) + +Code Quality Standards: +1. Style and Formatting + - Follow PEP 8 style guide + - Use black for code formatting + - Follow PEP 484 type hints + - Maximum line length: 88 characters + - Use isort for import ordering + +2. Documentation + - Google-style docstrings + - README.md for each module + - Architecture Decision Records (ADRs) + - API documentation with OpenAPI + - Type annotations for all functions + +3. Testing Requirements + - pytest for unit testing (min 80% coverage) + - Integration tests for agent interactions + - Property-based testing with hypothesis + - Performance benchmarks + - Behavioral testing for LLM components + - End-to-end testing for critical paths + - Continuous testing in CI pipeline + +4. Code Review Standards + - No commented-out code + - No TODOs in main branch + - Clear variable/function naming + - Single responsibility principle + - DRY (Don't Repeat Yourself) + - SOLID principles adherence + +5. Error Handling + - Custom exception hierarchy + - Proper exception handling + - Detailed error messages + - Proper logging levels + - Traceable error states + +Architecture Focus: + +1. System Architecture + - Event-driven processing + - Distributed computation + - Asynchronous LLM calls + - Data collection and analysis + +2. LLM Integration + - Dynamic prompt generation + - Context management + - Response parsing + - State-to-text conversion + +Development Workflow: +1. Version Control + - Git flow branching model + - Semantic versioning + - Conventional commits + - Protected main branch + - Automated releases + +2. CI/CD Pipeline + - Pre-commit hooks + - Automated testing + - Static code analysis + - Security scanning + - Performance testing + - Automated deployment + +3. Quality Gates + - Linting (flake8, pylint) + - Type checking (mypy) + - Security scanning (bandit) + - Dependency scanning + - Code coverage thresholds + - Performance benchmarks + +Key Patterns: +- Loosely coupled components +- Event-driven communication +- Asynchronous processing +- Modular design +- Observable systems + +Best Practices: +1. Clear separation of concerns +2. Efficient state management +3. Robust error handling +4. Comprehensive logging +5. Performance monitoring +6. Security by design +7. Feature flagging +8. Graceful degradation diff --git a/docs/Narrative-driven MAS Dynamics.md b/docs/Narrative-driven MAS Dynamics.md deleted file mode 100644 index a92014d..0000000 --- a/docs/Narrative-driven MAS Dynamics.md +++ /dev/null @@ -1,105 +0,0 @@ -# Narrative-Driven MAS Dynamics Simulator - -## 1. Core Agent Structure -- Remove all numeric state tracking -- Replace technical state management with narrative descriptions -- Focus on story-driven personality expression -- Use semantic interpretation rather than state machines - -## 2. 
Essential Components -- Narrative Identity (who they are, their story) -- Experiential Memory (subjective experiences) -- Interaction Engine (how they express themselves) -- Worldview (how they interpret things) -- Personality Expression (how they naturally behave) - -## 3. Key Interactions -- Semantic message interpretation -- Narrative response generation -- Experience formation -- Memory integration -- Personality expression - -## 4. What to Remove/Simplify -- Remove all numeric state tracking -- Remove complex network effects -- Remove resource management systems -- Remove technical state machines -- Remove quantitative metrics -- Simplify emergence to basic patterns - -## Proposed Narrative-Driven Structure - -### 1. Agent Core - -#### Identity -- Personal narrative (background, experiences, beliefs) -- Core personality traits (as stories, not numbers) -- Behavioral patterns (described narratively) -- Values and motivations (as meaningful stories) - -#### Memory -- Significant experiences -- Relationship histories -- Key emotional moments -- Learning experiences - -#### Worldview -- How they see others -- What they believe about the world -- Their understanding of their place -- Their interpretation filters - -### 2. Interaction Model - -#### Input -- Receive semantic information -- Interpret through personal lens -- Connect to personal experiences -- Form subjective meaning - -#### Processing -- Filter through personality -- Compare with past experiences -- Apply personal values -- Form emotional response - -#### Output -- Express through character lens -- Share subjective experience -- Communicate authentically -- Reveal appropriate emotion - -### 3. Learning/Adaptation - -#### Experience Formation -- Create meaningful narratives -- Connect to existing stories -- Form emotional associations -- Integrate into worldview - -#### Pattern Recognition -- Notice recurring themes -- Identify relationship patterns -- Understand emotional triggers -- See behavior cycles - -## Key Implementation Principles - -### 1. Everything is a Story -- No numeric states, only narratives -- No quantitative measures, only qualitative descriptions -- No technical states, only experiential states -- No resource counting, only meaningful impact - -### 2. Interaction is Interpretation -- Messages are interpreted through personal lens -- Responses come from character and experience -- Learning happens through story integration -- Growth comes from narrative development - -### 3. 
Personality is Expression -- Character emerges from consistent patterns -- Behavior flows from personal narrative -- Responses reflect core identity -- Growth maintains character consistency

diff --git a/requirements.txt b/requirements.txt
index 4623f54..c67b6c3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,4 +2,7 @@ ollama
 chromadb
 llama-cpp-python
 numpy
-appdirs
\ No newline at end of file
+appdirs
+pydantic
+psutil
+torch
diff --git a/src/nfs_simple_lab_scenario.py b/src/nfs_simple_lab_scenario.py
index f5d9f8b..3629373 100644
--- a/src/nfs_simple_lab_scenario.py
+++ b/src/nfs_simple_lab_scenario.py
@@ -550,7 +550,7 @@ async def demo_scenario():
 
     # Perform global cleanup before initializing new LLM
     global_cleanup()
 
-    llm = OllamaInterface()
+    llm = LlamaInterface()
     vector_store: VectorStore = ChromaStore(collection_name="research_lab")
     logger.info(f"Initialized Chroma vector store")

From c626fac7f6c8f8dc09b313eee8992270dac109cd Mon Sep 17 00:00:00 2001
From: Leon van Bokhorst
Date: Fri, 25 Oct 2024 13:05:43 +0200
Subject: [PATCH 13/14] Test config, tested embedding_cache and language_models

---
 .vscode/settings.json         |   7 ++
 pytest.ini                    |   3 +
 requirements.txt              |   4 +
 src/__init__.py               |   0
 src/language_models.py        |   6 +-
 tests/conftest.py             |   5 +
 tests/test_embedding_cache.py |  35 ++++++
 tests/test_language_models.py | 224 ++++++++++++++++++++++++++++++++++
 8 files changed, 282 insertions(+), 2 deletions(-)
 create mode 100644 .vscode/settings.json
 create mode 100644 pytest.ini
 create mode 100644 src/__init__.py
 create mode 100644 tests/conftest.py
 create mode 100644 tests/test_embedding_cache.py
 create mode 100644 tests/test_language_models.py

diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..9b38853
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,7 @@
+{
+    "python.testing.pytestArgs": [
+        "tests"
+    ],
+    "python.testing.unittestEnabled": false,
+    "python.testing.pytestEnabled": true
+}
\ No newline at end of file
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..e805e1e
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+addopts = -v --tb=short
+testpaths = tests
diff --git a/requirements.txt b/requirements.txt
index c67b6c3..51159cc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,3 +6,7 @@ appdirs
 pydantic
 psutil
 torch
+pytest
+pytest-asyncio
+pytest-mock
+pytest-cov
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/language_models.py b/src/language_models.py
index c4c4519..a824575 100644
--- a/src/language_models.py
+++ b/src/language_models.py
@@ -10,7 +10,9 @@
 
 import ollama
 import torch
 from llama_cpp import Llama
-from config import MODEL_CONFIGS
+from config import MODEL_CONFIGS as _MODEL_CONFIGS
+
+MODEL_CONFIGS = _MODEL_CONFIGS  # This line makes it easier to mock
 
 from embedding_cache import EmbeddingCache
@@ -43,7 +45,7 @@ def async_error_handler(func: Callable) -> Callable:
     async def wrapper(*args, **kwargs):
         try:
             return await func(*args, **kwargs)
-        except (NetworkError, APIError) as e:
+        except Exception as e:
             raise ModelError(str(e)) from e
 
     return wrapper
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..f65101c
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,5 @@
+import sys
+import os
+
+# Add the src directory to the Python path
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
diff --git a/tests/test_embedding_cache.py b/tests/test_embedding_cache.py
new file mode 100644
index 0000000..9ce543a
--- /dev/null
+++ b/tests/test_embedding_cache.py
@@ -0,0 +1,35 @@
+import pytest
+from src.embedding_cache import EmbeddingCache
+
+@pytest.fixture
+def cache():
+    return EmbeddingCache()
+
+def test_get_stable_hash():
+    text = "Hello, world!"
+    expected_hash = "315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3"
+    assert EmbeddingCache.get_stable_hash(text) == expected_hash
+
+def test_set_and_get(cache):
+    key = "test_key"
+    value = [0.1, 0.2, 0.3]
+    cache.set(key, value)
+    assert cache.get(key) == value
+
+def test_get_nonexistent_key(cache):
+    assert cache.get("nonexistent_key") is None
+
+def test_clear(cache):
+    cache.set("key1", [1.0, 2.0])
+    cache.set("key2", [3.0, 4.0])
+    cache.clear()
+    assert cache.get("key1") is None
+    assert cache.get("key2") is None
+
+def test_multiple_sets_same_key(cache):
+    key = "test_key"
+    value1 = [0.1, 0.2, 0.3]
+    value2 = [0.4, 0.5, 0.6]
+    cache.set(key, value1)
+    cache.set(key, value2)
+    assert cache.get(key) == value2
diff --git a/tests/test_language_models.py b/tests/test_language_models.py
new file mode 100644
index 0000000..3a962e0
--- /dev/null
+++ b/tests/test_language_models.py
@@ -0,0 +1,224 @@
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+from pathlib import Path
+
+# Mock the config module
+mock_MODEL_CONFIGS = {
+    "balanced": {
+        "chat": {"model_name": "test_chat_model", "path": Path("/path/to/chat/model")},
+        "embedding": {
+            "model_name": "test_embedding_model",
+            "path": Path("/path/to/embedding/model"),
+        },
+        "optimal_config": {"n_ctx": 2048, "n_batch": 512},
+    }
+}
+
+# Mock the EmbeddingCache
+mock_EmbeddingCache = MagicMock()
+
+# Patch both config and embedding_cache imports
+with patch.dict("sys.modules", {
+    "config": MagicMock(),
+    "embedding_cache": MagicMock(),
+    "torch": MagicMock(),  # Mock torch
+    "llama_cpp": MagicMock()  # Mock llama_cpp
+}):
+    import sys
+
+    sys.modules["config"].MODEL_CONFIGS = mock_MODEL_CONFIGS
+    sys.modules["embedding_cache"].EmbeddingCache = mock_EmbeddingCache
+    from src.language_models import (
+        LanguageModel,
+        OllamaInterface,
+        LlamaInterface,
+        ModelError,
+        ModelInitializationError,
+        async_error_handler,  # Add this import
+    )
+
+
+@pytest.fixture
+def mock_ollama():
+    with patch("src.language_models.ollama") as mock:
+        yield mock
+
+
+@pytest.fixture
+def mock_llama():
+    with patch("src.language_models.Llama") as mock:
+        yield mock
+
+
+@pytest.fixture(autouse=True)
+def mock_config():
+    with patch("src.language_models.MODEL_CONFIGS", mock_MODEL_CONFIGS):
+        yield
+
+
+class TestLanguageModel:
+    @pytest.fixture
+    def concrete_language_model(self):
+        class ConcreteLanguageModel(LanguageModel):
+            @async_error_handler
+            async def generate(self, prompt: str) -> str:
+                return f"Generated: {prompt}"
+
+            @async_error_handler
+            async def _generate_embedding(self, text: str) -> list[float]:
+                if not text:
+                    raise Exception("Test error")
+                return [0.1, 0.2, 0.3]
+
+        return ConcreteLanguageModel()
+
+    @pytest.mark.asyncio
+    async def test_generate_embedding_cached(self, concrete_language_model):
+        text = "Test text"
+        cached_embedding = [0.4, 0.5, 0.6]
+        concrete_language_model.embedding_cache.get.return_value = cached_embedding
+
+        result = await concrete_language_model.generate_embedding(text)
+        assert result == cached_embedding
+
+    @pytest.mark.asyncio
+    async def test_generate_embedding_not_cached(self, concrete_language_model):
+        text = "New test text"
+        expected_embedding = [0.1, 0.2, 0.3]
+        concrete_language_model.embedding_cache.get.return_value = None
+
+        result = await concrete_language_model.generate_embedding(text)
+        assert result == expected_embedding
+        concrete_language_model.embedding_cache.set.assert_called_once_with(text, expected_embedding)
+
+    @pytest.mark.asyncio
+    async def test_generate_embedding_empty_text(self, concrete_language_model, caplog):
+        result = await concrete_language_model.generate_embedding("")
+        assert result == []
+        assert "Attempted to generate embedding for empty text" in caplog.text
+
+
+class TestOllamaInterface:
+    @pytest.mark.asyncio
+    async def test_init(self, mock_ollama, mock_config):
+        ollama_interface = OllamaInterface()
+        assert ollama_interface.chat_model_name == "test_chat_model"
+        assert ollama_interface.embedding_model_name == "test_embedding_model"
+        mock_ollama.ps.assert_called_once()
+
+    @pytest.mark.asyncio
+    async def test_generate(self, mock_ollama, mock_config):
+        ollama_interface = OllamaInterface()
+        mock_ollama.chat.return_value = {"message": {"content": "Generated response"}}
+
+        response = await ollama_interface.generate("Test prompt")
+        assert response == "Generated response"
+        mock_ollama.chat.assert_called_once_with(
+            model="test_chat_model",
+            messages=[{"role": "user", "content": "Test prompt"}],
+        )
+
+    @pytest.mark.asyncio
+    async def test_generate_embedding(self, mock_ollama, mock_config):
+        ollama_interface = OllamaInterface()
+        mock_ollama.embeddings.return_value = {"embedding": [0.1, 0.2, 0.3]}
+
+        embedding = await ollama_interface.generate_embedding("Test text")
+        assert embedding == [0.1, 0.2, 0.3]
+        mock_ollama.embeddings.assert_called_once_with(
+            model="test_embedding_model", prompt="Test text"
+        )
+
+
+class TestLlamaInterface:
+    @pytest.mark.asyncio
+    async def test_init(self, mock_llama, mock_config):
+        llama_interface = LlamaInterface()
+        assert llama_interface.chat_model_path == Path('/path/to/chat/model')
+        assert llama_interface.embedding_model_path == Path('/path/to/embedding/model')
+        assert llama_interface.optimal_config == {'n_ctx': 2048, 'n_batch': 512}
+        mock_llama.assert_called()
+
+    @pytest.mark.asyncio
+    async def test_generate(self, mock_llama, mock_config):
+        llama_interface = LlamaInterface()
+        mock_llama.return_value.create_chat_completion.return_value = {
+            'choices': [{'message': {'content': 'Generated response'}}]
+        }
+
+        response = await llama_interface.generate("Test prompt")
+        assert response == 'Generated response'
+        mock_llama.return_value.create_chat_completion.assert_called_once_with(
+            messages=[{'role': 'user', 'content': 'Test prompt'}]
+        )
+
+    @pytest.mark.asyncio
+    async def test_generate_embedding(self, mock_llama, mock_config):
+        llama_interface = LlamaInterface()
+        mock_llama.return_value.embed.return_value = [0.1, 0.2, 0.3]
+
+        embedding = await llama_interface.generate_embedding("Test text")
+        assert embedding == [0.1, 0.2, 0.3]
+        mock_llama.return_value.embed.assert_called_once_with("Test text")
+
+    @pytest.mark.asyncio
+    async def test_cleanup(self, mock_llama, mock_config):
+        llama_interface = LlamaInterface()
+
+        await llama_interface.cleanup()
+
+        # Check that the models have been deleted
+        assert not hasattr(llama_interface, 'llm')
+        assert not hasattr(llama_interface, 'embedding_model')
+
+@pytest.mark.asyncio
+async def test_model_error():
+    class ErrorModel(LanguageModel):
+        @async_error_handler
+        async def generate(self, prompt: str) -> str:
+            raise Exception("Test error")
+
+        @async_error_handler
+        async def _generate_embedding(self, text: str) -> list[float]:
+            raise Exception("Test error")
+
+    error_model = ErrorModel()
+    with pytest.raises(ModelError):
+        await error_model.generate("Test prompt")
+
+    with pytest.raises(ModelError):
+        await error_model.generate_embedding("Test text")
+
+
+def test_model_initialization_error(mock_config):
+    with pytest.raises(ModelInitializationError):
+        OllamaInterface(quality_preset="invalid_preset")
+
+    with pytest.raises(ModelInitializationError):
+        LlamaInterface(quality_preset="invalid_preset")

From e6d7ce2f6c03d29e306a22655efab4b75974b4ee Mon Sep 17 00:00:00 2001
From: Leon van Bokhorst
Date: Fri, 25 Oct 2024 13:12:46 +0200
Subject: [PATCH 14/14] Adds basic CI testing

---
 .github/workflows/ci.yml | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
 create mode 100644 .github/workflows/ci.yml

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..cc1a9d4
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,21 @@
+name: CI
+
+on:
+  pull_request:
+    branches: [dev, main]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python 3.12
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.12.6'
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+      - name: Run tests
+        run: pytest tests/