Skip to content

Commit 58df71d

Browse files
authored
Add files via upload
1 parent 98c72b7 commit 58df71d

File tree

3 files changed

+236
-0
lines changed

3 files changed

+236
-0
lines changed

scripts/meta_state_logger.py

Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
1+
# meta_state_logger.py
2+
import argparse, time, os, datetime as dt
3+
4+
def tail_last_line(path):
    """Return the last non-blank line of *path*, or None if the file is
    missing or contains no non-blank content.

    Only the final 4 KiB are read, so this stays cheap even when the CSV
    grows large. Bytes are decoded as UTF-8 with undecodable sequences
    dropped, since a partial trailing write may split a multi-byte char.
    """
    try:
        with open(path, 'rb') as fh:
            fh.seek(0, os.SEEK_END)
            total = fh.tell()
            if total == 0:
                return None
            # Jump back at most 4 KiB (or to the start of a smaller file).
            fh.seek(-min(4096, total), os.SEEK_END)
            text = fh.read().decode('utf-8', errors='ignore')
    except FileNotFoundError:
        # Writer may not have created the file yet; caller treats None as "no data".
        return None
    rows = [row for row in text.splitlines() if row.strip()]
    if not rows:
        return None
    return rows[-1]
18+
19+
def parse_metrics(line):
    """Parse one CSV metrics line into a dict, or return None.

    Expected column order: cycle, alpha, omega, sigma, drift, confidence,
    entropy. If the strict parse fails (short row, non-numeric field), a
    fallback collects every field that parses as a float and, when at
    least five values are present, maps the first to ``cycle`` and the
    last four to sigma/drift/confidence/entropy, leaving alpha/omega NaN.

    Returns None for empty input or an unrecoverable row.
    """
    if not line:
        return None
    parts = [p.strip() for p in line.split(',')]
    try:
        return dict(
            cycle=int(parts[0]),
            alpha=float(parts[1]),
            omega=float(parts[2]),
            sigma=float(parts[3]),
            drift=float(parts[4]),
            confidence=float(parts[5]),
            entropy=float(parts[6]),
        )
    # Only the parse failures we expect: too few columns or a bad number.
    except (ValueError, IndexError):
        vals = []
        for p in parts:
            try:
                vals.append(float(p))
            except ValueError:
                continue
        if len(vals) >= 5:
            try:
                # int() raises on NaN/inf cycle values; treat as unparseable.
                return dict(cycle=int(vals[0]), alpha=float('nan'), omega=float('nan'),
                            sigma=vals[-4], drift=vals[-3], confidence=vals[-2], entropy=vals[-1])
            except (ValueError, OverflowError):
                return None
        return None
42+
43+
def classify_state(entropy, drift, confidence):
    """Map the three health metrics onto a coarse state label.

    Checks run in priority order: high entropy or low confidence wins
    (CONFUSED), then excessive drift (DRIFTING), then mild entropy
    (WANDERING); anything else is STABLE.
    """
    if entropy > 0.05 or confidence < 0.95:
        return "CONFUSED"
    if drift > 0.02:
        return "DRIFTING"
    return "WANDERING" if entropy > 0.02 else "STABLE"
48+
49+
def advice(state):
    """Return a short operator hint for *state*; generic advice if unknown."""
    hints = {
        "STABLE": "All good. Maintain parameters.",
        "WANDERING":"Gently reduce temperature/top_p. Add brief anchor.",
        "DRIFTING": "Lower top_p, increase repeat_penalty. Re-center on the task.",
        "CONFUSED": "Strong reset: anchor + lower temperature; consider re-prompt.",
        "REALIGNING":"Stabilizing… keep parameters conservative for a bit.",
    }
    return hints.get(state, "Observe and adapt conservatively.")
57+
58+
def main():
    """Watch a metrics CSV and keep a human-readable state file up to date.

    Polls ``--csv`` every ``--interval`` seconds, and whenever a new cycle
    appears, rewrites ``--out`` with a small status report plus advice.
    Runs until interrupted (Ctrl+C).
    """
    ap = argparse.ArgumentParser(description="Human-readable state logger from RLang CSV.")
    ap.add_argument("--csv", default="out.csv")
    ap.add_argument("--out", default="meta_state.txt")
    ap.add_argument("--interval", type=float, default=2.0)
    args = ap.parse_args()

    last_written_cycle = None
    # NOTE(review): separator restored — the original message rendered the two
    # paths run together (a "→" was likely lost in transit).
    print(f"[meta] watching {args.csv} → {args.out}")
    while True:
        line = tail_last_line(args.csv)
        m = parse_metrics(line)
        if m and m.get('cycle') != last_written_cycle:
            state = classify_state(m['entropy'], m['drift'], m['confidence'])
            # datetime.utcnow() is deprecated (3.12+); use an aware UTC time.
            # The "Z" suffix was dropped since the template already says "UTC".
            now = dt.datetime.now(dt.timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
            # Rewrite (not append) so readers always see exactly one report.
            with open(args.out, 'w', encoding='utf-8') as f:
                f.write(
                    f"""# ResonantBridge — Meta State
time: {now} UTC
cycle: {m['cycle']}
sigma: {m['sigma']:.5f}
entropy: {m['entropy']:.5f}
drift: {m['drift']:.5f}
confidence: {m['confidence']:.5f}
state: {state}

advice: {advice(state)}
"""
                )
            last_written_cycle = m['cycle']
            print(f"[meta] cycle {m['cycle']} → {state}")
        time.sleep(args.interval)

if __name__ == "__main__":
    main()

scripts/ollama_sigma_feed.py

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
"""
2+
ollama_sigma_feed.py
3+
--------------------
4+
Streams tokens from a local Ollama model and writes a rolling "sigma" proxy
5+
to a text file for the Rlang autopoietic bridge to read.
6+
7+
Requirements:
8+
- Python 3.9+
9+
- `requests` library
10+
- Ollama running locally (default http://localhost:11434)
11+
12+
Usage:
13+
python ollama_sigma_feed.py --model llama3:8b --prompt "Explain resonance in one paragraph." --sigma-file sigma_feed.txt --window 64
14+
15+
Sigma proxy (no logprobs mode):
16+
We compute a rolling token-distribution entropy H over the last N tokens.
17+
Then map H into a bounded coherence measure sigma ~= 1 + k*(H0 - H),
18+
where H0 is a reference entropy (approx upper bound for your model/tokenizer window).
19+
20+
If your Ollama build exposes token logprobs in the stream, you can enable
21+
`--use-logprobs` and we will compute proper entropy from probabilities.
22+
"""
23+
import argparse, time, json, collections, math, requests
24+
25+
def rolling_entropy_from_tokens(tokens):
    """Return the Shannon entropy (nats) of the first characters of *tokens*.

    A cheap proxy when token probabilities are unavailable: count the
    leading character of each non-empty token and compute the entropy of
    that frequency distribution. Empty input yields 0.0.
    """
    counts = collections.Counter(tok[:1] for tok in tokens if tok)
    n = sum(counts.values()) or 1  # guard the division for empty windows
    entropy = 0.0
    for c in counts.values():
        pi = c / n
        # max() floors pi away from zero so log never sees 0.
        entropy -= pi * math.log(max(pi, 1e-12))
    return entropy
33+
34+
def map_entropy_to_sigma(H, H_ref=2.0, k=0.25):
    """Linearly map entropy *H* to a coherence value sigma.

    sigma == 1 at H == H_ref; below-reference entropy (more coherent)
    pushes sigma above 1, above-reference entropy pushes it below 1.
    *k* sets the slope of the mapping.
    """
    return 1.0 + (H_ref - H) * k
37+
38+
def stream_ollama(model, prompt, host="http://localhost:11434", use_logprobs=False):
    """Yield decoded JSON events from Ollama's streaming /api/generate endpoint.

    Parameters
    ----------
    model, prompt : str
        Model tag and prompt forwarded verbatim to the API.
    host : str
        Base URL of the Ollama server.
    use_logprobs : bool
        Accepted for interface compatibility; not currently used here
        (see module docstring — logprob support depends on the Ollama build).

    Yields dicts parsed from each non-empty stream line; lines that fail
    to decode as JSON are skipped.
    """
    url = f"{host}/api/generate"
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": True,
        # You may set options here if desired:
        # "options": {"temperature": 0.8, "top_p": 0.9}
    }
    # Connect timeout only: without one a dead host hangs forever. The read
    # timeout stays None because generation gaps can legitimately be long.
    with requests.post(url, json=payload, stream=True, timeout=(10, None)) as r:
        r.raise_for_status()
        for line in r.iter_lines():
            if not line:
                continue
            try:
                data = json.loads(line.decode("utf-8"))
            # UnicodeDecodeError and JSONDecodeError are both ValueErrors.
            except ValueError:
                continue
            yield data
57+
58+
def main():
    """CLI entry point: stream one generation and keep the sigma file fresh.

    Writes a neutral "1.00" first, then rewrites the file with the latest
    clamped sigma after every streamed chunk, so a concurrent reader
    always sees a single current value.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="llama3:8b")
    parser.add_argument("--prompt", default="Explain resonance as breathing of a system.")
    parser.add_argument("--sigma-file", default="sigma_feed.txt")
    parser.add_argument("--window", type=int, default=64, help="rolling window (tokens)")
    parser.add_argument("--host", default="http://localhost:11434")
    parser.add_argument("--use-logprobs", action="store_true", help="if available in your Ollama stream")
    opts = parser.parse_args()

    window = collections.deque(maxlen=opts.window)
    # Rough upper reference entropy; adjust for a different tokenizer.
    reference_entropy = 2.0

    with open(opts.sigma_file, "w", encoding="utf-8") as handle:
        handle.write("1.00\n")  # initial neutral value

    print(f"[ollama_sigma_feed] connecting to {opts.host}, model={opts.model}")
    for event in stream_ollama(opts.model, opts.prompt, opts.host, opts.use_logprobs):
        if event.get("done"):
            break
        piece = event.get("response", "")
        if not piece:
            continue
        # Coarse whitespace split approximates sub-tokens without logprobs.
        window.extend(piece.split())
        entropy = rolling_entropy_from_tokens(window)
        value = map_entropy_to_sigma(entropy, H_ref=reference_entropy, k=0.25)
        # Clamp to the "living" range [0.5, 1.5].
        value = min(1.5, max(0.5, value))
        with open(opts.sigma_file, "w", encoding="utf-8") as handle:
            handle.write(f"{value:.4f}\n")
    print("[ollama_sigma_feed] stream complete; final sigma written.")

if __name__ == "__main__":
    main()

scripts/ollama_sigma_feed_live.py

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
# ollama_sigma_feed_live.py
2+
import argparse, subprocess, time, sys, os
3+
4+
def main():
    """Repeatedly invoke ollama_sigma_feed.py so σ(t) stays fresh.

    Each iteration runs the feed script as a subprocess with the same
    model/prompt/sigma-file settings, echoes its last output line, then
    sleeps for --interval seconds. Runs forever unless --max-runs is set
    or the user presses Ctrl+C.
    """
    ap = argparse.ArgumentParser(description="Keep σ(t) fresh by repeatedly calling ollama_sigma_feed.py.")
    ap.add_argument("--model", required=True, help='e.g. "llama3.1:8b"')
    ap.add_argument("--prompt", default="Explain resonance as breathing of a system.")
    ap.add_argument("--sigma-file", default="sigma_feed.txt")
    ap.add_argument("--window", type=int, default=64)
    ap.add_argument("--host", default="http://127.0.0.1:11434")
    ap.add_argument("--interval", type=float, default=1.0, help="seconds to wait between runs")
    ap.add_argument("--use-logprobs", action="store_true")
    ap.add_argument("--max-runs", type=int, default=0, help="0 = infinite; otherwise run this many times and exit")
    args = ap.parse_args()

    feeder = os.path.join(os.path.dirname(__file__), "ollama_sigma_feed.py")
    if not os.path.isfile(feeder):
        print("[live] ERROR: ollama_sigma_feed.py not found next to this script.", file=sys.stderr)
        sys.exit(1)

    print(f"[live] starting σ loop → model={args.model} host={args.host} window={args.window} interval={args.interval}s")
    print(f"[live] writing to: {args.sigma_file} (Ctrl+C to stop)")
    completed = 0
    try:
        while True:
            # List form (shell=False) keeps arguments safe from shell parsing.
            invocation = [
                sys.executable, feeder,
                "--model", args.model,
                "--prompt", args.prompt,
                "--sigma-file", args.sigma_file,
                "--window", str(args.window),
                "--host", args.host,
            ]
            if args.use_logprobs:
                invocation.append("--use-logprobs")

            # Merge stderr into stdout so a failing child still surfaces text.
            result = subprocess.run(invocation, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
            tail = (result.stdout or "").strip().splitlines()
            if tail:
                print("[live] " + tail[-1])

            completed += 1
            if args.max_runs and completed >= args.max_runs:
                break

            time.sleep(args.interval)
    except KeyboardInterrupt:
        print("\n[live] stopped by user.")

if __name__ == "__main__":
    main()

0 commit comments

Comments
 (0)