forked from NeuroSkill-com/skill
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcli.ts
More file actions
3946 lines (3622 loc) · 177 KB
/
cli.ts
File metadata and controls
3946 lines (3622 loc) · 177 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// SPDX-License-Identifier: GPL-3.0-only
// Copyright (C) 2026 NeuroSkill.com
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 3 only.
#!/usr/bin/env npx tsx
/** Current CLI version — bump when breaking changes are made. */
const CLI_VERSION = "1.1.0";
/**
* cli.ts — Command-line interface for the Skill WebSocket API.
*
* Usage:
* npx tsx cli.ts <command> [options]
*
* Commands:
* status Full device/session/embeddings/scores snapshot
* session [index] All metrics + trends for one session (0=latest, 1=prev, …)
* sessions List all recording sessions across all days
* say "text" Speak text aloud via on-device TTS (fire-and-forget)
* notify "title" ["body"] Show a native OS notification
* label "text" Create a timestamped annotation on the current moment
* search-labels "query" Search labels by free text (text/context/both modes)
* interactive "keyword" Cross-modal 4-layer graph search (labels → EEG → found labels)
* search ANN EEG-similarity search (auto: last session, k=5)
* compare Side-by-side A/B metrics (auto: last 2 sessions)
* sleep [index] Sleep staging — index selects session (0=latest, 1=prev, …)
* calibrate Open calibration window and start immediately
* timer Open focus-timer window and start work phase immediately
* umap 3D UMAP projection with live progress bar
* listen Stream broadcast events for N seconds
* hooks List Proactive Hook rules, scenarios, and last-trigger metadata
* hooks suggest "kw1,kw2" Suggest threshold from real EEG/label data
* hooks log [--limit N --offset M] View paginated hook trigger audit log rows
* dnd Show DND automation status (config + live eligibility + OS state)
* dnd on Force-enable DND immediately (bypass EEG threshold)
* dnd off Force-disable DND immediately
* llm status LLM server status (stopped/loading/running)
* llm start Load active model and start LLM inference server
* llm stop Stop LLM inference server and free GPU memory
* llm catalog Show model catalog with download states
* llm download <filename> Download a GGUF model (fire-and-forget; poll catalog for progress)
* llm cancel <filename> Cancel an in-progress model download
* llm delete <filename> Delete a locally-cached model file
* llm logs Print last 500 LLM server log lines
* llm chat Interactive multi-turn chat REPL (WebSocket only)
* llm chat "message" Single-shot: send one message and stream the reply
* llm chat "describe" --image a.jpg --image b.png Vision: attach images to message
* raw '{"command":"..."}' Send arbitrary JSON, print full response
*
* Transport selection (default: try WebSocket, fall back to HTTP):
* --ws Force WebSocket (error if unavailable)
* --http Force HTTP REST (no live events)
* (neither) Auto: try WebSocket, silently fall back to HTTP
*
* All time-range commands auto-select from your actual session history when
* no --start/--end flags are given. The resolved parameters are printed as a
* rerun command you can copy-paste for reproducible results.
*
* Options:
* --port <n> Connect to explicit port (skips mDNS discovery)
* --json Output raw JSON (no colors, pipeable to jq)
* --full Print full JSON response in addition to the human-readable summary
* --dot (interactive) Output Graphviz DOT format (pipe to dot -Tsvg)
* --mode <m> Search mode for search-labels: text|context|both (default: text)
* --k-text <n> (interactive) k for text-label search (default: 5)
* --k-eeg <n> (interactive) k for EEG-similarity search (default: 5)
* --k-labels <n> (interactive) k for label-proximity search (default: 3)
* --reach <n> (interactive) temporal reach in minutes around EEG points (default: 10)
* --help Show full help with examples
* --version Print CLI version and exit
* --no-color Disable ANSI colors (also honours NO_COLOR env var)
* --poll <n> (status) Re-poll every N seconds and print fresh snapshots
*
* When parameters are omitted, ranges are auto-selected from your session
* history. A `rerun:` line is printed so you can copy-paste it later.
*
* Examples:
* npx tsx cli.ts status # → device, scores, sleep, embeddings
* npx tsx cli.ts status --json | jq '.scores' # → pipe to jq
* npx tsx cli.ts sessions # → 3 session(s) with timestamps
* npx tsx cli.ts sessions --json | jq '.sessions[0]'
* npx tsx cli.ts say "Eyes open. Starting calibration."
* npx tsx cli.ts say "Break time. Next: Eyes Closed." --voice Jasper
* npx tsx cli.ts say "Break time. Next: Eyes Closed." --http
* npx tsx cli.ts notify "Session done" "Great work!"
* npx tsx cli.ts label "meditation start" # → { label_id: 42 }
* npx tsx cli.ts label "eyes closed" --context "4-7-8 breathing" --at 1740412800
* npx tsx cli.ts calibrations # → list all profiles
* npx tsx cli.ts calibrations get <id> # → full profile JSON
* npx tsx cli.ts search-labels "focused reading" # → semantic label search
* npx tsx cli.ts search-labels "deep work" --mode context --k 5
* npx tsx cli.ts interactive "deep focus" # → cross-modal graph (summary)
* npx tsx cli.ts interactive "meditation" --json # → raw JSON (nodes + edges + dot)
* npx tsx cli.ts interactive "flow state" --dot | dot -Tsvg > graph.svg
* npx tsx cli.ts interactive "anxiety" --full --k-text 8 --k-eeg 8 --reach 15
* npx tsx cli.ts search # auto: last session, k=5
* npx tsx cli.ts search --start 1740412800 --end 1740415500 --k 10
* npx tsx cli.ts compare # auto: last 2 sessions as A/B
* npx tsx cli.ts compare --a-start 1740380100 --a-end 1740382665 \
* --b-start 1740412800 --b-end 1740415510
* npx tsx cli.ts sleep # auto: last 24h → sleep summary
* npx tsx cli.ts sleep --start 1740380100 --end 1740415510
* npx tsx cli.ts calibrate # → opens calibration + auto-starts
* npx tsx cli.ts timer # → opens focus-timer + auto-starts
* npx tsx cli.ts umap # auto: last 2 sessions → 3D points
* npx tsx cli.ts umap --json | jq '.points | length'
* npx tsx cli.ts listen --seconds 30 # 30s event stream
* npx tsx cli.ts hooks --json | jq '.hooks[] | {name: .hook.name, scenario: .hook.scenario, last: .last_trigger.triggered_at_utc}'
* npx tsx cli.ts hooks suggest "focus,deep work"
* npx tsx cli.ts hooks log --limit 10 --offset 0
* npx tsx cli.ts raw '{"command":"search","start_utc":1740412800,"end_utc":1740415500,"k":3}'
*
* Requires: Node ≥ 18, bonjour-service + ws (devDependencies).
*/
import { Bonjour } from "bonjour-service";
import { execSync } from "child_process";
import WebSocket from "ws";
// ── ANSI colors ───────────────────────────────────────────────────────────────
// These are module-level `let`s (deliberately not `const`) so that
// `applyNoColor()` can zero them all out when --no-color / NO_COLOR /
// non-TTY mode is active. Every printer below interpolates these directly.
let GRAY = "\x1b[90m";
let GREEN = "\x1b[32m";
let RED = "\x1b[31m";
let CYAN = "\x1b[36m";
let YELLOW = "\x1b[33m";
let BLUE = "\x1b[34m";
let MAGENTA= "\x1b[35m";
let BOLD = "\x1b[1m";
let DIM = "\x1b[2m";
let RESET = "\x1b[0m";
/**
 * Disable all colored output by resetting every ANSI escape variable to the
 * empty string, so subsequent interpolations emit no escape codes at all.
 * Invoked once at startup when the NO_COLOR env var is set, stdout is not a
 * TTY, or the user passes `--no-color`.
 */
function applyNoColor(): void {
  RESET = "";
  DIM = "";
  BOLD = "";
  MAGENTA = "";
  BLUE = "";
  YELLOW = "";
  CYAN = "";
  RED = "";
  GREEN = "";
  GRAY = "";
}
// Output-mode flags — presumably set from parsed args at startup (the code
// that assigns them is outside this chunk; verify in main()).
let jsonMode = false;
let fullMode = false;
// Safety-net timer handle; cleared in printError() so the process can exit.
let globalTimer: ReturnType<typeof setTimeout>;
// Honour NO_COLOR (https://no-color.org/) and non-TTY stdout.
// Checked before parseArgs() so that color output is never emitted when the
// caller has opted out via environment convention.
let noColorMode = !!process.env.NO_COLOR || !process.stdout.isTTY;
// ── Colorized JSON printer ────────────────────────────────────────────────────
/**
 * Serialize any JS value to a printable JSON string.
 *
 * In `--json` mode this is plain `JSON.stringify(obj, null, 2)` output with
 * no ANSI codes (pipe-safe). Otherwise the value is recursively colorized
 * via {@link colorizeValue}: keys blue, strings green, numbers cyan,
 * booleans yellow, null magenta.
 *
 * @param obj - The value to serialize.
 * @param indent - Current nesting depth (used for pretty-print indentation).
 * @returns A printable string (may contain ANSI escape codes).
 */
function colorizeJson(obj: unknown, indent = 0): string {
  return jsonMode ? JSON.stringify(obj, null, 2) : colorizeValue(obj, indent);
}
/**
 * Recursively colorize a single JS value with ANSI escape codes.
 *
 * Type → color mapping:
 *   - `null`      → magenta
 *   - `undefined` → dim
 *   - `boolean`   → yellow
 *   - `number`    → cyan
 *   - `string`    → green (JSON-escaped via {@link escapeStr})
 *   - `Array`     → {@link colorizeArray}
 *   - `object`    → {@link colorizeObject}
 *   - anything else (function, symbol, bigint) → plain String(val)
 *
 * @param val - The value to colorize.
 * @param indent - Current nesting depth for indentation.
 */
function colorizeValue(val: unknown, indent: number): string {
  // `typeof null === "object"`, so null must be handled before the switch.
  if (val === null) return `${MAGENTA}null${RESET}`;
  switch (typeof val) {
    case "undefined":
      return `${DIM}undefined${RESET}`;
    case "boolean":
      return `${YELLOW}${val}${RESET}`;
    case "number":
      return `${CYAN}${val}${RESET}`;
    case "string":
      return `${GREEN}"${escapeStr(val)}"${RESET}`;
    case "object":
      return Array.isArray(val)
        ? colorizeArray(val, indent)
        : colorizeObject(val as Record<string, unknown>, indent);
    default:
      return String(val);
  }
}
/**
 * Escape special characters so a string can be displayed inside JSON-style
 * double quotes.
 *
 * Handles backslash, double-quote, newline, carriage return, and tab.
 * (Previously `\r` was passed through raw, which let a stray carriage return
 * in label text overwrite the rendered terminal line.) Other control
 * characters are passed through unchanged — this is a display helper, not a
 * strict JSON encoder.
 *
 * @param s - Raw string value.
 * @returns The escaped string, safe to wrap in double quotes for display.
 */
function escapeStr(s: string): string {
  return s
    .replace(/\\/g, "\\\\")
    .replace(/"/g, '\\"')
    .replace(/\n/g, "\\n")
    .replace(/\r/g, "\\r")
    .replace(/\t/g, "\\t");
}
/**
 * Colorize a JSON array with ANSI codes.
 *
 * Arrays of at most 8 elements containing only primitives (or null) are
 * rendered inline for compactness: `[1, 2, 3]`. Anything longer, or
 * containing nested objects/arrays, gets one element per line with
 * depth-based indentation.
 *
 * @param arr - The array to render.
 * @param indent - Current nesting depth.
 */
function colorizeArray(arr: unknown[], indent: number): string {
  if (arr.length === 0) return `${DIM}[]${RESET}`;
  const isPrimitive = (v: unknown) => typeof v !== "object" || v === null;
  // Inline form for short, flat arrays.
  if (arr.length <= 8 && arr.every(isPrimitive)) {
    const inline = arr.map(v => colorizeValue(v, 0)).join(`${DIM},${RESET} `);
    return `${DIM}[${RESET}${inline}${DIM}]${RESET}`;
  }
  const inner = " ".repeat(indent + 1);
  const outer = " ".repeat(indent);
  const body = arr
    .map(v => inner + colorizeValue(v, indent + 1))
    .join(`${DIM},${RESET}\n`);
  return `${DIM}[${RESET}\n${body}\n${outer}${DIM}]${RESET}`;
}
/**
 * Colorize a JSON object with ANSI codes.
 *
 * Keys render in blue; values are recursively colorized. Each key-value
 * pair occupies its own line, indented one space per nesting level.
 *
 * @param obj - The object to render.
 * @param indent - Current nesting depth.
 */
function colorizeObject(obj: Record<string, unknown>, indent: number): string {
  const names = Object.keys(obj);
  if (names.length === 0) return `${DIM}{}${RESET}`;
  const inner = " ".repeat(indent + 1);
  const outer = " ".repeat(indent);
  const rows: string[] = [];
  for (const name of names) {
    const renderedKey = `${BLUE}"${name}"${RESET}`;
    const renderedVal = colorizeValue(obj[name], indent + 1);
    rows.push(`${inner}${renderedKey}${DIM}:${RESET} ${renderedVal}`);
  }
  return `${DIM}{${RESET}\n${rows.join(`${DIM},${RESET}\n`)}\n${outer}${DIM}}${RESET}`;
}
// ── Output helpers ────────────────────────────────────────────────────────────
/**
 * Print a decorative/informational line to stdout.
 * Silenced under `--json` so only machine-readable JSON reaches stdout.
 */
function print(msg: string) {
  if (jsonMode) return;
  console.log(msg);
}
/**
 * Print the final command result to stdout.
 *
 * - `--json` mode: plain JSON only (no summary, no colors — pipe-safe).
 * - `--full` mode: colorized JSON printed after the human-readable summary.
 * - default: suppressed — only the human-readable summary is shown.
 */
function printResult(data: unknown) {
  const wantDump = jsonMode || fullMode;
  if (wantDump) {
    console.log(colorizeJson(data));
  }
}
/**
 * Print a fatal error message, clean up resources, and exit with code 1.
 * In `--json` mode emits `{"error":"..."}` on stdout for programmatic
 * consumption; otherwise a red-bold message goes to stderr.
 * Never returns.
 */
function printError(msg: string): never {
  if (jsonMode) console.log(JSON.stringify({ error: msg }));
  else console.error(`${RED}${BOLD}error:${RESET} ${msg}`);
  try {
    ws?.terminate();
  } catch {
    // the socket may never have been opened — nothing to tear down
  }
  clearTimeout(globalTimer);
  process.exit(1);
}
/**
 * Print a dim informational message to stderr (e.g. "connected to ...").
 * Silenced under `--json`. Uses stderr so it never pollutes JSON on stdout.
 */
function printInfo(msg: string) {
  if (jsonMode) return;
  console.error(DIM + msg + RESET);
}
/**
 * Overwrite the current terminal line on stderr with a progress message.
 * Used for UMAP epoch progress bars and GPU wait indicators; the leading
 * `\r` carriage return keeps the cursor on the same line.
 */
function printProgress(msg: string) {
  if (jsonMode) return;
  process.stderr.write("\r" + DIM + msg + RESET);
}
/**
 * Erase the current progress line on stderr (`\r` + ANSI erase-to-EOL).
 * Called once a progress sequence finishes (e.g. UMAP completes).
 */
function clearProgress() {
  if (jsonMode) return;
  process.stderr.write("\r\x1b[K");
}
// ── WebSocket helpers ─────────────────────────────────────────────────────────
/**
 * The active WebSocket connection to the Skill server.
 * Assigned by connect()/tryConnectOnce() on a successful handshake;
 * terminated (best-effort) in printError().
 */
let ws: WebSocket;
/**
 * Send a JSON command to the Skill server and wait for the matching response.
 *
 * In WebSocket mode this is the WS request/response loop; in `--http` mode
 * `main()` replaces this binding with {@link sendHttp} so all command
 * handlers work transparently over either transport.
 *
 * The server echoes back `{ "command": "<same>" }` in every response, so we
 * pair request→response by `data.command === cmd.command`. Broadcast events
 * (which carry `"event"` instead of `"command"`) are ignored.
 *
 * @param cmd - The command payload, e.g. `{ command: "status" }`.
 *              Additional fields are forwarded as command parameters.
 * @param timeoutMs - How long to wait before rejecting (default 30 s).
 *                    UMAP poll uses 60 s because GPU work can block the WS thread.
 * @returns The parsed JSON response from the server.
 * @throws On timeout, or immediately if `ws.send()` itself fails.
 */
let send = function wsSend(cmd: { command: string; [k: string]: unknown }, timeoutMs = 30000): Promise<any> {
  return new Promise((resolve, reject) => {
    let timer: ReturnType<typeof setTimeout>;
    const handler = (raw: WebSocket.RawData) => {
      let data: any;
      try { data = JSON.parse(raw.toString()); } catch { return; }
      if (data.command === cmd.command) {
        clearTimeout(timer);
        ws.off("message", handler);
        resolve(data);
      }
    };
    ws.on("message", handler);
    try {
      ws.send(JSON.stringify(cmd));
    } catch (e) {
      // Fix: if send() throws (e.g. socket already closing), the executor's
      // rejection used to leave `handler` attached forever — detach it here
      // so the listener cannot leak, then reject with the original error.
      ws.off("message", handler);
      reject(e);
      return;
    }
    timer = setTimeout(() => {
      ws.off("message", handler);
      reject(new Error(`timeout after ${timeoutMs}ms`));
    }, timeoutMs);
  });
};
/**
 * Passively collect broadcast events from the server for a fixed duration.
 *
 * The Skill server pushes real-time events (EEG packets, PPG, scores, IMU,
 * label-created, etc.) as JSON messages with an `"event"` field. This
 * function accumulates them into an array without sending any commands;
 * unparseable frames and non-event messages are ignored.
 *
 * @param ms - How long to listen, in milliseconds.
 * @returns Array of event objects received during the window.
 */
function collectEvents(ms: number): Promise<any[]> {
  return new Promise((resolve) => {
    const collected: any[] = [];
    const onMessage = (raw: WebSocket.RawData) => {
      let parsed: any;
      try { parsed = JSON.parse(raw.toString()); } catch { return; }
      if (parsed.event) collected.push(parsed);
    };
    ws.on("message", onMessage);
    const timer = setTimeout(() => {
      ws.off("message", onMessage);
      resolve(collected);
    }, ms);
    timer.unref(); // don't keep the process alive just for this timer
  });
}
// ── Transport state ───────────────────────────────────────────────────────────
/** Active transport: set in `main()` after connection negotiation. */
let transport: "ws" | "http" = "ws";
/**
 * The resolved HTTP base URL once the port is known (sendHttp POSTs to
 * `${httpBase}/` — presumably "http://127.0.0.1:<port>"; verify in main()).
 * Set by `main()` after port discovery when HTTP transport is active.
 */
let httpBase = "";
/**
 * Send a command via HTTP POST to the universal tunnel (`POST /`).
 *
 * Used when `--http` is passed. Mirrors the WS `send()` API so command
 * handlers don't need to care which transport is active.
 *
 * @param cmd - The command payload (must have `command` field).
 * @param _timeout - Ignored for HTTP (native fetch has its own timeout).
 * @returns The parsed JSON response body.
 * @throws If the server is unreachable, answers a non-2xx status, or the
 *         body is not valid JSON.
 */
async function sendHttp(cmd: { command: string; [k: string]: unknown }, _timeout?: number): Promise<any> {
  let res: Response;
  try {
    res = await fetch(`${httpBase}/`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(cmd),
    });
  } catch (e: any) {
    throw new Error(
      `could not reach Skill at ${httpBase}/ — is it running? (${e.message})\n` +
      `  Tip: use --port <n> to specify the port manually, or omit --http for auto-transport.`
    );
  }
  // Fix: previously any non-2xx response (e.g. an HTML error page) was fed
  // straight into res.json(), surfacing as an opaque JSON parse error.
  // Fail with the actual HTTP status instead.
  if (!res.ok) {
    throw new Error(`Skill at ${httpBase}/ answered HTTP ${res.status} ${res.statusText}`);
  }
  return res.json();
}
// ── Port discovery ────────────────────────────────────────────────────────────
/**
 * Discover the Skill WebSocket server port.
 *
 * Resolution order:
 *   1. If `--port <n>` was given, return it immediately (no discovery).
 *   2. mDNS via `bonjour-service` — looks for `_skill._tcp` services on the
 *      local network. Times out after 5 s.
 *   3. `lsof` fallback (macOS/Linux) — finds processes named "skill" with
 *      TCP LISTEN sockets, then probes each port with a test WebSocket
 *      handshake (1.5 s timeout per port).
 *   4. If all strategies fail, prints an error and exits.
 *
 * @param explicitPort - Port from `--port` flag, or `null` for auto-discovery.
 * @returns The resolved TCP port number.
 */
async function discover(explicitPort: number | null): Promise<number> {
  // Use `!= null` rather than truthiness so that an explicit --port 0 would
  // be caught (port 0 is not valid for our use case, but the intent is clear).
  if (explicitPort != null) return explicitPort;
  // mDNS via bonjour-service
  printInfo("discovering Skill via mDNS…");
  const port = await new Promise<number | null>((resolve) => {
    const instance = new Bonjour();
    // NOTE: the timeout callback references `browser`, declared on the next
    // line. This is safe — the callback can only fire 5 s from now, long
    // after `browser` is initialized — but mind the ordering when editing.
    const timeout = setTimeout(() => { browser.stop(); instance.destroy(); resolve(null); }, 5000);
    const browser = instance.find({ type: "skill" }, (service) => {
      clearTimeout(timeout);
      browser.stop();
      printInfo(`found: ${service.name} @ ${service.host}:${service.port}`);
      instance.destroy();
      resolve(service.port);
    });
  });
  if (port) return port;
  // lsof fallback: enumerate candidate "skill" processes, then their
  // listening TCP ports, and probe each with a short-lived WS handshake.
  printInfo("trying lsof fallback…");
  try {
    const ps = execSync("pgrep -if 'skill' 2>/dev/null || true", { encoding: "utf8" }).trim();
    if (ps) {
      for (const pid of ps.split("\n").filter(Boolean)) {
        try {
          const lsof = execSync(`lsof -iTCP -sTCP:LISTEN -nP -p ${pid} 2>/dev/null || true`, { encoding: "utf8" });
          // Matches 4-5 digit listen ports, e.g. ":8080 (LISTEN)".
          for (const m of lsof.matchAll(/:(\d{4,5})\s+\(LISTEN\)/g)) {
            const p = Number(m[1]);
            // Probe: does a WebSocket handshake on this port succeed within 1.5 s?
            const ok = await new Promise<boolean>((resolve) => {
              try {
                const w = new WebSocket(`ws://127.0.0.1:${p}`);
                const t = setTimeout(() => { try { w.close(); } catch {} resolve(false); }, 1500);
                w.on("open", () => { clearTimeout(t); w.close(); resolve(true); });
                w.on("error", () => { clearTimeout(t); resolve(false); });
              } catch { resolve(false); }
            });
            if (ok) return p;
          }
        } catch {}
      }
    }
  } catch {}
  // Deliberate best-effort: every failure above is swallowed so we reach
  // this single, user-friendly exit point.
  printError("could not discover Skill. Is it running? Use --port <n> to specify manually.");
}
/**
 * Open a WebSocket connection to `ws://127.0.0.1:<port>`.
 *
 * Retries up to 3 times with a 1 s pause between attempts (covers the case
 * where the server is still starting up). Each attempt has a 5 s handshake
 * timeout. On success the socket is stored in the module-level `ws`.
 *
 * @param port - The TCP port to connect to.
 * @throws Exits the process (via printError) after 3 failed attempts.
 */
async function connect(port: number): Promise<void> {
  const MAX_ATTEMPTS = 3;
  for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
    try {
      await new Promise<void>((resolve, reject) => {
        const sock = new WebSocket(`ws://127.0.0.1:${port}`);
        const deadline = setTimeout(() => {
          try { sock.close(); } catch {}
          reject(new Error("timeout"));
        }, 5000);
        sock.on("open", () => { clearTimeout(deadline); ws = sock; resolve(); });
        sock.on("error", () => { clearTimeout(deadline); reject(new Error("connection refused")); });
      });
      printInfo(`connected to ws://127.0.0.1:${port}`);
      return;
    } catch (e: any) {
      if (attempt >= MAX_ATTEMPTS) printError(`failed to connect after 3 attempts: ${e.message}`);
      await new Promise(r => setTimeout(r, 1000));
    }
  }
}
/**
 * Attempt a single WebSocket connection with a short timeout.
 * On success stores the open socket in the module-level `ws` and resolves
 * `true`; on any failure resolves `false` with no side-effects and no output.
 * Used by auto-transport mode to probe WS before falling back to HTTP.
 */
async function tryConnectOnce(port: number, timeoutMs = 3000): Promise<boolean> {
  return new Promise<boolean>((resolve) => {
    let probe: WebSocket;
    try {
      probe = new WebSocket(`ws://127.0.0.1:${port}`);
    } catch {
      resolve(false);
      return;
    }
    const deadline = setTimeout(() => {
      try { probe.close(); } catch {}
      resolve(false);
    }, timeoutMs);
    probe.on("open", () => { clearTimeout(deadline); ws = probe; resolve(true); });
    probe.on("error", () => { clearTimeout(deadline); resolve(false); });
  });
}
// ── Argument parsing ──────────────────────────────────────────────────────────
/** Parsed command-line arguments (see parseArgs for population rules). */
interface Args {
  /** The subcommand name: "status", "sessions", "search", etc. */
  command: string;
  /** Explicit WebSocket port, or `null` for auto-discovery. */
  port: number | null;
  /** If true, output raw JSON without ANSI colors. */
  json: boolean;
  /** If true, print full JSON response after the human-readable summary. */
  full: boolean;
  /** Force HTTP transport (no WebSocket). */
  http: boolean;
  /** Force WebSocket transport (error if unavailable). */
  ws: boolean;
  /** Annotation text for the `label` command, or title for `notify`. */
  text?: string;
  /** Notification body for the `notify` command (second positional arg). */
  body?: string;
  /** Start of time range (unix seconds) for `search` / `sleep`. */
  start?: number;
  /** End of time range (unix seconds) for `search` / `sleep`. */
  end?: number;
  /** Start of session A (unix seconds) for `compare` / `umap`. */
  aStart?: number;
  /** End of session A (unix seconds) for `compare` / `umap`. */
  aEnd?: number;
  /** Start of session B (unix seconds) for `compare` / `umap`. */
  bStart?: number;
  /** End of session B (unix seconds) for `compare` / `umap`. */
  bEnd?: number;
  /** Number of nearest neighbors for `search` / `search-labels` (default varies by command). */
  k?: number;
  /** HNSW ef parameter for `search-labels` (default: max(k×4, 64)). */
  ef?: number;
  /** Search mode for `search-labels`: "text" | "context" | "both" (default "text"). */
  mode?: string;
  /** Calibration profile name or UUID for the `calibrate` command. */
  profile?: string;
  /** Duration in seconds for `listen` (default 5). */
  seconds?: number;
  /** Raw JSON string for the `raw` command. */
  rawJson?: string;
  /** Show per-session metric trends when listing sessions. */
  trends: boolean;
  /** Index for `session` / `sleep` commands: 0 = latest, 1 = previous, … Null = not specified. */
  sessionIndex: number | null;
  /** Keyword / query string for the `interactive` command. */
  keyword?: string;
  /** If true, emit Graphviz DOT format to stdout (interactive command only). */
  dot: boolean;
  /** k for text-label HNSW search in `interactive` (default 5). */
  kText?: number;
  /** k for EEG-similarity HNSW search in `interactive` (default 5). */
  kEeg?: number;
  /** k for label-proximity search in `interactive` (default 3). */
  kLabels?: number;
  /** Temporal reach in minutes around each EEG point in `interactive` (default 10). */
  reach?: number;
  /**
   * Re-poll interval in seconds for `status --poll N`.
   * When set, `cmdStatus` loops indefinitely, printing a fresh snapshot every
   * N seconds over the same open WebSocket connection.
   */
  poll?: number;
  /**
   * Long-form annotation body for `label --context "..."`.
   * Stored alongside the short label text; used in `search-labels` context mode.
   */
  context?: string;
  /**
   * Backdate timestamp (unix seconds) for `label --at <utc>`.
   * Overrides the default "now" for both `label_start_utc` and `label_end_utc`.
   */
  at?: number;
  /** Voice name for the `say` command (e.g. "Jasper"). Uses server default when omitted. */
  voice?: string;
  /** Subaction for the `calibrations` command: "list" | "get". */
  subAction?: string;
  /** Numeric ID for `calibrations get <id>`. */
  id?: number;
  /** Generic pagination limit for subcommands that support it (e.g. hooks log). */
  limit?: number;
  /** Generic pagination offset for subcommands that support it (e.g. hooks log). */
  offset?: number;
  /**
   * One or more image file paths for `llm chat`.
   * Each file is base64-encoded and embedded as an `image_url` content part.
   * Can be specified multiple times: `--image a.jpg --image b.png`.
   * Requires the LLM server to be loaded with a vision-capable model (mmproj).
   */
  images?: string[];
  /**
   * System prompt for `llm chat` (prepended as a `{ role: "system" }` message).
   * Example: `--system "You are a concise EEG assistant."`.
   * Omit to let the model use its built-in defaults.
   */
  system?: string;
  /**
   * Maximum tokens to generate per llm_chat turn.
   * Passed as `max_tokens` in GenParams. Default: model default (2048).
   */
  maxTokens?: number;
  /**
   * Sampling temperature for llm_chat (0 = deterministic, 1 = creative).
   * Passed as `temperature` in GenParams. Default: 0.8.
   */
  temperature?: number;
}
/**
* Parse `process.argv` into a typed {@link Args} object.
*
* Supports `--flag value` style for all options. The first positional
* argument becomes the command name; subsequent positional args are
* interpreted contextually (e.g. label text, raw JSON body).
*
* Numeric flags are validated: a non-numeric or missing value after the flag
* name is a fatal error rather than a silent `NaN`.
*
* Unrecognized flags (anything starting with `--` that is not listed below)
* are rejected with a clear error message to catch typos early.
*
* @returns Parsed arguments with defaults (`port: null`, `json: false`).
*/
function parseArgs(): Args {
  const argv = process.argv.slice(2);
  const args: Args = { command: "", port: null, json: false, full: false, http: false, ws: false, trends: false, sessionIndex: null, dot: false };
  let i = 0;
  /**
   * Read and validate the next argv token as a number (integer or float —
   * `Number()` accepts both, which `--temperature` relies on).
   * Exits with a helpful message if the value is missing or not numeric.
   */
  function nextNum(flag: string): number {
    const raw = argv[++i];
    const n = Number(raw);
    if (raw == null || raw.trim() === "" || isNaN(n)) {
      // printError not yet callable (jsonMode not set), so use process.exit directly.
      console.error(`error: ${flag} requires a numeric value (got: ${JSON.stringify(raw)})`);
      process.exit(1);
    }
    return n;
  }
  /**
   * Read the next argv token as a string value.
   * Exits if the flag appears at the end of argv with no value, instead of
   * silently storing `undefined` (previous behavior).
   */
  function nextStr(flag: string): string {
    const raw = argv[++i];
    if (raw == null) {
      console.error(`error: ${flag} requires a value`);
      process.exit(1);
    }
    return raw;
  }
  // Known flags — used by the "unrecognized flag" guard below to catch typos.
  const KNOWN_FLAGS = new Set([
    "--port", "--json", "--full", "--trends", "--http", "--ws", "--dot",
    "--help", "-h", "--version", "-v", "--no-color",
    "--start", "--end", "--a-start", "--a-end", "--b-start", "--b-end",
    "--k", "--k-text", "--k-eeg", "--k-labels", "--reach", "--ef",
    "--mode", "--profile", "--seconds", "--poll",
    "--limit", "--offset",
    "--context", "--at", "--voice",
    "--system", "--max-tokens", "--temperature", "--image",
  ]);
  while (i < argv.length) {
    const a = argv[i];
    // ── Boolean / mode flags ─────────────────────────────────────────────
    if (a === "--port") { args.port = nextNum("--port"); }
    else if (a === "--json") { args.json = true; }
    else if (a === "--full") { args.full = true; }
    else if (a === "--trends") { args.trends = true; }
    else if (a === "--http") { args.http = true; }
    else if (a === "--ws") { args.ws = true; }
    else if (a === "--dot") { args.dot = true; }
    else if (a === "--no-color") { noColorMode = true; }
    else if (a === "--help" || a === "-h") { args.command = "help"; }
    else if (a === "--version" || a === "-v") { args.command = "version"; }
    // ── Numeric-valued flags ─────────────────────────────────────────────
    else if (a === "--start") { args.start = nextNum("--start"); }
    else if (a === "--end") { args.end = nextNum("--end"); }
    else if (a === "--a-start") { args.aStart = nextNum("--a-start"); }
    else if (a === "--a-end") { args.aEnd = nextNum("--a-end"); }
    else if (a === "--b-start") { args.bStart = nextNum("--b-start"); }
    else if (a === "--b-end") { args.bEnd = nextNum("--b-end"); }
    else if (a === "--k") { args.k = nextNum("--k"); }
    else if (a === "--k-text") { args.kText = nextNum("--k-text"); }
    else if (a === "--k-eeg") { args.kEeg = nextNum("--k-eeg"); }
    else if (a === "--k-labels") { args.kLabels = nextNum("--k-labels"); }
    else if (a === "--reach") { args.reach = nextNum("--reach"); }
    else if (a === "--ef") { args.ef = nextNum("--ef"); }
    else if (a === "--seconds") { args.seconds = nextNum("--seconds"); }
    else if (a === "--poll") { args.poll = nextNum("--poll"); }
    else if (a === "--limit") { args.limit = nextNum("--limit"); }
    else if (a === "--offset") { args.offset = nextNum("--offset"); }
    else if (a === "--at") { args.at = nextNum("--at"); }
    else if (a === "--max-tokens") { args.maxTokens = nextNum("--max-tokens"); }
    else if (a === "--temperature") { args.temperature = nextNum("--temperature"); }
    // ── String-valued flags ──────────────────────────────────────────────
    else if (a === "--mode") { args.mode = nextStr("--mode"); }
    else if (a === "--profile") { args.profile = nextStr("--profile"); }
    else if (a === "--context") { args.context = nextStr("--context"); }
    else if (a === "--voice") { args.voice = nextStr("--voice"); }
    else if (a === "--system") { args.system = nextStr("--system"); }
    else if (a === "--image") { (args.images ??= []).push(nextStr("--image")); }
    // ── Unrecognized flag guard ──────────────────────────────────────────
    // Must run BEFORE the positional branches: otherwise a typo'd flag is
    // swallowed as positional data (e.g. `label --tpyo` would set the label
    // text to "--tpyo", and `session --foo` would silently ignore --foo).
    // Every flag in KNOWN_FLAGS is handled above, so any "--" token that
    // reaches this point is unknown; the Set membership check is kept as a
    // belt-and-braces guard against the two lists drifting apart.
    else if (a.startsWith("--") && !KNOWN_FLAGS.has(a)) {
      console.error(`error: unrecognized option "${a}". Run with --help to see available options.`);
      process.exit(1);
    }
    // ── Positional arguments ─────────────────────────────────────────────
    else if (!args.command) { args.command = a.toLowerCase(); }
    else if (args.command === "label" && !args.text) { args.text = a; }
    else if (args.command === "search-labels" && !args.text) { args.text = a; }
    else if (args.command === "interactive" && !args.keyword) { args.keyword = a; }
    else if (args.command === "say" && !args.text) { args.text = a; }
    else if (args.command === "notify" && !args.text) { args.text = a; }
    else if (args.command === "notify" && !args.body) { args.body = a; }
    else if (args.command === "raw" && !args.rawJson) { args.rawJson = a; }
    else if (args.command === "llm" && !args.subAction) {
      // llm <subAction> [arg]
      args.subAction = a.toLowerCase();
    }
    else if (args.command === "llm" && args.subAction === "download" && !args.text) {
      args.text = a; // model filename
    }
    else if (args.command === "llm" && args.subAction === "cancel" && !args.text) {
      args.text = a; // model filename
    }
    else if (args.command === "llm" && args.subAction === "delete" && !args.text) {
      args.text = a; // model filename
    }
    else if (args.command === "llm" && args.subAction === "chat" && !args.text) {
      args.text = a; // user message
    }
    else if (args.command === "dnd" && !args.subAction && (a === "on" || a === "off")) {
      args.subAction = a; // "on" or "off" → maps to dnd_set { enabled: true/false }
    }
    else if (args.command === "calibrations" && !args.subAction) {
      // calibrations [list|get] [<id>]
      args.subAction = a.toLowerCase();
    }
    else if (args.command === "calibrations" && args.subAction === "get" && args.id == null) {
      const n = Number(a);
      if (isNaN(n)) { console.error(`error: calibrations get requires a numeric id (got: ${JSON.stringify(a)})`); process.exit(1); }
      args.id = n;
    }
    else if (args.command === "hooks" && !args.subAction) {
      args.subAction = a.toLowerCase();
    }
    else if (args.command === "hooks" && args.subAction === "suggest" && !args.text) {
      args.text = a;
    }
    else if (args.command === "session" || args.command === "sleep") {
      // Optional numeric session index (0=latest, 1=prev, …); non-numeric
      // extra positionals are ignored, matching previous behavior.
      const n = Number(a);
      if (!isNaN(n) && a.trim() !== "") args.sessionIndex = n;
    }
    i++;
  }
  return args;
}
/**
* Print the full help text (commands, options, examples with sample output)
* to stdout and exit with code 0.
*/
function showHelp(): never {
const m = (cmd: string, desc: string) => ` ${CYAN}${cmd.padEnd(50)}${RESET} ${desc}`;
console.log(`
${BOLD}skill cli${RESET} — command-line interface for the Skill WebSocket API
${BOLD}USAGE${RESET}
npx tsx cli.ts <command> [options]
${BOLD}COMMANDS${RESET}
${m("status [--poll <secs>]", "full device, session, embeddings, scores snapshot; re-poll every N secs")}
${m('say "text"', "speak text aloud via on-device TTS (fire-and-forget; returns immediately)")}
${m("session [index]", "all metrics + trends for one session (0=latest, 1=prev, …)")}
${m("sessions", "list all recording sessions across all days")}
${m('notify "title" ["body"]', "show a native OS notification")}
${m('label "my annotation" [--context "..."] [--at <utc>]', "create a timestamped text annotation")}
${m('search-labels "query" [--mode text|context|both] [--k <n>] [--ef <n>]', "search labels by free text")}
${m('interactive "keyword" [--k-text <n>] [--k-eeg <n>] [--k-labels <n>] [--reach <n>]', "cross-modal 4-layer graph search")}
${m("search [--start <utc>] [--end <utc>] [--k <n>]", "ANN EEG-similarity search on embeddings")}
${m("compare --a-start .. --a-end .. --b-start .. --b-end ..", "side-by-side metrics + UMAP")}
${m("sleep [index] [--start <utc>] [--end <utc>]", "sleep staging — index selects session (0=latest, 1=prev)")}
${m("calibrations [list|get <id>]", "list calibration profiles or inspect one by ID")}
${m("calibrate [--profile <name-or-id>]", "open calibration window and start profile immediately")}
${m("timer", "open focus-timer window and start work phase immediately")}
${m("umap [--a-start .. --a-end .. --b-start .. --b-end ..]", "3D UMAP projection (waits for result)")}
${m("dnd [on|off]", "show DND automation status; 'on'/'off' force-overrides immediately")}
${m("llm status", "LLM server status (stopped/loading/running)")}
${m("llm start", "load active model and start LLM inference server")}
${m("llm stop", "stop LLM inference server and free GPU memory")}
${m("llm catalog", "show model catalog with download states")}
${m("llm download <filename>", "download a GGUF model by filename (fire-and-forget)")}
${m("llm cancel <filename>", "cancel an in-progress model download")}
${m("llm delete <filename>", "delete a locally-cached model file")}
${m("llm logs", "print last 500 LLM server log lines")}
${m("llm chat", "interactive multi-turn chat REPL; type /help inside for commands")}
${m('llm chat "message"', "single-shot: send one message, stream the reply, and exit")}
${m("listen [--seconds <n>]", "listen for broadcast events (default: 5s)")}
${m("hooks", "list Proactive Hooks (scenario + last trigger metadata)")}
${m('hooks suggest "kw1,kw2"', "suggest threshold from matching labels + recent EEG embeddings")}
${m("hooks log [--limit <n>] [--offset <n>]", "show hook trigger audit history from hooks.sqlite")}
${m("raw '{\"command\":\"status\"}'", "send raw JSON, print full response")}
${BOLD}OPTIONS${RESET}
${YELLOW}--port <n>${RESET} connect to explicit port (skips mDNS discovery)
${YELLOW}--ws${RESET} force WebSocket transport (error if unavailable)
${YELLOW}--http${RESET} force HTTP transport (no live-event commands)
${DIM}(neither)${RESET} auto: try WebSocket, silently fall back to HTTP
${YELLOW}--json${RESET} output raw JSON (no colors, machine-readable)
${YELLOW}--full${RESET} print full JSON response after the human-readable summary
${YELLOW}--no-color${RESET} disable ANSI color output (also honoured via NO_COLOR env var)
${YELLOW}--poll <n>${RESET} (status) re-poll every N seconds; keeps the socket open
${YELLOW}--limit <n>${RESET} (hooks log) page size (default: 20)
${YELLOW}--offset <n>${RESET} (hooks log) row offset (default: 0)
${YELLOW}--trends${RESET} (sessions) show per-session metric trends and first/second-half deltas
${YELLOW}--context "..."${RESET} (label) long-form annotation body; used by search-labels --mode context
${YELLOW}--at <utc>${RESET} (label) backdate to a specific unix second (default: now)
${YELLOW}--mode <m>${RESET} search-labels mode: text | context | both (default: text)
${YELLOW}--ef <n>${RESET} HNSW ef parameter for search-labels (default: max(k×4, 64))
${YELLOW}--dot${RESET} (interactive) output Graphviz DOT to stdout — pipe to ${DIM}dot -Tsvg > out.svg${RESET}
${YELLOW}--k-text <n>${RESET} (interactive) k for text-label HNSW search (default: 5)
${YELLOW}--k-eeg <n>${RESET} (interactive) k for EEG-similarity HNSW search (default: 5)
${YELLOW}--k-labels <n>${RESET} (interactive) k for label-proximity step (default: 3)
${YELLOW}--reach <n>${RESET} (interactive) temporal window in minutes around each EEG point (default: 10)
${YELLOW}--voice <name>${RESET} say: voice name to use (e.g. ${GREEN}Jasper${RESET}); omit to use the server default
${YELLOW}--profile <p>${RESET} calibrate: profile name or UUID to run (default: active profile)
${YELLOW}--image <path>${RESET} llm chat: attach an image (can be repeated: --image a.jpg --image b.png)
${YELLOW}--system "..."${RESET} llm chat: prepend a system prompt (e.g. ${GREEN}"You are a concise EEG assistant."${RESET})
${YELLOW}--temperature <f>${RESET} llm chat: sampling temperature 0–2 (default 0.8; 0 = deterministic)
${YELLOW}--max-tokens <n>${RESET} llm chat: maximum tokens to generate per turn (default 2048)
${YELLOW}--help${RESET} show this help
${YELLOW}--version${RESET} print CLI version and exit
${BOLD}EXAMPLES${RESET}
When parameters are omitted, the CLI auto-selects ranges from your session
history and prints a ${YELLOW}rerun:${RESET} line you can copy-paste for reproducible results.
${BOLD}status${RESET} — device, session, embeddings, scores, sleep
${DIM}$${RESET} npx tsx cli.ts status
${DIM}$${RESET} npx tsx cli.ts status --json
${DIM}$${RESET} npx tsx cli.ts status --json | jq '.scores.focus'
${DIM}$${RESET} npx tsx cli.ts status --port 62853
${DIM}$${RESET} npx tsx cli.ts status --poll 5 ${DIM}# refresh every 5 s${RESET}
${DIM}$${RESET} npx tsx cli.ts status --poll 10 --json ${DIM}# JSON snapshot every 10 s${RESET}
${DIM}# Output:${RESET}
${DIM}# { "command": "status", "ok": true,${RESET}
${DIM}# "device": { "state": "connected", "name": "Muse-A1B2", "battery": 73, ... },${RESET}
${DIM}# "session": { "start_utc": 1740412800, "duration_secs": 1847 },${RESET}
${DIM}# "embeddings": { "today": 342, "total": 14820, "recording_days": 31 },${RESET}
${DIM}# "scores": { "focus": 0.7, "relaxation": 0.4, "engagement": 0.6,${RESET}
${DIM}# "hr": 68.2, "meditation": 0.52, "drowsiness": 0.1, ... },${RESET}
${DIM}# "signal_quality": { "tp9": 0.95, "af7": 0.88, "af8": 0.91, "tp10": 0.97 },${RESET}
${DIM}# "sleep": { "total_epochs": 420, "wake_epochs": 38, "n2_epochs": 210, ... } }${RESET}
${BOLD}sessions${RESET} — list all recordings
${DIM}$${RESET} npx tsx cli.ts sessions
${DIM}$${RESET} npx tsx cli.ts sessions --json | jq '.sessions | length'
${DIM}$${RESET} npx tsx cli.ts sessions --json | jq '.sessions[0]'
${DIM}# Output:${RESET}
${DIM}# 3 session(s)${RESET}
${DIM}# 20260223 2/23/2026, 9:15:00 AM → 10:02:33 AM 47m 33s 570 epochs${RESET}
${DIM}# 20260223 2/23/2026, 2:30:00 PM → 3:12:45 PM 42m 45s 513 epochs${RESET}
${DIM}# 20260224 2/24/2026, 8:00:00 AM → 8:45:10 AM 45m 10s 541 epochs${RESET}
${BOLD}say${RESET} — speak text aloud via on-device TTS (fire-and-forget)
${DIM}$${RESET} npx tsx cli.ts say "Eyes open. Starting calibration."
${DIM}$${RESET} npx tsx cli.ts say "Break time. Next: Eyes Closed." ${YELLOW}--voice Jasper${RESET}
${DIM}$${RESET} npx tsx cli.ts say "Calibration complete." --http
${DIM}$${RESET} npx tsx cli.ts say "Hello!" --json
${DIM}# Output (no --voice):${RESET}
${DIM}# { "command": "say", "ok": true, "spoken": "Eyes open. Starting calibration." }${RESET}
${DIM}# Output (with --voice Jasper):${RESET}
${DIM}# { "command": "say", "ok": true, "spoken": "Break time. Next: Eyes Closed.", "voice": "Jasper" }${RESET}
${DIM}# Note: --voice is optional; omitting it uses the voice last selected in Settings → Voice.${RESET}
${DIM}# Requires espeak-ng on PATH. First run downloads the TTS model (~30 MB).${RESET}
${BOLD}notify${RESET} — send a native OS notification
${DIM}$${RESET} npx tsx cli.ts notify "Session complete"
${DIM}$${RESET} npx tsx cli.ts notify "Focus done" "Take a 5-minute break"
${DIM}# Output:${RESET}
${DIM}# { "command": "notify", "ok": true }${RESET}
${BOLD}label${RESET} — annotate the current EEG moment
${DIM}$${RESET} npx tsx cli.ts label "eyes closed relaxation"
${DIM}$${RESET} npx tsx cli.ts label "meditation start"
${DIM}$${RESET} npx tsx cli.ts label "feeling anxious"
${DIM}$${RESET} npx tsx cli.ts label "breathwork" --context "box breathing 4-4-4-4, 10 min"
${DIM}$${RESET} npx tsx cli.ts label "retrospective note" --at 1740412800
${DIM}# Output:${RESET}
${DIM}# { "command": "label", "ok": true, "label_id": 42 }${RESET}
${BOLD}calibrations${RESET} — manage calibration profiles
${DIM}$${RESET} npx tsx cli.ts calibrations ${DIM}# list all profiles${RESET}
${DIM}$${RESET} npx tsx cli.ts calibrations list ${DIM}# same as above${RESET}
${DIM}$${RESET} npx tsx cli.ts calibrations get 3 ${DIM}# full detail for profile id=3${RESET}
${DIM}$${RESET} npx tsx cli.ts calibrations --json | jq '.profiles[].name'
${DIM}# Output (list):${RESET}
${DIM}# id name actions loop${RESET}
${DIM}# ──────────────────────────────────────────────────────────${RESET}
${DIM}# 1 Eyes Open/Closed 4 2${RESET}
${DIM}# 2 Relaxation 3 1${RESET}
${DIM}# 3 Focus Baseline 5 1${RESET}
${BOLD}calibrate${RESET} — open calibration window and start immediately
${DIM}$${RESET} npx tsx cli.ts calibrate ${DIM}# uses active profile${RESET}
${DIM}$${RESET} npx tsx cli.ts calibrate --profile "Eyes Open/Closed" ${DIM}# by name${RESET}
${DIM}$${RESET} npx tsx cli.ts calibrate --profile default ${DIM}# by id${RESET}
${DIM}$${RESET} npx tsx cli.ts calibrate --json | jq '.profile'
${DIM}# Output:${RESET}
${DIM}# { "command": "run_calibration", "ok": true }${RESET}
${DIM}# Note: requires a Muse headband to be connected and streaming.${RESET}
${BOLD}timer${RESET} — open focus-timer window and auto-start the work phase
${DIM}$${RESET} npx tsx cli.ts timer
${DIM}# Output:${RESET}
${DIM}# { "command": "timer", "ok": true }${RESET}
${DIM}# The timer opens using the last-saved preset (Pomodoro / Deep Work / Short Focus).${RESET}
${BOLD}search-labels${RESET} — semantic label search via fastembed HNSW
${DIM}$${RESET} npx tsx cli.ts search-labels "deep focus"
${DIM}$${RESET} npx tsx cli.ts search-labels "relaxed meditation" --k 10
${DIM}$${RESET} npx tsx cli.ts search-labels "anxiety" --mode context
${DIM}$${RESET} npx tsx cli.ts search-labels "flow state" --mode both --k 5
${DIM}$${RESET} npx tsx cli.ts search-labels "creative work" --json | jq '.results[].text'
${DIM}# Output:${RESET}
${DIM}# ⚡ search-labels "deep focus" (mode: text, k: 10)${RESET}
${DIM}# { "command": "search_labels", "ok": true,${RESET}
${DIM}# "query": "deep focus", "mode": "text", "model": "Xenova/bge-small-en-v1.5",${RESET}
${DIM}# "k": 10, "count": 3,${RESET}
${DIM}# "results": [${RESET}
${DIM}# { "label_id": 7, "text": "focused reading session",${RESET}
${DIM}# "context": "", "distance": 0.12, "similarity": 0.88,${RESET}
${DIM}# "eeg_start": 1740412800, "eeg_end": 1740413100,${RESET}
${DIM}# "created_at": 1740412810, "eeg_metrics": { "focus": 0.74, ... } },${RESET}
${DIM}# ...${RESET}
${DIM}# ] }${RESET}
${BOLD}interactive${RESET} — cross-modal graph search: query → text labels → EEG moments → nearby labels
${DIM}$${RESET} npx tsx cli.ts interactive "deep focus"
${DIM}$${RESET} npx tsx cli.ts interactive "meditation" --k-text 8 --k-eeg 8 --reach 15
${DIM}$${RESET} npx tsx cli.ts interactive "flow state" --json | jq '.nodes | length'
${DIM}$${RESET} npx tsx cli.ts interactive "anxiety" --dot | dot -Tsvg > graph.svg
${DIM}$${RESET} npx tsx cli.ts interactive "stress" --full
${DIM}# Output (default summary):${RESET}
${DIM}# ⚡ interactive "deep focus" (k-text:5, k-eeg:5, k-labels:3, reach:10m)${RESET}
${DIM}# ${RESET}
${DIM}# Graph 7 nodes · 9 edges${RESET}
${DIM}# ──────────────────────────────────────${RESET}
${DIM}# ● query "deep focus"${RESET}
${DIM}# ${RESET}
${DIM}# ◆ Text Labels (2 found)${RESET}
${DIM}# #0 "focused reading session" sim 88% 2/24/2026 8:00 AM${RESET}
${DIM}# #1 "concentration phase" sim 82% 2/23/2026 2:30 PM${RESET}
${DIM}# ${RESET}
${DIM}# ◈ EEG Moments (3 found)${RESET}
${DIM}# #0 2/24/2026, 8:12:45 AM dist 0.023${RESET}
${DIM}# ${RESET}
${DIM}# ◉ Nearby Labels (2 found)${RESET}
${DIM}# #0 "eyes closed" 2/24/2026, 8:13:00 AM 0.8m${RESET}
${DIM}# --dot format:${RESET}
${DIM}# digraph interactive_search { … } (pipe to graphviz)${RESET}
${DIM}# --json format:${RESET}
${DIM}# { "query": "deep focus", "nodes": [...], "edges": [...], "dot": "…" }${RESET}